// ggml.c
// Defines CLOCK_MONOTONIC and asprintf on Linux
#define _GNU_SOURCE

#include "ggml.h"

#if defined(_MSC_VER) || defined(__MINGW32__)
#include <malloc.h> // using malloc.h with MSC/MINGW
#elif !defined(__FreeBSD__) && !defined(__NetBSD__) && !defined(__OpenBSD__)
#include <alloca.h>
#endif

#include <assert.h>
#include <errno.h>
#include <time.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>
#include <float.h>
// if C99 - static_assert is a no-op
// ref: https://stackoverflow.com/a/53923785/4039976
#ifndef static_assert
#define static_assert(cond, msg) struct global_scope_noop_trick
#endif
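// note: expanding static_assert to a struct declaration makes it a harmless
// no-op at global scope on pre-C11 compilers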
#if defined(_MSC_VER) || defined(__MINGW32__)
#if !defined(__MINGW32__)
#include <Windows.h>
#else
// ref: https://github.com/ggerganov/whisper.cpp/issues/168
#include <windows.h>
#endif

typedef volatile LONG atomic_int;
typedef atomic_int atomic_bool;

static void atomic_store(atomic_int* ptr, LONG val) {
    InterlockedExchange(ptr, val);
}
static LONG atomic_load(atomic_int* ptr) {
    return InterlockedCompareExchange(ptr, 0, 0);
}
static LONG atomic_fetch_add(atomic_int* ptr, LONG inc) {
    return InterlockedExchangeAdd(ptr, inc);
}
static LONG atomic_fetch_sub(atomic_int* ptr, LONG dec) {
    return atomic_fetch_add(ptr, -(dec));
}
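// note: the Interlocked* wrappers above emulate just the subset of C11
// <stdatomic.h> that this file uses; the typedefs and functions below
// similarly map the pthreads API onto Win32 threads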
typedef HANDLE pthread_t;

typedef DWORD thread_ret_t;
static int pthread_create(pthread_t* out, void* unused, thread_ret_t(*func)(void*), void* arg) {
    HANDLE handle = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE) func, arg, 0, NULL);
    if (handle == NULL)
    {
        return EAGAIN;
    }

    *out = handle;
    return 0;
}

static int pthread_join(pthread_t thread, void* unused) {
    return (int) WaitForSingleObject(thread, INFINITE);
}

static int sched_yield (void) {
    Sleep (0);
    return 0;
}
#else
#include <pthread.h>
#include <stdatomic.h>

typedef void* thread_ret_t;
#endif
// __FMA__ and __F16C__ are not defined by MSVC; however, they are implied by AVX2/AVX512
#if defined(_MSC_VER) && (defined(__AVX2__) || defined(__AVX512F__))
#ifndef __FMA__
#define __FMA__
#endif
#ifndef __F16C__
#define __F16C__
#endif
#ifndef __SSE3__
#define __SSE3__
#endif
#endif
#ifdef __HAIKU__
#define static_assert(cond, msg) _Static_assert(cond, msg)
#endif

#define GGML_MLOCK_SUPPORT 0

#ifdef __has_include
    #if __has_include(<sys/mman.h>)
        #undef GGML_MLOCK_SUPPORT
        #define GGML_MLOCK_SUPPORT 1
        #include <sys/mman.h>
    #endif
#endif

/*#define GGML_PERF*/
#define GGML_DEBUG 0
#define GGML_GELU_FP16
#define GGML_SILU_FP16

#define GGML_SOFT_MAX_UNROLL 4
#define GGML_VEC_DOT_UNROLL  2

#ifdef GGML_USE_ACCELERATE
// uncomment to use vDSP for soft max computation
// note: not sure if it is actually faster
//#define GGML_SOFT_MAX_ACCELERATE
#endif
#if UINTPTR_MAX == 0xFFFFFFFF
    #define GGML_MEM_ALIGN 4
#else
    #define GGML_MEM_ALIGN 16
#endif

#define UNUSED(x) (void)(x)
#define SWAP(x, y, T) do { T SWAP = x; x = y; y = SWAP; } while (0)

#define GGML_ASSERT(x) \
    do { \
        if (!(x)) { \
            fprintf(stderr, "GGML_ASSERT: %s:%d: %s\n", __FILE__, __LINE__, #x); \
            abort(); \
        } \
    } while (0)
#ifdef GGML_USE_ACCELERATE
#include <Accelerate/Accelerate.h>
#elif defined(GGML_USE_OPENBLAS)
#include <cblas.h>
#endif
#undef MIN
#undef MAX
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

// floating point type used to accumulate sums
typedef double ggml_float;

// 16-bit float
// on Arm, we use __fp16
// on x86, we use uint16_t
#ifdef __ARM_NEON

// if YCM cannot find <arm_neon.h>, make a symbolic link to it, for example:
//
//   $ ln -sfn /Library/Developer/CommandLineTools/usr/lib/clang/13.1.6/include/arm_neon.h ./src/
//
#include <arm_neon.h>

#define GGML_COMPUTE_FP16_TO_FP32(x) ((float) (x))
#define GGML_COMPUTE_FP32_TO_FP16(x) (x)

#define GGML_FP16_TO_FP32(x) ((float) (x))
#define GGML_FP32_TO_FP16(x) (x)

#else

#ifdef __wasm_simd128__
#include <wasm_simd128.h>
#else
#ifdef __POWER9_VECTOR__
#include <altivec.h>
#undef bool
#define bool _Bool
#else
#include <immintrin.h>
#endif
#endif
#ifdef __F16C__

#ifdef _MSC_VER
#define GGML_COMPUTE_FP16_TO_FP32(x) _mm_cvtss_f32(_mm_cvtph_ps(_mm_cvtsi32_si128(x)))
#define GGML_COMPUTE_FP32_TO_FP16(x) _mm_extract_epi16(_mm_cvtps_ph(_mm_set_ss(x), 0), 0)
#else
#define GGML_COMPUTE_FP16_TO_FP32(x) _cvtsh_ss(x)
#define GGML_COMPUTE_FP32_TO_FP16(x) _cvtss_sh(x, 0)
#endif

#elif defined(__POWER9_VECTOR__)

#define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
#define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x)
/* the inline asm below is about 12% faster than the lookup method */
#define GGML_FP16_TO_FP32(x) GGML_COMPUTE_FP16_TO_FP32(x)
#define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x)
static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) {
    register float f;
    register double d;
    __asm__(
        "mtfprd %0,%2\n"
        "xscvhpdp %0,%0\n"
        "frsp %1,%0\n" :
        /* temp */ "=d"(d),
        /* out */  "=f"(f):
        /* in */   "r"(h));
    return f;
}

static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) {
    register double d;
    register ggml_fp16_t r;
    __asm__( /* xscvdphp can work on double or single precision */
        "xscvdphp %0,%2\n"
        "mffprd %1,%0\n" :
        /* temp */ "=d"(d),
        /* out */  "=r"(r):
        /* in */   "f"(f));
    return r;
}
#else

// FP16 <-> FP32
// ref: https://github.com/Maratyszcza/FP16

static inline float fp32_from_bits(uint32_t w) {
    union {
        uint32_t as_bits;
        float as_value;
    } fp32;
    fp32.as_bits = w;
    return fp32.as_value;
}

static inline uint32_t fp32_to_bits(float f) {
    union {
        float as_value;
        uint32_t as_bits;
    } fp32;
    fp32.as_value = f;
    return fp32.as_bits;
}

static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) {
    const uint32_t w = (uint32_t) h << 16;
    const uint32_t sign = w & UINT32_C(0x80000000);
    const uint32_t two_w = w + w;

    const uint32_t exp_offset = UINT32_C(0xE0) << 23;
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)
    const float exp_scale = 0x1.0p-112f;
#else
    const float exp_scale = fp32_from_bits(UINT32_C(0x7800000));
#endif
    const float normalized_value = fp32_from_bits((two_w >> 4) + exp_offset) * exp_scale;

    const uint32_t magic_mask = UINT32_C(126) << 23;
    const float magic_bias = 0.5f;
    const float denormalized_value = fp32_from_bits((two_w >> 17) | magic_mask) - magic_bias;

    const uint32_t denormalized_cutoff = UINT32_C(1) << 27;
    const uint32_t result = sign |
        (two_w < denormalized_cutoff ? fp32_to_bits(denormalized_value) : fp32_to_bits(normalized_value));
    return fp32_from_bits(result);
}
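// example: h = 0x3C00 (the IEEE binary16 encoding of 1.0) yields 1.0f,
// and a half-precision NaN such as 0x7E00 yields a single-precision NaN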
static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) {
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)
    const float scale_to_inf = 0x1.0p+112f;
    const float scale_to_zero = 0x1.0p-110f;
#else
    const float scale_to_inf = fp32_from_bits(UINT32_C(0x77800000));
    const float scale_to_zero = fp32_from_bits(UINT32_C(0x08800000));
#endif
    float base = (fabsf(f) * scale_to_inf) * scale_to_zero;

    const uint32_t w = fp32_to_bits(f);
    const uint32_t shl1_w = w + w;
    const uint32_t sign = w & UINT32_C(0x80000000);
    uint32_t bias = shl1_w & UINT32_C(0xFF000000);
    if (bias < UINT32_C(0x71000000)) {
        bias = UINT32_C(0x71000000);
    }

    base = fp32_from_bits((bias >> 1) + UINT32_C(0x07800000)) + base;
    const uint32_t bits = fp32_to_bits(base);
    const uint32_t exp_bits = (bits >> 13) & UINT32_C(0x00007C00);
    const uint32_t mantissa_bits = bits & UINT32_C(0x00000FFF);
    const uint32_t nonsign = exp_bits + mantissa_bits;
    return (sign >> 16) | (shl1_w > UINT32_C(0xFF000000) ? UINT16_C(0x7E00) : nonsign);
}
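// example: f = 1.0f yields 0x3C00; NaN inputs map to the quiet-NaN encoding 0x7E00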
#define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
#define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x)

#endif // __F16C__

#endif // __ARM_NEON
//
// global data
//

// precomputed gelu table for f16 (128 KB)
static ggml_fp16_t table_gelu_f16[1 << 16];

// precomputed silu table for f16 (128 KB)
static ggml_fp16_t table_silu_f16[1 << 16];

// precomputed exp table for f16 (128 KB)
static ggml_fp16_t table_exp_f16[1 << 16];

// precomputed f32 table for f16 (256 KB)
static float table_f32_f16[1 << 16];
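// note: the tables above are populated once in ggml_init() before first use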
// On ARM NEON, it's quicker to directly convert x -> x instead of calling into ggml_lookup_fp16_to_fp32,
// so we define GGML_FP16_TO_FP32 and GGML_FP32_TO_FP16 elsewhere for NEON.
// This is also true for POWER9.
#if !defined(GGML_FP16_TO_FP32) || !defined(GGML_FP32_TO_FP16)

inline static float ggml_lookup_fp16_to_fp32(ggml_fp16_t f) {
    uint16_t s;
    memcpy(&s, &f, sizeof(uint16_t));
    return table_f32_f16[s];
}

#define GGML_FP16_TO_FP32(x) ggml_lookup_fp16_to_fp32(x)
#define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x)

#endif
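// note: only the fp16 -> fp32 direction is table-driven; a lookup table for
// fp32 -> fp16 would need 2^32 entries, so that direction is always computed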
// note: do not use these inside ggml.c
// these are meant to be used via the ggml.h API
float ggml_fp16_to_fp32(ggml_fp16_t x) {
    return (float) GGML_FP16_TO_FP32(x);
}

ggml_fp16_t ggml_fp32_to_fp16(float x) {
    return GGML_FP32_TO_FP16(x);
}
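// note: fp16 -> fp32 conversion is exact, while fp32 -> fp16 rounds to the
// nearest representable half-precision value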
//
// timing
//

#if defined(_MSC_VER) || defined(__MINGW32__)
static int64_t timer_freq;
void ggml_time_init(void) {
    LARGE_INTEGER frequency;
    QueryPerformanceFrequency(&frequency);
    timer_freq = frequency.QuadPart;
}
int64_t ggml_time_ms(void) {
    LARGE_INTEGER t;
    QueryPerformanceCounter(&t);
    return (t.QuadPart * 1000) / timer_freq;
}
int64_t ggml_time_us(void) {
    LARGE_INTEGER t;
    QueryPerformanceCounter(&t);
    return (t.QuadPart * 1000000) / timer_freq;
}
#else
void ggml_time_init(void) {}
int64_t ggml_time_ms(void) {
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (int64_t)ts.tv_sec*1000 + (int64_t)ts.tv_nsec/1000000;
}

int64_t ggml_time_us(void) {
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (int64_t)ts.tv_sec*1000000 + (int64_t)ts.tv_nsec/1000;
}
#endif
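// note: both backends measure wall-clock time from a monotonic source
// (QueryPerformanceCounter on Windows, CLOCK_MONOTONIC elsewhere)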
int64_t ggml_cycles(void) {
    return clock();
}

int64_t ggml_cycles_per_ms(void) {
    return CLOCKS_PER_SEC/1000;
}

#ifdef GGML_PERF
#define ggml_perf_time_ms()       ggml_time_ms()
#define ggml_perf_time_us()       ggml_time_us()
#define ggml_perf_cycles()        ggml_cycles()
#define ggml_perf_cycles_per_ms() ggml_cycles_per_ms()
#else
#define ggml_perf_time_ms()       0
#define ggml_perf_time_us()       0
#define ggml_perf_cycles()        0
#define ggml_perf_cycles_per_ms() 0
#endif
//
// cache line
//

#if defined(__cpp_lib_hardware_interference_size)
#define CACHE_LINE_SIZE hardware_destructive_interference_size
#else
#if defined(__POWER9_VECTOR__)
#define CACHE_LINE_SIZE 128
#else
#define CACHE_LINE_SIZE 64
#endif
#endif

static const size_t CACHE_LINE_SIZE_F32 = CACHE_LINE_SIZE/sizeof(float);
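// i.e. the number of floats per cache line: 16 for a 64-byte line, 32 on POWER9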
//
// quantization
//

#define QK 32

// AVX routines provided by GH user Const-me
// ref: https://github.com/ggerganov/ggml/pull/27#issuecomment-1464934600
#if defined(__AVX2__) || defined(__AVX512F__)
// Unpack 32 4-bit fields into 32 bytes
// The output vector contains 32 bytes, each one in [ 0 .. 15 ] interval
static inline __m256i bytesFromNibbles( const uint8_t* rsi )
{
    // Load 16 bytes from memory
    __m128i tmp = _mm_loadu_si128( ( const __m128i* )rsi );

    // Expand bytes into uint16_t values
    __m256i bytes = _mm256_cvtepu8_epi16( tmp );

    // Unpack values into individual bytes
    const __m256i lowMask = _mm256_set1_epi8( 0xF );
    __m256i high = _mm256_andnot_si256( lowMask, bytes );
    __m256i low = _mm256_and_si256( lowMask, bytes );
    high = _mm256_slli_epi16( high, 4 );
    bytes = _mm256_or_si256( low, high );
    return bytes;
}

static inline __m128i packNibbles( __m256i bytes )
{
    // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh
    const __m256i lowByte = _mm256_set1_epi16( 0xFF );
    __m256i high = _mm256_andnot_si256( lowByte, bytes );
    __m256i low = _mm256_and_si256( lowByte, bytes );
    high = _mm256_srli_epi16( high, 4 );
    bytes = _mm256_or_si256( low, high );

    // Compress uint16_t lanes into bytes
    __m128i r0 = _mm256_castsi256_si128( bytes );
    __m128i r1 = _mm256_extracti128_si256( bytes, 1 );
    return _mm_packus_epi16( r0, r1 );
}
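// note: packNibbles is effectively the inverse of bytesFromNibbles - packing
// the 32 unpacked nibble values back into 16 bytes recovers the original data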
#endif

// method 5
// blocks of QK elements
// represented with a single float (delta) and QK/2 8-bit ints (i.e. QK 4-bit signed integer factors)
typedef struct {
    float   d; // delta
    uint8_t qs[QK / 2]; // nibbles / quants
} block_q4_0;
static_assert(sizeof(block_q4_0) == sizeof(float) + QK / 2, "wrong q4_0 block size/padding");
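// storage cost: 4 bytes (delta) + 16 bytes (quants) = 20 bytes per 32 elements,
// i.e. 5 bits per weight; a quant q in [0, 15] reconstructs to roughly d * (q - 8)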
// method 4
// blocks of QK elements
// represented with 2 floats (delta + min) and QK/2 8-bit ints (i.e. QK 4-bit unsigned integer factors)
typedef struct {
    float   d;
    float   m;
    uint8_t qs[QK / 2]; // nibbles / quants
} block_q4_1;
static_assert(sizeof(block_q4_1) == sizeof(float) * 2 + QK / 2, "wrong q4_1 block size/padding");
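// storage cost: 8 bytes (delta + min) + 16 bytes (quants) = 24 bytes per 32 elements,
// i.e. 6 bits per weight; a quant q in [0, 15] reconstructs to roughly m + q * d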

// reference implementation for deterministic creation of model files
static void quantize_row_q4_0_reference(const float * restrict x, block_q4_0 * restrict y, int k) {
    assert(k % QK == 0);
    const int nb = k / QK;

    uint8_t pp[QK/2];

    for (int i = 0; i < nb; i++) {
        float amax = 0.0f; // absolute max

        for (int l = 0; l < QK; l++) {
            const float v = x[i*QK + l];
            amax = MAX(amax, fabsf(v));
        }

        const float d  = amax / ((1 << 3) - 1);
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = d;

        for (int l = 0; l < QK; l += 2) {
            const float v0 = x[i*QK + l + 0]*id;
            const float v1 = x[i*QK + l + 1]*id;

            const uint8_t vi0 = (int8_t)roundf(v0) + 8;
            const uint8_t vi1 = (int8_t)roundf(v1) + 8;

            assert(vi0 >= 0 && vi0 < 16);
            assert(vi1 >= 0 && vi1 < 16);

            pp[l/2] = vi0 | (vi1 << 4);
        }

        memcpy(y[i].qs, pp, sizeof(pp));
    }
}
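
// Worked example (illustration): for a block whose largest magnitude is
// amax = 3.5, the scale is d = 3.5/7 = 0.5 and a source value 1.2 is stored
// as roundf(1.2/0.5) + 8 = 2 + 8 = 10; note that 0.0 always maps to the
// nibble 8, and the nibble 0 (i.e. -8) is never produced by this routine.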

static void quantize_row_q4_0(const float * restrict x, void * restrict vy, int k) {
    assert(k % QK == 0);
    const int nb = k / QK;

    block_q4_0 * restrict y = vy;

#if defined(__POWER9_VECTOR__)
    const vector float v85 = vec_splats(8.5f);
    for (int i = 0; i < nb; i++) {
        float amax = 0.0f; // absolute max

        vector float srcv [8];
        vector float asrcv[8];
        vector float amaxv[8];

        for (int l = 0; l < 8; l++) srcv[l]  = *(vector float *)(x + i*32 + 4*l);
        for (int l = 0; l < 8; l++) asrcv[l] = vec_abs(srcv[l]);

        for (int l = 0; l < 4; l++) amaxv[2*l] = vec_max(asrcv[2*l], asrcv[2*l+1]);
        //for (int l = 0; l < 2; l++) amaxv[4*l] = vec_max(amaxv[4*l], amaxv[4*l+2]);
        amaxv[0] = vec_max(amaxv[0], amaxv[2]);
        amaxv[4] = vec_max(amaxv[4], amaxv[6]);
        //for (int l = 0; l < 1; l++) amaxv[8*l] = vec_max(amaxv[8*l], amaxv[8*l+4]);
        amaxv[0] = vec_max(amaxv[0], amaxv[4]);

        amax = MAX(
                MAX(vec_extract(amaxv[0], 0), vec_extract(amaxv[0], 1)),
                MAX(vec_extract(amaxv[0], 2), vec_extract(amaxv[0], 3)));

        const float d  = amax / ((1 << 3) - 1);
        const float id = d ? 1.0/d : 0.0;

        y[i].d = d;

        const vector float vid = vec_splats(id);
        uint8_t * restrict pb = y[i].qs;
        for (int l = 0; l < 8; l++) {
            const vector float vf = vec_madd(srcv[l], vid, v85);
            const vector signed int vi = vec_signed(vf);

            pb[2*l + 0] = vec_extract(vi, 0) | (vec_extract(vi, 1) << 4);
            pb[2*l + 1] = vec_extract(vi, 2) | (vec_extract(vi, 3) << 4);
        }
    }
#elif __ARM_NEON
    for (int i = 0; i < nb; i++) {
        float32x4_t srcv [8];
        float32x4_t asrcv[8];
        float32x4_t amaxv[8];

        for (int l = 0; l < 8; l++) srcv[l]  = vld1q_f32(x + i*32 + 4*l);
        for (int l = 0; l < 8; l++) asrcv[l] = vabsq_f32(srcv[l]);

        for (int l = 0; l < 4; l++) amaxv[2*l] = vmaxq_f32(asrcv[2*l], asrcv[2*l+1]);
        for (int l = 0; l < 2; l++) amaxv[4*l] = vmaxq_f32(amaxv[4*l], amaxv[4*l+2]);
        for (int l = 0; l < 1; l++) amaxv[8*l] = vmaxq_f32(amaxv[8*l], amaxv[8*l+4]);

        // absolute max
        const float amax = MAX(
                MAX(vgetq_lane_f32(amaxv[0], 0), vgetq_lane_f32(amaxv[0], 1)),
                MAX(vgetq_lane_f32(amaxv[0], 2), vgetq_lane_f32(amaxv[0], 3)));

        const float d  = amax / ((1 << 3) - 1);
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = d;

        for (int l = 0; l < 8; l++) {
            const float32x4_t v  = vmulq_n_f32(srcv[l], id);
            // +8.5f so that the truncating conversion below rounds to nearest
            const float32x4_t vf = vaddq_f32(v, vdupq_n_f32(8.5f));
            const int32x4_t   vi = vcvtq_s32_f32(vf);

            y[i].qs[2*l + 0] = vgetq_lane_s32(vi, 0) | (vgetq_lane_s32(vi, 1) << 4);
            y[i].qs[2*l + 1] = vgetq_lane_s32(vi, 2) | (vgetq_lane_s32(vi, 3) << 4);
        }
    }
#elif defined(__AVX2__)
    for (int i = 0; i < nb; i++) {
        // Load elements into 4 AVX vectors
        __m256 v0 = _mm256_loadu_ps( x );
        __m256 v1 = _mm256_loadu_ps( x + 8 );
        __m256 v2 = _mm256_loadu_ps( x + 16 );
        __m256 v3 = _mm256_loadu_ps( x + 24 );
        x += 32;

        // Compute max(abs(e)) for the block
        const __m256 signBit = _mm256_set1_ps( -0.0f );
        __m256 maxAbs = _mm256_andnot_ps( signBit, v0 );
        maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) );
        maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) );
        maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) );

        __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) );
        max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) );
        max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) );
        const float maxScalar = _mm_cvtss_f32( max4 );

        // Quantize these floats
        const float d = maxScalar / 7.0f;
        y[i].d = d;
        const float id = ( maxScalar != 0.0f ) ? 7.0f / maxScalar : 0.0f;
        const __m256 mul = _mm256_set1_ps( id );

        // Apply the multiplier
        v0 = _mm256_mul_ps( v0, mul );
        v1 = _mm256_mul_ps( v1, mul );
        v2 = _mm256_mul_ps( v2, mul );
        v3 = _mm256_mul_ps( v3, mul );

        // Round to nearest integer
        v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST );
        v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST );
        v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST );
        v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST );

        // Convert floats to integers
        __m256i i0 = _mm256_cvtps_epi32( v0 );
        __m256i i1 = _mm256_cvtps_epi32( v1 );
        __m256i i2 = _mm256_cvtps_epi32( v2 );
        __m256i i3 = _mm256_cvtps_epi32( v3 );

        // Convert int32 to int16
        i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3,  8, 9, 10, 11,  4, 5, 6, 7, 12, 13, 14, 15
        i2 = _mm256_packs_epi32( i2, i3 ); // 16, 17, 18, 19,  24, 25, 26, 27,  20, 21, 22, 23, 28, 29, 30, 31
        // Convert int16 to int8
        i0 = _mm256_packs_epi16( i0, i2 ); // 0, 1, 2, 3,  8, 9, 10, 11,  16, 17, 18, 19,  24, 25, 26, 27,  4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31

        // We got our precious signed bytes, but the order is now wrong
        // These AVX2 pack instructions process 16-byte pieces independently
        // The following instruction is fixing the order
        const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 );
        i0 = _mm256_permutevar8x32_epi32( i0, perm );

        // Apply offset to translate the range from [ -7 .. +7 ] into [ +1 .. +15 ]
        const __m256i off = _mm256_set1_epi8( 8 );
        i0 = _mm256_add_epi8( i0, off );

        // Compress the vector into 4 bit/value, and store
        __m128i res = packNibbles( i0 );
        _mm_storeu_si128( ( __m128i* )y[i].qs, res );
    }
#elif defined(__wasm_simd128__)
    for (int i = 0; i < nb; i++) {
        float amax = 0.0f; // absolute max

        v128_t srcv [8];
        v128_t asrcv[8];
        v128_t amaxv[8];

        for (int l = 0; l < 8; l++) srcv[l]  = wasm_v128_load(x + i*32 + 4*l);
        for (int l = 0; l < 8; l++) asrcv[l] = wasm_f32x4_abs(srcv[l]);

        for (int l = 0; l < 4; l++) amaxv[2*l] = wasm_f32x4_max(asrcv[2*l], asrcv[2*l+1]);
        for (int l = 0; l < 2; l++) amaxv[4*l] = wasm_f32x4_max(amaxv[4*l], amaxv[4*l+2]);
        for (int l = 0; l < 1; l++) amaxv[8*l] = wasm_f32x4_max(amaxv[8*l], amaxv[8*l+4]);

        amax = MAX(
                MAX(wasm_f32x4_extract_lane(amaxv[0], 0), wasm_f32x4_extract_lane(amaxv[0], 1)),
                MAX(wasm_f32x4_extract_lane(amaxv[0], 2), wasm_f32x4_extract_lane(amaxv[0], 3)));

        const float d  = amax / ((1 << 3) - 1);
        const float id = d ? 1.0/d : 0.0;

        y[i].d = d;

        for (int l = 0; l < 8; l++) {
            const v128_t v  = wasm_f32x4_mul(srcv[l], wasm_f32x4_splat(id));
            const v128_t vf = wasm_f32x4_add(v, wasm_f32x4_splat(8.5f));
            const v128_t vi = wasm_i32x4_trunc_sat_f32x4(vf);

            y[i].qs[2*l + 0] = wasm_i32x4_extract_lane(vi, 0) | (wasm_i32x4_extract_lane(vi, 1) << 4);
            y[i].qs[2*l + 1] = wasm_i32x4_extract_lane(vi, 2) | (wasm_i32x4_extract_lane(vi, 3) << 4);
        }
    }
#else
    // scalar
    quantize_row_q4_0_reference(x, y, k);
#endif
}
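
// Note on the POWER9/NEON/WASM paths above: instead of rounding and then
// adding the +8 offset, they add 8.5f and truncate; floor(v + 8.5) equals
// round-half-up(v) + 8, which agrees with the roundf()-based reference
// everywhere except on exact .5 ties of negative values, so the SIMD and
// reference quantizers can differ by one step in rare cases.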

static void quantize_row_q4_1_reference(const float * restrict x, void * restrict vy, int k) {
    assert(k % QK == 0);
    const int nb = k / QK;

    block_q4_1 * restrict y = vy;

    uint8_t pp[QK/2];

    for (int i = 0; i < nb; i++) {
        float min = FLT_MAX;
        float max = -FLT_MAX;

        for (int l = 0; l < QK; l++) {
            const float v = x[i*QK + l];
            if (v < min) min = v;
            if (v > max) max = v;
        }

        const float d  = (max - min) / ((1 << 4) - 1);
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = d;
        y[i].m = min;

        for (int l = 0; l < QK; l += 2) {
            const float v0 = (x[i*QK + l + 0] - min)*id;
            const float v1 = (x[i*QK + l + 1] - min)*id;

            const uint8_t vi0 = roundf(v0);
            const uint8_t vi1 = roundf(v1);

            assert(vi0 >= 0 && vi0 < 16);
            assert(vi1 >= 0 && vi1 < 16);

            pp[l/2] = vi0 | (vi1 << 4);
        }

        memcpy(y[i].qs, pp, sizeof(pp));
    }
}

static void quantize_row_q4_1(const float * restrict x, void * restrict vy, int k) {
    assert(k % QK == 0);
    const int nb = k / QK;

    block_q4_1 * restrict y = vy;

#if defined(__AVX2__)
    for (int i = 0; i < nb; i++) {
        // Load elements into 4 AVX vectors
        __m256 v0 = _mm256_loadu_ps( x );
        __m256 v1 = _mm256_loadu_ps( x + 8 );
        __m256 v2 = _mm256_loadu_ps( x + 16 );
        __m256 v3 = _mm256_loadu_ps( x + 24 );
        x += 32;

        // Compute max for the block
        __m256 vmax;
        vmax = _mm256_max_ps( v0, v1 );
        vmax = _mm256_max_ps( vmax, v2 );
        vmax = _mm256_max_ps( vmax, v3 );

        __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( vmax, 1 ), _mm256_castps256_ps128( vmax ) );
        max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) );
        max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) );
        const float maxScalar = _mm_cvtss_f32( max4 );

        // Compute min for the block
        __m256 vmin;
        vmin = _mm256_min_ps( v0, v1 );
        vmin = _mm256_min_ps( vmin, v2 );
        vmin = _mm256_min_ps( vmin, v3 );

        __m128 min4 = _mm_min_ps( _mm256_extractf128_ps( vmin, 1 ), _mm256_castps256_ps128( vmin ) );
        min4 = _mm_min_ps( min4, _mm_movehl_ps( min4, min4 ) );
        min4 = _mm_min_ss( min4, _mm_movehdup_ps( min4 ) );
        const float minScalar = _mm_cvtss_f32( min4 );

        // Quantize these floats
        const float d  = (maxScalar - minScalar) / ((1 << 4) - 1);
        const float id = d ? 1.0f/d : 0.0f;

        y[i].m = minScalar;
        y[i].d = d;

        // x = (x-min)*id
        const __m256 mul = _mm256_set1_ps( id );
        const __m256 off = _mm256_set1_ps( minScalar );
        v0 = _mm256_mul_ps( _mm256_sub_ps( v0, off ), mul );
        v1 = _mm256_mul_ps( _mm256_sub_ps( v1, off ), mul );
        v2 = _mm256_mul_ps( _mm256_sub_ps( v2, off ), mul );
        v3 = _mm256_mul_ps( _mm256_sub_ps( v3, off ), mul );

        // Round to nearest integer
        v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST );
        v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST );
        v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST );
        v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST );

        // Convert floats to integers
        __m256i i0 = _mm256_cvtps_epi32( v0 );
        __m256i i1 = _mm256_cvtps_epi32( v1 );
        __m256i i2 = _mm256_cvtps_epi32( v2 );
        __m256i i3 = _mm256_cvtps_epi32( v3 );

        // Convert int32 to int16
        i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3,  8, 9, 10, 11,  4, 5, 6, 7, 12, 13, 14, 15
        i2 = _mm256_packs_epi32( i2, i3 ); // 16, 17, 18, 19,  24, 25, 26, 27,  20, 21, 22, 23, 28, 29, 30, 31
        // Convert int16 to int8
        i0 = _mm256_packs_epi16( i0, i2 ); // 0, 1, 2, 3,  8, 9, 10, 11,  16, 17, 18, 19,  24, 25, 26, 27,  4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31

        // We got our precious signed bytes, but the order is now wrong
        // These AVX2 pack instructions process 16-byte pieces independently
        // The following instruction is fixing the order
        const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 );
        i0 = _mm256_permutevar8x32_epi32( i0, perm );

        // Compress the vector into 4 bit/value, and store
        __m128i res = packNibbles( i0 );
        _mm_storeu_si128( ( __m128i* )y[i].qs, res );
    }
#elif __ARM_NEON
    for (int i = 0; i < nb; i++) {
        float32x4_t srcv[8];
        float32x4_t minv[8];
        float32x4_t maxv[8];

        for (int l = 0; l < 8; l++) srcv[l] = vld1q_f32(x + i*32 + 4*l);

        for (int l = 0; l < 4; l++) minv[2*l] = vminq_f32(srcv[2*l], srcv[2*l + 1]);
        for (int l = 0; l < 2; l++) minv[4*l] = vminq_f32(minv[4*l], minv[4*l + 2]);
        for (int l = 0; l < 1; l++) minv[8*l] = vminq_f32(minv[8*l], minv[8*l + 4]);

        for (int l = 0; l < 4; l++) maxv[2*l] = vmaxq_f32(srcv[2*l], srcv[2*l + 1]);
        for (int l = 0; l < 2; l++) maxv[4*l] = vmaxq_f32(maxv[4*l], maxv[4*l + 2]);
        for (int l = 0; l < 1; l++) maxv[8*l] = vmaxq_f32(maxv[8*l], maxv[8*l + 4]);

        const float min = vminvq_f32(minv[0]);
        const float max = vmaxvq_f32(maxv[0]);

        const float d  = (max - min) / ((1 << 4) - 1);
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = d;
        y[i].m = min;

        const float32x4_t minv0 = vdupq_n_f32(min);

        for (int l = 0; l < 8; l++) {
            const float32x4_t v  = vmulq_n_f32(vsubq_f32(srcv[l], minv0), id);
            const int32x4_t   vi = vcvtq_s32_f32(v); // note: truncates, while the reference rounds

            y[i].qs[2*l + 0] = vgetq_lane_s32(vi, 0) | (vgetq_lane_s32(vi, 1) << 4);
            y[i].qs[2*l + 1] = vgetq_lane_s32(vi, 2) | (vgetq_lane_s32(vi, 3) << 4);
        }
    }
#else
    // scalar
    quantize_row_q4_1_reference(x, vy, k);
#endif
}
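
// Worked example (illustration): for a block spanning [ -1.0 .. 2.0 ],
// m = -1.0 and d = 3.0/15 = 0.2; a source value 0.5 is stored as
// roundf((0.5 - (-1.0))/0.2) = roundf(7.5) = 8 and dequantizes to
// 8*0.2 + (-1.0) = 0.6, an error of at most d/2 for the rounding paths.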

static void dequantize_row_q4_0(const void * restrict vx, float * restrict y, int k) {
    assert(k % QK == 0);
    const int nb = k / QK;

    const block_q4_0 * restrict x = vx;

#if defined(__AVX2__)
    for (int i = 0; i < nb; i++) {
        // scale factor
        const __m256 d_v = _mm256_broadcast_ss(&x[i].d);

        const uint8_t * restrict pp = x[i].qs;

        for (int l = 0; l < QK; l += 32) {
            // Load 32x4-bit integers into 32x8-bit integers
            __m256i vx8 = bytesFromNibbles(pp+l/2);

            // Subtract 8 from the integers
            vx8 = _mm256_sub_epi8(vx8, _mm256_set1_epi8(8));

            // Convert to 16-bit int
            const __m256i vx16_lo = _mm256_cvtepi8_epi16(_mm256_extracti128_si256(vx8, 0));
            const __m256i vx16_hi = _mm256_cvtepi8_epi16(_mm256_extracti128_si256(vx8, 1));

            // Convert to 32-bit int -> float 32
            const __m256 vf[4] = {
                _mm256_cvtepi32_ps(_mm256_cvtepi16_epi32(_mm256_extracti128_si256(vx16_lo, 0))),
                _mm256_cvtepi32_ps(_mm256_cvtepi16_epi32(_mm256_extracti128_si256(vx16_lo, 1))),
                _mm256_cvtepi32_ps(_mm256_cvtepi16_epi32(_mm256_extracti128_si256(vx16_hi, 0))),
                _mm256_cvtepi32_ps(_mm256_cvtepi16_epi32(_mm256_extracti128_si256(vx16_hi, 1)))
            };

            // Scale and store
            for (int j = 0; j < 4; j++) {
                const __m256 result = _mm256_mul_ps(vf[j], d_v);
                _mm256_storeu_ps(y + i * QK + l + j*8, result);
            }
        }
    }
#elif defined(__ARM_NEON)
    for (int i = 0; i < nb; i++) {
        const float32x4_t vd = vdupq_n_f32(x[i].d);

        const uint8_t * restrict pp = x[i].qs;

        for (int l = 0; l < QK; l += 16) {
            // Load 16x4-bit integers into 8x8-bit integers
            const uint8x8_t v8 = vld1_u8(pp + l/2);

            // Expand 4-bit qs to 8-bit bytes
            const uint8x8_t v0 = vand_u8(v8, vdup_n_u8(0x0f));
            const uint8x8_t v1 = vshr_n_u8(v8, 4);

            // Convert to signed 8-bit integers
            const int8x8_t vs_0 = vreinterpret_s8_u8(v0);
            const int8x8_t vs_1 = vreinterpret_s8_u8(v1);

            // Subtract 8 from each byte
            const int8x8_t vb_0 = vsub_s8(vs_0, vdup_n_s8(8));
            const int8x8_t vb_1 = vsub_s8(vs_1, vdup_n_s8(8));

            // Interleave and combine
            const int8x8_t vx_0 = vzip1_s8(vb_0, vb_1);
            const int8x8_t vx_1 = vzip2_s8(vb_0, vb_1);

            const int8x16_t vq = vcombine_s8(vx_0, vx_1);

            // convert to 2x int16x8_t
            const int16x8_t vi_0 = vmovl_s8(vget_low_s8 (vq));
            const int16x8_t vi_1 = vmovl_s8(vget_high_s8(vq));

            // convert to 4x float32x4_t
            const float32x4_t vf_0 = vcvtq_f32_s32(vmovl_s16(vget_low_s16 (vi_0)));
            const float32x4_t vf_1 = vcvtq_f32_s32(vmovl_s16(vget_high_s16(vi_0)));
            const float32x4_t vf_2 = vcvtq_f32_s32(vmovl_s16(vget_low_s16 (vi_1)));
            const float32x4_t vf_3 = vcvtq_f32_s32(vmovl_s16(vget_high_s16(vi_1)));

            // Multiply by d
            const float32x4_t r0 = vmulq_f32(vf_0, vd);
            const float32x4_t r1 = vmulq_f32(vf_1, vd);
            const float32x4_t r2 = vmulq_f32(vf_2, vd);
            const float32x4_t r3 = vmulq_f32(vf_3, vd);

            // Store
            vst1q_f32(y + i*QK + l +  0, r0);
            vst1q_f32(y + i*QK + l +  4, r1);
            vst1q_f32(y + i*QK + l +  8, r2);
            vst1q_f32(y + i*QK + l + 12, r3);
        }
    }
#else
    // scalar
    for (int i = 0; i < nb; i++) {
        const float d = x[i].d;

        const uint8_t * restrict pp = x[i].qs;

        for (int l = 0; l < QK; l += 2) {
            const uint8_t vi = pp[l/2];

            const int8_t vi0 = vi & 0xf;
            const int8_t vi1 = vi >> 4;

            const float v0 = (vi0 - 8)*d;
            const float v1 = (vi1 - 8)*d;

            //printf("d = %f, vi = %d, vi0 = %d, vi1 = %d, v0 = %f, v1 = %f\n", d, vi, vi0, vi1, v0, v1);

            y[i*QK + l + 0] = v0;
            y[i*QK + l + 1] = v1;

            assert(!isnan(y[i*QK + l + 0]));
            assert(!isnan(y[i*QK + l + 1]));
        }
    }
#endif
}
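
// Round-trip example (illustration): a q4_0 nibble 10 in a block with
// d = 0.5 dequantizes to (10 - 8)*0.5 = 1.0, matching the quantization
// example above.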

static void dequantize_row_q4_1(const void * restrict vx, float * restrict y, int k) {
    assert(k % QK == 0);
    const int nb = k / QK;

    const block_q4_1 * restrict x = vx;

#if defined(__AVX2__)
    for (int i = 0; i < nb; i++) {
        const __m256 d_v = _mm256_broadcast_ss(&x[i].d);
        const __m256 d_m = _mm256_broadcast_ss(&x[i].m);

        const uint8_t * restrict pp = x[i].qs;

        for (int l = 0; l < QK; l += 32) {
            // Load 32x4-bit integers into 32x8-bit integers
            __m256i vx8 = bytesFromNibbles(pp+l/2);

            // Convert to 16-bit int
            const __m256i vx16_lo = _mm256_cvtepi8_epi16(_mm256_extracti128_si256(vx8, 0));
            const __m256i vx16_hi = _mm256_cvtepi8_epi16(_mm256_extracti128_si256(vx8, 1));

            // Convert to 32-bit int -> float 32
            const __m256 vf[4] = {
                _mm256_cvtepi32_ps(_mm256_cvtepi16_epi32(_mm256_extracti128_si256(vx16_lo, 0))),
                _mm256_cvtepi32_ps(_mm256_cvtepi16_epi32(_mm256_extracti128_si256(vx16_lo, 1))),
                _mm256_cvtepi32_ps(_mm256_cvtepi16_epi32(_mm256_extracti128_si256(vx16_hi, 0))),
                _mm256_cvtepi32_ps(_mm256_cvtepi16_epi32(_mm256_extracti128_si256(vx16_hi, 1)))
            };

            // Scale, add m and store
            for (int j = 0; j < 4; j++) {
                const __m256 result = _mm256_add_ps(_mm256_mul_ps(vf[j], d_v), d_m);
                _mm256_storeu_ps(y + i * QK + l + j*8, result);
            }
        }
    }
#elif defined(__ARM_NEON)
    for (int i = 0; i < nb; i++) {
        const float32x4_t vd = vdupq_n_f32(x[i].d);
        const float32x4_t vm = vdupq_n_f32(x[i].m);

        const uint8_t * restrict pp = x[i].qs;

        for (int l = 0; l < QK; l += 16) {
            // Load 16x4-bit integers into 8x8-bit integers
            const uint8x8_t v8 = vld1_u8(pp + l/2);

            // Expand 4-bit qs to 8-bit bytes
            const uint8x8_t v0 = vand_u8(v8, vdup_n_u8(0x0f));
            const uint8x8_t v1 = vshr_n_u8(v8, 4);

            // Interleave and combine
            const uint8x8_t vx_0 = vzip1_u8(v0, v1);
            const uint8x8_t vx_1 = vzip2_u8(v0, v1);

            const uint8x16_t vq = vcombine_u8(vx_0, vx_1);

            // convert to 2x uint16x8_t
            const uint16x8_t vi_0 = vmovl_u8(vget_low_u8 (vq));
            const uint16x8_t vi_1 = vmovl_u8(vget_high_u8(vq));

            // convert to 4x float32x4_t
            const float32x4_t vf_0 = vcvtq_f32_u32(vmovl_u16(vget_low_u16 (vi_0)));
            const float32x4_t vf_1 = vcvtq_f32_u32(vmovl_u16(vget_high_u16(vi_0)));
            const float32x4_t vf_2 = vcvtq_f32_u32(vmovl_u16(vget_low_u16 (vi_1)));
            const float32x4_t vf_3 = vcvtq_f32_u32(vmovl_u16(vget_high_u16(vi_1)));

            // multiply by d and add m
            const float32x4_t r0 = vmlaq_f32(vm, vf_0, vd);
            const float32x4_t r1 = vmlaq_f32(vm, vf_1, vd);
            const float32x4_t r2 = vmlaq_f32(vm, vf_2, vd);
            const float32x4_t r3 = vmlaq_f32(vm, vf_3, vd);

            // Store
            vst1q_f32(y + i*QK + l +  0, r0);
            vst1q_f32(y + i*QK + l +  4, r1);
            vst1q_f32(y + i*QK + l +  8, r2);
            vst1q_f32(y + i*QK + l + 12, r3);
        }
    }
#else
    // scalar
    for (int i = 0; i < nb; i++) {
        const float d = x[i].d;
        const float m = x[i].m;

        const uint8_t * restrict pp = x[i].qs;

        for (int l = 0; l < QK; l += 2) {
            const uint8_t vi = pp[l/2];

            const int8_t vi0 = vi & 0xf;
            const int8_t vi1 = vi >> 4;

            const float v0 = vi0*d + m;
            const float v1 = vi1*d + m;

            y[i*QK + l + 0] = v0;
            y[i*QK + l + 1] = v1;

            assert(!isnan(y[i*QK + l + 0]));
            assert(!isnan(y[i*QK + l + 1]));
        }
    }
#endif
}

//
// simd mappings
//

// we define a common set of C macros which map to specific intrinsics based on the current architecture
// we then implement the fundamental computation operations below using only these macros
// adding support for new architectures requires defining the corresponding SIMD macros
//
// GGML_F32_STEP / GGML_F16_STEP
//   number of elements to process in a single step
//
// GGML_F32_EPR / GGML_F16_EPR
//   number of elements to fit in a single register
//
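
// e.g. on AVX below: GGML_F32_STEP == 32 and GGML_F32_EPR == 8, so each step
// processes GGML_F32_STEP/GGML_F32_EPR == 4 registers of 8 floats, and the
// dot products below keep 4 independent accumulators in flight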

#if defined(__ARM_NEON) && defined(__ARM_FEATURE_FMA)

#define GGML_SIMD

// F32 NEON

#define GGML_F32_STEP 16
#define GGML_F32_EPR  4

#define GGML_F32x4              float32x4_t
#define GGML_F32x4_ZERO         vdupq_n_f32(0.0f)
#define GGML_F32x4_SET1(x)      vdupq_n_f32(x)
#define GGML_F32x4_LOAD         vld1q_f32
#define GGML_F32x4_STORE        vst1q_f32
#define GGML_F32x4_FMA(a, b, c) vfmaq_f32(a, b, c)
#define GGML_F32x4_ADD          vaddq_f32
#define GGML_F32x4_MUL          vmulq_f32
#if defined(__ARM_FEATURE_QRDMX)
    #define GGML_F32x4_REDUCE_ONE(x) vaddvq_f32(x)
#else
    #define GGML_F32x4_REDUCE_ONE(x) \
    (vgetq_lane_f32(x, 0) +          \
     vgetq_lane_f32(x, 1) +          \
     vgetq_lane_f32(x, 2) +          \
     vgetq_lane_f32(x, 3))
#endif
#define GGML_F32x4_REDUCE(res, x)              \
{                                              \
    for (int i = 0; i < GGML_F32_ARR/2; ++i) { \
        x[2*i] = vaddq_f32(x[2*i], x[2*i+1]);  \
    }                                          \
    for (int i = 0; i < GGML_F32_ARR/4; ++i) { \
        x[4*i] = vaddq_f32(x[4*i], x[4*i+2]);  \
    }                                          \
    for (int i = 0; i < GGML_F32_ARR/8; ++i) { \
        x[8*i] = vaddq_f32(x[8*i], x[8*i+4]);  \
    }                                          \
    res = GGML_F32x4_REDUCE_ONE(x[0]);         \
}

#define GGML_F32_VEC        GGML_F32x4
#define GGML_F32_VEC_ZERO   GGML_F32x4_ZERO
#define GGML_F32_VEC_SET1   GGML_F32x4_SET1
#define GGML_F32_VEC_LOAD   GGML_F32x4_LOAD
#define GGML_F32_VEC_STORE  GGML_F32x4_STORE
#define GGML_F32_VEC_FMA    GGML_F32x4_FMA
#define GGML_F32_VEC_ADD    GGML_F32x4_ADD
#define GGML_F32_VEC_MUL    GGML_F32x4_MUL
#define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE

// F16 NEON

#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
    #define GGML_F16_STEP 32
    #define GGML_F16_EPR  8

    #define GGML_F16x8              float16x8_t
    #define GGML_F16x8_ZERO         vdupq_n_f16(0.0f)
    #define GGML_F16x8_SET1(x)      vdupq_n_f16(x)
    #define GGML_F16x8_LOAD         vld1q_f16
    #define GGML_F16x8_STORE        vst1q_f16
    #define GGML_F16x8_FMA(a, b, c) vfmaq_f16(a, b, c)
    #define GGML_F16x8_ADD          vaddq_f16
    #define GGML_F16x8_MUL          vmulq_f16
    #define GGML_F16x8_REDUCE(res, x)                             \
    {                                                             \
        for (int i = 0; i < GGML_F16_ARR/2; ++i) {                \
            x[2*i] = vaddq_f16(x[2*i], x[2*i+1]);                 \
        }                                                         \
        for (int i = 0; i < GGML_F16_ARR/4; ++i) {                \
            x[4*i] = vaddq_f16(x[4*i], x[4*i+2]);                 \
        }                                                         \
        for (int i = 0; i < GGML_F16_ARR/8; ++i) {                \
            x[8*i] = vaddq_f16(x[8*i], x[8*i+4]);                 \
        }                                                         \
        const float32x4_t t0 = vcvt_f32_f16(vget_low_f16 (x[0])); \
        const float32x4_t t1 = vcvt_f32_f16(vget_high_f16(x[0])); \
        res = (ggml_float) vaddvq_f32(vaddq_f32(t0, t1));         \
    }

    #define GGML_F16_VEC                GGML_F16x8
    #define GGML_F16_VEC_ZERO           GGML_F16x8_ZERO
    #define GGML_F16_VEC_SET1           GGML_F16x8_SET1
    #define GGML_F16_VEC_LOAD(p, i)     GGML_F16x8_LOAD(p)
    #define GGML_F16_VEC_STORE(p, r, i) GGML_F16x8_STORE(p, r[i])
    #define GGML_F16_VEC_FMA            GGML_F16x8_FMA
    #define GGML_F16_VEC_ADD            GGML_F16x8_ADD
    #define GGML_F16_VEC_MUL            GGML_F16x8_MUL
    #define GGML_F16_VEC_REDUCE         GGML_F16x8_REDUCE
#else
    // if FP16 vector arithmetic is not supported, we use FP32 instead
    // and take advantage of the vcvt_ functions to convert to/from FP16

    #define GGML_F16_STEP 16
    #define GGML_F16_EPR  4

    #define GGML_F32Cx4              float32x4_t
    #define GGML_F32Cx4_ZERO         vdupq_n_f32(0.0f)
    #define GGML_F32Cx4_SET1(x)      vdupq_n_f32(x)
    #define GGML_F32Cx4_LOAD(x)      vcvt_f32_f16(vld1_f16(x))
    #define GGML_F32Cx4_STORE(x, y)  vst1_f16(x, vcvt_f16_f32(y))
    #define GGML_F32Cx4_FMA(a, b, c) vfmaq_f32(a, b, c)
    #define GGML_F32Cx4_ADD          vaddq_f32
    #define GGML_F32Cx4_MUL          vmulq_f32
    #define GGML_F32Cx4_REDUCE       GGML_F32x4_REDUCE

    #define GGML_F16_VEC                GGML_F32Cx4
    #define GGML_F16_VEC_ZERO           GGML_F32Cx4_ZERO
    #define GGML_F16_VEC_SET1           GGML_F32Cx4_SET1
    #define GGML_F16_VEC_LOAD(p, i)     GGML_F32Cx4_LOAD(p)
    #define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx4_STORE(p, r[i])
    #define GGML_F16_VEC_FMA            GGML_F32Cx4_FMA
    #define GGML_F16_VEC_ADD            GGML_F32Cx4_ADD
    #define GGML_F16_VEC_MUL            GGML_F32Cx4_MUL
    #define GGML_F16_VEC_REDUCE         GGML_F32Cx4_REDUCE
#endif

#elif defined(__AVX__)

#define GGML_SIMD

// F32 AVX

#define GGML_F32_STEP 32
#define GGML_F32_EPR  8

#define GGML_F32x8         __m256
#define GGML_F32x8_ZERO    _mm256_setzero_ps()
#define GGML_F32x8_SET1(x) _mm256_set1_ps(x)
#define GGML_F32x8_LOAD    _mm256_loadu_ps
#define GGML_F32x8_STORE   _mm256_storeu_ps
#if defined(__FMA__)
    #define GGML_F32x8_FMA(a, b, c) _mm256_fmadd_ps(b, c, a)
#else
    #define GGML_F32x8_FMA(a, b, c) _mm256_add_ps(_mm256_mul_ps(b, c), a)
#endif
#define GGML_F32x8_ADD     _mm256_add_ps
#define GGML_F32x8_MUL     _mm256_mul_ps
#define GGML_F32x8_REDUCE(res, x)                                 \
{                                                                 \
    for (int i = 0; i < GGML_F32_ARR/2; ++i) {                    \
        x[2*i] = _mm256_add_ps(x[2*i], x[2*i+1]);                 \
    }                                                             \
    for (int i = 0; i < GGML_F32_ARR/4; ++i) {                    \
        x[4*i] = _mm256_add_ps(x[4*i], x[4*i+2]);                 \
    }                                                             \
    for (int i = 0; i < GGML_F32_ARR/8; ++i) {                    \
        x[8*i] = _mm256_add_ps(x[8*i], x[8*i+4]);                 \
    }                                                             \
    const __m128 t0 = _mm_add_ps(_mm256_castps256_ps128(x[0]),    \
                                 _mm256_extractf128_ps(x[0], 1)); \
    const __m128 t1 = _mm_hadd_ps(t0, t0);                        \
    res = _mm_cvtss_f32(_mm_hadd_ps(t1, t1));                     \
}
// TODO: is this optimal ?

#define GGML_F32_VEC        GGML_F32x8
#define GGML_F32_VEC_ZERO   GGML_F32x8_ZERO
#define GGML_F32_VEC_SET1   GGML_F32x8_SET1
#define GGML_F32_VEC_LOAD   GGML_F32x8_LOAD
#define GGML_F32_VEC_STORE  GGML_F32x8_STORE
#define GGML_F32_VEC_FMA    GGML_F32x8_FMA
#define GGML_F32_VEC_ADD    GGML_F32x8_ADD
#define GGML_F32_VEC_MUL    GGML_F32x8_MUL
#define GGML_F32_VEC_REDUCE GGML_F32x8_REDUCE

// F16 AVX

#define GGML_F16_STEP 32
#define GGML_F16_EPR  8

// F16 arithmetic is not supported by AVX, so we use F32 instead

#define GGML_F32Cx8         __m256
#define GGML_F32Cx8_ZERO    _mm256_setzero_ps()
#define GGML_F32Cx8_SET1(x) _mm256_set1_ps(x)

#if defined(__F16C__)
// the  _mm256_cvt intrinsics require F16C
#define GGML_F32Cx8_LOAD(x)     _mm256_cvtph_ps(_mm_loadu_si128((__m128i *)(x)))
#define GGML_F32Cx8_STORE(x, y) _mm_storeu_si128((__m128i *)(x), _mm256_cvtps_ph(y, 0))
#else
static inline __m256 __avx_f32cx8_load(ggml_fp16_t *x) {
    float tmp[8];

    for (int i = 0; i < 8; i++)
        tmp[i] = GGML_FP16_TO_FP32(x[i]);

    return _mm256_loadu_ps(tmp);
}
static inline void __avx_f32cx8_store(ggml_fp16_t *x, __m256 y) {
    float arr[8];

    _mm256_storeu_ps(arr, y);

    for (int i = 0; i < 8; i++)
        x[i] = GGML_FP32_TO_FP16(arr[i]);
}
#define GGML_F32Cx8_LOAD(x)     __avx_f32cx8_load(x)
#define GGML_F32Cx8_STORE(x, y) __avx_f32cx8_store(x, y)
#endif

#define GGML_F32Cx8_FMA    GGML_F32x8_FMA
#define GGML_F32Cx8_ADD    _mm256_add_ps
#define GGML_F32Cx8_MUL    _mm256_mul_ps
#define GGML_F32Cx8_REDUCE GGML_F32x8_REDUCE

#define GGML_F16_VEC                GGML_F32Cx8
#define GGML_F16_VEC_ZERO           GGML_F32Cx8_ZERO
#define GGML_F16_VEC_SET1           GGML_F32Cx8_SET1
#define GGML_F16_VEC_LOAD(p, i)     GGML_F32Cx8_LOAD(p)
#define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx8_STORE(p, r[i])
#define GGML_F16_VEC_FMA            GGML_F32Cx8_FMA
#define GGML_F16_VEC_ADD            GGML_F32Cx8_ADD
#define GGML_F16_VEC_MUL            GGML_F32Cx8_MUL
#define GGML_F16_VEC_REDUCE         GGML_F32Cx8_REDUCE

#elif defined(__POWER9_VECTOR__)

#define GGML_SIMD

// F32 POWER9

#define GGML_F32_STEP 32
#define GGML_F32_EPR  4

#define GGML_F32x4              vector float
#define GGML_F32x4_ZERO         0.0f
#define GGML_F32x4_SET1         vec_splats
#define GGML_F32x4_LOAD(p)      vec_xl(0, p)
#define GGML_F32x4_STORE(p, r)  vec_xst(r, 0, p)
#define GGML_F32x4_FMA(a, b, c) vec_madd(b, c, a)
#define GGML_F32x4_ADD          vec_add
#define GGML_F32x4_MUL          vec_mul
#define GGML_F32x4_REDUCE(res, x)              \
{                                              \
    for (int i = 0; i < GGML_F32_ARR/2; ++i) { \
        x[2*i] = vec_add(x[2*i], x[2*i+1]);    \
    }                                          \
    for (int i = 0; i < GGML_F32_ARR/4; ++i) { \
        x[4*i] = vec_add(x[4*i], x[4*i+2]);    \
    }                                          \
    for (int i = 0; i < GGML_F32_ARR/8; ++i) { \
        x[8*i] = vec_add(x[8*i], x[8*i+4]);    \
    }                                          \
    res = vec_extract(x[0], 0) +               \
          vec_extract(x[0], 1) +               \
          vec_extract(x[0], 2) +               \
          vec_extract(x[0], 3);                \
}

#define GGML_F32_VEC        GGML_F32x4
#define GGML_F32_VEC_ZERO   GGML_F32x4_ZERO
#define GGML_F32_VEC_SET1   GGML_F32x4_SET1
#define GGML_F32_VEC_LOAD   GGML_F32x4_LOAD
#define GGML_F32_VEC_STORE  GGML_F32x4_STORE
#define GGML_F32_VEC_FMA    GGML_F32x4_FMA
#define GGML_F32_VEC_ADD    GGML_F32x4_ADD
#define GGML_F32_VEC_MUL    GGML_F32x4_MUL
#define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE

// F16 POWER9

#define GGML_F16_STEP       GGML_F32_STEP
#define GGML_F16_EPR        GGML_F32_EPR
#define GGML_F16_VEC        GGML_F32x4
#define GGML_F16_VEC_ZERO   GGML_F32x4_ZERO
#define GGML_F16_VEC_SET1   GGML_F32x4_SET1
#define GGML_F16_VEC_FMA    GGML_F32x4_FMA
#define GGML_F16_VEC_REDUCE GGML_F32x4_REDUCE
// Use vec_xl, not vec_ld, in case the load address is not aligned.
#define GGML_F16_VEC_LOAD(p, i) (i & 0x1) ?                   \
  vec_extract_fp32_from_shorth(vec_xl(0, p - GGML_F16_EPR)) : \
  vec_extract_fp32_from_shortl(vec_xl(0, p))
#define GGML_ENDIAN_BYTE(i) ((unsigned char *)&(uint16_t){1})[i]
#define GGML_F16_VEC_STORE(p, r, i)                             \
  if (i & 0x1)                                                  \
    vec_xst(vec_pack_to_short_fp32(r[i - GGML_ENDIAN_BYTE(1)],  \
                                   r[i - GGML_ENDIAN_BYTE(0)]), \
            0, p - GGML_F16_EPR)

#elif defined(__wasm_simd128__)

#define GGML_SIMD

// F32 WASM

#define GGML_F32_STEP 16
#define GGML_F32_EPR  4

#define GGML_F32x4              v128_t
#define GGML_F32x4_ZERO         wasm_f32x4_splat(0.0f)
#define GGML_F32x4_SET1(x)      wasm_f32x4_splat(x)
#define GGML_F32x4_LOAD         wasm_v128_load
#define GGML_F32x4_STORE        wasm_v128_store
#define GGML_F32x4_FMA(a, b, c) wasm_f32x4_add(wasm_f32x4_mul(b, c), a)
#define GGML_F32x4_ADD          wasm_f32x4_add
#define GGML_F32x4_MUL          wasm_f32x4_mul
#define GGML_F32x4_REDUCE(res, x)                     \
{                                                     \
    for (int i = 0; i < GGML_F32_ARR/2; ++i) {        \
        x[2*i] = wasm_f32x4_add(x[2*i], x[2*i+1]);    \
    }                                                 \
    for (int i = 0; i < GGML_F32_ARR/4; ++i) {        \
        x[4*i] = wasm_f32x4_add(x[4*i], x[4*i+2]);    \
    }                                                 \
    for (int i = 0; i < GGML_F32_ARR/8; ++i) {        \
        x[8*i] = wasm_f32x4_add(x[8*i], x[8*i+4]);    \
    }                                                 \
    res = wasm_f32x4_extract_lane(x[0], 0) +          \
          wasm_f32x4_extract_lane(x[0], 1) +          \
          wasm_f32x4_extract_lane(x[0], 2) +          \
          wasm_f32x4_extract_lane(x[0], 3);           \
}

#define GGML_F32_VEC        GGML_F32x4
#define GGML_F32_VEC_ZERO   GGML_F32x4_ZERO
#define GGML_F32_VEC_SET1   GGML_F32x4_SET1
#define GGML_F32_VEC_LOAD   GGML_F32x4_LOAD
#define GGML_F32_VEC_STORE  GGML_F32x4_STORE
#define GGML_F32_VEC_FMA    GGML_F32x4_FMA
#define GGML_F32_VEC_ADD    GGML_F32x4_ADD
#define GGML_F32_VEC_MUL    GGML_F32x4_MUL
#define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE

// F16 WASM

#define GGML_F16_STEP 16
#define GGML_F16_EPR  4

inline static v128_t __wasm_f16x4_load(const ggml_fp16_t * p) {
    float tmp[4];

    tmp[0] = GGML_FP16_TO_FP32(p[0]);
    tmp[1] = GGML_FP16_TO_FP32(p[1]);
    tmp[2] = GGML_FP16_TO_FP32(p[2]);
    tmp[3] = GGML_FP16_TO_FP32(p[3]);

    return wasm_v128_load(tmp);
}

inline static void __wasm_f16x4_store(ggml_fp16_t * p, v128_t x) {
    float tmp[4];

    wasm_v128_store(tmp, x);

    p[0] = GGML_FP32_TO_FP16(tmp[0]);
    p[1] = GGML_FP32_TO_FP16(tmp[1]);
    p[2] = GGML_FP32_TO_FP16(tmp[2]);
    p[3] = GGML_FP32_TO_FP16(tmp[3]);
}

#define GGML_F16x4             v128_t
#define GGML_F16x4_ZERO        wasm_f32x4_splat(0.0f)
#define GGML_F16x4_SET1(x)     wasm_f32x4_splat(x)
#define GGML_F16x4_LOAD(x)     __wasm_f16x4_load(x)
#define GGML_F16x4_STORE(x, y) __wasm_f16x4_store(x, y)
#define GGML_F16x4_FMA         GGML_F32x4_FMA
#define GGML_F16x4_ADD         wasm_f32x4_add
#define GGML_F16x4_MUL         wasm_f32x4_mul
#define GGML_F16x4_REDUCE(res, x)                  \
{                                                  \
    for (int i = 0; i < GGML_F16_ARR/2; ++i) {     \
        x[2*i] = wasm_f32x4_add(x[2*i], x[2*i+1]); \
    }                                              \
    for (int i = 0; i < GGML_F16_ARR/4; ++i) {     \
        x[4*i] = wasm_f32x4_add(x[4*i], x[4*i+2]); \
    }                                              \
    for (int i = 0; i < GGML_F16_ARR/8; ++i) {     \
        x[8*i] = wasm_f32x4_add(x[8*i], x[8*i+4]); \
    }                                              \
    res = wasm_f32x4_extract_lane(x[0], 0) +       \
          wasm_f32x4_extract_lane(x[0], 1) +       \
          wasm_f32x4_extract_lane(x[0], 2) +       \
          wasm_f32x4_extract_lane(x[0], 3);        \
}

#define GGML_F16_VEC                GGML_F16x4
#define GGML_F16_VEC_ZERO           GGML_F16x4_ZERO
#define GGML_F16_VEC_SET1           GGML_F16x4_SET1
#define GGML_F16_VEC_LOAD(p, i)     GGML_F16x4_LOAD(p)
#define GGML_F16_VEC_STORE(p, r, i) GGML_F16x4_STORE(p, r[i])
#define GGML_F16_VEC_FMA            GGML_F16x4_FMA
#define GGML_F16_VEC_ADD            GGML_F16x4_ADD
#define GGML_F16_VEC_MUL            GGML_F16x4_MUL
#define GGML_F16_VEC_REDUCE         GGML_F16x4_REDUCE

#elif defined(__SSE3__)

#define GGML_SIMD

// F32 SSE

#define GGML_F32_STEP 32
#define GGML_F32_EPR  4

#define GGML_F32x4         __m128
#define GGML_F32x4_ZERO    _mm_setzero_ps()
#define GGML_F32x4_SET1(x) _mm_set1_ps(x)
#define GGML_F32x4_LOAD    _mm_loadu_ps
#define GGML_F32x4_STORE   _mm_storeu_ps
#if defined(__FMA__)
    // TODO: Does this work?
    #define GGML_F32x4_FMA(a, b, c) _mm_fmadd_ps(b, c, a)
#else
    #define GGML_F32x4_FMA(a, b, c) _mm_add_ps(_mm_mul_ps(b, c), a)
#endif
#define GGML_F32x4_ADD     _mm_add_ps
#define GGML_F32x4_MUL     _mm_mul_ps
#define GGML_F32x4_REDUCE(res, x)                  \
{                                                  \
    for (int i = 0; i < GGML_F32_ARR/2; ++i) {     \
        x[2*i] = _mm_add_ps(x[2*i], x[2*i+1]);     \
    }                                              \
    for (int i = 0; i < GGML_F32_ARR/4; ++i) {     \
        x[4*i] = _mm_add_ps(x[4*i], x[4*i+2]);     \
    }                                              \
    for (int i = 0; i < GGML_F32_ARR/8; ++i) {     \
        x[8*i] = _mm_add_ps(x[8*i], x[8*i+4]);     \
    }                                              \
    const __m128 t0 = _mm_hadd_ps(x[0], x[0]);     \
    res = _mm_cvtss_f32(_mm_hadd_ps(t0, t0));      \
}
// TODO: is this optimal ?

#define GGML_F32_VEC        GGML_F32x4
#define GGML_F32_VEC_ZERO   GGML_F32x4_ZERO
#define GGML_F32_VEC_SET1   GGML_F32x4_SET1
#define GGML_F32_VEC_LOAD   GGML_F32x4_LOAD
#define GGML_F32_VEC_STORE  GGML_F32x4_STORE
#define GGML_F32_VEC_FMA    GGML_F32x4_FMA
#define GGML_F32_VEC_ADD    GGML_F32x4_ADD
#define GGML_F32_VEC_MUL    GGML_F32x4_MUL
#define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE

// F16 SSE

#define GGML_F16_STEP 32
#define GGML_F16_EPR  4

static inline __m128 __sse_f16x4_load(ggml_fp16_t *x) {
    float tmp[4];

    tmp[0] = GGML_FP16_TO_FP32(x[0]);
    tmp[1] = GGML_FP16_TO_FP32(x[1]);
    tmp[2] = GGML_FP16_TO_FP32(x[2]);
    tmp[3] = GGML_FP16_TO_FP32(x[3]);

    return _mm_loadu_ps(tmp);
}

static inline void __sse_f16x4_store(ggml_fp16_t *x, __m128 y) {
    float arr[4];

    _mm_storeu_ps(arr, y);

    x[0] = GGML_FP32_TO_FP16(arr[0]);
    x[1] = GGML_FP32_TO_FP16(arr[1]);
    x[2] = GGML_FP32_TO_FP16(arr[2]);
    x[3] = GGML_FP32_TO_FP16(arr[3]);
}

#define GGML_F32Cx4             __m128
#define GGML_F32Cx4_ZERO        _mm_setzero_ps()
#define GGML_F32Cx4_SET1(x)     _mm_set1_ps(x)
#define GGML_F32Cx4_LOAD(x)     __sse_f16x4_load(x)
#define GGML_F32Cx4_STORE(x, y) __sse_f16x4_store(x, y)
#define GGML_F32Cx4_FMA         GGML_F32x4_FMA
#define GGML_F32Cx4_ADD         _mm_add_ps
#define GGML_F32Cx4_MUL         _mm_mul_ps
#define GGML_F32Cx4_REDUCE      GGML_F32x4_REDUCE

#define GGML_F16_VEC                GGML_F32Cx4
#define GGML_F16_VEC_ZERO           GGML_F32Cx4_ZERO
#define GGML_F16_VEC_SET1           GGML_F32Cx4_SET1
#define GGML_F16_VEC_LOAD(p, i)     GGML_F32Cx4_LOAD(p)
#define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx4_STORE(p, r[i])
#define GGML_F16_VEC_FMA            GGML_F32Cx4_FMA
#define GGML_F16_VEC_ADD            GGML_F32Cx4_ADD
#define GGML_F16_VEC_MUL            GGML_F32Cx4_MUL
#define GGML_F16_VEC_REDUCE         GGML_F32Cx4_REDUCE

#endif

// GGML_F32_ARR / GGML_F16_ARR
//   number of registers to use per step
#ifdef GGML_SIMD
#define GGML_F32_ARR (GGML_F32_STEP/GGML_F32_EPR)
#define GGML_F16_ARR (GGML_F16_STEP/GGML_F16_EPR)
#endif

//
// fundamental operations
//

inline static void ggml_vec_set_i8 (const int n, int8_t * x, const int8_t v)      { for (int i = 0; i < n; ++i) x[i] = v; }
inline static void ggml_vec_set_i16(const int n, int16_t * x, const int16_t v)    { for (int i = 0; i < n; ++i) x[i] = v; }
inline static void ggml_vec_set_i32(const int n, int32_t * x, const int32_t v)    { for (int i = 0; i < n; ++i) x[i] = v; }
inline static void ggml_vec_set_f16(const int n, ggml_fp16_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; }

inline static void ggml_vec_add_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i] + y[i]; }
inline static void ggml_vec_acc_f32 (const int n, float * y, const float * x)                  { for (int i = 0; i < n; ++i) y[i] += x[i];       }
inline static void ggml_vec_acc1_f32(const int n, float * y, const float   v)                  { for (int i = 0; i < n; ++i) y[i] += v;          }
inline static void ggml_vec_sub_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i] - y[i]; }
inline static void ggml_vec_set_f32 (const int n, float * x, const float   v)                  { for (int i = 0; i < n; ++i) x[i] = v;           }
inline static void ggml_vec_cpy_f32 (const int n, float * y, const float * x)                  { for (int i = 0; i < n; ++i) y[i] = x[i];        }
inline static void ggml_vec_neg_f32 (const int n, float * y, const float * x)                  { for (int i = 0; i < n; ++i) y[i] = -x[i];       }
inline static void ggml_vec_mul_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]*y[i];   }
inline static void ggml_vec_div_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]/y[i];   }

inline static void ggml_vec_dot_f32(const int n, float * restrict s, const float * restrict x, const float * restrict y) {
#ifdef GGML_SIMD
    float sumf = 0.0f;
    const int np = (n & ~(GGML_F32_STEP - 1));

    GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO };

    GGML_F32_VEC ax[GGML_F32_ARR];
    GGML_F32_VEC ay[GGML_F32_ARR];

    for (int i = 0; i < np; i += GGML_F32_STEP) {
        for (int j = 0; j < GGML_F32_ARR; j++) {
            ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR);
            ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);

            sum[j] = GGML_F32_VEC_FMA(sum[j], ax[j], ay[j]);
        }
    }

    // reduce sum0..sum3 to sum0
    GGML_F32_VEC_REDUCE(sumf, sum);

    // leftovers
    for (int i = np; i < n; ++i) {
        sumf += x[i]*y[i];
    }
#else
    // scalar
    ggml_float sumf = 0.0;
    for (int i = 0; i < n; ++i) {
        sumf += (ggml_float)(x[i]*y[i]);
    }
#endif

    *s = sumf;
}
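
// Usage sketch (hypothetical caller, for illustration only):
//
//     float dot;
//     float a[100], b[100];
//     // ... fill a and b ...
//     ggml_vec_dot_f32(100, &dot, a, b); // dot = sum_i a[i]*b[i]
//
// n does not need to be a multiple of GGML_F32_STEP; the leftover loop above
// handles the tail scalars.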

#if __AVX512F__ && QK == 32
static inline __m512 dot_q4_0_oneblock_avx512(
    __m512 acc,
    const block_q4_0 * restrict x,
    const block_q4_0 * restrict y,
    int i
) {
    // Compute combined scale for the block
    __m512 d = _mm512_set1_ps( x[i].d * y[i].d );

    __m256i bx = bytesFromNibbles( x[i].qs );
    __m256i by = bytesFromNibbles( y[i].qs );

    // Now we have a vector with bytes in [ 0 .. 15 ] interval. Offset them into [ -8 .. +7 ] interval.
    const __m256i off = _mm256_set1_epi8( 8 );
    bx = _mm256_sub_epi8( bx, off );
    by = _mm256_sub_epi8( by, off );

    // Sign-extend 32 signed bytes into int16_t
    __m512i x32 = _mm512_cvtepi8_epi16( bx );
    __m512i y32 = _mm512_cvtepi8_epi16( by );

    // Compute products of int16_t integers, add pairwise
    __m512i i64 = _mm512_madd_epi16( x32, y32 );

    // Convert int32_t to float
    __m512 p = _mm512_cvtepi32_ps( i64 );

    // Apply the scale, and accumulate
    return _mm512_fmadd_ps( d, p, acc );
}
#endif

inline static void ggml_vec_dot_f16(const int n, float * restrict s, ggml_fp16_t * restrict x, ggml_fp16_t * restrict y) {
    ggml_float sumf = 0.0;

#if defined(GGML_SIMD)
    const int np = (n & ~(GGML_F16_STEP - 1));

    GGML_F16_VEC sum[GGML_F16_ARR] = { GGML_F16_VEC_ZERO };

    GGML_F16_VEC ax[GGML_F16_ARR];
    GGML_F16_VEC ay[GGML_F16_ARR];

    for (int i = 0; i < np; i += GGML_F16_STEP) {
        for (int j = 0; j < GGML_F16_ARR; j++) {
            ax[j] = GGML_F16_VEC_LOAD(x + i + j*GGML_F16_EPR, j);
            ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j);

            sum[j] = GGML_F16_VEC_FMA(sum[j], ax[j], ay[j]);
        }
    }

    // reduce sum0..sum3 to sum0
    GGML_F16_VEC_REDUCE(sumf, sum);

    // leftovers
    for (int i = np; i < n; ++i) {
        sumf += (ggml_float)(GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i]));
    }
#else
    for (int i = 0; i < n; ++i) {
        sumf += (ggml_float)(GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i]));
    }
#endif

    *s = sumf;
}
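
// note: except on ARM with native FP16 vector arithmetic, the F16 "vectors"
// above are really f32 registers and only the loads/stores touch half
// precision, so this dot product accumulates in f32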

static void ggml_vec_dot_q4_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
    const int nb = n / QK;

    assert(n % QK == 0);
    assert(nb % 2 == 0);

    const block_q4_0 * restrict x = vx;
    const block_q4_0 * restrict y = vy;

    ggml_float sumf = 0.0;

#if defined(__ARM_NEON)
    float sum0 = 0.0f;
    float sum1 = 0.0f;

    for (int i = 0; i < nb; i += 2) {
        const block_q4_0 * restrict x0 = &x[i + 0];
        const block_q4_0 * restrict y0 = &y[i + 0];
        const block_q4_0 * restrict x1 = &x[i + 1];
        const block_q4_0 * restrict y1 = &y[i + 1];

        const uint8x16_t m4b = vdupq_n_u8(0xf);
        const int8x16_t  s8b = vdupq_n_s8(0x8);

        const uint8x16_t v0_0 = vld1q_u8(x0->qs);
        const uint8x16_t v1_0 = vld1q_u8(y0->qs);
        const uint8x16_t v0_1 = vld1q_u8(x1->qs);
        const uint8x16_t v1_1 = vld1q_u8(y1->qs);

        // 4-bit -> 8-bit
        const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8(v0_0, m4b));
        const int8x16_t v1_0l = vreinterpretq_s8_u8(vandq_u8(v1_0, m4b));

        const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
        const int8x16_t v1_0h = vreinterpretq_s8_u8(vshrq_n_u8(v1_0, 4));

        const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8(v0_1, m4b));
        const int8x16_t v1_1l = vreinterpretq_s8_u8(vandq_u8(v1_1, m4b));

        const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
        const int8x16_t v1_1h = vreinterpretq_s8_u8(vshrq_n_u8(v1_1, 4));

        // sub 8
        const int8x16_t v0_0ls = vsubq_s8(v0_0l, s8b);
        const int8x16_t v1_0ls = vsubq_s8(v1_0l, s8b);

        const int8x16_t v0_0hs = vsubq_s8(v0_0h, s8b);
        const int8x16_t v1_0hs = vsubq_s8(v1_0h, s8b);

        const int8x16_t v0_1ls = vsubq_s8(v0_1l, s8b);
        const int8x16_t v1_1ls = vsubq_s8(v1_1l, s8b);

        const int8x16_t v0_1hs = vsubq_s8(v0_1h, s8b);
        const int8x16_t v1_1hs = vsubq_s8(v1_1h, s8b);

#if defined(__ARM_FEATURE_DOTPROD)
        // dot product into int32x4_t
        int32x4_t p_0 = vdotq_s32(vdupq_n_s32(0), v0_0ls, v1_0ls);
        int32x4_t p_1 = vdotq_s32(vdupq_n_s32(0), v0_1ls, v1_1ls);

        p_0 = vdotq_s32(p_0, v0_0hs, v1_0hs);
        p_1 = vdotq_s32(p_1, v0_1hs, v1_1hs);

        // scalar
#if defined(__ARM_FEATURE_QRDMX)
        sum0 += x0->d * y0->d * vaddvq_s32(p_0);
        sum1 += x1->d * y1->d * vaddvq_s32(p_1);
#else
        sum0 += x0->d * y0->d * (vgetq_lane_s32(p_0, 0) + vgetq_lane_s32(p_0, 1) + vgetq_lane_s32(p_0, 2) + vgetq_lane_s32(p_0, 3));
        sum1 += x1->d * y1->d * (vgetq_lane_s32(p_1, 0) + vgetq_lane_s32(p_1, 1) + vgetq_lane_s32(p_1, 2) + vgetq_lane_s32(p_1, 3));
#endif
#else
        const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0ls), vget_low_s8 (v1_0ls));
        const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0ls), vget_high_s8(v1_0ls));

        const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hs), vget_low_s8 (v1_0hs));
        const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hs), vget_high_s8(v1_0hs));

        const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1ls), vget_low_s8 (v1_1ls));
        const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1ls), vget_high_s8(v1_1ls));

        const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1hs), vget_low_s8 (v1_1hs));
        const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1hs), vget_high_s8(v1_1hs));

        const int16x8_t pl_0 = vaddq_s16(pl0l, pl0h);
        const int16x8_t ph_0 = vaddq_s16(ph0l, ph0h);

        const int16x8_t pl_1 = vaddq_s16(pl1l, pl1h);
        const int16x8_t ph_1 = vaddq_s16(ph1l, ph1h);

        const int16x8_t p_0 = vaddq_s16(pl_0, ph_0);
        const int16x8_t p_1 = vaddq_s16(pl_1, ph_1);

        // scalar
#if defined(__ARM_FEATURE_QRDMX)
        sum0 += x0->d * y0->d * vaddvq_s16(p_0);
        sum1 += x1->d * y1->d * vaddvq_s16(p_1);
#else
        sum0 += x0->d * y0->d * (vgetq_lane_s16(p_0, 0) + vgetq_lane_s16(p_0, 1) + vgetq_lane_s16(p_0, 2) + vgetq_lane_s16(p_0, 3) + vgetq_lane_s16(p_0, 4) + vgetq_lane_s16(p_0, 5) + vgetq_lane_s16(p_0, 6) + vgetq_lane_s16(p_0, 7));
        sum1 += x1->d * y1->d * (vgetq_lane_s16(p_1, 0) + vgetq_lane_s16(p_1, 1) + vgetq_lane_s16(p_1, 2) + vgetq_lane_s16(p_1, 3) + vgetq_lane_s16(p_1, 4) + vgetq_lane_s16(p_1, 5) + vgetq_lane_s16(p_1, 6) + vgetq_lane_s16(p_1, 7));
#endif
#endif
    }

    sumf = (ggml_float)(sum0 + sum1);
#elif defined(__AVX512F__)
    // Initialize accumulator with zeros
    __m512 acc0 = _mm512_setzero_ps();
    __m512 acc1 = _mm512_setzero_ps();

    const int superblock_size = 8;
    const int superblock_count = nb / superblock_size;

    for (int superblock_ix = 0; superblock_ix < superblock_count; superblock_ix += 1) {
        int i = superblock_ix * superblock_size;

        acc0 = dot_q4_0_oneblock_avx512( acc0, x, y, i+0 );
        acc1 = dot_q4_0_oneblock_avx512( acc1, x, y, i+1 );
        acc0 = dot_q4_0_oneblock_avx512( acc0, x, y, i+2 );
        acc1 = dot_q4_0_oneblock_avx512( acc1, x, y, i+3 );
        acc0 = dot_q4_0_oneblock_avx512( acc0, x, y, i+4 );
        acc1 = dot_q4_0_oneblock_avx512( acc1, x, y, i+5 );
        acc0 = dot_q4_0_oneblock_avx512( acc0, x, y, i+6 );
        acc1 = dot_q4_0_oneblock_avx512( acc1, x, y, i+7 );
    }

    // Remainders
    for (int i = superblock_count * superblock_size; i < nb; ++i) {
        acc0 = dot_q4_0_oneblock_avx512( acc0, x, y, i );
    }

    // Horizontal sum of all lanes of the accumulator
    sumf = _mm512_reduce_add_ps( acc0 ) + _mm512_reduce_add_ps( acc1 );
#elif defined(__AVX2__)
    // Initialize accumulator with zeros
    __m256 acc = _mm256_setzero_ps();

    // Main loop
    for (int i = 0; i < nb; ++i) {
        // Compute combined scale for the block
        const __m256 d = _mm256_mul_ps( _mm256_broadcast_ss( &x[i].d ), _mm256_broadcast_ss( &y[i].d ) );

        // Load 16 bytes, and unpack 4 bit fields into bytes, making 32 bytes
        __m256i bx = bytesFromNibbles( x[i].qs );
        __m256i by = bytesFromNibbles( y[i].qs );

        // Now we have a vector with bytes in [ 0 .. 15 ] interval. Offset them into [ -8 .. +7 ] interval.
        const __m256i off = _mm256_set1_epi8( 8 );
        bx = _mm256_sub_epi8( bx, off );
        by = _mm256_sub_epi8( by, off );

        // Sign-extend first 16 signed bytes into int16_t
        __m256i x16 = _mm256_cvtepi8_epi16( _mm256_castsi256_si128( bx ) );
        __m256i y16 = _mm256_cvtepi8_epi16( _mm256_castsi256_si128( by ) );
        // Compute products of int16_t integers, add pairwise
        __m256i i32 = _mm256_madd_epi16( x16, y16 );

        // Sign-extend last 16 signed bytes into int16_t vectors
        x16 = _mm256_cvtepi8_epi16( _mm256_extracti128_si256( bx, 1 ) );
        y16 = _mm256_cvtepi8_epi16( _mm256_extracti128_si256( by, 1 ) );
        // Accumulate products of int16_t integers
        i32 = _mm256_add_epi32( i32, _mm256_madd_epi16( x16, y16 ) );

        // Convert int32_t to float
        __m256 p = _mm256_cvtepi32_ps( i32 );
        // Apply the scale, and accumulate
        acc = _mm256_fmadd_ps( d, p, acc );
    }

    // Return horizontal sum of the acc vector
    __m128 res = _mm256_extractf128_ps( acc, 1 );
    res = _mm_add_ps( res, _mm256_castps256_ps128( acc ) );
    res = _mm_add_ps( res, _mm_movehl_ps( res, res ) );
    res = _mm_add_ss( res, _mm_movehdup_ps( res ) );

    sumf = _mm_cvtss_f32( res );
#elif defined(__wasm_simd128__)
    // wasm simd
    float sum0 = 0.0f;
    float sum1 = 0.0f;

    for (int i = 0; i < nb; i += 2) {
        const block_q4_0 * restrict x0 = &x[i + 0];
        const block_q4_0 * restrict y0 = &y[i + 0];
        const block_q4_0 * restrict x1 = &x[i + 1];
        const block_q4_0 * restrict y1 = &y[i + 1];

        const v128_t m4b = wasm_u8x16_splat(0xf);
        const v128_t s8b = wasm_i8x16_splat(0x8);

        const v128_t v0_0 = wasm_v128_load(x0->qs);
        const v128_t v1_0 = wasm_v128_load(y0->qs);
        const v128_t v0_1 = wasm_v128_load(x1->qs);
        const v128_t v1_1 = wasm_v128_load(y1->qs);

        // 4-bit -> 8-bit
        const v128_t v0_0l = wasm_v128_and(v0_0, m4b);
        const v128_t v1_0l = wasm_v128_and(v1_0, m4b);

        const v128_t v0_0h = wasm_u8x16_shr(v0_0, 4);
        const v128_t v1_0h = wasm_u8x16_shr(v1_0, 4);

        const v128_t v0_1l = wasm_v128_and(v0_1, m4b);
        const v128_t v1_1l = wasm_v128_and(v1_1, m4b);

        const v128_t v0_1h = wasm_u8x16_shr(v0_1, 4);
        const v128_t v1_1h = wasm_u8x16_shr(v1_1, 4);

        // sub 8
        const v128_t v0_0ls = wasm_i8x16_sub(v0_0l, s8b);
        const v128_t v1_0ls = wasm_i8x16_sub(v1_0l, s8b);

        const v128_t v0_0hs = wasm_i8x16_sub(v0_0h, s8b);
        const v128_t v1_0hs = wasm_i8x16_sub(v1_0h, s8b);

        const v128_t v0_1ls = wasm_i8x16_sub(v0_1l, s8b);
        const v128_t v1_1ls = wasm_i8x16_sub(v1_1l, s8b);

        const v128_t v0_1hs = wasm_i8x16_sub(v0_1h, s8b);
        const v128_t v1_1hs = wasm_i8x16_sub(v1_1h, s8b);

        // dot product into int16x8_t
        const v128_t pl0l = wasm_i16x8_mul(wasm_i16x8_extend_low_i8x16 (v0_0ls), wasm_i16x8_extend_low_i8x16 (v1_0ls));
        const v128_t pl0h = wasm_i16x8_mul(wasm_i16x8_extend_high_i8x16(v0_0ls), wasm_i16x8_extend_high_i8x16(v1_0ls));

        const v128_t ph0l = wasm_i16x8_mul(wasm_i16x8_extend_low_i8x16 (v0_0hs), wasm_i16x8_extend_low_i8x16 (v1_0hs));
        const v128_t ph0h = wasm_i16x8_mul(wasm_i16x8_extend_high_i8x16(v0_0hs), wasm_i16x8_extend_high_i8x16(v1_0hs));

        const v128_t pl1l = wasm_i16x8_mul(wasm_i16x8_extend_low_i8x16 (v0_1ls), wasm_i16x8_extend_low_i8x16 (v1_1ls));
        const v128_t pl1h = wasm_i16x8_mul(wasm_i16x8_extend_high_i8x16(v0_1ls), wasm_i16x8_extend_high_i8x16(v1_1ls));

        const v128_t ph1l = wasm_i16x8_mul(wasm_i16x8_extend_low_i8x16 (v0_1hs), wasm_i16x8_extend_low_i8x16 (v1_1hs));
        const v128_t ph1h = wasm_i16x8_mul(wasm_i16x8_extend_high_i8x16(v0_1hs), wasm_i16x8_extend_high_i8x16(v1_1hs));

        const v128_t pl_0 = wasm_i16x8_add(pl0l, pl0h);
        const v128_t ph_0 = wasm_i16x8_add(ph0l, ph0h);

        const v128_t pl_1 = wasm_i16x8_add(pl1l, pl1h);
        const v128_t ph_1 = wasm_i16x8_add(ph1l, ph1h);

        const v128_t p_0 = wasm_i16x8_add(pl_0, ph_0);
        const v128_t p_1 = wasm_i16x8_add(pl_1, ph_1);

        sum0 += x0->d * y0->d * (
                wasm_i16x8_extract_lane(p_0, 0) + wasm_i16x8_extract_lane(p_0, 1) +
                wasm_i16x8_extract_lane(p_0, 2) + wasm_i16x8_extract_lane(p_0, 3) +
                wasm_i16x8_extract_lane(p_0, 4) + wasm_i16x8_extract_lane(p_0, 5) +
                wasm_i16x8_extract_lane(p_0, 6) + wasm_i16x8_extract_lane(p_0, 7));
        sum1 += x1->d * y1->d * (
                wasm_i16x8_extract_lane(p_1, 0) + wasm_i16x8_extract_lane(p_1, 1) +
                wasm_i16x8_extract_lane(p_1, 2) + wasm_i16x8_extract_lane(p_1, 3) +
                wasm_i16x8_extract_lane(p_1, 4) + wasm_i16x8_extract_lane(p_1, 5) +
                wasm_i16x8_extract_lane(p_1, 6) + wasm_i16x8_extract_lane(p_1, 7));
    }

    sumf = sum0 + sum1;
#else
    // scalar
    for (int i = 0; i < nb; i++) {
        const float d0 = x[i].d;
        const float d1 = y[i].d;

        const uint8_t * restrict p0 = x[i].qs;
        const uint8_t * restrict p1 = y[i].qs;

        for (int j = 0; j < QK/2; j++) {
            const uint8_t v0 = p0[j];
            const uint8_t v1 = p1[j];

            const float f0 = d0*((int8_t) (v0 & 0xf) - 8);
            const float f1 = d0*((int8_t) (v0 >> 4) - 8);

            const float f2 = d1*((int8_t) (v1 & 0xf) - 8);
            const float f3 = d1*((int8_t) (v1 >> 4) - 8);

            sumf += f0*f2 + f1*f3;
        }
    }
#endif

    *s = sumf;
}
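
// Scalar path arithmetic (illustration): each term expands to
// (d0*(xi - 8)) * (d1*(yi - 8)) for nibbles xi, yi in [ 0 .. 15 ]; e.g. with
// d0 = d1 = 0.5 and nibbles 10 and 6, the contribution is
// (0.5*2) * (0.5*(-2)) = -1.0.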

static void ggml_vec_dot_q4_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
    const int nb = n / QK;

    const block_q4_1 * restrict x = vx;
    const block_q4_1 * restrict y = vy;

    float sumf = 0.0;

#if defined(__AVX2__)
    // Initialize accumulator with zeros
    __m256 acc = _mm256_setzero_ps();
    // Accumulator for constant offsets
    float acc_offset = 0.0f;

    // Main loop
    for (int i = 0; i < nb; ++i) {
        const float * d0 = &x[i].d;
        const float * d1 = &y[i].d;

        const float * m0 = &x[i].m;
        const float * m1 = &y[i].m;

        const __m256 d0v = _mm256_broadcast_ss( d0 );
        const __m256 d1v = _mm256_broadcast_ss( d1 );
        const __m256 m0v = _mm256_broadcast_ss( m0 );
        const __m256 m1v = _mm256_broadcast_ss( m1 );

        // Compute combined scale for the block
        const __m256 scale_01 = _mm256_mul_ps( d0v, d1v );

        // Compute cross scales for the block
        const __m256 scale_0 = _mm256_mul_ps( d0v, m1v );
        const __m256 scale_1 = _mm256_mul_ps( m0v, d1v );
        const __m256 cross_scales = _mm256_blend_ps( scale_0, scale_1, 0xAA /* 0b10101010 */ );

        // Load 16 bytes, and unpack 4 bit fields into bytes, making 32 bytes
        __m256i bx = bytesFromNibbles( x[i].qs );
        __m256i by = bytesFromNibbles( y[i].qs );

        // Now we have a vector with bytes in [ 0 .. 15 ] interval.

        // Sign-extend first 16 signed bytes into int16_t
        __m256i x16 = _mm256_cvtepi8_epi16( _mm256_castsi256_si128( bx ) );
        __m256i y16 = _mm256_cvtepi8_epi16( _mm256_castsi256_si128( by ) );
        // Compute products of int16_t integers, add pairwise
        __m256i i32 = _mm256_madd_epi16( x16, y16 );

        // Sign-extend last 16 signed bytes into int16_t vectors
        __m256i x16_h = _mm256_cvtepi8_epi16( _mm256_extracti128_si256( bx, 1 ) );
        __m256i y16_h = _mm256_cvtepi8_epi16( _mm256_extracti128_si256( by, 1 ) );
        // Accumulate products of int16_t integers
        i32 = _mm256_add_epi32( i32, _mm256_madd_epi16( x16_h, y16_h ) );

        // compute sums of unsigned bytes in bx, by in blocks of 8.
        // This results in a layout like X100 0000 X200 0000 X300 0000 X400 0000,
        // which we then interleave as X100 Y100 X200 Y200 X300 Y300 X400 Y400.
        // so if we then cast to 8 singles, we get 8 floats like [ x0_7, y0_7, x8_15, y8_15, x16_23, y16_23, x24_31, y24_31 ]
        __m256i xsumi = _mm256_sad_epu8( bx, _mm256_setzero_si256() );
        __m256i ysumi = _mm256_sad_epu8( by, _mm256_setzero_si256() );
        __m256i sumsi = _mm256_or_si256( xsumi, _mm256_slli_si256( ysumi, 4 ) );
        __m256  sums  = _mm256_cvtepi32_ps( sumsi );

        // Convert int32_t to float
        __m256 p = _mm256_cvtepi32_ps( i32 );
        // Apply the scale, and accumulate
        // acc += d0*d1*x*y + d0*m1*x + d1*m0*y
        acc = _mm256_fmadd_ps( scale_01, p, acc );
        acc = _mm256_fmadd_ps( cross_scales, sums, acc );
        // acc_offset += m0*m1 (for each entry in the block)
        acc_offset += (*m0)*(*m1);
    }

    // Return horizontal sum of the acc vector
    __m128 res = _mm256_extractf128_ps( acc, 1 );
    res = _mm_add_ps( res, _mm256_castps256_ps128( acc ) );
    res = _mm_add_ps( res, _mm_movehl_ps( res, res ) );
    res = _mm_add_ss( res, _mm_movehdup_ps( res ) );

    sumf = _mm_cvtss_f32( res ) + acc_offset * QK;
#elif defined(__ARM_NEON)
    float sum00 = 0.0f;
    float sum01 = 0.0f;
    float sum10 = 0.0f;
    float sum11 = 0.0f;

    for (int i = 0; i < nb; ++i) {
        const block_q4_1 * restrict x0 = &x[i + 0];
        const block_q4_1 * restrict y0 = &y[i + 0];

        const uint8x16_t m4b = vdupq_n_u8(0xf);

        const uint8x16_t v0_0 = vld1q_u8(x0->qs);
        const uint8x16_t v1_0 = vld1q_u8(y0->qs);

        // and with 0xf
        const uint8x16_t v0_0l = vandq_u8(v0_0, m4b);
        const uint8x16_t v1_0l = vandq_u8(v1_0, m4b);

        const uint8x16_t v0_0h = vshrq_n_u8(v0_0, 4);
        const uint8x16_t v1_0h = vshrq_n_u8(v1_0, 4);

        // dot product into uint16x8_t
        const uint16x8_t pl0l = vmull_u8(vget_low_u8 (v0_0l), vget_low_u8 (v1_0l));
        const uint16x8_t pl0h = vmull_u8(vget_high_u8(v0_0l), vget_high_u8(v1_0l));

        const uint16x8_t ph0l = vmull_u8(vget_low_u8 (v0_0h), vget_low_u8 (v1_0h));
        const uint16x8_t ph0h = vmull_u8(vget_high_u8(v0_0h), vget_high_u8(v1_0h));

        const uint16x8_t pl0 = vaddq_u16(pl0l, pl0h);
        const uint16x8_t ph0 = vaddq_u16(ph0l, ph0h);

        sum00 += x0->m*y0->m;
        sum01 += y0->m*x0->d*(vaddvq_u8(v0_0l) + vaddvq_u8(v0_0h));
        sum10 += x0->m*y0->d*(vaddvq_u8(v1_0l) + vaddvq_u8(v1_0h));
        sum11 += x0->d*y0->d*vaddvq_u16(vaddq_u16(pl0, ph0));
    }

    sumf = QK*sum00 + sum01 + sum10 + sum11;
#else
    // scalar
    for (int i = 0; i < nb; i++) {
        const float d0 = x[i].d;
        const float d1 = y[i].d;

        const float m0 = x[i].m;
        const float m1 = y[i].m;

        const uint8_t * restrict p0 = x[i].qs;
        const uint8_t * restrict p1 = y[i].qs;

        for (int j = 0; j < QK/2; j++) {
            const uint8_t v0 = p0[j];
            const uint8_t v1 = p1[j];

            const float f0 = d0*(v0 & 0xf) + m0;
            const float f1 = d0*(v0 >> 4) + m0;

            const float f2 = d1*(v1 & 0xf) + m1;
            const float f3 = d1*(v1 >> 4) + m1;

            sumf += f0*f2 + f1*f3;
        }
    }
#endif

    *s = sumf;
}
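
// All q4_1 paths above exploit the same algebraic decomposition. A q4_1 quant
// decodes to d*q + m, so for one block of QK values:
//
//   sum_j (d0*q0_j + m0)*(d1*q1_j + m1)
//     = d0*d1 * sum_j q0_j*q1_j        (sum11 / the madd products)
//     + d0*m1 * sum_j q0_j             (sum01 / one of the cross scales)
//     + m0*d1 * sum_j q1_j             (sum10 / the other cross scale)
//     + QK * m0*m1                     (sum00 / acc_offset)
//
// This is why the NEON branch finishes with sumf = QK*sum00 + sum01 + sum10 + sum11
// and the AVX2 branch adds acc_offset * QK at the very end.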

// compute GGML_VEC_DOT_UNROLL dot products at once
// xs - x row stride in bytes
inline static void ggml_vec_dot_f16_unroll(const int n, const int xs, float * restrict s, void * restrict xv, ggml_fp16_t * restrict y) {
    ggml_float sumf[GGML_VEC_DOT_UNROLL] = { 0.0 };

    ggml_fp16_t * restrict x[GGML_VEC_DOT_UNROLL];

    for (int i = 0; i < GGML_VEC_DOT_UNROLL; ++i) {
        x[i] = (ggml_fp16_t *) ((char *) xv + i*xs);
    }

#if defined(GGML_SIMD)
    const int np = (n & ~(GGML_F16_STEP - 1));

    GGML_F16_VEC sum[GGML_VEC_DOT_UNROLL][GGML_F16_ARR] = { { GGML_F16_VEC_ZERO } };

    GGML_F16_VEC ax[GGML_F16_ARR];
    GGML_F16_VEC ay[GGML_F16_ARR];

    for (int i = 0; i < np; i += GGML_F16_STEP) {
        for (int j = 0; j < GGML_F16_ARR; j++) {
            ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j);

            for (int k = 0; k < GGML_VEC_DOT_UNROLL; ++k) {
                ax[j] = GGML_F16_VEC_LOAD(x[k] + i + j*GGML_F16_EPR, j);

                sum[k][j] = GGML_F16_VEC_FMA(sum[k][j], ax[j], ay[j]);
            }
        }
    }

    // reduce the partial sums of each unrolled row k into sumf[k]
    for (int k = 0; k < GGML_VEC_DOT_UNROLL; ++k) {
        GGML_F16_VEC_REDUCE(sumf[k], sum[k]);
    }

    // leftovers
    for (int i = np; i < n; ++i) {
        for (int j = 0; j < GGML_VEC_DOT_UNROLL; ++j) {
            sumf[j] += (ggml_float)(GGML_FP16_TO_FP32(x[j][i])*GGML_FP16_TO_FP32(y[i]));
        }
    }
#else
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < GGML_VEC_DOT_UNROLL; ++j) {
            sumf[j] += (ggml_float)(GGML_FP16_TO_FP32(x[j][i])*GGML_FP16_TO_FP32(y[i]));
        }
    }
#endif

    for (int i = 0; i < GGML_VEC_DOT_UNROLL; ++i) {
        s[i] = sumf[i];
    }
}

inline static void ggml_vec_mad_f32(const int n, float * restrict y, const float * restrict x, const float v) {
#if defined(GGML_SIMD)
    const int np = (n & ~(GGML_F32_STEP - 1));

    GGML_F32_VEC vx = GGML_F32_VEC_SET1(v);

    GGML_F32_VEC ax[GGML_F32_ARR];
    GGML_F32_VEC ay[GGML_F32_ARR];

    for (int i = 0; i < np; i += GGML_F32_STEP) {
        for (int j = 0; j < GGML_F32_ARR; j++) {
            ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR);
            ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
            ay[j] = GGML_F32_VEC_FMA(ay[j], ax[j], vx);

            GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]);
        }
    }

    // leftovers
    for (int i = np; i < n; ++i) {
        y[i] += x[i]*v;
    }
#else
    // scalar
    for (int i = 0; i < n; ++i) {
        y[i] += x[i]*v;
    }
#endif
}
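
// ggml_vec_mad_f32 is the classic "axpy" kernel: y <- y + v*x. The expression
// np = n & ~(GGML_F32_STEP - 1) rounds n down to a multiple of GGML_F32_STEP,
// which is valid because the step is a power of two. For example (illustrative
// numbers only), with GGML_F32_STEP == 32 and n == 100, the SIMD loop covers
// the first 96 elements and the "leftovers" loop handles the remaining 4.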

//inline static void ggml_vec_scale_f32(const int n, float * y, const float v) { for (int i = 0; i < n; ++i) y[i] *= v; }
inline static void ggml_vec_scale_f32(const int n, float * y, const float v) {
#if defined(GGML_SIMD)
    const int np = (n & ~(GGML_F32_STEP - 1));

    GGML_F32_VEC vx = GGML_F32_VEC_SET1(v);

    GGML_F32_VEC ay[GGML_F32_ARR];

    for (int i = 0; i < np; i += GGML_F32_STEP) {
        for (int j = 0; j < GGML_F32_ARR; j++) {
            ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
            ay[j] = GGML_F32_VEC_MUL(ay[j], vx);

            GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]);
        }
    }

    // leftovers
    for (int i = np; i < n; ++i) {
        y[i] *= v;
    }
#else
    // scalar
    for (int i = 0; i < n; ++i) {
        y[i] *= v;
    }
#endif
}

inline static void ggml_vec_norm_f32 (const int n, float * s, const float * x) { ggml_vec_dot_f32(n, s, x, x); *s = sqrtf(*s);   }
inline static void ggml_vec_sqr_f32  (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i]*x[i];   }
inline static void ggml_vec_sqrt_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = sqrtf(x[i]); }
inline static void ggml_vec_abs_f32  (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = fabsf(x[i]); }
inline static void ggml_vec_sgn_f32  (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 1.f : ((x[i] < 0.f) ? -1.f : 0.f); }
inline static void ggml_vec_step_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 1.f : 0.f; }
inline static void ggml_vec_relu_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? x[i] : 0.f; }

static const float GELU_COEF_A    = 0.044715f;
static const float SQRT_2_OVER_PI = 0.79788456080286535587989211986876f;

inline static float ggml_gelu_f32(float x) {
    return 0.5f*x*(1.0f + tanhf(SQRT_2_OVER_PI*x*(1.0f + GELU_COEF_A*x*x)));
}
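
// This is the tanh approximation of GELU:
//
//   gelu(x) ≈ 0.5*x*(1 + tanh(sqrt(2/pi)*(x + 0.044715*x^3)))
//
// SQRT_2_OVER_PI is sqrt(2/pi) and GELU_COEF_A is the 0.044715 cubic
// coefficient; the exact GELU would evaluate the Gaussian CDF instead.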

inline static void ggml_vec_gelu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
    const uint16_t * i16 = (const uint16_t *) x;
    for (int i = 0; i < n; ++i) {
        y[i] = table_gelu_f16[i16[i]];
    }
}

#ifdef GGML_GELU_FP16
inline static void ggml_vec_gelu_f32(const int n, float * y, const float * x) {
    uint16_t t;
    for (int i = 0; i < n; ++i) {
        ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
        memcpy(&t, &fp16, sizeof(uint16_t));
        y[i] = GGML_FP16_TO_FP32(table_gelu_f16[t]);
    }
}
#else
inline static void ggml_vec_gelu_f32(const int n, float * y, const float * x) {
    for (int i = 0; i < n; ++i) {
        y[i] = ggml_gelu_f32(x[i]);
    }
}
#endif
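
// Both lookup variants use the same trick: an fp16 value has only 2^16
// possible bit patterns, so table_gelu_f16 (filled in ggml_init below) can
// precompute GELU for every one of them. The GGML_GELU_FP16 f32 path first
// rounds the input to fp16 and then uses its raw bit pattern as the table
// index, trading a little accuracy for a table lookup instead of a tanhf call.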

// Sigmoid Linear Unit (SiLU) function
inline static float ggml_silu_f32(float x) {
    return x/(1.0f + expf(-x));
}
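
// Equivalently, silu(x) = x * sigmoid(x) with sigmoid(x) = 1/(1 + e^-x),
// also known as the swish activation.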

inline static void ggml_vec_silu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
    const uint16_t * i16 = (const uint16_t *) x;
    for (int i = 0; i < n; ++i) {
        y[i] = table_silu_f16[i16[i]];
    }
}

#ifdef GGML_SILU_FP16
inline static void ggml_vec_silu_f32(const int n, float * y, const float * x) {
    uint16_t t;
    for (int i = 0; i < n; ++i) {
        ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
        memcpy(&t, &fp16, sizeof(uint16_t));
        y[i] = GGML_FP16_TO_FP32(table_silu_f16[t]);
    }
}
#else
inline static void ggml_vec_silu_f32(const int n, float * y, const float * x) {
    for (int i = 0; i < n; ++i) {
        y[i] = ggml_silu_f32(x[i]);
    }
}
#endif

inline static void ggml_vec_sum_f32(const int n, float * s, const float * x) {
#ifndef GGML_USE_ACCELERATE
    ggml_float sum = 0.0;
    for (int i = 0; i < n; ++i) {
        sum += (ggml_float)x[i];
    }
    *s = sum;
#else
    vDSP_sve(x, 1, s, n);
#endif
}

inline static void ggml_vec_max_f32(const int n, float * s, const float * x) {
#ifndef GGML_USE_ACCELERATE
    float max = -INFINITY;
    for (int i = 0; i < n; ++i) {
        max = MAX(max, x[i]);
    }
    *s = max;
#else
    vDSP_maxv(x, 1, s, n);
#endif
}

inline static void ggml_vec_norm_inv_f32(const int n, float * s, const float * x) {
    ggml_vec_norm_f32(n, s, x);
    *s = 1.f/(*s);
}

//
// logging
//

#if (GGML_DEBUG >= 1)
#define GGML_PRINT_DEBUG(...) printf(__VA_ARGS__)
#else
#define GGML_PRINT_DEBUG(...)
#endif

#if (GGML_DEBUG >= 5)
#define GGML_PRINT_DEBUG_5(...) printf(__VA_ARGS__)
#else
#define GGML_PRINT_DEBUG_5(...)
#endif

#if (GGML_DEBUG >= 10)
#define GGML_PRINT_DEBUG_10(...) printf(__VA_ARGS__)
#else
#define GGML_PRINT_DEBUG_10(...)
#endif

#define GGML_PRINT(...) printf(__VA_ARGS__)

//
// data types
//

static const int GGML_BLCK_SIZE[GGML_TYPE_COUNT] = {
    QK,
    QK,
    1,
    1,
    1,
    1,
    1,
};

static_assert(GGML_TYPE_COUNT == 7, "GGML_TYPE_COUNT != 7");

static const size_t GGML_TYPE_SIZE[GGML_TYPE_COUNT] = {
    sizeof(block_q4_0),
    sizeof(block_q4_1),
    sizeof(int8_t ),
    sizeof(int16_t),
    sizeof(int32_t),
    sizeof(ggml_fp16_t),
    sizeof(float ),
};

// don't forget to update the array above when adding new types
static_assert(GGML_TYPE_COUNT == 7, "GGML_TYPE_COUNT != 7");

static const char * GGML_OP_LABEL[GGML_OP_COUNT] = {
    "NONE",

    "DUP",
    "ADD",
    "SUB",
    "MUL",
    "DIV",
    "SQR",
    "SQRT",
    "SUM",
    "MEAN",
    "REPEAT",
    "ABS",
    "SGN",
    "NEG",
    "STEP",
    "RELU",
    "GELU",
    "SILU",
    "NORM",
    "RMS_NORM",

    "MUL_MAT",

    "SCALE",
    "CPY",
    "RESHAPE",
    "VIEW",
    "PERMUTE",
    "TRANSPOSE",
    "GET_ROWS",
    "DIAG_MASK_INF",
    "SOFT_MAX",
    "ROPE",
    "CONV_1D_1S",
    "CONV_1D_2S",

    "FLASH_ATTN",
    "FLASH_FF",
};

static_assert(GGML_OP_COUNT == 35, "GGML_OP_COUNT != 35");

static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = {
    "none",

    "x",
    "x+y",
    "x-y",
    "x*y",
    "x/y",
    "x^2",
    "√x",
    "Σx",
    "Σx/n",
    "repeat(x)",
    "abs(x)",
    "sgn(x)",
    "-x",
    "step(x)",
    "relu(x)",
    "gelu(x)",
    "silu(x)",
    "norm(x)",
    "rms_norm(x)",

    "X*Y",

    "x*v",
    "x-\\>y",
    "reshape(x)",
    "view(x)",
    "permute(x)",
    "transpose(x)",
    "get_rows(x)",
    "diag_mask_inf(x)",
    "soft_max(x)",
    "rope(x)",
    "conv_1d_1s(x)",
    "conv_1d_2s(x)",

    "flash_attn(x)",
    "flash_ff(x)",
};

static_assert(GGML_OP_COUNT == 35, "GGML_OP_COUNT != 35");

//
// ggml object
//

struct ggml_object {
    size_t offs;
    size_t size;

    struct ggml_object * next;

    char padding[8];
};

static const size_t GGML_OBJECT_SIZE = sizeof(struct ggml_object);

static_assert(sizeof(struct ggml_object)%GGML_MEM_ALIGN == 0, "ggml_object size must be a multiple of GGML_MEM_ALIGN");
static_assert(sizeof(struct ggml_tensor)%GGML_MEM_ALIGN == 0, "ggml_tensor size must be a multiple of GGML_MEM_ALIGN");

//
// ggml context
//

struct ggml_context {
    size_t mem_size;
    void * mem_buffer;
    bool   mem_buffer_owned;
    bool   mem_buffer_mlocked;
    bool   no_alloc;

    int n_objects;

    struct ggml_object * objects_begin;
    struct ggml_object * objects_end;

    struct ggml_scratch scratch;
    struct ggml_scratch scratch_save;
};

struct ggml_context_container {
    bool used;

    struct ggml_context context;
};

//
// compute types
//

enum ggml_task_type {
    GGML_TASK_INIT = 0,
    GGML_TASK_COMPUTE,
    GGML_TASK_FINALIZE,
};

struct ggml_compute_params {
    enum ggml_task_type type;

    int ith, nth;

    // work buffer for all threads
    size_t wsize;
    void * wdata;
};

//
// ggml state
//

struct ggml_state {
    struct ggml_context_container contexts[GGML_MAX_CONTEXTS];
};

// global state
static struct ggml_state g_state;
static atomic_int g_state_barrier = 0;

// barrier via spin lock
inline static void ggml_critical_section_start(void) {
    int processing = atomic_fetch_add(&g_state_barrier, 1);

    while (processing > 0) {
        // wait for other threads to finish
        atomic_fetch_sub(&g_state_barrier, 1);
        sched_yield(); // TODO: reconsider this
        processing = atomic_fetch_add(&g_state_barrier, 1);
    }
}

// TODO: make this somehow automatically executed
//       some sort of "sentry" mechanism
inline static void ggml_critical_section_end(void) {
    atomic_fetch_sub(&g_state_barrier, 1);
}
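
// How the spin lock works: atomic_fetch_add returns the value *before* the
// increment, so the only thread that observes 0 is the one that acquired the
// lock while g_state_barrier was free. Every other thread sees a positive
// value, undoes its own increment, yields the CPU, and retries.
// ggml_critical_section_end drops the count back to 0, releasing the lock.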

////////////////////////////////////////////////////////////////////////////////

void ggml_print_object(const struct ggml_object * obj) {
    GGML_PRINT(" - ggml_object: offset = %zu, size = %zu, next = %p\n",
            obj->offs, obj->size, (const void *) obj->next);
}

void ggml_print_objects(const struct ggml_context * ctx) {
    struct ggml_object * obj = ctx->objects_begin;

    GGML_PRINT("%s: objects in context %p:\n", __func__, (const void *) ctx);

    while (obj != NULL) {
        ggml_print_object(obj);
        obj = obj->next;
    }

    GGML_PRINT("%s: --- end ---\n", __func__);
}

int ggml_nelements(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return tensor->ne[0]*tensor->ne[1]*tensor->ne[2]*tensor->ne[3];
}

int ggml_nrows(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return tensor->ne[1]*tensor->ne[2]*tensor->ne[3];
}

size_t ggml_nbytes(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return (ggml_nelements(tensor)*GGML_TYPE_SIZE[tensor->type])/GGML_BLCK_SIZE[tensor->type];
}

int ggml_blck_size(enum ggml_type type) {
    return GGML_BLCK_SIZE[type];
}

size_t ggml_type_size(enum ggml_type type) {
    return GGML_TYPE_SIZE[type];
}

float ggml_type_sizef(enum ggml_type type) {
    return ((float)(GGML_TYPE_SIZE[type]))/GGML_BLCK_SIZE[type];
}

size_t ggml_element_size(const struct ggml_tensor * tensor) {
    return GGML_TYPE_SIZE[tensor->type];
}

static inline bool ggml_is_scalar(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return tensor->ne[0] == 1 && tensor->ne[1] == 1 && tensor->ne[2] == 1 && tensor->ne[3] == 1;
}

static inline bool ggml_is_vector(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return tensor->ne[1] == 1 && tensor->ne[2] == 1 && tensor->ne[3] == 1;
}

static inline bool ggml_is_matrix(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return tensor->ne[2] == 1 && tensor->ne[3] == 1;
}

static inline bool ggml_can_mul_mat(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return
        (t0->ne[0] == t1->ne[0]) &&
        (t0->ne[2] == t1->ne[2]) &&
        (t0->ne[3] == t1->ne[3]);
}

static inline bool ggml_is_transposed(const struct ggml_tensor * tensor) {
    return tensor->nb[0] > tensor->nb[1];
}

static inline bool ggml_is_contiguous(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return
        tensor->nb[0] == GGML_TYPE_SIZE[tensor->type] &&
        tensor->nb[1] == (tensor->nb[0]*tensor->ne[0])/GGML_BLCK_SIZE[tensor->type] &&
        tensor->nb[2] == tensor->nb[1]*tensor->ne[1] &&
        tensor->nb[3] == tensor->nb[2]*tensor->ne[2];
}

static inline bool ggml_is_padded_1d(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return
        tensor->nb[0] == GGML_TYPE_SIZE[tensor->type] &&
        tensor->nb[2] == tensor->nb[1]*tensor->ne[1] &&
        tensor->nb[3] == tensor->nb[2]*tensor->ne[2];
}

static inline bool ggml_are_same_shape(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return
        (t0->ne[0] == t1->ne[0]) &&
        (t0->ne[1] == t1->ne[1]) &&
        (t0->ne[2] == t1->ne[2]) &&
        (t0->ne[3] == t1->ne[3]);
}

// check if t1 can be represented as a repetition of t0
static inline bool ggml_can_repeat(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return
        (t1->ne[0]%t0->ne[0] == 0) &&
        (t1->ne[1]%t0->ne[1] == 0) &&
        (t1->ne[2]%t0->ne[2] == 0) &&
        (t1->ne[3]%t0->ne[3] == 0);
}
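
// For example (illustrative shapes only), a bias vector t0 with
// ne = {4096, 1, 1, 1} can repeat into an activation t1 with
// ne = {4096, 32, 1, 1}: every dimension of t1 is an exact multiple of the
// corresponding dimension of t0.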

static inline int ggml_up32(int n) {
    return (n + 31) & ~31;
}

static inline int ggml_up64(int n) {
    return (n + 63) & ~63;
}

static inline int ggml_up(int n, int m) {
    // assert m is a power of 2
    GGML_ASSERT((m & (m - 1)) == 0);
    return (n + m - 1) & ~(m - 1);
}
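
// These round n up to the next multiple of 32, 64, or m respectively, using
// the usual power-of-two trick: add m-1, then mask off the low bits.
// For example, ggml_up32(1) == 32, ggml_up32(64) == 64, and ggml_up(17, 16) == 32.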

// assert that pointer is aligned to GGML_MEM_ALIGN
#define ggml_assert_aligned(ptr) \
    GGML_ASSERT(((uintptr_t) (ptr))%GGML_MEM_ALIGN == 0)

////////////////////////////////////////////////////////////////////////////////

struct ggml_context * ggml_init(struct ggml_init_params params) {
    // make this function thread safe
    ggml_critical_section_start();

    static bool is_first_call = true;

    if (is_first_call) {
        // initialize time system (required on Windows)
        ggml_time_init();

        // initialize GELU, SILU and EXP F32 tables
        {
            const uint64_t t_start = ggml_time_us(); UNUSED(t_start);

            ggml_fp16_t ii;
            for (int i = 0; i < (1 << 16); ++i) {
                uint16_t ui = i;
                memcpy(&ii, &ui, sizeof(ii));
                const float f = table_f32_f16[i] = GGML_COMPUTE_FP16_TO_FP32(ii);
                table_gelu_f16[i] = GGML_FP32_TO_FP16(ggml_gelu_f32(f));
                table_silu_f16[i] = GGML_FP32_TO_FP16(ggml_silu_f32(f));
                table_exp_f16[i]  = GGML_FP32_TO_FP16(expf(f));
            }

            const uint64_t t_end = ggml_time_us(); UNUSED(t_end);

            GGML_PRINT_DEBUG("%s: GELU, SILU and EXP tables initialized in %f ms\n", __func__, (t_end - t_start)/1000.0f);
        }

        // initialize g_state
        {
            const uint64_t t_start = ggml_time_us(); UNUSED(t_start);

            g_state = (struct ggml_state) {
                /*.contexts =*/ { { 0 } },
            };

            for (int i = 0; i < GGML_MAX_CONTEXTS; ++i) {
                g_state.contexts[i].used = false;
            }

            const uint64_t t_end = ggml_time_us(); UNUSED(t_end);

            GGML_PRINT_DEBUG("%s: g_state initialized in %f ms\n", __func__, (t_end - t_start)/1000.0f);
        }

        is_first_call = false;
    }

    // find non-used context in g_state
    struct ggml_context * ctx = NULL;

    for (int i = 0; i < GGML_MAX_CONTEXTS; i++) {
        if (!g_state.contexts[i].used) {
            g_state.contexts[i].used = true;
            ctx = &g_state.contexts[i].context;

            GGML_PRINT_DEBUG("%s: found unused context %d\n", __func__, i);
            break;
        }
    }

    if (ctx == NULL) {
        GGML_PRINT_DEBUG("%s: no unused context found\n", __func__);

        ggml_critical_section_end();

        return NULL;
    }

    *ctx = (struct ggml_context) {
        /*.mem_size           =*/ params.mem_size,
        /*.mem_buffer         =*/ params.mem_buffer ? params.mem_buffer : malloc(params.mem_size),
        /*.mem_buffer_owned   =*/ params.mem_buffer ? false : true,
        /*.mem_buffer_mlocked =*/ false,
        /*.no_alloc           =*/ params.no_alloc,
        /*.n_objects          =*/ 0,
        /*.objects_begin      =*/ NULL,
        /*.objects_end        =*/ NULL,
        /*.scratch            =*/ { 0, 0, NULL, },
        /*.scratch_save       =*/ { 0, 0, NULL, },
    };

    GGML_ASSERT(ctx->mem_buffer != NULL); // check for allocation failure

    ggml_assert_aligned(ctx->mem_buffer);

    GGML_PRINT_DEBUG("%s: context initialized\n", __func__);

    ggml_critical_section_end();

    return ctx;
}

void ggml_free(struct ggml_context * ctx) {
    // make this function thread safe
    ggml_critical_section_start();

    bool found = false;

    for (int i = 0; i < GGML_MAX_CONTEXTS; i++) {
        if (&g_state.contexts[i].context == ctx) {
            g_state.contexts[i].used = false;

            GGML_PRINT_DEBUG("%s: context %d with %d objects has been freed. memory used = %zu\n",
                    __func__, i, ctx->n_objects, ctx->objects_end->offs + ctx->objects_end->size);

#if GGML_MLOCK_SUPPORT
            if (ctx->mem_buffer_mlocked) {
                if (munlock(ctx->mem_buffer, ctx->mem_size)) {
                    fprintf(stderr, "%s: failed to munlock buffer: %s\n", __func__, strerror(errno));
                }
            }
#endif

            if (ctx->mem_buffer_owned) {
                free(ctx->mem_buffer);
            }

            found = true;
            break;
        }
    }

    if (!found) {
        GGML_PRINT_DEBUG("%s: context not found\n", __func__);
    }

    ggml_critical_section_end();
}

size_t ggml_used_mem(const struct ggml_context * ctx) {
    return ctx->objects_end->offs + ctx->objects_end->size;
}

size_t ggml_set_scratch(struct ggml_context * ctx, struct ggml_scratch scratch) {
    const size_t result = ctx->scratch.data ? ctx->scratch.offs : 0;

    ctx->scratch = scratch;

    return result;
}
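
// Scratch buffers let callers place tensor *data* in a caller-provided region
// while the tensor metadata still lives in the context's own pool (see
// ggml_new_tensor_impl below). ggml_set_scratch returns the previous offset so
// a caller can temporarily install a scratch buffer and later restore the old
// one, which is exactly what ggml_new_i32/ggml_new_f32 do via scratch_save.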

#ifdef __APPLE__
#define MLOCK_SUGGESTION \
    "Try increasing the sysctl values 'vm.user_wire_limit' and 'vm.global_user_wire_limit' and/or " \
    "decreasing 'vm.global_no_user_wire_amount'. Also try increasing RLIMIT_MLOCK (ulimit -l).\n"
#else
#define MLOCK_SUGGESTION \
    "Try increasing RLIMIT_MLOCK ('ulimit -l' as root).\n"
#endif

bool ggml_mlock_supported(void) {
    return GGML_MLOCK_SUPPORT;
}

bool ggml_mlock(
        struct ggml_context * ctx,
        const void *opt_extra_addr,
        size_t opt_extra_len,
        char **err_p) {
    // TODO: Use SetProcessWorkingSetSize() + VirtualLock() on WIN32
#if GGML_MLOCK_SUPPORT
    if (ctx->mem_buffer_mlocked) {
        return true;
    }
    if (mlock(ctx->mem_buffer, ctx->mem_size) ||
        (opt_extra_len &&
         mlock(opt_extra_addr, opt_extra_len))) {
        if ((*err_p = malloc(1024))) {
            snprintf(*err_p, 1024,
                     "failed to mlock %zu-byte buffer: %s\n" MLOCK_SUGGESTION,
                     ctx->mem_size + opt_extra_len,
                     strerror(errno));
        }
        return false;
    }
    ctx->mem_buffer_mlocked = true;
    return true;
#else // GGML_MLOCK_SUPPORT
    *err_p = strdup("can't mlock because it's not supported on this system");
    return false;
#endif // GGML_MLOCK_SUPPORT
}

////////////////////////////////////////////////////////////////////////////////

struct ggml_tensor * ggml_new_tensor_impl(
        struct ggml_context * ctx,
        enum   ggml_type type,
        int    n_dims,
        const int * ne,
        void * data) {
    // always insert objects at the end of the context's memory pool
    struct ggml_object * obj_cur = ctx->objects_end;

    const size_t cur_offs = obj_cur == NULL ? 0 : obj_cur->offs;
    const size_t cur_size = obj_cur == NULL ? 0 : obj_cur->size;
    const size_t cur_end  = cur_offs + cur_size;

    size_t size_needed = 0;

    if (data == NULL && !ctx->no_alloc) {
        size_needed += GGML_TYPE_SIZE[type]*(ne[0]/GGML_BLCK_SIZE[type]);
        for (int i = 1; i < n_dims; i++) {
            size_needed *= ne[i];
        }
        // align to GGML_MEM_ALIGN
        size_needed = ((size_needed + GGML_MEM_ALIGN - 1)/GGML_MEM_ALIGN)*GGML_MEM_ALIGN;
    }

    char * const mem_buffer = ctx->mem_buffer;
    struct ggml_object * const obj_new = (struct ggml_object *)(mem_buffer + cur_end);

    if (ctx->scratch.data == NULL || data != NULL) {
        size_needed += sizeof(struct ggml_tensor);

        if (cur_end + size_needed + GGML_OBJECT_SIZE > ctx->mem_size) {
            GGML_PRINT("%s: not enough space in the context's memory pool (needed %zu, available %zu)\n",
                    __func__, cur_end + size_needed + GGML_OBJECT_SIZE, ctx->mem_size);
            assert(false);
            return NULL;
        }

        *obj_new = (struct ggml_object) {
            .offs = cur_end + GGML_OBJECT_SIZE,
            .size = size_needed,
            .next = NULL,
        };
    } else {
        if (ctx->scratch.offs + size_needed > ctx->scratch.size) {
            GGML_PRINT("%s: not enough space in the scratch memory\n", __func__);
            assert(false);
            return NULL;
        }

        if (cur_end + sizeof(struct ggml_tensor) + GGML_OBJECT_SIZE > ctx->mem_size) {
            GGML_PRINT("%s: not enough space in the context's memory pool (needed %zu, available %zu)\n",
                    __func__, cur_end + sizeof(struct ggml_tensor) + GGML_OBJECT_SIZE, ctx->mem_size);
            assert(false);
            return NULL;
        }

        data = (char * const) ctx->scratch.data + ctx->scratch.offs;

        *obj_new = (struct ggml_object) {
            .offs = cur_end + GGML_OBJECT_SIZE,
            .size = sizeof(struct ggml_tensor),
            .next = NULL,
        };

        //printf("scratch offs = %zu, size_needed = %zu\n", ctx->scratch.offs, size_needed);

        ctx->scratch.offs += size_needed;
    }

    if (obj_cur != NULL) {
        obj_cur->next = obj_new;
    } else {
        // this is the first object in this context
        ctx->objects_begin = obj_new;
    }

    ctx->objects_end = obj_new;

    //printf("%s: inserted new object at %zu, size = %zu\n", __func__, cur_end, obj_new->size);

    struct ggml_tensor * const result = (struct ggml_tensor *)(mem_buffer + obj_new->offs);

    ggml_assert_aligned(result);

    *result = (struct ggml_tensor) {
        /*.type         =*/ type,
        /*.n_dims       =*/ n_dims,
        /*.ne           =*/ { 1, 1, 1, 1 },
        /*.nb           =*/ { 0, 0, 0, 0 },
        /*.op           =*/ GGML_OP_NONE,
        /*.is_param     =*/ false,
        /*.grad         =*/ NULL,
        /*.src0         =*/ NULL,
        /*.src1         =*/ NULL,
        /*.opt          =*/ { NULL },
        /*.n_tasks      =*/ 0,
        /*.perf_runs    =*/ 0,
        /*.perf_cycles  =*/ 0,
        /*.perf_time_us =*/ 0,
        /*.data         =*/ (data == NULL && !ctx->no_alloc) ? (void *)(result + 1) : data,
        /*.pad          =*/ { 0 },
    };

    ggml_assert_aligned(result->data);

    for (int i = 0; i < n_dims; i++) {
        result->ne[i] = ne[i];
    }

    result->nb[0] = GGML_TYPE_SIZE[type];
    result->nb[1] = result->nb[0]*(result->ne[0]/GGML_BLCK_SIZE[type]);
    for (int i = 2; i < GGML_MAX_DIMS; i++) {
        result->nb[i] = result->nb[i - 1]*result->ne[i - 1];
    }

    ctx->n_objects++;

    return result;
}
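
// The nb[] strides computed above make indexing explicit: for an F32 tensor
// with ne = {4, 3, 1, 1} (block size 1), nb[0] = 4 bytes, nb[1] = 16, and
// nb[2] = nb[3] = 48, so element (i0, i1) lives at data + i0*4 + i1*16.
// For quantized types nb[1] accounts for the block size, because the ne[0]
// elements of a row are packed into ne[0]/GGML_BLCK_SIZE blocks.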

struct ggml_tensor * ggml_new_tensor(
        struct ggml_context * ctx,
        enum   ggml_type type,
        int    n_dims,
        const int * ne) {
    return ggml_new_tensor_impl(ctx, type, n_dims, ne, NULL);
}

struct ggml_tensor * ggml_new_tensor_1d(
        struct ggml_context * ctx,
        enum   ggml_type type,
        int    ne0) {
    return ggml_new_tensor(ctx, type, 1, &ne0);
}

struct ggml_tensor * ggml_new_tensor_2d(
        struct ggml_context * ctx,
        enum   ggml_type type,
        int    ne0,
        int    ne1) {
    const int ne[2] = { ne0, ne1 };
    return ggml_new_tensor(ctx, type, 2, ne);
}

struct ggml_tensor * ggml_new_tensor_3d(
        struct ggml_context * ctx,
        enum   ggml_type type,
        int    ne0,
        int    ne1,
        int    ne2) {
    const int ne[3] = { ne0, ne1, ne2 };
    return ggml_new_tensor(ctx, type, 3, ne);
}

struct ggml_tensor * ggml_new_tensor_4d(
        struct ggml_context * ctx,
        enum   ggml_type type,
        int    ne0,
        int    ne1,
        int    ne2,
        int    ne3) {
    const int ne[4] = { ne0, ne1, ne2, ne3 };
    return ggml_new_tensor(ctx, type, 4, ne);
}

struct ggml_tensor * ggml_new_i32(struct ggml_context * ctx, int32_t value) {
    ctx->scratch_save = ctx->scratch;
    ctx->scratch.data = NULL;

    struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 1);

    ctx->scratch = ctx->scratch_save;

    ggml_set_i32(result, value);

    return result;
}

struct ggml_tensor * ggml_new_f32(struct ggml_context * ctx, float value) {
    ctx->scratch_save = ctx->scratch;
    ctx->scratch.data = NULL;

    struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);

    ctx->scratch = ctx->scratch_save;

    ggml_set_f32(result, value);

    return result;
}

struct ggml_tensor * ggml_dup_tensor(struct ggml_context * ctx, const struct ggml_tensor * src) {
    return ggml_new_tensor_impl(ctx, src->type, src->n_dims, src->ne, NULL);
}

struct ggml_tensor * ggml_set_zero(struct ggml_tensor * tensor) {
    memset(tensor->data, 0, ggml_nbytes(tensor));
    return tensor;
}

struct ggml_tensor * ggml_set_i32(struct ggml_tensor * tensor, int32_t value) {
    const int n  = ggml_nrows(tensor);
    const int nc = tensor->ne[0];
    const size_t n1 = tensor->nb[1];

    char * const data = tensor->data;

    switch (tensor->type) {
        case GGML_TYPE_Q4_0:
            {
                GGML_ASSERT(false);
            } break;
        case GGML_TYPE_Q4_1:
            {
                GGML_ASSERT(false);
            } break;
        case GGML_TYPE_I8:
            {
                assert(tensor->nb[0] == sizeof(int8_t));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_i8(nc, (int8_t *)(data + i*n1), value);
                }
            } break;
        case GGML_TYPE_I16:
            {
                assert(tensor->nb[0] == sizeof(int16_t));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_i16(nc, (int16_t *)(data + i*n1), value);
                }
            } break;
        case GGML_TYPE_I32:
            {
                assert(tensor->nb[0] == sizeof(int32_t));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_i32(nc, (int32_t *)(data + i*n1), value);
                }
            } break;
        case GGML_TYPE_F16:
            {
                assert(tensor->nb[0] == sizeof(ggml_fp16_t));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), value);
                }
            } break;
        case GGML_TYPE_F32:
            {
                assert(tensor->nb[0] == sizeof(float));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_f32(nc, (float *)(data + i*n1), value);
                }
            } break;
        case GGML_TYPE_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }

    return tensor;
}

struct ggml_tensor * ggml_set_f32(struct ggml_tensor * tensor, float value) {
    const int n  = ggml_nrows(tensor);
    const int nc = tensor->ne[0];
    const size_t n1 = tensor->nb[1];

    char * const data = tensor->data;

    switch (tensor->type) {
        case GGML_TYPE_Q4_0:
            {
                GGML_ASSERT(false);
            } break;
        case GGML_TYPE_Q4_1:
            {
                GGML_ASSERT(false);
            } break;
        case GGML_TYPE_I8:
            {
                assert(tensor->nb[0] == sizeof(int8_t));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_i8(nc, (int8_t *)(data + i*n1), value);
                }
            } break;
        case GGML_TYPE_I16:
            {
                assert(tensor->nb[0] == sizeof(int16_t));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_i16(nc, (int16_t *)(data + i*n1), value);
                }
            } break;
        case GGML_TYPE_I32:
            {
                assert(tensor->nb[0] == sizeof(int32_t));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_i32(nc, (int32_t *)(data + i*n1), value);
                }
            } break;
        case GGML_TYPE_F16:
            {
                assert(tensor->nb[0] == sizeof(ggml_fp16_t));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), value);
                }
            } break;
        case GGML_TYPE_F32:
            {
                assert(tensor->nb[0] == sizeof(float));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_f32(nc, (float *)(data + i*n1), value);
                }
            } break;
        case GGML_TYPE_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }

    return tensor;
}

int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i) {
    switch (tensor->type) {
        case GGML_TYPE_Q4_0:
            {
                GGML_ASSERT(false);
            } break;
        case GGML_TYPE_Q4_1:
            {
                GGML_ASSERT(false);
            } break;
        case GGML_TYPE_I8:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
                return ((int8_t *)(tensor->data))[i];
            } break;
        case GGML_TYPE_I16:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
                return ((int16_t *)(tensor->data))[i];
            } break;
        case GGML_TYPE_I32:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
                return ((int32_t *)(tensor->data))[i];
            } break;
        case GGML_TYPE_F16:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
                return GGML_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]);
            } break;
        case GGML_TYPE_F32:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(float));
                return ((float *)(tensor->data))[i];
            } break;
        case GGML_TYPE_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }

    return 0;
}

void ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value) {
    switch (tensor->type) {
        case GGML_TYPE_Q4_0:
            {
                GGML_ASSERT(false);
            } break;
        case GGML_TYPE_Q4_1:
            {
                GGML_ASSERT(false);
            } break;
        case GGML_TYPE_I8:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
                ((int8_t *)(tensor->data))[i] = value;
            } break;
        case GGML_TYPE_I16:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
                ((int16_t *)(tensor->data))[i] = value;
            } break;
        case GGML_TYPE_I32:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
                ((int32_t *)(tensor->data))[i] = value;
            } break;
        case GGML_TYPE_F16:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
                ((ggml_fp16_t *)(tensor->data))[i] = GGML_FP32_TO_FP16(value);
            } break;
        case GGML_TYPE_F32:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(float));
                ((float *)(tensor->data))[i] = value;
            } break;
        case GGML_TYPE_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }
}

float ggml_get_f32_1d(const struct ggml_tensor * tensor, int i) {
    switch (tensor->type) {
        case GGML_TYPE_Q4_0:
            {
                GGML_ASSERT(false);
            } break;
        case GGML_TYPE_Q4_1:
            {
                GGML_ASSERT(false);
            } break;
        case GGML_TYPE_I8:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
                return ((int8_t *)(tensor->data))[i];
            } break;
        case GGML_TYPE_I16:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
                return ((int16_t *)(tensor->data))[i];
            } break;
        case GGML_TYPE_I32:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
                return ((int32_t *)(tensor->data))[i];
            } break;
        case GGML_TYPE_F16:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
                return GGML_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]);
            } break;
        case GGML_TYPE_F32:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(float));
                return ((float *)(tensor->data))[i];
            } break;
        case GGML_TYPE_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }

    return 0.0f;
}

void ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value) {
    switch (tensor->type) {
        case GGML_TYPE_Q4_0:
            {
                GGML_ASSERT(false);
            } break;
        case GGML_TYPE_Q4_1:
            {
                GGML_ASSERT(false);
            } break;
        case GGML_TYPE_I8:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
                ((int8_t *)(tensor->data))[i] = value;
            } break;
        case GGML_TYPE_I16:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
                ((int16_t *)(tensor->data))[i] = value;
            } break;
        case GGML_TYPE_I32:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
                ((int32_t *)(tensor->data))[i] = value;
            } break;
        case GGML_TYPE_F16:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
                ((ggml_fp16_t *)(tensor->data))[i] = GGML_FP32_TO_FP16(value);
            } break;
        case GGML_TYPE_F32:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(float));
                ((float *)(tensor->data))[i] = value;
            } break;
        case GGML_TYPE_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }
}

void * ggml_get_data(const struct ggml_tensor * tensor) {
    return tensor->data;
}

float * ggml_get_data_f32(const struct ggml_tensor * tensor) {
    assert(tensor->type == GGML_TYPE_F32);
    return (float *)(tensor->data);
}

struct ggml_tensor * ggml_view_tensor(
        struct ggml_context * ctx,
        const struct ggml_tensor * src) {
    return ggml_new_tensor_impl(ctx, src->type, src->n_dims, src->ne, src->data);
}

////////////////////////////////////////////////////////////////////////////////

// ggml_dup

struct ggml_tensor * ggml_dup_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        bool inplace) {
    bool is_node = false;

    if (!inplace && (a->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_DUP;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = NULL;

    return result;
}

struct ggml_tensor * ggml_dup(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_dup_impl(ctx, a, false);
}

struct ggml_tensor * ggml_dup_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_dup_impl(ctx, a, true);
}
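
// All the operator constructors below follow this same pattern: they do not
// compute anything, they only record a node in the computation graph. The
// result tensor stores which op produced it (result->op) and its operands
// (src0/src1); a gradient tensor is allocated only when an input already has
// one, i.e. when the graph is being built for backpropagation. The *_inplace
// variants reuse the input's data through ggml_view_tensor instead of
// allocating a new buffer.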

// ggml_add

struct ggml_tensor * ggml_add_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        bool inplace) {
    GGML_ASSERT(ggml_are_same_shape(a, b));

    bool is_node = false;

    if (!inplace && (a->grad || b->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_ADD;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = b;

    return result;
}

struct ggml_tensor * ggml_add(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_add_impl(ctx, a, b, false);
}

struct ggml_tensor * ggml_add_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_add_impl(ctx, a, b, true);
}

// ggml_sub

struct ggml_tensor * ggml_sub_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        bool inplace) {
    GGML_ASSERT(ggml_are_same_shape(a, b));

    bool is_node = false;

    if (!inplace && (a->grad || b->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_SUB;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = b;

    return result;
}

struct ggml_tensor * ggml_sub(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_sub_impl(ctx, a, b, false);
}

struct ggml_tensor * ggml_sub_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_sub_impl(ctx, a, b, true);
}

// ggml_mul

struct ggml_tensor * ggml_mul_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        bool inplace) {
    GGML_ASSERT(ggml_are_same_shape(a, b));

    bool is_node = false;

    if (!inplace && (a->grad || b->grad)) {
        is_node = true;
    }

    if (inplace) {
        GGML_ASSERT(is_node == false);
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_MUL;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = b;

    return result;
}

struct ggml_tensor * ggml_mul(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_mul_impl(ctx, a, b, false);
}

struct ggml_tensor * ggml_mul_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_mul_impl(ctx, a, b, true);
}

// ggml_div

struct ggml_tensor * ggml_div_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        bool inplace) {
    GGML_ASSERT(ggml_are_same_shape(a, b));

    bool is_node = false;

    if (!inplace && (a->grad || b->grad)) {
        is_node = true;
    }

    if (inplace) {
        GGML_ASSERT(is_node == false);
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_DIV;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = b;

    return result;
}

struct ggml_tensor * ggml_div(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_div_impl(ctx, a, b, false);
}

struct ggml_tensor * ggml_div_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_div_impl(ctx, a, b, true);
}

// ggml_sqr

struct ggml_tensor * ggml_sqr_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        bool inplace) {
    bool is_node = false;

    if (!inplace && (a->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_SQR;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = NULL;

    return result;
}

struct ggml_tensor * ggml_sqr(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_sqr_impl(ctx, a, false);
}

struct ggml_tensor * ggml_sqr_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_sqr_impl(ctx, a, true);
}

// ggml_sqrt

struct ggml_tensor * ggml_sqrt_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        bool inplace) {
    bool is_node = false;

    if (!inplace && (a->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_SQRT;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = NULL;

    return result;
}

struct ggml_tensor * ggml_sqrt(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_sqrt_impl(ctx, a, false);
}

struct ggml_tensor * ggml_sqrt_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_sqrt_impl(ctx, a, true);
}

// ggml_sum

struct ggml_tensor * ggml_sum(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = ggml_new_tensor_1d(ctx, a->type, 1);

    result->op   = GGML_OP_SUM;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = NULL;

    return result;
}

// ggml_mean

struct ggml_tensor * ggml_mean(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement
        is_node = true;
    }

    int ne[GGML_MAX_DIMS] = { 1, a->ne[1], a->ne[2], a->ne[3] };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, a->n_dims, ne);

    result->op   = GGML_OP_MEAN;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = NULL;

    return result;
}

// ggml_repeat

struct ggml_tensor * ggml_repeat(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    GGML_ASSERT(ggml_can_repeat(a, b));

    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    if (ggml_are_same_shape(a, b) && !is_node) {
        return a;
    }

    struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, b->n_dims, b->ne);

    result->op   = GGML_OP_REPEAT;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = b;

    return result;
}

// ggml_abs

struct ggml_tensor * ggml_abs_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        bool inplace) {
    bool is_node = false;

    if (!inplace && (a->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_ABS;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = NULL;

    return result;
}

struct ggml_tensor * ggml_abs(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_abs_impl(ctx, a, false);
}

struct ggml_tensor * ggml_abs_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_abs_impl(ctx, a, true);
}

// ggml_sgn

struct ggml_tensor * ggml_sgn_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        bool inplace) {
    bool is_node = false;

    if (!inplace && (a->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_SGN;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = NULL;

    return result;
}

struct ggml_tensor * ggml_sgn(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_sgn_impl(ctx, a, false);
}

struct ggml_tensor * ggml_sgn_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_sgn_impl(ctx, a, true);
}

// ggml_neg

struct ggml_tensor * ggml_neg_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        bool inplace) {
    bool is_node = false;

    if (!inplace && (a->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_NEG;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = NULL;

    return result;
}

struct ggml_tensor * ggml_neg(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_neg_impl(ctx, a, false);
}

struct ggml_tensor * ggml_neg_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_neg_impl(ctx, a, true);
}

// ggml_step

struct ggml_tensor * ggml_step_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        bool inplace) {
    bool is_node = false;

    if (!inplace && (a->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_STEP;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = NULL;

    return result;
}

struct ggml_tensor * ggml_step(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_step_impl(ctx, a, false);
}

struct ggml_tensor * ggml_step_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_step_impl(ctx, a, true);
}

// ggml_relu

struct ggml_tensor * ggml_relu_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        bool inplace) {
    bool is_node = false;

    if (!inplace && (a->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_RELU;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = NULL;

    return result;
}

struct ggml_tensor * ggml_relu(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_relu_impl(ctx, a, false);
}

struct ggml_tensor * ggml_relu_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_relu_impl(ctx, a, true);
}

// ggml_gelu

struct ggml_tensor * ggml_gelu_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        bool inplace) {
    bool is_node = false;

    if (!inplace && (a->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_GELU;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = NULL;

    return result;
}

struct ggml_tensor * ggml_gelu(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_gelu_impl(ctx, a, false);
}

struct ggml_tensor * ggml_gelu_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_gelu_impl(ctx, a, true);
}

// ggml_silu

struct ggml_tensor * ggml_silu_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        bool inplace) {
    bool is_node = false;

    if (!inplace && (a->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_SILU;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = NULL;

    return result;
}

struct ggml_tensor * ggml_silu(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_silu_impl(ctx, a, false);
}

struct ggml_tensor * ggml_silu_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_silu_impl(ctx, a, true);
}
  3239. // ggml_norm
  3240. struct ggml_tensor * ggml_norm_impl(
  3241. struct ggml_context * ctx,
  3242. struct ggml_tensor * a,
  3243. bool inplace) {
  3244. bool is_node = false;
  3245. if (!inplace && (a->grad)) {
  3246. GGML_ASSERT(false); // TODO: implement backward
  3247. is_node = true;
  3248. }
  3249. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3250. result->op = GGML_OP_NORM;
  3251. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3252. result->src0 = a;
  3253. result->src1 = NULL; // TODO: maybe store epsilon here?
  3254. return result;
  3255. }
  3256. struct ggml_tensor * ggml_norm(
  3257. struct ggml_context * ctx,
  3258. struct ggml_tensor * a) {
  3259. return ggml_norm_impl(ctx, a, false);
  3260. }
  3261. struct ggml_tensor * ggml_norm_inplace(
  3262. struct ggml_context * ctx,
  3263. struct ggml_tensor * a) {
  3264. return ggml_norm_impl(ctx, a, true);
  3265. }
  3266. struct ggml_tensor * ggml_rms_norm_impl(
  3267. struct ggml_context * ctx,
  3268. struct ggml_tensor * a,
  3269. bool inplace) {
  3270. bool is_node = false;
  3271. if (!inplace && (a->grad)) {
  3272. GGML_ASSERT(false); // TODO: implement backward
  3273. is_node = true;
  3274. }
  3275. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3276. result->op = GGML_OP_RMS_NORM;
  3277. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3278. result->src0 = a;
  3279. result->src1 = NULL; // TODO: maybe store epsilon here?
  3280. return result;
  3281. }
  3282. struct ggml_tensor * ggml_rms_norm(
  3283. struct ggml_context * ctx,
  3284. struct ggml_tensor * a) {
  3285. return ggml_rms_norm_impl(ctx, a, false);
  3286. }
  3287. struct ggml_tensor * ggml_rms_norm_inplace(
  3288. struct ggml_context * ctx,
  3289. struct ggml_tensor * a) {
  3290. return ggml_rms_norm_impl(ctx, a, true);
  3291. }
  3292. // ggml_mul_mat
  3293. struct ggml_tensor * ggml_mul_mat(
  3294. struct ggml_context * ctx,
  3295. struct ggml_tensor * a,
  3296. struct ggml_tensor * b) {
  3297. GGML_ASSERT(ggml_can_mul_mat(a, b));
  3298. GGML_ASSERT(!ggml_is_transposed(a));
  3299. bool is_node = false;
  3300. if (a->grad || b->grad) {
  3301. is_node = true;
  3302. }
  3303. const int ne[4] = { a->ne[1], b->ne[1], a->ne[2], b->ne[3] };
  3304. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, MIN(a->n_dims, b->n_dims), ne);
  3305. result->op = GGML_OP_MUL_MAT;
  3306. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3307. result->src0 = a;
  3308. result->src1 = b;
  3309. return result;
  3310. }
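
// Usage sketch (illustrative, assuming an initialized ggml_context `ctx`;
// tensor names are made up): both operands keep the shared dimension K in
// ne[0], and each result element is the dot product of a row of `w` with a
// row of `x`:
//
//   struct ggml_tensor * w = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 64, 32); // K = 64, M = 32
//   struct ggml_tensor * x = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 64,  8); // K = 64, N = 8
//   struct ggml_tensor * y = ggml_mul_mat(ctx, w, x);                        // y->ne = {32, 8}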

// ggml_scale

struct ggml_tensor * ggml_scale_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        bool inplace) {
    GGML_ASSERT(ggml_is_scalar(b));
    GGML_ASSERT(ggml_is_padded_1d(a));

    bool is_node = false;

    if (!inplace && (a->grad || b->grad)) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    // TODO: when implementing backward, fix this:
    //struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
    struct ggml_tensor * result = ggml_view_tensor(ctx, a);

    result->op   = GGML_OP_SCALE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = b;

    return result;
}

struct ggml_tensor * ggml_scale(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_scale_impl(ctx, a, b, false);
}

struct ggml_tensor * ggml_scale_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_scale_impl(ctx, a, b, true);
}

// ggml_cpy

struct ggml_tensor * ggml_cpy_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        bool inplace) {
    GGML_ASSERT(ggml_nelements(a) == ggml_nelements(b));

    bool is_node = false;

    if (!inplace && (a->grad || b->grad)) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    // make a view of the destination
    struct ggml_tensor * result = ggml_view_tensor(ctx, b);

    result->op   = GGML_OP_CPY;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = b;

    return result;
}

struct ggml_tensor * ggml_cpy(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_cpy_impl(ctx, a, b, false);
}

struct ggml_tensor * ggml_cpy_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_cpy_impl(ctx, a, b, true);
}

// ggml_reshape

struct ggml_tensor * ggml_reshape(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    GGML_ASSERT(ggml_is_contiguous(a));
    GGML_ASSERT(ggml_is_contiguous(b));
    GGML_ASSERT(ggml_nelements(a) == ggml_nelements(b));

    bool is_node = false;

    if (a->grad || b->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, b->n_dims, b->ne, a->data);

    result->op   = GGML_OP_RESHAPE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = NULL;

    return result;
}

struct ggml_tensor * ggml_reshape_2d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int ne0,
        int ne1) {
    GGML_ASSERT(ggml_is_contiguous(a));
    GGML_ASSERT(ggml_nelements(a) == ne0*ne1);

    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    const int ne[2] = { ne0, ne1 };
    struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 2, ne, a->data);

    result->op   = GGML_OP_RESHAPE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = NULL;

    return result;
}

struct ggml_tensor * ggml_reshape_3d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int ne0,
        int ne1,
        int ne2) {
    GGML_ASSERT(ggml_is_contiguous(a));
    GGML_ASSERT(ggml_nelements(a) == ne0*ne1*ne2);

    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    const int ne[3] = { ne0, ne1, ne2 };
    struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 3, ne, a->data);

    result->op   = GGML_OP_RESHAPE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = NULL;

    return result;
}
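
// Usage sketch (illustrative, assuming an initialized `ctx`): reshape does
// not copy anything - the result aliases a->data with new dimensions, which
// is why the contiguity asserts above are needed:
//
//   struct ggml_tensor * t = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 12);
//   struct ggml_tensor * m = ggml_reshape_2d(ctx, t, 4, 3); // m->ne = {4, 3}, m->data == t->data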

// ggml_view_1d

struct ggml_tensor * ggml_view_1d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int ne0,
        size_t offset) {
    if (a->grad) {
        GGML_ASSERT(false); // gradient propagation is not supported
    }

    struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 1, &ne0, (char *) a->data + offset);

    result->op   = GGML_OP_VIEW;
    result->grad = NULL;
    result->src0 = a;
    result->src1 = NULL; // TODO: maybe store the offset here?

    return result;
}

// ggml_view_2d

struct ggml_tensor * ggml_view_2d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int ne0,
        int ne1,
        size_t nb1,
        size_t offset) {
    if (a->grad) {
        GGML_ASSERT(false); // gradient propagation is not supported
    }

    const int ne[GGML_MAX_DIMS] = { ne0, ne1, 1, 1 };

    struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 2, ne, (char *) a->data + offset);

    result->nb[1] = nb1;
    result->nb[2] = result->nb[1]*ne1;
    result->nb[3] = result->nb[2];

    result->op   = GGML_OP_VIEW;
    result->grad = NULL;
    result->src0 = a;
    result->src1 = NULL; // TODO: maybe store the offset here?

    return result;
}
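
// Usage sketch (illustrative, assuming an initialized `ctx`): a two-row
// window into a contiguous 8x6 f32 matrix, reusing the parent's row stride.
// The view aliases a->data, so writes through `v` are visible in `a`:
//
//   struct ggml_tensor * a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 8, 6);
//   struct ggml_tensor * v = ggml_view_2d(ctx, a, 8, 2, a->nb[1], 2*a->nb[1]); // rows 2..3 of a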

// ggml_permute

struct ggml_tensor * ggml_permute(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int axis0,
        int axis1,
        int axis2,
        int axis3) {
    GGML_ASSERT(axis0 >= 0 && axis0 < GGML_MAX_DIMS);
    GGML_ASSERT(axis1 >= 0 && axis1 < GGML_MAX_DIMS);
    GGML_ASSERT(axis2 >= 0 && axis2 < GGML_MAX_DIMS);
    GGML_ASSERT(axis3 >= 0 && axis3 < GGML_MAX_DIMS);

    GGML_ASSERT(axis0 != axis1);
    GGML_ASSERT(axis0 != axis2);
    GGML_ASSERT(axis0 != axis3);
    GGML_ASSERT(axis1 != axis2);
    GGML_ASSERT(axis1 != axis3);
    GGML_ASSERT(axis2 != axis3);

    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    struct ggml_tensor * result = ggml_view_tensor(ctx, a);

    int ne[GGML_MAX_DIMS];
    int nb[GGML_MAX_DIMS];

    ne[axis0] = a->ne[0];
    ne[axis1] = a->ne[1];
    ne[axis2] = a->ne[2];
    ne[axis3] = a->ne[3];

    nb[axis0] = a->nb[0];
    nb[axis1] = a->nb[1];
    nb[axis2] = a->nb[2];
    nb[axis3] = a->nb[3];

    result->ne[0] = ne[0];
    result->ne[1] = ne[1];
    result->ne[2] = ne[2];
    result->ne[3] = ne[3];

    result->nb[0] = nb[0];
    result->nb[1] = nb[1];
    result->nb[2] = nb[2];
    result->nb[3] = nb[3];

    result->op   = GGML_OP_PERMUTE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = NULL; // TODO: maybe store the permutation here?

    return result;
}
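
// Usage sketch (illustrative): axisN gives the destination of source dim N,
// and only ne/nb are rearranged - no data moves. A typical attention-style
// swap of dims 1 and 2:
//
//   // a->ne = {head_dim, n_head, n_tokens, 1}
//   struct ggml_tensor * p = ggml_permute(ctx, a, 0, 2, 1, 3);
//   // p->ne = {head_dim, n_tokens, n_head, 1}; follow with ggml_cpy to make it contiguous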

// ggml_transpose

struct ggml_tensor * ggml_transpose(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    struct ggml_tensor * result = ggml_view_tensor(ctx, a);

    result->ne[0] = a->ne[1];
    result->ne[1] = a->ne[0];

    result->nb[0] = a->nb[1];
    result->nb[1] = a->nb[0];

    result->op   = GGML_OP_TRANSPOSE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = NULL;

    return result;
}

// ggml_get_rows

struct ggml_tensor * ggml_get_rows(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    GGML_ASSERT(ggml_is_matrix(a) && ggml_is_vector(b) && b->type == GGML_TYPE_I32);

    bool is_node = false;

    if (a->grad || b->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    // TODO: implement non-F32 return
    //struct ggml_tensor * result = ggml_new_tensor_2d(ctx, a->type, a->ne[0], b->ne[0]);
    struct ggml_tensor * result = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, a->ne[0], b->ne[0]);

    result->op   = GGML_OP_GET_ROWS;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = b;

    return result;
}

// ggml_diag_mask_inf

struct ggml_tensor * ggml_diag_mask_inf(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_past) {
    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    // TODO: when implementing backward, fix this:
    //struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
    struct ggml_tensor * result = ggml_view_tensor(ctx, a);
    struct ggml_tensor * b = ggml_new_i32(ctx, n_past);

    result->op   = GGML_OP_DIAG_MASK_INF;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = b;

    return result;
}
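
// Usage sketch (illustrative): in causal self-attention, scores above the
// diagonal (future positions) are set to -INF so that the following softmax
// assigns them zero weight:
//
//   struct ggml_tensor * masked = ggml_diag_mask_inf(ctx, scores, n_past);
//   struct ggml_tensor * probs  = ggml_soft_max(ctx, masked);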

// ggml_soft_max

struct ggml_tensor * ggml_soft_max(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    // TODO: when implementing backward, fix this:
    //struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
    struct ggml_tensor * result = ggml_view_tensor(ctx, a);

    result->op   = GGML_OP_SOFT_MAX;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = NULL;

    return result;
}

// ggml_rope

struct ggml_tensor * ggml_rope(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_past,
        int n_dims,
        int mode) {
    GGML_ASSERT(n_past >= 0);

    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    // TODO: when implementing backward, fix this:
    //struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
    struct ggml_tensor * result = ggml_view_tensor(ctx, a);

    struct ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 3);
    ((int32_t *) b->data)[0] = n_past;
    ((int32_t *) b->data)[1] = n_dims;
    ((int32_t *) b->data)[2] = mode;

    result->op   = GGML_OP_ROPE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = b;

    return result;
}
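
// Usage sketch (illustrative): the three i32 values above travel to the
// compute kernel through src1. For example, rotating the first n_rot
// components of each row, offset by the n_past positions already processed:
//
//   struct ggml_tensor * q_rot = ggml_rope(ctx, q, n_past, n_rot, 0); // mode 0: GPT-style rotation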

// ggml_conv_1d_1s

struct ggml_tensor * ggml_conv_1d_1s(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    GGML_ASSERT(ggml_is_matrix(b));
    GGML_ASSERT(a->ne[1] == b->ne[1]);
    GGML_ASSERT(a->ne[3] == 1);

    bool is_node = false;

    if (a->grad || b->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    const int ne[4] = { b->ne[0], a->ne[2], 1, 1, };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 2, ne);

    result->op   = GGML_OP_CONV_1D_1S;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = b;

    return result;
}

// ggml_conv_1d_2s

struct ggml_tensor * ggml_conv_1d_2s(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    GGML_ASSERT(ggml_is_matrix(b));
    GGML_ASSERT(a->ne[1] == b->ne[1]);
    GGML_ASSERT(a->ne[3] == 1);

    bool is_node = false;

    if (a->grad || b->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    const int ne[4] = { b->ne[0]/2, a->ne[2], 1, 1, };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 2, ne);

    result->op   = GGML_OP_CONV_1D_2S;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = b;

    return result;
}

// ggml_flash_attn

struct ggml_tensor * ggml_flash_attn(
        struct ggml_context * ctx,
        struct ggml_tensor * q,
        struct ggml_tensor * k,
        struct ggml_tensor * v,
        bool masked) {
    GGML_ASSERT(ggml_can_mul_mat(k, q));
    // TODO: check if vT can be multiplied by (k*qT)

    bool is_node = false;

    if (q->grad || k->grad || v->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    //struct ggml_tensor * result = ggml_dup_tensor(ctx, q);
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, q->ne);

    result->op   = GGML_OP_FLASH_ATTN;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = q;
    result->src1 = k;
    result->opt[0] = v;
    result->opt[1] = ggml_new_i32(ctx, masked ? 1 : 0);

    return result;
}
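
// Usage sketch (illustrative; the exact shape requirements beyond
// ggml_can_mul_mat(k, q) above are not checked yet): a fused attention call
// over q/k/v, with masked = true selecting the causal variant:
//
//   struct ggml_tensor * kqv = ggml_flash_attn(ctx, q, k, v, true);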

// ggml_flash_ff

struct ggml_tensor * ggml_flash_ff(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b0,
        struct ggml_tensor * b1,
        struct ggml_tensor * c0,
        struct ggml_tensor * c1) {
    GGML_ASSERT(ggml_can_mul_mat(b0, a));
    // TODO: more checks

    bool is_node = false;

    if (a->grad || b0->grad || b1->grad || c0->grad || c1->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    //struct ggml_tensor * result = ggml_dup_tensor(ctx, a);
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, a->ne);

    result->op   = GGML_OP_FLASH_FF;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = b0;
    result->opt[0] = b1;
    result->opt[1] = c0;
    result->opt[2] = c1;

    return result;
}

////////////////////////////////////////////////////////////////////////////////

void ggml_set_param(
        struct ggml_context * ctx,
        struct ggml_tensor * tensor) {
    tensor->is_param = true;

    GGML_ASSERT(tensor->grad == NULL);
    tensor->grad = ggml_dup_tensor(ctx, tensor);
}
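
// Usage sketch (illustrative): marking a tensor as a parameter allocates its
// gradient up front, so a later backward pass has somewhere to accumulate:
//
//   struct ggml_tensor * w = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 16);
//   ggml_set_param(ctx, w); // w->is_param = true, w->grad allocated
//   // ... build a graph that uses w; after the backward graph is computed,
//   // w->grad holds d(loss)/dw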

// ggml_compute_forward_dup

static void ggml_compute_forward_dup_f16(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(params->ith == 0);
    GGML_ASSERT(ggml_is_contiguous(dst));
    GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int ne00 = src0->ne[0];
    const int ne01 = src0->ne[1];
    const int ne02 = src0->ne[2];
    const int ne03 = src0->ne[3];

    const size_t nb00 = src0->nb[0];
    const size_t nb01 = src0->nb[1];
    const size_t nb02 = src0->nb[2];
    const size_t nb03 = src0->nb[3];

    if (ggml_is_contiguous(src0) && src0->type == dst->type) {
        memcpy(dst->data, src0->data, ggml_nelements(dst) * GGML_TYPE_SIZE[src0->type]);
        return;
    }

    if (src0->nb[0] == sizeof(ggml_fp16_t)) {
        if (dst->type == GGML_TYPE_F16) {
            size_t id = 0;
            const size_t rs = ne00*nb00;

            for (int i03 = 0; i03 < ne03; i03++) {
                for (int i02 = 0; i02 < ne02; i02++) {
                    for (int i01 = 0; i01 < ne01; i01++) {
                        const char * src0_ptr = (char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03;
                        char * dst_ptr = (char *) dst->data + id*rs;

                        memcpy(dst_ptr, src0_ptr, rs);

                        id++;
                    }
                }
            }
        } else if (dst->type == GGML_TYPE_F32) {
            size_t id = 0;
            float * dst_ptr = (float *) dst->data;

            for (int i03 = 0; i03 < ne03; i03++) {
                for (int i02 = 0; i02 < ne02; i02++) {
                    for (int i01 = 0; i01 < ne01; i01++) {
                        for (int i00 = 0; i00 < ne00; i00++) {
                            const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);

                            dst_ptr[id] = GGML_FP16_TO_FP32(*src0_ptr);
                            id++;
                        }
                    }
                }
            }
        } else {
            GGML_ASSERT(false); // TODO: implement
        }
    } else {
        //printf("%s: this is not optimal - fix me\n", __func__);

        if (dst->type == GGML_TYPE_F32) {
            size_t id = 0;
            float * dst_ptr = (float *) dst->data;

            for (int i03 = 0; i03 < ne03; i03++) {
                for (int i02 = 0; i02 < ne02; i02++) {
                    for (int i01 = 0; i01 < ne01; i01++) {
                        for (int i00 = 0; i00 < ne00; i00++) {
                            const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);

                            dst_ptr[id] = GGML_FP16_TO_FP32(*src0_ptr);
                            id++;
                        }
                    }
                }
            }
        } else if (dst->type == GGML_TYPE_F16) {
            size_t id = 0;
            ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data;

            for (int i03 = 0; i03 < ne03; i03++) {
                for (int i02 = 0; i02 < ne02; i02++) {
                    for (int i01 = 0; i01 < ne01; i01++) {
                        for (int i00 = 0; i00 < ne00; i00++) {
                            const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);

                            dst_ptr[id] = *src0_ptr;
                            id++;
                        }
                    }
                }
            }
        } else {
            GGML_ASSERT(false); // TODO: implement
        }
    }
}

static void ggml_compute_forward_dup_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(params->ith == 0);
    GGML_ASSERT(ggml_is_contiguous(dst));
    GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int ne00 = src0->ne[0];
    const int ne01 = src0->ne[1];
    const int ne02 = src0->ne[2];
    const int ne03 = src0->ne[3];

    const size_t nb00 = src0->nb[0];
    const size_t nb01 = src0->nb[1];
    const size_t nb02 = src0->nb[2];
    const size_t nb03 = src0->nb[3];

    if (ggml_is_contiguous(src0) && src0->type == dst->type) {
        memcpy(dst->data, src0->data, ggml_nelements(dst) * GGML_TYPE_SIZE[src0->type]);
        return;
    }

    if (src0->nb[0] == sizeof(float)) {
        if (dst->type == GGML_TYPE_F32) {
            size_t id = 0;
            const size_t rs = ne00*nb00;

            for (int i03 = 0; i03 < ne03; i03++) {
                for (int i02 = 0; i02 < ne02; i02++) {
                    for (int i01 = 0; i01 < ne01; i01++) {
                        const char * src0_ptr = (char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03;
                        char * dst_ptr = (char *) dst->data + id*rs;

                        memcpy(dst_ptr, src0_ptr, rs);

                        id++;
                    }
                }
            }
        } else if (dst->type == GGML_TYPE_F16) {
            size_t id = 0;
            ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data;

            for (int i03 = 0; i03 < ne03; i03++) {
                for (int i02 = 0; i02 < ne02; i02++) {
                    for (int i01 = 0; i01 < ne01; i01++) {
                        for (int i00 = 0; i00 < ne00; i00++) {
                            const float * src0_ptr = (float *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);

                            dst_ptr[id] = GGML_FP32_TO_FP16(*src0_ptr);
                            id++;
                        }
                    }
                }
            }
        } else {
            GGML_ASSERT(false); // TODO: implement
        }
    } else {
        //printf("%s: this is not optimal - fix me\n", __func__);

        if (dst->type == GGML_TYPE_F32) {
            size_t id = 0;
            float * dst_ptr = (float *) dst->data;

            for (int i03 = 0; i03 < ne03; i03++) {
                for (int i02 = 0; i02 < ne02; i02++) {
                    for (int i01 = 0; i01 < ne01; i01++) {
                        for (int i00 = 0; i00 < ne00; i00++) {
                            const float * src0_ptr = (float *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);

                            dst_ptr[id] = *src0_ptr;
                            id++;
                        }
                    }
                }
            }
        } else if (dst->type == GGML_TYPE_F16) {
            size_t id = 0;
            ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data;

            for (int i03 = 0; i03 < ne03; i03++) {
                for (int i02 = 0; i02 < ne02; i02++) {
                    for (int i01 = 0; i01 < ne01; i01++) {
                        for (int i00 = 0; i00 < ne00; i00++) {
                            const float * src0_ptr = (float *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);

                            dst_ptr[id] = GGML_FP32_TO_FP16(*src0_ptr);
                            id++;
                        }
                    }
                }
            }
        } else {
            GGML_ASSERT(false); // TODO: implement
        }
    }
}

static void ggml_compute_forward_dup(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_dup_f16(params, src0, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_dup_f32(params, src0, dst);
            } break;
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_add

static void ggml_compute_forward_add_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    const size_t nb00 = src0->nb[0];
    const size_t nb01 = src0->nb[1];

    const size_t nb10 = src1->nb[0];
    const size_t nb11 = src1->nb[1];

    const size_t nb0 = dst->nb[0];
    const size_t nb1 = dst->nb[1];

    GGML_ASSERT( nb0 == sizeof(float));
    GGML_ASSERT(nb00 == sizeof(float));

    if (nb10 == sizeof(float)) {
        const int j0 = (n/nth)*ith;
        const int j1 = ith == nth - 1 ? n : (n/nth)*(ith + 1);

        for (int j = j0; j < j1; j++) {
            ggml_vec_add_f32(nc,
                    (float *) ((char *) dst->data  + j*nb1),
                    (float *) ((char *) src0->data + j*nb01),
                    (float *) ((char *) src1->data + j*nb11));
        }
    } else {
        // src1 is not contiguous
        for (int j = ith; j < n; j += nth) {
            float * dst_ptr  = (float *) ((char *) dst->data  + j*nb1);
            float * src0_ptr = (float *) ((char *) src0->data + j*nb01);
            for (int i = 0; i < nc; i++) {
                float * src1_ptr = (float *) ((char *) src1->data + j*nb11 + i*nb10);

                dst_ptr[i] = src0_ptr[i] + *src1_ptr;
            }
        }
    }
}
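
// Worked example of the row split above: with n = 10 rows and nth = 4
// threads, n/nth = 2, so threads 0..2 process rows [0,2), [2,4), [4,6) and
// the last thread picks up the remainder, rows [6,10). Each row is written
// by exactly one thread, so no synchronization is needed in this pass.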
static void ggml_compute_forward_add(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_add_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_F16:
        case GGML_TYPE_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_sub

static void ggml_compute_forward_sub_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert( dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));
    assert(src1->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_sub_f32(nc,
                (float *) ((char *) dst->data  + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])),
                (float *) ((char *) src1->data + i*(src1->nb[1])));
    }
}

static void ggml_compute_forward_sub(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_sub_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_F16:
        case GGML_TYPE_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_mul

static void ggml_compute_forward_mul_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert( dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));
    assert(src1->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_mul_f32(nc,
                (float *) ((char *) dst->data  + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])),
                (float *) ((char *) src1->data + i*(src1->nb[1])));
    }
}

static void ggml_compute_forward_mul(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_mul_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_F16:
        case GGML_TYPE_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_div

static void ggml_compute_forward_div_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert( dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));
    assert(src1->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_div_f32(nc,
                (float *) ((char *) dst->data  + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])),
                (float *) ((char *) src1->data + i*(src1->nb[1])));
    }
}

static void ggml_compute_forward_div(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_div_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_F16:
        case GGML_TYPE_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_sqr

static void ggml_compute_forward_sqr_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert( dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_sqr_f32(nc,
                (float *) ((char *) dst->data  + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

static void ggml_compute_forward_sqr(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_sqr_f32(params, src0, dst);
            } break;
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_F16:
        case GGML_TYPE_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_sqrt

static void ggml_compute_forward_sqrt_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert( dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_sqrt_f32(nc,
                (float *) ((char *) dst->data  + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

static void ggml_compute_forward_sqrt(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_sqrt_f32(params, src0, dst);
            } break;
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_F16:
        case GGML_TYPE_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_sum

static void ggml_compute_forward_sum_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_is_scalar(dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    assert(src0->nb[0] == sizeof(float));

    const int ne00 = src0->ne[0];
    const int ne01 = src0->ne[1];
    const int ne02 = src0->ne[2];
    const int ne03 = src0->ne[3];

    const size_t nb01 = src0->nb[1];
    const size_t nb02 = src0->nb[2];
    const size_t nb03 = src0->nb[3];

    for (int i03 = 0; i03 < ne03; i03++) {
        for (int i02 = 0; i02 < ne02; i02++) {
            for (int i01 = 0; i01 < ne01; i01++) {
                ggml_vec_sum_f32(ne00,
                        (float *) (dst->data),
                        (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03));
            }
        }
    }
}

static void ggml_compute_forward_sum(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_sum_f32(params, src0, dst);
            } break;
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_F16:
        case GGML_TYPE_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_mean

static void ggml_compute_forward_mean_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    assert(src0->nb[0] == sizeof(float));

    const int ne00 = src0->ne[0];
    const int ne01 = src0->ne[1];
    const int ne02 = src0->ne[2];
    const int ne03 = src0->ne[3];

    const size_t nb01 = src0->nb[1];
    const size_t nb02 = src0->nb[2];
    const size_t nb03 = src0->nb[3];

    const int ne0 = dst->ne[0];
    const int ne1 = dst->ne[1];
    const int ne2 = dst->ne[2];
    const int ne3 = dst->ne[3];

    assert(ne0 == 1);
    assert(ne1 == ne01);
    assert(ne2 == ne02);
    assert(ne3 == ne03);

    UNUSED(ne0);
    UNUSED(ne1);
    UNUSED(ne2);
    UNUSED(ne3);

    const size_t nb1 = dst->nb[1];
    const size_t nb2 = dst->nb[2];
    const size_t nb3 = dst->nb[3];

    for (int i03 = 0; i03 < ne03; i03++) {
        for (int i02 = 0; i02 < ne02; i02++) {
            for (int i01 = 0; i01 < ne01; i01++) {
                ggml_vec_sum_f32(ne00,
                        (float *) ((char *)  dst->data + i01*nb1  + i02*nb2  + i03*nb3),
                        (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03));

                *(float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3) /= (float) ne00;
            }
        }
    }
}

static void ggml_compute_forward_mean(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_mean_f32(params, src0, dst);
            } break;
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_F16:
        case GGML_TYPE_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_repeat

static void ggml_compute_forward_repeat_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_can_repeat(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // TODO: implement support for rank > 2 tensors
    assert(src0->ne[2] == 1);
    assert(src0->ne[3] == 1);
    assert( dst->ne[2] == 1);
    assert( dst->ne[3] == 1);

    const int nc  = dst->ne[0];
    const int nr  = dst->ne[1];
    const int nc0 = src0->ne[0];
    const int nr0 = src0->ne[1];

    const int ncr = nc/nc0; // guaranteed to be an integer due to the check in ggml_can_repeat
    const int nrr = nr/nr0; // guaranteed to be an integer due to the check in ggml_can_repeat

    // TODO: support for transposed / permuted tensors
    assert( dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    // TODO: maybe this is not optimal?
    for (int i = 0; i < nrr; i++) {
        for (int j = 0; j < ncr; j++) {
            for (int k = 0; k < nr0; k++) {
                ggml_vec_cpy_f32(nc0,
                        (float *) ((char *)  dst->data + (i*nr0 + k)*( dst->nb[1]) + j*nc0*( dst->nb[0])),
                        (float *) ((char *) src0->data + (        k)*(src0->nb[1])));
            }
        }
    }
}

static void ggml_compute_forward_repeat(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_repeat_f32(params, src0, dst);
            } break;
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_F16:
        case GGML_TYPE_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_abs

static void ggml_compute_forward_abs_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert( dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_abs_f32(nc,
                (float *) ((char *) dst->data  + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

static void ggml_compute_forward_abs(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_abs_f32(params, src0, dst);
            } break;
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_F16:
        case GGML_TYPE_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_sgn

static void ggml_compute_forward_sgn_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert( dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_sgn_f32(nc,
                (float *) ((char *) dst->data  + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

static void ggml_compute_forward_sgn(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_sgn_f32(params, src0, dst);
            } break;
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_F16:
        case GGML_TYPE_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_neg

static void ggml_compute_forward_neg_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert( dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_neg_f32(nc,
                (float *) ((char *) dst->data  + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

static void ggml_compute_forward_neg(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_neg_f32(params, src0, dst);
            } break;
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_F16:
        case GGML_TYPE_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_step

static void ggml_compute_forward_step_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert( dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_step_f32(nc,
                (float *) ((char *) dst->data  + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

static void ggml_compute_forward_step(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_step_f32(params, src0, dst);
            } break;
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_F16:
        case GGML_TYPE_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_relu

static void ggml_compute_forward_relu_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert( dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_relu_f32(nc,
                (float *) ((char *) dst->data  + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

static void ggml_compute_forward_relu(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_relu_f32(params, src0, dst);
            } break;
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_F16:
        case GGML_TYPE_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_gelu

static void ggml_compute_forward_gelu_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_is_contiguous(src0));
    GGML_ASSERT(ggml_is_contiguous(dst));
    GGML_ASSERT(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int nc = src0->ne[0];
    const int nr = ggml_nrows(src0);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        ggml_vec_gelu_f32(nc,
                (float *) ((char *) dst->data  + i1*( dst->nb[1])),
                (float *) ((char *) src0->data + i1*(src0->nb[1])));

#ifndef NDEBUG
        for (int k = 0; k < nc; k++) {
            const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k];
            UNUSED(x);
            assert(!isnan(x));
            assert(!isinf(x));
        }
#endif
    }
}

static void ggml_compute_forward_gelu(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_gelu_f32(params, src0, dst);
            } break;
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_F16:
        case GGML_TYPE_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_silu

static void ggml_compute_forward_silu_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_is_contiguous(src0));
    GGML_ASSERT(ggml_is_contiguous(dst));
    GGML_ASSERT(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int nc = src0->ne[0];
    const int nr = ggml_nrows(src0);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        ggml_vec_silu_f32(nc,
                (float *) ((char *) dst->data  + i1*( dst->nb[1])),
                (float *) ((char *) src0->data + i1*(src0->nb[1])));

#ifndef NDEBUG
        for (int k = 0; k < nc; k++) {
            const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k];
            UNUSED(x);
            assert(!isnan(x));
            assert(!isinf(x));
        }
#endif
    }
}

static void ggml_compute_forward_silu(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_silu_f32(params, src0, dst);
            } break;
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_F16:
        case GGML_TYPE_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_norm

static void ggml_compute_forward_norm_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    GGML_ASSERT(src0->nb[0] == sizeof(float));

    const int ith = params->ith;
    const int nth = params->nth;

    const int ne00 = src0->ne[0];
    const int ne01 = src0->ne[1];
    const int ne02 = src0->ne[2];
    const int ne03 = src0->ne[3];

    const size_t nb01 = src0->nb[1];
    const size_t nb02 = src0->nb[2];
    const size_t nb03 = src0->nb[3];

    const size_t nb1 = dst->nb[1];
    const size_t nb2 = dst->nb[2];
    const size_t nb3 = dst->nb[3];

    const float eps = 1e-5f; // TODO: make this a parameter

    // TODO: optimize
    for (int i03 = 0; i03 < ne03; i03++) {
        for (int i02 = 0; i02 < ne02; i02++) {
            for (int i01 = ith; i01 < ne01; i01 += nth) {
                const float * x = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);

                ggml_float sum = 0.0;
                for (int i00 = 0; i00 < ne00; i00++) {
                    sum += (ggml_float)x[i00];
                }

                float mean = sum/ne00;

                float * y = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3);

                ggml_float sum2 = 0.0;
                for (int i00 = 0; i00 < ne00; i00++) {
                    float v = x[i00] - mean;
                    y[i00] = v;
                    sum2 += (ggml_float)(v*v);
                }

                float variance = sum2/ne00;
                const float scale = 1.0f/sqrtf(variance + eps);

                ggml_vec_scale_f32(ne00, y, scale);
            }
        }
    }
}
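
// In formula form, the loop above computes, for every row x of length ne00:
//   mean = (1/ne00) * sum_i x[i]
//   var  = (1/ne00) * sum_i (x[i] - mean)^2
//   y[i] = (x[i] - mean) / sqrt(var + eps)
// i.e. layer normalization without a learned scale/offset, which callers can
// apply separately (e.g. with ggml_mul / ggml_add).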

static void ggml_compute_forward_norm(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_norm_f32(params, src0, dst);
            } break;
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_F16:
        case GGML_TYPE_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }
}

static void ggml_compute_forward_rms_norm_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    GGML_ASSERT(src0->nb[0] == sizeof(float));

    const int ith = params->ith;
    const int nth = params->nth;

    const int ne00 = src0->ne[0];
    const int ne01 = src0->ne[1];
    const int ne02 = src0->ne[2];
    const int ne03 = src0->ne[3];

    const size_t nb01 = src0->nb[1];
    const size_t nb02 = src0->nb[2];
    const size_t nb03 = src0->nb[3];

    const size_t nb1 = dst->nb[1];
    const size_t nb2 = dst->nb[2];
    const size_t nb3 = dst->nb[3];

    const float eps = 1e-6f; // TODO: make this a parameter

    // TODO: optimize
    for (int i03 = 0; i03 < ne03; i03++) {
        for (int i02 = 0; i02 < ne02; i02++) {
            for (int i01 = ith; i01 < ne01; i01 += nth) {
                const float * x = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);

                ggml_float sum = 0.0;
                for (int i00 = 0; i00 < ne00; i00++) {
                    sum += (ggml_float)(x[i00] * x[i00]);
                }

                float mean = sum/ne00;

                float * y = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3);

                memcpy(y, x, ne00 * sizeof(float));
                // for (int i00 = 0; i00 < ne00; i00++) {
                //     y[i00] = x[i00];
                // }

                const float scale = 1.0f/sqrtf(mean + eps);

                ggml_vec_scale_f32(ne00, y, scale);
            }
        }
    }
}
static void ggml_compute_forward_rms_norm(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_rms_norm_f32(params, src0, dst);
            } break;
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_F16:
        case GGML_TYPE_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_mul_mat

#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
// helper function to determine if it is better to use BLAS or not
// for large matrices, BLAS is faster
static bool ggml_compute_forward_mul_mat_use_blas(
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    //const int ne00 = src0->ne[0];
    //const int ne01 = src0->ne[1];

    const int ne10 = src1->ne[0];

    const int ne0 = dst->ne[0];
    const int ne1 = dst->ne[1];

    // TODO: find the optimal values for these
    if (ggml_is_contiguous(src0) &&
        ggml_is_contiguous(src1) && ((ne0 >= 32 && ne1 >= 32 && ne10 >= 32))) {
        /*printf("BLAS: %d %d %d %d %d\n", ne0, ne1, ne10, ne00, ne01);*/
        return true;
    }

    return false;
}
#endif
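
// two code paths below: for large contiguous tensors the 2D slices are handed
// to BLAS sgemm; otherwise the threads split the rows of src0 and compute
// dst[i1][i0] = dot(row i0 of src0, row i1 of src1) with ggml_vec_dot_f32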
static void ggml_compute_forward_mul_mat_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    const int ne00 = src0->ne[0];
    const int ne01 = src0->ne[1];
    const int ne02 = src0->ne[2];
    const int ne03 = src0->ne[3];

#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
    const int ne10 = src1->ne[0];
#endif
    const int ne11 = src1->ne[1];
#ifndef NDEBUG
    const int ne12 = src1->ne[2];
    const int ne13 = src1->ne[3];

    const int ne0 = dst->ne[0];
    const int ne1 = dst->ne[1];
    const int ne2 = dst->ne[2];
    const int ne3 = dst->ne[3];

    const int nb00 = src0->nb[0];
#endif
    const int nb01 = src0->nb[1];
    const int nb02 = src0->nb[2];
    const int nb03 = src0->nb[3];
#ifndef NDEBUG
    const int nb10 = src1->nb[0];
#endif
    const int nb11 = src1->nb[1];
    const int nb12 = src1->nb[2];
    const int nb13 = src1->nb[3];

    const int nb0 = dst->nb[0];
    const int nb1 = dst->nb[1];
    const int nb2 = dst->nb[2];
    const int nb3 = dst->nb[3];

    const int ith = params->ith;
    const int nth = params->nth;

    assert(ne02 == ne12);
    assert(ne03 == ne13);
    assert(ne2  == ne12);
    assert(ne3  == ne13);

    // we don't support permuted src0 or src1
    assert(nb00 == sizeof(float));
    assert(nb10 == sizeof(float));

    // dst cannot be transposed or permuted
    assert(nb0 == sizeof(float));
    assert(nb0 <= nb1);
    assert(nb1 <= nb2);
    assert(nb2 <= nb3);

    assert(ne0 == ne01);
    assert(ne1 == ne11);
    assert(ne2 == ne02);
    assert(ne3 == ne03);

    // nb01 >= nb00 - src0 is not transposed
    //   compute by src0 rows

#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
    if (ggml_compute_forward_mul_mat_use_blas(src0, src1, dst)) {
        if (params->ith != 0) {
            return;
        }

        if (params->type == GGML_TASK_INIT) {
            return;
        }

        if (params->type == GGML_TASK_FINALIZE) {
            return;
        }

        for (int i03 = 0; i03 < ne03; i03++) {
            for (int i02 = 0; i02 < ne02; i02++) {
                const float * x = (float *) ((char *) src0->data + i02*nb02 + i03*nb03);
                const float * y = (float *) ((char *) src1->data + i02*nb12 + i03*nb13);

                float * d = (float *) ((char *) dst->data + i02*nb2 + i03*nb3);

                // zT = y * xT
                cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans,
                        ne11, ne01, ne10,
                        1.0f, y, ne10,
                              x, ne10,
                        0.0f, d, ne01);
            }
        }

        //printf("CBLAS F32 = %f ms, %d x %d x %d x %d\n", (ggml_perf_time_us() - t0)/1000.0, ne0, ne1, ne2, ne3);

        return;
    }
#endif

    if (params->type == GGML_TASK_INIT) {
        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // parallelize by src0 rows using ggml_vec_dot_f32

    // total rows in src0
    const int nr = ne01*ne02*ne03;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);
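    // each thread gets a contiguous chunk of dr rows; the MIN clamps the last
    // thread's range when nr is not divisible by nth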
    for (int ir = ir0; ir < ir1; ++ir) {
        // src0 indices
        const int i03 = ir/(ne02*ne01);
        const int i02 = (ir - i03*ne02*ne01)/ne01;
        const int i01 = (ir - i03*ne02*ne01 - i02*ne01);

        for (int ic = 0; ic < ne11; ++ic) {
            // src1 indices
            const int i13 = i03;
            const int i12 = i02;
            const int i11 = ic;

            // dst indices
            const int i0 = i01;
            const int i1 = i11;
            const int i2 = i02;
            const int i3 = i03;

            ggml_vec_dot_f32(ne00,
                    (float *) ((char *)  dst->data + (i0*nb0 + i1*nb1 + i2*nb2 + i3*nb3)),
                    (float *) ((char *) src0->data + (i01*nb01 + i02*nb02 + i03*nb03)),
                    (float *) ((char *) src1->data + (i11*nb11 + i12*nb12 + i13*nb13)));
        }
    }

    //int64_t t1 = ggml_perf_time_us();
    //static int64_t acc = 0;
    //acc += t1 - t0;
    //if (t1 - t0 > 10) {
    //    printf("\n");
    //    printf("ne00 = %5d, ne01 = %5d, ne02 = %5d, ne03 = %5d\n", ne00, ne01, ne02, ne03);
    //    printf("nb00 = %5d, nb01 = %5d, nb02 = %5d, nb03 = %5d\n", nb00, nb01, nb02, nb03);
    //    printf("ne10 = %5d, ne11 = %5d, ne12 = %5d, ne13 = %5d\n", ne10, ne11, ne12, ne13);
    //    printf("nb10 = %5d, nb11 = %5d, nb12 = %5d, nb13 = %5d\n", nb10, nb11, nb12, nb13);
    //    printf("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX task %d/%d: %d us, acc = %d\n", ith, nth, (int) (t1 - t0), (int) acc);
    //}
}
static void ggml_compute_forward_mul_mat_f16_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    const int ne00 = src0->ne[0];
    const int ne01 = src0->ne[1];
    const int ne02 = src0->ne[2];
    const int ne03 = src0->ne[3];

    const int ne10 = src1->ne[0];
    const int ne11 = src1->ne[1];
    const int ne12 = src1->ne[2];
    const int ne13 = src1->ne[3];

    const int ne0 = dst->ne[0];
    const int ne1 = dst->ne[1];
    const int ne2 = dst->ne[2];
    const int ne3 = dst->ne[3];
    //const int ne = ne0*ne1*ne2*ne3;

    const int nb00 = src0->nb[0];
    const int nb01 = src0->nb[1];
    const int nb02 = src0->nb[2];
    const int nb03 = src0->nb[3];

    const int nb10 = src1->nb[0];
    const int nb11 = src1->nb[1];
    const int nb12 = src1->nb[2];
    const int nb13 = src1->nb[3];

    const int nb0 = dst->nb[0];
    const int nb1 = dst->nb[1];
    const int nb2 = dst->nb[2];
    const int nb3 = dst->nb[3];

    const int ith = params->ith;
    const int nth = params->nth;

    GGML_ASSERT(ne02 == ne12);
    GGML_ASSERT(ne03 == ne13);
    GGML_ASSERT(ne2  == ne12);
    GGML_ASSERT(ne3  == ne13);

    // TODO: we don't support permuted src0
    GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));

    // dst cannot be transposed or permuted
    GGML_ASSERT(nb0 == sizeof(float));
    GGML_ASSERT(nb0 <= nb1);
    GGML_ASSERT(nb1 <= nb2);
    GGML_ASSERT(nb2 <= nb3);

    GGML_ASSERT(ne0 == ne01);
    GGML_ASSERT(ne1 == ne11);
    GGML_ASSERT(ne2 == ne02);
    GGML_ASSERT(ne3 == ne03);

    // nb01 >= nb00 - src0 is not transposed
    //   compute by src0 rows

#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
    if (ggml_compute_forward_mul_mat_use_blas(src0, src1, dst)) {
        GGML_ASSERT(nb10 == sizeof(float));

        if (params->ith != 0) {
            return;
        }

        if (params->type == GGML_TASK_INIT) {
            return;
        }

        if (params->type == GGML_TASK_FINALIZE) {
            return;
        }

        float * const wdata = params->wdata;

        for (int i03 = 0; i03 < ne03; i03++) {
            for (int i02 = 0; i02 < ne02; i02++) {
                {
                    size_t id = 0;
                    for (int i01 = 0; i01 < ne01; ++i01) {
                        for (int i00 = 0; i00 < ne00; ++i00) {
                            wdata[id++] = GGML_FP16_TO_FP32(*(ggml_fp16_t *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00));
                        }
                    }
                }

                const float * x = wdata;
                const float * y = (float *) ((char *) src1->data + i02*nb12 + i03*nb13);

                float * d = (float *) ((char *) dst->data + i02*nb2 + i03*nb3);

                // zT = y * xT
                cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans,
                        ne11, ne01, ne10,
                        1.0f, y, ne10,
                              x, ne10,
                        0.0f, d, ne01);
            }
        }

        /*printf("CBLAS F16 = %f ms, %d x %d x %d x %d\n", (ggml_perf_time_us() - t0)/1000.0, ne0, ne1, ne2, ne3);*/

        return;
    }
#endif
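
    // during INIT, src1 is converted from f32 to f16 into wdata, so that the
    // hot loop below can use ggml_vec_dot_f16 on two f16 operands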
    if (params->type == GGML_TASK_INIT) {
        ggml_fp16_t * const wdata = params->wdata;

        size_t id = 0;
        for (int i13 = 0; i13 < ne13; ++i13) {
            for (int i12 = 0; i12 < ne12; ++i12) {
                for (int i11 = 0; i11 < ne11; ++i11) {
                    for (int i10 = 0; i10 < ne10; ++i10) {
                        wdata[id++] = GGML_FP32_TO_FP16(*(float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + i10*nb10));
                    }
                }
            }
        }

        GGML_ASSERT(id*sizeof(ggml_fp16_t) <= params->wsize);

        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // fp16 is half the size of f32, hence the division by 2
    // TODO: transposed src1 is not supported
    assert(nb10/2 == sizeof(ggml_fp16_t));

    // parallelize by src0 rows using ggml_vec_dot_f16

    // total rows in src0
    const int nr = ne01*ne02*ne03;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    ggml_fp16_t * wdata = params->wdata;

    for (int ir = ir0; ir < ir1; ++ir) {
        // src0 indices
        const int i03 = ir/(ne02*ne01);
        const int i02 = (ir - i03*ne02*ne01)/ne01;
        const int i01 = (ir - i03*ne02*ne01 - i02*ne01);

        const int i13 = i03;
        const int i12 = i02;

        const int i0 = i01;
        const int i2 = i02;
        const int i3 = i03;

        ggml_fp16_t * src0_row = (ggml_fp16_t *) ((char *) src0->data + (i01*nb01 + i02*nb02 + i03*nb03));
        ggml_fp16_t * src1_col = wdata + ( 0 + i12*ne11 + i13*ne12*ne11)*ne00;

        float * dst_col = (float *) ((char *) dst->data + (i0*nb0 + 0*nb1 + i2*nb2 + i3*nb3));

        for (int ic = 0; ic < ne11; ++ic) {
            ggml_vec_dot_f16(ne00, &dst_col[ic*ne0], src0_row, src1_col + ic*ne00);
        }
    }

    //int64_t t1 = ggml_time_us();
    //static int64_t acc = 0;
    //acc += t1 - t0;
    //if (t1 - t0 > 10) {
    //    printf("\n");
    //    printf("ne00 = %5d, ne01 = %5d, ne02 = %5d, ne03 = %5d\n", ne00, ne01, ne02, ne03);
    //    printf("nb00 = %5d, nb01 = %5d, nb02 = %5d, nb03 = %5d\n", nb00, nb01, nb02, nb03);
    //    printf("ne10 = %5d, ne11 = %5d, ne12 = %5d, ne13 = %5d\n", ne10, ne11, ne12, ne13);
    //    printf("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX task %d/%d: %d us, acc = %d\n", ith, nth, (int) (t1 - t0), (int) acc);
    //}
}
typedef void (*dequantize_row_q_t)(const void * restrict x, float * restrict y, int k);
typedef void (*quantize_row_q_t)(const float * restrict x, void * restrict y, int k);
typedef void (*vec_dot_q_t)(const int n, float * restrict s, const void * restrict x, const void * restrict y);

typedef struct {
    dequantize_row_q_t dequantize_row_q;
    quantize_row_q_t   quantize_row_q;
    vec_dot_q_t        vec_dot_q;
} quantize_fns_t;

static const quantize_fns_t quantize_fns[GGML_TYPE_COUNT] = {
    [GGML_TYPE_Q4_0] = {
        .dequantize_row_q = dequantize_row_q4_0,
        .quantize_row_q   = quantize_row_q4_0,
        .vec_dot_q        = ggml_vec_dot_q4_0,
    },
    [GGML_TYPE_Q4_1] = {
        .dequantize_row_q = dequantize_row_q4_1,
        .quantize_row_q   = quantize_row_q4_1,
        .vec_dot_q        = ggml_vec_dot_q4_1,
    },
};
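
// per-type dispatch table: the quantized mul_mat and get_rows paths look up
// their (de)quantization and dot-product handlers here instead of switching
// on the tensor type at every call site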
static void ggml_compute_forward_mul_mat_q_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    const int ne00 = src0->ne[0];
    const int ne01 = src0->ne[1];
    const int ne02 = src0->ne[2];
    const int ne03 = src0->ne[3];

    const int ne10 = src1->ne[0];
    const int ne11 = src1->ne[1];
    const int ne12 = src1->ne[2];
    const int ne13 = src1->ne[3];

    const int ne0 = dst->ne[0];
    const int ne1 = dst->ne[1];
    const int ne2 = dst->ne[2];
    const int ne3 = dst->ne[3];

    const int nb00 = src0->nb[0];
    const int nb01 = src0->nb[1];
    const int nb02 = src0->nb[2];
    const int nb03 = src0->nb[3];

    const int nb10 = src1->nb[0];
    const int nb11 = src1->nb[1];
    const int nb12 = src1->nb[2];
    const int nb13 = src1->nb[3];

    const int nb0 = dst->nb[0];
    const int nb1 = dst->nb[1];
    const int nb2 = dst->nb[2];
    const int nb3 = dst->nb[3];

    const int ith = params->ith;
    const int nth = params->nth;

    GGML_ASSERT(ne02 == ne12);
    GGML_ASSERT(ne03 == ne13);
    GGML_ASSERT(ne2  == ne12);
    GGML_ASSERT(ne3  == ne13);

    const enum ggml_type type = src0->type;
    quantize_row_q_t const quantize_row_q = quantize_fns[type].quantize_row_q;
    vec_dot_q_t      const vec_dot_q      = quantize_fns[type].vec_dot_q;

    // we don't support permuted src0 or src1
    GGML_ASSERT(nb00 == (int) GGML_TYPE_SIZE[type]);
    GGML_ASSERT(nb10 == sizeof(float));

    // dst cannot be transposed or permuted
    GGML_ASSERT(nb0 == sizeof(float));
    GGML_ASSERT(nb0 <= nb1);
    GGML_ASSERT(nb1 <= nb2);
    GGML_ASSERT(nb2 <= nb3);

    GGML_ASSERT(ne0 == ne01);
    GGML_ASSERT(ne1 == ne11);
    GGML_ASSERT(ne2 == ne02);
    GGML_ASSERT(ne3 == ne03);

    // nb01 >= nb00 - src0 is not transposed
    //   compute by src0 rows

#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
    if (ggml_compute_forward_mul_mat_use_blas(src0, src1, dst)) {
        if (params->ith != 0) {
            return;
        }

        if (params->type == GGML_TASK_INIT) {
            return;
        }

        if (params->type == GGML_TASK_FINALIZE) {
            return;
        }

        float * const wdata = params->wdata;
        dequantize_row_q_t const dequantize_row_q = quantize_fns[type].dequantize_row_q;

        for (int i03 = 0; i03 < ne03; i03++) {
            for (int i02 = 0; i02 < ne02; i02++) {
                {
                    size_t id = 0;
                    for (int i01 = 0; i01 < ne01; ++i01) {
                        dequantize_row_q((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01, wdata + id, ne00);
                        id += ne00;
                    }
                }

                const float * x = wdata;
                const float * y = (float *) ((char *) src1->data + i02*nb12 + i03*nb13);

                float * d = (float *) ((char *) dst->data + i02*nb2 + i03*nb3);

                // zT = y * xT
                cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans,
                        ne11, ne01, ne10,
                        1.0f, y, ne10,
                              x, ne10,
                        0.0f, d, ne01);
            }
        }

        //printf("CBLAS = %f ms, %d x %d x %d x %d\n", (ggml_perf_time_us() - t0)/1000.0, ne0, ne1, ne2, ne3);

        return;
    }
#endif
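
    // during INIT, each f32 row of src1 is quantized into wdata, so that the
    // hot loop below runs vec_dot_q on two rows with the same block layout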
    if (params->type == GGML_TASK_INIT) {
        char * wdata = params->wdata;
        const size_t row_size = ne10*GGML_TYPE_SIZE[type]/GGML_BLCK_SIZE[type];

        for (int i13 = 0; i13 < ne13; ++i13) {
            for (int i12 = 0; i12 < ne12; ++i12) {
                for (int i11 = 0; i11 < ne11; ++i11) {
                    quantize_row_q((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11), (void *) wdata, ne10);
                    wdata += row_size;
                }
            }
        }

        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // parallelize by src0 rows using ggml_vec_dot_q

    // total rows in src0
    const int nr = ne01*ne02*ne03;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    void * wdata = params->wdata;
    const size_t row_size = ne00*GGML_TYPE_SIZE[type]/GGML_BLCK_SIZE[type];

    for (int ir = ir0; ir < ir1; ++ir) {
        // src0 indices
        const int i03 = ir/(ne02*ne01);
        const int i02 = (ir - i03*ne02*ne01)/ne01;
        const int i01 = (ir - i03*ne02*ne01 - i02*ne01);

        const int i13 = i03;
        const int i12 = i02;

        const int i0 = i01;
        const int i2 = i02;
        const int i3 = i03;

        void * src0_row = (void *) ((char *) src0->data + (i01*nb01 + i02*nb02 + i03*nb03));
        char * src1_col = ((char *) wdata + ( (0 + i12*ne11 + i13*ne12*ne11)*row_size));

        float * dst_col = (float *) ((char *) dst->data + (i0*nb0 + 0*nb1 + i2*nb2 + i3*nb3));

        assert(ne00 % 32 == 0);

        for (int ic = 0; ic < ne11; ++ic) {
            vec_dot_q(ne00, &dst_col[ic*ne0], src0_row, (void *) (src1_col + ic*row_size));
        }
    }

    //int64_t t1 = ggml_time_us();
    //static int64_t acc = 0;
    //acc += t1 - t0;
    //if (t1 - t0 > 10) {
    //    printf("\n");
    //    printf("ne00 = %5d, ne01 = %5d, ne02 = %5d, ne03 = %5d\n", ne00, ne01, ne02, ne03);
    //    printf("nb00 = %5d, nb01 = %5d, nb02 = %5d, nb03 = %5d\n", nb00, nb01, nb02, nb03);
    //    printf("ne10 = %5d, ne11 = %5d, ne12 = %5d, ne13 = %5d\n", ne10, ne11, ne12, ne13);
    //    printf("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX task %d/%d: %d us, acc = %d\n", ith, nth, (int) (t1 - t0), (int) acc);
    //}
}
static void ggml_compute_forward_mul_mat(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
            {
                ggml_compute_forward_mul_mat_q_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_mul_mat_f16_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_mul_mat_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }

#if 0
    if (src0->type == GGML_TYPE_F16 || src0->type == GGML_TYPE_Q4_1) {
        static int first = 8;
        printf("src0: ne0 = %5d, ne1 = %5d, ne2 = %5d\n", src0->ne[0], src0->ne[1], src0->ne[2]);
        printf("src1: ne0 = %5d, ne1 = %5d, ne2 = %5d\n", src1->ne[0], src1->ne[1], src1->ne[2]);
        printf("dst: ne0 = %5d, ne1 = %5d, ne2 = %5d\n", dst->ne[0], dst->ne[1], dst->ne[2]);
        if (first) {
            --first;
        } else {
            for (int k = 0; k < dst->ne[1]; ++k) {
                for (int j = 0; j < dst->ne[0]/16; ++j) {
                    for (int i = 0; i < 16; ++i) {
                        printf("%8.4f ", ((float *) dst->data)[k*dst->ne[0] + j*16 + i]);
                    }
                    printf("\n");
                }
                printf("\n");
            }
            printf("\n");
            exit(0);
        }
    } else {
        printf("aaaa src0: ne0 = %5d, ne1 = %5d, ne2 = %5d\n", src0->ne[0], src0->ne[1], src0->ne[2]);
        printf("aaaa src1: ne0 = %5d, ne1 = %5d, ne2 = %5d\n", src1->ne[0], src1->ne[1], src1->ne[2]);
        printf("aaaa dst: ne0 = %5d, ne1 = %5d, ne2 = %5d\n", dst->ne[0], dst->ne[1], dst->ne[2]);
    }
#endif
}
// ggml_compute_forward_scale

static void ggml_compute_forward_scale_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_is_contiguous(src0));
    GGML_ASSERT(ggml_is_contiguous(dst));
    GGML_ASSERT(ggml_are_same_shape(src0, dst));
    GGML_ASSERT(ggml_is_scalar(src1));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }
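
    // note: the loop below scales dst in place and never reads src0, so it
    // relies on dst already holding src0's data (i.e. the in-place use of
    // ggml_scale, where dst is a view of src0)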
    // scale factor
    const float v = *(float *) src1->data;

    const int ith = params->ith;
    const int nth = params->nth;

    const int nc = src0->ne[0];
    const int nr = ggml_nrows(src0);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        ggml_vec_scale_f32(nc, (float *) ((char *) dst->data + i1*(dst->nb[1])), v);
    }
}

static void ggml_compute_forward_scale(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_scale_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_F16:
        case GGML_TYPE_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_cpy

static void ggml_compute_forward_cpy(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    ggml_compute_forward_dup(params, src0, dst);
}
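
// the next four ops are no-ops at compute time: reshape, view, permute and
// transpose only rewrite the ne/nb metadata when the graph is constructed,
// and the result tensor shares the data of its source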
// ggml_compute_forward_reshape

static void ggml_compute_forward_reshape(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    // NOP
    UNUSED(params);
    UNUSED(src0);
    UNUSED(dst);
}

// ggml_compute_forward_view

static void ggml_compute_forward_view(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0) {
    // NOP
    UNUSED(params);
    UNUSED(src0);
}

// ggml_compute_forward_permute

static void ggml_compute_forward_permute(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0) {
    // NOP
    UNUSED(params);
    UNUSED(src0);
}

// ggml_compute_forward_transpose

static void ggml_compute_forward_transpose(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0) {
    // NOP
    UNUSED(params);
    UNUSED(src0);
}
// ggml_compute_forward_get_rows
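// gathers rows of src0: for each int32 index r in src1, row r of src0 is
// copied (dequantizing or converting to f32 as needed) into the next row of dst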
static void ggml_compute_forward_get_rows_q(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int nc = src0->ne[0];
    const int nr = ggml_nelements(src1);
    const enum ggml_type type = src0->type;
    dequantize_row_q_t const dequantize_row_q = quantize_fns[type].dequantize_row_q;

    assert( dst->ne[0] == nc);
    assert( dst->ne[1] == nr);
    assert(src0->nb[0] == GGML_TYPE_SIZE[type]);

    for (int i = 0; i < nr; ++i) {
        const int r = ((int32_t *) src1->data)[i];

        dequantize_row_q(
                (const void *) ((char *) src0->data + r*src0->nb[1]),
                     (float *) ((char *)  dst->data + i*dst->nb[1]), nc);
    }
}

static void ggml_compute_forward_get_rows_f16(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int nc = src0->ne[0];
    const int nr = ggml_nelements(src1);

    assert( dst->ne[0] == nc);
    assert( dst->ne[1] == nr);
    assert(src0->nb[0] == sizeof(ggml_fp16_t));

    for (int i = 0; i < nr; ++i) {
        const int r = ((int32_t *) src1->data)[i];

        for (int j = 0; j < nc; ++j) {
            ggml_fp16_t v = ((ggml_fp16_t *) ((char *) src0->data + r*src0->nb[1]))[j];
            ((float *) ((char *) dst->data + i*dst->nb[1]))[j] = GGML_FP16_TO_FP32(v);
        }
    }
}

static void ggml_compute_forward_get_rows_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int nc = src0->ne[0];
    const int nr = ggml_nelements(src1);

    assert( dst->ne[0] == nc);
    assert( dst->ne[1] == nr);
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < nr; ++i) {
        const int r = ((int32_t *) src1->data)[i];

        ggml_vec_cpy_f32(nc,
                (float *) ((char *)  dst->data + i*dst->nb[1]),
                (float *) ((char *) src0->data + r*src0->nb[1]));
    }
}

static void ggml_compute_forward_get_rows(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
            {
                ggml_compute_forward_get_rows_q(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_get_rows_f16(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_get_rows_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }

    //static bool first = true;
    //printf("ne0 = %d, ne1 = %d, ne2 = %d\n", dst->ne[0], dst->ne[1], dst->ne[2]);
    //if (first) {
    //    first = false;
    //} else {
    //    for (int k = 0; k < dst->ne[1]; ++k) {
    //        for (int j = 0; j < dst->ne[0]/16; ++j) {
    //            for (int i = 0; i < 16; ++i) {
    //                printf("%8.4f ", ((float *) dst->data)[k*dst->ne[0] + j*16 + i]);
    //            }
    //            printf("\n");
    //        }
    //        printf("\n");
    //    }
    //    printf("\n");
    //    exit(0);
    //}
}
// ggml_compute_forward_diag_mask_inf

static void ggml_compute_forward_diag_mask_inf_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(src1->type == GGML_TYPE_I32);
    assert(ggml_nelements(src1) == 1);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n_past = ((int32_t *) src1->data)[0];

    // TODO: handle transposed/permuted matrices

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];
    const int nr = src0->ne[1];
    const int nz = n/nr;

    assert( dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));
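
    // causal mask: in each row j, every column i with i > n_past + j lies
    // above the shifted diagonal and is set to -INF, so that a subsequent
    // softmax assigns it zero probability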
    for (int k = 0; k < nz; k++) {
        for (int j = 0; j < nr; j++) {
            for (int i = n_past; i < nc; i++) {
                if (i > n_past + j) {
                    *(float *)((char *) dst->data + k*dst->nb[2] + j*dst->nb[1] + i*dst->nb[0]) = -INFINITY;
                }
            }
        }
    }
}

static void ggml_compute_forward_diag_mask_inf(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_diag_mask_inf_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_F16:
        case GGML_TYPE_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_soft_max

static void ggml_compute_forward_soft_max_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_is_contiguous(src0));
    GGML_ASSERT(ggml_is_contiguous(dst));
    GGML_ASSERT(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // TODO: handle transposed/permuted matrices

    const int ith = params->ith;
    const int nth = params->nth;

    const int nc = src0->ne[0];
    const int nr = ggml_nrows(src0);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        float *p = (float *)((char *) dst->data + i1*dst->nb[1]);

#ifndef NDEBUG
        for (int i = 0; i < nc; ++i) {
            //printf("p[%d] = %f\n", i, p[i]);
            assert(!isnan(p[i]));
        }
#endif

        float max = -INFINITY;
        ggml_vec_max_f32(nc, &max, p);

        ggml_float sum = 0.0;

        uint16_t scvt;
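        // exp(p[i] - max) via lookup: the argument is converted to f16 and
        // its 16-bit pattern indexes the precomputed table_exp_f16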
        for (int i = 0; i < nc; i++) {
            if (p[i] == -INFINITY) {
                p[i] = 0.0f;
            } else {
                //const float val = (p[i] == -INFINITY) ? 0.0 : exp(p[i] - max);
                ggml_fp16_t s = GGML_FP32_TO_FP16(p[i] - max);
                memcpy(&scvt, &s, sizeof(scvt));
                const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt]);
                sum += (ggml_float)val;
                p[i] = val;
            }
        }

        assert(sum > 0.0);

        sum = 1.0/sum;
        ggml_vec_scale_f32(nc, p, sum);

#ifndef NDEBUG
        for (int i = 0; i < nc; ++i) {
            assert(!isnan(p[i]));
            assert(!isinf(p[i]));
        }
#endif
    }
}

static void ggml_compute_forward_soft_max(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_soft_max_f32(params, src0, dst);
            } break;
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_F16:
        case GGML_TYPE_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_rope

static void ggml_compute_forward_rope_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(src1->type == GGML_TYPE_I32);
    assert(ggml_nelements(src1) == 3);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n_past = ((int32_t *) src1->data)[0];
    const int n_dims = ((int32_t *) src1->data)[1];
    const int mode   = ((int32_t *) src1->data)[2];

    //const int ne0 = src0->ne[0];
    const int ne1 = src0->ne[1];
    const int ne2 = src0->ne[2];
    const int ne3 = src0->ne[3];

    const int nb0 = src0->nb[0];
    const int nb1 = src0->nb[1];
    const int nb2 = src0->nb[2];
    const int nb3 = src0->nb[3];

    //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
    //printf("n_past = %d, ne2 = %d\n", n_past, ne2);

    assert(nb0 == sizeof(float));

    // TODO: optimize
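    // rotary position embedding: each pair (x[i0], x[i0+1]) within the first
    // n_dims elements is rotated by the angle p*theta, where
    // theta = 10000^(-i0/n_dims) and p is the absolute token position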
    for (int i3 = 0; i3 < ne3; i3++) {
        for (int i2 = (mode == 0 ? 0 : n_past); i2 < ne2; i2++) {
            const int p = (mode == 0 ? n_past + i2 : i2);
            for (int i1 = 0; i1 < ne1; i1++) {
                for (int i0 = 0; i0 < n_dims; i0 += 2) {
                    const float theta = powf(10000.0, ((float)-i0)/n_dims);

                    const float cos_theta = cosf(p*theta);
                    const float sin_theta = sinf(p*theta);

                    const float * const src = (float *)((char *) src0->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
                    float * dst_data = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);

                    const float x0 = src[0];
                    const float x1 = src[1];

                    dst_data[0] = x0*cos_theta - x1*sin_theta;
                    dst_data[1] = x0*sin_theta + x1*cos_theta;
                }
            }
        }
    }
}

static void ggml_compute_forward_rope_f16(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(src1->type == GGML_TYPE_I32);
    assert(ggml_nelements(src1) == 3);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n_past = ((int32_t *) src1->data)[0];
    const int n_dims = ((int32_t *) src1->data)[1];
    const int mode   = ((int32_t *) src1->data)[2];

    //const int ne0 = src0->ne[0];
    const int ne1 = src0->ne[1];
    const int ne2 = src0->ne[2];
    const int ne3 = src0->ne[3];

    const int nb0 = src0->nb[0];
    const int nb1 = src0->nb[1];
    const int nb2 = src0->nb[2];
    const int nb3 = src0->nb[3];

    //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
    //printf("n_past = %d, ne2 = %d\n", n_past, ne2);

    assert(nb0 == sizeof(ggml_fp16_t));

    for (int i3 = 0; i3 < ne3; i3++) {
        for (int i2 = (mode == 0 ? 0 : n_past); i2 < ne2; i2++) {
            const int p = (mode == 0 ? n_past + i2 : i2);
            for (int i1 = 0; i1 < ne1; i1++) {
                for (int i0 = 0; i0 < n_dims; i0 += 2) {
                    const float theta = powf(10000.0, ((float)-i0)/n_dims);

                    const float cos_theta = cosf(p*theta);
                    const float sin_theta = sinf(p*theta);

                    const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
                    ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);

                    const float x0 = ggml_fp16_to_fp32(src[0]);
                    const float x1 = ggml_fp16_to_fp32(src[1]);

                    dst_data[0] = ggml_fp32_to_fp16(x0*cos_theta - x1*sin_theta);
                    dst_data[1] = ggml_fp32_to_fp16(x0*sin_theta + x1*cos_theta);
                }
            }
        }
    }
}

static void ggml_compute_forward_rope(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_rope_f16(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_rope_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_conv_1d_1s

static void ggml_compute_forward_conv_1d_1s_f16_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    const int ne00 = src0->ne[0];
    const int ne01 = src0->ne[1];
    const int ne02 = src0->ne[2];
    //const int ne03 = src0->ne[3];

    const int ne10 = src1->ne[0];
    const int ne11 = src1->ne[1];
    //const int ne12 = src1->ne[2];
    //const int ne13 = src1->ne[3];

    //const int ne0 = dst->ne[0];
    //const int ne1 = dst->ne[1];
    //const int ne2 = dst->ne[2];
    //const int ne3 = dst->ne[3];
    //const int ne = ne0*ne1*ne2*ne3;

    const int nb00 = src0->nb[0];
    const int nb01 = src0->nb[1];
    const int nb02 = src0->nb[2];
    //const int nb03 = src0->nb[3];

    const int nb10 = src1->nb[0];
    const int nb11 = src1->nb[1];
    //const int nb12 = src1->nb[2];
    //const int nb13 = src1->nb[3];

    //const int nb0 = dst->nb[0];
    const int nb1 = dst->nb[1];
    //const int nb2 = dst->nb[2];
    //const int nb3 = dst->nb[3];

    const int ith = params->ith;
    const int nth = params->nth;

    const int nk = ne00;
    const int nh = nk/2;

    const int ew0 = ggml_up32(ne01);
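    // the channel count is padded up to a multiple of 32 (ew0), and during
    // INIT both the kernel and the source are repacked channel-interleaved
    // into wdata, so that each tap of the convolution is a single vec_dot
    // over ew0 elements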
    GGML_ASSERT(ne00 % 2 == 1); // TODO: support even kernel sizes
    GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nb10 == sizeof(float));

    if (params->type == GGML_TASK_INIT) {
        // TODO: fix this memset (wsize is overestimated)
        memset(params->wdata, 0, params->wsize);

        // prepare kernel data (src0)
        {
            ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;

            for (int i02 = 0; i02 < ne02; i02++) {
                for (int i01 = 0; i01 < ne01; i01++) {
                    const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i02*nb02 + i01*nb01);
                    ggml_fp16_t * dst_data = wdata + i02*ew0*ne00;
                    for (int i00 = 0; i00 < ne00; i00++) {
                        dst_data[i00*ew0 + i01] = src[i00];
                    }
                }
            }
        }

        // prepare source data (src1)
        {
            ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + ne02*ew0*ne00;

            for (int i11 = 0; i11 < ne11; i11++) {
                const float * const src = (float *)((char *) src1->data + i11*nb11);
                ggml_fp16_t * dst_data = wdata;
                for (int i10 = 0; i10 < ne10; i10++) {
                    dst_data[(i10 + nh)*ew0 + i11] = GGML_FP32_TO_FP16(src[i10]);
                }
            }
        }

        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // total rows in dst
    const int nr = ne02;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        float * dst_data = (float *)((char *) dst->data + i1*nb1);
        for (int i0 = 0; i0 < ne10; ++i0) {
            dst_data[i0] = 0;
            for (int k = -nh; k <= nh; k++) {
                float v = 0.0f;
                ggml_vec_dot_f16(ew0, &v,
                        (ggml_fp16_t *) params->wdata +   i1*ew0*ne00 +      (nh + k)*ew0,
                        (ggml_fp16_t *) params->wdata + ne02*ew0*ne00 + (i0 + nh + k)*ew0);

                dst_data[i0] += v;
            }
        }
    }
}

static void ggml_compute_forward_conv_1d_1s_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    const int ne00 = src0->ne[0];
    const int ne01 = src0->ne[1];
    const int ne02 = src0->ne[2];
    //const int ne03 = src0->ne[3];

    const int ne10 = src1->ne[0];
    const int ne11 = src1->ne[1];
    //const int ne12 = src1->ne[2];
    //const int ne13 = src1->ne[3];

    //const int ne0 = dst->ne[0];
    //const int ne1 = dst->ne[1];
    //const int ne2 = dst->ne[2];
    //const int ne3 = dst->ne[3];
    //const int ne = ne0*ne1*ne2*ne3;

    const int nb00 = src0->nb[0];
    const int nb01 = src0->nb[1];
    const int nb02 = src0->nb[2];
    //const int nb03 = src0->nb[3];

    const int nb10 = src1->nb[0];
    const int nb11 = src1->nb[1];
    //const int nb12 = src1->nb[2];
    //const int nb13 = src1->nb[3];

    //const int nb0 = dst->nb[0];
    const int nb1 = dst->nb[1];
    //const int nb2 = dst->nb[2];
    //const int nb3 = dst->nb[3];

    const int ith = params->ith;
    const int nth = params->nth;

    const int nk = ne00;
    const int nh = nk/2;

    const int ew0 = ggml_up32(ne01);

    GGML_ASSERT(ne00 % 2 == 1); // TODO: support even kernel sizes
    GGML_ASSERT(nb00 == sizeof(float));
    GGML_ASSERT(nb10 == sizeof(float));

    if (params->type == GGML_TASK_INIT) {
        // TODO: fix this memset (wsize is overestimated)
        memset(params->wdata, 0, params->wsize);

        // prepare kernel data (src0)
        {
            float * const wdata = (float *) params->wdata + 0;

            for (int i02 = 0; i02 < ne02; i02++) {
                for (int i01 = 0; i01 < ne01; i01++) {
                    const float * const src = (float *)((char *) src0->data + i02*nb02 + i01*nb01);
                    float * dst_data = wdata + i02*ew0*ne00;
                    for (int i00 = 0; i00 < ne00; i00++) {
                        dst_data[i00*ew0 + i01] = src[i00];
                    }
                }
            }
        }

        // prepare source data (src1)
        {
            float * const wdata = (float *) params->wdata + ne02*ew0*ne00;

            for (int i11 = 0; i11 < ne11; i11++) {
                const float * const src = (float *)((char *) src1->data + i11*nb11);
                float * dst_data = wdata;
                for (int i10 = 0; i10 < ne10; i10++) {
                    dst_data[(i10 + nh)*ew0 + i11] = src[i10];
                }
            }
        }

        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // total rows in dst
    const int nr = ne02;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        float * dst_data = (float *)((char *) dst->data + i1*nb1);
        for (int i0 = 0; i0 < ne10; ++i0) {
            dst_data[i0] = 0;
            for (int k = -nh; k <= nh; k++) {
                float v = 0.0f;
                ggml_vec_dot_f32(ew0, &v,
                        (float *) params->wdata +   i1*ew0*ne00 +      (nh + k)*ew0,
                        (float *) params->wdata + ne02*ew0*ne00 + (i0 + nh + k)*ew0);

                dst_data[i0] += v;
            }
        }
    }
}

static void ggml_compute_forward_conv_1d_1s(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_conv_1d_1s_f16_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_conv_1d_1s_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_conv_1d_2s
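// same scheme as ggml_compute_forward_conv_1d_1s above, but with stride 2:
// the input position advances by two per output element (i0 += 2, dst_data[i0/2])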
static void ggml_compute_forward_conv_1d_2s_f16_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    const int ne00 = src0->ne[0];
    const int ne01 = src0->ne[1];
    const int ne02 = src0->ne[2];
    //const int ne03 = src0->ne[3];

    const int ne10 = src1->ne[0];
    const int ne11 = src1->ne[1];
    //const int ne12 = src1->ne[2];
    //const int ne13 = src1->ne[3];

    //const int ne0 = dst->ne[0];
    //const int ne1 = dst->ne[1];
    //const int ne2 = dst->ne[2];
    //const int ne3 = dst->ne[3];
    //const int ne = ne0*ne1*ne2*ne3;

    const int nb00 = src0->nb[0];
    const int nb01 = src0->nb[1];
    const int nb02 = src0->nb[2];
    //const int nb03 = src0->nb[3];

    const int nb10 = src1->nb[0];
    const int nb11 = src1->nb[1];
    //const int nb12 = src1->nb[2];
    //const int nb13 = src1->nb[3];

    //const int nb0 = dst->nb[0];
    const int nb1 = dst->nb[1];
    //const int nb2 = dst->nb[2];
    //const int nb3 = dst->nb[3];

    const int ith = params->ith;
    const int nth = params->nth;

    const int nk = ne00;
    const int nh = nk/2;

    const int ew0 = ggml_up32(ne01);

    GGML_ASSERT(ne00 % 2 == 1); // TODO: support even kernel sizes
    GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nb10 == sizeof(float));

    if (params->type == GGML_TASK_INIT) {
        // TODO: fix this memset (wsize is overestimated)
        memset(params->wdata, 0, params->wsize);

        // prepare kernel data (src0)
        {
            ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;

            for (int i02 = 0; i02 < ne02; i02++) {
                for (int i01 = 0; i01 < ne01; i01++) {
                    const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i02*nb02 + i01*nb01);
                    ggml_fp16_t * dst_data = wdata + i02*ew0*ne00;
                    for (int i00 = 0; i00 < ne00; i00++) {
                        dst_data[i00*ew0 + i01] = src[i00];
                    }
                }
            }
        }

        // prepare source data (src1)
        {
            ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + ne02*ew0*ne00;

            for (int i11 = 0; i11 < ne11; i11++) {
                const float * const src = (float *)((char *) src1->data + i11*nb11);
                ggml_fp16_t * dst_data = wdata;
                for (int i10 = 0; i10 < ne10; i10++) {
                    dst_data[(i10 + nh)*ew0 + i11] = GGML_FP32_TO_FP16(src[i10]);
                }
            }
        }

        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // total rows in dst
    const int nr = ne02;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        float * dst_data = (float *)((char *) dst->data + i1*nb1);
        for (int i0 = 0; i0 < ne10; i0 += 2) {
            dst_data[i0/2] = 0;
            for (int k = -nh; k <= nh; k++) {
                float v = 0.0f;
                ggml_vec_dot_f16(ew0, &v,
                        (ggml_fp16_t *) params->wdata +   i1*ew0*ne00 +      (nh + k)*ew0,
                        (ggml_fp16_t *) params->wdata + ne02*ew0*ne00 + (i0 + nh + k)*ew0);

                dst_data[i0/2] += v;
            }
        }
    }
}

static void ggml_compute_forward_conv_1d_2s_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    const int ne00 = src0->ne[0];
    const int ne01 = src0->ne[1];
    const int ne02 = src0->ne[2];
    //const int ne03 = src0->ne[3];

    const int ne10 = src1->ne[0];
    const int ne11 = src1->ne[1];
    //const int ne12 = src1->ne[2];
    //const int ne13 = src1->ne[3];

    //const int ne0 = dst->ne[0];
    //const int ne1 = dst->ne[1];
    //const int ne2 = dst->ne[2];
    //const int ne3 = dst->ne[3];
    //const int ne = ne0*ne1*ne2*ne3;

    const int nb00 = src0->nb[0];
    const int nb01 = src0->nb[1];
    const int nb02 = src0->nb[2];
    //const int nb03 = src0->nb[3];

    const int nb10 = src1->nb[0];
    const int nb11 = src1->nb[1];
    //const int nb12 = src1->nb[2];
    //const int nb13 = src1->nb[3];

    //const int nb0 = dst->nb[0];
    const int nb1 = dst->nb[1];
    //const int nb2 = dst->nb[2];
    //const int nb3 = dst->nb[3];

    const int ith = params->ith;
    const int nth = params->nth;

    const int nk = ne00;
    const int nh = nk/2;

    const int ew0 = ggml_up32(ne01);

    GGML_ASSERT(ne00 % 2 == 1); // TODO: support even kernel sizes
    GGML_ASSERT(nb00 == sizeof(float));
    GGML_ASSERT(nb10 == sizeof(float));

    if (params->type == GGML_TASK_INIT) {
        // TODO: fix this memset (wsize is overestimated)
        memset(params->wdata, 0, params->wsize);

        // prepare kernel data (src0)
        {
            float * const wdata = (float *) params->wdata + 0;

            for (int i02 = 0; i02 < ne02; i02++) {
                for (int i01 = 0; i01 < ne01; i01++) {
                    const float * const src = (float *)((char *) src0->data + i02*nb02 + i01*nb01);
                    float * dst_data = wdata + i02*ew0*ne00;
                    for (int i00 = 0; i00 < ne00; i00++) {
                        dst_data[i00*ew0 + i01] = src[i00];
                    }
                }
            }
        }

        // prepare source data (src1)
        {
            float * const wdata = (float *) params->wdata + ne02*ew0*ne00;

            for (int i11 = 0; i11 < ne11; i11++) {
                const float * const src = (float *)((char *) src1->data + i11*nb11);
                float * dst_data = wdata;
                for (int i10 = 0; i10 < ne10; i10++) {
                    dst_data[(i10 + nh)*ew0 + i11] = src[i10];
                }
            }
        }

        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // total rows in dst
    const int nr = ne02;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        float * dst_data = (float *)((char *) dst->data + i1*nb1);
        for (int i0 = 0; i0 < ne10; i0 += 2) {
            dst_data[i0/2] = 0;
            for (int k = -nh; k <= nh; k++) {
                float v = 0.0f;
                ggml_vec_dot_f32(ew0, &v,
                        (float *) params->wdata +   i1*ew0*ne00 +      (nh + k)*ew0,
                        (float *) params->wdata + ne02*ew0*ne00 + (i0 + nh + k)*ew0);

                dst_data[i0/2] += v;
            }
        }
    }
}

static void ggml_compute_forward_conv_1d_2s(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_conv_1d_2s_f16_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_conv_1d_2s_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_flash_attn

static void ggml_compute_forward_flash_attn_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * q,
        const struct ggml_tensor * k,
        const struct ggml_tensor * v,
        const bool masked,
        struct ggml_tensor * dst) {
    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    const int neq0 = q->ne[0];
    const int neq1 = q->ne[1];
    const int neq2 = q->ne[2];
    const int neq3 = q->ne[3];

    const int nek0 = k->ne[0];
    const int nek1 = k->ne[1];
    //const int nek2 = k->ne[2];
    //const int nek3 = k->ne[3];

    //const int nev0 = v->ne[0];
    const int nev1 = v->ne[1];
    //const int nev2 = v->ne[2];
    //const int nev3 = v->ne[3];

    const int ne0 = dst->ne[0];
    const int ne1 = dst->ne[1];
    //const int ne2 = dst->ne[2];
    //const int ne3 = dst->ne[3];

    const int nbk0 = k->nb[0];
    const int nbk1 = k->nb[1];
    const int nbk2 = k->nb[2];
    const int nbk3 = k->nb[3];

    const int nbq0 = q->nb[0];
    const int nbq1 = q->nb[1];
    const int nbq2 = q->nb[2];
    const int nbq3 = q->nb[3];

    const int nbv0 = v->nb[0];
    const int nbv1 = v->nb[1];
    const int nbv2 = v->nb[2];
    const int nbv3 = v->nb[3];

    const int nb0 = dst->nb[0];
    const int nb1 = dst->nb[1];
    const int nb2 = dst->nb[2];
    const int nb3 = dst->nb[3];

    const int ith = params->ith;
    const int nth = params->nth;

    const int D = neq0;
    const int N = neq1;
    const int P = nek1 - N;
    const int M = P + N;

    const int Mup = ggml_up(M, GGML_SOFT_MAX_UNROLL);
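    // D = head dimension, N = number of query rows, P = number of past
    // key/value rows, M = P + N = total kv length; Mup rounds M up so the
    // softmax below can be unrolled by GGML_SOFT_MAX_UNROLL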
    GGML_ASSERT(ne0 == D);
    GGML_ASSERT(ne1 == N);
    GGML_ASSERT(P >= 0);

    GGML_ASSERT(nbq0 == sizeof(float));
    GGML_ASSERT(nbk0 == sizeof(float));
    GGML_ASSERT(nbv0 == sizeof(float));

    GGML_ASSERT(neq0 == D);
    GGML_ASSERT(nek0 == D);
    GGML_ASSERT(nev1 == D);

    GGML_ASSERT(neq1 == N);
    GGML_ASSERT(nek1 == N + P);
    GGML_ASSERT(nev1 == D);

    // dst cannot be transposed or permuted
    GGML_ASSERT(nb0 == sizeof(float));
    GGML_ASSERT(nb0 <= nb1);
    GGML_ASSERT(nb1 <= nb2);
    GGML_ASSERT(nb2 <= nb3);

    if (params->type == GGML_TASK_INIT) {
        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // parallelize by q rows using ggml_vec_dot_f32

    // total rows in q
    const int nr = neq1*neq2*neq3;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    const float scale = 1.0f/sqrtf(D);
  6315. //printf("P=%d N=%d D=%d ir0=%d ir1=%d scale = %f\n", P, N, D, ir0, ir1, scale);
  6316. for (int ir = ir0; ir < ir1; ++ir) {
  6317. // q indices
  6318. const int iq3 = ir/(neq2*neq1);
  6319. const int iq2 = (ir - iq3*neq2*neq1)/neq1;
  6320. const int iq1 = (ir - iq3*neq2*neq1 - iq2*neq1);
  6321. float * S = (float *) params->wdata + ith*(Mup + CACHE_LINE_SIZE_F32);
  6322. for (int i = M; i < Mup; ++i) {
  6323. S[i] = -INFINITY;
  6324. }
  6325. for (int ic = 0; ic < nek1; ++ic) {
  6326. // k indices
  6327. const int ik3 = iq3;
  6328. const int ik2 = iq2;
  6329. const int ik1 = ic;
  6330. // S indices
  6331. const int i1 = ik1;
  6332. ggml_vec_dot_f32(neq0,
  6333. S + i1,
  6334. (float *) ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)),
  6335. (float *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)));
  6336. }
  6337. // scale
  6338. ggml_vec_scale_f32(nek1, S, scale);
  6339. if (masked) {
  6340. for (int i = P; i < M; i++) {
  6341. if (i > P + iq1) {
  6342. S[i] = -INFINITY;
  6343. }
  6344. }
  6345. }
  6346. // softmax
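        // numerically stable: subtract the row max before exponentiating so
        // the largest argument is 0 and nothing overflows; exp() comes from
        // the precomputed fp16 lookup table table_exp_f16, indexed directly
        // by the fp16 bit pattern of the argument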
        {
            float max = -INFINITY;
            ggml_vec_max_f32(M, &max, S);

            ggml_float sum = 0.0;
            {
#ifdef GGML_SOFT_MAX_ACCELERATE
                max = -max;
                vDSP_vsadd(S, 1, &max, S, 1, Mup);
                vvexpf(S, S, &Mup);
                ggml_vec_sum_f32(Mup, &sum, S);
#else
                uint16_t   scvt[GGML_SOFT_MAX_UNROLL];
                ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 };

                for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) {
                    float * SS = S + i;

                    for (int j = 0; j < GGML_SOFT_MAX_UNROLL; ++j) {
                        if (SS[j] == -INFINITY) {
                            SS[j] = 0.0f;
                        } else {
                            ggml_fp16_t s = GGML_FP32_TO_FP16(SS[j] - max);
                            memcpy(&scvt[j], &s, sizeof(uint16_t));
                            const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt[j]]);
                            sump[j] += (ggml_float)val;
                            SS[j] = val;
                        }
                    }
                }

                for (int i = 0; i < GGML_SOFT_MAX_UNROLL; i++) {
                    sum += sump[i];
                }
#endif
            }

            assert(sum > 0.0);

            sum = 1.0/sum;
            ggml_vec_scale_f32(M, S, sum);

#ifndef NDEBUG
            for (int i = 0; i < M; ++i) {
                assert(!isnan(S[i]));
                assert(!isinf(S[i]));
            }
#endif
        }

        for (int ic = 0; ic < nev1; ++ic) {
            // dst indices
            const int i1 = iq1;
            const int i2 = iq2;
            const int i3 = iq3;

            ggml_vec_dot_f32(nek1,
                    (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)),
                    (float *) ((char *) v->data   + (        ic*nbv1 + i2*nbv2 + i3*nbv3)),
                    S);
        }
    }
}

static void ggml_compute_forward_flash_attn_f16(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * q,
        const struct ggml_tensor * k,
        const struct ggml_tensor * v,
        const bool masked,
        struct ggml_tensor * dst) {
    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    const int neq0 = q->ne[0];
    const int neq1 = q->ne[1];
    const int neq2 = q->ne[2];
    const int neq3 = q->ne[3];

    const int nek0 = k->ne[0];
    const int nek1 = k->ne[1];
    //const int nek2 = k->ne[2];
    //const int nek3 = k->ne[3];

    //const int nev0 = v->ne[0];
    const int nev1 = v->ne[1];
    //const int nev2 = v->ne[2];
    //const int nev3 = v->ne[3];

    const int ne0 = dst->ne[0];
    const int ne1 = dst->ne[1];
    //const int ne2 = dst->ne[2];
    //const int ne3 = dst->ne[3];

    const int nbk0 = k->nb[0];
    const int nbk1 = k->nb[1];
    const int nbk2 = k->nb[2];
    const int nbk3 = k->nb[3];

    const int nbq0 = q->nb[0];
    const int nbq1 = q->nb[1];
    const int nbq2 = q->nb[2];
    const int nbq3 = q->nb[3];

    const int nbv0 = v->nb[0];
    const int nbv1 = v->nb[1];
    const int nbv2 = v->nb[2];
    const int nbv3 = v->nb[3];

    const int nb0 = dst->nb[0];
    const int nb1 = dst->nb[1];
    const int nb2 = dst->nb[2];
    const int nb3 = dst->nb[3];

    const int ith = params->ith;
    const int nth = params->nth;

    const int D = neq0;
    const int N = neq1;
    const int P = nek1 - N;
    const int M = P + N;

    const int Mup = ggml_up(M, GGML_SOFT_MAX_UNROLL);

    GGML_ASSERT(ne0 == D);
    GGML_ASSERT(ne1 == N);
    GGML_ASSERT(P >= 0);

    GGML_ASSERT(nbq0 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nbk0 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nbv0 == sizeof(ggml_fp16_t));

    GGML_ASSERT(neq0 == D);
    GGML_ASSERT(nek0 == D);
    GGML_ASSERT(nev1 == D);

    GGML_ASSERT(neq1 == N);
    GGML_ASSERT(nek1 == N + P);
    GGML_ASSERT(nev1 == D);

    // dst cannot be transposed or permuted
    GGML_ASSERT(nb0 == sizeof(float));
    GGML_ASSERT(nb0 <= nb1);
    GGML_ASSERT(nb1 <= nb2);
    GGML_ASSERT(nb2 <= nb3);

    if (params->type == GGML_TASK_INIT) {
        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // parallelize by q rows using ggml_vec_dot_f16

    // total rows in q
    const int nr = neq1*neq2*neq3;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    const float scale = 1.0f/sqrtf(D);

    //printf("P=%d N=%d D=%d ir0=%d ir1=%d scale = %f\n", P, N, D, ir0, ir1, scale);

    for (int ir = ir0; ir < ir1; ++ir) {
        // q indices
        const int iq3 = ir/(neq2*neq1);
        const int iq2 = (ir - iq3*neq2*neq1)/neq1;
        const int iq1 = (ir - iq3*neq2*neq1 - iq2*neq1);

        float * S = (float *) params->wdata + ith*(2*Mup + CACHE_LINE_SIZE_F32);

        for (int i = M; i < Mup; ++i) {
            S[i] = -INFINITY;
        }
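
        // when nek1 is a multiple of GGML_VEC_DOT_UNROLL, ggml_vec_dot_f16_unroll
        // computes several k.q dot products per call (consecutive k rows at
        // stride nbk1), amortizing the reloads of the q vector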
        if (GGML_VEC_DOT_UNROLL > 2 || nek1 % GGML_VEC_DOT_UNROLL != 0) {
            for (int ic = 0; ic < nek1; ++ic) {
                // k indices
                const int ik3 = iq3;
                const int ik2 = iq2;
                const int ik1 = ic;

                // S indices
                const int i1 = ik1;

                ggml_vec_dot_f16(neq0,
                        S + i1,
                        (ggml_fp16_t *) ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)),
                        (ggml_fp16_t *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)));
            }
        } else {
            for (int ic = 0; ic < nek1; ic += GGML_VEC_DOT_UNROLL) {
                // k indices
                const int ik3 = iq3;
                const int ik2 = iq2;
                const int ik1 = ic;

                // S indices
                const int i1 = ik1;

                ggml_vec_dot_f16_unroll(neq0, nbk1,
                        S + i1,
                        ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)),
                        (ggml_fp16_t *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)));
            }
        }

        // scale
        ggml_vec_scale_f32(nek1, S, scale);

        if (masked) {
            for (int i = P; i < M; i++) {
                if (i > P + iq1) {
                    S[i] = -INFINITY;
                }
            }
        }

        // softmax
        {
            float max = -INFINITY;
            ggml_vec_max_f32(M, &max, S);

            ggml_float sum = 0.0;
            {
#ifdef GGML_SOFT_MAX_ACCELERATE
                max = -max;
                vDSP_vsadd(S, 1, &max, S, 1, Mup);
                vvexpf(S, S, &Mup);
                ggml_vec_sum_f32(Mup, &sum, S);
#else
                uint16_t   scvt[GGML_SOFT_MAX_UNROLL];
                ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 };

                for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) {
                    float * SS = S + i;

                    for (int j = 0; j < GGML_SOFT_MAX_UNROLL; ++j) {
                        if (SS[j] == -INFINITY) {
                            SS[j] = 0.0f;
                        } else {
                            ggml_fp16_t s = GGML_FP32_TO_FP16(SS[j] - max);
                            memcpy(&scvt[j], &s, sizeof(uint16_t));
                            const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt[j]]);
                            sump[j] += (ggml_float)val;
                            SS[j] = val;
                        }
                    }
                }

                for (int i = 0; i < GGML_SOFT_MAX_UNROLL; i++) {
                    sum += sump[i];
                }
#endif
            }

            assert(sum > 0.0);

            sum = 1.0/sum;
            ggml_vec_scale_f32(M, S, sum);

#ifndef NDEBUG
            for (int i = 0; i < M; ++i) {
                assert(!isnan(S[i]));
                assert(!isinf(S[i]));
            }
#endif
        }
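
        // convert the attention weights to the fp16 half of the scratch
        // buffer so the accumulation over v below can stay in fp16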
        ggml_fp16_t * S16 = (ggml_fp16_t *) ((float *) params->wdata + ith*(2*Mup + CACHE_LINE_SIZE_F32) + Mup);

        for (int i = 0; i < M; i++) {
            S16[i] = GGML_FP32_TO_FP16(S[i]);
        }

        if (GGML_VEC_DOT_UNROLL == 1 || (nev1 % GGML_VEC_DOT_UNROLL != 0)) {
            for (int ic = 0; ic < nev1; ++ic) {
                // dst indices
                const int i1 = iq1;
                const int i2 = iq2;
                const int i3 = iq3;

                ggml_vec_dot_f16(nek1,
                        (float *)       ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)),
                        (ggml_fp16_t *) ((char *) v->data   + (        ic*nbv1 + i2*nbv2 + i3*nbv3)),
                        S16);
            }
        } else {
            for (int ic = 0; ic < nev1; ic += GGML_VEC_DOT_UNROLL) {
                // dst indices
                const int i1 = iq1;
                const int i2 = iq2;
                const int i3 = iq3;

                ggml_vec_dot_f16_unroll(nek1, nbv1,
                        (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)),
                        ((char *) v->data + (ic*nbv1 + i2*nbv2 + i3*nbv3)),
                        S16);
            }
        }
    }
}

static void ggml_compute_forward_flash_attn(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * q,
        const struct ggml_tensor * k,
        const struct ggml_tensor * v,
        const bool masked,
        struct ggml_tensor * dst) {
    switch (q->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_flash_attn_f16(params, q, k, v, masked, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_flash_attn_f32(params, q, k, v, masked, dst);
            } break;
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_flash_ff
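//
// fused feed-forward: for each row of a computes
//
//   dst_row = c0 . gelu(b0 . a_row + b1) + c1
//
// i.e. a two-layer MLP (fc + projection) with a GELU in between, evaluated
// row by row without materializing the intermediate activation tensor
//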
static void ggml_compute_forward_flash_ff_f16(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * a,  // F16
        const struct ggml_tensor * b0, // F16 fc_w
        const struct ggml_tensor * b1, // F32 fc_b
        const struct ggml_tensor * c0, // F16 proj_w
        const struct ggml_tensor * c1, // F32 proj_b
        struct ggml_tensor * dst) {
    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    const int nea0 = a->ne[0];
    const int nea1 = a->ne[1];
    const int nea2 = a->ne[2];
    const int nea3 = a->ne[3];

    const int neb00 = b0->ne[0];
    const int neb01 = b0->ne[1];
    //const int neb02 = b0->ne[2];
    //const int neb03 = b0->ne[3];

    const int neb10 = b1->ne[0];
    const int neb11 = b1->ne[1];
    //const int neb12 = b1->ne[2];
    //const int neb13 = b1->ne[3];

    const int nec00 = c0->ne[0];
    const int nec01 = c0->ne[1];
    //const int nec02 = c0->ne[2];
    //const int nec03 = c0->ne[3];

    const int nec10 = c1->ne[0];
    const int nec11 = c1->ne[1];
    //const int nec12 = c1->ne[2];
    //const int nec13 = c1->ne[3];

    const int ne0 = dst->ne[0];
    const int ne1 = dst->ne[1];
    const int ne2 = dst->ne[2];
    //const int ne3 = dst->ne[3];

    const int nba0 = a->nb[0];
    const int nba1 = a->nb[1];
    const int nba2 = a->nb[2];
    const int nba3 = a->nb[3];

    const int nbb00 = b0->nb[0];
    const int nbb01 = b0->nb[1];
    const int nbb02 = b0->nb[2];
    const int nbb03 = b0->nb[3];

    const int nbb10 = b1->nb[0];
    //const int nbb11 = b1->nb[1];
    //const int nbb12 = b1->nb[2];
    //const int nbb13 = b1->nb[3];

    const int nbc00 = c0->nb[0];
    const int nbc01 = c0->nb[1];
    const int nbc02 = c0->nb[2];
    const int nbc03 = c0->nb[3];

    const int nbc10 = c1->nb[0];
    //const int nbc11 = c1->nb[1];
    //const int nbc12 = c1->nb[2];
    //const int nbc13 = c1->nb[3];

    const int nb0 = dst->nb[0];
    const int nb1 = dst->nb[1];
    const int nb2 = dst->nb[2];
    const int nb3 = dst->nb[3];

    const int ith = params->ith;
    const int nth = params->nth;

    const int D = nea0;
    //const int N = nea1;
    const int M = neb01;

    GGML_ASSERT(ne0 == nea0);
    GGML_ASSERT(ne1 == nea1);
    GGML_ASSERT(ne2 == nea2);

    GGML_ASSERT(nba0  == sizeof(ggml_fp16_t));
    GGML_ASSERT(nbb00 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nbb10 == sizeof(float));
    GGML_ASSERT(nbc00 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nbc10 == sizeof(float));

    GGML_ASSERT(neb00 == D);
    GGML_ASSERT(neb01 == M);
    GGML_ASSERT(neb10 == M);
    GGML_ASSERT(neb11 == 1);

    GGML_ASSERT(nec00 == M);
    GGML_ASSERT(nec01 == D);
    GGML_ASSERT(nec10 == D);
    GGML_ASSERT(nec11 == 1);

    // dst cannot be transposed or permuted
    GGML_ASSERT(nb0 == sizeof(float));
    GGML_ASSERT(nb0 <= nb1);
    GGML_ASSERT(nb1 <= nb2);
    GGML_ASSERT(nb2 <= nb3);

    if (params->type == GGML_TASK_INIT) {
        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // parallelize by a rows using ggml_vec_dot_f16

    // total rows in a
    const int nr = nea1*nea2*nea3;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int ir = ir0; ir < ir1; ++ir) {
        // a indices
        const int ia3 = ir/(nea2*nea1);
        const int ia2 = (ir - ia3*nea2*nea1)/nea1;
        const int ia1 = (ir - ia3*nea2*nea1 - ia2*nea1);

        float * S = (float *) params->wdata + ith*(2*M + CACHE_LINE_SIZE_F32);

        for (int ic = 0; ic < neb01; ++ic) {
            // b0 indices
            const int ib03 = ia3;
            const int ib02 = ia2;
            const int ib01 = ic;

            // S indices
            const int i1 = ib01;

            ggml_vec_dot_f16(nea0,
                    S + i1,
                    (ggml_fp16_t *) ((char *) b0->data + (ib01*nbb01 + ib02*nbb02 + ib03*nbb03)),
                    (ggml_fp16_t *) ((char *)  a->data + ( ia1*nba1  +  ia2*nba2  +  ia3*nba3)));
        }

        ggml_vec_add_f32(neb01, S, S, (float *) b1->data);
        //ggml_vec_gelu_f32(neb01, S, S);

        ggml_fp16_t * S16 = (ggml_fp16_t *) ((float *) params->wdata + ith*(2*M + CACHE_LINE_SIZE_F32) + M);

        for (int i = 0; i < M; i++) {
            S16[i] = GGML_FP32_TO_FP16(S[i]);
        }

        ggml_vec_gelu_f16(neb01, S16, S16);

        {
            // dst indices
            const int i1 = ia1;
            const int i2 = ia2;
            const int i3 = ia3;

            for (int ic = 0; ic < nec01; ++ic) {
                ggml_vec_dot_f16(neb01,
                        (float *)       ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)),
                        (ggml_fp16_t *) ((char *) c0->data  + (        ic*nbc01 + i2*nbc02 + i3*nbc03)),
                        S16);
            }

            ggml_vec_add_f32(nec01,
                    (float *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3)),
                    (float *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3)),
                    (float *) c1->data);
        }
    }
}

static void ggml_compute_forward_flash_ff(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * a,
        const struct ggml_tensor * b0,
        const struct ggml_tensor * b1,
        const struct ggml_tensor * c0,
        const struct ggml_tensor * c1,
        struct ggml_tensor * dst) {
    switch (b0->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_flash_ff_f16(params, a, b0, b1, c0, c1, dst);
            } break;
        case GGML_TYPE_F32:
            {
                GGML_ASSERT(false); // TODO
            } break;
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }
}

/////////////////////////////////

static void ggml_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor) {
    GGML_ASSERT(params);

    switch (tensor->op) {
        case GGML_OP_DUP:
            {
                ggml_compute_forward_dup(params, tensor->src0, tensor);
            } break;
        case GGML_OP_ADD:
            {
                ggml_compute_forward_add(params, tensor->src0, tensor->src1, tensor);
            } break;
        case GGML_OP_SUB:
            {
                ggml_compute_forward_sub(params, tensor->src0, tensor->src1, tensor);
            } break;
        case GGML_OP_MUL:
            {
                ggml_compute_forward_mul(params, tensor->src0, tensor->src1, tensor);
            } break;
        case GGML_OP_DIV:
            {
                ggml_compute_forward_div(params, tensor->src0, tensor->src1, tensor);
            } break;
        case GGML_OP_SQR:
            {
                ggml_compute_forward_sqr(params, tensor->src0, tensor);
            } break;
        case GGML_OP_SQRT:
            {
                ggml_compute_forward_sqrt(params, tensor->src0, tensor);
            } break;
        case GGML_OP_SUM:
            {
                ggml_compute_forward_sum(params, tensor->src0, tensor);
            } break;
        case GGML_OP_MEAN:
            {
                ggml_compute_forward_mean(params, tensor->src0, tensor);
            } break;
        case GGML_OP_REPEAT:
            {
                ggml_compute_forward_repeat(params, tensor->src0, tensor);
            } break;
        case GGML_OP_ABS:
            {
                ggml_compute_forward_abs(params, tensor->src0, tensor);
            } break;
        case GGML_OP_SGN:
            {
                ggml_compute_forward_sgn(params, tensor->src0, tensor);
            } break;
        case GGML_OP_NEG:
            {
                ggml_compute_forward_neg(params, tensor->src0, tensor);
            } break;
        case GGML_OP_STEP:
            {
                ggml_compute_forward_step(params, tensor->src0, tensor);
            } break;
        case GGML_OP_RELU:
            {
                ggml_compute_forward_relu(params, tensor->src0, tensor);
            } break;
        case GGML_OP_GELU:
            {
                ggml_compute_forward_gelu(params, tensor->src0, tensor);
            } break;
        case GGML_OP_SILU:
            {
                ggml_compute_forward_silu(params, tensor->src0, tensor);
            } break;
        case GGML_OP_NORM:
            {
                ggml_compute_forward_norm(params, tensor->src0, tensor);
            } break;
        case GGML_OP_RMS_NORM:
            {
                ggml_compute_forward_rms_norm(params, tensor->src0, tensor);
            } break;
        case GGML_OP_MUL_MAT:
            {
                ggml_compute_forward_mul_mat(params, tensor->src0, tensor->src1, tensor);
            } break;
        case GGML_OP_SCALE:
            {
                ggml_compute_forward_scale(params, tensor->src0, tensor->src1, tensor);
            } break;
        case GGML_OP_CPY:
            {
                ggml_compute_forward_cpy(params, tensor->src0, tensor);
            } break;
        case GGML_OP_RESHAPE:
            {
                ggml_compute_forward_reshape(params, tensor->src0, tensor);
            } break;
        case GGML_OP_VIEW:
            {
                ggml_compute_forward_view(params, tensor->src0);
            } break;
        case GGML_OP_PERMUTE:
            {
                ggml_compute_forward_permute(params, tensor->src0);
            } break;
        case GGML_OP_TRANSPOSE:
            {
                ggml_compute_forward_transpose(params, tensor->src0);
            } break;
        case GGML_OP_GET_ROWS:
            {
                ggml_compute_forward_get_rows(params, tensor->src0, tensor->src1, tensor);
            } break;
        case GGML_OP_DIAG_MASK_INF:
            {
                ggml_compute_forward_diag_mask_inf(params, tensor->src0, tensor->src1, tensor);
            } break;
        case GGML_OP_SOFT_MAX:
            {
                ggml_compute_forward_soft_max(params, tensor->src0, tensor);
            } break;
        case GGML_OP_ROPE:
            {
                ggml_compute_forward_rope(params, tensor->src0, tensor->src1, tensor);
            } break;
        case GGML_OP_CONV_1D_1S:
            {
                ggml_compute_forward_conv_1d_1s(params, tensor->src0, tensor->src1, tensor);
            } break;
        case GGML_OP_CONV_1D_2S:
            {
                ggml_compute_forward_conv_1d_2s(params, tensor->src0, tensor->src1, tensor);
            } break;
        case GGML_OP_FLASH_ATTN:
            {
                int32_t t = ggml_get_i32_1d(tensor->opt[1], 0);
                GGML_ASSERT(t == 0 || t == 1);
                bool masked = t != 0;
                ggml_compute_forward_flash_attn(params, tensor->src0, tensor->src1, tensor->opt[0], masked, tensor);
            } break;
        case GGML_OP_FLASH_FF:
            {
                ggml_compute_forward_flash_ff(params, tensor->src0, tensor->src1, tensor->opt[0], tensor->opt[1], tensor->opt[2], tensor);
            } break;
        case GGML_OP_NONE:
            {
                // nop
            } break;
        case GGML_OP_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }
}

////////////////////////////////////////////////////////////////////////////////
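
// reverse-mode automatic differentiation: given d(loss)/d(tensor) in
// tensor->grad, accumulate the chain-rule contribution into src0->grad and
// src1->grad for each op; contributions are accumulated with ggml_add_impl
// (or ggml_sub_impl for negative terms) because a tensor can feed several
// consumers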
static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor * tensor, bool inplace) {
    struct ggml_tensor * src0 = tensor->src0;
    struct ggml_tensor * src1 = tensor->src1;

    switch (tensor->op) {
        case GGML_OP_DUP:
            {
                if (src0->grad) {
                    src0->grad = ggml_add_impl(ctx, src0->grad, tensor->grad, inplace);
                }
            } break;
        case GGML_OP_ADD:
            {
                if (src0->grad) {
                    src0->grad = ggml_add_impl(ctx, src0->grad, tensor->grad, inplace);
                }
                if (src1->grad) {
                    src1->grad = ggml_add_impl(ctx, src1->grad, tensor->grad, inplace);
                }
            } break;
        case GGML_OP_SUB:
            {
                if (src0->grad) {
                    src0->grad = ggml_add_impl(ctx, src0->grad, tensor->grad, inplace);
                }
                if (src1->grad) {
                    src1->grad = ggml_sub_impl(ctx, src1->grad, tensor->grad, inplace);
                }
            } break;
        case GGML_OP_MUL:
            {
                if (src0->grad) {
                    src0->grad =
                        ggml_add_impl(ctx,
                                src0->grad,
                                ggml_mul(ctx, src1, tensor->grad),
                                inplace);
                }
                if (src1->grad) {
                    src1->grad =
                        ggml_add_impl(ctx,
                                src1->grad,
                                ggml_mul(ctx, src0, tensor->grad),
                                inplace);
                }
            } break;
        case GGML_OP_DIV:
            {
                if (src0->grad) {
                    src0->grad =
                        ggml_add_impl(ctx,
                                src0->grad,
                                ggml_div(ctx, tensor->grad, src1),
                                inplace);
                }
                if (src1->grad) {
                    src1->grad =
                        ggml_sub_impl(ctx,
                                src1->grad,
                                ggml_mul(ctx,
                                    tensor->grad,
                                    ggml_div(ctx, tensor, src1)),
                                inplace);
                }
            } break;
        case GGML_OP_SQR:
            {
                if (src0->grad) {
                    src0->grad =
                        ggml_add_impl(ctx,
                                src0->grad,
                                ggml_mul(ctx,
                                    ggml_mul(ctx, src0, tensor->grad),
                                    ggml_repeat(ctx, ggml_new_f32(ctx, 2.0f), src0)),
                                inplace);
                }
            } break;
        case GGML_OP_SQRT:
            {
                if (src0->grad) {
                    // d/dx sqrt(x) = 0.5/sqrt(x) = 0.5/tensor; apply the chain rule
                    src0->grad =
                        ggml_add_impl(ctx,
                                src0->grad,
                                ggml_mul(ctx,
                                    tensor->grad,
                                    ggml_div(ctx,
                                        ggml_repeat(ctx, ggml_new_f32(ctx, 0.5f), tensor),
                                        tensor)),
                                inplace);
                }
            } break;
        case GGML_OP_SUM:
            {
                if (src0->grad) {
                    src0->grad =
                        ggml_add_impl(ctx,
                                src0->grad,
                                ggml_repeat(ctx, tensor->grad, src0->grad),
                                inplace);
                }
            } break;
        case GGML_OP_MEAN:
            {
                GGML_ASSERT(false); // TODO: implement
            } break;
        case GGML_OP_REPEAT:
            {
                if (src0->grad) {
                    src0->grad =
                        ggml_add_impl(ctx,
                                src0->grad,
                                ggml_sum(ctx, tensor->grad),
                                inplace);
                }
            } break;
        case GGML_OP_ABS:
            {
                if (src0->grad) {
                    src0->grad =
                        ggml_add_impl(ctx,
                                src0->grad,
                                ggml_mul(ctx,
                                    ggml_sgn(ctx, src0),
                                    tensor->grad),
                                inplace);
                }
            } break;
        case GGML_OP_SGN:
            {
                if (src0->grad) {
                    // noop
                }
            } break;
        case GGML_OP_NEG:
            {
                if (src0->grad) {
                    src0->grad = ggml_sub_impl(ctx, src0->grad, tensor->grad, inplace);
                }
            } break;
        case GGML_OP_STEP:
            {
                if (src0->grad) {
                    // noop
                }
            } break;
        case GGML_OP_RELU:
            {
                if (src0->grad) {
                    // relu'(x) = step(x), so the masked gradient is added
                    src0->grad = ggml_add_impl(ctx,
                            src0->grad,
                            ggml_mul(ctx,
                                ggml_step(ctx, src0),
                                tensor->grad),
                            inplace);
                }
            } break;
        case GGML_OP_GELU:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_SILU:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_NORM:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_RMS_NORM:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_MUL_MAT:
            {
                if (src0->grad) {
                    // TODO: this requires outer product - ggml_out_prod(ctx, src1, tensor->grad);
                    GGML_ASSERT(false);
                }
                if (src1->grad) {
                    src1->grad =
                        ggml_add_impl(ctx,
                                src1->grad,
                                // TODO: fix transpose, the node will break the graph connections
                                ggml_mul_mat(ctx, ggml_transpose(ctx, src0), tensor->grad),
                                inplace);
                }
            } break;
        case GGML_OP_SCALE:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_CPY:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_RESHAPE:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_VIEW:
            {
                GGML_ASSERT(false); // not supported
            } break;
        case GGML_OP_PERMUTE:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_TRANSPOSE:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_GET_ROWS:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_DIAG_MASK_INF:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_SOFT_MAX:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_ROPE:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_CONV_1D_1S:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_CONV_1D_2S:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_FLASH_ATTN:
            {
                GGML_ASSERT(false); // not supported
            } break;
        case GGML_OP_FLASH_FF:
            {
                GGML_ASSERT(false); // not supported
            } break;
        case GGML_OP_NONE:
            {
                // nop
            } break;
        case GGML_OP_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }
}
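
// depth-first traversal that appends a node's parents before the node
// itself, producing a topological order: by the time a node is evaluated,
// all of its inputs are already in the graph; constant inputs without
// gradients are collected separately as leafs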
static void ggml_visit_parents(struct ggml_cgraph * cgraph, struct ggml_tensor * node) {
    if (node->grad == NULL) {
        // this usually happens when we generate intermediate nodes from constants in the backward pass
        // it can also happen during forward pass, if the user performs computations with constants
        if (node->op != GGML_OP_NONE) {
            //GGML_PRINT_DEBUG("%s: warning: node %p has no grad, but op %d\n", __func__, (void *) node, node->op);
        }
    }

    // check if already visited
    for (int i = 0; i < cgraph->n_nodes; i++) {
        if (cgraph->nodes[i] == node) {
            return;
        }
    }

    for (int i = 0; i < cgraph->n_leafs; i++) {
        if (cgraph->leafs[i] == node) {
            return;
        }
    }

    if (node->src0) {
        ggml_visit_parents(cgraph, node->src0);
    }

    if (node->src1) {
        ggml_visit_parents(cgraph, node->src1);
    }

    for (int i = 0; i < GGML_MAX_OPT; ++i) {
        if (node->opt[i]) {
            ggml_visit_parents(cgraph, node->opt[i]);
        }
    }

    if (node->op == GGML_OP_NONE && node->grad == NULL) {
        // reached a leaf node, not part of the gradient graph (e.g. a constant)
        GGML_ASSERT(cgraph->n_leafs < GGML_MAX_NODES);

        cgraph->leafs[cgraph->n_leafs] = node;
        cgraph->n_leafs++;
    } else {
        GGML_ASSERT(cgraph->n_nodes < GGML_MAX_NODES);

        cgraph->nodes[cgraph->n_nodes] = node;
        cgraph->grads[cgraph->n_nodes] = node->grad;
        cgraph->n_nodes++;
    }
}

static void ggml_build_forward_impl(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor, bool expand) {
    if (!expand) {
        cgraph->n_nodes = 0;
        cgraph->n_leafs = 0;
    }

    const int n0 = cgraph->n_nodes;
    UNUSED(n0);

    ggml_visit_parents(cgraph, tensor);

    const int n_new = cgraph->n_nodes - n0;
    GGML_PRINT_DEBUG("%s: visited %d new nodes\n", __func__, n_new);

    if (n_new > 0) {
        // the last added node should always be the starting point
        GGML_ASSERT(cgraph->nodes[cgraph->n_nodes - 1] == tensor);
    }
}

void ggml_build_forward_expand(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor) {
    ggml_build_forward_impl(cgraph, tensor, true);
}

struct ggml_cgraph ggml_build_forward(struct ggml_tensor * tensor) {
    struct ggml_cgraph result = {
        /*.n_nodes      =*/ 0,
        /*.n_leafs      =*/ 0,
        /*.n_threads    =*/ 0,
        /*.work_size    =*/ 0,
        /*.work         =*/ NULL,
        /*.nodes        =*/ { NULL },
        /*.grads        =*/ { NULL },
        /*.leafs        =*/ { NULL },
        /*.perf_runs    =*/ 0,
        /*.perf_cycles  =*/ 0,
        /*.perf_time_us =*/ 0,
    };

    ggml_build_forward_impl(&result, tensor, false);

    return result;
}

struct ggml_cgraph ggml_build_backward(struct ggml_context * ctx, struct ggml_cgraph * gf, bool keep) {
    struct ggml_cgraph result = *gf;

    GGML_ASSERT(gf->n_nodes > 0);

    // if we are keeping the gradient graph, we have to detach the gradient nodes from the original graph
    if (keep) {
        for (int i = 0; i < gf->n_nodes; i++) {
            struct ggml_tensor * node = gf->nodes[i];

            if (node->grad) {
                node->grad = ggml_dup_tensor(ctx, node);
                gf->grads[i] = node->grad;
            }
        }
    }

    for (int i = gf->n_nodes - 1; i >= 0; i--) {
        struct ggml_tensor * node = gf->nodes[i];

        // because we detached the grad nodes from the original graph, we can afford inplace operations
        if (node->grad) {
            ggml_compute_backward(ctx, node, keep);
        }
    }

    for (int i = gf->n_nodes - 1; i >= 0; i--) {
        struct ggml_tensor * node = gf->nodes[i];

        if (node->is_param) {
            GGML_PRINT_DEBUG("%s: found root node %p\n", __func__, (void *) node);
            ggml_build_forward_impl(&result, node->grad, true);
        }
    }

    return result;
}

//
// thread data
//
// synchronization is done via busy loops
// I tried using spin locks, but not sure how to use them correctly - the things I tried were slower than busy loops
//

#ifdef __APPLE__

//#include <os/lock.h>
//
//typedef os_unfair_lock ggml_lock_t;
//
//#define ggml_lock_init(x)    UNUSED(x)
//#define ggml_lock_destroy(x) UNUSED(x)
//#define ggml_lock_lock       os_unfair_lock_lock
//#define ggml_lock_unlock     os_unfair_lock_unlock
//
//#define GGML_LOCK_INITIALIZER OS_UNFAIR_LOCK_INIT

typedef int ggml_lock_t;

#define ggml_lock_init(x)    UNUSED(x)
#define ggml_lock_destroy(x) UNUSED(x)
#define ggml_lock_lock(x)    UNUSED(x)
#define ggml_lock_unlock(x)  UNUSED(x)

#define GGML_LOCK_INITIALIZER 0

typedef pthread_t ggml_thread_t;

#define ggml_thread_create pthread_create
#define ggml_thread_join   pthread_join

#else

//typedef pthread_spinlock_t ggml_lock_t;

//#define ggml_lock_init(x) pthread_spin_init(x, PTHREAD_PROCESS_PRIVATE)
//#define ggml_lock_destroy pthread_spin_destroy
//#define ggml_lock_lock    pthread_spin_lock
//#define ggml_lock_unlock  pthread_spin_unlock

typedef int ggml_lock_t;

#define ggml_lock_init(x)    UNUSED(x)
#define ggml_lock_destroy(x) UNUSED(x)
#define ggml_lock_lock(x)    UNUSED(x)
#define ggml_lock_unlock(x)  UNUSED(x)

#define GGML_LOCK_INITIALIZER 0

typedef pthread_t ggml_thread_t;

#define ggml_thread_create pthread_create
#define ggml_thread_join   pthread_join

#endif

struct ggml_compute_state_shared {
    ggml_lock_t spin;

    int n_threads;

    // synchronization primitives
    atomic_int  n_ready;
    atomic_bool has_work;
    atomic_bool stop; // stop all threads
};

struct ggml_compute_state {
    ggml_thread_t thrd;

    struct ggml_compute_params params;
    struct ggml_tensor * node;

    struct ggml_compute_state_shared * shared;
};
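
// worker protocol: the main thread publishes a node via state->node and a
// task type via state->params, then raises has_work; workers rendezvous on
// the n_ready counter before and after each task, and exit once stop is set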
static thread_ret_t ggml_graph_compute_thread(void * data) {
    struct ggml_compute_state * state = (struct ggml_compute_state *) data;

    const int n_threads = state->shared->n_threads;

    while (true) {
        if (atomic_fetch_add(&state->shared->n_ready, 1) == n_threads - 1) {
            atomic_store(&state->shared->has_work, false);
        } else {
            while (atomic_load(&state->shared->has_work)) {
                if (atomic_load(&state->shared->stop)) {
                    return 0;
                }
                ggml_lock_lock  (&state->shared->spin);
                ggml_lock_unlock(&state->shared->spin);
            }
        }

        atomic_fetch_sub(&state->shared->n_ready, 1);

        // wait for work
        while (!atomic_load(&state->shared->has_work)) {
            if (atomic_load(&state->shared->stop)) {
                return 0;
            }
            ggml_lock_lock  (&state->shared->spin);
            ggml_lock_unlock(&state->shared->spin);
        }

        // check if we should stop
        if (atomic_load(&state->shared->stop)) {
            break;
        }

        if (state->node) {
            if (state->params.ith < state->params.nth) {
                ggml_compute_forward(&state->params, state->node);
            }

            state->node = NULL;
        } else {
            break;
        }
    }

    return 0;
}

void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph) {
    const int n_threads = cgraph->n_threads;

    struct ggml_compute_state_shared state_shared = {
        /*.spin      =*/ GGML_LOCK_INITIALIZER,
        /*.n_threads =*/ n_threads,
        /*.n_ready   =*/ 0,
        /*.has_work  =*/ false,
        /*.stop      =*/ false,
    };
    struct ggml_compute_state * workers = n_threads > 1 ? alloca(sizeof(struct ggml_compute_state)*(n_threads - 1)) : NULL;

    // create thread pool
    if (n_threads > 1) {
        ggml_lock_init(&state_shared.spin);

        atomic_store(&state_shared.has_work, true);

        for (int j = 0; j < n_threads - 1; j++) {
            workers[j] = (struct ggml_compute_state) {
                .thrd   = 0,
                .params = {
                    .type  = GGML_TASK_COMPUTE,
                    .ith   = j + 1,
                    .nth   = n_threads,
                    .wsize = cgraph->work ? ggml_nbytes(cgraph->work) : 0,
                    .wdata = cgraph->work ? cgraph->work->data : NULL,
                },
                .node   = NULL,
                .shared = &state_shared,
            };

            int rc = ggml_thread_create(&workers[j].thrd, NULL, ggml_graph_compute_thread, &workers[j]);
            GGML_ASSERT(rc == 0);
            UNUSED(rc);
        }
    }

    // initialize tasks + work buffer
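    //
    // each op declares how many threads it can use (node->n_tasks) and how
    // much scratch memory it needs; the shared work buffer is sized to the
    // maximum requirement over all nodes, plus one cache line of padding per
    // extra thread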
    {
        size_t work_size = 0;

        // thread scheduling for the different operations
        for (int i = 0; i < cgraph->n_nodes; i++) {
            struct ggml_tensor * node = cgraph->nodes[i];

            switch (node->op) {
                case GGML_OP_DUP:
                    {
                        node->n_tasks = 1;
                    } break;
                case GGML_OP_ADD:
                    {
                        node->n_tasks = n_threads;
                    } break;
                case GGML_OP_SUB:
                case GGML_OP_MUL:
                case GGML_OP_DIV:
                case GGML_OP_SQR:
                case GGML_OP_SQRT:
                case GGML_OP_SUM:
                case GGML_OP_MEAN:
                case GGML_OP_REPEAT:
                case GGML_OP_ABS:
                case GGML_OP_SGN:
                case GGML_OP_NEG:
                case GGML_OP_STEP:
                case GGML_OP_RELU:
                    {
                        node->n_tasks = 1;
                    } break;
                case GGML_OP_GELU:
                    {
                        node->n_tasks = n_threads;
                    } break;
                case GGML_OP_SILU:
                    {
                        node->n_tasks = n_threads;
                    } break;
                case GGML_OP_NORM:
                case GGML_OP_RMS_NORM:
                    {
                        node->n_tasks = n_threads;
                    } break;
                case GGML_OP_MUL_MAT:
                    {
                        node->n_tasks = n_threads;

                        // TODO: use different scheduling for different matrix sizes
                        //const int nr0 = ggml_nrows(node->src0);
                        //const int nr1 = ggml_nrows(node->src1);

                        //node->n_tasks = MIN(n_threads, MAX(1, nr0/128));
                        //printf("nr0 = %8d, nr1 = %8d, nr0*nr1 = %8d, n_tasks = %d\n", nr0, nr1, nr0*nr1, node->n_tasks);

                        size_t cur = 0;

                        if (node->src0->type == GGML_TYPE_F16 && node->src1->type == GGML_TYPE_F32) {
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
                            if (ggml_compute_forward_mul_mat_use_blas(node->src0, node->src1, node)) {
                                node->n_tasks = 1; // TODO: this actually is doing nothing
                                                   //       the threads are still spinning
                                cur = GGML_TYPE_SIZE[GGML_TYPE_F32]*(node->src0->ne[0]*node->src0->ne[1]);
                                //printf("src0: ne0 = %d, ne1 = %d, ne = %d\n", node->src0->ne[0], node->src0->ne[1], node->src0->ne[0]*node->src0->ne[1]);
                                //printf("src1: ne0 = %d, ne1 = %d, ne = %d\n", node->src1->ne[0], node->src1->ne[1], node->src1->ne[0]*node->src1->ne[1]);
                                //printf("cur = %zu\n", cur);
                            } else {
                                cur = GGML_TYPE_SIZE[GGML_TYPE_F16]*ggml_nelements(node->src1);
                            }
#else
                            cur = GGML_TYPE_SIZE[GGML_TYPE_F16]*ggml_nelements(node->src1);
#endif
                        } else if (node->src0->type == GGML_TYPE_F32 && node->src1->type == GGML_TYPE_F32) {
                            cur = 0;
                        } else if (quantize_fns[node->src0->type].vec_dot_q && node->src1->type == GGML_TYPE_F32) {
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
                            if (ggml_compute_forward_mul_mat_use_blas(node->src0, node->src1, node)) {
                                node->n_tasks = 1;
                                cur = GGML_TYPE_SIZE[GGML_TYPE_F32]*(node->src0->ne[0]*node->src0->ne[1]);
                            } else
#endif
                            {
                                cur = GGML_TYPE_SIZE[node->src0->type]*ggml_nelements(node->src1)/GGML_BLCK_SIZE[node->src0->type];
                            }
                        } else {
                            GGML_ASSERT(false);
                        }

                        work_size = MAX(work_size, cur);
                    } break;
                case GGML_OP_SCALE:
                    {
                        node->n_tasks = n_threads;
                    } break;
                case GGML_OP_CPY:
                case GGML_OP_RESHAPE:
                case GGML_OP_VIEW:
                case GGML_OP_PERMUTE:
                case GGML_OP_TRANSPOSE:
                case GGML_OP_GET_ROWS:
                case GGML_OP_DIAG_MASK_INF:
                    {
                        node->n_tasks = 1;
                    } break;
                case GGML_OP_SOFT_MAX:
                    {
                        node->n_tasks = n_threads;
                    } break;
                case GGML_OP_ROPE:
                    {
                        node->n_tasks = 1;
                    } break;
                case GGML_OP_CONV_1D_1S:
                case GGML_OP_CONV_1D_2S:
                    {
                        node->n_tasks = n_threads;

                        GGML_ASSERT(node->src0->ne[3] == 1);
                        GGML_ASSERT(node->src1->ne[2] == 1);
                        GGML_ASSERT(node->src1->ne[3] == 1);

                        size_t cur = 0;
                        const int nk = node->src0->ne[0];

                        if (node->src0->type == GGML_TYPE_F16 &&
                            node->src1->type == GGML_TYPE_F32) {
                            cur = sizeof(ggml_fp16_t)*(
                                    nk*ggml_up32(node->src0->ne[1])*node->src0->ne[2] +
                                    ( 2*(nk/2) + node->src1->ne[0])*node->src1->ne[1]
                                    );
                        } else if (node->src0->type == GGML_TYPE_F32 &&
                                   node->src1->type == GGML_TYPE_F32) {
                            cur = sizeof(float)*(
                                    nk*ggml_up32(node->src0->ne[1])*node->src0->ne[2] +
                                    ( 2*(nk/2) + node->src1->ne[0])*node->src1->ne[1]
                                    );
                        } else {
                            GGML_ASSERT(false);
                        }

                        work_size = MAX(work_size, cur);
                    } break;
                case GGML_OP_FLASH_ATTN:
                    {
                        node->n_tasks = n_threads;

                        size_t cur = 0;

                        const int ne11 = ggml_up(node->src1->ne[1], GGML_SOFT_MAX_UNROLL);

                        if (node->src1->type == GGML_TYPE_F32) {
                            cur  = sizeof(float)*ne11*node->n_tasks; // TODO: this can become (n_tasks-1)
                            cur += sizeof(float)*ne11*node->n_tasks; // this is overestimated by x2
                        }

                        if (node->src1->type == GGML_TYPE_F16) {
                            cur  = sizeof(float)*ne11*node->n_tasks; // TODO: this can become (n_tasks-1)
                            cur += sizeof(float)*ne11*node->n_tasks; // this is overestimated by x2
                        }

                        work_size = MAX(work_size, cur);
                    } break;
                case GGML_OP_FLASH_FF:
                    {
                        node->n_tasks = n_threads;

                        size_t cur = 0;

                        if (node->src1->type == GGML_TYPE_F32) {
                            cur  = sizeof(float)*node->src1->ne[1]*node->n_tasks; // TODO: this can become (n_tasks-1)
                            cur += sizeof(float)*node->src1->ne[1]*node->n_tasks; // this is overestimated by x2
                        }

                        if (node->src1->type == GGML_TYPE_F16) {
                            cur  = sizeof(float)*node->src1->ne[1]*node->n_tasks; // TODO: this can become (n_tasks-1)
                            cur += sizeof(float)*node->src1->ne[1]*node->n_tasks; // this is overestimated by x2
                        }

                        work_size = MAX(work_size, cur);
                    } break;
                case GGML_OP_NONE:
                    {
                        node->n_tasks = 1;
                    } break;
                case GGML_OP_COUNT:
                    {
                        GGML_ASSERT(false);
                    } break;
            }
        }

        if (cgraph->work != NULL && work_size > cgraph->work_size) {
            GGML_ASSERT(false); // TODO: better handling
        }

        if (work_size > 0 && cgraph->work == NULL) {
            cgraph->work_size = work_size + CACHE_LINE_SIZE*(n_threads - 1);

            GGML_PRINT_DEBUG("%s: allocating work buffer for graph (%zu bytes)\n", __func__, cgraph->work_size);
            cgraph->work = ggml_new_tensor_1d(ctx, GGML_TYPE_I8, cgraph->work_size);
        }
    }
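
    // evaluate the nodes in topological order; each node runs in up to three
    // phases: INIT (main thread only), COMPUTE (parallel over node->n_tasks
    // threads), FINALIZE (parallel reduction/cleanup)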
    const int64_t perf_start_cycles  = ggml_perf_cycles();
    const int64_t perf_start_time_us = ggml_perf_time_us();

    for (int i = 0; i < cgraph->n_nodes; i++) {
        GGML_PRINT_DEBUG_5("%s: %d/%d\n", __func__, i, cgraph->n_nodes);

        struct ggml_tensor * node = cgraph->nodes[i];

        // TODO: this could be used to avoid unnecessary computations, but it needs to be improved
        //if (node->grad == NULL && node->perf_runs > 0) {
        //    continue;
        //}

        const int64_t perf_node_start_cycles  = ggml_perf_cycles();
        const int64_t perf_node_start_time_us = ggml_perf_time_us();

        // INIT
        struct ggml_compute_params params = {
            /*.type  =*/ GGML_TASK_INIT,
            /*.ith   =*/ 0,
            /*.nth   =*/ node->n_tasks,
            /*.wsize =*/ cgraph->work ? ggml_nbytes(cgraph->work) : 0,
            /*.wdata =*/ cgraph->work ? cgraph->work->data : NULL,
        };

        ggml_compute_forward(&params, node);

        // COMPUTE
        if (node->n_tasks > 1) {
            if (atomic_fetch_add(&state_shared.n_ready, 1) == n_threads - 1) {
                atomic_store(&state_shared.has_work, false);
            }

            while (atomic_load(&state_shared.has_work)) {
                ggml_lock_lock  (&state_shared.spin);
                ggml_lock_unlock(&state_shared.spin);
            }

            // launch thread pool
            for (int j = 0; j < n_threads - 1; j++) {
                workers[j].params = (struct ggml_compute_params) {
                    .type  = GGML_TASK_COMPUTE,
                    .ith   = j + 1,
                    .nth   = node->n_tasks,
                    .wsize = cgraph->work ? ggml_nbytes(cgraph->work) : 0,
                    .wdata = cgraph->work ? cgraph->work->data : NULL,
                };
                workers[j].node = node;
            }

            atomic_fetch_sub(&state_shared.n_ready, 1);

            while (atomic_load(&state_shared.n_ready) > 0) {
                ggml_lock_lock  (&state_shared.spin);
                ggml_lock_unlock(&state_shared.spin);
            }

            atomic_store(&state_shared.has_work, true);
        }

        params.type = GGML_TASK_COMPUTE;
        ggml_compute_forward(&params, node);

        // wait for thread pool
        if (node->n_tasks > 1) {
            if (atomic_fetch_add(&state_shared.n_ready, 1) == n_threads - 1) {
                atomic_store(&state_shared.has_work, false);
            }

            while (atomic_load(&state_shared.has_work)) {
                ggml_lock_lock  (&state_shared.spin);
                ggml_lock_unlock(&state_shared.spin);
            }

            atomic_fetch_sub(&state_shared.n_ready, 1);

            while (atomic_load(&state_shared.n_ready) != 0) {
                ggml_lock_lock  (&state_shared.spin);
                ggml_lock_unlock(&state_shared.spin);
            }
        }

        // FINALIZE
        if (node->n_tasks > 1) {
            if (atomic_fetch_add(&state_shared.n_ready, 1) == n_threads - 1) {
                atomic_store(&state_shared.has_work, false);
            }

            while (atomic_load(&state_shared.has_work)) {
                ggml_lock_lock  (&state_shared.spin);
                ggml_lock_unlock(&state_shared.spin);
            }

            // launch thread pool
            for (int j = 0; j < n_threads - 1; j++) {
                workers[j].params = (struct ggml_compute_params) {
                    .type  = GGML_TASK_FINALIZE,
                    .ith   = j + 1,
                    .nth   = node->n_tasks,
                    .wsize = cgraph->work ? ggml_nbytes(cgraph->work) : 0,
                    .wdata = cgraph->work ? cgraph->work->data : NULL,
                };
                workers[j].node = node;
            }

            atomic_fetch_sub(&state_shared.n_ready, 1);

            while (atomic_load(&state_shared.n_ready) > 0) {
                ggml_lock_lock  (&state_shared.spin);
                ggml_lock_unlock(&state_shared.spin);
            }

            atomic_store(&state_shared.has_work, true);
        }

        params.type = GGML_TASK_FINALIZE;
        ggml_compute_forward(&params, node);

        // wait for thread pool
        if (node->n_tasks > 1) {
            if (atomic_fetch_add(&state_shared.n_ready, 1) == n_threads - 1) {
                atomic_store(&state_shared.has_work, false);
            }

            while (atomic_load(&state_shared.has_work)) {
                ggml_lock_lock  (&state_shared.spin);
                ggml_lock_unlock(&state_shared.spin);
            }

            atomic_fetch_sub(&state_shared.n_ready, 1);

            while (atomic_load(&state_shared.n_ready) != 0) {
                ggml_lock_lock  (&state_shared.spin);
                ggml_lock_unlock(&state_shared.spin);
            }
        }

        // performance stats (node)
        {
            int64_t perf_cycles_cur  = ggml_perf_cycles()  - perf_node_start_cycles;
            int64_t perf_time_us_cur = ggml_perf_time_us() - perf_node_start_time_us;

            node->perf_runs++;
            node->perf_cycles  += perf_cycles_cur;
            node->perf_time_us += perf_time_us_cur;
        }
    }

    // join thread pool
    if (n_threads > 1) {
        atomic_store(&state_shared.stop, true);
        atomic_store(&state_shared.has_work, true);

        for (int j = 0; j < n_threads - 1; j++) {
            int rc = ggml_thread_join(workers[j].thrd, NULL);
            GGML_ASSERT(rc == 0);
            UNUSED(rc);
        }

        ggml_lock_destroy(&state_shared.spin);
    }

    // performance stats (graph)
    {
        int64_t perf_cycles_cur  = ggml_perf_cycles()  - perf_start_cycles;
        int64_t perf_time_us_cur = ggml_perf_time_us() - perf_start_time_us;

        cgraph->perf_runs++;
        cgraph->perf_cycles  += perf_cycles_cur;
        cgraph->perf_time_us += perf_time_us_cur;

        GGML_PRINT_DEBUG("%s: perf (%d) - cpu = %.3f / %.3f ms, wall = %.3f / %.3f ms\n",
                __func__, cgraph->perf_runs,
                (double) perf_cycles_cur      / (double) ggml_cycles_per_ms(),
                (double) cgraph->perf_cycles  / (double) ggml_cycles_per_ms() / (double) cgraph->perf_runs,
                (double) perf_time_us_cur     / 1000.0,
                (double) cgraph->perf_time_us / 1000.0 / cgraph->perf_runs);
    }
}

void ggml_graph_reset(struct ggml_cgraph * cgraph) {
    for (int i = 0; i < cgraph->n_nodes; i++) {
        struct ggml_tensor * grad = cgraph->grads[i];

        if (grad) {
            ggml_set_zero(grad);
        }
    }
}

void ggml_graph_print(const struct ggml_cgraph * cgraph) {
    int64_t perf_total_per_op_us[GGML_OP_COUNT] = {0};

    GGML_PRINT("=== GRAPH ===\n");

    GGML_PRINT_DEBUG("n_threads       = %d\n",        cgraph->n_threads);
    GGML_PRINT_DEBUG("total work size = %zu bytes\n", cgraph->work_size);

    GGML_PRINT("n_nodes = %d\n", cgraph->n_nodes);
    for (int i = 0; i < cgraph->n_nodes; i++) {
        struct ggml_tensor * node = cgraph->nodes[i];

        perf_total_per_op_us[node->op] += node->perf_time_us;

        GGML_PRINT(" - %3d: [ %6d, %6d, %6d] %16s %s (%3d) cpu = %7.3f / %7.3f ms, wall = %7.3f / %7.3f ms\n",
                i,
                node->ne[0], node->ne[1], node->ne[2],
                GGML_OP_LABEL[node->op], node->is_param ? "x" : node->grad ? "g" : " ", node->perf_runs,
                (double) node->perf_cycles  / (double) ggml_cycles_per_ms(),
                (double) node->perf_cycles  / (double) ggml_cycles_per_ms() / (double) node->perf_runs,
                (double) node->perf_time_us / 1000.0,
                (double) node->perf_time_us / 1000.0 / node->perf_runs);
    }

    GGML_PRINT("n_leafs = %d\n", cgraph->n_leafs);
    for (int i = 0; i < cgraph->n_leafs; i++) {
        struct ggml_tensor * node = cgraph->leafs[i];

        GGML_PRINT(" - %3d: [ %6d, %6d] %8s\n",
                i,
                node->ne[0], node->ne[1],
                GGML_OP_LABEL[node->op]);
    }

    for (int i = 0; i < GGML_OP_COUNT; i++) {
        GGML_PRINT("perf_total_per_op_us[%16s] = %7.3f ms\n", GGML_OP_LABEL[i], (double) perf_total_per_op_us[i] / 1000.0);
    }

    GGML_PRINT("========================================\n");
}

// check if node is part of the graph
static bool ggml_graph_find(const struct ggml_cgraph * cgraph, const struct ggml_tensor * node) {
    if (cgraph == NULL) {
        return true;
    }

    for (int i = 0; i < cgraph->n_nodes; i++) {
        if (cgraph->nodes[i] == node) {
            return true;
        }
    }

    return false;
}

static struct ggml_tensor * ggml_graph_get_parent(const struct ggml_cgraph * cgraph, const struct ggml_tensor * node) {
    for (int i = 0; i < cgraph->n_nodes; i++) {
        struct ggml_tensor * parent = cgraph->nodes[i];

        if (parent->grad == node) {
            return parent;
        }
    }

    return NULL;
}

void ggml_graph_dump_dot(const struct ggml_cgraph * gb, const struct ggml_cgraph * gf, const char * filename) {
    char color[16];

    FILE * fp = fopen(filename, "w");
    GGML_ASSERT(fp);

    fprintf(fp, "digraph G {\n");
    fprintf(fp, "  newrank = true;\n");
    fprintf(fp, "  rankdir = LR;\n");

    for (int i = 0; i < gb->n_nodes; i++) {
        struct ggml_tensor * node = gb->nodes[i];

        if (ggml_graph_get_parent(gb, node) != NULL) {
            continue;
        }

        if (node->is_param) {
            snprintf(color, sizeof(color), "yellow");
        } else if (node->grad) {
            if (ggml_graph_find(gf, node)) {
                snprintf(color, sizeof(color), "green");
            } else {
                snprintf(color, sizeof(color), "lightblue");
            }
        } else {
            snprintf(color, sizeof(color), "white");
        }

        fprintf(fp, "  \"%p\" [ \
style = filled; fillcolor = %s; shape = record; \
label=\"%d [%d, %d] | <x>%s",
                (void *) node, color,
                i, node->ne[0], node->ne[1],
                GGML_OP_SYMBOL[node->op]);

        if (node->grad) {
            fprintf(fp, " | <g>%s\"; ]\n", GGML_OP_SYMBOL[node->grad->op]);
        } else {
            fprintf(fp, "\"; ]\n");
        }
    }

    for (int i = 0; i < gb->n_leafs; i++) {
        struct ggml_tensor * node = gb->leafs[i];

        snprintf(color, sizeof(color), "pink");

        if (ggml_nelements(node) == 1) {
            fprintf(fp, "  \"%p\" [ \
style = filled; fillcolor = %s; shape = record; \
label=\"<x>%.1e\"; ]\n",
                    (void *) node, color, (double)ggml_get_f32_1d(node, 0));
        } else {
            fprintf(fp, "  \"%p\" [ \
style = filled; fillcolor = %s; shape = record; \
label=\"<x>CONST %d [%d, %d]\"; ]\n",
                    (void *) node, color,
                    i, node->ne[0], node->ne[1]);
        }
    }

    for (int i = 0; i < gb->n_nodes; i++) {
        struct ggml_tensor * node = gb->nodes[i];

        struct ggml_tensor * parent = ggml_graph_get_parent(gb, node);

        if (node->src0) {
            struct ggml_tensor * parent0 = ggml_graph_get_parent(gb, node->src0);

            fprintf(fp, "  \"%p\":%s -> \"%p\":%s [ arrowhead = %s; style = %s; label = \"x\"; ]\n",
                    parent0 ? (void *) parent0 : (void *) node->src0,
                    parent0 ? "g" : "x",
                    parent ? (void *) parent : (void *) node,
                    parent ? "g" : "x",
                    parent ? "empty" : "vee",
                    parent ? "dashed" : "solid");
        }

        if (node->src1) {
            struct ggml_tensor * parent1 = ggml_graph_get_parent(gb, node->src1);

            fprintf(fp, "  \"%p\":%s -> \"%p\":%s [ arrowhead = %s; style = %s; label = \"y\"; ]\n",
                    parent1 ? (void *) parent1 : (void *) node->src1,
                    parent1 ? "g" : "x",
                    parent ? (void *) parent : (void *) node,
                    parent ? "g" : "x",
                    parent ? "empty" : "vee",
                    parent ? "dashed" : "solid");
        }
    }

    for (int i = 0; i < gb->n_leafs; i++) {
        struct ggml_tensor * node = gb->leafs[i];

        if (node->src0) {
            fprintf(fp, "  \"%p\":%s -> \"%p\":%s [ label = \"x\"; ]\n",
                    (void *) node->src0, "x",
                    (void *) node, "x");
        }

        if (node->src1) {
            fprintf(fp, "  \"%p\":%s -> \"%p\":%s [ label = \"y\"; ]\n",
                    (void *) node->src1, "x",
                    (void *) node, "x");
        }
    }

    fprintf(fp, "}\n");

    fclose(fp);

    GGML_PRINT("%s: dot -Tpng %s -o %s.png && open %s.png\n", __func__, filename, filename, filename);
}

////////////////////////////////////////////////////////////////////////////////

static void ggml_opt_set_params(int np, struct ggml_tensor * const ps[], const float * x) {
    int i = 0;
    for (int p = 0; p < np; ++p) {
        const int ne = ggml_nelements(ps[p]);
        // TODO: add function to set tensor from array
        for (int j = 0; j < ne; ++j) {
            ggml_set_f32_1d(ps[p], j, x[i++]);
        }
    }
}

static void ggml_opt_get_params(int np, struct ggml_tensor * const ps[], float * x) {
    int i = 0;
    for (int p = 0; p < np; ++p) {
        const int ne = ggml_nelements(ps[p]);
        // TODO: add function to get all elements at once
        for (int j = 0; j < ne; ++j) {
            x[i++] = ggml_get_f32_1d(ps[p], j);
        }
    }
}

static void ggml_opt_get_grad(int np, struct ggml_tensor * const ps[], float * g) {
    int i = 0;
    for (int p = 0; p < np; ++p) {
        const int ne = ggml_nelements(ps[p]);
        // TODO: add function to get all elements at once
        for (int j = 0; j < ne; ++j) {
            g[i++] = ggml_get_f32_1d(ps[p]->grad, j);
        }
    }
}

//
// ADAM
//
// ref: https://arxiv.org/pdf/1412.6980.pdf
//
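// Adam keeps exponential moving averages of the gradient (m) and of its
// square (v), corrects their startup bias, and updates each parameter as
//
//   m_t = beta1*m_{t-1} + (1 - beta1)*g_t
//   v_t = beta2*v_{t-1} + (1 - beta2)*g_t^2
//   x_t = x_{t-1} - alpha * m^hat / (sqrt(v^hat) + eps)
//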
static enum ggml_opt_result ggml_opt_adam(
        struct ggml_context * ctx,
        struct ggml_opt_params params,
        struct ggml_tensor * f,
        struct ggml_cgraph * gf,
        struct ggml_cgraph * gb) {
    GGML_ASSERT(ggml_is_scalar(f));

    gf->n_threads = params.n_threads;
    gb->n_threads = params.n_threads;

    // these will store the parameters we want to optimize
    struct ggml_tensor * ps[GGML_MAX_PARAMS];

    int np = 0;
    int nx = 0;
    for (int i = 0; i < gf->n_nodes; ++i) {
        if (gf->nodes[i]->is_param) {
            GGML_PRINT_DEBUG("found param %d: grad->op = %d\n", np, gf->nodes[i]->grad->op);

            GGML_ASSERT(np < GGML_MAX_PARAMS);

            ps[np++] = gf->nodes[i];
            nx += ggml_nelements(gf->nodes[i]);
        }
    }

    // constants
    const float alpha = params.adam.alpha;
    const float beta1 = params.adam.beta1;
    const float beta2 = params.adam.beta2;
    const float eps   = params.adam.eps;

    float * x  = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // view of the parameters
    float * g1 = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // gradient
    float * g2 = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // gradient squared
    float * m  = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // first moment
    float * v  = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // second moment
    float * mh = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // first moment hat
    float * vh = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // second moment hat

    float * pf = params.past > 0 ? ggml_new_tensor_1d(ctx, GGML_TYPE_F32, params.past)->data : NULL; // past function values

    // initialize
    ggml_vec_set_f32(nx, m, 0.0f);
    ggml_vec_set_f32(nx, v, 0.0f);

    // update view
    ggml_opt_get_params(np, ps, x);

    // compute the function value
    ggml_graph_reset  (gf);
    ggml_set_f32      (f->grad, 1.0f);
    ggml_graph_compute(ctx, gb);

    float fx_prev = ggml_get_f32_1d(f, 0);
    if (pf) {
        pf[0] = fx_prev;
    }

    int n_no_improvement = 0;
    float fx_best = fx_prev;

    // run the optimizer
    for (int t = 0; t < params.adam.n_iter; ++t) {
        GGML_PRINT_DEBUG  ("=== iter %d ===\n", t);

        GGML_PRINT_DEBUG  ("f      = %10.6f\n", ggml_get_f32_1d(f, 0));
        GGML_PRINT_DEBUG_5("df/dx0 = %10.6f\n", ggml_get_f32_1d(ps[0]->grad, 0));
        GGML_PRINT_DEBUG_5("df/dx1 = %10.6f\n", ggml_get_f32_1d(ps[1]->grad, 0));

        for (int i = 0; i < np; ++i) {
            GGML_PRINT_DEBUG("param %d: %10.6f, g = %10.6f\n", i,
                    ggml_get_f32_1d(ps[i], 0), ggml_get_f32_1d(ps[i]->grad, 0));
        }

        const int64_t t_start_wall = ggml_time_us();
        const int64_t t_start_cpu  = ggml_cycles();
        UNUSED(t_start_wall);
        UNUSED(t_start_cpu);

        {
            // update the gradient
            ggml_opt_get_grad(np, ps, g1);

            // m_t = beta1*m_t-1 + (1 - beta1)*g_t
            ggml_vec_scale_f32(nx, m, beta1);
            ggml_vec_mad_f32  (nx, m, g1, 1.0f - beta1);

            // g2 = g1^2
            ggml_vec_sqr_f32  (nx, g2, g1);

            // v_t = beta2*v_t-1 + (1 - beta2)*g_t^2
            ggml_vec_scale_f32(nx, v, beta2);
            ggml_vec_mad_f32  (nx, v, g2, 1.0f - beta2);

            // m^hat = m_t / (1 - beta1^t)
            // v^hat = v_t / (1 - beta2^t)
            // x_t = x_t-1 - alpha*m^hat/(sqrt(v^hat) + eps)
            ggml_vec_cpy_f32  (nx, mh, m);
            ggml_vec_cpy_f32  (nx, vh, v);

            ggml_vec_scale_f32(nx, mh, alpha/(1.0f - powf(beta1, t + 1)));
            ggml_vec_scale_f32(nx, vh, 1.0f/(1.0f - powf(beta2, t + 1)));

            ggml_vec_sqrt_f32 (nx, vh, vh);
            ggml_vec_acc1_f32 (nx, vh, eps);

            ggml_vec_div_f32  (nx, mh, mh, vh);
            ggml_vec_sub_f32  (nx, x,  x,  mh);

            // update the parameters
            ggml_opt_set_params(np, ps, x);
        }

        ggml_graph_reset  (gf);
        ggml_set_f32      (f->grad, 1.0f);
        ggml_graph_compute(ctx, gb);

        const float fx = ggml_get_f32_1d(f, 0);

        // check convergence
        if (fabsf(fx - fx_prev)/fx < params.adam.eps_f) {
            GGML_PRINT_DEBUG("converged\n");

            return GGML_OPT_OK;
        }

        // delta-based convergence test
        if (pf != NULL) {
            // need at least params.past iterations to start checking for convergence
            if (params.past <= t) {
                const float rate = (pf[t%params.past] - fx)/fx;

                if (fabsf(rate) < params.delta) {
                    return GGML_OPT_OK;
                }
            }

            pf[t%params.past] = fx;
        }

        // check for improvement
        if (params.max_no_improvement > 0) {
            if (fx_best > fx) {
                fx_best = fx;
                n_no_improvement = 0;
            } else {
                ++n_no_improvement;

                if (n_no_improvement >= params.max_no_improvement) {
                    return GGML_OPT_OK;
                }
            }
        }

        fx_prev = fx;

        {
            const int64_t t_end_cpu = ggml_cycles();
            GGML_PRINT_DEBUG("time iter:      %5.3f s\n", ((float)(t_end_cpu - t_start_cpu))/CLOCKS_PER_SEC);
            UNUSED(t_end_cpu);

            const int64_t t_end_wall = ggml_time_us();
            GGML_PRINT_DEBUG("wall time iter: %5.3f s\n", (t_end_wall - t_start_wall)/1e6);
            UNUSED(t_end_wall);
        }
    }

    return GGML_OPT_DID_NOT_CONVERGE;
}
//
// L-BFGS
//
// the L-BFGS implementation below is based on the following implementation:
//
//   https://github.com/chokkan/liblbfgs
//

struct ggml_lbfgs_iteration_data {
    float alpha;
    float ys;
    float * s;
    float * y;
};
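
// NOTE: linesearch_backtracking below scales the step until a sufficient
// decrease (Armijo) condition holds:
//
//   f(x + step*d) <= f(x) + ftol*step*(g . d)
//
// and, depending on params->lbfgs.linesearch, additionally a curvature
// (Wolfe) condition:
//
//   g(x + step*d) . d >= wolfe*(g . d)        (regular Wolfe)
//   |g(x + step*d) . d| <= wolfe*|g . d|      (strong Wolfe)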
static enum ggml_opt_result linesearch_backtracking(
        struct ggml_context * ctx,
        const struct ggml_opt_params * params,
        int nx,
        float * x,
        float * fx,
        float * g,
        float * d,
        float * step,
        const float * xp,
        struct ggml_tensor * f,
        struct ggml_cgraph * gf,
        struct ggml_cgraph * gb,
        const int np,
        struct ggml_tensor * ps[]) {
    int count = 0;

    float width  = 0.0f;
    float dg     = 0.0f;
    float finit  = 0.0f;
    float dginit = 0.0f;
    float dgtest = 0.0f;

    const float dec = 0.5f;
    const float inc = 2.1f;

    if (*step <= 0.f) {
        return GGML_LINESEARCH_INVALID_PARAMETERS;
    }

    // compute the initial gradient in the search direction
    ggml_vec_dot_f32(nx, &dginit, g, d);

    // make sure that d points to a descent direction
    if (0 < dginit) {
        return GGML_LINESEARCH_FAIL;
    }

    // initialize local variables
    finit  = *fx;
    dgtest = params->lbfgs.ftol*dginit;

    while (true) {
        ggml_vec_cpy_f32(nx, x, xp);
        ggml_vec_mad_f32(nx, x, d, *step);

        // evaluate the function and gradient values
        {
            ggml_opt_set_params(np, ps, x);

            ggml_graph_reset  (gf);
            ggml_set_f32      (f->grad, 1.0f);
            ggml_graph_compute(ctx, gb);

            ggml_opt_get_grad(np, ps, g);

            *fx = ggml_get_f32_1d(f, 0);
        }

        ++count;

        if (*fx > finit + (*step)*dgtest) {
            width = dec;
        } else {
            // Armijo condition is satisfied
            if (params->lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_ARMIJO) {
                return count;
            }

            ggml_vec_dot_f32(nx, &dg, g, d);

            // check the Wolfe condition
            if (dg < params->lbfgs.wolfe * dginit) {
                width = inc;
            } else {
                if (params->lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_WOLFE) {
                    // regular Wolfe conditions
                    return count;
                }

                if (dg > -params->lbfgs.wolfe*dginit) {
                    width = dec;
                } else {
                    // strong Wolfe condition (GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE)
                    return count;
                }
            }
        }

        if (*step < params->lbfgs.min_step) {
            return GGML_LINESEARCH_MINIMUM_STEP;
        }
        if (*step > params->lbfgs.max_step) {
            return GGML_LINESEARCH_MAXIMUM_STEP;
        }
        if (params->lbfgs.max_linesearch <= count) {
            return GGML_LINESEARCH_MAXIMUM_ITERATIONS;
        }

        (*step) *= width;
    }

    return GGML_LINESEARCH_FAIL;
}
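
// NOTE: on success linesearch_backtracking returns the (positive) number of
// line-search steps taken; on failure it returns one of the negative
// GGML_LINESEARCH_* codes, which is why the caller below can distinguish the
// two cases with a simple `ls < 0` check.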
static enum ggml_opt_result ggml_opt_lbfgs(
        struct ggml_context * ctx,
        struct ggml_opt_params params,
        struct ggml_tensor * f,
        struct ggml_cgraph * gf,
        struct ggml_cgraph * gb) {
    if (params.lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_WOLFE ||
        params.lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE) {
        if (params.lbfgs.wolfe <= params.lbfgs.ftol || 1.f <= params.lbfgs.wolfe) {
            return GGML_OPT_INVALID_WOLFE;
        }
    }

    gf->n_threads = params.n_threads;
    gb->n_threads = params.n_threads;

    const int m = params.lbfgs.m;

    // these will store the parameters we want to optimize
    struct ggml_tensor * ps[GGML_MAX_PARAMS];

    int np = 0;
    int nx = 0;
    for (int i = 0; i < gf->n_nodes; ++i) {
        if (gf->nodes[i]->is_param) {
            GGML_PRINT_DEBUG("found param %d: grad->op = %d\n", np, gf->nodes[i]->grad->op);

            GGML_ASSERT(np < GGML_MAX_PARAMS);

            ps[np++] = gf->nodes[i];
            nx += ggml_nelements(gf->nodes[i]);
        }
    }

    float * x  = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // current parameters
    float * xp = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // previous parameters
    float * g  = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // current gradient
    float * gp = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // previous gradient
    float * d  = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // search direction

    float * pf = params.past > 0 ? ggml_new_tensor_1d(ctx, GGML_TYPE_F32, params.past)->data : NULL; // past function values

    float fx    = 0.0f; // cost function value
    float xnorm = 0.0f; // ||x||
    float gnorm = 0.0f; // ||g||
    float step  = 0.0f;

    // initialize x from the graph nodes
    ggml_opt_get_params(np, ps, x);

    // the L-BFGS memory
    struct ggml_lbfgs_iteration_data * lm = alloca(sizeof(struct ggml_lbfgs_iteration_data)*m);

    for (int i = 0; i < m; ++i) {
        lm[i].alpha = 0.0f;
        lm[i].ys    = 0.0f;
        lm[i].s     = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data;
        lm[i].y     = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data;
    }

    // evaluate the function value and its gradient
    {
        ggml_opt_set_params(np, ps, x);

        ggml_graph_reset  (gf);
        ggml_set_f32      (f->grad, 1.0f);
        ggml_graph_compute(ctx, gb);

        ggml_opt_get_grad(np, ps, g);

        fx = ggml_get_f32_1d(f, 0);
    }

    if (pf) {
        pf[0] = fx;
    }

    float fx_best = fx;

    // search direction = -gradient
    ggml_vec_neg_f32(nx, d, g);

    // ||x||, ||g||
    ggml_vec_norm_f32(nx, &xnorm, x);
    ggml_vec_norm_f32(nx, &gnorm, g);

    if (xnorm < 1.0f) {
        xnorm = 1.0f;
    }

    // already optimized
    if (gnorm/xnorm <= params.lbfgs.eps) {
        return GGML_OPT_OK;
    }

    // initial step
    ggml_vec_norm_inv_f32(nx, &step, d);

    int j                = 0;
    int k                = 1;
    int ls               = 0;
    int end              = 0;
    int bound            = 0;
    int n_no_improvement = 0;

    float ys   = 0.0f;
    float yy   = 0.0f;
    float beta = 0.0f;

    while (true) {
        // store the current position and gradient vectors
        ggml_vec_cpy_f32(nx, xp, x);
        ggml_vec_cpy_f32(nx, gp, g);

        ls = linesearch_backtracking(ctx, &params, nx, x, &fx, g, d, &step, xp, f, gf, gb, np, ps);

        if (ls < 0) {
            // linesearch failed - go back to the previous point and return
            ggml_vec_cpy_f32(nx, x, xp);
            ggml_vec_cpy_f32(nx, g, gp);

            return ls;
        }

        ggml_vec_norm_f32(nx, &xnorm, x);
        ggml_vec_norm_f32(nx, &gnorm, g);

        GGML_PRINT_DEBUG("f = %10.6f\n", ggml_get_f32_1d(f, 0));

        if (xnorm < 1.0f) {
            xnorm = 1.0f;
        }
        if (gnorm/xnorm <= params.lbfgs.eps) {
            // converged
            return GGML_OPT_OK;
        }

        // delta-based convergence test
        if (pf != NULL) {
            // need at least params.past iterations to start checking for convergence
            if (params.past <= k) {
                const float rate = (pf[k%params.past] - fx)/fx;

                if (fabsf(rate) < params.delta) {
                    return GGML_OPT_OK;
                }
            }

            pf[k%params.past] = fx;
        }

        // check for improvement
        if (params.max_no_improvement > 0) {
            if (fx < fx_best) {
                fx_best = fx;
                n_no_improvement = 0;
            } else {
                n_no_improvement++;

                if (n_no_improvement >= params.max_no_improvement) {
                    return GGML_OPT_OK;
                }
            }
        }

        if (params.lbfgs.n_iter != 0 && params.lbfgs.n_iter < k + 1) {
            // reached the maximum number of iterations
            return GGML_OPT_DID_NOT_CONVERGE;
        }

        // update vectors s and y:
        //   s_{k+1} = x_{k+1} - x_{k} = \step * d_{k}.
        //   y_{k+1} = g_{k+1} - g_{k}.
        //
        ggml_vec_sub_f32(nx, lm[end].s, x, xp);
        ggml_vec_sub_f32(nx, lm[end].y, g, gp);

        // compute scalars ys and yy:
        //   ys = y^t \cdot s    -> 1 / \rho.
        //   yy = y^t \cdot y.
        //
        ggml_vec_dot_f32(nx, &ys, lm[end].y, lm[end].s);
        ggml_vec_dot_f32(nx, &yy, lm[end].y, lm[end].y);

        lm[end].ys = ys;

        // find new search direction
        //   ref: https://en.wikipedia.org/wiki/Limited-memory_BFGS

        bound = (m <= k) ? m : k;
        k++;
        end = (end + 1)%m;

        // initialize search direction with -g
        ggml_vec_neg_f32(nx, d, g);

        j = end;
        for (int i = 0; i < bound; ++i) {
            j = (j + m - 1) % m;
            // \alpha_{j} = \rho_{j} s^{t}_{j} \cdot q_{k+1}
            ggml_vec_dot_f32(nx, &lm[j].alpha, lm[j].s, d);
            lm[j].alpha /= lm[j].ys;
            // q_{i} = q_{i+1} - \alpha_{i} y_{i}
            ggml_vec_mad_f32(nx, d, lm[j].y, -lm[j].alpha);
        }

        ggml_vec_scale_f32(nx, d, ys/yy);

        for (int i = 0; i < bound; ++i) {
            // \beta_{j} = \rho_{j} y^t_{j} \cdot \gamma_{i}
            ggml_vec_dot_f32(nx, &beta, lm[j].y, d);
            beta /= lm[j].ys;
            // \gamma_{i+1} = \gamma_{i} + (\alpha_{j} - \beta_{j}) s_{j}
            ggml_vec_mad_f32(nx, d, lm[j].s, lm[j].alpha - beta);
            j = (j + 1)%m;
        }

        step = 1.0;
    }

    return GGML_OPT_DID_NOT_CONVERGE;
}
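
// NOTE: the lm[] ring buffer above keeps only the last m (s, y) correction
// pairs - i.e. 2*m*nx floats - instead of the O(nx^2) dense Hessian
// approximation of full BFGS; the two-loop recursion reconstructs the
// product of the inverse-Hessian approximation with the gradient from those
// pairs in O(m*nx) work per iteration.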
struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type) {
    struct ggml_opt_params result;

    switch (type) {
        case GGML_OPT_ADAM:
            {
                result = (struct ggml_opt_params) {
                    .type      = GGML_OPT_ADAM,
                    .n_threads = 1,
                    .past      = 0,
                    .delta     = 1e-5f,

                    .max_no_improvement = 100,

                    .print_forward_graph  = true,
                    .print_backward_graph = true,

                    .adam = {
                        .n_iter = 10000,
                        .alpha  = 0.001f,
                        .beta1  = 0.9f,
                        .beta2  = 0.999f,
                        .eps    = 1e-8f,
                        .eps_f  = 1e-5f,
                        .eps_g  = 1e-3f,
                    },
                };
            } break;
        case GGML_OPT_LBFGS:
            {
                result = (struct ggml_opt_params) {
                    .type      = GGML_OPT_LBFGS,
                    .n_threads = 1,
                    .past      = 0,
                    .delta     = 1e-5f,

                    .max_no_improvement = 0,

                    .print_forward_graph  = true,
                    .print_backward_graph = true,

                    .lbfgs = {
                        .m              = 6,
                        .n_iter         = 100,
                        .max_linesearch = 20,

                        .eps      = 1e-5f,
                        .ftol     = 1e-4f,
                        .wolfe    = 0.9f,
                        .min_step = 1e-20f,
                        .max_step = 1e+20f,

                        .linesearch = GGML_LINESEARCH_DEFAULT,
                    },
                };
            } break;
    }

    return result;
}
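
// Example (sketch): the intended pattern is to start from the defaults and
// override individual fields before calling ggml_opt below:
//
//     struct ggml_opt_params opt_params = ggml_opt_default_params(GGML_OPT_ADAM);
//     opt_params.adam.n_iter = 500; // fewer iterations than the default 10000
//     opt_params.n_threads   = 4;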
enum ggml_opt_result ggml_opt(
        struct ggml_context * ctx,
        struct ggml_opt_params params,
        struct ggml_tensor * f) {
    bool free_ctx = false;
    if (ctx == NULL) {
        struct ggml_init_params params_ctx = {
            .mem_size   = 16*1024*1024,
            .mem_buffer = NULL,
            .no_alloc   = false,
        };

        ctx = ggml_init(params_ctx);
        if (ctx == NULL) {
            return GGML_OPT_NO_CONTEXT;
        }

        free_ctx = true;
    }

    enum ggml_opt_result result = GGML_OPT_OK;

    // build forward + backward compute graphs
    struct ggml_cgraph gf = ggml_build_forward (f);
    struct ggml_cgraph gb = ggml_build_backward(ctx, &gf, false);

    switch (params.type) {
        case GGML_OPT_ADAM:
            {
                result = ggml_opt_adam(ctx, params, f, &gf, &gb);
            } break;
        case GGML_OPT_LBFGS:
            {
                result = ggml_opt_lbfgs(ctx, params, f, &gf, &gb);
            } break;
    }

    if (params.print_forward_graph) {
        ggml_graph_print   (&gf);
        ggml_graph_dump_dot(&gf, NULL, "opt-forward.dot");
    }

    if (params.print_backward_graph) {
        ggml_graph_print   (&gb);
        ggml_graph_dump_dot(&gb, &gf, "opt-backward.dot");
    }

    if (free_ctx) {
        ggml_free(ctx);
    }

    return result;
}
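
// Example (sketch): minimizing a scalar function of two parameters. The
// objective here is an arbitrary choice for illustration; the tensor setup
// uses only functions declared in ggml.h:
//
//     struct ggml_init_params ip = { .mem_size = 16*1024*1024, .mem_buffer = NULL, .no_alloc = false };
//     struct ggml_context * ctx0 = ggml_init(ip);
//
//     struct ggml_tensor * a = ggml_new_f32(ctx0, 3.0f);
//     struct ggml_tensor * b = ggml_new_f32(ctx0, -2.0f);
//     ggml_set_param(ctx0, a);
//     ggml_set_param(ctx0, b);
//
//     // f = (a + b)^2, minimized at a + b = 0
//     struct ggml_tensor * f = ggml_sqr(ctx0, ggml_add(ctx0, a, b));
//
//     enum ggml_opt_result res = ggml_opt(NULL, ggml_opt_default_params(GGML_OPT_ADAM), f);
//     // res == GGML_OPT_OK on convergence; a and b now hold the optimized values
//
//     ggml_free(ctx0);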
////////////////////////////////////////////////////////////////////////////////

size_t ggml_quantize_q4_0(const float * src, void * dst, int n, int k, int64_t * hist) {
    assert(k % QK == 0);
    const int nb = k / QK;

    for (int j = 0; j < n; j += k) {
        block_q4_0 * restrict y = (block_q4_0 *)dst + j/QK;

        quantize_row_q4_0_reference(src + j, y, k);

        for (int i = 0; i < nb; i++) {
            for (int l = 0; l < QK; l += 2) {
                const uint8_t vi0 = y[i].qs[l/2] & 0xF;
                const uint8_t vi1 = y[i].qs[l/2] >> 4;

                hist[vi0]++;
                hist[vi1]++;
            }
        }
    }

    return (n/QK*sizeof(block_q4_0));
}

size_t ggml_quantize_q4_1(const float * src, void * dst, int n, int k, int64_t * hist) {
    assert(k % QK == 0);
    const int nb = k / QK;

    for (int j = 0; j < n; j += k) {
        block_q4_1 * restrict y = (block_q4_1 *)dst + j/QK;

        quantize_row_q4_1_reference(src + j, y, k);

        for (int i = 0; i < nb; i++) {
            for (int l = 0; l < QK; l += 2) {
                const uint8_t vi0 = y[i].qs[l/2] & 0xF;
                const uint8_t vi1 = y[i].qs[l/2] >> 4;

                hist[vi0]++;
                hist[vi1]++;
            }
        }
    }

    return (n/QK*sizeof(block_q4_1));
}
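
// NOTE: in both quantize functions, hist must point to 16 int64_t counters
// (one per possible 4-bit value); each byte of qs packs two quantized values,
// which is why the inner loop splits it into a low and a high nibble.
// Example (sketch), with src/dst/n/k provided by the caller:
//
//     int64_t hist[16] = {0};
//     size_t size = ggml_quantize_q4_0(src, dst, n, k, hist);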
////////////////////////////////////////////////////////////////////////////////

int ggml_cpu_has_avx(void) {
#if defined(__AVX__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_avx2(void) {
#if defined(__AVX2__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_avx512(void) {
#if defined(__AVX512F__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_fma(void) {
#if defined(__FMA__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_neon(void) {
#if defined(__ARM_NEON)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_arm_fma(void) {
#if defined(__ARM_FEATURE_FMA)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_f16c(void) {
#if defined(__F16C__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_fp16_va(void) {
#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_wasm_simd(void) {
#if defined(__wasm_simd128__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_blas(void) {
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_sse3(void) {
#if defined(__SSE3__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_vsx(void) {
#if defined(__POWER9_VECTOR__)
    return 1;
#else
    return 0;
#endif
}
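
// NOTE: these feature flags are fixed at compile time by the predefined
// compiler macros, not probed at runtime. Example (sketch) of reporting them:
//
//     printf("AVX = %d | AVX2 = %d | NEON = %d | BLAS = %d\n",
//            ggml_cpu_has_avx(), ggml_cpu_has_avx2(),
//            ggml_cpu_has_neon(), ggml_cpu_has_blas());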
////////////////////////////////////////////////////////////////////////////////