ggml.c (590 KB, 14,308 lines)

430914310143111431214313143141431514316143171431814319143201432114322143231432414325143261432714328143291433014331143321433314334143351433614337143381433914340143411434214343143441434514346143471434814349143501435114352143531435414355143561435714358143591436014361143621436314364143651436614367143681436914370143711437214373143741437514376143771437814379143801438114382143831438414385143861438714388143891439014391143921439314394143951439614397143981439914400144011440214403144041440514406144071440814409144101441114412144131441414415144161441714418144191442014421144221442314424144251442614427144281442914430144311443214433144341443514436144371443814439144401444114442144431444414445144461444714448144491445014451144521445314454144551445614457144581445914460144611446214463144641446514466144671446814469144701447114472144731447414475144761447714478144791448014481144821448314484144851448614487144881448914490144911449214493144941449514496144971449814499145001450114502145031450414505145061450714508145091451014511145121451314514145151451614517145181451914520145211452214523145241452514526145271452814529145301453114532145331453414535145361453714538145391454014541145421454314544145451454614547145481454914550145511455214553145541455514556145571455814559145601456114562145631456414565145661456714568145691457014571145721457314574145751457614577145781457914580145811458214583145841458514586145871458814589145901459114592145931459414595145961459714598145991460014601146021460314604146051460614607146081460914610146111461214613146141461514616146171461814619146201462114622146231462414625146261462714628146291463014631146321463314634146351463614637146381463914640146411464214643146441464514646146471464814649146501465114652146531465414655146561465714658146591466014661146621466314664146651466614667146681466914670146711467214673146741467514676146771467814679146801468114682146831468414685146861468714688146891469014691146921469314694146951469614697146981469914700147011470214703147041470514706147071470814709147101471114712147131471414715147161471714718147191472014721147221472314724147251472614727147281472914730147311473214733147341473514736147371473814739147401474114742147431474414745147461474714748147491475014751147521475314754147551475614757147581475914760147611476214763147641476514766147671476814769147701477114772147731477414775147761477714778147791478014781147821478314784147851478614787147881478914790147911479214793147941479514796147971479814799148001480114802148031480414805148061480714808148091481014811148121481314814148151481614817148181481914820148211482214823148241482514826148271482814829148301483114832148331483414835148361483714838148391484014841148421484314844148451484614847148481484914850148511485214853148541485514856148571485814859148601486114862148631486414865148661486714868148691487014871148721487314874148751487614877148781487914880148811488214883148841488514886148871488814889148901489114892148931489414895148961489714898148991490014901149021490314904149051490614907149081490914910149111491214913149141491514916149171491814919149201492114922149231492414925149261492714928149291493014931149321493314934149351493614937149381493914940149411494214943149441494514946149471494814949149501495114952149531495414955149561495714958149591496014961149621496314964149651496614967149681496914970149711497214973149741497514976149771497814979149801498114982149831498414985149861498714988149891499014991149921499314994149951499614997149981499915000150011500215003150041500515006150071500815009150101501115012150131501415015150161501715018150191
502015021150221502315024150251502615027150281502915030150311503215033150341503515036150371503815039150401504115042150431504415045150461504715048150491505015051150521505315054150551505615057150581505915060150611506215063150641506515066150671506815069150701507115072150731507415075150761507715078150791508015081150821508315084150851508615087150881508915090150911509215093150941509515096150971509815099151001510115102151031510415105151061510715108151091511015111151121511315114151151511615117151181511915120151211512215123151241512515126151271512815129151301513115132151331513415135151361513715138151391514015141151421514315144151451514615147151481514915150151511515215153151541515515156151571515815159151601516115162151631516415165151661516715168151691517015171151721517315174151751517615177151781517915180151811518215183151841518515186151871518815189151901519115192151931519415195151961519715198151991520015201152021520315204152051520615207152081520915210152111521215213152141521515216152171521815219152201522115222152231522415225152261522715228152291523015231152321523315234152351523615237152381523915240152411524215243152441524515246152471524815249152501525115252152531525415255152561525715258152591526015261152621526315264152651526615267152681526915270152711527215273152741527515276152771527815279152801528115282152831528415285152861528715288152891529015291152921529315294152951529615297152981529915300153011530215303153041530515306153071530815309153101531115312153131531415315153161531715318153191532015321153221532315324153251532615327153281532915330153311533215333153341533515336153371533815339153401534115342153431534415345153461534715348153491535015351153521535315354153551535615357153581535915360153611536215363153641536515366153671536815369153701537115372153731537415375153761537715378153791538015381153821538315384153851538615387153881538915390153911539215393153941539515396153971539815399154001540115402154031540415405154061540715408154091541015411154121541315414154151541615417154181541915420154211542215423154241542515426154271542815429154301543115432154331543415435154361543715438154391544015441154421544315444154451544615447154481544915450154511545215453154541545515456154571545815459154601546115462154631546415465154661546715468154691547015471154721547315474154751547615477154781547915480154811548215483154841548515486154871548815489154901549115492154931549415495154961549715498154991550015501155021550315504155051550615507155081550915510155111551215513155141551515516155171551815519155201552115522155231552415525155261552715528155291553015531155321553315534155351553615537155381553915540155411554215543155441554515546155471554815549155501555115552155531555415555155561555715558155591556015561155621556315564155651556615567155681556915570155711557215573155741557515576155771557815579155801558115582155831558415585155861558715588155891559015591155921559315594155951559615597155981559915600156011560215603156041560515606156071560815609156101561115612156131561415615156161561715618156191562015621156221562315624156251562615627156281562915630156311563215633156341563515636156371563815639156401564115642156431564415645156461564715648156491565015651156521565315654156551565615657156581565915660156611566215663156641566515666156671566815669156701567115672156731567415675156761567715678156791568015681156821568315684156851568615687156881568915690156911569215693156941569515696156971569815699157001570115702157031570415705157061570715708157091571015711157121571315714157151571615717157181571915720157211572215723157241572515726157271572815729157301
573115732157331573415735157361573715738157391574015741157421574315744157451574615747157481574915750157511575215753157541575515756157571575815759157601576115762157631576415765157661576715768157691577015771157721577315774157751577615777157781577915780157811578215783157841578515786157871578815789157901579115792157931579415795157961579715798157991580015801158021580315804158051580615807158081580915810158111581215813158141581515816158171581815819158201582115822158231582415825158261582715828158291583015831158321583315834158351583615837158381583915840158411584215843158441584515846158471584815849158501585115852158531585415855158561585715858158591586015861158621586315864158651586615867158681586915870158711587215873158741587515876158771587815879158801588115882158831588415885158861588715888158891589015891158921589315894158951589615897158981589915900159011590215903159041590515906159071590815909159101591115912159131591415915159161591715918159191592015921159221592315924159251592615927159281592915930159311593215933159341593515936159371593815939159401594115942159431594415945159461594715948159491595015951159521595315954159551595615957159581595915960159611596215963159641596515966159671596815969159701597115972159731597415975159761597715978159791598015981159821598315984159851598615987159881598915990159911599215993159941599515996159971599815999160001600116002160031600416005160061600716008160091601016011160121601316014160151601616017160181601916020160211602216023160241602516026160271602816029160301603116032160331603416035160361603716038160391604016041160421604316044160451604616047160481604916050160511605216053160541605516056160571605816059160601606116062160631606416065160661606716068160691607016071160721607316074160751607616077160781607916080160811608216083160841608516086160871608816089160901609116092160931609416095160961609716098160991610016101161021610316104161051610616107161081610916110161111611216113161141611516116161171611816119161201612116122161231612416125161261612716128161291613016131161321613316134161351613616137161381613916140161411614216143161441614516146161471614816149161501615116152161531615416155161561615716158161591616016161161621616316164161651616616167161681616916170161711617216173161741617516176161771617816179161801618116182161831618416185161861618716188161891619016191161921619316194161951619616197161981619916200162011620216203162041620516206162071620816209162101621116212162131621416215162161621716218162191622016221162221622316224162251622616227162281622916230162311623216233162341623516236162371623816239162401624116242162431624416245162461624716248162491625016251162521625316254162551625616257162581625916260162611626216263162641626516266162671626816269162701627116272162731627416275162761627716278162791628016281162821628316284162851628616287162881628916290162911629216293162941629516296162971629816299163001630116302163031630416305163061630716308163091631016311163121631316314163151631616317163181631916320163211632216323163241632516326163271632816329163301633116332163331633416335163361633716338163391634016341163421634316344163451634616347163481634916350163511635216353163541635516356163571635816359163601636116362163631636416365163661636716368163691637016371163721637316374163751637616377163781637916380163811638216383163841638516386163871638816389163901639116392163931639416395163961639716398163991640016401164021640316404164051640616407164081640916410164111641216413164141641516416164171641816419164201642116422164231642416425164261642716428164291643016431164321643316434164351643616437164381643916440164411
644216443164441644516446164471644816449164501645116452164531645416455164561645716458164591646016461164621646316464164651646616467164681646916470164711647216473164741647516476164771647816479164801648116482164831648416485164861648716488164891649016491164921649316494164951649616497164981649916500165011650216503165041650516506165071650816509165101651116512165131651416515165161651716518165191652016521165221652316524165251652616527165281652916530165311653216533165341653516536165371653816539165401654116542165431654416545165461654716548165491655016551165521655316554165551655616557165581655916560165611656216563165641656516566165671656816569165701657116572165731657416575165761657716578165791658016581165821658316584165851658616587165881658916590165911659216593165941659516596165971659816599166001660116602166031660416605166061660716608166091661016611166121661316614166151661616617166181661916620166211662216623166241662516626166271662816629166301663116632166331663416635166361663716638166391664016641166421664316644166451664616647166481664916650166511665216653166541665516656166571665816659166601666116662166631666416665166661666716668166691667016671166721667316674166751667616677166781667916680166811668216683166841668516686166871668816689166901669116692166931669416695166961669716698166991670016701167021670316704167051670616707167081670916710167111671216713167141671516716167171671816719167201672116722167231672416725167261672716728167291673016731167321673316734167351673616737167381673916740167411674216743167441674516746167471674816749167501675116752167531675416755167561675716758167591676016761167621676316764167651676616767167681676916770167711677216773167741677516776167771677816779167801678116782167831678416785167861678716788167891679016791167921679316794167951679616797167981679916800168011680216803168041680516806168071680816809168101681116812168131681416815168161681716818168191682016821168221682316824168251682616827168281682916830168311683216833168341683516836168371683816839168401684116842168431684416845168461684716848168491685016851168521685316854168551685616857168581685916860168611686216863168641686516866168671686816869168701687116872168731687416875168761687716878168791688016881168821688316884168851688616887168881688916890168911689216893168941689516896168971689816899169001690116902169031690416905169061690716908169091691016911169121691316914169151691616917169181691916920169211692216923169241692516926169271692816929169301693116932169331693416935169361693716938169391694016941169421694316944169451694616947169481694916950169511695216953169541695516956169571695816959169601696116962169631696416965169661696716968169691697016971169721697316974169751697616977169781697916980169811698216983169841698516986169871698816989169901699116992169931699416995169961699716998169991700017001170021700317004170051700617007170081700917010170111701217013170141701517016170171701817019170201702117022170231702417025170261702717028170291703017031170321703317034170351703617037170381703917040170411704217043170441704517046170471704817049170501705117052170531705417055170561705717058170591706017061170621706317064170651706617067170681706917070170711707217073170741707517076170771707817079170801708117082170831708417085170861708717088170891709017091170921709317094170951709617097170981709917100171011710217103171041710517106171071710817109171101711117112171131711417115171161711717118171191712017121171221712317124171251712617127171281712917130171311713217133171341713517136171371713817139171401714117142171431714417145171461714717148171491715017151171521
715317154171551715617157171581715917160171611716217163171641716517166171671716817169171701717117172171731717417175171761717717178171791718017181171821718317184171851718617187171881718917190171911719217193171941719517196171971719817199172001720117202172031720417205172061720717208172091721017211172121721317214172151721617217172181721917220172211722217223172241722517226172271722817229172301723117232172331723417235172361723717238172391724017241172421724317244172451724617247172481724917250172511725217253172541725517256172571725817259172601726117262172631726417265172661726717268172691727017271172721727317274172751727617277172781727917280172811728217283172841728517286172871728817289172901729117292172931729417295172961729717298172991730017301173021730317304173051730617307173081730917310173111731217313173141731517316173171731817319173201732117322173231732417325173261732717328173291733017331173321733317334173351733617337173381733917340173411734217343173441734517346173471734817349173501735117352173531735417355173561735717358173591736017361173621736317364173651736617367173681736917370173711737217373173741737517376173771737817379173801738117382173831738417385173861738717388173891739017391173921739317394173951739617397173981739917400174011740217403174041740517406174071740817409174101741117412174131741417415174161741717418174191742017421174221742317424174251742617427174281742917430174311743217433174341743517436174371743817439174401744117442174431744417445174461744717448174491745017451174521745317454174551745617457174581745917460174611746217463174641746517466174671746817469174701747117472174731747417475174761747717478174791748017481174821748317484174851748617487174881748917490174911749217493174941749517496174971749817499175001750117502175031750417505175061750717508175091751017511175121751317514175151751617517175181751917520175211752217523175241752517526175271752817529175301753117532175331753417535175361753717538175391754017541175421754317544175451754617547175481754917550175511755217553175541755517556175571755817559175601756117562175631756417565175661756717568175691757017571175721757317574175751757617577175781757917580175811758217583175841758517586175871758817589175901759117592175931759417595175961759717598175991760017601176021760317604176051760617607176081760917610176111761217613176141761517616176171761817619176201762117622176231762417625176261762717628176291763017631176321763317634176351763617637176381763917640176411764217643176441764517646176471764817649176501765117652176531765417655176561765717658176591766017661176621766317664176651766617667176681766917670176711767217673176741767517676176771767817679176801768117682176831768417685176861768717688176891769017691176921769317694176951769617697176981769917700177011770217703177041770517706177071770817709177101771117712177131771417715177161771717718177191772017721177221772317724177251772617727177281772917730177311773217733177341773517736177371773817739177401774117742177431774417745177461774717748177491775017751177521775317754177551775617757177581775917760177611776217763177641776517766177671776817769177701777117772177731777417775177761777717778177791778017781177821778317784177851778617787177881778917790177911779217793177941779517796177971779817799178001780117802178031780417805178061780717808178091781017811178121781317814178151781617817178181781917820178211782217823178241782517826178271782817829178301783117832178331783417835178361783717838178391784017841178421784317844178451784617847178481784917850178511785217853178541785517856178571785817859178601786117862178631
78641786517866178671786817869178701787117872178731787417875178761787717878178791788017881178821788317884178851788617887178881788917890178911789217893178941789517896178971789817899179001790117902179031790417905179061790717908179091791017911179121791317914179151791617917179181791917920179211792217923179241792517926179271792817929179301793117932179331793417935179361793717938179391794017941179421794317944179451794617947179481794917950179511795217953179541795517956179571795817959179601796117962179631796417965179661796717968179691797017971179721797317974179751797617977179781797917980179811798217983179841798517986179871798817989179901799117992179931799417995179961799717998179991800018001180021800318004180051800618007180081800918010180111801218013180141801518016180171801818019180201802118022180231802418025180261802718028180291803018031180321803318034180351803618037180381803918040180411804218043180441804518046180471804818049180501805118052180531805418055180561805718058180591806018061180621806318064180651806618067180681806918070180711807218073180741807518076180771807818079180801808118082180831808418085180861808718088180891809018091180921809318094180951809618097180981809918100181011810218103181041810518106181071810818109181101811118112181131811418115181161811718118181191812018121181221812318124181251812618127181281812918130181311813218133181341813518136181371813818139181401814118142181431814418145181461814718148181491815018151181521815318154181551815618157181581815918160181611816218163181641816518166181671816818169181701817118172181731817418175181761817718178181791818018181181821818318184181851818618187181881818918190181911819218193181941819518196181971819818199182001820118202182031820418205182061820718208182091821018211182121821318214182151821618217182181821918220182211822218223182241822518226182271822818229182301823118232182331823418235182361823718238182391824018241182421824318244182451824618247182481824918250182511825218253182541825518256182571825818259182601826118262182631826418265182661826718268182691827018271182721827318274182751827618277182781827918280182811828218283182841828518286182871828818289182901829118292182931829418295182961829718298182991830018301183021830318304183051830618307183081830918310183111831218313183141831518316183171831818319183201832118322183231832418325183261832718328183291833018331183321833318334183351833618337183381833918340183411834218343183441834518346183471834818349183501835118352183531835418355183561835718358183591836018361183621836318364183651836618367183681836918370183711837218373183741837518376183771837818379183801838118382183831838418385183861838718388183891839018391183921839318394183951839618397183981839918400184011840218403
#define _GNU_SOURCE // Defines CLOCK_MONOTONIC on Linux
#define _CRT_SECURE_NO_DEPRECATE // Disables ridiculous "unsafe" warnings on Windows
#include "ggml.h"

#ifdef GGML_USE_K_QUANTS
#include "k_quants.h"
#endif

#if defined(_MSC_VER) || defined(__MINGW32__)
#include <malloc.h> // using malloc.h with MSC/MINGW
#elif !defined(__FreeBSD__) && !defined(__NetBSD__) && !defined(__OpenBSD__)
#include <alloca.h>
#endif

#include <assert.h>
#include <errno.h>
#include <time.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>
#include <float.h>
#include <limits.h>
#include <stdarg.h>
#include <signal.h>

#ifdef GGML_USE_METAL
#include <unistd.h>
#endif
// static_assert should be a #define, but if it's not,
// fall back to the _Static_assert C11 keyword.
// if C99 - static_assert is noop
// ref: https://stackoverflow.com/a/53923785/4039976
#ifndef static_assert
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201100L)
#define static_assert(cond, msg) _Static_assert(cond, msg)
#else
#define static_assert(cond, msg) struct global_scope_noop_trick
#endif
#endif

#if defined(_MSC_VER)
// disable "possible loss of data" to avoid hundreds of casts
// we should just be careful :)
#pragma warning(disable: 4244 4267)
#endif
#if defined(_WIN32)

#include <windows.h>

typedef volatile LONG atomic_int;
typedef atomic_int atomic_bool;

static void atomic_store(atomic_int * ptr, LONG val) {
    InterlockedExchange(ptr, val);
}
static LONG atomic_load(atomic_int * ptr) {
    return InterlockedCompareExchange(ptr, 0, 0);
}
static LONG atomic_fetch_add(atomic_int * ptr, LONG inc) {
    return InterlockedExchangeAdd(ptr, inc);
}
static LONG atomic_fetch_sub(atomic_int * ptr, LONG dec) {
    return atomic_fetch_add(ptr, -(dec));
}

typedef HANDLE pthread_t;

typedef DWORD thread_ret_t;
static int pthread_create(pthread_t * out, void * unused, thread_ret_t (*func)(void *), void * arg) {
    (void) unused;
    HANDLE handle = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE) func, arg, 0, NULL);
    if (handle == NULL)
    {
        return EAGAIN;
    }

    *out = handle;
    return 0;
}

static int pthread_join(pthread_t thread, void * unused) {
    (void) unused;
    return (int) WaitForSingleObject(thread, INFINITE);
}

static int sched_yield (void) {
    Sleep (0);
    return 0;
}
#else
#include <pthread.h>
#include <stdatomic.h>

typedef void * thread_ret_t;

#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#endif
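// Illustrative sketch (not part of ggml): the shims above let the same
// pthread-style code compile on both Windows and POSIX. A minimal worker,
// using only the typedefs and functions defined in the block above:
#if 0
static thread_ret_t example_worker(void * arg) {
    atomic_int * counter = (atomic_int *) arg;
    atomic_fetch_add(counter, 1); // increments atomically on either platform
    return 0;
}

static void example_spawn_join(void) {
    atomic_int counter;
    atomic_store(&counter, 0);

    pthread_t t;
    if (pthread_create(&t, NULL, example_worker, &counter) == 0) {
        pthread_join(t, NULL);
    }
}
#endif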
// __FMA__ and __F16C__ are not defined in MSVC, however they are implied with AVX2/AVX512
#if defined(_MSC_VER) && (defined(__AVX2__) || defined(__AVX512F__))
#ifndef __FMA__
#define __FMA__
#endif
#ifndef __F16C__
#define __F16C__
#endif
#ifndef __SSE3__
#define __SSE3__
#endif
#endif

/*#define GGML_PERF*/
#define GGML_DEBUG 0
#define GGML_GELU_FP16
#define GGML_GELU_QUICK_FP16
#define GGML_SILU_FP16

#define GGML_SOFT_MAX_UNROLL 4
#define GGML_VEC_DOT_UNROLL  2
//
// logging
//

#if (GGML_DEBUG >= 1)
#define GGML_PRINT_DEBUG(...) printf(__VA_ARGS__)
#else
#define GGML_PRINT_DEBUG(...)
#endif

#if (GGML_DEBUG >= 5)
#define GGML_PRINT_DEBUG_5(...) printf(__VA_ARGS__)
#else
#define GGML_PRINT_DEBUG_5(...)
#endif

#if (GGML_DEBUG >= 10)
#define GGML_PRINT_DEBUG_10(...) printf(__VA_ARGS__)
#else
#define GGML_PRINT_DEBUG_10(...)
#endif

#define GGML_PRINT(...) printf(__VA_ARGS__)

#ifdef GGML_USE_ACCELERATE
// uncomment to use vDSP for soft max computation
// note: not sure if it is actually faster
//#define GGML_SOFT_MAX_ACCELERATE
#endif

#if UINTPTR_MAX == 0xFFFFFFFF
    #define GGML_MEM_ALIGN 4
#else
    #define GGML_MEM_ALIGN 16
#endif
#if defined(_MSC_VER) || defined(__MINGW32__)
#define GGML_ALIGNED_MALLOC(size)  _aligned_malloc(size, GGML_MEM_ALIGN)
#define GGML_ALIGNED_FREE(ptr)     _aligned_free(ptr)
#else
inline static void * ggml_aligned_malloc(size_t size) {
    void * aligned_memory = NULL;
#ifdef GGML_USE_METAL
    int result = posix_memalign(&aligned_memory, getpagesize(), size);
#else
    int result = posix_memalign(&aligned_memory, GGML_MEM_ALIGN, size);
#endif
    if (result != 0) {
        // Handle allocation failure
        const char * error_desc = "unknown allocation error";
        switch (result) {
            case EINVAL:
                error_desc = "invalid alignment value";
                break;
            case ENOMEM:
                error_desc = "insufficient memory";
                break;
        }
        GGML_PRINT("%s: %s (attempted to allocate %6.2f MB)\n",
            __func__, error_desc, size/(1024.0*1024.0));
        return NULL;
    }
    return aligned_memory;
}
#define GGML_ALIGNED_MALLOC(size)  ggml_aligned_malloc(size)
#define GGML_ALIGNED_FREE(ptr)     free(ptr)
#endif
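// Illustrative sketch (not part of ggml): GGML_ALIGNED_MALLOC/GGML_ALIGNED_FREE
// are meant to be used as a matched pair. The returned pointer is aligned to
// GGML_MEM_ALIGN (or to the page size when GGML_USE_METAL is defined) and may
// be NULL on failure.
#if 0
static void example_aligned_alloc(void) {
    void * buf = GGML_ALIGNED_MALLOC(1024);
    if (buf == NULL) {
        return; // allocation failed; an error message was already printed
    }
    // ... use buf ...
    GGML_ALIGNED_FREE(buf);
}
#endif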
#define UNUSED GGML_UNUSED

#define SWAP(x, y, T) do { T SWAP = x; x = y; y = SWAP; } while (0)

//
// tensor access macros
//

#define GGML_TENSOR_UNARY_OP_LOCALS \
    GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne); \
    GGML_TENSOR_LOCALS(size_t,  nb0, src0, nb); \
    GGML_TENSOR_LOCALS(int64_t, ne,  dst,  ne); \
    GGML_TENSOR_LOCALS(size_t,  nb,  dst,  nb);

#define GGML_TENSOR_BINARY_OP_LOCALS \
    GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne); \
    GGML_TENSOR_LOCALS(size_t,  nb0, src0, nb); \
    GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne); \
    GGML_TENSOR_LOCALS(size_t,  nb1, src1, nb); \
    GGML_TENSOR_LOCALS(int64_t, ne,  dst,  ne); \
    GGML_TENSOR_LOCALS(size_t,  nb,  dst,  nb);
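// For reference (sketch, not in the original source): GGML_TENSOR_LOCALS comes
// from ggml.h and expands one local per dimension, so
// GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) declares roughly:
//
//   const int64_t ne00 = src0->ne[0];
//   const int64_t ne01 = src0->ne[1];
//   const int64_t ne02 = src0->ne[2];
//   const int64_t ne03 = src0->ne[3];
//
// See ggml.h for the authoritative definition.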
#if defined(GGML_USE_ACCELERATE)
#include <Accelerate/Accelerate.h>
#if defined(GGML_USE_CLBLAST) // allow usage of CLBlast alongside Accelerate functions
#include "ggml-opencl.h"
#endif
#elif defined(GGML_USE_OPENBLAS)
#if defined(GGML_BLAS_USE_MKL)
#include <mkl.h>
#else
#include <cblas.h>
#endif
#elif defined(GGML_USE_CUBLAS)
#include "ggml-cuda.h"
#elif defined(GGML_USE_CLBLAST)
#include "ggml-opencl.h"
#endif

#undef MIN
#undef MAX

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))
// floating point type used to accumulate sums
typedef double ggml_float;

// 16-bit float
// on Arm, we use __fp16
// on x86, we use uint16_t
#ifdef __ARM_NEON

// if YCM cannot find <arm_neon.h>, make a symbolic link to it, for example:
//
//   $ ln -sfn /Library/Developer/CommandLineTools/usr/lib/clang/13.1.6/include/arm_neon.h ./src/
//
#include <arm_neon.h>

#define GGML_COMPUTE_FP16_TO_FP32(x) ((float) (x))
#define GGML_COMPUTE_FP32_TO_FP16(x) (x)

#define GGML_FP16_TO_FP32(x) ((float) (x))
#define GGML_FP32_TO_FP16(x) (x)

#else

#ifdef __wasm_simd128__
#include <wasm_simd128.h>
#else
#ifdef __POWER9_VECTOR__
#include <altivec.h>
#undef bool
#define bool _Bool
#else
#if defined(_MSC_VER) || defined(__MINGW32__)
#include <intrin.h>
#else
#if !defined(__riscv)
#include <immintrin.h>
#endif
#endif
#endif
#endif

#ifdef __F16C__

#ifdef _MSC_VER
#define GGML_COMPUTE_FP16_TO_FP32(x) _mm_cvtss_f32(_mm_cvtph_ps(_mm_cvtsi32_si128(x)))
#define GGML_COMPUTE_FP32_TO_FP16(x) _mm_extract_epi16(_mm_cvtps_ph(_mm_set_ss(x), 0), 0)
#else
#define GGML_COMPUTE_FP16_TO_FP32(x) _cvtsh_ss(x)
#define GGML_COMPUTE_FP32_TO_FP16(x) _cvtss_sh(x, 0)
#endif

#elif defined(__POWER9_VECTOR__)

#define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
#define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x)
/* the inline asm below is about 12% faster than the lookup method */
#define GGML_FP16_TO_FP32(x) GGML_COMPUTE_FP16_TO_FP32(x)
#define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x)

static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) {
    register float f;
    register double d;
    __asm__(
        "mtfprd %0,%2\n"
        "xscvhpdp %0,%0\n"
        "frsp %1,%0\n" :
        /* temp */ "=d"(d),
        /* out */  "=f"(f):
        /* in */   "r"(h));
    return f;
}

static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) {
    register double d;
    register ggml_fp16_t r;
    __asm__( /* xscvdphp can work on double or single precision */
        "xscvdphp %0,%2\n"
        "mffprd %1,%0\n" :
        /* temp */ "=d"(d),
        /* out */  "=r"(r):
        /* in */   "f"(f));
    return r;
}
#else

// FP16 <-> FP32
// ref: https://github.com/Maratyszcza/FP16

static inline float fp32_from_bits(uint32_t w) {
    union {
        uint32_t as_bits;
        float as_value;
    } fp32;
    fp32.as_bits = w;
    return fp32.as_value;
}

static inline uint32_t fp32_to_bits(float f) {
    union {
        float as_value;
        uint32_t as_bits;
    } fp32;
    fp32.as_value = f;
    return fp32.as_bits;
}

static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) {
    const uint32_t w = (uint32_t) h << 16;
    const uint32_t sign = w & UINT32_C(0x80000000);
    const uint32_t two_w = w + w;

    const uint32_t exp_offset = UINT32_C(0xE0) << 23;
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)
    const float exp_scale = 0x1.0p-112f;
#else
    const float exp_scale = fp32_from_bits(UINT32_C(0x7800000));
#endif
    const float normalized_value = fp32_from_bits((two_w >> 4) + exp_offset) * exp_scale;

    const uint32_t magic_mask = UINT32_C(126) << 23;
    const float magic_bias = 0.5f;
    const float denormalized_value = fp32_from_bits((two_w >> 17) | magic_mask) - magic_bias;

    const uint32_t denormalized_cutoff = UINT32_C(1) << 27;
    const uint32_t result = sign |
        (two_w < denormalized_cutoff ? fp32_to_bits(denormalized_value) : fp32_to_bits(normalized_value));
    return fp32_from_bits(result);
}

static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) {
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)
    const float scale_to_inf = 0x1.0p+112f;
    const float scale_to_zero = 0x1.0p-110f;
#else
    const float scale_to_inf = fp32_from_bits(UINT32_C(0x77800000));
    const float scale_to_zero = fp32_from_bits(UINT32_C(0x08800000));
#endif
    float base = (fabsf(f) * scale_to_inf) * scale_to_zero;

    const uint32_t w = fp32_to_bits(f);
    const uint32_t shl1_w = w + w;
    const uint32_t sign = w & UINT32_C(0x80000000);
    uint32_t bias = shl1_w & UINT32_C(0xFF000000);
    if (bias < UINT32_C(0x71000000)) {
        bias = UINT32_C(0x71000000);
    }

    base = fp32_from_bits((bias >> 1) + UINT32_C(0x07800000)) + base;
    const uint32_t bits = fp32_to_bits(base);
    const uint32_t exp_bits = (bits >> 13) & UINT32_C(0x00007C00);
    const uint32_t mantissa_bits = bits & UINT32_C(0x00000FFF);
    const uint32_t nonsign = exp_bits + mantissa_bits;
    return (sign >> 16) | (shl1_w > UINT32_C(0xFF000000) ? UINT16_C(0x7E00) : nonsign);
}

#define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
#define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x)

#endif // __F16C__

#endif // __ARM_NEON
//
// global data
//

// precomputed gelu table for f16 (128 KB)
static ggml_fp16_t table_gelu_f16[1 << 16];

// precomputed quick gelu table for f16 (128 KB)
static ggml_fp16_t table_gelu_quick_f16[1 << 16];

// precomputed silu table for f16 (128 KB)
static ggml_fp16_t table_silu_f16[1 << 16];

// precomputed exp table for f16 (128 KB)
static ggml_fp16_t table_exp_f16[1 << 16];

// precomputed f32 table for f16 (256 KB)
static float table_f32_f16[1 << 16];

#if defined(__ARM_NEON) || defined(__wasm_simd128__)
#define B1(c,s,n)  0x ## n ## c ,  0x ## n ## s
#define B2(c,s,n) B1(c,s,n ## c), B1(c,s,n ## s)
#define B3(c,s,n) B2(c,s,n ## c), B2(c,s,n ## s)
#define B4(c,s,n) B3(c,s,n ## c), B3(c,s,n ## s)
#define B5(c,s,n) B4(c,s,n ## c), B4(c,s,n ## s)
#define B6(c,s,n) B5(c,s,n ## c), B5(c,s,n ## s)
#define B7(c,s,n) B6(c,s,n ## c), B6(c,s,n ## s)
#define B8(c,s  ) B7(c,s,     c), B7(c,s,     s)

// precomputed tables for expanding 8bits to 8 bytes:
static const uint64_t table_b2b_0[1 << 8] = { B8(00, 10) }; // ( b) << 4
static const uint64_t table_b2b_1[1 << 8] = { B8(10, 00) }; // (!b) << 4
#endif
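// For reference (sketch, not in the original source): the B-macros enumerate
// all 256 bit patterns, mapping bit j of the table index to byte j of the
// 64-bit entry. For example:
//
//   table_b2b_0[0x05] == 0x0000000000100010  // bits 0 and 2 set  -> 0x10 bytes
//   table_b2b_1[0x05] == 0x1010101010001000  // bits 0 and 2 clear -> 0x00 bytes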
// On ARM NEON, it's quicker to directly convert x -> x instead of calling into ggml_lookup_fp16_to_fp32,
// so we define GGML_FP16_TO_FP32 and GGML_FP32_TO_FP16 elsewhere for NEON.
// This is also true for POWER9.
#if !defined(GGML_FP16_TO_FP32) || !defined(GGML_FP32_TO_FP16)

inline static float ggml_lookup_fp16_to_fp32(ggml_fp16_t f) {
    uint16_t s;
    memcpy(&s, &f, sizeof(uint16_t));
    return table_f32_f16[s];
}

#define GGML_FP16_TO_FP32(x) ggml_lookup_fp16_to_fp32(x)
#define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x)

#endif

// note: do not use these inside ggml.c
// these are meant to be used via the ggml.h API
float ggml_fp16_to_fp32(ggml_fp16_t x) {
    return (float) GGML_FP16_TO_FP32(x);
}

ggml_fp16_t ggml_fp32_to_fp16(float x) {
    return GGML_FP32_TO_FP16(x);
}
void ggml_fp16_to_fp32_row(const ggml_fp16_t * x, float * y, int n) {
    for (int i = 0; i < n; i++) {
        y[i] = GGML_FP16_TO_FP32(x[i]);
    }
}

void ggml_fp32_to_fp16_row(const float * x, ggml_fp16_t * y, int n) {
    int i = 0;
#if defined(__F16C__)
    for (; i + 7 < n; i += 8) {
        __m256 x_vec = _mm256_loadu_ps(x + i);
        __m128i y_vec = _mm256_cvtps_ph(x_vec, _MM_FROUND_TO_NEAREST_INT);
        _mm_storeu_si128((__m128i *)(y + i), y_vec);
    }
    for (; i + 3 < n; i += 4) {
        __m128 x_vec = _mm_loadu_ps(x + i);
        __m128i y_vec = _mm_cvtps_ph(x_vec, _MM_FROUND_TO_NEAREST_INT);
        _mm_storel_epi64((__m128i *)(y + i), y_vec);
    }
#endif
    for (; i < n; i++) {
        y[i] = GGML_FP32_TO_FP16(x[i]);
    }
}
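// Illustrative sketch (not part of ggml): the functions above are the public
// fp16 entry points. A round trip through fp16 is lossy for values that do
// not fit in half precision:
#if 0
static void example_fp16_roundtrip(void) {
    const float x = 3.14159265f;
    const ggml_fp16_t h = ggml_fp32_to_fp16(x);
    const float y = ggml_fp16_to_fp32(h);
    // y is the nearest representable half-precision value, 3.140625
    printf("%f -> %f\n", x, y);
}
#endif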
//
// timing
//

#if defined(_MSC_VER) || defined(__MINGW32__)
static int64_t timer_freq, timer_start;
void ggml_time_init(void) {
    LARGE_INTEGER t;
    QueryPerformanceFrequency(&t);
    timer_freq = t.QuadPart;

    // The multiplication by 1000 or 1000000 below can cause an overflow if timer_freq
    // and the uptime is high enough.
    // We subtract the program start time to reduce the likelihood of that happening.
    QueryPerformanceCounter(&t);
    timer_start = t.QuadPart;
}
int64_t ggml_time_ms(void) {
    LARGE_INTEGER t;
    QueryPerformanceCounter(&t);
    return ((t.QuadPart-timer_start) * 1000) / timer_freq;
}
int64_t ggml_time_us(void) {
    LARGE_INTEGER t;
    QueryPerformanceCounter(&t);
    return ((t.QuadPart-timer_start) * 1000000) / timer_freq;
}
#else
void ggml_time_init(void) {}
int64_t ggml_time_ms(void) {
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (int64_t)ts.tv_sec*1000 + (int64_t)ts.tv_nsec/1000000;
}

int64_t ggml_time_us(void) {
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (int64_t)ts.tv_sec*1000000 + (int64_t)ts.tv_nsec/1000;
}
#endif

int64_t ggml_cycles(void) {
    return clock();
}

int64_t ggml_cycles_per_ms(void) {
    return CLOCKS_PER_SEC/1000;
}

#ifdef GGML_PERF
#define ggml_perf_time_ms()       ggml_time_ms()
#define ggml_perf_time_us()       ggml_time_us()
#define ggml_perf_cycles()        ggml_cycles()
#define ggml_perf_cycles_per_ms() ggml_cycles_per_ms()
#else
#define ggml_perf_time_ms()       0
#define ggml_perf_time_us()       0
#define ggml_perf_cycles()        0
#define ggml_perf_cycles_per_ms() 0
#endif
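// Illustrative sketch (not part of ggml): ggml_time_us() reports monotonic
// time relative to an arbitrary origin, so only differences are meaningful.
// Typical usage:
#if 0
static void example_timing(void) {
    ggml_time_init(); // required once on Windows to capture timer_freq/timer_start

    const int64_t t0 = ggml_time_us();
    // ... work to be measured ...
    const int64_t t1 = ggml_time_us();

    GGML_PRINT("elapsed: %" PRId64 " us\n", t1 - t0);
}
#endif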
//
// cache line
//

#if defined(__cpp_lib_hardware_interference_size)
#define CACHE_LINE_SIZE hardware_destructive_interference_size
#else
#if defined(__POWER9_VECTOR__)
#define CACHE_LINE_SIZE 128
#else
#define CACHE_LINE_SIZE 64
#endif
#endif

static const size_t CACHE_LINE_SIZE_F32 = CACHE_LINE_SIZE/sizeof(float);

//
// quantization
//

#define MM256_SET_M128I(a, b) _mm256_insertf128_si256(_mm256_castsi128_si256(b), (a), 1)
#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__)
// multiply int8_t, add results pairwise twice
static inline __m128i mul_sum_i8_pairs(const __m128i x, const __m128i y) {
    // Get absolute values of x vectors
    const __m128i ax = _mm_sign_epi8(x, x);
    // Sign the values of the y vectors
    const __m128i sy = _mm_sign_epi8(y, x);
    // Perform multiplication and create 16-bit values
    const __m128i dot = _mm_maddubs_epi16(ax, sy);
    const __m128i ones = _mm_set1_epi16(1);
    return _mm_madd_epi16(ones, dot);
}

#if __AVX__ || __AVX2__ || __AVX512F__
// horizontally add 8 floats
static inline float hsum_float_8(const __m256 x) {
    __m128 res = _mm256_extractf128_ps(x, 1);
    res = _mm_add_ps(res, _mm256_castps256_ps128(x));
    res = _mm_add_ps(res, _mm_movehl_ps(res, res));
    res = _mm_add_ss(res, _mm_movehdup_ps(res));
    return _mm_cvtss_f32(res);
}

// horizontally add 8 int32_t
static inline int hsum_i32_8(const __m256i a) {
    const __m128i sum128 = _mm_add_epi32(_mm256_castsi256_si128(a), _mm256_extractf128_si256(a, 1));
    const __m128i hi64 = _mm_unpackhi_epi64(sum128, sum128);
    const __m128i sum64 = _mm_add_epi32(hi64, sum128);
    const __m128i hi32 = _mm_shuffle_epi32(sum64, _MM_SHUFFLE(2, 3, 0, 1));
    return _mm_cvtsi128_si32(_mm_add_epi32(sum64, hi32));
}

// horizontally add 4 int32_t
static inline int hsum_i32_4(const __m128i a) {
    const __m128i hi64 = _mm_unpackhi_epi64(a, a);
    const __m128i sum64 = _mm_add_epi32(hi64, a);
    const __m128i hi32 = _mm_shuffle_epi32(sum64, _MM_SHUFFLE(2, 3, 0, 1));
    return _mm_cvtsi128_si32(_mm_add_epi32(sum64, hi32));
}
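// For reference (sketch, not in the original source): a scalar model of the
// horizontal sums above. hsum_float_8 computes the same value, up to
// floating-point reassociation, as summing the 8 lanes one by one:
#if 0
static inline float hsum_float_8_scalar(const float v[8]) {
    float sum = 0.0f;
    for (int i = 0; i < 8; ++i) {
        sum += v[i];
    }
    return sum;
}
#endif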
#if defined(__AVX2__) || defined(__AVX512F__)
// spread 32 bits to 32 bytes { 0x00, 0xFF }
static inline __m256i bytes_from_bits_32(const uint8_t * x) {
    uint32_t x32;
    memcpy(&x32, x, sizeof(uint32_t));
    const __m256i shuf_mask = _mm256_set_epi64x(
            0x0303030303030303, 0x0202020202020202,
            0x0101010101010101, 0x0000000000000000);
    __m256i bytes = _mm256_shuffle_epi8(_mm256_set1_epi32(x32), shuf_mask);
    const __m256i bit_mask = _mm256_set1_epi64x(0x7fbfdfeff7fbfdfe);
    bytes = _mm256_or_si256(bytes, bit_mask);
    return _mm256_cmpeq_epi8(bytes, _mm256_set1_epi64x(-1));
}

// Unpack 32 4-bit fields into 32 bytes
// The output vector contains 32 bytes, each one in [ 0 .. 15 ] interval
static inline __m256i bytes_from_nibbles_32(const uint8_t * rsi)
{
    const __m128i tmp = _mm_loadu_si128((const __m128i *)rsi);
    const __m256i bytes = MM256_SET_M128I(_mm_srli_epi16(tmp, 4), tmp);
    const __m256i lowMask = _mm256_set1_epi8( 0xF );
    return _mm256_and_si256(lowMask, bytes);
}

// add int16_t pairwise and return as float vector
static inline __m256 sum_i16_pairs_float(const __m256i x) {
    const __m256i ones = _mm256_set1_epi16(1);
    const __m256i summed_pairs = _mm256_madd_epi16(ones, x);
    return _mm256_cvtepi32_ps(summed_pairs);
}

static inline __m256 mul_sum_us8_pairs_float(const __m256i ax, const __m256i sy) {
#if __AVXVNNI__
    const __m256i zero = _mm256_setzero_si256();
    const __m256i summed_pairs = _mm256_dpbusd_epi32(zero, ax, sy);
    return _mm256_cvtepi32_ps(summed_pairs);
#else
    // Perform multiplication and create 16-bit values
    const __m256i dot = _mm256_maddubs_epi16(ax, sy);
    return sum_i16_pairs_float(dot);
#endif
}

// multiply int8_t, add results pairwise twice and return as float vector
static inline __m256 mul_sum_i8_pairs_float(const __m256i x, const __m256i y) {
#if __AVXVNNIINT8__
    const __m256i zero = _mm256_setzero_si256();
    const __m256i summed_pairs = _mm256_dpbssd_epi32(zero, x, y);
    return _mm256_cvtepi32_ps(summed_pairs);
#else
    // Get absolute values of x vectors
    const __m256i ax = _mm256_sign_epi8(x, x);
    // Sign the values of the y vectors
    const __m256i sy = _mm256_sign_epi8(y, x);
    return mul_sum_us8_pairs_float(ax, sy);
#endif
}

static inline __m128i packNibbles( __m256i bytes )
{
    // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh
#if __AVX512F__
    const __m256i bytes_srli_4 = _mm256_srli_epi16(bytes, 4); // 0000_0000_abcd_0000
    bytes = _mm256_or_si256(bytes, bytes_srli_4);             // 0000_abcd_abcd_efgh
    return _mm256_cvtepi16_epi8(bytes);                       // abcd_efgh
#else
    const __m256i lowByte = _mm256_set1_epi16( 0xFF );
    __m256i high = _mm256_andnot_si256( lowByte, bytes );
    __m256i low = _mm256_and_si256( lowByte, bytes );
    high = _mm256_srli_epi16( high, 4 );
    bytes = _mm256_or_si256( low, high );

    // Compress uint16_t lanes into bytes
    __m128i r0 = _mm256_castsi256_si128( bytes );
    __m128i r1 = _mm256_extracti128_si256( bytes, 1 );
    return _mm_packus_epi16( r0, r1 );
#endif
}
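// For reference (sketch, not in the original source): a scalar model of
// bytes_from_nibbles_32. The 16 input bytes hold 32 packed 4-bit values; the
// low nibbles become the first 16 output bytes, the high nibbles the last 16.
#if 0
static inline void bytes_from_nibbles_32_scalar(const uint8_t * rsi, uint8_t out[32]) {
    for (int i = 0; i < 16; ++i) {
        out[i]      = rsi[i] & 0x0F; // low nibble  -> first 16 bytes
        out[i + 16] = rsi[i] >> 4;   // high nibble -> last 16 bytes
    }
}
#endif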
#elif defined(__AVX__)
// spread 32 bits to 32 bytes { 0x00, 0xFF }
static inline __m256i bytes_from_bits_32(const uint8_t * x) {
    uint32_t x32;
    memcpy(&x32, x, sizeof(uint32_t));
    const __m128i shuf_maskl = _mm_set_epi64x(0x0101010101010101, 0x0000000000000000);
    const __m128i shuf_maskh = _mm_set_epi64x(0x0303030303030303, 0x0202020202020202);
    __m128i bytesl = _mm_shuffle_epi8(_mm_set1_epi32(x32), shuf_maskl);
    __m128i bytesh = _mm_shuffle_epi8(_mm_set1_epi32(x32), shuf_maskh);
    const __m128i bit_mask = _mm_set1_epi64x(0x7fbfdfeff7fbfdfe);
    bytesl = _mm_or_si128(bytesl, bit_mask);
    bytesh = _mm_or_si128(bytesh, bit_mask);
    bytesl = _mm_cmpeq_epi8(bytesl, _mm_set1_epi64x(-1));
    bytesh = _mm_cmpeq_epi8(bytesh, _mm_set1_epi64x(-1));
    return MM256_SET_M128I(bytesh, bytesl);
}

// Unpack 32 4-bit fields into 32 bytes
// The output vector contains 32 bytes, each one in [ 0 .. 15 ] interval
static inline __m256i bytes_from_nibbles_32(const uint8_t * rsi)
{
    // Load 16 bytes from memory
    __m128i tmpl = _mm_loadu_si128((const __m128i *)rsi);
    __m128i tmph = _mm_srli_epi16(tmpl, 4);
    const __m128i lowMask = _mm_set1_epi8(0xF);
    tmpl = _mm_and_si128(lowMask, tmpl);
    tmph = _mm_and_si128(lowMask, tmph);
    return MM256_SET_M128I(tmph, tmpl);
}

// add int16_t pairwise and return as float vector
static inline __m256 sum_i16_pairs_float(const __m128i xh, const __m128i xl) {
    const __m128i ones = _mm_set1_epi16(1);
    const __m128i summed_pairsl = _mm_madd_epi16(ones, xl);
    const __m128i summed_pairsh = _mm_madd_epi16(ones, xh);
    const __m256i summed_pairs = MM256_SET_M128I(summed_pairsh, summed_pairsl);
    return _mm256_cvtepi32_ps(summed_pairs);
}

static inline __m256 mul_sum_us8_pairs_float(const __m256i ax, const __m256i sy) {
    const __m128i axl = _mm256_castsi256_si128(ax);
    const __m128i axh = _mm256_extractf128_si256(ax, 1);
    const __m128i syl = _mm256_castsi256_si128(sy);
    const __m128i syh = _mm256_extractf128_si256(sy, 1);
    // Perform multiplication and create 16-bit values
    const __m128i dotl = _mm_maddubs_epi16(axl, syl);
    const __m128i doth = _mm_maddubs_epi16(axh, syh);
    return sum_i16_pairs_float(doth, dotl);
}

// multiply int8_t, add results pairwise twice and return as float vector
static inline __m256 mul_sum_i8_pairs_float(const __m256i x, const __m256i y) {
    const __m128i xl = _mm256_castsi256_si128(x);
    const __m128i xh = _mm256_extractf128_si256(x, 1);
    const __m128i yl = _mm256_castsi256_si128(y);
    const __m128i yh = _mm256_extractf128_si256(y, 1);
    // Get absolute values of x vectors
    const __m128i axl = _mm_sign_epi8(xl, xl);
    const __m128i axh = _mm_sign_epi8(xh, xh);
    // Sign the values of the y vectors
    const __m128i syl = _mm_sign_epi8(yl, xl);
    const __m128i syh = _mm_sign_epi8(yh, xh);
    // Perform multiplication and create 16-bit values
    const __m128i dotl = _mm_maddubs_epi16(axl, syl);
    const __m128i doth = _mm_maddubs_epi16(axh, syh);
    return sum_i16_pairs_float(doth, dotl);
}

static inline __m128i packNibbles( __m128i bytes1, __m128i bytes2 )
{
    // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh
    const __m128i lowByte = _mm_set1_epi16( 0xFF );
    __m128i high = _mm_andnot_si128( lowByte, bytes1 );
    __m128i low = _mm_and_si128( lowByte, bytes1 );
    high = _mm_srli_epi16( high, 4 );
    bytes1 = _mm_or_si128( low, high );
    high = _mm_andnot_si128( lowByte, bytes2 );
    low = _mm_and_si128( lowByte, bytes2 );
    high = _mm_srli_epi16( high, 4 );
    bytes2 = _mm_or_si128( low, high );
    return _mm_packus_epi16( bytes1, bytes2);
}
#endif
#elif defined(__SSSE3__)
// horizontally add 4x4 floats
static inline float hsum_float_4x4(const __m128 a, const __m128 b, const __m128 c, const __m128 d) {
    __m128 res_0 =_mm_hadd_ps(a, b);
    __m128 res_1 =_mm_hadd_ps(c, d);
    __m128 res =_mm_hadd_ps(res_0, res_1);
    res =_mm_hadd_ps(res, res);
    res =_mm_hadd_ps(res, res);
    return _mm_cvtss_f32(res);
}
#endif // __AVX__ || __AVX2__ || __AVX512F__
#endif // defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__)
#if defined(__ARM_NEON)

#if !defined(__aarch64__)

inline static uint16_t vaddvq_u8(uint8x16_t v) {
    return
        (uint16_t)vgetq_lane_u8(v, 0)  + (uint16_t)vgetq_lane_u8(v, 1)  +
        (uint16_t)vgetq_lane_u8(v, 2)  + (uint16_t)vgetq_lane_u8(v, 3)  +
        (uint16_t)vgetq_lane_u8(v, 4)  + (uint16_t)vgetq_lane_u8(v, 5)  +
        (uint16_t)vgetq_lane_u8(v, 6)  + (uint16_t)vgetq_lane_u8(v, 7)  +
        (uint16_t)vgetq_lane_u8(v, 8)  + (uint16_t)vgetq_lane_u8(v, 9)  +
        (uint16_t)vgetq_lane_u8(v, 10) + (uint16_t)vgetq_lane_u8(v, 11) +
        (uint16_t)vgetq_lane_u8(v, 12) + (uint16_t)vgetq_lane_u8(v, 13) +
        (uint16_t)vgetq_lane_u8(v, 14) + (uint16_t)vgetq_lane_u8(v, 15);
}

inline static int16_t vaddvq_s8(int8x16_t v) {
    return
        (int16_t)vgetq_lane_s8(v, 0)  + (int16_t)vgetq_lane_s8(v, 1)  +
        (int16_t)vgetq_lane_s8(v, 2)  + (int16_t)vgetq_lane_s8(v, 3)  +
        (int16_t)vgetq_lane_s8(v, 4)  + (int16_t)vgetq_lane_s8(v, 5)  +
        (int16_t)vgetq_lane_s8(v, 6)  + (int16_t)vgetq_lane_s8(v, 7)  +
        (int16_t)vgetq_lane_s8(v, 8)  + (int16_t)vgetq_lane_s8(v, 9)  +
        (int16_t)vgetq_lane_s8(v, 10) + (int16_t)vgetq_lane_s8(v, 11) +
        (int16_t)vgetq_lane_s8(v, 12) + (int16_t)vgetq_lane_s8(v, 13) +
        (int16_t)vgetq_lane_s8(v, 14) + (int16_t)vgetq_lane_s8(v, 15);
}

inline static int32_t vaddvq_s16(int16x8_t v) {
    return
        (int32_t)vgetq_lane_s16(v, 0) + (int32_t)vgetq_lane_s16(v, 1) +
        (int32_t)vgetq_lane_s16(v, 2) + (int32_t)vgetq_lane_s16(v, 3) +
        (int32_t)vgetq_lane_s16(v, 4) + (int32_t)vgetq_lane_s16(v, 5) +
        (int32_t)vgetq_lane_s16(v, 6) + (int32_t)vgetq_lane_s16(v, 7);
}

inline static uint32_t vaddvq_u16(uint16x8_t v) {
    return
        (uint32_t)vgetq_lane_u16(v, 0) + (uint32_t)vgetq_lane_u16(v, 1) +
        (uint32_t)vgetq_lane_u16(v, 2) + (uint32_t)vgetq_lane_u16(v, 3) +
        (uint32_t)vgetq_lane_u16(v, 4) + (uint32_t)vgetq_lane_u16(v, 5) +
        (uint32_t)vgetq_lane_u16(v, 6) + (uint32_t)vgetq_lane_u16(v, 7);
}

inline static int32_t vaddvq_s32(int32x4_t v) {
    return vgetq_lane_s32(v, 0) + vgetq_lane_s32(v, 1) + vgetq_lane_s32(v, 2) + vgetq_lane_s32(v, 3);
}

inline static float vaddvq_f32(float32x4_t v) {
    return vgetq_lane_f32(v, 0) + vgetq_lane_f32(v, 1) + vgetq_lane_f32(v, 2) + vgetq_lane_f32(v, 3);
}

inline static float vminvq_f32(float32x4_t v) {
    return
        MIN(MIN(vgetq_lane_f32(v, 0), vgetq_lane_f32(v, 1)),
            MIN(vgetq_lane_f32(v, 2), vgetq_lane_f32(v, 3)));
}

inline static float vmaxvq_f32(float32x4_t v) {
    return
        MAX(MAX(vgetq_lane_f32(v, 0), vgetq_lane_f32(v, 1)),
            MAX(vgetq_lane_f32(v, 2), vgetq_lane_f32(v, 3)));
}

inline static int32x4_t vcvtnq_s32_f32(float32x4_t v) {
    int32x4_t res;

    res[0] = roundf(vgetq_lane_f32(v, 0));
    res[1] = roundf(vgetq_lane_f32(v, 1));
    res[2] = roundf(vgetq_lane_f32(v, 2));
    res[3] = roundf(vgetq_lane_f32(v, 3));

    return res;
}

#endif
#endif
  764. #define QK4_0 32
  765. typedef struct {
  766. ggml_fp16_t d; // delta
  767. uint8_t qs[QK4_0 / 2]; // nibbles / quants
  768. } block_q4_0;
  769. static_assert(sizeof(block_q4_0) == sizeof(ggml_fp16_t) + QK4_0 / 2, "wrong q4_0 block size/padding");
  770. #define QK4_1 32
  771. typedef struct {
  772. ggml_fp16_t d; // delta
  773. ggml_fp16_t m; // min
  774. uint8_t qs[QK4_1 / 2]; // nibbles / quants
  775. } block_q4_1;
  776. static_assert(sizeof(block_q4_1) == 2 * sizeof(ggml_fp16_t) + QK4_1 / 2, "wrong q4_1 block size/padding");
  777. #define QK5_0 32
  778. typedef struct {
  779. ggml_fp16_t d; // delta
  780. uint8_t qh[4]; // 5-th bit of quants
  781. uint8_t qs[QK5_0 / 2]; // nibbles / quants
  782. } block_q5_0;
  783. static_assert(sizeof(block_q5_0) == sizeof(ggml_fp16_t) + sizeof(uint32_t) + QK5_0 / 2, "wrong q5_0 block size/padding");
  784. #define QK5_1 32
  785. typedef struct {
  786. ggml_fp16_t d; // delta
  787. ggml_fp16_t m; // min
  788. uint8_t qh[4]; // 5-th bit of quants
  789. uint8_t qs[QK5_1 / 2]; // nibbles / quants
  790. } block_q5_1;
  791. static_assert(sizeof(block_q5_1) == 2 * sizeof(ggml_fp16_t) + sizeof(uint32_t) + QK5_1 / 2, "wrong q5_1 block size/padding");
  792. #define QK8_0 32
  793. typedef struct {
  794. ggml_fp16_t d; // delta
  795. int8_t qs[QK8_0]; // quants
  796. } block_q8_0;
  797. static_assert(sizeof(block_q8_0) == sizeof(ggml_fp16_t) + QK8_0, "wrong q8_0 block size/padding");
  798. #define QK8_1 32
  799. typedef struct {
  800. float d; // delta
  801. float s; // d * sum(qs[i])
  802. int8_t qs[QK8_1]; // quants
  803. } block_q8_1;
  804. static_assert(sizeof(block_q8_1) == 2*sizeof(float) + QK8_1, "wrong q8_1 block size/padding");
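
// Size note (arithmetic from the static_asserts above): a q4_0 block packs 32
// weights into 2 + 16 = 18 bytes, i.e. 4.5 bits per weight; q5_0 takes
// 2 + 4 + 16 = 22 bytes (5.5 bpw) and q8_0 takes 2 + 32 = 34 bytes (8.5 bpw).
// The per-block scale (plus the min, for the _1 variants) is what pushes the
// cost above the nominal 4/5/8 bits.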
// reference implementation for deterministic creation of model files
static void quantize_row_q4_0_reference(const float * restrict x, block_q4_0 * restrict y, int k) {
    static const int qk = QK4_0;

    assert(k % qk == 0);

    const int nb = k / qk;

    for (int i = 0; i < nb; i++) {
        float amax = 0.0f; // absolute max
        float max  = 0.0f;

        for (int j = 0; j < qk; j++) {
            const float v = x[i*qk + j];
            if (amax < fabsf(v)) {
                amax = fabsf(v);
                max  = v;
            }
        }

        const float d  = max / -8;
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = GGML_FP32_TO_FP16(d);

        for (int j = 0; j < qk/2; ++j) {
            const float x0 = x[i*qk + 0    + j]*id;
            const float x1 = x[i*qk + qk/2 + j]*id;

            const uint8_t xi0 = MIN(15, (int8_t)(x0 + 8.5f));
            const uint8_t xi1 = MIN(15, (int8_t)(x1 + 8.5f));

            y[i].qs[j]  = xi0;
            y[i].qs[j] |= xi1 << 4;
        }
    }
}
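
// Worked example (illustrative): if the largest-magnitude value in a block is
// max = -2.0f, then d = -2.0f / -8 = 0.25f and id = 4.0f; the value -2.0f maps
// to (int8_t)(-2.0f*4.0f + 8.5f) = 0, which dequantizes to (0 - 8)*0.25f = -2.0f.
// Dividing by -8 guarantees the extreme value always lands on quant level 0,
// so it round-trips exactly regardless of its sign.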
static void quantize_row_q4_0(const float * restrict x, void * restrict y, int k) {
    quantize_row_q4_0_reference(x, y, k);
}

static void quantize_row_q4_1_reference(const float * restrict x, block_q4_1 * restrict y, int k) {
    const int qk = QK4_1;

    assert(k % qk == 0);

    const int nb = k / qk;

    for (int i = 0; i < nb; i++) {
        float min = FLT_MAX;
        float max = -FLT_MAX;

        for (int j = 0; j < qk; j++) {
            const float v = x[i*qk + j];

            if (v < min) min = v;
            if (v > max) max = v;
        }

        const float d  = (max - min) / ((1 << 4) - 1);
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = GGML_FP32_TO_FP16(d);
        y[i].m = GGML_FP32_TO_FP16(min);

        for (int j = 0; j < qk/2; ++j) {
            const float x0 = (x[i*qk + 0    + j] - min)*id;
            const float x1 = (x[i*qk + qk/2 + j] - min)*id;

            const uint8_t xi0 = MIN(15, (int8_t)(x0 + 0.5f));
            const uint8_t xi1 = MIN(15, (int8_t)(x1 + 0.5f));

            y[i].qs[j]  = xi0;
            y[i].qs[j] |= xi1 << 4;
        }
    }
}
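
// Worked example (illustrative): with min = -1.0f and max = 2.0f the block gets
// d = 3.0f/15 = 0.2f. A value of 0.5f maps to (0.5f + 1.0f)*5.0f = 7.5f, which
// rounds to quant 8 and dequantizes to 8*0.2f - 1.0f = 0.6f (error 0.1f).
// Unlike q4_0, the affine (d, m) pair can represent asymmetric value ranges
// without wasting levels on values that never occur.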
static void quantize_row_q4_1(const float * restrict x, void * restrict y, int k) {
    quantize_row_q4_1_reference(x, y, k);
}

static void quantize_row_q5_0_reference(const float * restrict x, block_q5_0 * restrict y, int k) {
    static const int qk = QK5_0;

    assert(k % qk == 0);

    const int nb = k / qk;

    for (int i = 0; i < nb; i++) {
        float amax = 0.0f; // absolute max
        float max  = 0.0f;

        for (int j = 0; j < qk; j++) {
            const float v = x[i*qk + j];
            if (amax < fabsf(v)) {
                amax = fabsf(v);
                max  = v;
            }
        }

        const float d  = max / -16;
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = GGML_FP32_TO_FP16(d);

        uint32_t qh = 0;

        for (int j = 0; j < qk/2; ++j) {
            const float x0 = x[i*qk + 0    + j]*id;
            const float x1 = x[i*qk + qk/2 + j]*id;

            const uint8_t xi0 = MIN(31, (int8_t)(x0 + 16.5f));
            const uint8_t xi1 = MIN(31, (int8_t)(x1 + 16.5f));

            y[i].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4);

            // get the 5-th bit and store it in qh at the right position
            qh |= ((xi0 & 0x10) >> 4) << (j + 0);
            qh |= ((xi1 & 0x10) >> 4) << (j + qk/2);
        }

        memcpy(&y[i].qh, &qh, sizeof(qh));
    }
}
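
// Packing note (restating the code above): each 5-bit quant is split into a
// low nibble stored in qs and a 5-th bit gathered into the 32-bit qh word:
// bit j of qh holds the 5-th bit of low-half element j, and bit j + 16 that of
// high-half element j. dequantize_row_q5_0 below undoes this with
//     xh_0 = ((qh >> (j +  0)) << 4) & 0x10;  // bit j      -> bit 4
//     xh_1 = ((qh >> (j + 12))     ) & 0x10;  // bit j + 16 -> bit 4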
static void quantize_row_q5_0(const float * restrict x, void * restrict y, int k) {
    quantize_row_q5_0_reference(x, y, k);
}

static void quantize_row_q5_1_reference(const float * restrict x, block_q5_1 * restrict y, int k) {
    const int qk = QK5_1;

    assert(k % qk == 0);

    const int nb = k / qk;

    for (int i = 0; i < nb; i++) {
        float min = FLT_MAX;
        float max = -FLT_MAX;

        for (int j = 0; j < qk; j++) {
            const float v = x[i*qk + j];

            if (v < min) min = v;
            if (v > max) max = v;
        }

        const float d  = (max - min) / ((1 << 5) - 1);
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = GGML_FP32_TO_FP16(d);
        y[i].m = GGML_FP32_TO_FP16(min);

        uint32_t qh = 0;

        for (int j = 0; j < qk/2; ++j) {
            const float x0 = (x[i*qk + 0    + j] - min)*id;
            const float x1 = (x[i*qk + qk/2 + j] - min)*id;

            const uint8_t xi0 = (uint8_t)(x0 + 0.5f);
            const uint8_t xi1 = (uint8_t)(x1 + 0.5f);

            y[i].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4);

            // get the 5-th bit and store it in qh at the right position
            qh |= ((xi0 & 0x10) >> 4) << (j + 0);
            qh |= ((xi1 & 0x10) >> 4) << (j + qk/2);
        }

        memcpy(&y[i].qh, &qh, sizeof(y[i].qh));
    }
}

static void quantize_row_q5_1(const float * restrict x, void * restrict y, int k) {
    quantize_row_q5_1_reference(x, y, k);
}

// reference implementation for deterministic creation of model files
static void quantize_row_q8_0_reference(const float * restrict x, block_q8_0 * restrict y, int k) {
    assert(k % QK8_0 == 0);
    const int nb = k / QK8_0;

    for (int i = 0; i < nb; i++) {
        float amax = 0.0f; // absolute max

        for (int j = 0; j < QK8_0; j++) {
            const float v = x[i*QK8_0 + j];
            amax = MAX(amax, fabsf(v));
        }

        const float d = amax / ((1 << 7) - 1);
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = GGML_FP32_TO_FP16(d);

        for (int j = 0; j < QK8_0; ++j) {
            const float x0 = x[i*QK8_0 + j]*id;

            y[i].qs[j] = roundf(x0);
        }
    }
}
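
// Note (illustrative): q8_0 is plain symmetric int8 quantization. With
// d = amax/127 we have |x*id| <= 127 for every x in the block, so roundf(x*id)
// always fits in int8_t without clamping, and dequantization is just qs[j]*d.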
static void quantize_row_q8_0(const float * restrict x, void * restrict vy, int k) {
    assert(QK8_0 == 32);
    assert(k % QK8_0 == 0);
    const int nb = k / QK8_0;

    block_q8_0 * restrict y = vy;

#if defined(__ARM_NEON)
    for (int i = 0; i < nb; i++) {
        float32x4_t srcv [8];
        float32x4_t asrcv[8];
        float32x4_t amaxv[8];

        for (int j = 0; j < 8; j++) srcv[j]  = vld1q_f32(x + i*32 + 4*j);
        for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[j]);

        for (int j = 0; j < 4; j++) amaxv[2*j] = vmaxq_f32(asrcv[2*j], asrcv[2*j+1]);
        for (int j = 0; j < 2; j++) amaxv[4*j] = vmaxq_f32(amaxv[4*j], amaxv[4*j+2]);
        for (int j = 0; j < 1; j++) amaxv[8*j] = vmaxq_f32(amaxv[8*j], amaxv[8*j+4]);

        const float amax = vmaxvq_f32(amaxv[0]);

        const float d = amax / ((1 << 7) - 1);
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = GGML_FP32_TO_FP16(d);

        for (int j = 0; j < 8; j++) {
            const float32x4_t v  = vmulq_n_f32(srcv[j], id);
            const int32x4_t   vi = vcvtnq_s32_f32(v);

            y[i].qs[4*j + 0] = vgetq_lane_s32(vi, 0);
            y[i].qs[4*j + 1] = vgetq_lane_s32(vi, 1);
            y[i].qs[4*j + 2] = vgetq_lane_s32(vi, 2);
            y[i].qs[4*j + 3] = vgetq_lane_s32(vi, 3);
        }
    }
#elif defined(__wasm_simd128__)
    for (int i = 0; i < nb; i++) {
        v128_t srcv [8];
        v128_t asrcv[8];
        v128_t amaxv[8];

        for (int j = 0; j < 8; j++) srcv[j]  = wasm_v128_load(x + i*32 + 4*j);
        for (int j = 0; j < 8; j++) asrcv[j] = wasm_f32x4_abs(srcv[j]);

        for (int j = 0; j < 4; j++) amaxv[2*j] = wasm_f32x4_max(asrcv[2*j], asrcv[2*j+1]);
        for (int j = 0; j < 2; j++) amaxv[4*j] = wasm_f32x4_max(amaxv[4*j], amaxv[4*j+2]);
        for (int j = 0; j < 1; j++) amaxv[8*j] = wasm_f32x4_max(amaxv[8*j], amaxv[8*j+4]);

        const float amax = MAX(MAX(wasm_f32x4_extract_lane(amaxv[0], 0),
                                   wasm_f32x4_extract_lane(amaxv[0], 1)),
                               MAX(wasm_f32x4_extract_lane(amaxv[0], 2),
                                   wasm_f32x4_extract_lane(amaxv[0], 3)));

        const float d = amax / ((1 << 7) - 1);
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = GGML_FP32_TO_FP16(d);

        for (int j = 0; j < 8; j++) {
            const v128_t v  = wasm_f32x4_mul(srcv[j], wasm_f32x4_splat(id));
            const v128_t vi = wasm_i32x4_trunc_sat_f32x4(v);

            y[i].qs[4*j + 0] = wasm_i32x4_extract_lane(vi, 0);
            y[i].qs[4*j + 1] = wasm_i32x4_extract_lane(vi, 1);
            y[i].qs[4*j + 2] = wasm_i32x4_extract_lane(vi, 2);
            y[i].qs[4*j + 3] = wasm_i32x4_extract_lane(vi, 3);
        }
    }
#elif defined(__AVX2__) || defined(__AVX__)
    for (int i = 0; i < nb; i++) {
        // Load elements into 4 AVX vectors
        __m256 v0 = _mm256_loadu_ps( x );
        __m256 v1 = _mm256_loadu_ps( x + 8 );
        __m256 v2 = _mm256_loadu_ps( x + 16 );
        __m256 v3 = _mm256_loadu_ps( x + 24 );
        x += 32;

        // Compute max(abs(e)) for the block
        const __m256 signBit = _mm256_set1_ps( -0.0f );
        __m256 maxAbs = _mm256_andnot_ps( signBit, v0 );
        maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) );
        maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) );
        maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) );

        __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) );
        max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) );
        max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) );
        const float maxScalar = _mm_cvtss_f32( max4 );

        // Quantize these floats
        const float d = maxScalar / 127.f;
        y[i].d = GGML_FP32_TO_FP16(d);
        const float id = ( maxScalar != 0.0f ) ? 127.f / maxScalar : 0.0f;
        const __m256 mul = _mm256_set1_ps( id );

        // Apply the multiplier
        v0 = _mm256_mul_ps( v0, mul );
        v1 = _mm256_mul_ps( v1, mul );
        v2 = _mm256_mul_ps( v2, mul );
        v3 = _mm256_mul_ps( v3, mul );

        // Round to nearest integer
        v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST );
        v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST );
        v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST );
        v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST );

        // Convert floats to integers
        __m256i i0 = _mm256_cvtps_epi32( v0 );
        __m256i i1 = _mm256_cvtps_epi32( v1 );
        __m256i i2 = _mm256_cvtps_epi32( v2 );
        __m256i i3 = _mm256_cvtps_epi32( v3 );

#if defined(__AVX2__)
        // Convert int32 to int16
        i0 = _mm256_packs_epi32( i0, i1 );  // 0, 1, 2, 3,  8, 9, 10, 11,  4, 5, 6, 7, 12, 13, 14, 15
        i2 = _mm256_packs_epi32( i2, i3 );  // 16, 17, 18, 19, 24, 25, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31
        // Convert int16 to int8
        i0 = _mm256_packs_epi16( i0, i2 );  // 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27, 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31

        // We got our precious signed bytes, but the order is now wrong
        // These AVX2 pack instructions process 16-byte pieces independently
        // The following instruction fixes the order
        const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 );
        i0 = _mm256_permutevar8x32_epi32( i0, perm );

        _mm256_storeu_si256((__m256i *)y[i].qs, i0);
#else
        // AVX lacks the needed 256-bit integer instructions,
        // so we split the registers in half and use the SSE counterparts
        __m128i ni0 = _mm256_castsi256_si128( i0 );
        __m128i ni1 = _mm256_extractf128_si256( i0, 1);
        __m128i ni2 = _mm256_castsi256_si128( i1 );
        __m128i ni3 = _mm256_extractf128_si256( i1, 1);
        __m128i ni4 = _mm256_castsi256_si128( i2 );
        __m128i ni5 = _mm256_extractf128_si256( i2, 1);
        __m128i ni6 = _mm256_castsi256_si128( i3 );
        __m128i ni7 = _mm256_extractf128_si256( i3, 1);

        // Convert int32 to int16
        ni0 = _mm_packs_epi32( ni0, ni1 );
        ni2 = _mm_packs_epi32( ni2, ni3 );
        ni4 = _mm_packs_epi32( ni4, ni5 );
        ni6 = _mm_packs_epi32( ni6, ni7 );
        // Convert int16 to int8
        ni0 = _mm_packs_epi16( ni0, ni2 );
        ni4 = _mm_packs_epi16( ni4, ni6 );

        _mm_storeu_si128((__m128i *)(y[i].qs +  0), ni0);
        _mm_storeu_si128((__m128i *)(y[i].qs + 16), ni4);
#endif
    }
#else
    // scalar
    quantize_row_q8_0_reference(x, y, k);
#endif
}

// reference implementation for deterministic creation of model files
static void quantize_row_q8_1_reference(const float * restrict x, block_q8_1 * restrict y, int k) {
    assert(QK8_1 == 32);
    assert(k % QK8_1 == 0);
    const int nb = k / QK8_1;

    for (int i = 0; i < nb; i++) {
        float amax = 0.0f; // absolute max

        for (int j = 0; j < QK8_1; j++) {
            const float v = x[i*QK8_1 + j];
            amax = MAX(amax, fabsf(v));
        }

        const float d = amax / ((1 << 7) - 1);
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = d;

        int sum = 0;

        for (int j = 0; j < QK8_1/2; ++j) {
            const float v0 = x[i*QK8_1           + j]*id;
            const float v1 = x[i*QK8_1 + QK8_1/2 + j]*id;

            y[i].qs[          j] = roundf(v0);
            y[i].qs[QK8_1/2 + j] = roundf(v1);

            sum += y[i].qs[          j];
            sum += y[i].qs[QK8_1/2 + j];
        }

        y[i].s = sum*d;
    }
}
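
// Note (assumption about how q8_1 is consumed elsewhere in this file):
// precomputing s = d*sum(qs[i]) presumably lets the q4_1/q5_1 dot products
// fold their per-block min term in as a single m_x * s_y multiply-add instead
// of re-summing the 32 int8 quants for every block pair.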
static void quantize_row_q8_1(const float * restrict x, void * restrict vy, int k) {
    assert(k % QK8_1 == 0);
    const int nb = k / QK8_1;

    block_q8_1 * restrict y = vy;

#if defined(__ARM_NEON)
    for (int i = 0; i < nb; i++) {
        float32x4_t srcv [8];
        float32x4_t asrcv[8];
        float32x4_t amaxv[8];

        for (int j = 0; j < 8; j++) srcv[j]  = vld1q_f32(x + i*32 + 4*j);
        for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[j]);

        for (int j = 0; j < 4; j++) amaxv[2*j] = vmaxq_f32(asrcv[2*j], asrcv[2*j+1]);
        for (int j = 0; j < 2; j++) amaxv[4*j] = vmaxq_f32(amaxv[4*j], amaxv[4*j+2]);
        for (int j = 0; j < 1; j++) amaxv[8*j] = vmaxq_f32(amaxv[8*j], amaxv[8*j+4]);

        const float amax = vmaxvq_f32(amaxv[0]);

        const float d = amax / ((1 << 7) - 1);
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = d;

        int32x4_t accv = vdupq_n_s32(0);

        for (int j = 0; j < 8; j++) {
            const float32x4_t v  = vmulq_n_f32(srcv[j], id);
            const int32x4_t   vi = vcvtnq_s32_f32(v);

            y[i].qs[4*j + 0] = vgetq_lane_s32(vi, 0);
            y[i].qs[4*j + 1] = vgetq_lane_s32(vi, 1);
            y[i].qs[4*j + 2] = vgetq_lane_s32(vi, 2);
            y[i].qs[4*j + 3] = vgetq_lane_s32(vi, 3);

            accv = vaddq_s32(accv, vi);
        }

        y[i].s = d * vaddvq_s32(accv);
    }
#elif defined(__wasm_simd128__)
    for (int i = 0; i < nb; i++) {
        v128_t srcv [8];
        v128_t asrcv[8];
        v128_t amaxv[8];

        for (int j = 0; j < 8; j++) srcv[j]  = wasm_v128_load(x + i*32 + 4*j);
        for (int j = 0; j < 8; j++) asrcv[j] = wasm_f32x4_abs(srcv[j]);

        for (int j = 0; j < 4; j++) amaxv[2*j] = wasm_f32x4_max(asrcv[2*j], asrcv[2*j+1]);
        for (int j = 0; j < 2; j++) amaxv[4*j] = wasm_f32x4_max(amaxv[4*j], amaxv[4*j+2]);
        for (int j = 0; j < 1; j++) amaxv[8*j] = wasm_f32x4_max(amaxv[8*j], amaxv[8*j+4]);

        const float amax = MAX(MAX(wasm_f32x4_extract_lane(amaxv[0], 0),
                                   wasm_f32x4_extract_lane(amaxv[0], 1)),
                               MAX(wasm_f32x4_extract_lane(amaxv[0], 2),
                                   wasm_f32x4_extract_lane(amaxv[0], 3)));

        const float d = amax / ((1 << 7) - 1);
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = d;

        v128_t accv = wasm_i32x4_splat(0);

        for (int j = 0; j < 8; j++) {
            const v128_t v  = wasm_f32x4_mul(srcv[j], wasm_f32x4_splat(id));
            const v128_t vi = wasm_i32x4_trunc_sat_f32x4(v);

            y[i].qs[4*j + 0] = wasm_i32x4_extract_lane(vi, 0);
            y[i].qs[4*j + 1] = wasm_i32x4_extract_lane(vi, 1);
            y[i].qs[4*j + 2] = wasm_i32x4_extract_lane(vi, 2);
            y[i].qs[4*j + 3] = wasm_i32x4_extract_lane(vi, 3);

            accv = wasm_i32x4_add(accv, vi);
        }

        y[i].s = d * (wasm_i32x4_extract_lane(accv, 0) +
                      wasm_i32x4_extract_lane(accv, 1) +
                      wasm_i32x4_extract_lane(accv, 2) +
                      wasm_i32x4_extract_lane(accv, 3));
    }
#elif defined(__AVX2__) || defined(__AVX__)
    for (int i = 0; i < nb; i++) {
        // Load elements into 4 AVX vectors
        __m256 v0 = _mm256_loadu_ps( x );
        __m256 v1 = _mm256_loadu_ps( x + 8 );
        __m256 v2 = _mm256_loadu_ps( x + 16 );
        __m256 v3 = _mm256_loadu_ps( x + 24 );
        x += 32;

        // Compute max(abs(e)) for the block
        const __m256 signBit = _mm256_set1_ps( -0.0f );
        __m256 maxAbs = _mm256_andnot_ps( signBit, v0 );
        maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) );
        maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) );
        maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) );

        __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) );
        max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) );
        max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) );
        const float maxScalar = _mm_cvtss_f32( max4 );

        // Quantize these floats
        const float d = maxScalar / 127.f;
        y[i].d = d;
        const float id = ( maxScalar != 0.0f ) ? 127.f / maxScalar : 0.0f;
        const __m256 mul = _mm256_set1_ps( id );

        // Apply the multiplier
        v0 = _mm256_mul_ps( v0, mul );
        v1 = _mm256_mul_ps( v1, mul );
        v2 = _mm256_mul_ps( v2, mul );
        v3 = _mm256_mul_ps( v3, mul );

        // Round to nearest integer
        v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST );
        v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST );
        v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST );
        v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST );

        // Convert floats to integers
        __m256i i0 = _mm256_cvtps_epi32( v0 );
        __m256i i1 = _mm256_cvtps_epi32( v1 );
        __m256i i2 = _mm256_cvtps_epi32( v2 );
        __m256i i3 = _mm256_cvtps_epi32( v3 );

#if defined(__AVX2__)
        // Compute the sum of the quants and set y[i].s
        y[i].s = d * hsum_i32_8(_mm256_add_epi32(_mm256_add_epi32(i0, i1), _mm256_add_epi32(i2, i3)));

        // Convert int32 to int16
        i0 = _mm256_packs_epi32( i0, i1 );  // 0, 1, 2, 3,  8, 9, 10, 11,  4, 5, 6, 7, 12, 13, 14, 15
        i2 = _mm256_packs_epi32( i2, i3 );  // 16, 17, 18, 19, 24, 25, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31
        // Convert int16 to int8
        i0 = _mm256_packs_epi16( i0, i2 );  // 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27, 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31

        // We got our precious signed bytes, but the order is now wrong
        // These AVX2 pack instructions process 16-byte pieces independently
        // The following instruction fixes the order
        const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 );
        i0 = _mm256_permutevar8x32_epi32( i0, perm );

        _mm256_storeu_si256((__m256i *)y[i].qs, i0);
#else
        // AVX lacks the needed 256-bit integer instructions,
        // so we split the registers in half and use the SSE counterparts
        __m128i ni0 = _mm256_castsi256_si128( i0 );
        __m128i ni1 = _mm256_extractf128_si256( i0, 1);
        __m128i ni2 = _mm256_castsi256_si128( i1 );
        __m128i ni3 = _mm256_extractf128_si256( i1, 1);
        __m128i ni4 = _mm256_castsi256_si128( i2 );
        __m128i ni5 = _mm256_extractf128_si256( i2, 1);
        __m128i ni6 = _mm256_castsi256_si128( i3 );
        __m128i ni7 = _mm256_extractf128_si256( i3, 1);

        // Compute the sum of the quants and set y[i].s
        const __m128i s0 = _mm_add_epi32(_mm_add_epi32(ni0, ni1), _mm_add_epi32(ni2, ni3));
        const __m128i s1 = _mm_add_epi32(_mm_add_epi32(ni4, ni5), _mm_add_epi32(ni6, ni7));
        y[i].s = d * hsum_i32_4(_mm_add_epi32(s0, s1));

        // Convert int32 to int16
        ni0 = _mm_packs_epi32( ni0, ni1 );
        ni2 = _mm_packs_epi32( ni2, ni3 );
        ni4 = _mm_packs_epi32( ni4, ni5 );
        ni6 = _mm_packs_epi32( ni6, ni7 );
        // Convert int16 to int8
        ni0 = _mm_packs_epi16( ni0, ni2 );
        ni4 = _mm_packs_epi16( ni4, ni6 );

        _mm_storeu_si128((__m128i *)(y[i].qs +  0), ni0);
        _mm_storeu_si128((__m128i *)(y[i].qs + 16), ni4);
#endif
    }
#else
    // scalar
    quantize_row_q8_1_reference(x, y, k);
#endif
}

static void dequantize_row_q4_0(const block_q4_0 * restrict x, float * restrict y, int k) {
    static const int qk = QK4_0;

    assert(k % qk == 0);

    const int nb = k / qk;

    for (int i = 0; i < nb; i++) {
        const float d = GGML_FP16_TO_FP32(x[i].d);

        for (int j = 0; j < qk/2; ++j) {
            const int x0 = (x[i].qs[j] & 0x0F) - 8;
            const int x1 = (x[i].qs[j] >>   4) - 8;

            y[i*qk + j + 0   ] = x0*d;
            y[i*qk + j + qk/2] = x1*d;
        }
    }
}

static void dequantize_row_q4_1(const block_q4_1 * restrict x, float * restrict y, int k) {
    static const int qk = QK4_1;

    assert(k % qk == 0);

    const int nb = k / qk;

    for (int i = 0; i < nb; i++) {
        const float d = GGML_FP16_TO_FP32(x[i].d);
        const float m = GGML_FP16_TO_FP32(x[i].m);

        for (int j = 0; j < qk/2; ++j) {
            const int x0 = (x[i].qs[j] & 0x0F);
            const int x1 = (x[i].qs[j] >>   4);

            y[i*qk + j + 0   ] = x0*d + m;
            y[i*qk + j + qk/2] = x1*d + m;
        }
    }
}

static void dequantize_row_q5_0(const block_q5_0 * restrict x, float * restrict y, int k) {
    static const int qk = QK5_0;

    assert(k % qk == 0);

    const int nb = k / qk;

    for (int i = 0; i < nb; i++) {
        const float d = GGML_FP16_TO_FP32(x[i].d);

        uint32_t qh;
        memcpy(&qh, x[i].qh, sizeof(qh));

        for (int j = 0; j < qk/2; ++j) {
            const uint8_t xh_0 = ((qh >> (j +  0)) << 4) & 0x10;
            const uint8_t xh_1 = ((qh >> (j + 12))     ) & 0x10;

            const int32_t x0 = ((x[i].qs[j] & 0x0F) | xh_0) - 16;
            const int32_t x1 = ((x[i].qs[j] >>   4) | xh_1) - 16;

            y[i*qk + j + 0   ] = x0*d;
            y[i*qk + j + qk/2] = x1*d;
        }
    }
}

static void dequantize_row_q5_1(const block_q5_1 * restrict x, float * restrict y, int k) {
    static const int qk = QK5_1;

    assert(k % qk == 0);

    const int nb = k / qk;

    for (int i = 0; i < nb; i++) {
        const float d = GGML_FP16_TO_FP32(x[i].d);
        const float m = GGML_FP16_TO_FP32(x[i].m);

        uint32_t qh;
        memcpy(&qh, x[i].qh, sizeof(qh));

        for (int j = 0; j < qk/2; ++j) {
            const uint8_t xh_0 = ((qh >> (j +  0)) << 4) & 0x10;
            const uint8_t xh_1 = ((qh >> (j + 12))     ) & 0x10;

            const int x0 = (x[i].qs[j] & 0x0F) | xh_0;
            const int x1 = (x[i].qs[j] >>   4) | xh_1;

            y[i*qk + j + 0   ] = x0*d + m;
            y[i*qk + j + qk/2] = x1*d + m;
        }
    }
}

static void dequantize_row_q8_0(const void * restrict vx, float * restrict y, int k) {
    static const int qk = QK8_0;

    assert(k % qk == 0);

    const int nb = k / qk;

    const block_q8_0 * restrict x = vx;

    for (int i = 0; i < nb; i++) {
        const float d = GGML_FP16_TO_FP32(x[i].d);

        for (int j = 0; j < qk; ++j) {
            y[i*qk + j] = x[i].qs[j]*d;
        }
    }
}
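
// Usage sketch (illustrative, not part of the library): round-tripping one row
// through q4_0 with the helpers above; k must be a multiple of QK4_0.
//
//     float src[2*QK4_0], dst[2*QK4_0];
//     block_q4_0 blk[2];
//     quantize_row_q4_0_reference(src, blk, 2*QK4_0);
//     dequantize_row_q4_0(blk, dst, 2*QK4_0);  // dst ~ src up to quantization error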
static void ggml_vec_dot_f32(const int n, float * restrict s, const float * restrict x, const float * restrict y);
static void ggml_vec_dot_f16(const int n, float * restrict s, ggml_fp16_t * restrict x, ggml_fp16_t * restrict y);
static void ggml_vec_dot_q4_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy);
static void ggml_vec_dot_q4_1_q8_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy);
static void ggml_vec_dot_q5_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy);
static void ggml_vec_dot_q5_1_q8_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy);
static void ggml_vec_dot_q8_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy);

static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = {
    [GGML_TYPE_F32] = {
        .vec_dot              = (ggml_vec_dot_t) ggml_vec_dot_f32,
        .vec_dot_type         = GGML_TYPE_F32,
    },
    [GGML_TYPE_F16] = {
        .to_float             = (ggml_to_float_t) ggml_fp16_to_fp32_row,
        .from_float           = (ggml_from_float_t) ggml_fp32_to_fp16_row,
        .from_float_reference = (ggml_from_float_t) ggml_fp32_to_fp16_row,
        .vec_dot              = (ggml_vec_dot_t) ggml_vec_dot_f16,
        .vec_dot_type         = GGML_TYPE_F16,
    },
    [GGML_TYPE_Q4_0] = {
        .to_float             = (ggml_to_float_t) dequantize_row_q4_0,
        .from_float           = quantize_row_q4_0,
        .from_float_reference = (ggml_from_float_t) quantize_row_q4_0_reference,
        .vec_dot              = ggml_vec_dot_q4_0_q8_0,
        .vec_dot_type         = GGML_TYPE_Q8_0,
    },
    [GGML_TYPE_Q4_1] = {
        .to_float             = (ggml_to_float_t) dequantize_row_q4_1,
        .from_float           = quantize_row_q4_1,
        .from_float_reference = (ggml_from_float_t) quantize_row_q4_1_reference,
        .vec_dot              = ggml_vec_dot_q4_1_q8_1,
        .vec_dot_type         = GGML_TYPE_Q8_1,
    },
    [GGML_TYPE_Q5_0] = {
        .to_float             = (ggml_to_float_t) dequantize_row_q5_0,
        .from_float           = quantize_row_q5_0,
        .from_float_reference = (ggml_from_float_t) quantize_row_q5_0_reference,
        .vec_dot              = ggml_vec_dot_q5_0_q8_0,
        .vec_dot_type         = GGML_TYPE_Q8_0,
    },
    [GGML_TYPE_Q5_1] = {
        .to_float             = (ggml_to_float_t) dequantize_row_q5_1,
        .from_float           = quantize_row_q5_1,
        .from_float_reference = (ggml_from_float_t) quantize_row_q5_1_reference,
        .vec_dot              = ggml_vec_dot_q5_1_q8_1,
        .vec_dot_type         = GGML_TYPE_Q8_1,
    },
    [GGML_TYPE_Q8_0] = {
        .to_float             = dequantize_row_q8_0,
        .from_float           = quantize_row_q8_0,
        .from_float_reference = (ggml_from_float_t) quantize_row_q8_0_reference,
        .vec_dot              = ggml_vec_dot_q8_0_q8_0,
        .vec_dot_type         = GGML_TYPE_Q8_0,
    },
    [GGML_TYPE_Q8_1] = {
        .from_float           = quantize_row_q8_1,
        .from_float_reference = (ggml_from_float_t) quantize_row_q8_1_reference,
        .vec_dot_type         = GGML_TYPE_Q8_1,
    },
#ifdef GGML_USE_K_QUANTS
    [GGML_TYPE_Q2_K] = {
        .to_float             = (ggml_to_float_t) dequantize_row_q2_K,
        .from_float           = quantize_row_q2_K,
        .from_float_reference = (ggml_from_float_t) quantize_row_q2_K_reference,
        .vec_dot              = ggml_vec_dot_q2_K_q8_K,
        .vec_dot_type         = GGML_TYPE_Q8_K,
    },
    [GGML_TYPE_Q3_K] = {
        .to_float             = (ggml_to_float_t) dequantize_row_q3_K,
        .from_float           = quantize_row_q3_K,
        .from_float_reference = (ggml_from_float_t) quantize_row_q3_K_reference,
        .vec_dot              = ggml_vec_dot_q3_K_q8_K,
        .vec_dot_type         = GGML_TYPE_Q8_K,
    },
    [GGML_TYPE_Q4_K] = {
        .to_float             = (ggml_to_float_t) dequantize_row_q4_K,
        .from_float           = quantize_row_q4_K,
        .from_float_reference = (ggml_from_float_t) quantize_row_q4_K_reference,
        .vec_dot              = ggml_vec_dot_q4_K_q8_K,
        .vec_dot_type         = GGML_TYPE_Q8_K,
    },
    [GGML_TYPE_Q5_K] = {
        .to_float             = (ggml_to_float_t) dequantize_row_q5_K,
        .from_float           = quantize_row_q5_K,
        .from_float_reference = (ggml_from_float_t) quantize_row_q5_K_reference,
        .vec_dot              = ggml_vec_dot_q5_K_q8_K,
        .vec_dot_type         = GGML_TYPE_Q8_K,
    },
    [GGML_TYPE_Q6_K] = {
        .to_float             = (ggml_to_float_t) dequantize_row_q6_K,
        .from_float           = quantize_row_q6_K,
        .from_float_reference = (ggml_from_float_t) quantize_row_q6_K_reference,
        .vec_dot              = ggml_vec_dot_q6_K_q8_K,
        .vec_dot_type         = GGML_TYPE_Q8_K,
    },
    [GGML_TYPE_Q8_K] = {
        .from_float           = quantize_row_q8_K,
    }
#endif
};

// For internal test use
ggml_type_traits_t ggml_internal_get_type_traits(enum ggml_type i) {
    GGML_ASSERT(i < GGML_TYPE_COUNT);
    return type_traits[i];
}
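
// Note (illustrative): the traits table drives the mixed-type dot products:
// for a q4_0 weight matrix, vec_dot_type is GGML_TYPE_Q8_0, so the float
// operand is first converted with Q8_0's from_float (quantize_row_q8_0) and
// the resulting blocks are fed to ggml_vec_dot_q4_0_q8_0.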
//
// simd mappings
//
// we define a common set of C macros which map to specific intrinsics based on the current architecture
// we then implement the fundamental computation operations below using only these macros
// adding support for a new architecture requires defining the corresponding SIMD macros
//
// GGML_F32_STEP / GGML_F16_STEP
//   number of elements to process in a single step
//
// GGML_F32_EPR / GGML_F16_EPR
//   number of elements to fit in a single register
//
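// Example (derived from the definitions below): on AVX, GGML_F32_STEP = 32 and
// GGML_F32_EPR = 8, so one step covers 32 floats spread over 32/8 = 4
// registers; keeping several independent accumulator registers per step helps
// hide the latency of the FMA chain.
//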
#if defined(__ARM_NEON) && defined(__ARM_FEATURE_FMA)

#define GGML_SIMD

// F32 NEON

#define GGML_F32_STEP 16
#define GGML_F32_EPR  4

#define GGML_F32x4               float32x4_t
#define GGML_F32x4_ZERO          vdupq_n_f32(0.0f)
#define GGML_F32x4_SET1(x)       vdupq_n_f32(x)
#define GGML_F32x4_LOAD          vld1q_f32
#define GGML_F32x4_STORE         vst1q_f32
#define GGML_F32x4_FMA(a, b, c)  vfmaq_f32(a, b, c)
#define GGML_F32x4_ADD           vaddq_f32
#define GGML_F32x4_MUL           vmulq_f32
#define GGML_F32x4_REDUCE_ONE(x) vaddvq_f32(x)
#define GGML_F32x4_REDUCE(res, x)              \
{                                              \
    int offset = GGML_F32_ARR >> 1;            \
    for (int i = 0; i < offset; ++i) {         \
        x[i] = vaddq_f32(x[i], x[offset+i]);   \
    }                                          \
    offset >>= 1;                              \
    for (int i = 0; i < offset; ++i) {         \
        x[i] = vaddq_f32(x[i], x[offset+i]);   \
    }                                          \
    offset >>= 1;                              \
    for (int i = 0; i < offset; ++i) {         \
        x[i] = vaddq_f32(x[i], x[offset+i]);   \
    }                                          \
    res = GGML_F32x4_REDUCE_ONE(x[0]);         \
}

#define GGML_F32_VEC        GGML_F32x4
#define GGML_F32_VEC_ZERO   GGML_F32x4_ZERO
#define GGML_F32_VEC_SET1   GGML_F32x4_SET1
#define GGML_F32_VEC_LOAD   GGML_F32x4_LOAD
#define GGML_F32_VEC_STORE  GGML_F32x4_STORE
#define GGML_F32_VEC_FMA    GGML_F32x4_FMA
#define GGML_F32_VEC_ADD    GGML_F32x4_ADD
#define GGML_F32_VEC_MUL    GGML_F32x4_MUL
#define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE

// F16 NEON

#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
    #define GGML_F16_STEP 32
    #define GGML_F16_EPR  8

    #define GGML_F16x8              float16x8_t
    #define GGML_F16x8_ZERO         vdupq_n_f16(0.0f)
    #define GGML_F16x8_SET1(x)      vdupq_n_f16(x)
    #define GGML_F16x8_LOAD         vld1q_f16
    #define GGML_F16x8_STORE        vst1q_f16
    #define GGML_F16x8_FMA(a, b, c) vfmaq_f16(a, b, c)
    #define GGML_F16x8_ADD          vaddq_f16
    #define GGML_F16x8_MUL          vmulq_f16
    #define GGML_F16x8_REDUCE(res, x)                                 \
    {                                                                 \
        int offset = GGML_F16_ARR >> 1;                               \
        for (int i = 0; i < offset; ++i) {                            \
            x[i] = vaddq_f16(x[i], x[offset+i]);                      \
        }                                                             \
        offset >>= 1;                                                 \
        for (int i = 0; i < offset; ++i) {                            \
            x[i] = vaddq_f16(x[i], x[offset+i]);                      \
        }                                                             \
        offset >>= 1;                                                 \
        for (int i = 0; i < offset; ++i) {                            \
            x[i] = vaddq_f16(x[i], x[offset+i]);                      \
        }                                                             \
        const float32x4_t t0 = vcvt_f32_f16(vget_low_f16 (x[0]));     \
        const float32x4_t t1 = vcvt_f32_f16(vget_high_f16(x[0]));     \
        res = (ggml_float) vaddvq_f32(vaddq_f32(t0, t1));             \
    }

    #define GGML_F16_VEC                GGML_F16x8
    #define GGML_F16_VEC_ZERO           GGML_F16x8_ZERO
    #define GGML_F16_VEC_SET1           GGML_F16x8_SET1
    #define GGML_F16_VEC_LOAD(p, i)     GGML_F16x8_LOAD(p)
    #define GGML_F16_VEC_STORE(p, r, i) GGML_F16x8_STORE(p, r[i])
    #define GGML_F16_VEC_FMA            GGML_F16x8_FMA
    #define GGML_F16_VEC_ADD            GGML_F16x8_ADD
    #define GGML_F16_VEC_MUL            GGML_F16x8_MUL
    #define GGML_F16_VEC_REDUCE         GGML_F16x8_REDUCE
#else
    // if FP16 vector arithmetic is not supported, we use FP32 instead
    // and take advantage of the vcvt_ functions to convert to/from FP16

    #define GGML_F16_STEP 16
    #define GGML_F16_EPR  4

    #define GGML_F32Cx4              float32x4_t
    #define GGML_F32Cx4_ZERO         vdupq_n_f32(0.0f)
    #define GGML_F32Cx4_SET1(x)      vdupq_n_f32(x)
    #define GGML_F32Cx4_LOAD(x)      vcvt_f32_f16(vld1_f16(x))
    #define GGML_F32Cx4_STORE(x, y)  vst1_f16(x, vcvt_f16_f32(y))
    #define GGML_F32Cx4_FMA(a, b, c) vfmaq_f32(a, b, c)
    #define GGML_F32Cx4_ADD          vaddq_f32
    #define GGML_F32Cx4_MUL          vmulq_f32
    #define GGML_F32Cx4_REDUCE       GGML_F32x4_REDUCE

    #define GGML_F16_VEC                GGML_F32Cx4
    #define GGML_F16_VEC_ZERO           GGML_F32Cx4_ZERO
    #define GGML_F16_VEC_SET1           GGML_F32Cx4_SET1
    #define GGML_F16_VEC_LOAD(p, i)     GGML_F32Cx4_LOAD(p)
    #define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx4_STORE(p, r[i])
    #define GGML_F16_VEC_FMA            GGML_F32Cx4_FMA
    #define GGML_F16_VEC_ADD            GGML_F32Cx4_ADD
    #define GGML_F16_VEC_MUL            GGML_F32Cx4_MUL
    #define GGML_F16_VEC_REDUCE         GGML_F32Cx4_REDUCE
#endif

#elif defined(__AVX__)

#define GGML_SIMD

// F32 AVX

#define GGML_F32_STEP 32
#define GGML_F32_EPR  8

#define GGML_F32x8         __m256
#define GGML_F32x8_ZERO    _mm256_setzero_ps()
#define GGML_F32x8_SET1(x) _mm256_set1_ps(x)
#define GGML_F32x8_LOAD    _mm256_loadu_ps
#define GGML_F32x8_STORE   _mm256_storeu_ps
#if defined(__FMA__)
    #define GGML_F32x8_FMA(a, b, c) _mm256_fmadd_ps(b, c, a)
#else
    #define GGML_F32x8_FMA(a, b, c) _mm256_add_ps(_mm256_mul_ps(b, c), a)
#endif
#define GGML_F32x8_ADD _mm256_add_ps
#define GGML_F32x8_MUL _mm256_mul_ps
#define GGML_F32x8_REDUCE(res, x)                                 \
{                                                                 \
    int offset = GGML_F32_ARR >> 1;                               \
    for (int i = 0; i < offset; ++i) {                            \
        x[i] = _mm256_add_ps(x[i], x[offset+i]);                  \
    }                                                             \
    offset >>= 1;                                                 \
    for (int i = 0; i < offset; ++i) {                            \
        x[i] = _mm256_add_ps(x[i], x[offset+i]);                  \
    }                                                             \
    offset >>= 1;                                                 \
    for (int i = 0; i < offset; ++i) {                            \
        x[i] = _mm256_add_ps(x[i], x[offset+i]);                  \
    }                                                             \
    const __m128 t0 = _mm_add_ps(_mm256_castps256_ps128(x[0]),    \
                                 _mm256_extractf128_ps(x[0], 1)); \
    const __m128 t1 = _mm_hadd_ps(t0, t0);                        \
    res = _mm_cvtss_f32(_mm_hadd_ps(t1, t1));                     \
}
// TODO: is this optimal ?

#define GGML_F32_VEC        GGML_F32x8
#define GGML_F32_VEC_ZERO   GGML_F32x8_ZERO
#define GGML_F32_VEC_SET1   GGML_F32x8_SET1
#define GGML_F32_VEC_LOAD   GGML_F32x8_LOAD
#define GGML_F32_VEC_STORE  GGML_F32x8_STORE
#define GGML_F32_VEC_FMA    GGML_F32x8_FMA
#define GGML_F32_VEC_ADD    GGML_F32x8_ADD
#define GGML_F32_VEC_MUL    GGML_F32x8_MUL
#define GGML_F32_VEC_REDUCE GGML_F32x8_REDUCE

// F16 AVX

#define GGML_F16_STEP 32
#define GGML_F16_EPR  8

// F16 arithmetic is not supported by AVX, so we use F32 instead

#define GGML_F32Cx8         __m256
#define GGML_F32Cx8_ZERO    _mm256_setzero_ps()
#define GGML_F32Cx8_SET1(x) _mm256_set1_ps(x)

#if defined(__F16C__)
// the _mm256_cvt intrinsics require F16C
#define GGML_F32Cx8_LOAD(x)     _mm256_cvtph_ps(_mm_loadu_si128((__m128i *)(x)))
#define GGML_F32Cx8_STORE(x, y) _mm_storeu_si128((__m128i *)(x), _mm256_cvtps_ph(y, 0))
#else
static inline __m256 __avx_f32cx8_load(ggml_fp16_t *x) {
    float tmp[8];

    for (int i = 0; i < 8; i++) {
        tmp[i] = GGML_FP16_TO_FP32(x[i]);
    }

    return _mm256_loadu_ps(tmp);
}
static inline void __avx_f32cx8_store(ggml_fp16_t *x, __m256 y) {
    float arr[8];

    _mm256_storeu_ps(arr, y);

    for (int i = 0; i < 8; i++)
        x[i] = GGML_FP32_TO_FP16(arr[i]);
}
#define GGML_F32Cx8_LOAD(x)     __avx_f32cx8_load(x)
#define GGML_F32Cx8_STORE(x, y) __avx_f32cx8_store(x, y)
#endif

#define GGML_F32Cx8_FMA    GGML_F32x8_FMA
#define GGML_F32Cx8_ADD    _mm256_add_ps
#define GGML_F32Cx8_MUL    _mm256_mul_ps
#define GGML_F32Cx8_REDUCE GGML_F32x8_REDUCE

#define GGML_F16_VEC                GGML_F32Cx8
#define GGML_F16_VEC_ZERO           GGML_F32Cx8_ZERO
#define GGML_F16_VEC_SET1           GGML_F32Cx8_SET1
#define GGML_F16_VEC_LOAD(p, i)     GGML_F32Cx8_LOAD(p)
#define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx8_STORE(p, r[i])
#define GGML_F16_VEC_FMA            GGML_F32Cx8_FMA
#define GGML_F16_VEC_ADD            GGML_F32Cx8_ADD
#define GGML_F16_VEC_MUL            GGML_F32Cx8_MUL
#define GGML_F16_VEC_REDUCE         GGML_F32Cx8_REDUCE

#elif defined(__POWER9_VECTOR__)

#define GGML_SIMD

// F32 POWER9

#define GGML_F32_STEP 32
#define GGML_F32_EPR  4

#define GGML_F32x4              vector float
#define GGML_F32x4_ZERO         0.0f
#define GGML_F32x4_SET1         vec_splats
#define GGML_F32x4_LOAD(p)      vec_xl(0, p)
#define GGML_F32x4_STORE(p, r)  vec_xst(r, 0, p)
#define GGML_F32x4_FMA(a, b, c) vec_madd(b, c, a)
#define GGML_F32x4_ADD          vec_add
#define GGML_F32x4_MUL          vec_mul
#define GGML_F32x4_REDUCE(res, x)              \
{                                              \
    int offset = GGML_F32_ARR >> 1;            \
    for (int i = 0; i < offset; ++i) {         \
        x[i] = vec_add(x[i], x[offset+i]);     \
    }                                          \
    offset >>= 1;                              \
    for (int i = 0; i < offset; ++i) {         \
        x[i] = vec_add(x[i], x[offset+i]);     \
    }                                          \
    offset >>= 1;                              \
    for (int i = 0; i < offset; ++i) {         \
        x[i] = vec_add(x[i], x[offset+i]);     \
    }                                          \
    res = vec_extract(x[0], 0) +               \
          vec_extract(x[0], 1) +               \
          vec_extract(x[0], 2) +               \
          vec_extract(x[0], 3);                \
}

#define GGML_F32_VEC        GGML_F32x4
#define GGML_F32_VEC_ZERO   GGML_F32x4_ZERO
#define GGML_F32_VEC_SET1   GGML_F32x4_SET1
#define GGML_F32_VEC_LOAD   GGML_F32x4_LOAD
#define GGML_F32_VEC_STORE  GGML_F32x4_STORE
#define GGML_F32_VEC_FMA    GGML_F32x4_FMA
#define GGML_F32_VEC_ADD    GGML_F32x4_ADD
#define GGML_F32_VEC_MUL    GGML_F32x4_MUL
#define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE

// F16 POWER9
#define GGML_F16_STEP       GGML_F32_STEP
#define GGML_F16_EPR        GGML_F32_EPR
#define GGML_F16_VEC        GGML_F32x4
#define GGML_F16_VEC_ZERO   GGML_F32x4_ZERO
#define GGML_F16_VEC_SET1   GGML_F32x4_SET1
#define GGML_F16_VEC_FMA    GGML_F32x4_FMA
#define GGML_F16_VEC_REDUCE GGML_F32x4_REDUCE
// Use vec_xl, not vec_ld, in case the load address is not aligned.
#define GGML_F16_VEC_LOAD(p, i) (i & 0x1) ?                   \
  vec_extract_fp32_from_shorth(vec_xl(0, p - GGML_F16_EPR)) : \
  vec_extract_fp32_from_shortl(vec_xl(0, p))
#define GGML_ENDIAN_BYTE(i) ((unsigned char *)&(uint16_t){1})[i]
#define GGML_F16_VEC_STORE(p, r, i)                             \
  if (i & 0x1)                                                  \
    vec_xst(vec_pack_to_short_fp32(r[i - GGML_ENDIAN_BYTE(1)],  \
                                   r[i - GGML_ENDIAN_BYTE(0)]), \
            0, p - GGML_F16_EPR)

#elif defined(__wasm_simd128__)

#define GGML_SIMD

// F32 WASM

#define GGML_F32_STEP 16
#define GGML_F32_EPR  4

#define GGML_F32x4              v128_t
#define GGML_F32x4_ZERO         wasm_f32x4_splat(0.0f)
#define GGML_F32x4_SET1(x)      wasm_f32x4_splat(x)
#define GGML_F32x4_LOAD         wasm_v128_load
#define GGML_F32x4_STORE        wasm_v128_store
#define GGML_F32x4_FMA(a, b, c) wasm_f32x4_add(wasm_f32x4_mul(b, c), a)
#define GGML_F32x4_ADD          wasm_f32x4_add
#define GGML_F32x4_MUL          wasm_f32x4_mul
#define GGML_F32x4_REDUCE(res, x)                  \
{                                                  \
    int offset = GGML_F32_ARR >> 1;                \
    for (int i = 0; i < offset; ++i) {             \
        x[i] = wasm_f32x4_add(x[i], x[offset+i]);  \
    }                                              \
    offset >>= 1;                                  \
    for (int i = 0; i < offset; ++i) {             \
        x[i] = wasm_f32x4_add(x[i], x[offset+i]);  \
    }                                              \
    offset >>= 1;                                  \
    for (int i = 0; i < offset; ++i) {             \
        x[i] = wasm_f32x4_add(x[i], x[offset+i]);  \
    }                                              \
    res = wasm_f32x4_extract_lane(x[0], 0) +       \
          wasm_f32x4_extract_lane(x[0], 1) +       \
          wasm_f32x4_extract_lane(x[0], 2) +       \
          wasm_f32x4_extract_lane(x[0], 3);        \
}

#define GGML_F32_VEC        GGML_F32x4
#define GGML_F32_VEC_ZERO   GGML_F32x4_ZERO
#define GGML_F32_VEC_SET1   GGML_F32x4_SET1
#define GGML_F32_VEC_LOAD   GGML_F32x4_LOAD
#define GGML_F32_VEC_STORE  GGML_F32x4_STORE
#define GGML_F32_VEC_FMA    GGML_F32x4_FMA
#define GGML_F32_VEC_ADD    GGML_F32x4_ADD
#define GGML_F32_VEC_MUL    GGML_F32x4_MUL
#define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE

// F16 WASM

#define GGML_F16_STEP 16
#define GGML_F16_EPR  4

inline static v128_t __wasm_f16x4_load(const ggml_fp16_t * p) {
    float tmp[4];

    tmp[0] = GGML_FP16_TO_FP32(p[0]);
    tmp[1] = GGML_FP16_TO_FP32(p[1]);
    tmp[2] = GGML_FP16_TO_FP32(p[2]);
    tmp[3] = GGML_FP16_TO_FP32(p[3]);

    return wasm_v128_load(tmp);
}

inline static void __wasm_f16x4_store(ggml_fp16_t * p, v128_t x) {
    float tmp[4];

    wasm_v128_store(tmp, x);

    p[0] = GGML_FP32_TO_FP16(tmp[0]);
    p[1] = GGML_FP32_TO_FP16(tmp[1]);
    p[2] = GGML_FP32_TO_FP16(tmp[2]);
    p[3] = GGML_FP32_TO_FP16(tmp[3]);
}

#define GGML_F16x4             v128_t
#define GGML_F16x4_ZERO        wasm_f32x4_splat(0.0f)
#define GGML_F16x4_SET1(x)     wasm_f32x4_splat(x)
#define GGML_F16x4_LOAD(x)     __wasm_f16x4_load(x)
#define GGML_F16x4_STORE(x, y) __wasm_f16x4_store(x, y)
#define GGML_F16x4_FMA         GGML_F32x4_FMA
#define GGML_F16x4_ADD         wasm_f32x4_add
#define GGML_F16x4_MUL         wasm_f32x4_mul
#define GGML_F16x4_REDUCE(res, x)                  \
{                                                  \
    int offset = GGML_F16_ARR >> 1;                \
    for (int i = 0; i < offset; ++i) {             \
        x[i] = wasm_f32x4_add(x[i], x[offset+i]);  \
    }                                              \
    offset >>= 1;                                  \
    for (int i = 0; i < offset; ++i) {             \
        x[i] = wasm_f32x4_add(x[i], x[offset+i]);  \
    }                                              \
    offset >>= 1;                                  \
    for (int i = 0; i < offset; ++i) {             \
        x[i] = wasm_f32x4_add(x[i], x[offset+i]);  \
    }                                              \
    res = wasm_f32x4_extract_lane(x[0], 0) +       \
          wasm_f32x4_extract_lane(x[0], 1) +       \
          wasm_f32x4_extract_lane(x[0], 2) +       \
          wasm_f32x4_extract_lane(x[0], 3);        \
}

#define GGML_F16_VEC                GGML_F16x4
#define GGML_F16_VEC_ZERO           GGML_F16x4_ZERO
#define GGML_F16_VEC_SET1           GGML_F16x4_SET1
#define GGML_F16_VEC_LOAD(p, i)     GGML_F16x4_LOAD(p)
#define GGML_F16_VEC_STORE(p, r, i) GGML_F16x4_STORE(p, r[i])
#define GGML_F16_VEC_FMA            GGML_F16x4_FMA
#define GGML_F16_VEC_ADD            GGML_F16x4_ADD
#define GGML_F16_VEC_MUL            GGML_F16x4_MUL
#define GGML_F16_VEC_REDUCE         GGML_F16x4_REDUCE

#elif defined(__SSE3__)

#define GGML_SIMD

// F32 SSE

#define GGML_F32_STEP 32
#define GGML_F32_EPR  4

#define GGML_F32x4         __m128
#define GGML_F32x4_ZERO    _mm_setzero_ps()
#define GGML_F32x4_SET1(x) _mm_set1_ps(x)
#define GGML_F32x4_LOAD    _mm_loadu_ps
#define GGML_F32x4_STORE   _mm_storeu_ps
#if defined(__FMA__)
    // TODO: Does this work?
    #define GGML_F32x4_FMA(a, b, c) _mm_fmadd_ps(b, c, a)
#else
    #define GGML_F32x4_FMA(a, b, c) _mm_add_ps(_mm_mul_ps(b, c), a)
#endif
#define GGML_F32x4_ADD _mm_add_ps
#define GGML_F32x4_MUL _mm_mul_ps
#define GGML_F32x4_REDUCE(res, x)                  \
{                                                  \
    int offset = GGML_F32_ARR >> 1;                \
    for (int i = 0; i < offset; ++i) {             \
        x[i] = _mm_add_ps(x[i], x[offset+i]);      \
    }                                              \
    offset >>= 1;                                  \
    for (int i = 0; i < offset; ++i) {             \
        x[i] = _mm_add_ps(x[i], x[offset+i]);      \
    }                                              \
    offset >>= 1;                                  \
    for (int i = 0; i < offset; ++i) {             \
        x[i] = _mm_add_ps(x[i], x[offset+i]);      \
    }                                              \
    const __m128 t0 = _mm_hadd_ps(x[0], x[0]);     \
    res = _mm_cvtss_f32(_mm_hadd_ps(t0, t0));      \
}
// TODO: is this optimal ?

#define GGML_F32_VEC        GGML_F32x4
#define GGML_F32_VEC_ZERO   GGML_F32x4_ZERO
#define GGML_F32_VEC_SET1   GGML_F32x4_SET1
#define GGML_F32_VEC_LOAD   GGML_F32x4_LOAD
#define GGML_F32_VEC_STORE  GGML_F32x4_STORE
#define GGML_F32_VEC_FMA    GGML_F32x4_FMA
#define GGML_F32_VEC_ADD    GGML_F32x4_ADD
#define GGML_F32_VEC_MUL    GGML_F32x4_MUL
#define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE

// F16 SSE

#define GGML_F16_STEP 32
#define GGML_F16_EPR  4

static inline __m128 __sse_f16x4_load(ggml_fp16_t *x) {
    float tmp[4];

    tmp[0] = GGML_FP16_TO_FP32(x[0]);
    tmp[1] = GGML_FP16_TO_FP32(x[1]);
    tmp[2] = GGML_FP16_TO_FP32(x[2]);
    tmp[3] = GGML_FP16_TO_FP32(x[3]);

    return _mm_loadu_ps(tmp);
}

static inline void __sse_f16x4_store(ggml_fp16_t *x, __m128 y) {
    float arr[4];

    _mm_storeu_ps(arr, y);

    x[0] = GGML_FP32_TO_FP16(arr[0]);
    x[1] = GGML_FP32_TO_FP16(arr[1]);
    x[2] = GGML_FP32_TO_FP16(arr[2]);
    x[3] = GGML_FP32_TO_FP16(arr[3]);
}

#define GGML_F32Cx4             __m128
#define GGML_F32Cx4_ZERO        _mm_setzero_ps()
#define GGML_F32Cx4_SET1(x)     _mm_set1_ps(x)
#define GGML_F32Cx4_LOAD(x)     __sse_f16x4_load(x)
#define GGML_F32Cx4_STORE(x, y) __sse_f16x4_store(x, y)
#define GGML_F32Cx4_FMA         GGML_F32x4_FMA
#define GGML_F32Cx4_ADD         _mm_add_ps
#define GGML_F32Cx4_MUL         _mm_mul_ps
#define GGML_F32Cx4_REDUCE      GGML_F32x4_REDUCE

#define GGML_F16_VEC                GGML_F32Cx4
#define GGML_F16_VEC_ZERO           GGML_F32Cx4_ZERO
#define GGML_F16_VEC_SET1           GGML_F32Cx4_SET1
#define GGML_F16_VEC_LOAD(p, i)     GGML_F32Cx4_LOAD(p)
#define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx4_STORE(p, r[i])
#define GGML_F16_VEC_FMA            GGML_F32Cx4_FMA
#define GGML_F16_VEC_ADD            GGML_F32Cx4_ADD
#define GGML_F16_VEC_MUL            GGML_F32Cx4_MUL
#define GGML_F16_VEC_REDUCE         GGML_F32Cx4_REDUCE

#endif

// GGML_F32_ARR / GGML_F16_ARR
//   number of registers to use per step
#ifdef GGML_SIMD
#define GGML_F32_ARR (GGML_F32_STEP/GGML_F32_EPR)
#define GGML_F16_ARR (GGML_F16_STEP/GGML_F16_EPR)
#endif
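
// With the definitions above, GGML_F32_ARR works out to e.g. 16/4 = 4 on NEON,
// 32/8 = 4 on AVX, and 32/4 = 8 on SSE3 and POWER9.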
//
// fundamental operations
//

inline static void ggml_vec_set_i8(const int n, int8_t * x, const int8_t v) { for (int i = 0; i < n; ++i) x[i] = v; }

inline static void ggml_vec_set_i16(const int n, int16_t * x, const int16_t v) { for (int i = 0; i < n; ++i) x[i] = v; }

inline static void ggml_vec_set_i32(const int n, int32_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; }

inline static void ggml_vec_set_f16(const int n, ggml_fp16_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; }

inline static void ggml_vec_add_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i]  = x[i] + y[i]; }
inline static void ggml_vec_add1_f32(const int n, float * z, const float * x, const float   v) { for (int i = 0; i < n; ++i) z[i]  = x[i] + v;    }
inline static void ggml_vec_acc_f32 (const int n, float * y, const float * x)                  { for (int i = 0; i < n; ++i) y[i] += x[i];        }
inline static void ggml_vec_acc1_f32(const int n, float * y, const float   v)                  { for (int i = 0; i < n; ++i) y[i] += v;           }
inline static void ggml_vec_sub_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i]  = x[i] - y[i]; }
inline static void ggml_vec_set_f32 (const int n, float * x, const float   v)                  { for (int i = 0; i < n; ++i) x[i]  = v;           }
inline static void ggml_vec_cpy_f32 (const int n, float * y, const float * x)                  { for (int i = 0; i < n; ++i) y[i]  = x[i];        }
inline static void ggml_vec_neg_f32 (const int n, float * y, const float * x)                  { for (int i = 0; i < n; ++i) y[i]  = -x[i];       }
inline static void ggml_vec_mul_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i]  = x[i]*y[i];   }
inline static void ggml_vec_div_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i]  = x[i]/y[i];   }

static void ggml_vec_dot_f32(const int n, float * restrict s, const float * restrict x, const float * restrict y) {
#ifdef GGML_SIMD
    float sumf = 0.0f;
    const int np = (n & ~(GGML_F32_STEP - 1));

    GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO };

    GGML_F32_VEC ax[GGML_F32_ARR];
    GGML_F32_VEC ay[GGML_F32_ARR];

    for (int i = 0; i < np; i += GGML_F32_STEP) {
        for (int j = 0; j < GGML_F32_ARR; j++) {
            ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR);
            ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);

            sum[j] = GGML_F32_VEC_FMA(sum[j], ax[j], ay[j]);
        }
    }

    // reduce sum0..sum3 to sum0
    GGML_F32_VEC_REDUCE(sumf, sum);

    // leftovers
    for (int i = np; i < n; ++i) {
        sumf += x[i]*y[i];
    }
#else
    // scalar
    ggml_float sumf = 0.0;
    for (int i = 0; i < n; ++i) {
        sumf += (ggml_float)(x[i]*y[i]);
    }
#endif

    *s = sumf;
}
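
// Loop structure note (illustrative): np = n & ~(GGML_F32_STEP - 1) rounds n
// down to a multiple of the step size; e.g. with GGML_F32_STEP = 16 and
// n = 100, the vector loop covers np = 96 elements and the scalar tail the
// remaining 4.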
static void ggml_vec_dot_f16(const int n, float * restrict s, ggml_fp16_t * restrict x, ggml_fp16_t * restrict y) {
    ggml_float sumf = 0.0;

#if defined(GGML_SIMD)
    const int np = (n & ~(GGML_F16_STEP - 1));

    GGML_F16_VEC sum[GGML_F16_ARR] = { GGML_F16_VEC_ZERO };

    GGML_F16_VEC ax[GGML_F16_ARR];
    GGML_F16_VEC ay[GGML_F16_ARR];

    for (int i = 0; i < np; i += GGML_F16_STEP) {
        for (int j = 0; j < GGML_F16_ARR; j++) {
            ax[j] = GGML_F16_VEC_LOAD(x + i + j*GGML_F16_EPR, j);
            ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j);

            sum[j] = GGML_F16_VEC_FMA(sum[j], ax[j], ay[j]);
        }
    }

    // reduce sum0..sum3 to sum0
    GGML_F16_VEC_REDUCE(sumf, sum);

    // leftovers
    for (int i = np; i < n; ++i) {
        sumf += (ggml_float)(GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i]));
    }
#else
    for (int i = 0; i < n; ++i) {
        sumf += (ggml_float)(GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i]));
    }
#endif

    *s = sumf;
}

static void ggml_vec_dot_q4_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
    const int qk = QK8_0;
    const int nb = n / qk;

    assert(n % qk == 0);
    assert(nb % 2 == 0);

    const block_q4_0 * restrict x = vx;
    const block_q8_0 * restrict y = vy;
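
    // Math note (illustrative): each branch below computes, per block pair,
    //     d_x * d_y * sum_j (q_x[j] - 8) * q_y[j]
    // i.e. the q4_0 nibbles are shifted from [0, 15] into [-8, 7] before the
    // int8 dot product, and the two fp16 scales are folded in once per block.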
#if defined(__ARM_NEON)
    float32x4_t sumv0 = vdupq_n_f32(0.0f);
    float32x4_t sumv1 = vdupq_n_f32(0.0f);

    for (int i = 0; i < nb; i += 2) {
        const block_q4_0 * restrict x0 = &x[i + 0];
        const block_q4_0 * restrict x1 = &x[i + 1];
        const block_q8_0 * restrict y0 = &y[i + 0];
        const block_q8_0 * restrict y1 = &y[i + 1];

        const uint8x16_t m4b = vdupq_n_u8(0x0F);
        const int8x16_t  s8b = vdupq_n_s8(0x8);

        const uint8x16_t v0_0 = vld1q_u8(x0->qs);
        const uint8x16_t v0_1 = vld1q_u8(x1->qs);

        // 4-bit -> 8-bit
        const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8  (v0_0, m4b));
        const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
        const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8  (v0_1, m4b));
        const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));

        // sub 8
        const int8x16_t v0_0ls = vsubq_s8(v0_0l, s8b);
        const int8x16_t v0_0hs = vsubq_s8(v0_0h, s8b);
        const int8x16_t v0_1ls = vsubq_s8(v0_1l, s8b);
        const int8x16_t v0_1hs = vsubq_s8(v0_1h, s8b);

        // load y
        const int8x16_t v1_0l = vld1q_s8(y0->qs);
        const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
        const int8x16_t v1_1l = vld1q_s8(y1->qs);
        const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);

#if defined(__ARM_FEATURE_DOTPROD)
        // dot product into int32x4_t
        const int32x4_t p_0 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_0ls, v1_0l), v0_0hs, v1_0h);
        const int32x4_t p_1 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_1ls, v1_1l), v0_1hs, v1_1h);

        sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
        sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
#else
        const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0ls), vget_low_s8 (v1_0l));
        const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0ls), vget_high_s8(v1_0l));
        const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hs), vget_low_s8 (v1_0h));
        const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hs), vget_high_s8(v1_0h));

        const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1ls), vget_low_s8 (v1_1l));
        const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1ls), vget_high_s8(v1_1l));
        const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1hs), vget_low_s8 (v1_1h));
        const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1hs), vget_high_s8(v1_1h));

        const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h));
        const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h));
        const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h));
        const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h));

        sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
        sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
#endif
    }

    *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
#elif defined(__AVX2__)
    // Initialize accumulator with zeros
    __m256 acc = _mm256_setzero_ps();

    // Main loop
    for (int i = 0; i < nb; ++i) {
        /* Compute combined scale for the block */
        const __m256 d = _mm256_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) );

        __m256i bx = bytes_from_nibbles_32(x[i].qs);

        // Now we have a vector with bytes in [ 0 .. 15 ] interval. Offset them into [ -8 .. +7 ] interval.
        const __m256i off = _mm256_set1_epi8( 8 );
        bx = _mm256_sub_epi8( bx, off );

        __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);

        const __m256 q = mul_sum_i8_pairs_float(bx, by);

        /* Multiply q with scale and accumulate */
        acc = _mm256_fmadd_ps( d, q, acc );
    }

    *s = hsum_float_8(acc);
#elif defined(__AVX__)
    // Initialize accumulator with zeros
    __m256 acc = _mm256_setzero_ps();

    // Main loop
    for (int i = 0; i < nb; ++i) {
        // Compute combined scale for the block
        const __m256 d = _mm256_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) );

        const __m128i lowMask = _mm_set1_epi8(0xF);
        const __m128i off = _mm_set1_epi8(8);

        const __m128i tmp = _mm_loadu_si128((const __m128i *)x[i].qs);

        __m128i bx = _mm_and_si128(lowMask, tmp);
        __m128i by = _mm_loadu_si128((const __m128i *)y[i].qs);
        bx = _mm_sub_epi8(bx, off);
        const __m128i i32_0 = mul_sum_i8_pairs(bx, by);

        bx = _mm_and_si128(lowMask, _mm_srli_epi64(tmp, 4));
        by = _mm_loadu_si128((const __m128i *)(y[i].qs + 16));
        bx = _mm_sub_epi8(bx, off);
        const __m128i i32_1 = mul_sum_i8_pairs(bx, by);

        // Convert int32_t to float
        __m256 p = _mm256_cvtepi32_ps(MM256_SET_M128I(i32_0, i32_1));

        // Apply the scale, and accumulate
        acc = _mm256_add_ps(_mm256_mul_ps( d, p ), acc);
    }

    *s = hsum_float_8(acc);
#elif defined(__SSSE3__)
    // set constants
    const __m128i lowMask = _mm_set1_epi8(0xF);
    const __m128i off = _mm_set1_epi8(8);

    // Initialize accumulator with zeros
    __m128 acc_0 = _mm_setzero_ps();
    __m128 acc_1 = _mm_setzero_ps();
    __m128 acc_2 = _mm_setzero_ps();
    __m128 acc_3 = _mm_setzero_ps();

    // First round without accumulation
    {
        _mm_prefetch(&x[0] + sizeof(block_q4_0), _MM_HINT_T0);
        _mm_prefetch(&y[0] + sizeof(block_q8_0), _MM_HINT_T0);

        // Compute combined scale for the block 0 and 1
        const __m128 d_0_1 = _mm_set1_ps( GGML_FP16_TO_FP32(x[0].d) * GGML_FP16_TO_FP32(y[0].d) );

        const __m128i tmp_0_1 = _mm_loadu_si128((const __m128i *)x[0].qs);

        __m128i bx_0 = _mm_and_si128(lowMask, tmp_0_1);
        __m128i by_0 = _mm_loadu_si128((const __m128i *)y[0].qs);
        bx_0 = _mm_sub_epi8(bx_0, off);
        const __m128i i32_0 = mul_sum_i8_pairs(bx_0, by_0);

        __m128i bx_1 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_0_1, 4));
        __m128i by_1 = _mm_loadu_si128((const __m128i *)(y[0].qs + 16));
        bx_1 = _mm_sub_epi8(bx_1, off);
        const __m128i i32_1 = mul_sum_i8_pairs(bx_1, by_1);

        _mm_prefetch(&x[1] + sizeof(block_q4_0), _MM_HINT_T0);
        _mm_prefetch(&y[1] + sizeof(block_q8_0), _MM_HINT_T0);

        // Compute combined scale for the block 2 and 3
        const __m128 d_2_3 = _mm_set1_ps( GGML_FP16_TO_FP32(x[1].d) * GGML_FP16_TO_FP32(y[1].d) );

        const __m128i tmp_2_3 = _mm_loadu_si128((const __m128i *)x[1].qs);

        __m128i bx_2 = _mm_and_si128(lowMask, tmp_2_3);
        __m128i by_2 = _mm_loadu_si128((const __m128i *)y[1].qs);
        bx_2 = _mm_sub_epi8(bx_2, off);
        const __m128i i32_2 = mul_sum_i8_pairs(bx_2, by_2);

        __m128i bx_3 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_2_3, 4));
        __m128i by_3 = _mm_loadu_si128((const __m128i *)(y[1].qs + 16));
        bx_3 = _mm_sub_epi8(bx_3, off);
        const __m128i i32_3 = mul_sum_i8_pairs(bx_3, by_3);

        // Convert int32_t to float
        __m128 p0 = _mm_cvtepi32_ps(i32_0);
        __m128 p1 = _mm_cvtepi32_ps(i32_1);
        __m128 p2 = _mm_cvtepi32_ps(i32_2);
        __m128 p3 = _mm_cvtepi32_ps(i32_3);

        // Apply the scale
        acc_0 = _mm_mul_ps( d_0_1, p0 );
        acc_1 = _mm_mul_ps( d_0_1, p1 );
        acc_2 = _mm_mul_ps( d_2_3, p2 );
        acc_3 = _mm_mul_ps( d_2_3, p3 );
    }

    // Main loop
    for (int i = 2; i < nb; i+=2) {
        _mm_prefetch(&x[i] + sizeof(block_q4_0), _MM_HINT_T0);
        _mm_prefetch(&y[i] + sizeof(block_q8_0), _MM_HINT_T0);

        // Compute combined scale for the block 0 and 1
        const __m128 d_0_1 = _mm_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) );

        const __m128i tmp_0_1 = _mm_loadu_si128((const __m128i *)x[i].qs);

        __m128i bx_0 = _mm_and_si128(lowMask, tmp_0_1);
        __m128i by_0 = _mm_loadu_si128((const __m128i *)y[i].qs);
        bx_0 = _mm_sub_epi8(bx_0, off);
        const __m128i i32_0 = mul_sum_i8_pairs(bx_0, by_0);

        __m128i bx_1 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_0_1, 4));
        __m128i by_1 = _mm_loadu_si128((const __m128i *)(y[i].qs + 16));
        bx_1 = _mm_sub_epi8(bx_1, off);
        const __m128i i32_1 = mul_sum_i8_pairs(bx_1, by_1);

        _mm_prefetch(&x[i] + 2 * sizeof(block_q4_0), _MM_HINT_T0);
        _mm_prefetch(&y[i] + 2 * sizeof(block_q8_0), _MM_HINT_T0);

        // Compute combined scale for the block 2 and 3
        const __m128 d_2_3 = _mm_set1_ps( GGML_FP16_TO_FP32(x[i + 1].d) * GGML_FP16_TO_FP32(y[i + 1].d) );
  2123. const __m128i tmp_2_3 = _mm_loadu_si128((const __m128i *)x[i + 1].qs);
  2124. __m128i bx_2 = _mm_and_si128(lowMask, tmp_2_3);
  2125. __m128i by_2 = _mm_loadu_si128((const __m128i *)y[i + 1].qs);
  2126. bx_2 = _mm_sub_epi8(bx_2, off);
  2127. const __m128i i32_2 = mul_sum_i8_pairs(bx_2, by_2);
  2128. __m128i bx_3 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_2_3, 4));
  2129. __m128i by_3 = _mm_loadu_si128((const __m128i *)(y[i + 1].qs + 16));
  2130. bx_3 = _mm_sub_epi8(bx_3, off);
  2131. const __m128i i32_3 = mul_sum_i8_pairs(bx_3, by_3);
  2132. // Convert int32_t to float
  2133. __m128 p0 = _mm_cvtepi32_ps(i32_0);
  2134. __m128 p1 = _mm_cvtepi32_ps(i32_1);
  2135. __m128 p2 = _mm_cvtepi32_ps(i32_2);
  2136. __m128 p3 = _mm_cvtepi32_ps(i32_3);
  2137. // Apply the scale
  2138. __m128 p0_d = _mm_mul_ps( d_0_1, p0 );
  2139. __m128 p1_d = _mm_mul_ps( d_0_1, p1 );
  2140. __m128 p2_d = _mm_mul_ps( d_2_3, p2 );
  2141. __m128 p3_d = _mm_mul_ps( d_2_3, p3 );
        // Accumulate
        acc_0 = _mm_add_ps(p0_d, acc_0);
        acc_1 = _mm_add_ps(p1_d, acc_1);
        acc_2 = _mm_add_ps(p2_d, acc_2);
        acc_3 = _mm_add_ps(p3_d, acc_3);
    }

    *s = hsum_float_4x4(acc_0, acc_1, acc_2, acc_3);
#else
    // scalar
    float sumf = 0.0;

    for (int i = 0; i < nb; i++) {
        int sumi = 0;

        for (int j = 0; j < qk/2; ++j) {
            const int v0 = (x[i].qs[j] & 0x0F) - 8;
            const int v1 = (x[i].qs[j] >> 4) - 8;

            sumi += (v0 * y[i].qs[j]) + (v1 * y[i].qs[j + qk/2]);
        }

        sumf += sumi*GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d);
    }

    *s = sumf;
#endif
}
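
// q4_1 · q8_1 dot product.
// A q4_1 block stores unsigned 4-bit quants q plus a scale d and a min m, so a
// dequantized weight is d*q + m. The dot product therefore splits into
//     d_x*d_y*Σ(q_x*q_y) + m_x*(d_y*Σ q_y)
// and, as the scalar fallback at the bottom shows, the second factor comes
// precomputed in the q8_1 block's `s` field; it is accumulated into `summs`.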
static void ggml_vec_dot_q4_1_q8_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
    const int qk = QK8_1;
    const int nb = n / qk;

    assert(n % qk == 0);
    assert(nb % 2 == 0);

    const block_q4_1 * restrict x = vx;
    const block_q8_1 * restrict y = vy;

    // TODO: add WASM SIMD
#if defined(__ARM_NEON)
    float32x4_t sumv0 = vdupq_n_f32(0.0f);
    float32x4_t sumv1 = vdupq_n_f32(0.0f);

    float summs = 0;

    for (int i = 0; i < nb; i += 2) {
        const block_q4_1 * restrict x0 = &x[i + 0];
        const block_q4_1 * restrict x1 = &x[i + 1];
        const block_q8_1 * restrict y0 = &y[i + 0];
        const block_q8_1 * restrict y1 = &y[i + 1];

        summs += GGML_FP16_TO_FP32(x0->m) * y0->s + GGML_FP16_TO_FP32(x1->m) * y1->s;

        const uint8x16_t m4b = vdupq_n_u8(0x0F);

        const uint8x16_t v0_0 = vld1q_u8(x0->qs);
        const uint8x16_t v0_1 = vld1q_u8(x1->qs);

        // 4-bit -> 8-bit
        const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
        const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
        const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
        const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));

        // load y
        const int8x16_t v1_0l = vld1q_s8(y0->qs);
        const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
        const int8x16_t v1_1l = vld1q_s8(y1->qs);
        const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);

#if defined(__ARM_FEATURE_DOTPROD)
        // dot product into int32x4_t
        const int32x4_t p_0 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_0l, v1_0l), v0_0h, v1_0h);
        const int32x4_t p_1 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_1l, v1_1l), v0_1h, v1_1h);

        sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), GGML_FP16_TO_FP32(x0->d)*y0->d);
        sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_FP16_TO_FP32(x1->d)*y1->d);
#else
        const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0l), vget_low_s8 (v1_0l));
        const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0l), vget_high_s8(v1_0l));
        const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0h), vget_low_s8 (v1_0h));
        const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0h), vget_high_s8(v1_0h));

        const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1l), vget_low_s8 (v1_1l));
        const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1l), vget_high_s8(v1_1l));
        const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1h), vget_low_s8 (v1_1h));
        const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1h), vget_high_s8(v1_1h));

        const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h));
        const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h));
        const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h));
        const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h));

        sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), GGML_FP16_TO_FP32(x0->d)*y0->d);
        sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), GGML_FP16_TO_FP32(x1->d)*y1->d);
#endif
    }

    *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs;
#elif defined(__AVX2__) || defined(__AVX__)
    // Initialize accumulator with zeros
    __m256 acc = _mm256_setzero_ps();

    float summs = 0;

    // Main loop
    for (int i = 0; i < nb; ++i) {
        const float d0 = GGML_FP16_TO_FP32(x[i].d);
        const float d1 = y[i].d;

        summs += GGML_FP16_TO_FP32(x[i].m) * y[i].s;

        const __m256 d0v = _mm256_set1_ps( d0 );
        const __m256 d1v = _mm256_set1_ps( d1 );

        // Compute combined scales
        const __m256 d0d1 = _mm256_mul_ps( d0v, d1v );

        // Load 16 bytes, and unpack 4 bit fields into bytes, making 32 bytes
        const __m256i bx = bytes_from_nibbles_32(x[i].qs);
        const __m256i by = _mm256_loadu_si256( (const __m256i *)y[i].qs );

        const __m256 xy = mul_sum_us8_pairs_float(bx, by);

        // Accumulate d0*d1*x*y
#if defined(__AVX2__)
        acc = _mm256_fmadd_ps( d0d1, xy, acc );
#else
        acc = _mm256_add_ps( _mm256_mul_ps( d0d1, xy ), acc );
#endif
    }

    *s = hsum_float_8(acc) + summs;
#else
    // scalar
    float sumf = 0.0;

    for (int i = 0; i < nb; i++) {
        int sumi = 0;

        for (int j = 0; j < qk/2; ++j) {
            const int v0 = (x[i].qs[j] & 0x0F);
            const int v1 = (x[i].qs[j] >> 4);

            sumi += (v0 * y[i].qs[j]) + (v1 * y[i].qs[j + qk/2]);
        }

        sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s;
    }

    *s = sumf;
#endif
}
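
// q5_0 · q8_0 dot product.
// q5_0 splits each 5-bit quant into a low nibble in qs and a 5th bit packed
// into the 32-bit qh field. table_b2b_1 expands one byte of qh into eight
// bytes of (!b) << 4, so subtracting that expansion from the nibble computes
// nibble - 16*(1 - b), i.e. the signed value (q5 - 16) in a single vector op.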
static void ggml_vec_dot_q5_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
    const int qk = QK8_0;
    const int nb = n / qk;

    assert(n % qk == 0);
    assert(nb % 2 == 0);
    assert(qk == QK5_0);

    const block_q5_0 * restrict x = vx;
    const block_q8_0 * restrict y = vy;

#if defined(__ARM_NEON)
    float32x4_t sumv0 = vdupq_n_f32(0.0f);
    float32x4_t sumv1 = vdupq_n_f32(0.0f);

    uint32_t qh0;
    uint32_t qh1;

    uint64_t tmp0[4];
    uint64_t tmp1[4];

    for (int i = 0; i < nb; i += 2) {
        const block_q5_0 * restrict x0 = &x[i];
        const block_q5_0 * restrict x1 = &x[i + 1];
        const block_q8_0 * restrict y0 = &y[i];
        const block_q8_0 * restrict y1 = &y[i + 1];

        const uint8x16_t m4b = vdupq_n_u8(0x0F);

        // extract the 5th bit via lookup table ((!b) << 4)
        memcpy(&qh0, x0->qh, sizeof(qh0));
        memcpy(&qh1, x1->qh, sizeof(qh1));

        tmp0[0] = table_b2b_1[(qh0 >>  0) & 0xFF];
        tmp0[1] = table_b2b_1[(qh0 >>  8) & 0xFF];
        tmp0[2] = table_b2b_1[(qh0 >> 16) & 0xFF];
        tmp0[3] = table_b2b_1[(qh0 >> 24)       ];

        tmp1[0] = table_b2b_1[(qh1 >>  0) & 0xFF];
        tmp1[1] = table_b2b_1[(qh1 >>  8) & 0xFF];
        tmp1[2] = table_b2b_1[(qh1 >> 16) & 0xFF];
        tmp1[3] = table_b2b_1[(qh1 >> 24)       ];

        const int8x16_t qhl0 = vld1q_s8((const int8_t *)(tmp0 + 0));
        const int8x16_t qhh0 = vld1q_s8((const int8_t *)(tmp0 + 2));
        const int8x16_t qhl1 = vld1q_s8((const int8_t *)(tmp1 + 0));
        const int8x16_t qhh1 = vld1q_s8((const int8_t *)(tmp1 + 2));

        const uint8x16_t v0_0 = vld1q_u8(x0->qs);
        const uint8x16_t v0_1 = vld1q_u8(x1->qs);

        // 4-bit -> 8-bit
        int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
        int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
        int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
        int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));

        // add high bit and sub 16 (equivalent to sub 0x10 when bit is zero)
        const int8x16_t v0_0lf = vsubq_s8(v0_0l, qhl0);
        const int8x16_t v0_0hf = vsubq_s8(v0_0h, qhh0);
        const int8x16_t v0_1lf = vsubq_s8(v0_1l, qhl1);
        const int8x16_t v0_1hf = vsubq_s8(v0_1h, qhh1);

        // load y
        const int8x16_t v1_0l = vld1q_s8(y0->qs);
        const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
        const int8x16_t v1_1l = vld1q_s8(y1->qs);
        const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);

#if defined(__ARM_FEATURE_DOTPROD)
        sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(
                        vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l),
                        vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
        sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(
                        vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l),
                        vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
#else
        const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0lf), vget_low_s8 (v1_0l));
        const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0lf), vget_high_s8(v1_0l));
        const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hf), vget_low_s8 (v1_0h));
        const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hf), vget_high_s8(v1_0h));

        const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1lf), vget_low_s8 (v1_1l));
        const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1lf), vget_high_s8(v1_1l));
        const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1hf), vget_low_s8 (v1_1h));
        const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1hf), vget_high_s8(v1_1h));

        const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h));
        const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h));
        const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h));
        const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h));

        sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
        sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
#endif
    }

    *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
#elif defined(__wasm_simd128__)
    v128_t sumv = wasm_f32x4_splat(0.0f);

    uint32_t qh;
    uint64_t tmp[4];

    // TODO: check if unrolling this is better
    for (int i = 0; i < nb; ++i) {
        const block_q5_0 * restrict x0 = &x[i];
        const block_q8_0 * restrict y0 = &y[i];

        const v128_t m4b = wasm_i8x16_splat(0x0F);

        // extract the 5th bit
        memcpy(&qh, x0->qh, sizeof(qh));

        tmp[0] = table_b2b_1[(qh >>  0) & 0xFF];
        tmp[1] = table_b2b_1[(qh >>  8) & 0xFF];
        tmp[2] = table_b2b_1[(qh >> 16) & 0xFF];
        tmp[3] = table_b2b_1[(qh >> 24)       ];

        const v128_t qhl = wasm_v128_load(tmp + 0);
        const v128_t qhh = wasm_v128_load(tmp + 2);

        const v128_t v0 = wasm_v128_load(x0->qs);

        // 4-bit -> 8-bit
        const v128_t v0l = wasm_v128_and (v0, m4b);
        const v128_t v0h = wasm_u8x16_shr(v0, 4);

        // add high bit and sub 16 (equivalent to sub 0x10 when bit is zero)
        const v128_t v0lf = wasm_i8x16_sub(v0l, qhl);
        const v128_t v0hf = wasm_i8x16_sub(v0h, qhh);

        // load y
        const v128_t v1l = wasm_v128_load(y0->qs);
        const v128_t v1h = wasm_v128_load(y0->qs + 16);

        // int8x16 -> int16x8
        const v128_t v0lfl = wasm_i16x8_extend_low_i8x16 (v0lf);
        const v128_t v0lfh = wasm_i16x8_extend_high_i8x16(v0lf);
        const v128_t v0hfl = wasm_i16x8_extend_low_i8x16 (v0hf);
        const v128_t v0hfh = wasm_i16x8_extend_high_i8x16(v0hf);

        const v128_t v1ll = wasm_i16x8_extend_low_i8x16 (v1l);
        const v128_t v1lh = wasm_i16x8_extend_high_i8x16(v1l);
        const v128_t v1hl = wasm_i16x8_extend_low_i8x16 (v1h);
        const v128_t v1hh = wasm_i16x8_extend_high_i8x16(v1h);

        // dot product
        sumv = wasm_f32x4_add(sumv, wasm_f32x4_mul(wasm_f32x4_convert_i32x4(
                        wasm_i32x4_add(
                            wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0lfl, v1ll),
                                           wasm_i32x4_dot_i16x8(v0lfh, v1lh)),
                            wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0hfl, v1hl),
                                           wasm_i32x4_dot_i16x8(v0hfh, v1hh)))),
                    wasm_f32x4_splat(GGML_FP16_TO_FP32(x0->d) * GGML_FP16_TO_FP32(y0->d))));
    }

    *s = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) +
         wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3);
#elif defined(__AVX2__)
    // Initialize accumulator with zeros
    __m256 acc = _mm256_setzero_ps();

    // Main loop
    for (int i = 0; i < nb; i++) {
        /* Compute combined scale for the block */
        const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d));

        __m256i bx = bytes_from_nibbles_32(x[i].qs);
        __m256i bxhi = bytes_from_bits_32(x[i].qh);
        bxhi = _mm256_andnot_si256(bxhi, _mm256_set1_epi8((char)0xF0));
        bx = _mm256_or_si256(bx, bxhi);

        __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);

        const __m256 q = mul_sum_i8_pairs_float(bx, by);

        /* Multiply q with scale and accumulate */
        acc = _mm256_fmadd_ps(d, q, acc);
    }

    *s = hsum_float_8(acc);
#elif defined(__AVX__)
    // Initialize accumulator with zeros
    __m256 acc = _mm256_setzero_ps();
    __m128i mask = _mm_set1_epi8((char)0xF0);

    // Main loop
    for (int i = 0; i < nb; i++) {
        /* Compute combined scale for the block */
        const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d));

        __m256i bx = bytes_from_nibbles_32(x[i].qs);
        const __m256i bxhi = bytes_from_bits_32(x[i].qh);
        __m128i bxhil = _mm256_castsi256_si128(bxhi);
        __m128i bxhih = _mm256_extractf128_si256(bxhi, 1);
        bxhil = _mm_andnot_si128(bxhil, mask);
        bxhih = _mm_andnot_si128(bxhih, mask);
        __m128i bxl = _mm256_castsi256_si128(bx);
        __m128i bxh = _mm256_extractf128_si256(bx, 1);
        bxl = _mm_or_si128(bxl, bxhil);
        bxh = _mm_or_si128(bxh, bxhih);
        bx = MM256_SET_M128I(bxh, bxl);

        const __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);

        const __m256 q = mul_sum_i8_pairs_float(bx, by);

        /* Multiply q with scale and accumulate */
        acc = _mm256_add_ps(_mm256_mul_ps(d, q), acc);
    }

    *s = hsum_float_8(acc);
#else
    // scalar
    float sumf = 0.0;

    for (int i = 0; i < nb; i++) {
        uint32_t qh;
        memcpy(&qh, x[i].qh, sizeof(qh));

        int sumi = 0;

        for (int j = 0; j < qk/2; ++j) {
            const uint8_t xh_0 = ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4;
            const uint8_t xh_1 = ((qh & (1u << (j + 16))) >> (j + 12));

            const int32_t x0 = ((x[i].qs[j] & 0x0F) | xh_0) - 16;
            const int32_t x1 = ((x[i].qs[j] >> 4) | xh_1) - 16;

            sumi += (x0 * y[i].qs[j]) + (x1 * y[i].qs[j + qk/2]);
        }

        sumf += (GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d)) * sumi;
    }

    *s = sumf;
#endif
}
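
// q5_1 · q8_1 dot product.
// Same 5th-bit reconstruction as q5_0, but here table_b2b_0 expands qh bits to
// (b) << 4 and the result is OR-ed in, keeping the quants unsigned in [0, 31];
// the per-block min m is handled through `summs`, exactly as in q4_1.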
static void ggml_vec_dot_q5_1_q8_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
    const int qk = QK8_1;
    const int nb = n / qk;

    assert(n % qk == 0);
    assert(nb % 2 == 0);
    assert(qk == QK5_1);

    const block_q5_1 * restrict x = vx;
    const block_q8_1 * restrict y = vy;

#if defined(__ARM_NEON)
    float32x4_t sumv0 = vdupq_n_f32(0.0f);
    float32x4_t sumv1 = vdupq_n_f32(0.0f);

    float summs0 = 0.0f;
    float summs1 = 0.0f;

    uint32_t qh0;
    uint32_t qh1;

    uint64_t tmp0[4];
    uint64_t tmp1[4];

    for (int i = 0; i < nb; i += 2) {
        const block_q5_1 * restrict x0 = &x[i];
        const block_q5_1 * restrict x1 = &x[i + 1];
        const block_q8_1 * restrict y0 = &y[i];
        const block_q8_1 * restrict y1 = &y[i + 1];

        const uint8x16_t m4b = vdupq_n_u8(0x0F);

        summs0 += GGML_FP16_TO_FP32(x0->m) * y0->s;
        summs1 += GGML_FP16_TO_FP32(x1->m) * y1->s;

        // extract the 5th bit via lookup table ((b) << 4)
        memcpy(&qh0, x0->qh, sizeof(qh0));
        memcpy(&qh1, x1->qh, sizeof(qh1));

        tmp0[0] = table_b2b_0[(qh0 >>  0) & 0xFF];
        tmp0[1] = table_b2b_0[(qh0 >>  8) & 0xFF];
        tmp0[2] = table_b2b_0[(qh0 >> 16) & 0xFF];
        tmp0[3] = table_b2b_0[(qh0 >> 24)       ];

        tmp1[0] = table_b2b_0[(qh1 >>  0) & 0xFF];
        tmp1[1] = table_b2b_0[(qh1 >>  8) & 0xFF];
        tmp1[2] = table_b2b_0[(qh1 >> 16) & 0xFF];
        tmp1[3] = table_b2b_0[(qh1 >> 24)       ];

        const int8x16_t qhl0 = vld1q_s8((const int8_t *)(tmp0 + 0));
        const int8x16_t qhh0 = vld1q_s8((const int8_t *)(tmp0 + 2));
        const int8x16_t qhl1 = vld1q_s8((const int8_t *)(tmp1 + 0));
        const int8x16_t qhh1 = vld1q_s8((const int8_t *)(tmp1 + 2));

        const uint8x16_t v0_0 = vld1q_u8(x0->qs);
        const uint8x16_t v0_1 = vld1q_u8(x1->qs);

        // 4-bit -> 8-bit
        const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
        const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
        const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
        const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));

        // add high bit
        const int8x16_t v0_0lf = vorrq_s8(v0_0l, qhl0);
        const int8x16_t v0_0hf = vorrq_s8(v0_0h, qhh0);
        const int8x16_t v0_1lf = vorrq_s8(v0_1l, qhl1);
        const int8x16_t v0_1hf = vorrq_s8(v0_1h, qhh1);

        // load y
        const int8x16_t v1_0l = vld1q_s8(y0->qs);
        const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
        const int8x16_t v1_1l = vld1q_s8(y1->qs);
        const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);

#if defined(__ARM_FEATURE_DOTPROD)
        sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(
                        vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l),
                        vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), GGML_FP16_TO_FP32(x0->d)*y0->d);
        sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(
                        vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l),
                        vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_FP16_TO_FP32(x1->d)*y1->d);
#else
        const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0lf), vget_low_s8 (v1_0l));
        const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0lf), vget_high_s8(v1_0l));
        const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hf), vget_low_s8 (v1_0h));
        const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hf), vget_high_s8(v1_0h));

        const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1lf), vget_low_s8 (v1_1l));
        const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1lf), vget_high_s8(v1_1l));
        const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1hf), vget_low_s8 (v1_1h));
        const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1hf), vget_high_s8(v1_1h));

        const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h));
        const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h));
        const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h));
        const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h));

        sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), GGML_FP16_TO_FP32(x0->d)*y0->d);
        sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), GGML_FP16_TO_FP32(x1->d)*y1->d);
#endif
    }

    *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs0 + summs1;
#elif defined(__wasm_simd128__)
    v128_t sumv = wasm_f32x4_splat(0.0f);

    float summs = 0.0f;

    uint32_t qh;
    uint64_t tmp[4];

    // TODO: check if unrolling this is better
    for (int i = 0; i < nb; ++i) {
        const block_q5_1 * restrict x0 = &x[i];
        const block_q8_1 * restrict y0 = &y[i];

        summs += GGML_FP16_TO_FP32(x0->m) * y0->s;

        const v128_t m4b = wasm_i8x16_splat(0x0F);

        // extract the 5th bit
        memcpy(&qh, x0->qh, sizeof(qh));

        tmp[0] = table_b2b_0[(qh >>  0) & 0xFF];
        tmp[1] = table_b2b_0[(qh >>  8) & 0xFF];
        tmp[2] = table_b2b_0[(qh >> 16) & 0xFF];
        tmp[3] = table_b2b_0[(qh >> 24)       ];

        const v128_t qhl = wasm_v128_load(tmp + 0);
        const v128_t qhh = wasm_v128_load(tmp + 2);

        const v128_t v0 = wasm_v128_load(x0->qs);

        // 4-bit -> 8-bit
        const v128_t v0l = wasm_v128_and (v0, m4b);
        const v128_t v0h = wasm_u8x16_shr(v0, 4);

        // add high bit
        const v128_t v0lf = wasm_v128_or(v0l, qhl);
        const v128_t v0hf = wasm_v128_or(v0h, qhh);

        // load y
        const v128_t v1l = wasm_v128_load(y0->qs);
        const v128_t v1h = wasm_v128_load(y0->qs + 16);

        // int8x16 -> int16x8
        const v128_t v0lfl = wasm_i16x8_extend_low_i8x16 (v0lf);
        const v128_t v0lfh = wasm_i16x8_extend_high_i8x16(v0lf);
        const v128_t v0hfl = wasm_i16x8_extend_low_i8x16 (v0hf);
        const v128_t v0hfh = wasm_i16x8_extend_high_i8x16(v0hf);

        const v128_t v1ll = wasm_i16x8_extend_low_i8x16 (v1l);
        const v128_t v1lh = wasm_i16x8_extend_high_i8x16(v1l);
        const v128_t v1hl = wasm_i16x8_extend_low_i8x16 (v1h);
        const v128_t v1hh = wasm_i16x8_extend_high_i8x16(v1h);

        // dot product
        sumv = wasm_f32x4_add(sumv,
                wasm_f32x4_mul(wasm_f32x4_convert_i32x4(wasm_i32x4_add(
                        wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0lfl, v1ll),
                                       wasm_i32x4_dot_i16x8(v0lfh, v1lh)),
                        wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0hfl, v1hl),
                                       wasm_i32x4_dot_i16x8(v0hfh, v1hh)))),
                    wasm_f32x4_splat(GGML_FP16_TO_FP32(x0->d) * y0->d)));
    }

    *s = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) +
         wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3) + summs;
#elif defined(__AVX2__)
    // Initialize accumulator with zeros
    __m256 acc = _mm256_setzero_ps();

    float summs = 0.0f;

    // Main loop
    for (int i = 0; i < nb; i++) {
        const __m256 dx = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d));

        summs += GGML_FP16_TO_FP32(x[i].m) * y[i].s;

        __m256i bx = bytes_from_nibbles_32(x[i].qs);
        __m256i bxhi = bytes_from_bits_32(x[i].qh);
        bxhi = _mm256_and_si256(bxhi, _mm256_set1_epi8(0x10));
        bx = _mm256_or_si256(bx, bxhi);

        const __m256 dy = _mm256_set1_ps(y[i].d);
        const __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);

        const __m256 q = mul_sum_us8_pairs_float(bx, by);

        acc = _mm256_fmadd_ps(q, _mm256_mul_ps(dx, dy), acc);
    }

    *s = hsum_float_8(acc) + summs;
#elif defined(__AVX__)
    // Initialize accumulator with zeros
    __m256 acc = _mm256_setzero_ps();
    __m128i mask = _mm_set1_epi8(0x10);

    float summs = 0.0f;

    // Main loop
    for (int i = 0; i < nb; i++) {
        const __m256 dx = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d));

        summs += GGML_FP16_TO_FP32(x[i].m) * y[i].s;

        __m256i bx = bytes_from_nibbles_32(x[i].qs);
        const __m256i bxhi = bytes_from_bits_32(x[i].qh);
        __m128i bxhil = _mm256_castsi256_si128(bxhi);
        __m128i bxhih = _mm256_extractf128_si256(bxhi, 1);
        bxhil = _mm_and_si128(bxhil, mask);
        bxhih = _mm_and_si128(bxhih, mask);
        __m128i bxl = _mm256_castsi256_si128(bx);
        __m128i bxh = _mm256_extractf128_si256(bx, 1);
        bxl = _mm_or_si128(bxl, bxhil);
        bxh = _mm_or_si128(bxh, bxhih);
        bx = MM256_SET_M128I(bxh, bxl);

        const __m256 dy = _mm256_set1_ps(y[i].d);
        const __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);

        const __m256 q = mul_sum_us8_pairs_float(bx, by);

        acc = _mm256_add_ps(_mm256_mul_ps(q, _mm256_mul_ps(dx, dy)), acc);
    }

    *s = hsum_float_8(acc) + summs;
#else
    // scalar
    float sumf = 0.0;

    for (int i = 0; i < nb; i++) {
        uint32_t qh;
        memcpy(&qh, x[i].qh, sizeof(qh));

        int sumi = 0;

        for (int j = 0; j < qk/2; ++j) {
            const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10;
            const uint8_t xh_1 = ((qh >> (j + 12))    ) & 0x10;

            const int32_t x0 = (x[i].qs[j] & 0xF) | xh_0;
            const int32_t x1 = (x[i].qs[j] >> 4) | xh_1;

            sumi += (x0 * y[i].qs[j]) + (x1 * y[i].qs[j + qk/2]);
        }

        sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s;
    }

    *s = sumf;
#endif
}
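
// q8_0 · q8_0 dot product: both operands are already signed 8-bit quants, so
// each block reduces to a plain int8 dot product scaled by d_x*d_y.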
static void ggml_vec_dot_q8_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
    const int qk = QK8_0;
    const int nb = n / qk;

    assert(n % qk == 0);
    assert(nb % 2 == 0);

    const block_q8_0 * restrict x = vx;
    const block_q8_0 * restrict y = vy;

#if defined(__ARM_NEON)
    float32x4_t sumv0 = vdupq_n_f32(0.0f);
    float32x4_t sumv1 = vdupq_n_f32(0.0f);

    for (int i = 0; i < nb; i += 2) {
        const block_q8_0 * restrict x0 = &x[i + 0];
        const block_q8_0 * restrict x1 = &x[i + 1];
        const block_q8_0 * restrict y0 = &y[i + 0];
        const block_q8_0 * restrict y1 = &y[i + 1];

        const int8x16_t x0_0 = vld1q_s8(x0->qs);
        const int8x16_t x0_1 = vld1q_s8(x0->qs + 16);
        const int8x16_t x1_0 = vld1q_s8(x1->qs);
        const int8x16_t x1_1 = vld1q_s8(x1->qs + 16);

        // load y
        const int8x16_t y0_0 = vld1q_s8(y0->qs);
        const int8x16_t y0_1 = vld1q_s8(y0->qs + 16);
        const int8x16_t y1_0 = vld1q_s8(y1->qs);
        const int8x16_t y1_1 = vld1q_s8(y1->qs + 16);

#if defined(__ARM_FEATURE_DOTPROD)
        sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(
                        vdotq_s32(vdupq_n_s32(0), x0_0, y0_0),
                        vdotq_s32(vdupq_n_s32(0), x0_1, y0_1))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
        sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(
                        vdotq_s32(vdupq_n_s32(0), x1_0, y1_0),
                        vdotq_s32(vdupq_n_s32(0), x1_1, y1_1))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
#else
        const int16x8_t p0_0 = vmull_s8(vget_low_s8 (x0_0), vget_low_s8 (y0_0));
        const int16x8_t p0_1 = vmull_s8(vget_high_s8(x0_0), vget_high_s8(y0_0));
        const int16x8_t p0_2 = vmull_s8(vget_low_s8 (x0_1), vget_low_s8 (y0_1));
        const int16x8_t p0_3 = vmull_s8(vget_high_s8(x0_1), vget_high_s8(y0_1));

        const int16x8_t p1_0 = vmull_s8(vget_low_s8 (x1_0), vget_low_s8 (y1_0));
        const int16x8_t p1_1 = vmull_s8(vget_high_s8(x1_0), vget_high_s8(y1_0));
        const int16x8_t p1_2 = vmull_s8(vget_low_s8 (x1_1), vget_low_s8 (y1_1));
        const int16x8_t p1_3 = vmull_s8(vget_high_s8(x1_1), vget_high_s8(y1_1));

        const int32x4_t p0 = vaddq_s32(vpaddlq_s16(p0_0), vpaddlq_s16(p0_1));
        const int32x4_t p1 = vaddq_s32(vpaddlq_s16(p0_2), vpaddlq_s16(p0_3));
        const int32x4_t p2 = vaddq_s32(vpaddlq_s16(p1_0), vpaddlq_s16(p1_1));
        const int32x4_t p3 = vaddq_s32(vpaddlq_s16(p1_2), vpaddlq_s16(p1_3));

        sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(p0, p1)), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
        sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(p2, p3)), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
#endif
    }

    *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
#elif defined(__AVX2__) || defined(__AVX__)
    // Initialize accumulator with zeros
    __m256 acc = _mm256_setzero_ps();

    // Main loop
    for (int i = 0; i < nb; ++i) {
        // Compute combined scale for the block
        const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d));
        __m256i bx = _mm256_loadu_si256((const __m256i *)x[i].qs);
        __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);

        const __m256 q = mul_sum_i8_pairs_float(bx, by);

        // Multiply q with scale and accumulate
#if defined(__AVX2__)
        acc = _mm256_fmadd_ps( d, q, acc );
#else
        acc = _mm256_add_ps( _mm256_mul_ps( d, q ), acc );
#endif
    }

    *s = hsum_float_8(acc);
#else
    // scalar
    float sumf = 0.0;

    for (int i = 0; i < nb; i++) {
        int sumi = 0;

        for (int j = 0; j < qk; j++) {
            sumi += x[i].qs[j]*y[i].qs[j];
        }

        sumf += sumi*(GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d));
    }

    *s = sumf;
#endif
}
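
// The remaining helpers in this section operate on plain f32/f16 vectors.
// ggml_vec_dot_f16_unroll computes several dot products against the same y at
// once, so each y vector load is shared across GGML_VEC_DOT_UNROLL rows of x.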
// compute GGML_VEC_DOT_UNROLL dot products at once
// xs - x row stride in bytes
inline static void ggml_vec_dot_f16_unroll(const int n, const int xs, float * restrict s, void * restrict xv, ggml_fp16_t * restrict y) {
    ggml_float sumf[GGML_VEC_DOT_UNROLL] = { 0.0 };

    ggml_fp16_t * restrict x[GGML_VEC_DOT_UNROLL];

    for (int i = 0; i < GGML_VEC_DOT_UNROLL; ++i) {
        x[i] = (ggml_fp16_t *) ((char *) xv + i*xs);
    }

#if defined(GGML_SIMD)
    const int np = (n & ~(GGML_F16_STEP - 1));

    GGML_F16_VEC sum[GGML_VEC_DOT_UNROLL][GGML_F16_ARR] = { { GGML_F16_VEC_ZERO } };

    GGML_F16_VEC ax[GGML_F16_ARR];
    GGML_F16_VEC ay[GGML_F16_ARR];

    for (int i = 0; i < np; i += GGML_F16_STEP) {
        for (int j = 0; j < GGML_F16_ARR; j++) {
            ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j);

            for (int k = 0; k < GGML_VEC_DOT_UNROLL; ++k) {
                ax[j] = GGML_F16_VEC_LOAD(x[k] + i + j*GGML_F16_EPR, j);

                sum[k][j] = GGML_F16_VEC_FMA(sum[k][j], ax[j], ay[j]);
            }
        }
    }

    // reduce sum0..sum3 to sum0
    for (int k = 0; k < GGML_VEC_DOT_UNROLL; ++k) {
        GGML_F16_VEC_REDUCE(sumf[k], sum[k]);
    }

    // leftovers
    for (int i = np; i < n; ++i) {
        for (int j = 0; j < GGML_VEC_DOT_UNROLL; ++j) {
            sumf[j] += (ggml_float)(GGML_FP16_TO_FP32(x[j][i])*GGML_FP16_TO_FP32(y[i]));
        }
    }
#else
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < GGML_VEC_DOT_UNROLL; ++j) {
            sumf[j] += (ggml_float)(GGML_FP16_TO_FP32(x[j][i])*GGML_FP16_TO_FP32(y[i]));
        }
    }
#endif

    for (int i = 0; i < GGML_VEC_DOT_UNROLL; ++i) {
        s[i] = sumf[i];
    }
}
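
// ggml_vec_mad_f32 is the classic axpy kernel: y[i] += v*x[i].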
inline static void ggml_vec_mad_f32(const int n, float * restrict y, const float * restrict x, const float v) {
#if defined(GGML_SIMD)
    const int np = (n & ~(GGML_F32_STEP - 1));

    GGML_F32_VEC vx = GGML_F32_VEC_SET1(v);

    GGML_F32_VEC ax[GGML_F32_ARR];
    GGML_F32_VEC ay[GGML_F32_ARR];

    for (int i = 0; i < np; i += GGML_F32_STEP) {
        for (int j = 0; j < GGML_F32_ARR; j++) {
            ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR);
            ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
            ay[j] = GGML_F32_VEC_FMA(ay[j], ax[j], vx);

            GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]);
        }
    }

    // leftovers
    for (int i = np; i < n; ++i) {
        y[i] += x[i]*v;
    }
#else
    // scalar
    for (int i = 0; i < n; ++i) {
        y[i] += x[i]*v;
    }
#endif
}
//inline static void ggml_vec_scale_f32(const int n, float * y, const float v) { for (int i = 0; i < n; ++i) y[i] *= v; }
inline static void ggml_vec_scale_f32(const int n, float * y, const float v) {
#if defined(GGML_USE_ACCELERATE)
    vDSP_vsmul(y, 1, &v, y, 1, n);
#elif defined(GGML_SIMD)
    const int np = (n & ~(GGML_F32_STEP - 1));

    GGML_F32_VEC vx = GGML_F32_VEC_SET1(v);

    GGML_F32_VEC ay[GGML_F32_ARR];

    for (int i = 0; i < np; i += GGML_F32_STEP) {
        for (int j = 0; j < GGML_F32_ARR; j++) {
            ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
            ay[j] = GGML_F32_VEC_MUL(ay[j], vx);

            GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]);
        }
    }

    // leftovers
    for (int i = np; i < n; ++i) {
        y[i] *= v;
    }
#else
    // scalar
    for (int i = 0; i < n; ++i) {
        y[i] *= v;
    }
#endif
}
inline static void ggml_vec_norm_f32 (const int n, float * s, const float * x) { ggml_vec_dot_f32(n, s, x, x); *s = sqrtf(*s); }
inline static void ggml_vec_sqr_f32  (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i]*x[i]; }
inline static void ggml_vec_sqrt_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = sqrtf(x[i]); }
inline static void ggml_vec_log_f32  (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = logf(x[i]); }
inline static void ggml_vec_abs_f32  (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = fabsf(x[i]); }
inline static void ggml_vec_sgn_f32  (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 1.f : ((x[i] < 0.f) ? -1.f : 0.f); }
inline static void ggml_vec_step_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 1.f : 0.f; }
inline static void ggml_vec_tanh_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = tanhf(x[i]); }
inline static void ggml_vec_elu_f32  (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? x[i] : expf(x[i])-1; }
inline static void ggml_vec_relu_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? x[i] : 0.f; }
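
// Activation helpers. ggml_gelu_f32 uses the tanh approximation
// 0.5*x*(1 + tanh(sqrt(2/pi)*(x + 0.044715*x^3))), ggml_gelu_quick_f32 the
// sigmoid approximation x*sigmoid(1.702*x), and ggml_silu_f32 is exact.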
static const float GELU_COEF_A     = 0.044715f;
static const float GELU_QUICK_COEF = -1.702f;
static const float SQRT_2_OVER_PI  = 0.79788456080286535587989211986876f;

inline static float ggml_gelu_f32(float x) {
    return 0.5f*x*(1.0f + tanhf(SQRT_2_OVER_PI*x*(1.0f + GELU_COEF_A*x*x)));
}
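
// table_gelu_f16 is precomputed for every possible f16 bit pattern (the tables
// are filled in elsewhere in this file, during ggml_init), so applying GELU to
// an f16 vector is just a 16-bit table lookup using the raw bits as the index.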
inline static void ggml_vec_gelu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
    const uint16_t * i16 = (const uint16_t *) x;
    for (int i = 0; i < n; ++i) {
        y[i] = table_gelu_f16[i16[i]];
    }
}

#ifdef GGML_GELU_FP16
inline static void ggml_vec_gelu_f32(const int n, float * y, const float * x) {
    uint16_t t;
    for (int i = 0; i < n; ++i) {
        ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
        memcpy(&t, &fp16, sizeof(uint16_t));
        y[i] = GGML_FP16_TO_FP32(table_gelu_f16[t]);
    }
}
#else
inline static void ggml_vec_gelu_f32(const int n, float * y, const float * x) {
    for (int i = 0; i < n; ++i) {
        y[i] = ggml_gelu_f32(x[i]);
    }
}
#endif
inline static float ggml_gelu_quick_f32(float x) {
    return x*(1.0f/(1.0f+expf(GELU_QUICK_COEF*x)));
}

//inline static void ggml_vec_gelu_quick_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
//    const uint16_t * i16 = (const uint16_t *) x;
//    for (int i = 0; i < n; ++i) {
//        y[i] = table_gelu_quick_f16[i16[i]];
//    }
//}

#ifdef GGML_GELU_QUICK_FP16
inline static void ggml_vec_gelu_quick_f32(const int n, float * y, const float * x) {
    uint16_t t;
    for (int i = 0; i < n; ++i) {
        ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
        memcpy(&t, &fp16, sizeof(uint16_t));
        y[i] = GGML_FP16_TO_FP32(table_gelu_quick_f16[t]);
    }
}
#else
inline static void ggml_vec_gelu_quick_f32(const int n, float * y, const float * x) {
    for (int i = 0; i < n; ++i) {
        y[i] = ggml_gelu_quick_f32(x[i]);
    }
}
#endif
// Sigmoid Linear Unit (SiLU) function
inline static float ggml_silu_f32(float x) {
    return x/(1.0f + expf(-x));
}
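
// Under GGML_SILU_FP16 (and similarly GGML_GELU_FP16 above) the f32 input is
// first rounded to f16 and the result is read from a lookup table; the
// backward pass below differentiates at the rounded point to stay consistent.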
//inline static void ggml_vec_silu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
//    const uint16_t * i16 = (const uint16_t *) x;
//    for (int i = 0; i < n; ++i) {
//        y[i] = table_silu_f16[i16[i]];
//    }
//}

#ifdef GGML_SILU_FP16
inline static void ggml_vec_silu_f32(const int n, float * y, const float * x) {
    uint16_t t;
    for (int i = 0; i < n; ++i) {
        ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
        memcpy(&t, &fp16, sizeof(uint16_t));
        y[i] = GGML_FP16_TO_FP32(table_silu_f16[t]);
    }
}
#else
inline static void ggml_vec_silu_f32(const int n, float * y, const float * x) {
    for (int i = 0; i < n; ++i) {
        y[i] = ggml_silu_f32(x[i]);
    }
}
#endif
inline static float ggml_silu_backward_f32(float x, float dy) {
    const float s = 1.0f/(1.0f + expf(-x));
    return dy*s*(1.0f + x*(1.0f - s));
}
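
// Derivation: with s = sigmoid(x), silu(x) = x*s and
//     d/dx silu(x) = s + x*s*(1 - s) = s*(1 + x*(1 - s)),
// which is the factor applied to dy above.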
#ifdef GGML_SILU_FP16
inline static void ggml_vec_silu_backward_f32(const int n, float * dx, const float * x, const float * dy) {
    for (int i = 0; i < n; ++i) {
        // the forward SiLU was not computed from x[i] but from its f16 equivalent,
        // so take the derivative at the f16 of x[i]:
        ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
        float usedx = GGML_FP16_TO_FP32(fp16);
        dx[i] = ggml_silu_backward_f32(usedx, dy[i]);
    }
}
#else
inline static void ggml_vec_silu_backward_f32(const int n, float * dx, const float * x, const float * dy) {
    for (int i = 0; i < n; ++i) {
        dx[i] = ggml_silu_backward_f32(x[i], dy[i]);
    }
}
#endif
inline static void ggml_vec_sum_f32(const int n, float * s, const float * x) {
#ifndef GGML_USE_ACCELERATE
    ggml_float sum = 0.0;
    for (int i = 0; i < n; ++i) {
        sum += (ggml_float)x[i];
    }
    *s = sum;
#else
    vDSP_sve(x, 1, s, n);
#endif
}

inline static void ggml_vec_sum_f32_ggf(const int n, ggml_float * s, const float * x) {
    ggml_float sum = 0.0;
    for (int i = 0; i < n; ++i) {
        sum += (ggml_float)x[i];
    }
    *s = sum;
}

inline static void ggml_vec_sum_f16_ggf(const int n, float * s, const ggml_fp16_t * x) {
    float sum = 0.0f;
    for (int i = 0; i < n; ++i) {
        sum += GGML_FP16_TO_FP32(x[i]);
    }
    *s = sum;
}

inline static void ggml_vec_max_f32(const int n, float * s, const float * x) {
#ifndef GGML_USE_ACCELERATE
    float max = -INFINITY;
    for (int i = 0; i < n; ++i) {
        max = MAX(max, x[i]);
    }
    *s = max;
#else
    vDSP_maxv(x, 1, s, n);
#endif
}

inline static void ggml_vec_norm_inv_f32(const int n, float * s, const float * x) {
    ggml_vec_norm_f32(n, s, x);
    *s = 1.f/(*s);
}
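
// Note: on ties the index of the *last* maximal element wins, since
// `max == x[i]` also holds when x[i] merely equals the running max.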
inline static void ggml_vec_argmax_f32(const int n, int * s, const float * x) {
    float max = -INFINITY;
    int idx = 0;
    for (int i = 0; i < n; ++i) {
        max = MAX(max, x[i]);
        if (max == x[i]) { idx = i; }
    }
    *s = idx;
}
//
// data types
//

static const int GGML_BLCK_SIZE[GGML_TYPE_COUNT] = {
    [GGML_TYPE_F32]  = 1,
    [GGML_TYPE_F16]  = 1,
    [GGML_TYPE_Q4_0] = QK4_0,
    [GGML_TYPE_Q4_1] = QK4_1,
    [GGML_TYPE_Q5_0] = QK5_0,
    [GGML_TYPE_Q5_1] = QK5_1,
    [GGML_TYPE_Q8_0] = QK8_0,
    [GGML_TYPE_Q8_1] = QK8_1,
#ifdef GGML_USE_K_QUANTS
    [GGML_TYPE_Q2_K] = QK_K,
    [GGML_TYPE_Q3_K] = QK_K,
    [GGML_TYPE_Q4_K] = QK_K,
    [GGML_TYPE_Q5_K] = QK_K,
    [GGML_TYPE_Q6_K] = QK_K,
    [GGML_TYPE_Q8_K] = QK_K,
#endif
    [GGML_TYPE_I8]   = 1,
    [GGML_TYPE_I16]  = 1,
    [GGML_TYPE_I32]  = 1,
};
static_assert(GGML_TYPE_COUNT == 19, "GGML_BLCK_SIZE is outdated");
static const size_t GGML_TYPE_SIZE[GGML_TYPE_COUNT] = {
    [GGML_TYPE_F32]  = sizeof(float),
    [GGML_TYPE_F16]  = sizeof(ggml_fp16_t),
    [GGML_TYPE_Q4_0] = sizeof(block_q4_0),
    [GGML_TYPE_Q4_1] = sizeof(block_q4_1),
    [GGML_TYPE_Q5_0] = sizeof(block_q5_0),
    [GGML_TYPE_Q5_1] = sizeof(block_q5_1),
    [GGML_TYPE_Q8_0] = sizeof(block_q8_0),
    [GGML_TYPE_Q8_1] = sizeof(block_q8_1),
#ifdef GGML_USE_K_QUANTS
    [GGML_TYPE_Q2_K] = sizeof(block_q2_K),
    [GGML_TYPE_Q3_K] = sizeof(block_q3_K),
    [GGML_TYPE_Q4_K] = sizeof(block_q4_K),
    [GGML_TYPE_Q5_K] = sizeof(block_q5_K),
    [GGML_TYPE_Q6_K] = sizeof(block_q6_K),
    [GGML_TYPE_Q8_K] = sizeof(block_q8_K),
#endif
    [GGML_TYPE_I8]   = sizeof(int8_t),
    [GGML_TYPE_I16]  = sizeof(int16_t),
    [GGML_TYPE_I32]  = sizeof(int32_t),
};
static_assert(GGML_TYPE_COUNT == 19, "GGML_TYPE_SIZE is outdated");
static const char * GGML_TYPE_NAME[GGML_TYPE_COUNT] = {
    [GGML_TYPE_F32]  = "f32",
    [GGML_TYPE_F16]  = "f16",
    [GGML_TYPE_Q4_0] = "q4_0",
    [GGML_TYPE_Q4_1] = "q4_1",
    [GGML_TYPE_Q5_0] = "q5_0",
    [GGML_TYPE_Q5_1] = "q5_1",
    [GGML_TYPE_Q8_0] = "q8_0",
    [GGML_TYPE_Q8_1] = "q8_1",
    [GGML_TYPE_Q2_K] = "q2_K",
    [GGML_TYPE_Q3_K] = "q3_K",
    [GGML_TYPE_Q4_K] = "q4_K",
    [GGML_TYPE_Q5_K] = "q5_K",
    [GGML_TYPE_Q6_K] = "q6_K",
    [GGML_TYPE_Q8_K] = "q8_K",
    [GGML_TYPE_I8]   = "i8",
    [GGML_TYPE_I16]  = "i16",
    [GGML_TYPE_I32]  = "i32",
};
static_assert(GGML_TYPE_COUNT == 19, "GGML_TYPE_NAME is outdated");
static bool GGML_IS_QUANTIZED[GGML_TYPE_COUNT] = {
    [GGML_TYPE_F32]  = false,
    [GGML_TYPE_F16]  = false,
    [GGML_TYPE_Q4_0] = true,
    [GGML_TYPE_Q4_1] = true,
    [GGML_TYPE_Q5_0] = true,
    [GGML_TYPE_Q5_1] = true,
    [GGML_TYPE_Q8_0] = true,
    [GGML_TYPE_Q8_1] = true,
    [GGML_TYPE_Q2_K] = true,
    [GGML_TYPE_Q3_K] = true,
    [GGML_TYPE_Q4_K] = true,
    [GGML_TYPE_Q5_K] = true,
    [GGML_TYPE_Q6_K] = true,
    [GGML_TYPE_Q8_K] = true,
    [GGML_TYPE_I8]   = false,
    [GGML_TYPE_I16]  = false,
    [GGML_TYPE_I32]  = false,
};
static_assert(GGML_TYPE_COUNT == 19, "GGML_IS_QUANTIZED is outdated");
static const char * GGML_OP_NAME[GGML_OP_COUNT] = {
    "NONE",

    "DUP",
    "ADD",
    "ADD1",
    "ACC",
    "SUB",
    "MUL",
    "DIV",
    "SQR",
    "SQRT",
    "LOG",
    "SUM",
    "SUM_ROWS",
    "MEAN",
    "ARGMAX",
    "REPEAT",
    "REPEAT_BACK",
    "SILU_BACK",
    "NORM",
    "RMS_NORM",
    "RMS_NORM_BACK",

    "MUL_MAT",
    "OUT_PROD",

    "SCALE",
    "SET",
    "CPY",
    "CONT",
    "RESHAPE",
    "VIEW",
    "PERMUTE",
    "TRANSPOSE",
    "GET_ROWS",
    "GET_ROWS_BACK",
    "DIAG",
    "DIAG_MASK_INF",
    "DIAG_MASK_ZERO",
    "SOFT_MAX",
    "SOFT_MAX_BACK",
    "ROPE",
    "ROPE_BACK",
    "ALIBI",
    "CLAMP",
    "CONV_1D",
    "CONV_2D",
    "POOL_1D",
    "POOL_2D",

    "FLASH_ATTN",
    "FLASH_FF",
    "FLASH_ATTN_BACK",
    "WIN_PART",
    "WIN_UNPART",

    "UNARY",

    "MAP_UNARY",
    "MAP_BINARY",

    "MAP_CUSTOM1",
    "MAP_CUSTOM2",
    "MAP_CUSTOM3",

    "CROSS_ENTROPY_LOSS",
    "CROSS_ENTROPY_LOSS_BACK",
};

static_assert(GGML_OP_COUNT == 59, "GGML_OP_COUNT != 59");
static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = {
    "none",

    "x",
    "x+y",
    "x+y",
    "view(x,nb,offset)+=y->x",
    "x-y",
    "x*y",
    "x/y",
    "x^2",
    "√x",
    "log(x)",
    "Σx",
    "Σx_k",
    "Σx/n",
    "argmax(x)",
    "repeat(x)",
    "repeat_back(x)",
    "silu_back(x)",
    "norm(x)",
    "rms_norm(x)",
    "rms_norm_back(x)",

    "X*Y",
    "X*Y",

    "x*v",
    "y-\\>view(x)",
    "x-\\>y",
    "cont(x)",
    "reshape(x)",
    "view(x)",
    "permute(x)",
    "transpose(x)",
    "get_rows(x)",
    "get_rows_back(x)",
    "diag(x)",
    "diag_mask_inf(x)",
    "diag_mask_zero(x)",
    "soft_max(x)",
    "soft_max_back(x)",
    "rope(x)",
    "rope_back(x)",
    "alibi(x)",
    "clamp(x)",
    "conv_1d(x)",
    "conv_2d(x)",
    "pool_1d(x)",
    "pool_2d(x)",

    "flash_attn(x)",
    "flash_ff(x)",
    "flash_attn_back(x)",
    "win_part(x)",
    "win_unpart(x)",

    "unary(x)",

    "f(x)",
    "f(x,y)",

    "custom(x)",
    "custom(x,y)",
    "custom(x,y,z)",

    "cross_entropy_loss(x,y)",
    "cross_entropy_loss_back(x,y)",
};

static_assert(GGML_OP_COUNT == 59, "GGML_OP_COUNT != 59");
static_assert(GGML_OP_POOL_COUNT == 2, "GGML_OP_POOL_COUNT != 2");

static_assert(sizeof(struct ggml_object)%GGML_MEM_ALIGN == 0, "ggml_object size must be a multiple of GGML_MEM_ALIGN");
static_assert(sizeof(struct ggml_tensor)%GGML_MEM_ALIGN == 0, "ggml_tensor size must be a multiple of GGML_MEM_ALIGN");
// WARN:
// Misconfiguration here can lead to problems that are hard to reason about:
// * At best the op crashes or produces obvious nonsense.
// * At worst the output is subtly wrong in ways that are hard to perceive.
//
// An op has to enable INIT or FINALIZE when any of its branches needs that pass.
// Take care about compile options (e.g., GGML_USE_xxx).
static bool GGML_OP_HAS_INIT    [GGML_OP_COUNT] = { 0 };
static bool GGML_OP_HAS_FINALIZE[GGML_OP_COUNT] = { 0 };

static void ggml_setup_op_has_task_pass(void) {
    {   // INIT
        bool * p = GGML_OP_HAS_INIT;

        p[GGML_OP_ACC                ] = true;
        p[GGML_OP_MUL_MAT            ] = true;
        p[GGML_OP_OUT_PROD           ] = true;
        p[GGML_OP_SET                ] = true;
        p[GGML_OP_GET_ROWS_BACK      ] = true;
        p[GGML_OP_DIAG_MASK_INF      ] = true;
        p[GGML_OP_DIAG_MASK_ZERO     ] = true;
        p[GGML_OP_CONV_1D            ] = true;
        p[GGML_OP_CONV_2D            ] = true;
        p[GGML_OP_FLASH_ATTN_BACK    ] = true;
        p[GGML_OP_CROSS_ENTROPY_LOSS ] = true;
    }

    {   // FINALIZE
        bool * p = GGML_OP_HAS_FINALIZE;

        p[GGML_OP_CROSS_ENTROPY_LOSS ] = true;
    }
}
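
// These tables are consulted during graph execution: an op whose entry is set
// gets an extra INIT (and/or FINALIZE) task pass in addition to the regular
// COMPUTE pass, typically to zero or stage scratch memory before (or reduce
// partial results after) the per-thread compute work.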
//
// ggml context
//

struct ggml_context {
    size_t mem_size;
    void * mem_buffer;
    bool   mem_buffer_owned;
    bool   no_alloc;
    bool   no_alloc_save; // this is used to save the no_alloc state when using scratch buffers

    int    n_objects;

    struct ggml_object * objects_begin;
    struct ggml_object * objects_end;

    struct ggml_scratch scratch;
    struct ggml_scratch scratch_save;
};

struct ggml_context_container {
    bool used;

    struct ggml_context context;
};
//
// NUMA support
//

#define GGML_NUMA_MAX_NODES 8
#define GGML_NUMA_MAX_CPUS 512

struct ggml_numa_node {
    uint32_t cpus[GGML_NUMA_MAX_CPUS]; // hardware threads on this node
    uint32_t n_cpus;
};

struct ggml_numa_nodes {
    struct ggml_numa_node nodes[GGML_NUMA_MAX_NODES];
    uint32_t n_nodes;
    uint32_t total_cpus; // hardware threads on system
};
//
// ggml state
//

struct ggml_state {
    struct ggml_context_container contexts[GGML_MAX_CONTEXTS];
    struct ggml_numa_nodes numa;
};

// global state
static struct ggml_state g_state;
static atomic_int g_state_barrier = 0;

// barrier via spin lock
inline static void ggml_critical_section_start(void) {
    int processing = atomic_fetch_add(&g_state_barrier, 1);

    while (processing > 0) {
        // wait for other threads to finish
        atomic_fetch_sub(&g_state_barrier, 1);
        sched_yield(); // TODO: reconsider this
        processing = atomic_fetch_add(&g_state_barrier, 1);
    }
}

// TODO: make this somehow automatically executed
//       some sort of "sentry" mechanism
inline static void ggml_critical_section_end(void) {
    atomic_fetch_sub(&g_state_barrier, 1);
}
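
// How the spin lock works: atomic_fetch_add returns the barrier's previous
// value, so a thread that observes 0 had the lock to itself and proceeds with
// the counter at 1. Any thread that observes > 0 is contending; it undoes its
// increment, yields, and retries until it wins.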
void ggml_numa_init(void) {
    if (g_state.numa.n_nodes > 0) {
        fprintf(stderr, "ggml_numa_init: NUMA already initialized\n");

        return;
    }

#ifdef __linux__
    struct stat st;
    char path[256];
    int rv;

    // enumerate nodes
    while (g_state.numa.n_nodes < GGML_NUMA_MAX_NODES) {
        rv = snprintf(path, sizeof(path), "/sys/devices/system/node/node%u", g_state.numa.n_nodes);
        GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path));
        if (stat(path, &st) != 0) { break; }
        ++g_state.numa.n_nodes;
    }

    // enumerate CPUs
    while (g_state.numa.total_cpus < GGML_NUMA_MAX_CPUS) {
        rv = snprintf(path, sizeof(path), "/sys/devices/system/cpu/cpu%u", g_state.numa.total_cpus);
        GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path));
        if (stat(path, &st) != 0) { break; }
        ++g_state.numa.total_cpus;
    }

    GGML_PRINT_DEBUG("found %u numa nodes, %u CPUs\n", g_state.numa.n_nodes, g_state.numa.total_cpus);

    if (g_state.numa.n_nodes < 1 || g_state.numa.total_cpus < 1) {
        g_state.numa.n_nodes = 0;
        return;
    }

    for (uint32_t n = 0; n < g_state.numa.n_nodes; ++n) {
        struct ggml_numa_node * node = &g_state.numa.nodes[n];
        GGML_PRINT_DEBUG("CPUs on node %u:", n);
        node->n_cpus = 0;
        for (uint32_t c = 0; c < g_state.numa.total_cpus; ++c) {
            rv = snprintf(path, sizeof(path), "/sys/devices/system/node/node%u/cpu%u", n, c);
            GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path));
            if (stat(path, &st) == 0) {
                node->cpus[node->n_cpus++] = c;
                GGML_PRINT_DEBUG(" %u", c);
            }
        }
        GGML_PRINT_DEBUG("\n");
    }

    if (ggml_is_numa()) {
        FILE *fptr = fopen("/proc/sys/kernel/numa_balancing", "r");
        if (fptr != NULL) {
            char buf[42];
            if (fgets(buf, sizeof(buf), fptr) && strncmp(buf, "0\n", sizeof(buf)) != 0) {
                GGML_PRINT("WARNING: /proc/sys/kernel/numa_balancing is enabled, this has been observed to impair performance\n");
            }
            fclose(fptr);
        }
    }
#else
    // TODO
#endif
}

bool ggml_is_numa(void) {
    return g_state.numa.n_nodes > 1;
}
////////////////////////////////////////////////////////////////////////////////

void ggml_print_object(const struct ggml_object * obj) {
    GGML_PRINT(" - ggml_object: offset = %zu, size = %zu, next = %p\n",
            obj->offs, obj->size, (const void *) obj->next);
}

void ggml_print_objects(const struct ggml_context * ctx) {
    struct ggml_object * obj = ctx->objects_begin;

    GGML_PRINT("%s: objects in context %p:\n", __func__, (const void *) ctx);

    while (obj != NULL) {
        ggml_print_object(obj);
        obj = obj->next;
    }

    GGML_PRINT("%s: --- end ---\n", __func__);
}

int64_t ggml_nelements(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return tensor->ne[0]*tensor->ne[1]*tensor->ne[2]*tensor->ne[3];
}

int64_t ggml_nrows(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return tensor->ne[1]*tensor->ne[2]*tensor->ne[3];
}

size_t ggml_nbytes(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    // this should handle cases where the tensor is not contiguous in memory
    // probably just:
    //
    //     return tensor->ne[3]*tensor->nb[3]
    //
    // is enough, but just in case, adding the second part
    return MAX(tensor->ne[3]*tensor->nb[3], (ggml_nelements(tensor)*GGML_TYPE_SIZE[tensor->type])/GGML_BLCK_SIZE[tensor->type]);
}
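
// Worked example (illustrative, not from the original source): for a
// contiguous F32 tensor with ne = {4, 3, 2, 1}, the strides are
// nb = {4, 16, 48, 96} bytes, so ne[3]*nb[3] = 96 == 24 elements * 4 bytes,
// and both MAX operands agree. The second operand matters for quantized
// types, where GGML_BLCK_SIZE[type] elements share one block of
// GGML_TYPE_SIZE[type] bytes.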

size_t ggml_nbytes_split(const struct ggml_tensor * tensor, int nrows_split) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return (nrows_split*tensor->ne[0]*GGML_TYPE_SIZE[tensor->type])/GGML_BLCK_SIZE[tensor->type];
}

int ggml_blck_size(enum ggml_type type) {
    return GGML_BLCK_SIZE[type];
}

size_t ggml_type_size(enum ggml_type type) {
    return GGML_TYPE_SIZE[type];
}

float ggml_type_sizef(enum ggml_type type) {
    return ((float)(GGML_TYPE_SIZE[type]))/GGML_BLCK_SIZE[type];
}

const char * ggml_type_name(enum ggml_type type) {
    return GGML_TYPE_NAME[type];
}

const char * ggml_op_name(enum ggml_op op) {
    return GGML_OP_NAME[op];
}

const char * ggml_op_symbol(enum ggml_op op) {
    return GGML_OP_SYMBOL[op];
}

size_t ggml_element_size(const struct ggml_tensor * tensor) {
    return GGML_TYPE_SIZE[tensor->type];
}

static inline bool ggml_is_scalar(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return tensor->ne[0] == 1 && tensor->ne[1] == 1 && tensor->ne[2] == 1 && tensor->ne[3] == 1;
}

static inline bool ggml_is_vector(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return tensor->ne[1] == 1 && tensor->ne[2] == 1 && tensor->ne[3] == 1;
}

static inline bool ggml_is_matrix(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return tensor->ne[2] == 1 && tensor->ne[3] == 1;
}

static inline bool ggml_can_mul_mat(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return (t0->ne[0]           == t1->ne[0]) &&
           (t1->ne[2]%t0->ne[2] == 0)         && // verify t0 is broadcastable
           (t1->ne[3]%t0->ne[3] == 0);
}
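
// Shape note (illustrative sketch, hypothetical sizes): ne[0] is the
// contiguous "row length", so two operands are multipliable when their rows
// have the same length; dims 2 and 3 of t1 only need to be integer multiples
// of t0's for broadcasting:
//
//   struct ggml_tensor * A = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 64, 32); // 32 rows of 64
//   struct ggml_tensor * B = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 64,  8); //  8 rows of 64
//   // ggml_can_mul_mat(A, B) holds; ggml_mul_mat(ctx, A, B) has ne = {32, 8}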

static inline bool ggml_can_out_prod(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return (t0->ne[1] == t1->ne[1]) &&
           (t0->ne[2] == t1->ne[2]) &&
           (t0->ne[3] == t1->ne[3]);
}

bool ggml_is_quantized(enum ggml_type type) {
    return GGML_IS_QUANTIZED[type];
}

enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype) {
    enum ggml_type wtype = GGML_TYPE_COUNT;

    switch (ftype) {
        case GGML_FTYPE_ALL_F32:              wtype = GGML_TYPE_F32;   break;
        case GGML_FTYPE_MOSTLY_F16:           wtype = GGML_TYPE_F16;   break;
        case GGML_FTYPE_MOSTLY_Q4_0:          wtype = GGML_TYPE_Q4_0;  break;
        case GGML_FTYPE_MOSTLY_Q4_1:          wtype = GGML_TYPE_Q4_1;  break;
        case GGML_FTYPE_MOSTLY_Q5_0:          wtype = GGML_TYPE_Q5_0;  break;
        case GGML_FTYPE_MOSTLY_Q5_1:          wtype = GGML_TYPE_Q5_1;  break;
        case GGML_FTYPE_MOSTLY_Q8_0:          wtype = GGML_TYPE_Q8_0;  break;
        case GGML_FTYPE_MOSTLY_Q2_K:          wtype = GGML_TYPE_Q2_K;  break;
        case GGML_FTYPE_MOSTLY_Q3_K:          wtype = GGML_TYPE_Q3_K;  break;
        case GGML_FTYPE_MOSTLY_Q4_K:          wtype = GGML_TYPE_Q4_K;  break;
        case GGML_FTYPE_MOSTLY_Q5_K:          wtype = GGML_TYPE_Q5_K;  break;
        case GGML_FTYPE_MOSTLY_Q6_K:          wtype = GGML_TYPE_Q6_K;  break;
        case GGML_FTYPE_UNKNOWN:              wtype = GGML_TYPE_COUNT; break;
        case GGML_FTYPE_MOSTLY_Q4_1_SOME_F16: wtype = GGML_TYPE_COUNT; break;
    }

    GGML_ASSERT(wtype != GGML_TYPE_COUNT);

    return wtype;
}

size_t ggml_tensor_overhead(void) {
    return GGML_OBJECT_SIZE + GGML_TENSOR_SIZE + 16;
}

bool ggml_is_transposed(const struct ggml_tensor * tensor) {
    return tensor->nb[0] > tensor->nb[1];
}

bool ggml_is_contiguous(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return
        tensor->nb[0] == GGML_TYPE_SIZE[tensor->type] &&
        tensor->nb[1] == (tensor->nb[0]*tensor->ne[0])/GGML_BLCK_SIZE[tensor->type] &&
        tensor->nb[2] == tensor->nb[1]*tensor->ne[1] &&
        tensor->nb[3] == tensor->nb[2]*tensor->ne[2];
}
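
// Example (illustrative, not from the original source): a freshly allocated
// F16 tensor with ne = {8, 4, 1, 1} has nb = {2, 16, 64, 64} and passes the
// check above; swapping the first two strides (as ggml_transpose does) yields
// nb = {16, 2, 64, 64}, so nb[0] > nb[1] and the tensor is reported as
// transposed and no longer contiguous.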

static inline bool ggml_is_contiguous_except_dim_1(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return
        tensor->nb[0] == GGML_TYPE_SIZE[tensor->type] &&
        tensor->nb[2] == tensor->nb[1]*tensor->ne[1] &&
        tensor->nb[3] == tensor->nb[2]*tensor->ne[2];
}

bool ggml_is_permuted(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return tensor->nb[0] > tensor->nb[1] || tensor->nb[1] > tensor->nb[2] || tensor->nb[2] > tensor->nb[3];
}

static inline bool ggml_is_padded_1d(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return
        tensor->nb[0] == GGML_TYPE_SIZE[tensor->type] &&
        tensor->nb[2] == tensor->nb[1]*tensor->ne[1] &&
        tensor->nb[3] == tensor->nb[2]*tensor->ne[2];
}

static inline bool ggml_are_same_shape(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return
        (t0->ne[0] == t1->ne[0]) &&
        (t0->ne[1] == t1->ne[1]) &&
        (t0->ne[2] == t1->ne[2]) &&
        (t0->ne[3] == t1->ne[3]);
}

// check if t1 can be represented as a repetition of t0
static inline bool ggml_can_repeat(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return
        (t1->ne[0]%t0->ne[0] == 0) &&
        (t1->ne[1]%t0->ne[1] == 0) &&
        (t1->ne[2]%t0->ne[2] == 0) &&
        (t1->ne[3]%t0->ne[3] == 0);
}
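
// Example (illustrative): t0 with ne = {4, 1, 1, 1} can repeat into t1 with
// ne = {4, 3, 2, 1}, since every dimension of t1 is an integer multiple of
// the corresponding dimension of t0; this is the broadcasting rule that
// ggml_repeat and the *_impl checks above rely on.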

static inline bool ggml_can_repeat_rows(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return (t0->ne[0] == t1->ne[0]) && ggml_can_repeat(t0, t1);
}

static inline int ggml_up32(int n) {
    return (n + 31) & ~31;
}

//static inline int ggml_up64(int n) {
//    return (n + 63) & ~63;
//}

static inline int ggml_up(int n, int m) {
    // assert m is a power of 2
    GGML_ASSERT((m & (m - 1)) == 0);
    return (n + m - 1) & ~(m - 1);
}
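
// Example: ggml_up(13, 8) == 16 and ggml_up(16, 8) == 16. The bit trick only
// works when m is a power of two (so that ~(m - 1) is a valid mask), which is
// exactly what the assert enforces.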

// assert that pointer is aligned to GGML_MEM_ALIGN
#define ggml_assert_aligned(ptr) \
    GGML_ASSERT(((uintptr_t) (ptr))%GGML_MEM_ALIGN == 0)

////////////////////////////////////////////////////////////////////////////////

struct ggml_context * ggml_init(struct ggml_init_params params) {
    // make this function thread safe
    ggml_critical_section_start();

    static bool is_first_call = true;

    if (is_first_call) {
        // initialize time system (required on Windows)
        ggml_time_init();

        // initialize GELU, Quick GELU, SILU and EXP F32 tables
        {
            const uint64_t t_start = ggml_time_us(); UNUSED(t_start);

            ggml_fp16_t ii;
            for (int i = 0; i < (1 << 16); ++i) {
                uint16_t ui = i;
                memcpy(&ii, &ui, sizeof(ii));
                const float f = table_f32_f16[i] = GGML_COMPUTE_FP16_TO_FP32(ii);
                table_gelu_f16[i]       = GGML_FP32_TO_FP16(ggml_gelu_f32(f));
                table_gelu_quick_f16[i] = GGML_FP32_TO_FP16(ggml_gelu_quick_f32(f));
                table_silu_f16[i]       = GGML_FP32_TO_FP16(ggml_silu_f32(f));
                table_exp_f16[i]        = GGML_FP32_TO_FP16(expf(f));
            }

            const uint64_t t_end = ggml_time_us(); UNUSED(t_end);

            GGML_PRINT_DEBUG("%s: GELU, Quick GELU, SILU and EXP tables initialized in %f ms\n", __func__, (t_end - t_start)/1000.0f);
        }

        // initialize g_state
        {
            const uint64_t t_start = ggml_time_us(); UNUSED(t_start);

            g_state = (struct ggml_state) {
                /*.contexts =*/ { { 0 } },
                /*.numa =*/ {
                    .n_nodes = 0,
                    .total_cpus = 0,
                },
            };

            for (int i = 0; i < GGML_MAX_CONTEXTS; ++i) {
                g_state.contexts[i].used = false;
            }

            const uint64_t t_end = ggml_time_us(); UNUSED(t_end);

            GGML_PRINT_DEBUG("%s: g_state initialized in %f ms\n", __func__, (t_end - t_start)/1000.0f);
        }

#if defined(GGML_USE_CUBLAS)
        ggml_init_cublas();
#elif defined(GGML_USE_CLBLAST)
        ggml_cl_init();
#endif

        ggml_setup_op_has_task_pass();

        is_first_call = false;
    }

    // find non-used context in g_state
    struct ggml_context * ctx = NULL;

    for (int i = 0; i < GGML_MAX_CONTEXTS; i++) {
        if (!g_state.contexts[i].used) {
            g_state.contexts[i].used = true;
            ctx = &g_state.contexts[i].context;

            GGML_PRINT_DEBUG("%s: found unused context %d\n", __func__, i);
            break;
        }
    }

    if (ctx == NULL) {
        GGML_PRINT_DEBUG("%s: no unused context found\n", __func__);

        ggml_critical_section_end();

        return NULL;
    }

    const size_t mem_size = (params.mem_size + GGML_MEM_ALIGN - 1) & ~(GGML_MEM_ALIGN - 1);

    *ctx = (struct ggml_context) {
        /*.mem_size         =*/ mem_size,
        /*.mem_buffer       =*/ params.mem_buffer ? params.mem_buffer : GGML_ALIGNED_MALLOC(mem_size),
        /*.mem_buffer_owned =*/ params.mem_buffer ? false : true,
        /*.no_alloc         =*/ params.no_alloc,
        /*.no_alloc_save    =*/ params.no_alloc,
        /*.n_objects        =*/ 0,
        /*.objects_begin    =*/ NULL,
        /*.objects_end      =*/ NULL,
        /*.scratch          =*/ { 0, 0, NULL, },
        /*.scratch_save     =*/ { 0, 0, NULL, },
    };

    GGML_ASSERT(ctx->mem_buffer != NULL);

    ggml_assert_aligned(ctx->mem_buffer);

    GGML_PRINT_DEBUG("%s: context initialized\n", __func__);

    ggml_critical_section_end();

    return ctx;
}
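
// Usage sketch (illustrative; the arena size is an arbitrary assumption):
//
//   struct ggml_init_params params = {
//       /*.mem_size   =*/ 16*1024*1024, // 16 MB pool for objects + tensor data
//       /*.mem_buffer =*/ NULL,         // let ggml allocate (and later free) the pool
//       /*.no_alloc   =*/ false,
//   };
//   struct ggml_context * ctx = ggml_init(params);
//   // ... create tensors and build graphs in ctx ...
//   ggml_free(ctx);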

void ggml_free(struct ggml_context * ctx) {
    // make this function thread safe
    ggml_critical_section_start();

    bool found = false;

    for (int i = 0; i < GGML_MAX_CONTEXTS; i++) {
        if (&g_state.contexts[i].context == ctx) {
            g_state.contexts[i].used = false;

            GGML_PRINT_DEBUG("%s: context %d has been freed. memory used = %zu\n",
                    __func__, i, ggml_used_mem(ctx));

            if (ctx->mem_buffer_owned) {
                GGML_ALIGNED_FREE(ctx->mem_buffer);
            }

            found = true;
            break;
        }
    }

    if (!found) {
        GGML_PRINT_DEBUG("%s: context not found\n", __func__);
    }

    ggml_critical_section_end();
}

size_t ggml_used_mem(const struct ggml_context * ctx) {
    return ctx->objects_end == NULL ? 0 : ctx->objects_end->offs + ctx->objects_end->size;
}

size_t ggml_set_scratch(struct ggml_context * ctx, struct ggml_scratch scratch) {
    const size_t result = ctx->scratch.data ? ctx->scratch.offs : 0;

    ctx->scratch = scratch;

    return result;
}
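
// Usage sketch (illustrative, not from the original source): redirect
// intermediate tensor data into a caller-provided scratch buffer, then
// restore normal allocation; the buffer size is an arbitrary assumption.
// Field order {offs, size, data} follows the initializers used above.
//
//   static char scratch_buf[4*1024*1024];
//   ggml_set_scratch(ctx, (struct ggml_scratch) { 0, sizeof(scratch_buf), scratch_buf });
//   // ... ops created here place their payloads in scratch_buf ...
//   ggml_set_scratch(ctx, (struct ggml_scratch) { 0, 0, NULL }); // back to the main pool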

bool ggml_get_no_alloc(struct ggml_context * ctx) {
    return ctx->no_alloc;
}

void ggml_set_no_alloc(struct ggml_context * ctx, bool no_alloc) {
    ctx->no_alloc = no_alloc;
}

void * ggml_get_mem_buffer(const struct ggml_context * ctx) {
    return ctx->mem_buffer;
}

size_t ggml_get_mem_size(const struct ggml_context * ctx) {
    return ctx->mem_size;
}

size_t ggml_get_max_tensor_size(const struct ggml_context * ctx) {
    size_t max_size = 0;

    struct ggml_object * obj = ctx->objects_begin;

    while (obj != NULL) {
        struct ggml_tensor * tensor = (struct ggml_tensor *) ((char *) ctx->mem_buffer + obj->offs);

        const size_t size = ggml_nbytes(tensor);

        if (max_size < size) {
            max_size = size;
        }

        obj = obj->next;
    }

    return max_size;
}

// IMPORTANT:
// when creating "opt" tensors, always save and load the scratch buffer
// this is an error prone process, but it is necessary to support inplace
// operators when using scratch buffers
// TODO: implement a better way
static void ggml_scratch_save(struct ggml_context * ctx) {
    // this is needed to allow opt tensors to store their data
    // TODO: again, need to find a better way
    ctx->no_alloc_save = ctx->no_alloc;
    ctx->no_alloc      = false;

    ctx->scratch_save = ctx->scratch;
    ctx->scratch.data = NULL;
}

static void ggml_scratch_load(struct ggml_context * ctx) {
    ctx->no_alloc = ctx->no_alloc_save;

    ctx->scratch = ctx->scratch_save;
}

////////////////////////////////////////////////////////////////////////////////

static struct ggml_tensor * ggml_new_tensor_impl(
        struct ggml_context * ctx,
        enum   ggml_type      type,
        int                   n_dims,
        const int64_t       * ne,
        void                * data) {
    // always insert objects at the end of the context's memory pool
    struct ggml_object * obj_cur = ctx->objects_end;

    const size_t cur_offs = obj_cur == NULL ? 0 : obj_cur->offs;
    const size_t cur_size = obj_cur == NULL ? 0 : obj_cur->size;
    const size_t cur_end  = cur_offs + cur_size;

    size_t size_needed = 0;

    if (data == NULL && !ctx->no_alloc) {
        size_needed += GGML_TYPE_SIZE[type]*(ne[0]/GGML_BLCK_SIZE[type]);
        for (int i = 1; i < n_dims; i++) {
            size_needed *= ne[i];
        }
        // align to GGML_MEM_ALIGN
        size_needed = ((size_needed + GGML_MEM_ALIGN - 1)/GGML_MEM_ALIGN)*GGML_MEM_ALIGN;
    }

    char * const mem_buffer = ctx->mem_buffer;
    struct ggml_object * const obj_new = (struct ggml_object *)(mem_buffer + cur_end);

    if (ctx->scratch.data == NULL || data != NULL) {
        size_needed += GGML_TENSOR_SIZE;

        if (cur_end + size_needed + GGML_OBJECT_SIZE > ctx->mem_size) {
            GGML_PRINT("%s: not enough space in the context's memory pool (needed %zu, available %zu)\n",
                    __func__, cur_end + size_needed + GGML_OBJECT_SIZE, ctx->mem_size);
            assert(false);
            return NULL;
        }

        *obj_new = (struct ggml_object) {
            .offs = cur_end + GGML_OBJECT_SIZE,
            .size = size_needed,
            .next = NULL,
        };
    } else {
        if (ctx->scratch.offs + size_needed > ctx->scratch.size) {
            GGML_PRINT("%s: not enough space in the scratch memory pool (needed %zu, available %zu)\n",
                    __func__, ctx->scratch.offs + size_needed, ctx->scratch.size);
            assert(false);
            return NULL;
        }

        if (cur_end + GGML_TENSOR_SIZE + GGML_OBJECT_SIZE > ctx->mem_size) {
            GGML_PRINT("%s: not enough space in the context's memory pool (needed %zu, available %zu)\n",
                    __func__, cur_end + GGML_TENSOR_SIZE + GGML_OBJECT_SIZE, ctx->mem_size);
            assert(false);
            return NULL;
        }

        data = (char * const) ctx->scratch.data + ctx->scratch.offs;

        *obj_new = (struct ggml_object) {
            .offs = cur_end + GGML_OBJECT_SIZE,
            .size = GGML_TENSOR_SIZE,
            .next = NULL,
        };

        //printf("scratch offs = %zu, size_needed = %zu\n", ctx->scratch.offs, size_needed);

        ctx->scratch.offs += size_needed;
    }

    if (obj_cur != NULL) {
        obj_cur->next = obj_new;
    } else {
        // this is the first object in this context
        ctx->objects_begin = obj_new;
    }

    ctx->objects_end = obj_new;

    //printf("%s: inserted new object at %zu, size = %zu\n", __func__, cur_end, obj_new->size);

    struct ggml_tensor * const result = (struct ggml_tensor *)(mem_buffer + obj_new->offs);

    ggml_assert_aligned(result);

    *result = (struct ggml_tensor) {
        /*.type         =*/ type,
        /*.backend      =*/ GGML_BACKEND_CPU,
        /*.n_dims       =*/ n_dims,
        /*.ne           =*/ { 1, 1, 1, 1 },
        /*.nb           =*/ { 0, 0, 0, 0 },
        /*.op           =*/ GGML_OP_NONE,
        /*.op_params    =*/ { 0 },
        /*.is_param     =*/ false,
        /*.grad         =*/ NULL,
        /*.src          =*/ { NULL },
        /*.perf_runs    =*/ 0,
        /*.perf_cycles  =*/ 0,
        /*.perf_time_us =*/ 0,
        /*.data         =*/ (data == NULL && !ctx->no_alloc) ? (void *)(result + 1) : data,
        /*.name         =*/ { 0 },
        /*.extra        =*/ NULL,
        /*.padding      =*/ { 0 },
    };

    // TODO: this should not be needed as long as we don't rely on aligned SIMD loads
    //ggml_assert_aligned(result->data);

    for (int i = 0; i < n_dims; i++) {
        result->ne[i] = ne[i];
    }

    result->nb[0] = GGML_TYPE_SIZE[type];
    result->nb[1] = result->nb[0]*(result->ne[0]/GGML_BLCK_SIZE[type]);
    for (int i = 2; i < GGML_MAX_DIMS; i++) {
        result->nb[i] = result->nb[i - 1]*result->ne[i - 1];
    }

    ctx->n_objects++;

    return result;
}
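
// Memory layout note (descriptive, based on the code above): each allocation
// in the pool is laid out as [ggml_object | ggml_tensor | data], where
// obj->offs points at the tensor header and, when data == NULL and allocation
// is enabled, the payload starts right after the header ((void *)(result + 1)).
// When a scratch buffer is active, only the header lives in the pool and the
// payload is carved out of ctx->scratch instead.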

static void ggml_set_op_params(struct ggml_tensor * tensor, const void * params, size_t params_size) {
    assert(params_size <= GGML_MAX_OP_PARAMS);
    memcpy(tensor->op_params, params, params_size);
}

static int32_t ggml_get_op_params_i32(const struct ggml_tensor * tensor, uint32_t i) {
    assert(i < GGML_MAX_OP_PARAMS / sizeof(int32_t));
    return ((const int32_t *)(tensor->op_params))[i];
}

static void ggml_set_op_params_i32(struct ggml_tensor * tensor, uint32_t i, int32_t value) {
    assert(i < GGML_MAX_OP_PARAMS / sizeof(int32_t));
    ((int32_t *)(tensor->op_params))[i] = value;
}

struct ggml_tensor * ggml_new_tensor(
        struct ggml_context * ctx,
        enum   ggml_type      type,
        int                   n_dims,
        const int64_t       * ne) {
    return ggml_new_tensor_impl(ctx, type, n_dims, ne, NULL);
}

struct ggml_tensor * ggml_new_tensor_1d(
        struct ggml_context * ctx,
        enum   ggml_type      type,
        int64_t               ne0) {
    return ggml_new_tensor(ctx, type, 1, &ne0);
}

struct ggml_tensor * ggml_new_tensor_2d(
        struct ggml_context * ctx,
        enum   ggml_type      type,
        int64_t               ne0,
        int64_t               ne1) {
    const int64_t ne[2] = { ne0, ne1 };
    return ggml_new_tensor(ctx, type, 2, ne);
}

struct ggml_tensor * ggml_new_tensor_3d(
        struct ggml_context * ctx,
        enum   ggml_type      type,
        int64_t               ne0,
        int64_t               ne1,
        int64_t               ne2) {
    const int64_t ne[3] = { ne0, ne1, ne2 };
    return ggml_new_tensor(ctx, type, 3, ne);
}

struct ggml_tensor * ggml_new_tensor_4d(
        struct ggml_context * ctx,
        enum   ggml_type      type,
        int64_t               ne0,
        int64_t               ne1,
        int64_t               ne2,
        int64_t               ne3) {
    const int64_t ne[4] = { ne0, ne1, ne2, ne3 };
    return ggml_new_tensor(ctx, type, 4, ne);
}
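
// Example (illustrative; n_in/n_out are hypothetical sizes): the *_1d..*_4d
// helpers just fill the ne array and forward to ggml_new_tensor:
//
//   struct ggml_tensor * w = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_in, n_out);
//   struct ggml_tensor * x = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_in);
//   // equivalent to ggml_new_tensor(ctx, GGML_TYPE_F32, 2, (int64_t[]){ n_in, n_out }), etc.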

struct ggml_tensor * ggml_new_i32(struct ggml_context * ctx, int32_t value) {
    ggml_scratch_save(ctx);

    struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 1);

    ggml_scratch_load(ctx);

    ggml_set_i32(result, value);

    return result;
}

struct ggml_tensor * ggml_new_f32(struct ggml_context * ctx, float value) {
    ggml_scratch_save(ctx);

    struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);

    ggml_scratch_load(ctx);

    ggml_set_f32(result, value);

    return result;
}

struct ggml_tensor * ggml_dup_tensor(struct ggml_context * ctx, const struct ggml_tensor * src) {
    return ggml_new_tensor_impl(ctx, src->type, src->n_dims, src->ne, NULL);
}

struct ggml_tensor * ggml_set_zero(struct ggml_tensor * tensor) {
    memset(tensor->data, 0, ggml_nbytes(tensor));
    return tensor;
}

struct ggml_tensor * ggml_set_i32(struct ggml_tensor * tensor, int32_t value) {
    const int n     = ggml_nrows(tensor);
    const int nc    = tensor->ne[0];
    const size_t n1 = tensor->nb[1];

    char * const data = tensor->data;

    switch (tensor->type) {
        case GGML_TYPE_I8:
            {
                assert(tensor->nb[0] == sizeof(int8_t));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_i8(nc, (int8_t *)(data + i*n1), value);
                }
            } break;
        case GGML_TYPE_I16:
            {
                assert(tensor->nb[0] == sizeof(int16_t));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_i16(nc, (int16_t *)(data + i*n1), value);
                }
            } break;
        case GGML_TYPE_I32:
            {
                assert(tensor->nb[0] == sizeof(int32_t));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_i32(nc, (int32_t *)(data + i*n1), value);
                }
            } break;
        case GGML_TYPE_F16:
            {
                assert(tensor->nb[0] == sizeof(ggml_fp16_t));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), GGML_FP32_TO_FP16(value));
                }
            } break;
        case GGML_TYPE_F32:
            {
                assert(tensor->nb[0] == sizeof(float));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_f32(nc, (float *)(data + i*n1), value);
                }
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }

    return tensor;
}

struct ggml_tensor * ggml_set_f32(struct ggml_tensor * tensor, float value) {
    const int n     = ggml_nrows(tensor);
    const int nc    = tensor->ne[0];
    const size_t n1 = tensor->nb[1];

    char * const data = tensor->data;

    switch (tensor->type) {
        case GGML_TYPE_I8:
            {
                assert(tensor->nb[0] == sizeof(int8_t));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_i8(nc, (int8_t *)(data + i*n1), value);
                }
            } break;
        case GGML_TYPE_I16:
            {
                assert(tensor->nb[0] == sizeof(int16_t));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_i16(nc, (int16_t *)(data + i*n1), value);
                }
            } break;
        case GGML_TYPE_I32:
            {
                assert(tensor->nb[0] == sizeof(int32_t));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_i32(nc, (int32_t *)(data + i*n1), value);
                }
            } break;
        case GGML_TYPE_F16:
            {
                assert(tensor->nb[0] == sizeof(ggml_fp16_t));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), GGML_FP32_TO_FP16(value));
                }
            } break;
        case GGML_TYPE_F32:
            {
                assert(tensor->nb[0] == sizeof(float));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_f32(nc, (float *)(data + i*n1), value);
                }
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }

    return tensor;
}

int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i) {
    switch (tensor->type) {
        case GGML_TYPE_I8:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
                return ((int8_t *)(tensor->data))[i];
            } break;
        case GGML_TYPE_I16:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
                return ((int16_t *)(tensor->data))[i];
            } break;
        case GGML_TYPE_I32:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
                return ((int32_t *)(tensor->data))[i];
            } break;
        case GGML_TYPE_F16:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
                return GGML_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]);
            } break;
        case GGML_TYPE_F32:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(float));
                return ((float *)(tensor->data))[i];
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }

    return 0;
}

void ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value) {
    switch (tensor->type) {
        case GGML_TYPE_I8:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
                ((int8_t *)(tensor->data))[i] = value;
            } break;
        case GGML_TYPE_I16:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
                ((int16_t *)(tensor->data))[i] = value;
            } break;
        case GGML_TYPE_I32:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
                ((int32_t *)(tensor->data))[i] = value;
            } break;
        case GGML_TYPE_F16:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
                ((ggml_fp16_t *)(tensor->data))[i] = GGML_FP32_TO_FP16(value);
            } break;
        case GGML_TYPE_F32:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(float));
                ((float *)(tensor->data))[i] = value;
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

float ggml_get_f32_1d(const struct ggml_tensor * tensor, int i) {
    switch (tensor->type) {
        case GGML_TYPE_I8:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
                return ((int8_t *)(tensor->data))[i];
            } break;
        case GGML_TYPE_I16:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
                return ((int16_t *)(tensor->data))[i];
            } break;
        case GGML_TYPE_I32:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
                return ((int32_t *)(tensor->data))[i];
            } break;
        case GGML_TYPE_F16:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
                return GGML_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]);
            } break;
        case GGML_TYPE_F32:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(float));
                return ((float *)(tensor->data))[i];
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }

    return 0.0f;
}

void ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value) {
    switch (tensor->type) {
        case GGML_TYPE_I8:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
                ((int8_t *)(tensor->data))[i] = value;
            } break;
        case GGML_TYPE_I16:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
                ((int16_t *)(tensor->data))[i] = value;
            } break;
        case GGML_TYPE_I32:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
                ((int32_t *)(tensor->data))[i] = value;
            } break;
        case GGML_TYPE_F16:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
                ((ggml_fp16_t *)(tensor->data))[i] = GGML_FP32_TO_FP16(value);
            } break;
        case GGML_TYPE_F32:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(float));
                ((float *)(tensor->data))[i] = value;
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

void * ggml_get_data(const struct ggml_tensor * tensor) {
    return tensor->data;
}

float * ggml_get_data_f32(const struct ggml_tensor * tensor) {
    assert(tensor->type == GGML_TYPE_F32);
    return (float *)(tensor->data);
}

enum ggml_unary_op ggml_get_unary_op(const struct ggml_tensor * tensor) {
    GGML_ASSERT(tensor->op == GGML_OP_UNARY);
    return (enum ggml_unary_op) ggml_get_op_params_i32(tensor, 0);
}

static void ggml_set_unary_op(struct ggml_tensor * tensor, enum ggml_unary_op op) {
    GGML_ASSERT(tensor->op == GGML_OP_UNARY);
    ggml_set_op_params_i32(tensor, 0, (int32_t) op);
}

const char * ggml_get_name(const struct ggml_tensor * tensor) {
    return tensor->name;
}

struct ggml_tensor * ggml_set_name(struct ggml_tensor * tensor, const char * name) {
    strncpy(tensor->name, name, sizeof(tensor->name));
    tensor->name[sizeof(tensor->name) - 1] = '\0';
    return tensor;
}

struct ggml_tensor * ggml_format_name(struct ggml_tensor * tensor, const char * fmt, ...) {
    va_list args;
    va_start(args, fmt);
    vsnprintf(tensor->name, sizeof(tensor->name), fmt, args);
    va_end(args);
    return tensor;
}

struct ggml_tensor * ggml_view_tensor(
        struct ggml_context * ctx,
        const struct ggml_tensor * src) {
    struct ggml_tensor * result = ggml_new_tensor_impl(ctx, src->type, src->n_dims, src->ne, src->data);
    ggml_format_name(result, "%s (view)", src->name);

    result->nb[0] = src->nb[0];
    result->nb[1] = src->nb[1];
    result->nb[2] = src->nb[2];
    result->nb[3] = src->nb[3];

    return result;
}

struct ggml_tensor * ggml_get_tensor(struct ggml_context * ctx, const char * name) {
    struct ggml_object * obj = ctx->objects_begin;

    char * const mem_buffer = ctx->mem_buffer;

    while (obj != NULL) {
        struct ggml_tensor * cur = (struct ggml_tensor *)(mem_buffer + obj->offs);
        if (strcmp(cur->name, name) == 0) {
            return cur;
        }

        obj = obj->next;
    }

    return NULL;
}

////////////////////////////////////////////////////////////////////////////////

// ggml_dup

static struct ggml_tensor * ggml_dup_impl(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        bool inplace) {
    bool is_node = false;

    if (!inplace && (a->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_DUP;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_dup(
        struct ggml_context * ctx,
        struct ggml_tensor  * a) {
    return ggml_dup_impl(ctx, a, false);
}

struct ggml_tensor * ggml_dup_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a) {
    return ggml_dup_impl(ctx, a, true);
}

// ggml_add

static struct ggml_tensor * ggml_add_impl(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        bool inplace) {
    // TODO: support less-strict constraint
    //       GGML_ASSERT(ggml_can_repeat(b, a));
    GGML_ASSERT(ggml_can_repeat_rows(b, a));

    bool is_node = false;

    if (!inplace && (a->grad || b->grad)) {
        // TODO: support backward pass for broadcasting
        GGML_ASSERT(ggml_are_same_shape(a, b));
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_ADD;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

struct ggml_tensor * ggml_add(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b) {
    return ggml_add_impl(ctx, a, b, false);
}

struct ggml_tensor * ggml_add_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b) {
    return ggml_add_impl(ctx, a, b, true);
}

// ggml_add1

static struct ggml_tensor * ggml_add1_impl(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        bool inplace) {
    GGML_ASSERT(ggml_is_scalar(b));
    GGML_ASSERT(ggml_is_padded_1d(a));

    bool is_node = false;

    if (a->grad || b->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_ADD1;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

struct ggml_tensor * ggml_add1(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b) {
    return ggml_add1_impl(ctx, a, b, false);
}

struct ggml_tensor * ggml_add1_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b) {
    return ggml_add1_impl(ctx, a, b, true);
}

// ggml_acc

static struct ggml_tensor * ggml_acc_impl(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        size_t nb1,
        size_t nb2,
        size_t nb3,
        size_t offset,
        bool inplace) {
    GGML_ASSERT(ggml_nelements(b) <= ggml_nelements(a));
    GGML_ASSERT(ggml_is_contiguous(a));
    GGML_ASSERT(a->type == GGML_TYPE_F32);
    GGML_ASSERT(b->type == GGML_TYPE_F32);

    bool is_node = false;

    if (!inplace && (a->grad || b->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    int32_t params[] = { nb1, nb2, nb3, offset, inplace ? 1 : 0 };
    ggml_set_op_params(result, params, sizeof(params));

    result->op   = GGML_OP_ACC;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

struct ggml_tensor * ggml_acc(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        size_t nb1,
        size_t nb2,
        size_t nb3,
        size_t offset) {
    return ggml_acc_impl(ctx, a, b, nb1, nb2, nb3, offset, false);
}

struct ggml_tensor * ggml_acc_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        size_t nb1,
        size_t nb2,
        size_t nb3,
        size_t offset) {
    return ggml_acc_impl(ctx, a, b, nb1, nb2, nb3, offset, true);
}

// ggml_sub

static struct ggml_tensor * ggml_sub_impl(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        bool inplace) {
    GGML_ASSERT(ggml_are_same_shape(a, b));

    bool is_node = false;

    if (!inplace && (a->grad || b->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_SUB;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

struct ggml_tensor * ggml_sub(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b) {
    return ggml_sub_impl(ctx, a, b, false);
}

struct ggml_tensor * ggml_sub_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b) {
    return ggml_sub_impl(ctx, a, b, true);
}

// ggml_mul

static struct ggml_tensor * ggml_mul_impl(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        bool inplace) {
    // TODO: support less-strict constraint
    //       GGML_ASSERT(ggml_can_repeat(b, a));
    GGML_ASSERT(ggml_can_repeat_rows(b, a));

    bool is_node = false;

    if (!inplace && (a->grad || b->grad)) {
        // TODO: support backward pass for broadcasting
        GGML_ASSERT(ggml_are_same_shape(a, b));
        is_node = true;
    }

    if (inplace) {
        GGML_ASSERT(is_node == false);
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_MUL;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

struct ggml_tensor * ggml_mul(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b) {
    return ggml_mul_impl(ctx, a, b, false);
}

struct ggml_tensor * ggml_mul_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b) {
    return ggml_mul_impl(ctx, a, b, true);
}

// ggml_div

static struct ggml_tensor * ggml_div_impl(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        bool inplace) {
    GGML_ASSERT(ggml_are_same_shape(a, b));

    bool is_node = false;

    if (!inplace && (a->grad || b->grad)) {
        is_node = true;
    }

    if (inplace) {
        GGML_ASSERT(is_node == false);
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_DIV;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

struct ggml_tensor * ggml_div(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b) {
    return ggml_div_impl(ctx, a, b, false);
}

struct ggml_tensor * ggml_div_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b) {
    return ggml_div_impl(ctx, a, b, true);
}

// ggml_sqr

static struct ggml_tensor * ggml_sqr_impl(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        bool inplace) {
    bool is_node = false;

    if (!inplace && (a->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_SQR;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_sqr(
        struct ggml_context * ctx,
        struct ggml_tensor  * a) {
    return ggml_sqr_impl(ctx, a, false);
}

struct ggml_tensor * ggml_sqr_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a) {
    return ggml_sqr_impl(ctx, a, true);
}

// ggml_sqrt

static struct ggml_tensor * ggml_sqrt_impl(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        bool inplace) {
    bool is_node = false;

    if (!inplace && (a->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_SQRT;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_sqrt(
        struct ggml_context * ctx,
        struct ggml_tensor  * a) {
    return ggml_sqrt_impl(ctx, a, false);
}

struct ggml_tensor * ggml_sqrt_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a) {
    return ggml_sqrt_impl(ctx, a, true);
}

// ggml_log

static struct ggml_tensor * ggml_log_impl(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        bool inplace) {
    bool is_node = false;

    if (!inplace && (a->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_LOG;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_log(
        struct ggml_context * ctx,
        struct ggml_tensor  * a) {
    return ggml_log_impl(ctx, a, false);
}

struct ggml_tensor * ggml_log_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a) {
    return ggml_log_impl(ctx, a, true);
}

// ggml_sum

struct ggml_tensor * ggml_sum(
        struct ggml_context * ctx,
        struct ggml_tensor  * a) {
    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = ggml_new_tensor_1d(ctx, a->type, 1);

    result->op   = GGML_OP_SUM;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_sum_rows

struct ggml_tensor * ggml_sum_rows(
        struct ggml_context * ctx,
        struct ggml_tensor  * a) {
    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    int64_t ne[4] = { 1, 1, 1, 1 };
    for (int i = 1; i < a->n_dims; ++i) {
        ne[i] = a->ne[i];
    }

    struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, a->n_dims, ne);

    result->op   = GGML_OP_SUM_ROWS;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_mean

struct ggml_tensor * ggml_mean(
        struct ggml_context * ctx,
        struct ggml_tensor  * a) {
    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement
        is_node = true;
    }

    int64_t ne[GGML_MAX_DIMS] = { 1, a->ne[1], a->ne[2], a->ne[3] };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, a->n_dims, ne);

    result->op   = GGML_OP_MEAN;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_argmax

struct ggml_tensor * ggml_argmax(
        struct ggml_context * ctx,
        struct ggml_tensor  * a) {
    GGML_ASSERT(ggml_is_matrix(a));

    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false);
        is_node = true;
    }

    int64_t ne[GGML_MAX_DIMS] = { a->ne[1], 1, 1, 1 };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_I32, a->n_dims, ne);

    result->op   = GGML_OP_ARGMAX;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_repeat

struct ggml_tensor * ggml_repeat(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b) {
    GGML_ASSERT(ggml_can_repeat(a, b));

    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    if (ggml_are_same_shape(a, b) && !is_node) {
        return a;
    }

    struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, b->n_dims, b->ne);

    result->op   = GGML_OP_REPEAT;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

// ggml_repeat_back

struct ggml_tensor * ggml_repeat_back(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b) {
    GGML_ASSERT(ggml_can_repeat(b, a));

    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    if (ggml_are_same_shape(a, b) && !is_node) {
        return a;
    }

    struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, b->n_dims, b->ne);

    result->op   = GGML_OP_REPEAT_BACK;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

// ggml_abs

struct ggml_tensor * ggml_abs(
        struct ggml_context * ctx,
        struct ggml_tensor  * a) {
    return ggml_unary(ctx, a, GGML_UNARY_OP_ABS);
}

struct ggml_tensor * ggml_abs_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a) {
    return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_ABS);
}

// ggml_sgn

struct ggml_tensor * ggml_sgn(
        struct ggml_context * ctx,
        struct ggml_tensor  * a) {
    return ggml_unary(ctx, a, GGML_UNARY_OP_SGN);
}

struct ggml_tensor * ggml_sgn_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a) {
    return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_SGN);
}

// ggml_neg

struct ggml_tensor * ggml_neg(
        struct ggml_context * ctx,
        struct ggml_tensor  * a) {
    return ggml_unary(ctx, a, GGML_UNARY_OP_NEG);
}

struct ggml_tensor * ggml_neg_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a) {
    return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_NEG);
}

// ggml_step

struct ggml_tensor * ggml_step(
        struct ggml_context * ctx,
        struct ggml_tensor  * a) {
    return ggml_unary(ctx, a, GGML_UNARY_OP_STEP);
}

struct ggml_tensor * ggml_step_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a) {
    return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_STEP);
}

// ggml_tanh

struct ggml_tensor * ggml_tanh(
        struct ggml_context * ctx,
        struct ggml_tensor  * a) {
    return ggml_unary(ctx, a, GGML_UNARY_OP_TANH);
}

struct ggml_tensor * ggml_tanh_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a) {
    return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_TANH);
}

// ggml_elu

struct ggml_tensor * ggml_elu(
        struct ggml_context * ctx,
        struct ggml_tensor  * a) {
    return ggml_unary(ctx, a, GGML_UNARY_OP_ELU);
}

struct ggml_tensor * ggml_elu_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a) {
    return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_ELU);
}

// ggml_relu

struct ggml_tensor * ggml_relu(
        struct ggml_context * ctx,
        struct ggml_tensor  * a) {
    return ggml_unary(ctx, a, GGML_UNARY_OP_RELU);
}

struct ggml_tensor * ggml_relu_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a) {
    return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_RELU);
}

// ggml_gelu

struct ggml_tensor * ggml_gelu(
        struct ggml_context * ctx,
        struct ggml_tensor  * a) {
    return ggml_unary(ctx, a, GGML_UNARY_OP_GELU);
}

struct ggml_tensor * ggml_gelu_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a) {
    return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_GELU);
}

// ggml_gelu_quick

struct ggml_tensor * ggml_gelu_quick(
        struct ggml_context * ctx,
        struct ggml_tensor  * a) {
    return ggml_unary(ctx, a, GGML_UNARY_OP_GELU_QUICK);
}

struct ggml_tensor * ggml_gelu_quick_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a) {
    return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_GELU_QUICK);
}

// ggml_silu

struct ggml_tensor * ggml_silu(
        struct ggml_context * ctx,
        struct ggml_tensor  * a) {
    return ggml_unary(ctx, a, GGML_UNARY_OP_SILU);
}

struct ggml_tensor * ggml_silu_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a) {
    return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_SILU);
}

// ggml_silu_back

struct ggml_tensor * ggml_silu_back(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b) {
    bool is_node = false;

    if (a->grad || b->grad) {
        // TODO: implement backward
        is_node = true;
    }

    struct ggml_tensor * result = ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_SILU_BACK;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

// ggml_norm

static struct ggml_tensor * ggml_norm_impl(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        bool inplace) {
    bool is_node = false;

    if (!inplace && (a->grad)) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    // TODO: maybe store epsilon here?

    result->op   = GGML_OP_NORM;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_norm(
        struct ggml_context * ctx,
        struct ggml_tensor  * a) {
    return ggml_norm_impl(ctx, a, false);
}

struct ggml_tensor * ggml_norm_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a) {
    return ggml_norm_impl(ctx, a, true);
}

static struct ggml_tensor * ggml_rms_norm_impl(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        float eps,
        bool inplace) {
    bool is_node = false;

    if (!inplace && (a->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    ggml_set_op_params(result, &eps, sizeof(eps));

    result->op   = GGML_OP_RMS_NORM;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_rms_norm(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        float eps) {
    return ggml_rms_norm_impl(ctx, a, eps, false);
}

struct ggml_tensor * ggml_rms_norm_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        float eps) {
    return ggml_rms_norm_impl(ctx, a, eps, true);
}

struct ggml_tensor * ggml_rms_norm_back(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b) {
    bool is_node = false;

    if (a->grad) {
        // TODO: implement backward
        is_node = true;
    }

    struct ggml_tensor * result = ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_RMS_NORM_BACK;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

// ggml_mul_mat

struct ggml_tensor * ggml_mul_mat(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b) {
    GGML_ASSERT(ggml_can_mul_mat(a, b));
    GGML_ASSERT(!ggml_is_transposed(a));

    bool is_node = false;

    if (a->grad || b->grad) {
        is_node = true;
    }

    const int64_t ne[4] = { a->ne[1], b->ne[1], b->ne[2], b->ne[3] };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, MAX(a->n_dims, b->n_dims), ne);

    result->op   = GGML_OP_MUL_MAT;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}
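
// Shape note (illustrative, hypothetical dims): for a with ne = {K, M, ...}
// and b with ne = {K, N, ...} the result has ne = {M, N, b->ne[2], b->ne[3]}
// and is always F32, regardless of the (possibly quantized) input types:
//   // a: {4096, 4096}, b: {4096, 8}  ->  ggml_mul_mat(ctx, a, b): {4096, 8}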

// ggml_out_prod

struct ggml_tensor * ggml_out_prod(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b) {
    GGML_ASSERT(ggml_can_out_prod(a, b));
    GGML_ASSERT(!ggml_is_transposed(a));

    bool is_node = false;

    if (a->grad || b->grad) {
        is_node = true;
    }

    const int64_t ne[4] = { a->ne[0], b->ne[0], a->ne[2], b->ne[3] };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, MIN(a->n_dims, b->n_dims), ne);

    result->op   = GGML_OP_OUT_PROD;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

// ggml_scale

static struct ggml_tensor * ggml_scale_impl(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        bool inplace) {
    GGML_ASSERT(ggml_is_scalar(b));
    GGML_ASSERT(ggml_is_padded_1d(a));

    bool is_node = false;

    if (a->grad || b->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_SCALE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

struct ggml_tensor * ggml_scale(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b) {
    return ggml_scale_impl(ctx, a, b, false);
}

struct ggml_tensor * ggml_scale_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b) {
    return ggml_scale_impl(ctx, a, b, true);
}

// ggml_set

static struct ggml_tensor * ggml_set_impl(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        size_t nb1,
        size_t nb2,
        size_t nb3,
        size_t offset,
        bool inplace) {
    GGML_ASSERT(ggml_nelements(a) >= ggml_nelements(b));

    bool is_node = false;

    if (a->grad || b->grad) {
        is_node = true;
    }

    // make a view of the destination
    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    int32_t params[] = { nb1, nb2, nb3, offset, inplace ? 1 : 0 };
    ggml_set_op_params(result, params, sizeof(params));

    result->op   = GGML_OP_SET;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

struct ggml_tensor * ggml_set(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        size_t nb1,
        size_t nb2,
        size_t nb3,
        size_t offset) {
    return ggml_set_impl(ctx, a, b, nb1, nb2, nb3, offset, false);
}

struct ggml_tensor * ggml_set_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        size_t nb1,
        size_t nb2,
        size_t nb3,
        size_t offset) {
    return ggml_set_impl(ctx, a, b, nb1, nb2, nb3, offset, true);
}

struct ggml_tensor * ggml_set_1d(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        size_t offset) {
    return ggml_set_impl(ctx, a, b, a->nb[1], a->nb[2], a->nb[3], offset, false);
}

struct ggml_tensor * ggml_set_1d_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        size_t offset) {
    return ggml_set_impl(ctx, a, b, a->nb[1], a->nb[2], a->nb[3], offset, true);
}

struct ggml_tensor * ggml_set_2d(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        size_t nb1,
        size_t offset) {
    return ggml_set_impl(ctx, a, b, nb1, a->nb[2], a->nb[3], offset, false);
}

struct ggml_tensor * ggml_set_2d_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        size_t nb1,
        size_t offset) {
    return ggml_set_impl(ctx, a, b, nb1, a->nb[2], a->nb[3], offset, true);
}

// ggml_cpy

static struct ggml_tensor * ggml_cpy_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        bool inplace) {
    GGML_ASSERT(ggml_nelements(a) == ggml_nelements(b));

    bool is_node = false;

    if (!inplace && (a->grad || b->grad)) {
        is_node = true;
    }

    // make a view of the destination
    struct ggml_tensor * result = ggml_view_tensor(ctx, b);
    if (strlen(b->name) > 0) {
        ggml_format_name(result, "%s (copy of %s)", b->name, a->name);
    } else {
        ggml_format_name(result, "%s (copy)", a->name);
    }

    result->op   = GGML_OP_CPY;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

struct ggml_tensor * ggml_cpy(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_cpy_impl(ctx, a, b, false);
}

struct ggml_tensor * ggml_cpy_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_cpy_impl(ctx, a, b, true);
}
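
// usage sketch (illustrative editor addition): ggml_cpy doubles as a type
// conversion, since src and dst only need matching element counts, not types:
//
//   struct ggml_tensor * src = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 64, 4);
//   struct ggml_tensor * dst = ggml_new_tensor_2d(ctx, GGML_TYPE_F16, 64, 4);
//   struct ggml_tensor * out = ggml_cpy(ctx, src, dst); // f32 -> f16 at compute time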

// ggml_cont

static struct ggml_tensor * ggml_cont_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        bool inplace) {
    bool is_node = false;

    if (!inplace && a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
    ggml_format_name(result, "%s (cont)", a->name);

    result->op   = GGML_OP_CONT;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_cont(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_cont_impl(ctx, a, false);
}

struct ggml_tensor * ggml_cont_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_cont_impl(ctx, a, true);
}
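
// usage sketch (illustrative editor addition): materialize a strided view so
// that ops requiring contiguous memory can consume it:
//
//   struct ggml_tensor * t = ggml_cont(ctx, ggml_transpose(ctx, x));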

// ggml_reshape

struct ggml_tensor * ggml_reshape(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    GGML_ASSERT(ggml_is_contiguous(a));
    GGML_ASSERT(ggml_is_contiguous(b));
    GGML_ASSERT(ggml_nelements(a) == ggml_nelements(b));

    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    if (b->grad) {
        // gradient propagation is not supported
        //GGML_ASSERT(false);
    }

    struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, b->n_dims, b->ne, a->data);
    ggml_format_name(result, "%s (reshaped)", a->name);

    result->op   = GGML_OP_RESHAPE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_reshape_1d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0) {
    GGML_ASSERT(ggml_is_contiguous(a));
    GGML_ASSERT(ggml_nelements(a) == ne0);

    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    const int64_t ne[1] = { ne0 };
    struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 1, ne, a->data);
    ggml_format_name(result, "%s (reshaped)", a->name);

    result->op   = GGML_OP_RESHAPE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_reshape_2d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        int64_t ne1) {
    GGML_ASSERT(ggml_is_contiguous(a));
    GGML_ASSERT(ggml_nelements(a) == ne0*ne1);

    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    const int64_t ne[2] = { ne0, ne1 };
    struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 2, ne, a->data);
    ggml_format_name(result, "%s (reshaped)", a->name);

    result->op   = GGML_OP_RESHAPE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_reshape_3d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        int64_t ne1,
        int64_t ne2) {
    GGML_ASSERT(ggml_is_contiguous(a));
    GGML_ASSERT(ggml_nelements(a) == ne0*ne1*ne2);

    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    const int64_t ne[3] = { ne0, ne1, ne2 };
    struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 3, ne, a->data);
    ggml_format_name(result, "%s (reshaped)", a->name);

    result->op   = GGML_OP_RESHAPE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_reshape_4d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        int64_t ne1,
        int64_t ne2,
        int64_t ne3) {
    GGML_ASSERT(ggml_is_contiguous(a));
    GGML_ASSERT(ggml_nelements(a) == ne0*ne1*ne2*ne3);

    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    const int64_t ne[4] = { ne0, ne1, ne2, ne3 };
    struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 4, ne, a->data);
    ggml_format_name(result, "%s (reshaped)", a->name);

    result->op   = GGML_OP_RESHAPE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}
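
// usage sketch (illustrative editor addition): reinterpret 12 contiguous
// elements as a 4x3 matrix; the data pointer is shared, only ne/nb change:
//
//   struct ggml_tensor * v = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 12);
//   struct ggml_tensor * m = ggml_reshape_2d(ctx, v, 4, 3);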

// ggml_view_1d

struct ggml_tensor * ggml_view_1d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        size_t offset) {
    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 1, &ne0, (char *) a->data + offset);
    ggml_format_name(result, "%s (view)", a->name);

    ggml_set_op_params(result, &offset, sizeof(offset));

    result->op   = GGML_OP_VIEW;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_view_2d

struct ggml_tensor * ggml_view_2d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        int64_t ne1,
        size_t nb1,
        size_t offset) {
    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    const int64_t ne[GGML_MAX_DIMS] = { ne0, ne1, 1, 1 };

    struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 2, ne, (char *) a->data + offset);
    ggml_format_name(result, "%s (view)", a->name);

    ggml_set_op_params(result, &offset, sizeof(offset));

    result->nb[1] = nb1;
    result->nb[2] = result->nb[1]*ne1;
    result->nb[3] = result->nb[2];

    result->op   = GGML_OP_VIEW;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_view_3d

struct ggml_tensor * ggml_view_3d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        int64_t ne1,
        int64_t ne2,
        size_t nb1,
        size_t nb2,
        size_t offset) {
    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    const int64_t ne[GGML_MAX_DIMS] = { ne0, ne1, ne2, 1 };

    struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 3, ne, (char *) a->data + offset);
    ggml_format_name(result, "%s (view)", a->name);

    ggml_set_op_params(result, &offset, sizeof(offset));

    result->nb[1] = nb1;
    result->nb[2] = nb2;
    result->nb[3] = result->nb[2]*ne2;

    result->op   = GGML_OP_VIEW;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_view_4d

struct ggml_tensor * ggml_view_4d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        int64_t ne1,
        int64_t ne2,
        int64_t ne3,
        size_t nb1,
        size_t nb2,
        size_t nb3,
        size_t offset) {
    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    const int64_t ne[GGML_MAX_DIMS] = { ne0, ne1, ne2, ne3 };

    struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 4, ne, (char *) a->data + offset);
    ggml_format_name(result, "%s (view)", a->name);

    ggml_set_op_params(result, &offset, sizeof(offset));

    result->nb[1] = nb1;
    result->nb[2] = nb2;
    result->nb[3] = nb3;

    result->op   = GGML_OP_VIEW;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}
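
// usage sketch (illustrative editor addition): view the first two rows of a
// matrix in place, reusing the parent's row stride and a zero byte offset:
//
//   struct ggml_tensor * rows = ggml_view_2d(ctx, a, a->ne[0], 2, a->nb[1], 0);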

// ggml_permute

struct ggml_tensor * ggml_permute(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int axis0,
        int axis1,
        int axis2,
        int axis3) {
    GGML_ASSERT(axis0 >= 0 && axis0 < GGML_MAX_DIMS);
    GGML_ASSERT(axis1 >= 0 && axis1 < GGML_MAX_DIMS);
    GGML_ASSERT(axis2 >= 0 && axis2 < GGML_MAX_DIMS);
    GGML_ASSERT(axis3 >= 0 && axis3 < GGML_MAX_DIMS);

    GGML_ASSERT(axis0 != axis1);
    GGML_ASSERT(axis0 != axis2);
    GGML_ASSERT(axis0 != axis3);
    GGML_ASSERT(axis1 != axis2);
    GGML_ASSERT(axis1 != axis3);
    GGML_ASSERT(axis2 != axis3);

    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = ggml_view_tensor(ctx, a);
    ggml_format_name(result, "%s (permuted)", a->name);

    // use wide scratch arrays so large shapes (int64_t) and byte strides (size_t) are not truncated
    int64_t ne[GGML_MAX_DIMS];
    size_t  nb[GGML_MAX_DIMS];

    ne[axis0] = a->ne[0];
    ne[axis1] = a->ne[1];
    ne[axis2] = a->ne[2];
    ne[axis3] = a->ne[3];

    nb[axis0] = a->nb[0];
    nb[axis1] = a->nb[1];
    nb[axis2] = a->nb[2];
    nb[axis3] = a->nb[3];

    result->ne[0] = ne[0];
    result->ne[1] = ne[1];
    result->ne[2] = ne[2];
    result->ne[3] = ne[3];

    result->nb[0] = nb[0];
    result->nb[1] = nb[1];
    result->nb[2] = nb[2];
    result->nb[3] = nb[3];

    result->op   = GGML_OP_PERMUTE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    int32_t params[] = { axis0, axis1, axis2, axis3 };
    ggml_set_op_params(result, &params, sizeof(params));

    return result;
}

// ggml_transpose

struct ggml_tensor * ggml_transpose(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = ggml_view_tensor(ctx, a);
    ggml_format_name(result, "%s (transposed)", a->name);

    result->ne[0] = a->ne[1];
    result->ne[1] = a->ne[0];

    result->nb[0] = a->nb[1];
    result->nb[1] = a->nb[0];

    result->op   = GGML_OP_TRANSPOSE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_get_rows

struct ggml_tensor * ggml_get_rows(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    GGML_ASSERT(ggml_is_matrix(a) && ggml_is_vector(b) && b->type == GGML_TYPE_I32);

    bool is_node = false;

    if (a->grad || b->grad) {
        is_node = true;
    }

    // TODO: implement non F32 return
    //struct ggml_tensor * result = ggml_new_tensor_2d(ctx, a->type, a->ne[0], b->ne[0]);
    struct ggml_tensor * result = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, a->ne[0], b->ne[0]);

    result->op   = GGML_OP_GET_ROWS;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}
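
// usage sketch (illustrative editor addition): token embedding lookup; ids is
// an i32 vector of row indices into an [n_embd, n_vocab] table, producing an
// [n_embd, n_tokens] result:
//
//   struct ggml_tensor * cur = ggml_get_rows(ctx, tok_embeddings, ids);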

// ggml_get_rows_back

struct ggml_tensor * ggml_get_rows_back(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        struct ggml_tensor * c) {
    GGML_ASSERT(ggml_is_matrix(a) && ggml_is_vector(b) && b->type == GGML_TYPE_I32);
    GGML_ASSERT(ggml_is_matrix(c) && (a->ne[0] == c->ne[0]));

    bool is_node = false;

    if (a->grad || b->grad) {
        is_node = true;
    }

    // TODO: implement non F32 return
    //struct ggml_tensor * result = ggml_new_tensor_2d(ctx, a->type, a->ne[0], b->ne[0]);
    struct ggml_tensor * result = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, c->ne[0], c->ne[1]);

    result->op   = GGML_OP_GET_ROWS_BACK;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;
    result->src[2] = c;

    return result;
}

// ggml_diag

struct ggml_tensor * ggml_diag(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    GGML_ASSERT(a->ne[1] == 1);

    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    const int64_t ne[4] = { a->ne[0], a->ne[0], a->ne[2], a->ne[3] };
    struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, MAX(a->n_dims, 2), ne);

    result->op   = GGML_OP_DIAG;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_diag_mask_inf

static struct ggml_tensor * ggml_diag_mask_inf_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_past,
        bool inplace) {
    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    int32_t params[] = { n_past, inplace ? 1 : 0 };
    ggml_set_op_params(result, &params, sizeof(params));

    result->op   = GGML_OP_DIAG_MASK_INF;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_diag_mask_inf(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_past) {
    return ggml_diag_mask_inf_impl(ctx, a, n_past, false);
}

struct ggml_tensor * ggml_diag_mask_inf_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_past) {
    return ggml_diag_mask_inf_impl(ctx, a, n_past, true);
}

// ggml_diag_mask_zero

static struct ggml_tensor * ggml_diag_mask_zero_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_past,
        bool inplace) {
    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    int32_t params[] = { n_past, inplace ? 1 : 0 };
    ggml_set_op_params(result, &params, sizeof(params));

    result->op   = GGML_OP_DIAG_MASK_ZERO;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_diag_mask_zero(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_past) {
    return ggml_diag_mask_zero_impl(ctx, a, n_past, false);
}

struct ggml_tensor * ggml_diag_mask_zero_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_past) {
    return ggml_diag_mask_zero_impl(ctx, a, n_past, true);
}
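
// usage sketch (illustrative editor addition): the usual causal-attention
// pattern masks future positions with -inf before the softmax:
//
//   kq = ggml_diag_mask_inf_inplace(ctx, kq, n_past);
//   kq = ggml_soft_max_inplace(ctx, kq);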

// ggml_soft_max

static struct ggml_tensor * ggml_soft_max_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        bool inplace) {
    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_SOFT_MAX;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_soft_max(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_soft_max_impl(ctx, a, false);
}

struct ggml_tensor * ggml_soft_max_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_soft_max_impl(ctx, a, true);
}

// ggml_soft_max_back

static struct ggml_tensor * ggml_soft_max_back_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        bool inplace) {
    bool is_node = false;

    if (a->grad || b->grad) {
        is_node = true; // TODO: implement backward pass
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_SOFT_MAX_BACK;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

struct ggml_tensor * ggml_soft_max_back(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_soft_max_back_impl(ctx, a, b, false);
}

struct ggml_tensor * ggml_soft_max_back_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_soft_max_back_impl(ctx, a, b, true);
}

// ggml_rope

static struct ggml_tensor * ggml_rope_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_past,
        int n_dims,
        int mode,
        int n_ctx,
        float freq_base,
        float freq_scale,
        bool inplace) {
    GGML_ASSERT(n_past >= 0);

    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    int32_t params[6] = { n_past, n_dims, mode, n_ctx };
    memcpy(params + 4, &freq_base,  sizeof(float));
    memcpy(params + 5, &freq_scale, sizeof(float));
    ggml_set_op_params(result, &params, sizeof(params));

    result->op   = GGML_OP_ROPE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_rope(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_past,
        int n_dims,
        int mode,
        int n_ctx) {
    return ggml_rope_impl(ctx, a, n_past, n_dims, mode, n_ctx, 10000.0f, 1.0f, false);
}

struct ggml_tensor * ggml_rope_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_past,
        int n_dims,
        int mode,
        int n_ctx) {
    return ggml_rope_impl(ctx, a, n_past, n_dims, mode, n_ctx, 10000.0f, 1.0f, true);
}

struct ggml_tensor * ggml_rope_custom_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_past,
        int n_dims,
        int mode,
        int n_ctx,
        float freq_base,
        float freq_scale) {
    return ggml_rope_impl(ctx, a, n_past, n_dims, mode, n_ctx, freq_base, freq_scale, true);
}
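
// usage sketch (illustrative editor addition): apply rotary embeddings to the
// first n_rot dimensions of each query head at position n_past, mode 0
// (the plain wrappers default to freq_base = 10000, freq_scale = 1):
//
//   Qcur = ggml_rope_inplace(ctx, Qcur, n_past, n_rot, 0, n_ctx);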

// ggml_rope_back

struct ggml_tensor * ggml_rope_back(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_past,
        int n_dims,
        int mode,
        int n_ctx) {
    GGML_ASSERT(n_past >= 0);
    GGML_ASSERT((mode & 4) == 0 && "ggml_rope_back() for ChatGLM not implemented yet");

    bool is_node = false;

    if (a->grad) {
        is_node = false; // TODO: implement backward
    }

    struct ggml_tensor * result = ggml_dup_tensor(ctx, a);

    int32_t params[] = { n_past, n_dims, mode, n_ctx };
    ggml_set_op_params(result, &params, sizeof(params));

    result->op   = GGML_OP_ROPE_BACK;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_alibi

struct ggml_tensor * ggml_alibi(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_past,
        int n_head,
        float bias_max) {
    GGML_ASSERT(n_past >= 0);

    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    // TODO: when implementing backward, fix this:
    //struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
    struct ggml_tensor * result = ggml_view_tensor(ctx, a);

    int32_t op_params[3] = { n_past, n_head };
    memcpy(op_params + 2, &bias_max, sizeof(float));
    ggml_set_op_params(result, &op_params, sizeof(op_params));

    result->op   = GGML_OP_ALIBI;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_clamp

struct ggml_tensor * ggml_clamp(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        float min,
        float max) {
    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    // TODO: when implementing backward, fix this:
    struct ggml_tensor * result = ggml_view_tensor(ctx, a);

    float params[] = { min, max };
    ggml_set_op_params(result, &params, sizeof(params));

    result->op   = GGML_OP_CLAMP;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_conv_1d

static int64_t ggml_calc_conv_output_size(int64_t ins, int64_t ks, int s, int p, int d) {
    return (ins + 2 * p - d * (ks - 1) - 1) / s + 1;
}
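
// worked example (editor addition): ins = 8, ks = 3, s = 1, p = 1, d = 1 gives
// (8 + 2*1 - 1*(3 - 1) - 1)/1 + 1 = 8, i.e. "same" padding preserves the length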

GGML_API struct ggml_tensor * ggml_conv_1d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        int s0,
        int p0,
        int d0) {
    GGML_ASSERT(ggml_is_matrix(b));
    GGML_ASSERT(a->ne[1] == b->ne[1]);

    bool is_node = false;

    if (a->grad || b->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    const int64_t ne[4] = {
        ggml_calc_conv_output_size(b->ne[0], a->ne[0], s0, p0, d0),
        a->ne[2], 1, 1,
    };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 2, ne);

    int32_t params[] = { s0, p0, d0 };
    ggml_set_op_params(result, &params, sizeof(params));

    result->op   = GGML_OP_CONV_1D;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

// ggml_conv_2d

struct ggml_tensor * ggml_conv_2d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        int s0,
        int s1,
        int p0,
        int p1,
        int d0,
        int d1) {
    GGML_ASSERT(a->ne[2] == b->ne[2]);

    bool is_node = false;

    if (a->grad || b->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    const int64_t ne[4] = {
        ggml_calc_conv_output_size(b->ne[0], a->ne[0], s0, p0, d0),
        ggml_calc_conv_output_size(b->ne[1], a->ne[1], s1, p1, d1),
        a->ne[3], b->ne[3],
    };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);

    int32_t params[] = { s0, s1, p0, p1, d0, d1 };
    ggml_set_op_params(result, &params, sizeof(params));

    result->op   = GGML_OP_CONV_2D;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

// ggml_conv_1d_ph

struct ggml_tensor * ggml_conv_1d_ph(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        int s,
        int d) {
    return ggml_conv_1d(ctx, a, b, s, a->ne[0] / 2, d);
}
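
// usage sketch (illustrative editor addition): with a kernel a of shape
// [ks, n_in, n_out] and a signal b of shape [len, n_in], ggml_conv_1d yields
// an [out_len, n_out] result:
//
//   struct ggml_tensor * y = ggml_conv_1d(ctx, a, b, 1 /*s0*/, 1 /*p0*/, 1 /*d0*/);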

// ggml_pool_*

static int64_t ggml_calc_pool_output_size(int64_t ins, int ks, int s, int p) {
    return (ins + 2 * p - ks) / s + 1;
}

// ggml_pool_1d

struct ggml_tensor * ggml_pool_1d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        enum ggml_op_pool op,
        int k0,
        int s0,
        int p0) {
    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    const int64_t ne[3] = {
        ggml_calc_pool_output_size(a->ne[0], k0, s0, p0),
        a->ne[1],
    };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 2, ne);

    int32_t params[] = { op, k0, s0, p0 };
    ggml_set_op_params(result, &params, sizeof(params));

    result->op   = GGML_OP_POOL_1D;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_pool_2d

struct ggml_tensor * ggml_pool_2d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        enum ggml_op_pool op,
        int k0,
        int k1,
        int s0,
        int s1,
        int p0,
        int p1) {
    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    const int64_t ne[3] = {
        ggml_calc_pool_output_size(a->ne[0], k0, s0, p0),
        ggml_calc_pool_output_size(a->ne[1], k1, s1, p1),
        a->ne[2],
    };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 3, ne);

    int32_t params[] = { op, k0, k1, s0, s1, p0, p1 };
    ggml_set_op_params(result, &params, sizeof(params));

    result->op   = GGML_OP_POOL_2D;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_flash_attn

struct ggml_tensor * ggml_flash_attn(
        struct ggml_context * ctx,
        struct ggml_tensor * q,
        struct ggml_tensor * k,
        struct ggml_tensor * v,
        bool masked) {
    GGML_ASSERT(ggml_can_mul_mat(k, q));
    // TODO: check if vT can be multiplied by (k*qT)

    bool is_node = false;

    if (q->grad || k->grad || v->grad) {
        is_node = true;
    }

    //struct ggml_tensor * result = ggml_dup_tensor(ctx, q);
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, q->n_dims, q->ne);

    int32_t t = masked ? 1 : 0;
    ggml_set_op_params(result, &t, sizeof(t));

    result->op   = GGML_OP_FLASH_ATTN;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = q;
    result->src[1] = k;
    result->src[2] = v;

    return result;
}
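
// usage sketch (illustrative editor addition): roughly the fused equivalent of
// the unfused attention chain soft_max(mask(scaled k@q^T)) @ v, with causal
// masking enabled:
//
//   struct ggml_tensor * kqv = ggml_flash_attn(ctx, q, k, v, /*masked=*/true);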

// ggml_flash_ff

struct ggml_tensor * ggml_flash_ff(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b0,
        struct ggml_tensor * b1,
        struct ggml_tensor * c0,
        struct ggml_tensor * c1) {
    GGML_ASSERT(ggml_can_mul_mat(b0, a));
    // TODO: more checks

    bool is_node = false;

    if (a->grad || b0->grad || b1->grad || c0->grad || c1->grad) {
        is_node = true;
    }

    //struct ggml_tensor * result = ggml_dup_tensor(ctx, a);
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, a->n_dims, a->ne);

    result->op   = GGML_OP_FLASH_FF;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b0;
    result->src[2] = b1;
    result->src[3] = c0;
    result->src[4] = c1;

    return result;
}

// ggml_flash_attn_back

struct ggml_tensor * ggml_flash_attn_back(
        struct ggml_context * ctx,
        struct ggml_tensor * q,
        struct ggml_tensor * k,
        struct ggml_tensor * v,
        struct ggml_tensor * d,
        bool masked) {
    GGML_ASSERT(ggml_can_mul_mat(k, q));
    // TODO: check if vT can be multiplied by (k*qT)

    // d shape [D,N,ne2,ne3]
    // q shape [D,N,ne2,ne3]
    // k shape [D,M,ne2,ne3]
    // v shape [M,D,ne2,ne3]

    const int64_t D = q->ne[0];
    const int64_t N = q->ne[1];
    const int64_t M = k->ne[1];
    const int64_t ne2 = q->ne[2];
    const int64_t ne3 = q->ne[3];

    GGML_ASSERT(k->ne[0] == D);
    GGML_ASSERT(v->ne[0] == M);
    GGML_ASSERT(v->ne[1] == D);
    GGML_ASSERT(d->ne[0] == D);
    GGML_ASSERT(d->ne[1] == N);
    GGML_ASSERT(k->ne[2] == ne2);
    GGML_ASSERT(k->ne[3] == ne3);
    GGML_ASSERT(v->ne[2] == ne2);
    GGML_ASSERT(v->ne[3] == ne3);
    GGML_ASSERT(d->ne[2] == ne2);
    GGML_ASSERT(d->ne[3] == ne3);

    bool is_node = false;

    if (q->grad || k->grad || v->grad) {
        // when using this operation (in backwards pass) these grads are set.
        // we don't want to create (big) grad of our result, so is_node is false.
        is_node = false;
    }

    // store gradients of q, k and v as contiguous tensors concatenated in result.
    // q shape[D,N,ne2,ne3] ; k shape [D,M,ne2,ne3] ; v shape [M,D,ne2,ne3]
    // gradq->data = result->data
    // gradk->data = result->data + nb0*D*N*ne2*ne3
    // gradv->data = result->data + nb0*D*N*ne2*ne3 + nb0*D*M*ne2*ne3
    // note: v and gradv are actually transposed, i.e. v->ne[0] != D.
    int64_t ne[4] = {D,M+N+M,ne2,ne3};

    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);

    int32_t masked_i = masked ? 1 : 0;
    ggml_set_op_params(result, &masked_i, sizeof(masked_i));

    result->op   = GGML_OP_FLASH_ATTN_BACK;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = q;
    result->src[1] = k;
    result->src[2] = v;
    result->src[3] = d;

    return result;
}

// ggml_win_part

struct ggml_tensor * ggml_win_part(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int w) {
    GGML_ASSERT(a->ne[3] == 1);
    GGML_ASSERT(a->type == GGML_TYPE_F32);

    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    // padding
    const int px = (w - a->ne[1]%w)%w;
    const int py = (w - a->ne[2]%w)%w;

    const int npx = (px + a->ne[1])/w;
    const int npy = (py + a->ne[2])/w;
    const int np  = npx*npy;

    const int64_t ne[4] = { a->ne[0], w, w, np, };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);

    int32_t params[] = { npx, npy, w };
    ggml_set_op_params(result, &params, sizeof(params));

    result->op   = GGML_OP_WIN_PART;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_win_unpart

struct ggml_tensor * ggml_win_unpart(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int w0,
        int h0,
        int w) {
    GGML_ASSERT(a->type == GGML_TYPE_F32);

    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    const int64_t ne[4] = { a->ne[0], w0, h0, 1, };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 3, ne);

    int32_t params[] = { w };
    ggml_set_op_params(result, &params, sizeof(params));

    result->op   = GGML_OP_WIN_UNPART;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_unary

static struct ggml_tensor * ggml_unary_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        enum ggml_unary_op op,
        bool inplace) {
    bool is_node = false;

    if (!inplace && a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    ggml_set_unary_op(result, op);

    result->op   = GGML_OP_UNARY;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_unary(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        enum ggml_unary_op op) {
    return ggml_unary_impl(ctx, a, op, false);
}

struct ggml_tensor * ggml_unary_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        enum ggml_unary_op op) {
    return ggml_unary_impl(ctx, a, op, true);
}
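
// usage sketch (illustrative editor addition): the dedicated activation
// wrappers funnel through this op, e.g. an explicit GELU:
//
//   struct ggml_tensor * h = ggml_unary(ctx, x, GGML_UNARY_OP_GELU);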

// ggml_map_unary

static struct ggml_tensor * ggml_map_unary_impl_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        const ggml_unary_op_f32_t fun,
        bool inplace) {
    bool is_node = false;

    if (!inplace && a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    ggml_set_op_params(result, (const void *) &fun, sizeof(fun));

    result->op   = GGML_OP_MAP_UNARY;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_map_unary_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        const ggml_unary_op_f32_t fun) {
    return ggml_map_unary_impl_f32(ctx, a, fun, false);
}

struct ggml_tensor * ggml_map_unary_inplace_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        const ggml_unary_op_f32_t fun) {
    return ggml_map_unary_impl_f32(ctx, a, fun, true);
}

// ggml_map_binary

static struct ggml_tensor * ggml_map_binary_impl_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        const ggml_binary_op_f32_t fun,
        bool inplace) {
    GGML_ASSERT(ggml_are_same_shape(a, b));

    bool is_node = false;

    if (!inplace && (a->grad || b->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    ggml_set_op_params(result, (const void *) &fun, sizeof(fun));

    result->op   = GGML_OP_MAP_BINARY;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

struct ggml_tensor * ggml_map_binary_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        const ggml_binary_op_f32_t fun) {
    return ggml_map_binary_impl_f32(ctx, a, b, fun, false);
}

struct ggml_tensor * ggml_map_binary_inplace_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        const ggml_binary_op_f32_t fun) {
    return ggml_map_binary_impl_f32(ctx, a, b, fun, true);
}

// ggml_map_custom1

static struct ggml_tensor * ggml_map_custom1_impl_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        const ggml_custom1_op_f32_t fun,
        bool inplace) {
    bool is_node = false;

    if (!inplace && a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    ggml_set_op_params(result, (const void *) &fun, sizeof(fun));

    result->op   = GGML_OP_MAP_CUSTOM1;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_map_custom1_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        const ggml_custom1_op_f32_t fun) {
    return ggml_map_custom1_impl_f32(ctx, a, fun, false);
}

struct ggml_tensor * ggml_map_custom1_inplace_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        const ggml_custom1_op_f32_t fun) {
    return ggml_map_custom1_impl_f32(ctx, a, fun, true);
}

// ggml_map_custom2

static struct ggml_tensor * ggml_map_custom2_impl_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        const ggml_custom2_op_f32_t fun,
        bool inplace) {
    bool is_node = false;

    if (!inplace && (a->grad || b->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    ggml_set_op_params(result, (const void *) &fun, sizeof(fun));

    result->op   = GGML_OP_MAP_CUSTOM2;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

struct ggml_tensor * ggml_map_custom2_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        const ggml_custom2_op_f32_t fun) {
    return ggml_map_custom2_impl_f32(ctx, a, b, fun, false);
}

struct ggml_tensor * ggml_map_custom2_inplace_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        const ggml_custom2_op_f32_t fun) {
    return ggml_map_custom2_impl_f32(ctx, a, b, fun, true);
}

// ggml_map_custom3

static struct ggml_tensor * ggml_map_custom3_impl_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        struct ggml_tensor * c,
        const ggml_custom3_op_f32_t fun,
        bool inplace) {
    bool is_node = false;

    if (!inplace && (a->grad || b->grad || c->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    ggml_set_op_params(result, (const void *) &fun, sizeof(fun));

    result->op   = GGML_OP_MAP_CUSTOM3;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;
    result->src[2] = c;

    return result;
}

struct ggml_tensor * ggml_map_custom3_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        struct ggml_tensor * c,
        const ggml_custom3_op_f32_t fun) {
    return ggml_map_custom3_impl_f32(ctx, a, b, c, fun, false);
}

struct ggml_tensor * ggml_map_custom3_inplace_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        struct ggml_tensor * c,
        const ggml_custom3_op_f32_t fun) {
    return ggml_map_custom3_impl_f32(ctx, a, b, c, fun, true);
}
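
// usage sketch (illustrative editor addition): a user-supplied f32 op plugged
// in via map_custom1; the callback receives the preallocated dst and the source:
//
//   static void scale_by_two(struct ggml_tensor * dst, const struct ggml_tensor * src) {
//       const float * s = (const float *) src->data;
//       float       * d = (float *) dst->data;
//       for (int64_t i = 0; i < ggml_nelements(dst); ++i) {
//           d[i] = 2.0f*s[i];
//       }
//   }
//
//   struct ggml_tensor * y = ggml_map_custom1_f32(ctx, x, scale_by_two);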

// ggml_cross_entropy_loss

struct ggml_tensor * ggml_cross_entropy_loss(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    GGML_ASSERT(ggml_are_same_shape(a, b));

    bool is_node = false;

    if (a->grad || b->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = ggml_new_tensor_1d(ctx, a->type, 1);

    result->op   = GGML_OP_CROSS_ENTROPY_LOSS;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

// ggml_cross_entropy_loss_back

struct ggml_tensor * ggml_cross_entropy_loss_back(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        struct ggml_tensor * c) {
    GGML_ASSERT(ggml_are_same_shape(a, b));
    GGML_ASSERT(ggml_is_scalar(c));

    struct ggml_tensor * result = ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_CROSS_ENTROPY_LOSS_BACK;
    result->grad = NULL;
    result->src[0] = a;
    result->src[1] = b;
    result->src[2] = c;

    return result;
}

////////////////////////////////////////////////////////////////////////////////

void ggml_set_param(
        struct ggml_context * ctx,
        struct ggml_tensor * tensor) {
    tensor->is_param = true;

    GGML_ASSERT(tensor->grad == NULL);
    tensor->grad = ggml_dup_tensor(ctx, tensor);
}
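
// usage sketch (illustrative editor addition, assuming the graph API of this
// ggml version): mark a tensor as trainable before building the backward graph;
// its grad tensor is allocated right here:
//
//   ggml_set_param(ctx, w);
//   struct ggml_tensor * loss = ggml_cross_entropy_loss(ctx, logits, labels);
//   struct ggml_cgraph  gf   = ggml_build_forward(loss);
//   struct ggml_cgraph  gb   = ggml_build_backward(ctx, &gf, /*keep=*/false);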

// ggml_compute_forward_dup

static void ggml_compute_forward_dup_same_cont(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));
    GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));
    GGML_ASSERT(src0->type == dst->type);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const size_t nb00 = src0->nb[0];
    const size_t nb0  = dst->nb[0];

    const int ith = params->ith; // thread index
    const int nth = params->nth; // number of threads

    // parallelize by elements
    const int ne  = ggml_nelements(dst);
    const int dr  = (ne + nth - 1) / nth;
    const int ie0 = dr * ith;
    const int ie1 = MIN(ie0 + dr, ne);

    if (ie0 < ie1) {
        memcpy(
            ((char *)  dst->data + ie0*nb0),
            ((char *) src0->data + ie0*nb00),
            (ie1 - ie0) * GGML_TYPE_SIZE[src0->type]);
    }
}
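
// worked example of the element split above (editor addition): ne = 10 elements
// and nth = 4 threads gives dr = (10 + 3)/4 = 3, so the threads cover the
// ranges [0,3), [3,6), [6,9) and [9,10)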

static void ggml_compute_forward_dup_f16(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    GGML_TENSOR_UNARY_OP_LOCALS;

    const int ith = params->ith; // thread index
    const int nth = params->nth; // number of threads

    if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst) && src0->type == dst->type) {
        ggml_compute_forward_dup_same_cont(params, src0, dst);
        return;
    }

    // parallelize by rows
    const int nr = ne01;
    // number of rows per thread
    const int dr = (nr + nth - 1) / nth;
    // row range for this thread
    const int ir0 = dr * ith;
    const int ir1 = MIN(ir0 + dr, nr);

    if (src0->type == dst->type &&
        ne00 == ne0 &&
        nb00 == GGML_TYPE_SIZE[src0->type] && nb0 == GGML_TYPE_SIZE[dst->type]) {
        // copy by rows
        const size_t rs = ne00*nb00;
        for (int64_t i03 = 0; i03 < ne03; i03++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
                for (int64_t i01 = ir0; i01 < ir1; i01++) {
                    memcpy(
                        ((char *)  dst->data + i01*nb1  + i02*nb2  + i03*nb3),
                        ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03),
                        rs);
                }
            }
        }
        return;
    }

    // TODO: add more special-case implementations for tensor shapes/strides that can benefit from memcpy

    if (ggml_is_contiguous(dst)) {
        if (nb00 == sizeof(ggml_fp16_t)) {
            if (dst->type == GGML_TYPE_F16) {
                size_t id = 0;
                const size_t rs = ne00 * nb00;
                char * dst_ptr = (char *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += rs * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            const char * src0_ptr = (char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03;
                            memcpy(dst_ptr + id, src0_ptr, rs);
                            id += rs;
                        }
                        id += rs * (ne01 - ir1);
                    }
                }
            } else if (dst->type == GGML_TYPE_F32) {
                size_t id = 0;
                float * dst_ptr = (float *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += ne00 * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
                            for (int i00 = 0; i00 < ne00; i00++) {
                                dst_ptr[id] = GGML_FP16_TO_FP32(src0_ptr[i00]);
                                id++;
                            }
                        }
                        id += ne00 * (ne01 - ir1);
                    }
                }
            } else if (type_traits[dst->type].from_float) {
                ggml_from_float_t const quantize_row_q = type_traits[dst->type].from_float;
                float * src0_f32 = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith;

                size_t id = 0;
                size_t rs = nb0 * (ne00 / GGML_BLCK_SIZE[dst->type]);
                char * dst_ptr = (char *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += rs * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);

                            for (int i00 = 0; i00 < ne00; i00++) {
                                src0_f32[i00] = GGML_FP16_TO_FP32(src0_ptr[i00]);
                            }

                            quantize_row_q(src0_f32, dst_ptr + id, ne00);
                            id += rs;
                        }
                        id += rs * (ne01 - ir1);
                    }
                }
            } else {
                GGML_ASSERT(false); // TODO: implement
            }
        } else {
            //printf("%s: this is not optimal - fix me\n", __func__);

            if (dst->type == GGML_TYPE_F32) {
                size_t id = 0;
                float * dst_ptr = (float *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += ne00 * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            for (int i00 = 0; i00 < ne00; i00++) {
                                const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);

                                dst_ptr[id] = GGML_FP16_TO_FP32(*src0_ptr);
                                id++;
                            }
                        }
                        id += ne00 * (ne01 - ir1);
                    }
                }
            } else if (dst->type == GGML_TYPE_F16) {
                size_t id = 0;
                ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += ne00 * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            for (int i00 = 0; i00 < ne00; i00++) {
                                const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);

                                dst_ptr[id] = *src0_ptr;
                                id++;
                            }
                        }
                        id += ne00 * (ne01 - ir1);
                    }
                }
            } else {
                GGML_ASSERT(false); // TODO: implement
            }
        }
        return;
    }

    // dst counters
    int64_t i10 = 0;
    int64_t i11 = 0;
    int64_t i12 = 0;
    int64_t i13 = 0;

    if (dst->type == GGML_TYPE_F16) {
        for (int64_t i03 = 0; i03 < ne03; i03++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
                i10 += ne00 * ir0;
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
                for (int64_t i01 = ir0; i01 < ir1; i01++) {
                    for (int64_t i00 = 0; i00 < ne00; i00++) {
                        const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
                              char * dst_ptr  = ((char *)  dst->data + i10*nb0  + i11*nb1  + i12*nb2  + i13*nb3);

                        memcpy(dst_ptr, src0_ptr, sizeof(ggml_fp16_t));

                        // advance the dst indices, wrapping at the dst dimensions (ne0..ne3)
                        if (++i10 == ne0) {
                            i10 = 0;
                            if (++i11 == ne1) {
                                i11 = 0;
                                if (++i12 == ne2) {
                                    i12 = 0;
                                    if (++i13 == ne3) {
                                        i13 = 0;
                                    }
                                }
                            }
                        }
                    }
                }
                i10 += ne00 * (ne01 - ir1);
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
            }
        }
    } else if (dst->type == GGML_TYPE_F32) {
        for (int64_t i03 = 0; i03 < ne03; i03++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
                i10 += ne00 * ir0;
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
                for (int64_t i01 = ir0; i01 < ir1; i01++) {
                    for (int64_t i00 = 0; i00 < ne00; i00++) {
                        const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
                              char * dst_ptr  = ((char *)  dst->data + i10*nb0  + i11*nb1  + i12*nb2  + i13*nb3);

                        *(float *) dst_ptr = GGML_FP16_TO_FP32(*(const ggml_fp16_t *) src0_ptr);

                        if (++i10 == ne0) {
                            i10 = 0;
                            if (++i11 == ne1) {
                                i11 = 0;
                                if (++i12 == ne2) {
                                    i12 = 0;
                                    if (++i13 == ne3) {
                                        i13 = 0;
                                    }
                                }
                            }
                        }
                    }
                }
                i10 += ne00 * (ne01 - ir1);
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
            }
        }
    } else {
        GGML_ASSERT(false); // TODO: implement
    }
}

static void ggml_compute_forward_dup_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    GGML_TENSOR_UNARY_OP_LOCALS;

    const int ith = params->ith; // thread index
    const int nth = params->nth; // number of threads

    if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst) && src0->type == dst->type) {
        ggml_compute_forward_dup_same_cont(params, src0, dst);
        return;
    }

    // parallelize by rows
    const int nr = ne01;
    // number of rows per thread
    const int dr = (nr + nth - 1) / nth;
    // row range for this thread
    const int ir0 = dr * ith;
    const int ir1 = MIN(ir0 + dr, nr);

    if (src0->type == dst->type &&
        ne00 == ne0 &&
        nb00 == GGML_TYPE_SIZE[src0->type] && nb0 == GGML_TYPE_SIZE[dst->type]) {
        // copy by rows
        const size_t rs = ne00*nb00;
        for (int64_t i03 = 0; i03 < ne03; i03++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
                for (int64_t i01 = ir0; i01 < ir1; i01++) {
                    memcpy(
                        ((char *)  dst->data + i01*nb1  + i02*nb2  + i03*nb3),
                        ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03),
                        rs);
                }
            }
        }
        return;
    }

    if (ggml_is_contiguous(dst)) {
        // TODO: simplify
        if (nb00 == sizeof(float)) {
            if (dst->type == GGML_TYPE_F32) {
                size_t id = 0;
                const size_t rs = ne00 * nb00;
                char * dst_ptr = (char *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += rs * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            const char * src0_ptr = (char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03;
                            memcpy(dst_ptr + id, src0_ptr, rs);
                            id += rs;
                        }
                        id += rs * (ne01 - ir1);
                    }
                }
            } else if (type_traits[dst->type].from_float) {
                ggml_from_float_t const quantize_row_q = type_traits[dst->type].from_float;

                size_t id = 0;
                size_t rs = nb0 * (ne00 / GGML_BLCK_SIZE[dst->type]);
                char * dst_ptr = (char *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += rs * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            const float * src0_ptr = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
                            quantize_row_q(src0_ptr, dst_ptr + id, ne00);
                            id += rs;
                        }
                        id += rs * (ne01 - ir1);
                    }
                }
            } else {
                GGML_ASSERT(false); // TODO: implement
            }
        } else {
            //printf("%s: this is not optimal - fix me\n", __func__);

            if (dst->type == GGML_TYPE_F32) {
                size_t id = 0;
                float * dst_ptr = (float *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += ne00 * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            for (int i00 = 0; i00 < ne00; i00++) {
                                const float * src0_ptr = (float *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);

                                dst_ptr[id] = *src0_ptr;
                                id++;
                            }
                        }
                        id += ne00 * (ne01 - ir1);
                    }
                }
            } else if (dst->type == GGML_TYPE_F16) {
                size_t id = 0;
                ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += ne00 * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            for (int i00 = 0; i00 < ne00; i00++) {
                                const float * src0_ptr = (float *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);

                                dst_ptr[id] = GGML_FP32_TO_FP16(*src0_ptr);
                                id++;
                            }
                        }
                        id += ne00 * (ne01 - ir1);
                    }
                }
            } else {
                GGML_ASSERT(false); // TODO: implement
            }
        }

        return;
    }

    // dst counters
    int64_t i10 = 0;
    int64_t i11 = 0;
    int64_t i12 = 0;
    int64_t i13 = 0;

    if (dst->type == GGML_TYPE_F32) {
        for (int64_t i03 = 0; i03 < ne03; i03++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
                i10 += ne00 * ir0;
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
                for (int64_t i01 = ir0; i01 < ir1; i01++) {
                    for (int64_t i00 = 0; i00 < ne00; i00++) {
                        const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
                              char * dst_ptr  = ((char *)  dst->data + i10*nb0  + i11*nb1  + i12*nb2  + i13*nb3);

                        memcpy(dst_ptr, src0_ptr, sizeof(float));

                        if (++i10 == ne0) {
                            i10 = 0;
                            if (++i11 == ne1) {
                                i11 = 0;
                                if (++i12 == ne2) {
                                    i12 = 0;
                                    if (++i13 == ne3) {
                                        i13 = 0;
                                    }
                                }
                            }
                        }
                    }
                }
                i10 += ne00 * (ne01 - ir1);
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
            }
        }
    } else if (dst->type == GGML_TYPE_F16) {
        for (int64_t i03 = 0; i03 < ne03; i03++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
                i10 += ne00 * ir0;
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
                for (int64_t i01 = ir0; i01 < ir1; i01++) {
                    for (int64_t i00 = 0; i00 < ne00; i00++) {
                        const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
                              char * dst_ptr  = ((char *)  dst->data + i10*nb0  + i11*nb1  + i12*nb2  + i13*nb3);

                        *(ggml_fp16_t *) dst_ptr = GGML_FP32_TO_FP16(*(const float *) src0_ptr);

                        if (++i10 == ne0) {
                            i10 = 0;
                            if (++i11 == ne1) {
                                i11 = 0;
                                if (++i12 == ne2) {
                                    i12 = 0;
                                    if (++i13 == ne3) {
                                        i13 = 0;
                                    }
                                }
                            }
                        }
                    }
                }
                i10 += ne00 * (ne01 - ir1);
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
            }
        }
    } else {
        GGML_ASSERT(false); // TODO: implement
    }
}
static void ggml_compute_forward_dup(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst) && src0->type == dst->type) {
        ggml_compute_forward_dup_same_cont(params, src0, dst);
        return;
    }
    switch (src0->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_dup_f16(params, src0, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_dup_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_add

static void ggml_compute_forward_add_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_can_repeat_rows(src1, src0) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(src0);

    GGML_TENSOR_BINARY_OP_LOCALS;

    GGML_ASSERT( nb0 == sizeof(float));
    GGML_ASSERT(nb00 == sizeof(float));

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    if (nb10 == sizeof(float)) {
        for (int ir = ir0; ir < ir1; ++ir) {
            // src1 is broadcastable across src0 and dst in i1, i2, i3
            const int64_t i03 = ir/(ne02*ne01);
            const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
            const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);
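            // the modulo wraps the src1 indices, so a src1 dimension of size 1
            // is effectively repeated across the corresponding dst dimension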
            const int64_t i13 = i03 % ne13;
            const int64_t i12 = i02 % ne12;
            const int64_t i11 = i01 % ne11;

            float * dst_ptr  = (float *) ((char *) dst->data  + i03*nb3  + i02*nb2  + i01*nb1 );
            float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);
            float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11);

#ifdef GGML_USE_ACCELERATE
            vDSP_vadd(src0_ptr, 1, src1_ptr, 1, dst_ptr, 1, ne00);
#else
            ggml_vec_add_f32(ne00, dst_ptr, src0_ptr, src1_ptr);
#endif
                // }
            // }
        }
    } else {
        // src1 is not contiguous
        for (int ir = ir0; ir < ir1; ++ir) {
            // src1 is broadcastable across src0 and dst in i1, i2, i3
            const int64_t i03 = ir/(ne02*ne01);
            const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
            const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);

            const int64_t i13 = i03 % ne13;
            const int64_t i12 = i02 % ne12;
            const int64_t i11 = i01 % ne11;

            float * dst_ptr  = (float *) ((char *) dst->data  + i03*nb3  + i02*nb2  + i01*nb1 );
            float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);

            for (int i0 = 0; i0 < ne0; i0++) {
                float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + i0*nb10);

                dst_ptr[i0] = src0_ptr[i0] + *src1_ptr;
            }
        }
    }
}

static void ggml_compute_forward_add_f16_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(src0);

    GGML_TENSOR_BINARY_OP_LOCALS;

    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT(dst->type  == GGML_TYPE_F16);

    GGML_ASSERT( nb0 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    if (nb10 == sizeof(float)) {
        for (int ir = ir0; ir < ir1; ++ir) {
            // src0, src1 and dst are same shape => same indices
            const int i3 = ir/(ne2*ne1);
            const int i2 = (ir - i3*ne2*ne1)/ne1;
            const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

            ggml_fp16_t * dst_ptr  = (ggml_fp16_t *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1);
            ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
            float *       src1_ptr = (float *)       ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11);

            for (int i = 0; i < ne0; i++) {
                dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + src1_ptr[i]);
            }
        }
    }
    else {
        // src1 is not contiguous
        GGML_ASSERT(false);
    }
}

static void ggml_compute_forward_add_f16_f16(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(src0);

    GGML_TENSOR_BINARY_OP_LOCALS;

    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F16);
    GGML_ASSERT(dst->type  == GGML_TYPE_F16);

    GGML_ASSERT( nb0 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    if (nb10 == sizeof(ggml_fp16_t)) {
        for (int ir = ir0; ir < ir1; ++ir) {
            // src0, src1 and dst are same shape => same indices
            const int i3 = ir/(ne2*ne1);
            const int i2 = (ir - i3*ne2*ne1)/ne1;
            const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

            ggml_fp16_t * dst_ptr  = (ggml_fp16_t *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1);
            ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
            ggml_fp16_t * src1_ptr = (ggml_fp16_t *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11);

            for (int i = 0; i < ne0; i++) {
                dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + GGML_FP16_TO_FP32(src1_ptr[i]));
            }
        }
    }
    else {
        // src1 is not contiguous
        GGML_ASSERT(false);
    }
}
static void ggml_compute_forward_add_q_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int nr = ggml_nrows(src0);

    GGML_TENSOR_BINARY_OP_LOCALS;

    const int ith = params->ith;
    const int nth = params->nth;

    const enum ggml_type type = src0->type;
    ggml_to_float_t   const dequantize_row_q = type_traits[type].to_float;
    ggml_from_float_t const quantize_row_q   = type_traits[type].from_float;

    // we don't support permuted src0 or src1
    GGML_ASSERT(nb00 == GGML_TYPE_SIZE[type]);
    GGML_ASSERT(nb10 == sizeof(float));

    // dst cannot be transposed or permuted
    GGML_ASSERT(nb0 <= nb1);
    GGML_ASSERT(nb1 <= nb2);
    GGML_ASSERT(nb2 <= nb3);

    GGML_ASSERT(ggml_is_quantized(src0->type));
    GGML_ASSERT(dst->type == src0->type);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);
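
    // each thread gets its own ne00-wide f32 scratch row in wdata, padded by
    // CACHE_LINE_SIZE_F32 floats so neighboring threads do not touch the same
    // cache lines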
    float * wdata = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith;

    for (int ir = ir0; ir < ir1; ++ir) {
        // src0 indices
        const int i03 = ir/(ne02*ne01);
        const int i02 = (ir - i03*ne02*ne01)/ne01;
        const int i01 = (ir - i03*ne02*ne01 - i02*ne01);

        // src1 and dst are same shape as src0 => same indices
        const int i13 = i03;
        const int i12 = i02;
        const int i11 = i01;

        const int i3 = i03;
        const int i2 = i02;
        const int i1 = i01;

        void  * src0_row = (void *) ((char *) src0->data + (i01*nb01 + i02*nb02 + i03*nb03));
        float * src1_row = (float *)((char *) src1->data + (i11*nb11 + i12*nb12 + i13*nb13));
        void  * dst_row  = (void *) ((char *)  dst->data + ( i1*nb1  +  i2*nb2  +  i3*nb3));

        assert(ne00 % 32 == 0);

        // unquantize row from src0 to temp buffer
        dequantize_row_q(src0_row, wdata, ne00);
        // add src1
        ggml_vec_acc_f32(ne00, wdata, src1_row);
        // quantize row to dst
        quantize_row_q(wdata, dst_row, ne00);
    }
}

static void ggml_compute_forward_add(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_add_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F16:
            {
                if (src1->type == GGML_TYPE_F16) {
                    ggml_compute_forward_add_f16_f16(params, src0, src1, dst);
                }
                else if (src1->type == GGML_TYPE_F32) {
                    ggml_compute_forward_add_f16_f32(params, src0, src1, dst);
                }
                else {
                    GGML_ASSERT(false);
                }
            } break;
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
            {
                ggml_compute_forward_add_q_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_add1

static void ggml_compute_forward_add1_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, dst));
    GGML_ASSERT(ggml_is_scalar(src1));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(src0);

    GGML_TENSOR_UNARY_OP_LOCALS;

    GGML_ASSERT( nb0 == sizeof(float));
    GGML_ASSERT(nb00 == sizeof(float));

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int ir = ir0; ir < ir1; ++ir) {
        // src0 and dst are same shape => same indices
        const int i3 = ir/(ne2*ne1);
        const int i2 = (ir - i3*ne2*ne1)/ne1;
        const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

#ifdef GGML_USE_ACCELERATE
        UNUSED(ggml_vec_add1_f32);

        vDSP_vadd(
                (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01), 1,
                (float *) ((char *) src1->data), 0,
                (float *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1 ), 1,
                ne0);
#else
        ggml_vec_add1_f32(ne0,
                (float *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1 ),
                (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01),
               *(float *) src1->data);
#endif
    }
}

static void ggml_compute_forward_add1_f16_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, dst));
    GGML_ASSERT(ggml_is_scalar(src1));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // scalar to add
    const float v = *(float *) src1->data;

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(src0);

    GGML_TENSOR_UNARY_OP_LOCALS;

    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT(dst->type  == GGML_TYPE_F16);

    GGML_ASSERT( nb0 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int ir = ir0; ir < ir1; ++ir) {
        // src0 and dst are same shape => same indices
        const int i3 = ir/(ne2*ne1);
        const int i2 = (ir - i3*ne2*ne1)/ne1;
        const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

        ggml_fp16_t * dst_ptr  = (ggml_fp16_t *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1 );
        ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
        for (int i = 0; i < ne0; i++) {
            dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + v);
        }
    }
}

static void ggml_compute_forward_add1_f16_f16(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, dst));
    GGML_ASSERT(ggml_is_scalar(src1));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // scalar to add
    const float v = GGML_FP16_TO_FP32(*(ggml_fp16_t *) src1->data);

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(src0);

    GGML_TENSOR_UNARY_OP_LOCALS;

    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F16);
    GGML_ASSERT(dst->type  == GGML_TYPE_F16);

    GGML_ASSERT( nb0 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int ir = ir0; ir < ir1; ++ir) {
        // src0 and dst are same shape => same indices
        const int i3 = ir/(ne2*ne1);
        const int i2 = (ir - i3*ne2*ne1)/ne1;
        const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

        ggml_fp16_t * dst_ptr  = (ggml_fp16_t *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1 );
        ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
        for (int i = 0; i < ne0; i++) {
            dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + v);
        }
    }
}
static void ggml_compute_forward_add1_q_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, dst));
    GGML_ASSERT(ggml_is_scalar(src1));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // scalar to add
    const float v = *(float *) src1->data;

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(src0);

    GGML_TENSOR_UNARY_OP_LOCALS;

    const enum ggml_type type = src0->type;
    ggml_to_float_t   const dequantize_row_q = type_traits[type].to_float;
    ggml_from_float_t const quantize_row_q   = type_traits[type].from_float;

    // we don't support permuted src0
    GGML_ASSERT(nb00 == GGML_TYPE_SIZE[type]);

    // dst cannot be transposed or permuted
    GGML_ASSERT(nb0 <= nb1);
    GGML_ASSERT(nb1 <= nb2);
    GGML_ASSERT(nb2 <= nb3);

    GGML_ASSERT(ggml_is_quantized(src0->type));
    GGML_ASSERT(dst->type == src0->type);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    float * wdata = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32) * ith;

    for (int ir = ir0; ir < ir1; ++ir) {
        // src0 and dst are same shape => same indices
        const int i3 = ir/(ne2*ne1);
        const int i2 = (ir - i3*ne2*ne1)/ne1;
        const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

        void * src0_row = (void *) ((char *) src0->data + (i1*nb01 + i2*nb02 + i3*nb03));
        void * dst_row  = (void *) ((char *)  dst->data + (i1*nb1  + i2*nb2  + i3*nb3 ));

        assert(ne0 % 32 == 0);

        // unquantize row from src0 to temp buffer
        dequantize_row_q(src0_row, wdata, ne0);
        // add src1
        ggml_vec_acc1_f32(ne0, wdata, v);
        // quantize row to dst
        quantize_row_q(wdata, dst_row, ne0);
    }
}
static void ggml_compute_forward_add1(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_add1_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F16:
            {
                if (src1->type == GGML_TYPE_F16) {
                    ggml_compute_forward_add1_f16_f16(params, src0, src1, dst);
                }
                else if (src1->type == GGML_TYPE_F32) {
                    ggml_compute_forward_add1_f16_f32(params, src0, src1, dst);
                }
                else {
                    GGML_ASSERT(false);
                }
            } break;
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q8_1:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
            {
                ggml_compute_forward_add1_q_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_acc

static void ggml_compute_forward_acc_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, dst));
    GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));

    // view src0 and dst with these strides and data offset in bytes during acc
    // nb0 is implicitly element_size because src0 and dst are contiguous
    size_t nb1     = ((int32_t *) dst->op_params)[0];
    size_t nb2     = ((int32_t *) dst->op_params)[1];
    size_t nb3     = ((int32_t *) dst->op_params)[2];
    size_t offset  = ((int32_t *) dst->op_params)[3];
    bool   inplace = (bool) ((int32_t *) dst->op_params)[4];
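
    // when not inplace, dst is first seeded with a copy of src0 so that the
    // region selected by the view is the only part that ends up modified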
    if (!inplace && (params->type == GGML_TASK_INIT)) {
        // memcpy needs to be synchronized across threads to avoid race conditions.
        // => do it in INIT phase
        memcpy(
            ((char *)  dst->data),
            ((char *) src0->data),
            ggml_nbytes(dst));
    }

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(src1);
    const int nc = src1->ne[0];

    GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne);
    GGML_TENSOR_LOCALS(size_t,  nb1, src1, nb);

    // src0 and dst as viewed during acc
    const size_t nb0 = ggml_element_size(src0);

    const size_t nb00 = nb0;
    const size_t nb01 = nb1;
    const size_t nb02 = nb2;
    const size_t nb03 = nb3;
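
    // the last element reachable through the view must still lie inside the
    // dst and src0 buffers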
    GGML_ASSERT(offset + (ne10 == 0 ? 0 : ne10-1)*nb0  + (ne11 == 0 ? 0 : ne11-1)*nb1  + (ne12 == 0 ? 0 : ne12-1)*nb2  + (ne13 == 0 ? 0 : ne13-1)*nb3  < ggml_nbytes(dst));
    GGML_ASSERT(offset + (ne10 == 0 ? 0 : ne10-1)*nb00 + (ne11 == 0 ? 0 : ne11-1)*nb01 + (ne12 == 0 ? 0 : ne12-1)*nb02 + (ne13 == 0 ? 0 : ne13-1)*nb03 < ggml_nbytes(src0));

    GGML_ASSERT(nb10 == sizeof(float));

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int ir = ir0; ir < ir1; ++ir) {
        // src0 and dst are viewed with shape of src1 and offset
        // => same indices
        const int i3 = ir/(ne12*ne11);
        const int i2 = (ir - i3*ne12*ne11)/ne11;
        const int i1 = (ir - i3*ne12*ne11 - i2*ne11);

#ifdef GGML_USE_ACCELERATE
        vDSP_vadd(
                (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + offset), 1,
                (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11), 1,
                (float *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1  + offset), 1, nc);
#else
        ggml_vec_add_f32(nc,
                (float *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1  + offset),
                (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + offset),
                (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11));
#endif
    }
}

static void ggml_compute_forward_acc(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_acc_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F16:
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q8_1:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_sub

static void ggml_compute_forward_sub_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int nr = ggml_nrows(src0);

    GGML_TENSOR_BINARY_OP_LOCALS;

    GGML_ASSERT( nb0 == sizeof(float));
    GGML_ASSERT(nb00 == sizeof(float));

    if (nb10 == sizeof(float)) {
        for (int ir = 0; ir < nr; ++ir) {
            // src0, src1 and dst are same shape => same indices
            const int i3 = ir/(ne2*ne1);
            const int i2 = (ir - i3*ne2*ne1)/ne1;
            const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

#ifdef GGML_USE_ACCELERATE
            vDSP_vsub(
                    (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11), 1,
                    (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01), 1,
                    (float *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1 ), 1,
                    ne0);
#else
            ggml_vec_sub_f32(ne0,
                    (float *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1 ),
                    (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01),
                    (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11));
#endif
                // }
            // }
        }
    } else {
        // src1 is not contiguous
        for (int ir = 0; ir < nr; ++ir) {
            // src0, src1 and dst are same shape => same indices
            const int i3 = ir/(ne2*ne1);
            const int i2 = (ir - i3*ne2*ne1)/ne1;
            const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

            float * dst_ptr  = (float *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1 );
            float * src0_ptr = (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
            for (int i0 = 0; i0 < ne0; i0++) {
                float * src1_ptr = (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11 + i0*nb10);

                dst_ptr[i0] = src0_ptr[i0] - *src1_ptr;
            }
        }
    }
}

static void ggml_compute_forward_sub(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_sub_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_mul

static void ggml_compute_forward_mul_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_can_repeat_rows(src1, src0) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;
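
    // when src1 already lives on the GPU, thread 0 dispatches the whole
    // multiply to the OpenCL backend and every thread returns early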
#ifdef GGML_USE_CLBLAST
    if (src1->backend == GGML_BACKEND_GPU) {
        if (ith == 0) {
            ggml_cl_mul(src0, src1, dst);
        }
        return;
    }
#endif

    const int64_t nr = ggml_nrows(src0);

    GGML_TENSOR_BINARY_OP_LOCALS;

    GGML_ASSERT( nb0 == sizeof(float));
    GGML_ASSERT(nb00 == sizeof(float));
    GGML_ASSERT(ne00 == ne10);

    if (nb10 == sizeof(float)) {
        for (int64_t ir = ith; ir < nr; ir += nth) {
            // src0 and dst are same shape => same indices
            const int64_t i03 = ir/(ne02*ne01);
            const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
            const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);

            const int64_t i13 = i03 % ne13;
            const int64_t i12 = i02 % ne12;
            const int64_t i11 = i01 % ne11;

            float * dst_ptr  = (float *) ((char *) dst->data  + i03*nb3  + i02*nb2  + i01*nb1 );
            float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);
            float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11);

#ifdef GGML_USE_ACCELERATE
            UNUSED(ggml_vec_mul_f32);

            vDSP_vmul( src0_ptr, 1, src1_ptr, 1, dst_ptr, 1, ne00);
#else
            ggml_vec_mul_f32(ne00, dst_ptr, src0_ptr, src1_ptr);
#endif
                // }
            // }
        }
    } else {
        // src1 is not contiguous
        for (int64_t ir = ith; ir < nr; ir += nth) {
            // src0 and dst are same shape => same indices
            // src1 is broadcastable across src0 and dst in i1, i2, i3
            const int64_t i03 = ir/(ne02*ne01);
            const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
            const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);

            const int64_t i13 = i03 % ne13;
            const int64_t i12 = i02 % ne12;
            const int64_t i11 = i01 % ne11;

            float * dst_ptr  = (float *) ((char *) dst->data  + i03*nb3  + i02*nb2  + i01*nb1 );
            float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);

            for (int64_t i0 = 0; i0 < ne00; i0++) {
                float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + i0*nb10);

                dst_ptr[i0] = src0_ptr[i0] * (*src1_ptr);
            }
        }
    }
}

static void ggml_compute_forward_mul(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_mul_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_div

static void ggml_compute_forward_div_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int nr = ggml_nrows(src0);

    GGML_TENSOR_BINARY_OP_LOCALS;

    GGML_ASSERT( nb0 == sizeof(float));
    GGML_ASSERT(nb00 == sizeof(float));

    if (nb10 == sizeof(float)) {
        for (int ir = 0; ir < nr; ++ir) {
            // src0, src1 and dst are same shape => same indices
            const int i3 = ir/(ne2*ne1);
            const int i2 = (ir - i3*ne2*ne1)/ne1;
            const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

#ifdef GGML_USE_ACCELERATE
            vDSP_vdiv(
                    (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11), 1,
                    (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01), 1,
                    (float *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1 ), 1,
                    ne0);
#else
            ggml_vec_div_f32(ne0,
                    (float *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1 ),
                    (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01),
                    (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11));
#endif
                // }
            // }
        }
    } else {
        // src1 is not contiguous
        for (int ir = 0; ir < nr; ++ir) {
            // src0, src1 and dst are same shape => same indices
            const int i3 = ir/(ne2*ne1);
            const int i2 = (ir - i3*ne2*ne1)/ne1;
            const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

            float * dst_ptr  = (float *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1 );
            float * src0_ptr = (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
            for (int i0 = 0; i0 < ne0; i0++) {
                float * src1_ptr = (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11 + i0*nb10);

                dst_ptr[i0] = src0_ptr[i0] / (*src1_ptr);
            }
        }
    }
}

static void ggml_compute_forward_div(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_div_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_sqr

static void ggml_compute_forward_sqr_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert( dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_sqr_f32(nc,
                (float *) ((char *) dst->data  + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

static void ggml_compute_forward_sqr(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_sqr_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_sqrt

static void ggml_compute_forward_sqrt_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert( dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_sqrt_f32(nc,
                (float *) ((char *) dst->data  + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

static void ggml_compute_forward_sqrt(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_sqrt_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_log

static void ggml_compute_forward_log_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(params->ith == 0);
    GGML_ASSERT(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    GGML_ASSERT( dst->nb[0] == sizeof(float));
    GGML_ASSERT(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_log_f32(nc,
                (float *) ((char *) dst->data  + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

static void ggml_compute_forward_log(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_log_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_sum

static void ggml_compute_forward_sum_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_is_scalar(dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    assert(ggml_is_scalar(dst));
    assert(src0->nb[0] == sizeof(float));

    GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne);
    GGML_TENSOR_LOCALS(size_t,  nb0, src0, nb);
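
    // accumulate in ggml_float (a wider accumulator type) to limit rounding
    // error when summing across many rows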
    ggml_float sum     = 0;
    ggml_float row_sum = 0;

    for (int64_t i03 = 0; i03 < ne03; i03++) {
        for (int64_t i02 = 0; i02 < ne02; i02++) {
            for (int64_t i01 = 0; i01 < ne01; i01++) {
                ggml_vec_sum_f32_ggf(ne00,
                        &row_sum,
                        (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03));
                sum += row_sum;
            }
        }
    }
    ((float *) dst->data)[0] = sum;
}

static void ggml_compute_forward_sum_f16(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_is_scalar(dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    assert(src0->nb[0] == sizeof(ggml_fp16_t));

    GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne);
    GGML_TENSOR_LOCALS(size_t,  nb0, src0, nb);

    float sum = 0;
    float row_sum = 0;

    for (int64_t i03 = 0; i03 < ne03; i03++) {
        for (int64_t i02 = 0; i02 < ne02; i02++) {
            for (int64_t i01 = 0; i01 < ne01; i01++) {
                ggml_vec_sum_f16_ggf(ne00,
                        &row_sum,
                        (ggml_fp16_t *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03));
                sum += row_sum;
            }
        }
    }
    ((ggml_fp16_t *) dst->data)[0] = GGML_FP32_TO_FP16(sum);
}

static void ggml_compute_forward_sum(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_sum_f32(params, src0, dst);
            } break;
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_sum_f16(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_sum_rows

static void ggml_compute_forward_sum_rows_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    GGML_ASSERT(src0->nb[0] == sizeof(float));
    GGML_ASSERT(dst->nb[0] == sizeof(float));

    GGML_TENSOR_UNARY_OP_LOCALS;

    GGML_ASSERT(ne0 == 1);
    GGML_ASSERT(ne1 == ne01);
    GGML_ASSERT(ne2 == ne02);
    GGML_ASSERT(ne3 == ne03);
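
    // dst has shape [1, ne01, ne02, ne03]: one scalar per src0 row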
    for (int64_t i3 = 0; i3 < ne03; i3++) {
        for (int64_t i2 = 0; i2 < ne02; i2++) {
            for (int64_t i1 = 0; i1 < ne01; i1++) {
                float * src_row = (float *) ((char *) src0->data + i1*nb01 + i2*nb02 + i3*nb03);
                float * dst_row = (float *) ((char *)  dst->data + i1*nb1  + i2*nb2  + i3*nb3);
                float row_sum = 0;
                ggml_vec_sum_f32(ne00, &row_sum, src_row);
                dst_row[0] = row_sum;
            }
        }
    }
}

static void ggml_compute_forward_sum_rows(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_sum_rows_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_mean

static void ggml_compute_forward_mean_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    assert(src0->nb[0] == sizeof(float));

    GGML_TENSOR_UNARY_OP_LOCALS;

    assert(ne0 == 1);
    assert(ne1 == ne01);
    assert(ne2 == ne02);
    assert(ne3 == ne03);

    UNUSED(ne0);
    UNUSED(ne1);
    UNUSED(ne2);
    UNUSED(ne3);

    for (int64_t i03 = 0; i03 < ne03; i03++) {
        for (int64_t i02 = 0; i02 < ne02; i02++) {
            for (int64_t i01 = 0; i01 < ne01; i01++) {
                ggml_vec_sum_f32(ne00,
                        (float *) ((char *)  dst->data + i01*nb1  + i02*nb2  + i03*nb3),
                        (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03));

                *(float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3) /= (float) ne00;
            }
        }
    }
}

static void ggml_compute_forward_mean(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_mean_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_argmax

static void ggml_compute_forward_argmax_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    assert(src0->nb[0] == sizeof(float));
    assert(dst->nb[0] == sizeof(float));

    const int64_t ne00 = src0->ne[0];
    const int64_t ne01 = src0->ne[1];

    const size_t nb01 = src0->nb[1];
    const size_t nb0  = dst->nb[0];
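
    // dst stores one int32 value per src0 row: the index of that row's maximum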
    for (int64_t i1 = 0; i1 < ne01; i1++) {
        float * src = (float *) ((char *) src0->data + i1*nb01);
        int32_t * dst_ = (int32_t *) ((char *) dst->data + i1*nb0);
        int v = 0;
        ggml_vec_argmax_f32(ne00, &v, src);
        dst_[0] = v;
    }
}

static void ggml_compute_forward_argmax(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_argmax_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_repeat

static void ggml_compute_forward_repeat_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(params->ith == 0);
    GGML_ASSERT(ggml_can_repeat(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    GGML_TENSOR_UNARY_OP_LOCALS;

    // guaranteed to be an integer due to the check in ggml_can_repeat
    const int nr0 = (int)(ne0/ne00);
    const int nr1 = (int)(ne1/ne01);
    const int nr2 = (int)(ne2/ne02);
    const int nr3 = (int)(ne3/ne03);

    // TODO: support for transposed / permuted tensors
    GGML_ASSERT(nb0  == sizeof(float));
    GGML_ASSERT(nb00 == sizeof(float));

    // TODO: maybe this is not optimal?
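    // the outer i* loops iterate over the nr* repetitions while the inner k*
    // loops walk the original src0 extent; each (i, k) pair copies one row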
    for (int i3 = 0; i3 < nr3; i3++) {
        for (int k3 = 0; k3 < ne03; k3++) {
            for (int i2 = 0; i2 < nr2; i2++) {
                for (int k2 = 0; k2 < ne02; k2++) {
                    for (int i1 = 0; i1 < nr1; i1++) {
                        for (int k1 = 0; k1 < ne01; k1++) {
                            for (int i0 = 0; i0 < nr0; i0++) {
                                ggml_vec_cpy_f32(ne00,
                                        (float *) ((char *)  dst->data + (i3*ne03 + k3)*nb3  + (i2*ne02 + k2)*nb2  + (i1*ne01 + k1)*nb1  + (i0*ne00)*nb0),
                                        (float *) ((char *) src0->data + (          k3)*nb03 + (          k2)*nb02 + (          k1)*nb01));
                            }
                        }
                    }
                }
            }
        }
    }
}

static void ggml_compute_forward_repeat(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_repeat_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_repeat_back

static void ggml_compute_forward_repeat_back_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(params->ith == 0);
    GGML_ASSERT(ggml_can_repeat(dst, src0));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    GGML_TENSOR_UNARY_OP_LOCALS;

    // guaranteed to be an integer due to the check in ggml_can_repeat
    const int nr0 = (int)(ne00/ne0);
    const int nr1 = (int)(ne01/ne1);
    const int nr2 = (int)(ne02/ne2);
    const int nr3 = (int)(ne03/ne3);

    // TODO: support for transposed / permuted tensors
    GGML_ASSERT(nb0  == sizeof(float));
    GGML_ASSERT(nb00 == sizeof(float));

    if (ggml_is_contiguous(dst)) {
        ggml_vec_set_f32(ne0*ne1*ne2*ne3, dst->data, 0);
    } else {
        for (int k3 = 0; k3 < ne3; k3++) {
            for (int k2 = 0; k2 < ne2; k2++) {
                for (int k1 = 0; k1 < ne1; k1++) {
                    ggml_vec_set_f32(ne0,
                            (float *) ((char *) dst->data + k1*nb1 + k2*nb2 + k3*nb3),
                            0);
                }
            }
        }
    }
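
    // dst was zeroed above; every repeated copy in src0 is now accumulated
    // back into the corresponding position of the smaller dst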
  7677. // TODO: maybe this is not optimal?
  7678. for (int i3 = 0; i3 < nr3; i3++) {
  7679. for (int k3 = 0; k3 < ne3; k3++) {
  7680. for (int i2 = 0; i2 < nr2; i2++) {
  7681. for (int k2 = 0; k2 < ne2; k2++) {
  7682. for (int i1 = 0; i1 < nr1; i1++) {
  7683. for (int k1 = 0; k1 < ne1; k1++) {
  7684. for (int i0 = 0; i0 < nr0; i0++) {
  7685. ggml_vec_acc_f32(ne0,
  7686. (float *) ((char *) dst->data + ( k3)*nb3 + ( k2)*nb2 + ( k1)*nb1),
  7687. (float *) ((char *) src0->data + (i3*ne3 + k3)*nb03 + (i2*ne2 + k2)*nb02 + (i1*ne1 + k1)*nb01 + (i0*ne0)*nb00));
  7688. }
  7689. }
  7690. }
  7691. }
  7692. }
  7693. }
  7694. }
  7695. }
  7696. static void ggml_compute_forward_repeat_back(
  7697. const struct ggml_compute_params * params,
  7698. const struct ggml_tensor * src0,
  7699. struct ggml_tensor * dst) {
  7700. switch (src0->type) {
  7701. case GGML_TYPE_F32:
  7702. {
  7703. ggml_compute_forward_repeat_back_f32(params, src0, dst);
  7704. } break;
  7705. default:
  7706. {
  7707. GGML_ASSERT(false);
  7708. } break;
  7709. }
  7710. }
  7711. // ggml_compute_forward_abs
  7712. static void ggml_compute_forward_abs_f32(
  7713. const struct ggml_compute_params * params,
  7714. const struct ggml_tensor * src0,
  7715. struct ggml_tensor * dst) {
  7716. assert(params->ith == 0);
  7717. assert(ggml_are_same_shape(src0, dst));
  7718. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7719. return;
  7720. }
  7721. const int n = ggml_nrows(src0);
  7722. const int nc = src0->ne[0];
  7723. assert(dst->nb[0] == sizeof(float));
  7724. assert(src0->nb[0] == sizeof(float));
  7725. for (int i = 0; i < n; i++) {
  7726. ggml_vec_abs_f32(nc,
  7727. (float *) ((char *) dst->data + i*( dst->nb[1])),
  7728. (float *) ((char *) src0->data + i*(src0->nb[1])));
  7729. }
  7730. }
  7731. static void ggml_compute_forward_abs(
  7732. const struct ggml_compute_params * params,
  7733. const struct ggml_tensor * src0,
  7734. struct ggml_tensor * dst) {
  7735. switch (src0->type) {
  7736. case GGML_TYPE_F32:
  7737. {
  7738. ggml_compute_forward_abs_f32(params, src0, dst);
  7739. } break;
  7740. default:
  7741. {
  7742. GGML_ASSERT(false);
  7743. } break;
  7744. }
  7745. }
  7746. // ggml_compute_forward_sgn
  7747. static void ggml_compute_forward_sgn_f32(
  7748. const struct ggml_compute_params * params,
  7749. const struct ggml_tensor * src0,
  7750. struct ggml_tensor * dst) {
  7751. assert(params->ith == 0);
  7752. assert(ggml_are_same_shape(src0, dst));
  7753. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7754. return;
  7755. }
  7756. const int n = ggml_nrows(src0);
  7757. const int nc = src0->ne[0];
  7758. assert(dst->nb[0] == sizeof(float));
  7759. assert(src0->nb[0] == sizeof(float));
  7760. for (int i = 0; i < n; i++) {
  7761. ggml_vec_sgn_f32(nc,
  7762. (float *) ((char *) dst->data + i*( dst->nb[1])),
  7763. (float *) ((char *) src0->data + i*(src0->nb[1])));
  7764. }
  7765. }
  7766. static void ggml_compute_forward_sgn(
  7767. const struct ggml_compute_params * params,
  7768. const struct ggml_tensor * src0,
  7769. struct ggml_tensor * dst) {
  7770. switch (src0->type) {
  7771. case GGML_TYPE_F32:
  7772. {
  7773. ggml_compute_forward_sgn_f32(params, src0, dst);
  7774. } break;
  7775. default:
  7776. {
  7777. GGML_ASSERT(false);
  7778. } break;
  7779. }
  7780. }
  7781. // ggml_compute_forward_neg
  7782. static void ggml_compute_forward_neg_f32(
  7783. const struct ggml_compute_params * params,
  7784. const struct ggml_tensor * src0,
  7785. struct ggml_tensor * dst) {
  7786. assert(params->ith == 0);
  7787. assert(ggml_are_same_shape(src0, dst));
  7788. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7789. return;
  7790. }
  7791. const int n = ggml_nrows(src0);
  7792. const int nc = src0->ne[0];
  7793. assert(dst->nb[0] == sizeof(float));
  7794. assert(src0->nb[0] == sizeof(float));
  7795. for (int i = 0; i < n; i++) {
  7796. ggml_vec_neg_f32(nc,
  7797. (float *) ((char *) dst->data + i*( dst->nb[1])),
  7798. (float *) ((char *) src0->data + i*(src0->nb[1])));
  7799. }
  7800. }
  7801. static void ggml_compute_forward_neg(
  7802. const struct ggml_compute_params * params,
  7803. const struct ggml_tensor * src0,
  7804. struct ggml_tensor * dst) {
  7805. switch (src0->type) {
  7806. case GGML_TYPE_F32:
  7807. {
  7808. ggml_compute_forward_neg_f32(params, src0, dst);
  7809. } break;
  7810. default:
  7811. {
  7812. GGML_ASSERT(false);
  7813. } break;
  7814. }
  7815. }
  7816. // ggml_compute_forward_step
  7817. static void ggml_compute_forward_step_f32(
  7818. const struct ggml_compute_params * params,
  7819. const struct ggml_tensor * src0,
  7820. struct ggml_tensor * dst) {
  7821. assert(params->ith == 0);
  7822. assert(ggml_are_same_shape(src0, dst));
  7823. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7824. return;
  7825. }
  7826. const int n = ggml_nrows(src0);
  7827. const int nc = src0->ne[0];
  7828. assert(dst->nb[0] == sizeof(float));
  7829. assert(src0->nb[0] == sizeof(float));
  7830. for (int i = 0; i < n; i++) {
  7831. ggml_vec_step_f32(nc,
  7832. (float *) ((char *) dst->data + i*( dst->nb[1])),
  7833. (float *) ((char *) src0->data + i*(src0->nb[1])));
  7834. }
  7835. }
  7836. static void ggml_compute_forward_step(
  7837. const struct ggml_compute_params * params,
  7838. const struct ggml_tensor * src0,
  7839. struct ggml_tensor * dst) {
  7840. switch (src0->type) {
  7841. case GGML_TYPE_F32:
  7842. {
  7843. ggml_compute_forward_step_f32(params, src0, dst);
  7844. } break;
  7845. default:
  7846. {
  7847. GGML_ASSERT(false);
  7848. } break;
  7849. }
  7850. }
  7851. // ggml_compute_forward_tanh
  7852. static void ggml_compute_forward_tanh_f32(
  7853. const struct ggml_compute_params * params,
  7854. const struct ggml_tensor * src0,
  7855. struct ggml_tensor * dst) {
  7856. assert(params->ith == 0);
  7857. assert(ggml_are_same_shape(src0, dst));
  7858. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7859. return;
  7860. }
  7861. const int n = ggml_nrows(src0);
  7862. const int nc = src0->ne[0];
  7863. assert(dst->nb[0] == sizeof(float));
  7864. assert(src0->nb[0] == sizeof(float));
  7865. for (int i = 0; i < n; i++) {
  7866. ggml_vec_tanh_f32(nc,
  7867. (float *) ((char *) dst->data + i*( dst->nb[1])),
  7868. (float *) ((char *) src0->data + i*(src0->nb[1])));
  7869. }
  7870. }
  7871. static void ggml_compute_forward_tanh(
  7872. const struct ggml_compute_params * params,
  7873. const struct ggml_tensor * src0,
  7874. struct ggml_tensor * dst) {
  7875. switch (src0->type) {
  7876. case GGML_TYPE_F32:
  7877. {
  7878. ggml_compute_forward_tanh_f32(params, src0, dst);
  7879. } break;
  7880. default:
  7881. {
  7882. GGML_ASSERT(false);
  7883. } break;
  7884. }
  7885. }
  7886. // ggml_compute_forward_elu
  7887. static void ggml_compute_forward_elu_f32(
  7888. const struct ggml_compute_params * params,
  7889. const struct ggml_tensor * src0,
  7890. struct ggml_tensor * dst) {
  7891. assert(params->ith == 0);
  7892. assert(ggml_are_same_shape(src0, dst));
  7893. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7894. return;
  7895. }
  7896. const int n = ggml_nrows(src0);
  7897. const int nc = src0->ne[0];
  7898. assert(dst->nb[0] == sizeof(float));
  7899. assert(src0->nb[0] == sizeof(float));
  7900. for (int i = 0; i < n; i++) {
  7901. ggml_vec_elu_f32(nc,
  7902. (float *) ((char *) dst->data + i*( dst->nb[1])),
  7903. (float *) ((char *) src0->data + i*(src0->nb[1])));
  7904. }
  7905. }
  7906. static void ggml_compute_forward_elu(
  7907. const struct ggml_compute_params * params,
  7908. const struct ggml_tensor * src0,
  7909. struct ggml_tensor * dst) {
  7910. switch (src0->type) {
  7911. case GGML_TYPE_F32:
  7912. {
  7913. ggml_compute_forward_elu_f32(params, src0, dst);
  7914. } break;
  7915. default:
  7916. {
  7917. GGML_ASSERT(false);
  7918. } break;
  7919. }
  7920. }
  7921. // ggml_compute_forward_relu
  7922. static void ggml_compute_forward_relu_f32(
  7923. const struct ggml_compute_params * params,
  7924. const struct ggml_tensor * src0,
  7925. struct ggml_tensor * dst) {
  7926. assert(params->ith == 0);
  7927. assert(ggml_are_same_shape(src0, dst));
  7928. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7929. return;
  7930. }
  7931. const int n = ggml_nrows(src0);
  7932. const int nc = src0->ne[0];
  7933. assert(dst->nb[0] == sizeof(float));
  7934. assert(src0->nb[0] == sizeof(float));
  7935. for (int i = 0; i < n; i++) {
  7936. ggml_vec_relu_f32(nc,
  7937. (float *) ((char *) dst->data + i*( dst->nb[1])),
  7938. (float *) ((char *) src0->data + i*(src0->nb[1])));
  7939. }
  7940. }
  7941. static void ggml_compute_forward_relu(
  7942. const struct ggml_compute_params * params,
  7943. const struct ggml_tensor * src0,
  7944. struct ggml_tensor * dst) {
  7945. switch (src0->type) {
  7946. case GGML_TYPE_F32:
  7947. {
  7948. ggml_compute_forward_relu_f32(params, src0, dst);
  7949. } break;
  7950. default:
  7951. {
  7952. GGML_ASSERT(false);
  7953. } break;
  7954. }
  7955. }

// ggml_compute_forward_gelu

static void ggml_compute_forward_gelu_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_is_contiguous_except_dim_1(src0));
    GGML_ASSERT(ggml_is_contiguous_except_dim_1(dst));
    GGML_ASSERT(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int nc = src0->ne[0];
    const int nr = ggml_nrows(src0);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        ggml_vec_gelu_f32(nc,
                (float *) ((char *) dst->data  + i1*( dst->nb[1])),
                (float *) ((char *) src0->data + i1*(src0->nb[1])));

#ifndef NDEBUG
        for (int k = 0; k < nc; k++) {
            const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k];
            UNUSED(x);
            assert(!isnan(x));
            assert(!isinf(x));
        }
#endif
    }
}

static void ggml_compute_forward_gelu(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_gelu_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
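
// Illustrative sketch (not part of ggml): ggml_vec_gelu_f32 evaluates the
// widely used tanh approximation of GELU, which in scalar form is roughly
//
//     static float gelu(float x) {
//         // 0.79788456f ~ sqrt(2/pi)
//         return 0.5f*x*(1.0f + tanhf(0.79788456f*(x + 0.044715f*x*x*x)));
//     }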

// ggml_compute_forward_gelu_quick

static void ggml_compute_forward_gelu_quick_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_is_contiguous_except_dim_1(src0));
    GGML_ASSERT(ggml_is_contiguous_except_dim_1(dst));
    GGML_ASSERT(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int nc = src0->ne[0];
    const int nr = ggml_nrows(src0);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        ggml_vec_gelu_quick_f32(nc,
                (float *) ((char *) dst->data  + i1*( dst->nb[1])),
                (float *) ((char *) src0->data + i1*(src0->nb[1])));

#ifndef NDEBUG
        for (int k = 0; k < nc; k++) {
            const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k];
            UNUSED(x);
            assert(!isnan(x));
            assert(!isinf(x));
        }
#endif
    }
}

static void ggml_compute_forward_gelu_quick(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_gelu_quick_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
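
// Illustrative sketch (not part of ggml): the "quick" GELU variant trades
// the tanh approximation for a single sigmoid; in scalar form roughly
//
//     static float gelu_quick(float x) {
//         return x / (1.0f + expf(-1.702f*x));
//     }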

// ggml_compute_forward_silu

static void ggml_compute_forward_silu_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_is_contiguous_except_dim_1(src0));
    GGML_ASSERT(ggml_is_contiguous_except_dim_1(dst));
    GGML_ASSERT(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int nc = src0->ne[0];
    const int nr = ggml_nrows(src0);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        ggml_vec_silu_f32(nc,
                (float *) ((char *) dst->data  + i1*( dst->nb[1])),
                (float *) ((char *) src0->data + i1*(src0->nb[1])));

#ifndef NDEBUG
        for (int k = 0; k < nc; k++) {
            const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k];
            UNUSED(x);
            assert(!isnan(x));
            assert(!isinf(x));
        }
#endif
    }
}

static void ggml_compute_forward_silu(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_silu_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
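
// Illustrative sketch (not part of ggml): SiLU (a.k.a. swish) is the input
// gated by its own sigmoid; per element, ggml_vec_silu_f32 computes roughly
//
//     static float silu(float x) {
//         return x / (1.0f + expf(-x));
//     }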

// ggml_compute_forward_silu_back

static void ggml_compute_forward_silu_back_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * grad,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_is_contiguous_except_dim_1(grad));
    GGML_ASSERT(ggml_is_contiguous_except_dim_1(src0));
    GGML_ASSERT(ggml_is_contiguous_except_dim_1(dst));
    GGML_ASSERT(ggml_are_same_shape(src0, dst));
    GGML_ASSERT(ggml_are_same_shape(src0, grad));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int nc = src0->ne[0];
    const int nr = ggml_nrows(src0);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        ggml_vec_silu_backward_f32(nc,
                (float *) ((char *) dst->data  + i1*( dst->nb[1])),
                (float *) ((char *) src0->data + i1*(src0->nb[1])),
                (float *) ((char *) grad->data + i1*(grad->nb[1])));

#ifndef NDEBUG
        for (int k = 0; k < nc; k++) {
            const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k];
            UNUSED(x);
            assert(!isnan(x));
            assert(!isinf(x));
        }
#endif
    }
}

static void ggml_compute_forward_silu_back(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * grad,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_silu_back_f32(params, src0, grad, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
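
// Illustrative sketch (not part of ggml): with s(x) = 1/(1 + exp(-x)), the
// derivative of silu(x) = x*s(x) is s(x)*(1 + x*(1 - s(x))), so the backward
// pass computed by ggml_vec_silu_backward_f32 is, per element, roughly
//
//     static float silu_backward(float x, float dy) {
//         const float s = 1.0f / (1.0f + expf(-x));
//         return dy * s * (1.0f + x*(1.0f - s));
//     }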

// ggml_compute_forward_norm

static void ggml_compute_forward_norm_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    GGML_ASSERT(src0->nb[0] == sizeof(float));

    const int ith = params->ith;
    const int nth = params->nth;

    GGML_TENSOR_UNARY_OP_LOCALS;

    const float eps = 1e-5f; // TODO: make this a parameter

    // TODO: optimize
    for (int64_t i03 = 0; i03 < ne03; i03++) {
        for (int64_t i02 = 0; i02 < ne02; i02++) {
            for (int64_t i01 = ith; i01 < ne01; i01 += nth) {
                const float * x = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);

                ggml_float sum = 0.0;
                for (int64_t i00 = 0; i00 < ne00; i00++) {
                    sum += (ggml_float)x[i00];
                }

                float mean = sum/ne00;

                float * y = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3);

                ggml_float sum2 = 0.0;
                for (int64_t i00 = 0; i00 < ne00; i00++) {
                    float v = x[i00] - mean;
                    y[i00] = v;
                    sum2 += (ggml_float)(v*v);
                }

                float variance = sum2/ne00;
                const float scale = 1.0f/sqrtf(variance + eps);

                ggml_vec_scale_f32(ne00, y, scale);
            }
        }
    }
}

static void ggml_compute_forward_norm(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_norm_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
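
// Illustrative sketch (not part of ggml): for each row x of length N this is
// the usual (non-affine) layer normalization,
//
//     y = (x - mean(x)) / sqrt(var(x) + eps)
//
// e.g. the row {1, 2, 3} with eps ~ 0 maps to roughly {-1.2247, 0, 1.2247}.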

static void ggml_compute_forward_rms_norm_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    GGML_ASSERT(src0->nb[0] == sizeof(float));

    const int ith = params->ith;
    const int nth = params->nth;

    GGML_TENSOR_UNARY_OP_LOCALS;

    float eps;
    memcpy(&eps, dst->op_params, sizeof(float));

    // TODO: optimize
    for (int64_t i03 = 0; i03 < ne03; i03++) {
        for (int64_t i02 = 0; i02 < ne02; i02++) {
            for (int64_t i01 = ith; i01 < ne01; i01 += nth) {
                const float * x = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);

                ggml_float sum = 0.0;
                for (int64_t i00 = 0; i00 < ne00; i00++) {
                    sum += (ggml_float)(x[i00] * x[i00]);
                }

                const float mean = sum/ne00;

                float * y = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3);

                memcpy(y, x, ne00 * sizeof(float));
                // for (int i00 = 0; i00 < ne00; i00++) {
                //     y[i00] = x[i00];
                // }

                const float scale = 1.0f/sqrtf(mean + eps);

                ggml_vec_scale_f32(ne00, y, scale);
            }
        }
    }
}

static void ggml_compute_forward_rms_norm(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_rms_norm_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
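
// Illustrative sketch (not part of ggml): RMS norm skips the mean subtraction
// and rescales each row by its root mean square,
//
//     y = x / sqrt(mean(x^2) + eps)
//
// e.g. the row {3, 4} with eps ~ 0 has rms = sqrt((9 + 16)/2) ~ 3.5355, so it
// maps to roughly {0.8485, 1.1314}.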

static void ggml_compute_forward_rms_norm_back_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, dst) && ggml_are_same_shape(src0, src1));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    GGML_ASSERT(src0->nb[0] == sizeof(float));

    const int ith = params->ith;
    const int nth = params->nth;

    GGML_TENSOR_BINARY_OP_LOCALS;

    const float eps = 1e-6f; // TODO: make this a parameter

    // TODO: optimize
    for (int64_t i03 = 0; i03 < ne03; i03++) {
        for (int64_t i02 = 0; i02 < ne02; i02++) {
            for (int64_t i01 = ith; i01 < ne01; i01 += nth) {
                // src1 is same shape as src0 => same indices
                const int64_t i11 = i01;
                const int64_t i12 = i02;
                const int64_t i13 = i03;

                const float * x  = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
                const float * dz = (float *) ((char *) src1->data + i11*nb11 + i12*nb12 + i13*nb13);

                ggml_float sum_xx  = 0.0;
                ggml_float sum_xdz = 0.0;

                for (int64_t i00 = 0; i00 < ne00; i00++) {
                    sum_xx  += (ggml_float)(x[i00] * x[i00]);
                    sum_xdz += (ggml_float)(x[i00] * dz[i00]);
                }

                //const float mean     = (float)(sum_xx)/ne00;
                const float mean_eps = (float)(sum_xx)/ne00 + eps;
                const float sum_eps  = (float)(sum_xx) + eps*ne00;
                //const float mean_xdz = (float)(sum_xdz)/ne00;
                // we could cache rms from forward pass to improve performance.
                // to do this implement ggml_rms and compose ggml_rms_norm using ggml_rms.
                //const float rms  = sqrtf(mean_eps);
                const float rrms = 1.0f / sqrtf(mean_eps);
                //const float scale = -rrms/(ne00 * mean_eps); // -1/(n*rms**3)

                {
                    // z = rms_norm(x)
                    //
                    // rms_norm(src0) =
                    //     scale(
                    //         src0,
                    //         div(
                    //             1,
                    //             sqrt(
                    //                 add(
                    //                     scale(
                    //                         sum(
                    //                             sqr(
                    //                                 src0)),
                    //                         (1.0/N)),
                    //                     eps))));

                    // postorder:
                    // ## op    args         grad
                    // 00 param src0         grad[#00]
                    // 01 const 1
                    // 02 sqr   (#00)        grad[#02]
                    // 03 sum   (#02)        grad[#03]
                    // 04 const 1/N
                    // 05 scale (#03, #04)   grad[#05]
                    // 06 const eps
                    // 07 add   (#05, #06)   grad[#07]
                    // 08 sqrt  (#07)        grad[#08]
                    // 09 div   (#01,#08)    grad[#09]
                    // 10 scale (#00,#09)    grad[#10]
                    //
                    // backward pass, given grad[#10]
                    // #10: scale
                    // grad[#00] += scale(grad[#10],#09)
                    // grad[#09] += sum(mul(grad[#10],#00))
                    // #09: div
                    // grad[#08] += neg(mul(grad[#09], div(#09,#08)))
                    // #08: sqrt
                    // grad[#07] += mul(grad[#08], div(0.5, #08))
                    // #07: add
                    // grad[#05] += grad[#07]
                    // #05: scale
                    // grad[#03] += scale(grad[#05],#04)
                    // #03: sum
                    // grad[#02] += repeat(grad[#03], #02)
                    // #02:
                    // grad[#00] += scale(mul(#00, grad[#02]), 2.0)
                    //
                    // substitute and simplify:
                    // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, grad[#02]), 2.0)
                    // grad[#02] = repeat(grad[#03], #02)
                    // grad[#02] = repeat(scale(grad[#05],#04), #02)
                    // grad[#02] = repeat(scale(grad[#07],#04), #02)
                    // grad[#02] = repeat(scale(mul(grad[#08], div(0.5, #08)),#04), #02)
                    // grad[#02] = repeat(scale(mul(neg(mul(grad[#09], div(#09,#08))), div(0.5, #08)),#04), #02)
                    // grad[#02] = repeat(scale(mul(neg(mul(sum(mul(grad[#10],#00)), div(#09,#08))), div(0.5, #08)),#04), #02)
                    // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(#09,#08) * div(0.5, #08) * (1/N)), #02)
                    // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(div(#01,#08),#08) * div(0.5, #08) * (1/N)), #02)
                    // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(1,#08*#08) * div(0.5, #08) * (1/N)), #02)
                    // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N)), #02)
                    // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, grad[#02]), 2.0)
                    // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, repeat(-(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N)), #02)), 2.0)
                    // grad[#00] = scale(grad(#10), #09) + scale(scale(#00, -(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N))), 2.0)
                    // grad[#00] = scale(grad(#10), #09) + scale(#00, -(sum(mul(grad[#10],#00)) * div(1,#07) * div(1,#08) * (1/N)))
                    // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(1,#07*#08) * (-1/N))
                    // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(1,#07*#08) * (-1/N))
                    // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(1,mean_eps*rms) * (-1/N))
                    // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*mean_eps))
                    // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*(sum_xx/N+eps)))
                    // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*sum_xx+rms*N*eps))
                    // grad[#00] = scale(dz, rrms) + scale(x, sum(mul(dz,x)) * div(-1,rms*N*mean_eps))
                    // grad[#00] = scale(dz, rrms) + scale(x, sum_xdz * div(-1,rms*N*mean_eps))
                    // a = b*c + d*e
                    // a = b*c*f/f + d*e*f/f
                    // a = (b*c*f + d*e*f)*(1/f)
                    // a = (b*c*(1/c) + d*e*(1/c))*(1/(1/c))
                    // a = (b + d*e/c)*c
                    // b = dz, c = rrms, d = x, e = sum_xdz * div(-1,rms*N*mean_eps)
                    // a = (dz + x*sum_xdz * div(-1,rms*N*mean_eps)/rrms)*rrms
                    // a = (dz + x*sum_xdz * div(-1,rms*N*mean_eps)*rms)*rrms
                    // a = (dz + x*sum_xdz * div(-rms,rms*N*mean_eps))*rrms
                    // a = (dz + x*sum_xdz * div(-1,N*mean_eps))*rrms
                    // a = (dz + x*div(-sum_xdz,N*mean_eps))*rrms
                    // a = (dz + x*div(-mean_xdz,mean_eps))*rrms
                    // grad[#00] = scale(dz + scale(x, div(-mean_xdz,mean_eps)),rrms)
                    // grad[#00] = scale(dz + scale(x, -mean_xdz/mean_eps),rrms)
                    // dx = scale(dz + scale(x, -mean_xdz/mean_eps),rrms)
                }
                // dx = scale(dz + scale(x, -mean_xdz/mean_eps),rrms)
                // post-order:
                // dx := x
                // dx := scale(dx,-mean_xdz/mean_eps)
                // dx := add(dx, dz)
                // dx := scale(dx, rrms)
                float * dx = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3);

                ggml_vec_cpy_f32  (ne00, dx, x);
                // ggml_vec_scale_f32(ne00, dx, -mean_xdz/mean_eps);
                ggml_vec_scale_f32(ne00, dx, (float)(-sum_xdz)/sum_eps);
                ggml_vec_acc_f32  (ne00, dx, dz);
                ggml_vec_scale_f32(ne00, dx, rrms);
            }
        }
    }
}

static void ggml_compute_forward_rms_norm_back(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_rms_norm_back_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_mul_mat

#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
// helper function to determine if it is better to use BLAS or not
// for large matrices, BLAS is faster
static bool ggml_compute_forward_mul_mat_use_blas(
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
              struct ggml_tensor * dst) {
    //const int64_t ne00 = src0->ne[0];
    //const int64_t ne01 = src0->ne[1];

    const int64_t ne10 = src1->ne[0];

    const int64_t ne0 = dst->ne[0];
    const int64_t ne1 = dst->ne[1];

    // TODO: find the optimal values for these
    if (ggml_is_contiguous(src0) &&
        ggml_is_contiguous(src1) &&
        (ne0 >= 32 && ne1 >= 32 && ne10 >= 32)) {

        /*printf("BLAS: %d %d %d %d %d\n", ne0, ne1, ne10, ne00, ne01);*/
        return true;
    }

    return false;
}
#endif

static void ggml_compute_forward_mul_mat(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
              struct ggml_tensor * dst) {
    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    GGML_TENSOR_BINARY_OP_LOCALS;

    const int ith = params->ith;
    const int nth = params->nth;

    const enum ggml_type type = src0->type;

    const bool src1_cont = ggml_is_contiguous(src1);

    ggml_vec_dot_t    const vec_dot               = type_traits[type].vec_dot;
    enum ggml_type    const vec_dot_type          = type_traits[type].vec_dot_type;
    ggml_from_float_t const from_float_to_vec_dot = type_traits[vec_dot_type].from_float;

    GGML_ASSERT(ne0 == ne01);
    GGML_ASSERT(ne1 == ne11);
    GGML_ASSERT(ne2 == ne12);
    GGML_ASSERT(ne3 == ne13);

    // we don't support permuted src0 or src1
    GGML_ASSERT(nb00 == GGML_TYPE_SIZE[type]);
    GGML_ASSERT(nb10 == sizeof(float));

    // dst cannot be transposed or permuted
    GGML_ASSERT(nb0 == sizeof(float));
    GGML_ASSERT(nb0 <= nb1);
    GGML_ASSERT(nb1 <= nb2);
    GGML_ASSERT(nb2 <= nb3);

    // nb01 >= nb00 - src0 is not transposed
    //   compute by src0 rows

#if defined(GGML_USE_CLBLAST)
    if (ggml_cl_can_mul_mat(src0, src1, dst)) {
        // TODO: handle case when src0 is broadcast-able into src1 across 2nd,3rd dimension
        //       ref: https://github.com/ggerganov/ggml/pull/224
        GGML_ASSERT(ne02 == ne12);
        GGML_ASSERT(ne03 == ne13);

        if (params->ith == 0 && params->type == GGML_TASK_COMPUTE) {
            ggml_cl_mul_mat(src0, src1, dst, params->wdata, params->wsize);
        }
        return;
    }
#endif

#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
    if (ggml_compute_forward_mul_mat_use_blas(src0, src1, dst)) {
        // TODO: handle case when src0 is broadcast-able into src1 across 2nd,3rd dimension
        //       ref: https://github.com/ggerganov/ggml/pull/224
        GGML_ASSERT(ne02 == ne12);
        GGML_ASSERT(ne03 == ne13);

        if (params->ith != 0) {
            return;
        }

        if (params->type == GGML_TASK_INIT) {
            return;
        }

        if (params->type == GGML_TASK_FINALIZE) {
            return;
        }

        for (int64_t i03 = 0; i03 < ne03; i03++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
                const void  * x = (char *)            src0->data + i03*nb03 + i02*nb02;
                const float * y = (float *) ((char *) src1->data + i02*nb12 + i03*nb13);

                float * d = (float *) ((char *) dst->data + i02*nb2 + i03*nb3);

                if (type != GGML_TYPE_F32) {
                    float * const wdata = params->wdata;
                    ggml_to_float_t const to_float = type_traits[type].to_float;

                    size_t id = 0;
                    for (int64_t i01 = 0; i01 < ne01; ++i01) {
                        to_float((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01, wdata + id, ne00);
                        id += ne00;
                    }

                    assert(id*sizeof(float) <= params->wsize);
                    x = wdata;
                }

                cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans,
                        ne11, ne01, ne10,
                        1.0f,    y, ne10,
                                 x, ne00,
                        0.0f,    d, ne01);
            }
        }

        //printf("CBLAS = %f ms, %d x %d x %d x %d\n", (ggml_perf_time_us() - t0)/1000.0, ne0, ne1, ne2, ne3);

        return;
    }
#endif

    if (params->type == GGML_TASK_INIT) {
        if (src1->type != vec_dot_type) {
            char * wdata = params->wdata;
            const size_t row_size = ne10*GGML_TYPE_SIZE[vec_dot_type]/GGML_BLCK_SIZE[vec_dot_type];

            for (int64_t i13 = 0; i13 < ne13; ++i13) {
                for (int64_t i12 = 0; i12 < ne12; ++i12) {
                    for (int64_t i11 = 0; i11 < ne11; ++i11) {
                        from_float_to_vec_dot((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11), (void *) wdata, ne10);
                        wdata += row_size;
                    }
                }
            }
        }

        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // parallelize by src0 rows
    const int64_t dr = (ne01 + nth - 1)/nth;

    const int64_t ir10 = dr*ith;
    const int64_t ir11 = MIN(ir10 + dr, ne01);

    // src1 rows
    const int64_t nr1 = ne11*ne12*ne13;

    const void * wdata    = (src1->type == vec_dot_type) ? src1->data : params->wdata;
    const size_t row_size = ne10*GGML_TYPE_SIZE[vec_dot_type]/GGML_BLCK_SIZE[vec_dot_type];

    for (int64_t ir1 = 0; ir1 < nr1; ++ir1) {
        const int64_t i13 = (ir1/(ne12*ne11));
        const int64_t i12 = (ir1 - i13*ne12*ne11)/ne11;
        const int64_t i11 = (ir1 - i13*ne12*ne11 - i12*ne11);

        const int64_t ir0 = (ir1/ne11)%(ne02*ne03);
        const int64_t i03 = (ir0/(ne02));
        // Hack for "Falcon multi-query-attention key stutter" / alternative to ggml_repeat2.
        // See https://github.com/ggerganov/llama.cpp/issues/1602#issuecomment-1606087470:
        // GG: this is likely the correct way to broadcast, though need some more thought
        //     therefore leaving the comments to remind us for now
        const int64_t i02 = (i12 / (ne12 / ne02));
        // Original from PR/224 (and also essential/correct for non-broadcast matmuls in Falcon)
        // const int64_t i02 = (ir0 - i03*ne02);

        const int64_t i1 = i11;
        const int64_t i2 = i12;
        const int64_t i3 = i13;

        const char * src0_row = (const char *) src0->data + (0 + i02*nb02 + i03*nb03);

        // desc: when src1 is not a contiguous memory block we have to calculate the offset using the strides
        //       if it is, then we have either copied the data to params->wdata and made it contiguous or we are using
        //       the original src1 data pointer, so we should index using the indices directly
        // TODO: this is a bit of a hack, we should probably have a better way to handle this
        const char * src1_col = (const char *) wdata +
            (src1_cont || src1->type != vec_dot_type
             ? (i11      + i12*ne11 + i13*ne12*ne11)*row_size
             : (i11*nb11 + i12*nb12 + i13*nb13));

        float * dst_col = (float *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3));

        for (int64_t ir = ir10; ir < ir11; ++ir) {
            vec_dot(ne00, &dst_col[ir], src0_row + ir*nb01, src1_col);
        }
    }

    //int64_t t1 = ggml_time_us();
    //static int64_t acc = 0;
    //acc += t1 - t0;
    //if (t1 - t0 > 10) {
    //    printf("\n");
    //    printf("ne00 = %5d, ne01 = %5d, ne02 = %5d, ne03 = %5d\n", ne00, ne01, ne02, ne03);
    //    printf("nb00 = %5d, nb01 = %5d, nb02 = %5d, nb03 = %5d\n", nb00, nb01, nb02, nb03);
    //    printf("ne10 = %5d, ne11 = %5d, ne12 = %5d, ne13 = %5d\n", ne10, ne11, ne12, ne13);

    //    printf("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX task %d/%d: %d us, acc = %d\n", ith, nth, (int) (t1 - t0), (int) acc);
    //}
}
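
// Illustrative pseudo-code (not part of ggml): ignoring types, threading and
// the broadcast hack above, the compute loop amounts to one dot product per
// (src0 row, src1 column) pair,
//
//     for each dst column (i11, i12, i13):
//         for (i01 = ir10; i01 < ir11; ++i01)          // this thread's src0 rows
//             dst[i01, i11] = dot(src0_row[i01], src1_col[i11])   // length ne00
//
// with vec_dot as the (possibly quantized) inner kernel.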

// ggml_compute_forward_out_prod

static void ggml_compute_forward_out_prod_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
              struct ggml_tensor * dst) {
    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    GGML_TENSOR_BINARY_OP_LOCALS;

    const int ith = params->ith;
    const int nth = params->nth;

    GGML_ASSERT(ne02 == ne12);
    GGML_ASSERT(ne03 == ne13);
    GGML_ASSERT(ne2  == ne12);
    GGML_ASSERT(ne3  == ne13);

    // we don't support permuted src0 or src1
    GGML_ASSERT(nb00 == sizeof(float));

    // dst cannot be transposed or permuted
    GGML_ASSERT(nb0 == sizeof(float));
    // GGML_ASSERT(nb0 <= nb1);
    // GGML_ASSERT(nb1 <= nb2);
    // GGML_ASSERT(nb2 <= nb3);

    GGML_ASSERT(ne0 == ne00);
    GGML_ASSERT(ne1 == ne10);
    GGML_ASSERT(ne2 == ne02);
    GGML_ASSERT(ne3 == ne03);

    // nb01 >= nb00 - src0 is not transposed
    //   compute by src0 rows

    // TODO: #if defined(GGML_USE_CUBLAS) ggml_cuda_out_prod
    // TODO: #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) || defined(GGML_USE_CLBLAST)

    if (params->type == GGML_TASK_INIT) {
        ggml_vec_set_f32(ne0*ne1*ne2*ne3, dst->data, 0);
        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // parallelize by last three dimensions

    // total rows in dst
    const int64_t nr = ne1*ne2*ne3;

    // rows per thread
    const int64_t dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int64_t ir0 = dr*ith;
    const int64_t ir1 = MIN(ir0 + dr, nr);

    // dst[:,:,:,:] = 0
    // for i2,i3:
    //   for i1:
    //     for i01:
    //       for i0:
    //         dst[i0,i1,i2,i3] += src0[i0,i01,i2,i3] * src1[i1,i01,i2,i3]

    for (int64_t ir = ir0; ir < ir1; ++ir) {
        // dst indices
        const int64_t i3 = ir/(ne2*ne1);
        const int64_t i2 = (ir - i3*ne2*ne1)/ne1;
        const int64_t i1 = (ir - i3*ne2*ne1 - i2*ne1);

        const int64_t i02 = i2;
        const int64_t i03 = i3;

        //const int64_t i10 = i1;
        const int64_t i12 = i2;
        const int64_t i13 = i3;

        for (int64_t i01 = 0; i01 < ne01; ++i01) {
            const int64_t i11 = i01;

            float * s0 = (float *) ((char *) src0->data + (          i01*nb01 + i02*nb02 + i03*nb03));
            float * s1 = (float *) ((char *) src1->data + (i1*nb10 + i11*nb11 + i12*nb12 + i13*nb13));
            float * d  = (float *) ((char *)  dst->data + (          i1*nb1   + i2*nb2   + i3*nb3));

            ggml_vec_mad_f32(ne0, d, s0, *s1);
            // for (int64_t i0 = 0; i0 < ne0; ++i0) {
            //     d[i0] += s0[i0] * s1[i1];
            // }
        }
    }

    //int64_t t1 = ggml_perf_time_us();
    //static int64_t acc = 0;
    //acc += t1 - t0;
    //if (t1 - t0 > 10) {
    //    printf("\n");
    //    printf("ne00 = %5d, ne01 = %5d, ne02 = %5d, ne03 = %5d\n", ne00, ne01, ne02, ne03);
    //    printf("nb00 = %5d, nb01 = %5d, nb02 = %5d, nb03 = %5d\n", nb00, nb01, nb02, nb03);
    //    printf("ne10 = %5d, ne11 = %5d, ne12 = %5d, ne13 = %5d\n", ne10, ne11, ne12, ne13);
    //    printf("nb10 = %5d, nb11 = %5d, nb12 = %5d, nb13 = %5d\n", nb10, nb11, nb12, nb13);

    //    printf("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX task %d/%d: %d us, acc = %d\n", ith, nth, (int) (t1 - t0), (int) acc);
    //}
}

static void ggml_compute_forward_out_prod(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q8_1:
            {
                GGML_ASSERT(false); // todo
                // ggml_compute_forward_out_prod_q_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F16:
            {
                GGML_ASSERT(false); // todo
                // ggml_compute_forward_out_prod_f16_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_out_prod_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_scale

static void ggml_compute_forward_scale_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_is_contiguous(src0));
    GGML_ASSERT(ggml_is_contiguous(dst));
    GGML_ASSERT(ggml_are_same_shape(src0, dst));
    GGML_ASSERT(ggml_is_scalar(src1));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // scale factor
    const float v = *(float *) src1->data;

    const int ith = params->ith;
    const int nth = params->nth;

    const int nc = src0->ne[0];
    const int nr = ggml_nrows(src0);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    const size_t nb01 = src0->nb[1];

    const size_t nb1 = dst->nb[1];

    for (int i1 = ir0; i1 < ir1; i1++) {
        if (dst->data != src0->data) {
            // src0 is same shape as dst => same indices
            memcpy((char *)dst->data + i1*nb1, (char *)src0->data + i1*nb01, nc * sizeof(float));
        }
        ggml_vec_scale_f32(nc, (float *) ((char *) dst->data + i1*nb1), v);
    }
}

static void ggml_compute_forward_scale(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_scale_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_set

static void ggml_compute_forward_set_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, dst));
    GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));

    // view src0 and dst with these strides and data offset in bytes during set
    // nb0 is implicitly element_size because src0 and dst are contiguous
    size_t nb1     = ((int32_t *) dst->op_params)[0];
    size_t nb2     = ((int32_t *) dst->op_params)[1];
    size_t nb3     = ((int32_t *) dst->op_params)[2];
    size_t offset  = ((int32_t *) dst->op_params)[3];
    bool   inplace = (bool) ((int32_t *) dst->op_params)[4];

    if (!inplace && (params->type == GGML_TASK_INIT)) {
        // memcpy needs to be synchronized across threads to avoid race conditions.
        // => do it in INIT phase
        memcpy(
            ((char *)  dst->data),
            ((char *) src0->data),
            ggml_nbytes(dst));
    }

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(src1);
    const int nc = src1->ne[0];

    GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne);
    GGML_TENSOR_LOCALS(size_t,  nb1, src1, nb);

    // src0 and dst as viewed during set
    const size_t nb0 = ggml_element_size(src0);

    const int im0 = (ne10 == 0 ? 0 : ne10-1);
    const int im1 = (ne11 == 0 ? 0 : ne11-1);
    const int im2 = (ne12 == 0 ? 0 : ne12-1);
    const int im3 = (ne13 == 0 ? 0 : ne13-1);

    GGML_ASSERT(offset + im0*nb0 + im1*nb1 + im2*nb2 + im3*nb3 <= ggml_nbytes(dst));

    GGML_ASSERT(nb10 == sizeof(float));

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int ir = ir0; ir < ir1; ++ir) {
        // src0 and dst are viewed with shape of src1 and offset
        // => same indices
        const int i3 = ir/(ne12*ne11);
        const int i2 = (ir - i3*ne12*ne11)/ne11;
        const int i1 = (ir - i3*ne12*ne11 - i2*ne11);

        ggml_vec_cpy_f32(nc,
                (float *) ((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + offset),
                (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11));
    }
}

static void ggml_compute_forward_set(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_set_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F16:
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q8_1:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_cpy

static void ggml_compute_forward_cpy(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    ggml_compute_forward_dup(params, src0, dst);
}

// ggml_compute_forward_cont

static void ggml_compute_forward_cont(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    ggml_compute_forward_dup(params, src0, dst);
}

// ggml_compute_forward_reshape

static void ggml_compute_forward_reshape(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    // NOP
    UNUSED(params);
    UNUSED(src0);
    UNUSED(dst);
}

// ggml_compute_forward_view

static void ggml_compute_forward_view(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0) {
    // NOP
    UNUSED(params);
    UNUSED(src0);
}

// ggml_compute_forward_permute

static void ggml_compute_forward_permute(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0) {
    // NOP
    UNUSED(params);
    UNUSED(src0);
}

// ggml_compute_forward_transpose

static void ggml_compute_forward_transpose(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0) {
    // NOP
    UNUSED(params);
    UNUSED(src0);
}

// ggml_compute_forward_get_rows

static void ggml_compute_forward_get_rows_q(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
              struct ggml_tensor * dst) {
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int nc = src0->ne[0];
    const int nr = ggml_nelements(src1);
    const enum ggml_type type = src0->type;
    ggml_to_float_t const dequantize_row_q = type_traits[type].to_float;

    assert( dst->ne[0] == nc);
    assert( dst->ne[1] == nr);
    assert(src0->nb[0] == GGML_TYPE_SIZE[type]);

    for (int i = 0; i < nr; ++i) {
        const int r = ((int32_t *) src1->data)[i];

        dequantize_row_q(
                (const void *) ((char *) src0->data + r*src0->nb[1]),
                     (float *) ((char *)  dst->data + i*dst->nb[1]), nc);
    }
}

static void ggml_compute_forward_get_rows_f16(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
              struct ggml_tensor * dst) {
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int nc = src0->ne[0];
    const int nr = ggml_nelements(src1);

    assert( dst->ne[0] == nc);
    assert( dst->ne[1] == nr);
    assert(src0->nb[0] == sizeof(ggml_fp16_t));

    for (int i = 0; i < nr; ++i) {
        const int r = ((int32_t *) src1->data)[i];

        for (int j = 0; j < nc; ++j) {
            ggml_fp16_t v = ((ggml_fp16_t *) ((char *) src0->data + r*src0->nb[1]))[j];
            ((float *) ((char *) dst->data + i*dst->nb[1]))[j] = GGML_FP16_TO_FP32(v);
        }
    }
}

static void ggml_compute_forward_get_rows_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
              struct ggml_tensor * dst) {
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int nc = src0->ne[0];
    const int nr = ggml_nelements(src1);

    assert( dst->ne[0] == nc);
    assert( dst->ne[1] == nr);
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < nr; ++i) {
        const int r = ((int32_t *) src1->data)[i];

        ggml_vec_cpy_f32(nc,
                (float *) ((char *)  dst->data + i*dst->nb[1]),
                (float *) ((char *) src0->data + r*src0->nb[1]));
    }
}

static void ggml_compute_forward_get_rows(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q8_1:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
            {
                ggml_compute_forward_get_rows_q(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_get_rows_f16(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_get_rows_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }

    //static bool first = true;
    //printf("ne0 = %d, ne1 = %d, ne2 = %d\n", dst->ne[0], dst->ne[1], dst->ne[2]);
    //if (first) {
    //    first = false;
    //} else {
    //    for (int k = 0; k < dst->ne[1]; ++k) {
    //        for (int j = 0; j < dst->ne[0]/16; ++j) {
    //            for (int i = 0; i < 16; ++i) {
    //                printf("%8.4f ", ((float *) dst->data)[k*dst->ne[0] + j*16 + i]);
    //            }
    //            printf("\n");
    //        }
    //        printf("\n");
    //    }
    //    printf("\n");
    //    exit(0);
    //}
}
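
// Illustrative sketch (not part of ggml): get_rows gathers whole rows of src0
// by the integer indices stored in src1. E.g. if src1 holds {2, 0}, dst row 0
// becomes a copy of src0 row 2 and dst row 1 a copy of src0 row 0, with
// quantized and f16 source rows converted to f32 on the way out.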

// ggml_compute_forward_get_rows_back

static void ggml_compute_forward_get_rows_back_f32_f16(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        const struct ggml_tensor * opt0,
              struct ggml_tensor * dst) {
    GGML_ASSERT(params->ith == 0);
    GGML_ASSERT(ggml_are_same_shape(opt0, dst));
    GGML_ASSERT(ggml_is_contiguous(opt0));
    GGML_ASSERT(ggml_is_contiguous(dst));

    ggml_compute_forward_dup_same_cont(params, opt0, dst);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int nc = src0->ne[0];
    const int nr = ggml_nelements(src1);

    GGML_ASSERT( dst->ne[0] == nc);
    GGML_ASSERT(src0->nb[0] == sizeof(ggml_fp16_t));

    for (int i = 0; i < nr; ++i) {
        const int r = ((int32_t *) src1->data)[i];

        for (int j = 0; j < nc; ++j) {
            ggml_fp16_t v = ((ggml_fp16_t *) ((char *) src0->data + i*src0->nb[1]))[j];
            ((float *) ((char *) dst->data + r*dst->nb[1]))[j] += GGML_FP16_TO_FP32(v);
        }
    }
}

static void ggml_compute_forward_get_rows_back_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        const struct ggml_tensor * opt0,
              struct ggml_tensor * dst) {
    GGML_ASSERT(params->ith == 0);
    GGML_ASSERT(ggml_are_same_shape(opt0, dst));
    GGML_ASSERT(ggml_is_contiguous(opt0));
    GGML_ASSERT(ggml_is_contiguous(dst));

    // ggml_compute_forward_dup_same_cont(params, opt0, dst);

    if (params->type == GGML_TASK_INIT) {
        memset(dst->data, 0, ggml_nbytes(dst));
    }

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int nc = src0->ne[0];
    const int nr = ggml_nelements(src1);

    GGML_ASSERT( dst->ne[0] == nc);
    GGML_ASSERT(src0->nb[0] == sizeof(float));

    for (int i = 0; i < nr; ++i) {
        const int r = ((int32_t *) src1->data)[i];

        ggml_vec_add_f32(nc,
                (float *) ((char *)  dst->data + r*dst->nb[1]),
                (float *) ((char *)  dst->data + r*dst->nb[1]),
                (float *) ((char *) src0->data + i*src0->nb[1]));
    }
}

static void ggml_compute_forward_get_rows_back(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        const struct ggml_tensor * opt0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_get_rows_back_f32_f16(params, src0, src1, opt0, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_get_rows_back_f32(params, src0, src1, opt0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }

    //static bool first = true;
    //printf("ne0 = %d, ne1 = %d, ne2 = %d\n", dst->ne[0], dst->ne[1], dst->ne[2]);
    //if (first) {
    //    first = false;
    //} else {
    //    for (int k = 0; k < dst->ne[1]; ++k) {
    //        for (int j = 0; j < dst->ne[0]/16; ++j) {
    //            for (int i = 0; i < 16; ++i) {
    //                printf("%8.4f ", ((float *) dst->data)[k*dst->ne[0] + j*16 + i]);
    //            }
    //            printf("\n");
    //        }
    //        printf("\n");
    //    }
    //    printf("\n");
    //    exit(0);
    //}
}

// ggml_compute_forward_diag

static void ggml_compute_forward_diag_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // TODO: handle transposed/permuted matrices

    GGML_TENSOR_UNARY_OP_LOCALS;

    GGML_ASSERT(ne00 == ne0);
    GGML_ASSERT(ne00 == ne1);
    GGML_ASSERT(ne01 == 1);
    GGML_ASSERT(ne02 == ne2);
    GGML_ASSERT(ne03 == ne3);

    GGML_ASSERT(nb00 == sizeof(float));
    GGML_ASSERT(nb0  == sizeof(float));

    for (int i3 = 0; i3 < ne3; i3++) {
        for (int i2 = 0; i2 < ne2; i2++) {
            for (int i1 = 0; i1 < ne1; i1++) {
                float * d = (float *)((char *)  dst->data + i3*nb3  + i2*nb2 + i1*nb1);
                float * s = (float *)((char *) src0->data + i3*nb03 + i2*nb02);
                for (int i0 = 0; i0 < i1; i0++) {
                    d[i0] = 0;
                }
                d[i1] = s[i1];
                for (int i0 = i1+1; i0 < ne0; i0++) {
                    d[i0] = 0;
                }
            }
        }
    }
}

static void ggml_compute_forward_diag(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_diag_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_diag_mask_inf

static void ggml_compute_forward_diag_mask_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst,
        const float value) {
    const int ith = params->ith;
    const int nth = params->nth;

    const int  n_past  = ((int32_t *) dst->op_params)[0];
    const bool inplace = (bool)((int32_t *) dst->op_params)[1];

    GGML_ASSERT(n_past >= 0);

    if (!inplace && (params->type == GGML_TASK_INIT)) {
        // memcpy needs to be synchronized across threads to avoid race conditions.
        // => do it in INIT phase
        GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));
        GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));
        memcpy(
            ((char *)  dst->data),
            ((char *) src0->data),
            ggml_nbytes(dst));
    }

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // TODO: handle transposed/permuted matrices

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];
    const int nr = src0->ne[1];
    const int nz = n/nr;

    GGML_ASSERT( dst->nb[0] == sizeof(float));
    GGML_ASSERT(src0->nb[0] == sizeof(float));

    for (int k = 0; k < nz; k++) {
        for (int j = ith; j < nr; j += nth) {
            for (int i = n_past; i < nc; i++) {
                if (i > n_past + j) {
                    *(float *)((char *) dst->data + k*dst->nb[2] + j*dst->nb[1] + i*dst->nb[0]) = value;
                }
            }
        }
    }
}

static void ggml_compute_forward_diag_mask_inf(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_diag_mask_f32(params, src0, dst, -INFINITY);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

static void ggml_compute_forward_diag_mask_zero(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_diag_mask_f32(params, src0, dst, 0);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
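
// Illustrative sketch (not part of ggml): with n_past = 0, every element
// whose column index i exceeds its row index j is overwritten, so masking a
// 3x3 matrix with -INF yields the causal attention pattern
//
//     [ x  -INF -INF ]
//     [ x   x   -INF ]
//     [ x   x    x   ]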

// ggml_compute_forward_soft_max

static void ggml_compute_forward_soft_max_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_is_contiguous(src0));
    GGML_ASSERT(ggml_is_contiguous(dst));
    GGML_ASSERT(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // TODO: handle transposed/permuted matrices

    const int ith = params->ith;
    const int nth = params->nth;

    const int nc = src0->ne[0];
    const int nr = ggml_nrows(src0);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        float * sp = (float *)((char *) src0->data + i1*src0->nb[1]);
        float * dp = (float *)((char *)  dst->data + i1*dst->nb[1]);

#ifndef NDEBUG
        for (int i = 0; i < nc; ++i) {
            //printf("p[%d] = %f\n", i, p[i]);
            assert(!isnan(sp[i]));
        }
#endif

        float max = -INFINITY;
        ggml_vec_max_f32(nc, &max, sp);

        ggml_float sum = 0.0;

        uint16_t scvt;
        for (int i = 0; i < nc; i++) {
            if (sp[i] == -INFINITY) {
                dp[i] = 0.0f;
            } else {
                // const float val = (sp[i] == -INFINITY) ? 0.0 : exp(sp[i] - max);
                ggml_fp16_t s = GGML_FP32_TO_FP16(sp[i] - max);
                memcpy(&scvt, &s, sizeof(scvt));
                const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt]);
                sum += (ggml_float)val;
                dp[i] = val;
            }
        }

        assert(sum > 0.0);

        sum = 1.0/sum;
        ggml_vec_scale_f32(nc, dp, sum);

#ifndef NDEBUG
        for (int i = 0; i < nc; ++i) {
            assert(!isnan(dp[i]));
            assert(!isinf(dp[i]));
        }
#endif
    }
}

static void ggml_compute_forward_soft_max(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_soft_max_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
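
// Illustrative sketch (not part of ggml): subtracting the row max before
// exponentiating keeps the exponential from overflowing without changing the
// result, since softmax(x) = softmax(x - c) for any constant c:
//
//     softmax(x)_i = exp(x_i - max(x)) / sum_j exp(x_j - max(x))
//
// the table_exp_f16 lookup above then replaces expf with a table indexed by
// the 16-bit half-precision pattern of (x_i - max).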

// ggml_compute_forward_soft_max_back

static void ggml_compute_forward_soft_max_back_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_is_contiguous(src0));
    GGML_ASSERT(ggml_is_contiguous(src1));
    GGML_ASSERT(ggml_is_contiguous(dst));
    GGML_ASSERT(ggml_are_same_shape(src0, dst));
    GGML_ASSERT(ggml_are_same_shape(src1, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // TODO: handle transposed/permuted matrices

    const int ith = params->ith;
    const int nth = params->nth;

    const int nc = src0->ne[0];
    const int nr = ggml_nrows(src0);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        float * dy = (float *)((char *) src0->data + i1*src0->nb[1]);
        float * y  = (float *)((char *) src1->data + i1*src1->nb[1]);
        float * dx = (float *)((char *)  dst->data + i1*dst->nb[1]);

#ifndef NDEBUG
        for (int i = 0; i < nc; ++i) {
            //printf("p[%d] = %f\n", i, p[i]);
            assert(!isnan(dy[i]));
            assert(!isnan(y[i]));
        }
#endif

        // Jii = yi - yi*yi
        // Jij = -yi*yj
        // J = diag(y)-y.T*y
        // dx = J * dy
        // dxk = sum_i(Jki * dyi)
        // dxk = sum_i(-yk*yi * dyi) - (-yk*yk)*dyk + (yk - yk*yk)*dyk
        // dxk = sum_i(-yk*yi * dyi) + yk*dyk
        // dxk = -yk * sum_i(yi * dyi) + yk*dyk
        // dxk = -yk * dot(y, dy) + yk*dyk
        // dxk = yk * (- dot(y, dy) + dyk)
        // dxk = yk * (dyk - dot(y, dy))
        //
        // post-order:
        // dot_y_dy := dot(y, dy)
        // dx := dy
        // dx := dx - dot_y_dy
        // dx := dx * y

        // linear runtime, no additional memory
        float dot_y_dy = 0;
        ggml_vec_dot_f32 (nc, &dot_y_dy, y, dy);
        ggml_vec_cpy_f32 (nc, dx, dy);
        ggml_vec_acc1_f32(nc, dx, -dot_y_dy);
        ggml_vec_mul_f32 (nc, dx, dx, y);

#ifndef NDEBUG
        for (int i = 0; i < nc; ++i) {
            assert(!isnan(dx[i]));
            assert(!isinf(dx[i]));
        }
#endif
    }
}

static void ggml_compute_forward_soft_max_back(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_soft_max_back_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
  9374. // ggml_compute_forward_alibi
  9375. static void ggml_compute_forward_alibi_f32(
  9376. const struct ggml_compute_params * params,
  9377. const struct ggml_tensor * src0,
  9378. struct ggml_tensor * dst) {
  9379. assert(params->ith == 0);
  9380. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9381. return;
  9382. }
  9383. const int n_past = ((int32_t *) dst->op_params)[0];
  9384. const int n_head = ((int32_t *) dst->op_params)[1];
  9385. float max_bias;
  9386. memcpy(&max_bias, (int32_t *) dst->op_params + 2, sizeof(float));
  9387. assert(n_past >= 0);
  9388. const int ne0 = src0->ne[0]; // all_seq_len = n_past + ne1
  9389. const int ne1 = src0->ne[1]; // seq_len_without_past
  9390. const int ne2 = src0->ne[2]; // n_head -> this is k
  9391. //const int ne3 = src0->ne[3]; // 1 -> bsz
  9392. const int n = ggml_nrows(src0);
  9393. const int ne2_ne3 = n/ne1; // ne2*ne3
  9394. const int nb0 = src0->nb[0];
  9395. const int nb1 = src0->nb[1];
  9396. const int nb2 = src0->nb[2];
  9397. //const int nb3 = src0->nb[3];
  9398. GGML_ASSERT(nb0 == sizeof(float));
  9399. GGML_ASSERT(ne1 + n_past == ne0);
  9400. GGML_ASSERT(n_head == ne2);
  9401. // add alibi to src0 (KQ_scaled)
  9402. const int n_heads_log2_floor = 1 << (int) floor(log2(n_head));
  9403. const float m0 = powf(2.0f, -(max_bias) / n_heads_log2_floor);
  9404. const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_heads_log2_floor);
  9405. for (int i = 0; i < ne0; i++) {
  9406. for (int j = 0; j < ne1; j++) {
  9407. for (int k = 0; k < ne2_ne3; k++) {
  9408. float * const src = (float *)((char *) src0->data + i*nb0 + j*nb1 + k*nb2);
  9409. float * pdst = (float *)((char *) dst->data + i*nb0 + j*nb1 + k*nb2);
  9410. // TODO: k*nb2 or k*nb3
  9411. float m_k;
  9412. if (k < n_heads_log2_floor) {
  9413. m_k = powf(m0, k + 1);
  9414. } else {
  9415. m_k = powf(m1, 2 * (k - n_heads_log2_floor) + 1);
  9416. }
  9417. pdst[0] = i * m_k + src[0];
  9418. }
  9419. }
  9420. }
  9421. }
static void ggml_compute_forward_alibi_f16(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n_past = ((int32_t *) dst->op_params)[0];
    const int n_head = ((int32_t *) dst->op_params)[1];
    float max_bias;
    memcpy(&max_bias, (int32_t *) dst->op_params + 2, sizeof(float));

    assert(n_past >= 0);

    const int ne0 = src0->ne[0]; // all_seq_len = n_past + ne1
    const int ne1 = src0->ne[1]; // seq_len_without_past
    const int ne2 = src0->ne[2]; // n_head -> this is k
    //const int ne3 = src0->ne[3]; // 1 -> bsz

    const int n = ggml_nrows(src0);
    const int ne2_ne3 = n/ne1; // ne2*ne3

    const int nb0 = src0->nb[0];
    const int nb1 = src0->nb[1];
    const int nb2 = src0->nb[2];
    //const int nb3 = src0->nb[3];

    GGML_ASSERT(nb0 == sizeof(ggml_fp16_t));
    GGML_ASSERT(ne1 + n_past == ne0); (void) n_past;
    GGML_ASSERT(n_head == ne2);

    // add alibi to src0 (KQ_scaled)
    const int n_heads_log2_floor = 1 << (int) floor(log2(n_head));

    const float m0 = powf(2.0f, -(max_bias) / n_heads_log2_floor);
    const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_heads_log2_floor);

    for (int i = 0; i < ne0; i++) {
        for (int j = 0; j < ne1; j++) {
            for (int k = 0; k < ne2_ne3; k++) {
                ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i*nb0 + j*nb1 + k*nb2);
                float *            pdst =       (float *)((char *)  dst->data + i*nb0 + j*nb1 + k*nb2);

                // TODO: k*nb2 or k*nb3

                float m_k;

                if (k < n_heads_log2_floor) {
                    m_k = powf(m0, k + 1);
                } else {
                    m_k = powf(m1, 2 * (k - n_heads_log2_floor) + 1);
                }

                // we return F32
                pdst[0] = i * m_k + GGML_FP16_TO_FP32(src[0]);
            }
        }
    }
}
static void ggml_compute_forward_alibi(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_alibi_f16(params, src0, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_alibi_f32(params, src0, dst);
            } break;
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q8_1:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
        case GGML_TYPE_Q8_K:
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_clamp

static void ggml_compute_forward_clamp_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    float min;
    float max;
    memcpy(&min, (float *) dst->op_params + 0, sizeof(float));
    memcpy(&max, (float *) dst->op_params + 1, sizeof(float));

    const int ith = params->ith;
    const int nth = params->nth;

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    const size_t nb00 = src0->nb[0];
    const size_t nb01 = src0->nb[1];

    const size_t nb0 = dst->nb[0];
    const size_t nb1 = dst->nb[1];

    GGML_ASSERT( nb0 == sizeof(float));
    GGML_ASSERT(nb00 == sizeof(float));

    for (int j = ith; j < n; j += nth) {
        float * dst_ptr  = (float *) ((char *)  dst->data + j*nb1);
        float * src0_ptr = (float *) ((char *) src0->data + j*nb01);

        for (int i = 0; i < nc; i++) {
            dst_ptr[i] = MAX(MIN(src0_ptr[i], max), min);
        }
    }
}
static void ggml_compute_forward_clamp(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_clamp_f32(params, src0, dst);
            } break;
        case GGML_TYPE_F16:
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q8_1:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
        case GGML_TYPE_Q8_K:
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_rope

static void ggml_compute_forward_rope_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    float freq_base;
    float freq_scale;

    const int n_past = ((int32_t *) dst->op_params)[0];
    const int n_dims = ((int32_t *) dst->op_params)[1];
    const int mode   = ((int32_t *) dst->op_params)[2];
    const int n_ctx  = ((int32_t *) dst->op_params)[3];
    memcpy(&freq_base,  (int32_t *) dst->op_params + 4, sizeof(float));
    memcpy(&freq_scale, (int32_t *) dst->op_params + 5, sizeof(float));

    assert(n_past >= 0);

    GGML_TENSOR_UNARY_OP_LOCALS;

    //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
    //printf("n_past = %d, ne2 = %d\n", n_past, ne2);

    GGML_ASSERT(nb00 == sizeof(float));

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(dst);

    GGML_ASSERT(n_dims <= ne0);
    GGML_ASSERT(n_dims % 2 == 0);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    // row index used to determine which thread to use
    int ir = 0;

    const float theta_scale = powf(freq_base, -2.0f/n_dims);

    const bool is_neox = mode & 2;
    const bool is_glm  = mode & 4;
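
    // In the standard (non-GLM) layouts, each value pair at rotary position p is rotated by
    // theta_i = freq_scale * p * freq_base^(-2i/n_dims); theta starts at freq_scale*p and is
    // multiplied by theta_scale after every pair:
    //   x0' = x0*cos(theta) - x1*sin(theta)
    //   x1' = x0*sin(theta) + x1*cos(theta)
    // Illustrative example (not from the original source): with p = 1, freq_base = 10000,
    // freq_scale = 1, n_dims = 4, pair 0 rotates by 1.0 rad and pair 1 by 10000^(-1/2) = 0.01 rad.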
    for (int64_t i3 = 0; i3 < ne3; i3++) {
        for (int64_t i2 = ((mode & 1) == 0 ? 0 : n_past); i2 < ne2; i2++) {
            const int64_t p = ((mode & 1) == 0 ? n_past + i2 : i2);
            for (int64_t i1 = 0; i1 < ne1; i1++) {
                if (ir++ < ir0) continue;
                if (ir   > ir1) break;

                float theta = freq_scale * (float)p;

                if (is_glm) {
                    theta = MIN(p, n_ctx - 2);
                    float block_theta = MAX(p - (n_ctx - 2), 0);
                    for (int64_t i0 = 0; i0 < ne0 / 4; i0++) {
                        const float cos_theta = cosf(theta);
                        const float sin_theta = sinf(theta);
                        const float cos_block_theta = cosf(block_theta);
                        const float sin_block_theta = sinf(block_theta);

                        theta       *= theta_scale;
                        block_theta *= theta_scale;

                        const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
                              float * dst_data  = (float *)((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + i0*nb0);

                        const float x0 = src[0];
                        const float x1 = src[n_dims/2];
                        const float x2 = src[n_dims];
                        const float x3 = src[n_dims/2*3];

                        dst_data[0]          = x0*cos_theta - x1*sin_theta;
                        dst_data[n_dims/2]   = x0*sin_theta + x1*cos_theta;
                        dst_data[n_dims]     = x2*cos_block_theta - x3*sin_block_theta;
                        dst_data[n_dims/2*3] = x2*sin_block_theta + x3*cos_block_theta;
                    }
                } else if (!is_neox) {
                    for (int64_t i0 = 0; i0 < ne0; i0 += 2) {
                        const float cos_theta = cosf(theta);
                        const float sin_theta = sinf(theta);

                        theta *= theta_scale;

                        const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
                              float * dst_data  = (float *)((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + i0*nb0);

                        const float x0 = src[0];
                        const float x1 = src[1];

                        dst_data[0] = x0*cos_theta - x1*sin_theta;
                        dst_data[1] = x0*sin_theta + x1*cos_theta;
                    }
                } else {
                    // TODO: this is probably wrong, but I can't figure it out ..
                    // ref: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt_neox/modeling_gpt_neox.py#LL251C1-L294C28
                    for (int64_t ib = 0; ib < ne0/n_dims; ++ib) {
                        for (int64_t ic = 0; ic < n_dims; ic += 2) {
                            const float cos_theta = cosf(theta);
                            const float sin_theta = sinf(theta);

                            theta *= theta_scale;

                            const int64_t i0 = ib*n_dims + ic/2;

                            const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
                                  float * dst_data  = (float *)((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + i0*nb0);

                            const float x0 = src[0];
                            const float x1 = src[n_dims/2];

                            dst_data[0]        = x0*cos_theta - x1*sin_theta;
                            dst_data[n_dims/2] = x0*sin_theta + x1*cos_theta;
                        }
                    }
                }
            }
        }
    }
}
static void ggml_compute_forward_rope_f16(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    float freq_base;
    float freq_scale;

    const int n_past = ((int32_t *) dst->op_params)[0];
    const int n_dims = ((int32_t *) dst->op_params)[1];
    const int mode   = ((int32_t *) dst->op_params)[2];
    const int n_ctx  = ((int32_t *) dst->op_params)[3];
    memcpy(&freq_base,  (int32_t *) dst->op_params + 4, sizeof(float));
    memcpy(&freq_scale, (int32_t *) dst->op_params + 5, sizeof(float));

    assert(n_past >= 0);

    GGML_TENSOR_UNARY_OP_LOCALS;

    //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
    //printf("n_past = %d, ne2 = %d\n", n_past, ne2);

    GGML_ASSERT(nb0 == sizeof(ggml_fp16_t));

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(dst);

    GGML_ASSERT(n_dims <= ne0);
    GGML_ASSERT(n_dims % 2 == 0);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    // row index used to determine which thread to use
    int ir = 0;

    const float theta_scale = powf(freq_base, -2.0f/n_dims);

    const bool is_neox = mode & 2;
    const bool is_glm  = mode & 4;

    for (int64_t i3 = 0; i3 < ne3; i3++) {
        for (int64_t i2 = ((mode & 1) == 0 ? 0 : n_past); i2 < ne2; i2++) {
            const int64_t p = ((mode & 1) == 0 ? n_past + i2 : i2);
            for (int64_t i1 = 0; i1 < ne1; i1++) {
                if (ir++ < ir0) continue;
                if (ir   > ir1) break;

                float theta = freq_scale * (float)p;

                if (is_glm) {
                    theta = MIN(p, n_ctx - 2);
                    float block_theta = MAX(p - (n_ctx - 2), 0);
                    for (int64_t i0 = 0; i0 < ne0 / 4; i0++) {
                        const float cos_theta = cosf(theta);
                        const float sin_theta = sinf(theta);
                        const float cos_block_theta = cosf(block_theta);
                        const float sin_block_theta = sinf(block_theta);

                        theta       *= theta_scale;
                        block_theta *= theta_scale;

                        const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
                              ggml_fp16_t * dst_data  = (ggml_fp16_t *)((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + i0*nb0);

                        const float x0 = GGML_FP16_TO_FP32(src[0]);
                        const float x1 = GGML_FP16_TO_FP32(src[n_dims/2]);
                        const float x2 = GGML_FP16_TO_FP32(src[n_dims]);
                        const float x3 = GGML_FP16_TO_FP32(src[n_dims/2*3]);

                        dst_data[0]          = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta);
                        dst_data[n_dims/2]   = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta);
                        dst_data[n_dims]     = GGML_FP32_TO_FP16(x2*cos_block_theta - x3*sin_block_theta);
                        dst_data[n_dims/2*3] = GGML_FP32_TO_FP16(x2*sin_block_theta + x3*cos_block_theta);
                    }
                } else if (!is_neox) {
                    for (int64_t i0 = 0; i0 < ne0; i0 += 2) {
                        const float cos_theta = cosf(theta);
                        const float sin_theta = sinf(theta);

                        theta *= theta_scale;

                        const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
                              ggml_fp16_t * dst_data  = (ggml_fp16_t *)((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + i0*nb0);

                        const float x0 = GGML_FP16_TO_FP32(src[0]);
                        const float x1 = GGML_FP16_TO_FP32(src[1]);

                        dst_data[0] = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta);
                        dst_data[1] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta);
                    }
                } else {
                    // TODO: this is probably wrong, but I can't figure it out ..
                    // ref: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt_neox/modeling_gpt_neox.py#LL251C1-L294C28
                    for (int64_t ib = 0; ib < ne0/n_dims; ++ib) {
                        for (int64_t ic = 0; ic < n_dims; ic += 2) {
                            const float cos_theta = cosf(theta);
                            const float sin_theta = sinf(theta);

                            theta *= theta_scale;

                            const int64_t i0 = ib*n_dims + ic/2;

                            const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
                                  ggml_fp16_t * dst_data  = (ggml_fp16_t *)((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + i0*nb0);

                            const float x0 = GGML_FP16_TO_FP32(src[0]);
                            const float x1 = GGML_FP16_TO_FP32(src[n_dims/2]);

                            dst_data[0]        = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta);
                            dst_data[n_dims/2] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta);
                        }
                    }
                }
            }
        }
    }
}
static void ggml_compute_forward_rope(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_rope_f16(params, src0, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_rope_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_rope_back

static void ggml_compute_forward_rope_back_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // y = rope(x, options)
    // dx = rope_back(dy, options)
    // src0 is dy; the options are stored in dst->op_params

    const int n_past = ((int32_t *) dst->op_params)[0];
    const int n_dims = ((int32_t *) dst->op_params)[1];
    const int mode   = ((int32_t *) dst->op_params)[2];

    assert(n_past >= 0);

    GGML_TENSOR_UNARY_OP_LOCALS;

    //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
    //printf("n_past = %d, ne2 = %d\n", n_past, ne2);

    assert(nb0 == sizeof(float));

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(dst);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    // row index used to determine which thread to use
    int ir = 0;

    const float theta_scale = powf(10000.0, -2.0f/n_dims);
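
    // The backward pass rotates by -theta, i.e. it applies the transpose of the forward
    // rotation matrix:
    //   dx0 =  dy0*cos(theta) + dy1*sin(theta)
    //   dx1 = -dy0*sin(theta) + dy1*cos(theta)
    // Note that theta_scale here is derived from the fixed base 10000.0 - at this point the
    // backward op does not read freq_base/freq_scale from op_params the way the forward op does.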
    const bool is_neox = mode & 2;

    for (int64_t i3 = 0; i3 < ne3; i3++) {
        for (int64_t i2 = ((mode & 1) == 0 ? 0 : n_past); i2 < ne2; i2++) {
            const int64_t p = ((mode & 1) == 0 ? n_past + i2 : i2);
            for (int64_t i1 = 0; i1 < ne1; i1++) {
                if (ir++ < ir0) continue;
                if (ir   > ir1) break;

                float theta = (float)p;

                if (!is_neox) {
                    for (int64_t i0 = 0; i0 < ne0; i0 += 2) {
                        const float cos_theta = cosf(theta);
                        const float sin_theta = sinf(theta);

                        theta *= theta_scale;

                        const float * const dy = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
                              float *       dx = (float *)((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + i0*nb0);

                        const float dy0 = dy[0];
                        const float dy1 = dy[1];

                        dx[0] =   dy0*cos_theta + dy1*sin_theta;
                        dx[1] = - dy0*sin_theta + dy1*cos_theta;
                    }
                } else {
                    for (int64_t ib = 0; ib < ne0/n_dims; ++ib) {
                        for (int64_t ic = 0; ic < n_dims; ic += 2) {
                            const float cos_theta = cosf(theta);
                            const float sin_theta = sinf(theta);

                            theta *= theta_scale;

                            const int64_t i0 = ib*n_dims + ic/2;

                            const float * const dy = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
                                  float *       dx = (float *)((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + i0*nb0);

                            const float dy0 = dy[0];
                            const float dy1 = dy[n_dims/2];

                            dx[0]        =   dy0*cos_theta + dy1*sin_theta;
                            dx[n_dims/2] = - dy0*sin_theta + dy1*cos_theta;
                        }
                    }
                }
            }
        }
    }
}
static void ggml_compute_forward_rope_back_f16(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // y = rope(x, options)
    // dx = rope_back(dy, options)
    // src0 is dy; the options are stored in dst->op_params

    const int n_past = ((int32_t *) dst->op_params)[0];
    const int n_dims = ((int32_t *) dst->op_params)[1];
    const int mode   = ((int32_t *) dst->op_params)[2];

    assert(n_past >= 0);

    GGML_TENSOR_UNARY_OP_LOCALS;

    //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
    //printf("n_past = %d, ne2 = %d\n", n_past, ne2);

    assert(nb0 == sizeof(ggml_fp16_t));

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(dst);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    // row index used to determine which thread to use
    int ir = 0;

    const float theta_scale = powf(10000.0, -2.0f/n_dims);

    const bool is_neox = mode & 2;

    for (int64_t i3 = 0; i3 < ne3; i3++) {
        for (int64_t i2 = ((mode & 1) == 0 ? 0 : n_past); i2 < ne2; i2++) {
            const int64_t p = ((mode & 1) == 0 ? n_past + i2 : i2);
            for (int64_t i1 = 0; i1 < ne1; i1++) {
                if (ir++ < ir0) continue;
                if (ir   > ir1) break;

                float theta = (float)p;

                if (!is_neox) {
                    for (int64_t i0 = 0; i0 < ne0; i0 += 2) {
                        const float cos_theta = cosf(theta);
                        const float sin_theta = sinf(theta);

                        theta *= theta_scale;

                        const ggml_fp16_t * const dy = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
                              ggml_fp16_t *       dx = (ggml_fp16_t *)((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + i0*nb0);

                        const float dy0 = GGML_FP16_TO_FP32(dy[0]);
                        const float dy1 = GGML_FP16_TO_FP32(dy[1]);

                        dx[0] = GGML_FP32_TO_FP16( dy0*cos_theta + dy1*sin_theta);
                        dx[1] = GGML_FP32_TO_FP16(-dy0*sin_theta + dy1*cos_theta);
                    }
                } else {
                    for (int64_t ib = 0; ib < ne0/n_dims; ++ib) {
                        for (int64_t ic = 0; ic < n_dims; ic += 2) {
                            const float cos_theta = cosf(theta);
                            const float sin_theta = sinf(theta);

                            theta *= theta_scale;

                            const int64_t i0 = ib*n_dims + ic/2;

                            const ggml_fp16_t * const dy = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
                                  ggml_fp16_t *       dx = (ggml_fp16_t *)((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + i0*nb0);

                            const float dy0 = GGML_FP16_TO_FP32(dy[0]);
                            const float dy1 = GGML_FP16_TO_FP32(dy[n_dims/2]);

                            dx[0]        = GGML_FP32_TO_FP16( dy0*cos_theta + dy1*sin_theta);
                            dx[n_dims/2] = GGML_FP32_TO_FP16(-dy0*sin_theta + dy1*cos_theta);
                        }
                    }
                }
            }
        }
    }
}
static void ggml_compute_forward_rope_back(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_rope_back_f16(params, src0, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_rope_back_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_conv_1d

static void ggml_compute_forward_conv_1d_s1_ph_f16_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    GGML_TENSOR_BINARY_OP_LOCALS;

    const int ith = params->ith;
    const int nth = params->nth;

    const int nk = ne00;
    const int nh = nk/2;

    const int ew0 = ggml_up32(ne01);
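
    // Work-buffer layout used below (a sketch of the packing, inferred from the INIT phase):
    //   wdata[0 .. ne02*ew0*ne00)  - the kernel, re-packed so that tap i00 of all ne01 input
    //                                channels is contiguous: wdata[i02*ew0*ne00 + i00*ew0 + i01]
    //   wdata[ne02*ew0*ne00 .. )   - the source, padded by nh samples on each side with the same
    //                                channel interleaving: wdata[(i10 + nh)*ew0 + i11]
    // ew0 rounds ne01 up to a multiple of 32, so every output sample reduces to nk dot products
    // of length ew0 over aligned rows (one per kernel tap).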
    GGML_ASSERT(ne00 % 2 == 1); // TODO: support even kernel sizes
    GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nb10 == sizeof(float));

    if (params->type == GGML_TASK_INIT) {
        // TODO: fix this memset (wsize is overestimated)
        memset(params->wdata, 0, params->wsize);

        // prepare kernel data (src0)
        {
            ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;

            for (int64_t i02 = 0; i02 < ne02; i02++) {
                for (int64_t i01 = 0; i01 < ne01; i01++) {
                    const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i02*nb02 + i01*nb01);
                    ggml_fp16_t * dst_data = wdata + i02*ew0*ne00;
                    for (int64_t i00 = 0; i00 < ne00; i00++) {
                        dst_data[i00*ew0 + i01] = src[i00];
                    }
                }
            }
        }

        // prepare source data (src1)
        {
            ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + ne02*ew0*ne00;

            for (int64_t i11 = 0; i11 < ne11; i11++) {
                const float * const src = (float *)((char *) src1->data + i11*nb11);
                ggml_fp16_t * dst_data = wdata;
                for (int64_t i10 = 0; i10 < ne10; i10++) {
                    dst_data[(i10 + nh)*ew0 + i11] = GGML_FP32_TO_FP16(src[i10]);
                }
            }
        }

        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // total rows in dst
    const int nr = ne02;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        float * dst_data = (float *)((char *) dst->data + i1*nb1);
        for (int64_t i0 = 0; i0 < ne10; ++i0) {
            dst_data[i0] = 0;
            for (int k = -nh; k <= nh; k++) {
                float v = 0.0f;
                ggml_vec_dot_f16(ew0, &v,
                        (ggml_fp16_t *) params->wdata +                 i1*ew0*ne00 +      (nh + k)*ew0,
                        (ggml_fp16_t *) params->wdata + ne02*ew0*ne00 + (i0 + nh + k)*ew0);

                dst_data[i0] += v;
            }
        }
    }
}
static void ggml_compute_forward_conv_1d_s1_ph_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    GGML_TENSOR_BINARY_OP_LOCALS;

    const int ith = params->ith;
    const int nth = params->nth;

    const int nk = ne00;
    const int nh = nk/2;

    const int ew0 = ggml_up32(ne01);

    GGML_ASSERT(ne00 % 2 == 1); // TODO: support even kernel sizes
    GGML_ASSERT(nb00 == sizeof(float));
    GGML_ASSERT(nb10 == sizeof(float));

    if (params->type == GGML_TASK_INIT) {
        // TODO: fix this memset (wsize is overestimated)
        memset(params->wdata, 0, params->wsize);

        // prepare kernel data (src0)
        {
            float * const wdata = (float *) params->wdata + 0;

            for (int64_t i02 = 0; i02 < ne02; i02++) {
                for (int64_t i01 = 0; i01 < ne01; i01++) {
                    const float * const src = (float *)((char *) src0->data + i02*nb02 + i01*nb01);
                    float * dst_data = wdata + i02*ew0*ne00;
                    for (int64_t i00 = 0; i00 < ne00; i00++) {
                        dst_data[i00*ew0 + i01] = src[i00];
                    }
                }
            }
        }

        // prepare source data (src1)
        {
            float * const wdata = (float *) params->wdata + ne02*ew0*ne00;

            for (int64_t i11 = 0; i11 < ne11; i11++) {
                const float * const src = (float *)((char *) src1->data + i11*nb11);
                float * dst_data = wdata;
                for (int64_t i10 = 0; i10 < ne10; i10++) {
                    dst_data[(i10 + nh)*ew0 + i11] = src[i10];
                }
            }
        }

        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // total rows in dst
    const int nr = ne02;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        float * dst_data = (float *)((char *) dst->data + i1*nb1);
        for (int64_t i0 = 0; i0 < ne10; ++i0) {
            dst_data[i0] = 0;
            for (int k = -nh; k <= nh; k++) {
                float v = 0.0f;
                ggml_vec_dot_f32(ew0, &v,
                        (float *) params->wdata +                 i1*ew0*ne00 +      (nh + k)*ew0,
                        (float *) params->wdata + ne02*ew0*ne00 + (i0 + nh + k)*ew0);

                dst_data[i0] += v;
            }
        }
    }
}
static void ggml_compute_forward_conv_1d_s1_ph(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_conv_1d_s1_ph_f16_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_conv_1d_s1_ph_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
static void ggml_compute_forward_conv_1d_s2_ph_f16_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    GGML_TENSOR_BINARY_OP_LOCALS;

    const int ith = params->ith;
    const int nth = params->nth;

    const int nk = ne00;
    const int nh = nk/2;

    const int ew0 = ggml_up32(ne01);

    GGML_ASSERT(ne00 % 2 == 1); // TODO: support even kernel sizes
    GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nb10 == sizeof(float));

    if (params->type == GGML_TASK_INIT) {
        // TODO: fix this memset (wsize is overestimated)
        memset(params->wdata, 0, params->wsize);

        // prepare kernel data (src0)
        {
            ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;

            for (int64_t i02 = 0; i02 < ne02; i02++) {
                for (int64_t i01 = 0; i01 < ne01; i01++) {
                    const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i02*nb02 + i01*nb01);
                    ggml_fp16_t * dst_data = wdata + i02*ew0*ne00;
                    for (int64_t i00 = 0; i00 < ne00; i00++) {
                        dst_data[i00*ew0 + i01] = src[i00];
                    }
                }
            }
        }

        // prepare source data (src1)
        {
            ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + ne02*ew0*ne00;

            for (int64_t i11 = 0; i11 < ne11; i11++) {
                const float * const src = (float *)((char *) src1->data + i11*nb11);
                ggml_fp16_t * dst_data = wdata;
                for (int64_t i10 = 0; i10 < ne10; i10++) {
                    dst_data[(i10 + nh)*ew0 + i11] = GGML_FP32_TO_FP16(src[i10]);
                }
            }
        }

        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // total rows in dst
    const int nr = ne02;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        float * dst_data = (float *)((char *) dst->data + i1*nb1);
        for (int64_t i0 = 0; i0 < ne10; i0 += 2) {
            dst_data[i0/2] = 0;
            for (int k = -nh; k <= nh; k++) {
                float v = 0.0f;
                ggml_vec_dot_f16(ew0, &v,
                        (ggml_fp16_t *) params->wdata +                 i1*ew0*ne00 +      (nh + k)*ew0,
                        (ggml_fp16_t *) params->wdata + ne02*ew0*ne00 + (i0 + nh + k)*ew0);

                dst_data[i0/2] += v;
            }
        }
    }
}
static void ggml_compute_forward_conv_1d_s2_ph_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    GGML_TENSOR_BINARY_OP_LOCALS;

    const int ith = params->ith;
    const int nth = params->nth;

    const int nk = ne00;
    const int nh = nk/2;

    const int ew0 = ggml_up32(ne01);

    GGML_ASSERT(ne00 % 2 == 1); // TODO: support even kernel sizes
    GGML_ASSERT(nb00 == sizeof(float));
    GGML_ASSERT(nb10 == sizeof(float));

    if (params->type == GGML_TASK_INIT) {
        // TODO: fix this memset (wsize is overestimated)
        memset(params->wdata, 0, params->wsize);

        // prepare kernel data (src0)
        {
            float * const wdata = (float *) params->wdata + 0;

            for (int64_t i02 = 0; i02 < ne02; i02++) {
                for (int64_t i01 = 0; i01 < ne01; i01++) {
                    const float * const src = (float *)((char *) src0->data + i02*nb02 + i01*nb01);
                    float * dst_data = wdata + i02*ew0*ne00;
                    for (int64_t i00 = 0; i00 < ne00; i00++) {
                        dst_data[i00*ew0 + i01] = src[i00];
                    }
                }
            }
        }

        // prepare source data (src1)
        {
            float * const wdata = (float *) params->wdata + ne02*ew0*ne00;

            for (int64_t i11 = 0; i11 < ne11; i11++) {
                const float * const src = (float *)((char *) src1->data + i11*nb11);
                float * dst_data = wdata;
                for (int64_t i10 = 0; i10 < ne10; i10++) {
                    dst_data[(i10 + nh)*ew0 + i11] = src[i10];
                }
            }
        }

        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // total rows in dst
    const int nr = ne02;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        float * dst_data = (float *)((char *) dst->data + i1*nb1);
        for (int64_t i0 = 0; i0 < ne10; i0 += 2) {
            dst_data[i0/2] = 0;
            for (int k = -nh; k <= nh; k++) {
                float v = 0.0f;
                ggml_vec_dot_f32(ew0, &v,
                        (float *) params->wdata +                 i1*ew0*ne00 +      (nh + k)*ew0,
                        (float *) params->wdata + ne02*ew0*ne00 + (i0 + nh + k)*ew0);

                dst_data[i0/2] += v;
            }
        }
    }
}
static void ggml_compute_forward_conv_1d_s2_ph(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_conv_1d_s2_ph_f16_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_conv_1d_s2_ph_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_conv_1d

static void ggml_compute_forward_conv_1d(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    const int32_t s0 = ((const int32_t*)(dst->op_params))[0];
    const int32_t p0 = ((const int32_t*)(dst->op_params))[1];
    const int32_t d0 = ((const int32_t*)(dst->op_params))[2];

    GGML_ASSERT(d0 == 1);             // dilation not supported
    GGML_ASSERT(p0 == src0->ne[0]/2); // only half padding supported

    if (s0 == 1) {
        ggml_compute_forward_conv_1d_s1_ph(params, src0, src1, dst);
    } else if (s0 == 2) {
        ggml_compute_forward_conv_1d_s2_ph(params, src0, src1, dst);
    } else {
        GGML_ASSERT(false); // only stride 1 and 2 supported
    }
}
// ggml_compute_forward_conv_2d

static void ggml_compute_forward_conv_2d_f16_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    GGML_TENSOR_BINARY_OP_LOCALS;

    const int ith = params->ith;
    const int nth = params->nth;

    const int nk0 = ne00;
    const int nk1 = ne01;

    // size of the convolution row - the kernel size unrolled across all channels
    const int ew0 = nk0*nk1*ne02;
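
    // This is an im2col-style lowering: during INIT, every output pixel (i0, i1) gathers its
    // nk0*nk1*ne02 receptive-field values into one contiguous row of length ew0, so the
    // convolution below reduces to a single ggml_vec_dot_f16 per (output pixel, output channel).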
    const int32_t s0 = ((const int32_t*)(dst->op_params))[0];
    const int32_t s1 = ((const int32_t*)(dst->op_params))[1];
    const int32_t p0 = ((const int32_t*)(dst->op_params))[2];
    const int32_t p1 = ((const int32_t*)(dst->op_params))[3];
    const int32_t d0 = ((const int32_t*)(dst->op_params))[4];
    const int32_t d1 = ((const int32_t*)(dst->op_params))[5];

    GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nb10 == sizeof(float));

    if (params->type == GGML_TASK_INIT) {
        memset(params->wdata, 0, params->wsize);

        // prepare source data (src1)
        {
            ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;

            for (int i12 = 0; i12 < ne12; i12++) {
                const float * const src = (float *)((char *) src1->data + i12*nb12);
                ggml_fp16_t * dst_data = wdata;

                for (int i1 = 0; i1 < ne1; i1++) {
                    for (int i0 = 0; i0 < ne0; i0++) {
                        for (int ik1 = 0; ik1 < nk1; ik1++) {
                            for (int ik0 = 0; ik0 < nk0; ik0++) {
                                const int idx0 = i0*s0 + ik0*d0 - p0;
                                const int idx1 = i1*s1 + ik1*d1 - p1;

                                if (!(idx1 < 0 || idx1 >= ne11 || idx0 < 0 || idx0 >= ne10)) {
                                    dst_data[(i1*ne0 + i0)*ew0 + i12*(nk0*nk1) + ik1*nk0 + ik0] =
                                        GGML_FP32_TO_FP16(src[idx1*ne10 + idx0]);
                                }
                            }
                        }
                    }
                }
            }
        }

        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // total patches in dst
    const int np = ne2;

    // patches per thread
    const int dp = (np + nth - 1)/nth;

    // patch range for this thread
    const int ip0 = dp*ith;
    const int ip1 = MIN(ip0 + dp, np);

    ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;

    for (int i3 = 0; i3 < ne3; i3++) {
        for (int i2 = ip0; i2 < ip1; i2++) {
            float * dst_data = (float *)((char *) dst->data + i3*nb3 + i2*nb2);

            for (int i1 = 0; i1 < ne1; ++i1) {
                for (int i0 = 0; i0 < ne0; ++i0) {
                    ggml_vec_dot_f16(ew0, dst_data + i1*ne0 + i0,
                            (ggml_fp16_t *) ((char *) src0->data + i2*nb03),
                            (ggml_fp16_t *)                wdata + i3*nb3 + (i1*ne0 + i0)*ew0);
                }
            }
        }
    }
}
static void ggml_compute_forward_conv_2d(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_conv_2d_f16_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F32:
            {
                //ggml_compute_forward_conv_2d_f32(params, src0, src1, dst);
                GGML_ASSERT(false);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_pool_1d_sk_p0

static void ggml_compute_forward_pool_1d_sk_p0(
        const struct ggml_compute_params * params,
        const enum ggml_op_pool op,
        const struct ggml_tensor * src,
        const int k,
        struct ggml_tensor * dst) {
    assert(src->type == GGML_TYPE_F32);
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const char * cdata = (const char *)src->data;
    const char * const data_end = cdata + ggml_nbytes(src);
    float * drow = (float *)dst->data;

    const int64_t rs = dst->ne[0];

    while (cdata < data_end) {
        const float * const srow = (const float *)cdata;

        int j = 0;

        for (int64_t i = 0; i < rs; ++i) {
            switch (op) {
                case GGML_OP_POOL_AVG:   drow[i] = 0;        break;
                case GGML_OP_POOL_MAX:   drow[i] = -FLT_MAX; break;
                case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
            }
            for (int ki = 0; ki < k; ++ki) {
                switch (op) {
                    case GGML_OP_POOL_AVG:   drow[i] += srow[j];                       break;
                    case GGML_OP_POOL_MAX:   if (srow[j] > drow[i]) drow[i] = srow[j]; break;
                    case GGML_OP_POOL_COUNT: GGML_ASSERT(false);                       break;
                }
                ++j;
            }
            switch (op) {
                case GGML_OP_POOL_AVG:   drow[i] /= k;       break;
                case GGML_OP_POOL_MAX:                       break;
                case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
            }
        }

        cdata += src->nb[1];
        drow  += rs;
    }
}
// ggml_compute_forward_pool_1d

static void ggml_compute_forward_pool_1d(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    const int32_t * opts = (const int32_t *)dst->op_params;
    enum ggml_op_pool op = opts[0];
    const int k0 = opts[1];
    const int s0 = opts[2];
    const int p0 = opts[3];
    GGML_ASSERT(p0 == 0);  // padding not supported
    GGML_ASSERT(k0 == s0); // only s = k supported
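
    // Illustrative example (not from the original source): with k0 = s0 = 2 and p0 = 0, a source
    // row [1, 2, 3, 4] pools to [1.5, 3.5] under GGML_OP_POOL_AVG and to [2, 4] under GGML_OP_POOL_MAX.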

    ggml_compute_forward_pool_1d_sk_p0(params, op, src0, k0, dst);
}
// ggml_compute_forward_pool_2d_sk_p0

static void ggml_compute_forward_pool_2d_sk_p0(
        const struct ggml_compute_params * params,
        const enum ggml_op_pool op,
        const struct ggml_tensor * src,
        const int k0,
        const int k1,
        struct ggml_tensor * dst) {
    assert(src->type == GGML_TYPE_F32);
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const char * cdata = (const char *)src->data;
    const char * const data_end = cdata + ggml_nbytes(src);

    const int64_t px = dst->ne[0];
    const int64_t py = dst->ne[1];
    const int64_t pa = px * py;

    float * dplane = (float *)dst->data;

    const int ka = k0 * k1;
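
    // With p = 0 and s = k, output pixel (ox, oy) reduces the k0 x k1 input window whose top-left
    // corner is (ox*k0, oy*k1); ka is the divisor for the average-pooling case.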
    while (cdata < data_end) {
        for (int oy = 0; oy < py; ++oy) {
            float * const drow = dplane + oy * px;
            for (int ox = 0; ox < px; ++ox) {
                float * const out = drow + ox;
                switch (op) {
                    case GGML_OP_POOL_AVG:   *out = 0;           break;
                    case GGML_OP_POOL_MAX:   *out = -FLT_MAX;    break;
                    case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
                }

                const int ix = ox * k0;
                const int iy = oy * k1;

                for (int ky = 0; ky < k1; ++ky) {
                    const float * const srow = (const float *)(cdata + src->nb[1] * (iy + ky));
                    for (int kx = 0; kx < k0; ++kx) {
                        int j = ix + kx;
                        switch (op) {
                            case GGML_OP_POOL_AVG:   *out += srow[j];                    break;
                            case GGML_OP_POOL_MAX:   if (srow[j] > *out) *out = srow[j]; break;
                            case GGML_OP_POOL_COUNT: GGML_ASSERT(false);                 break;
                        }
                    }
                }
                switch (op) {
                    case GGML_OP_POOL_AVG:   *out /= ka;         break;
                    case GGML_OP_POOL_MAX:                       break;
                    case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
                }
            }
        }

        cdata  += src->nb[2];
        dplane += pa;
    }
}
// ggml_compute_forward_pool_2d

static void ggml_compute_forward_pool_2d(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    const int32_t * opts = (const int32_t *)dst->op_params;
    enum ggml_op_pool op = opts[0];
    const int k0 = opts[1];
    const int k1 = opts[2];
    const int s0 = opts[3];
    const int s1 = opts[4];
    const int p0 = opts[5];
    const int p1 = opts[6];
    GGML_ASSERT(p0 == 0);
    GGML_ASSERT(p1 == 0); // padding not supported
    GGML_ASSERT(k0 == s0);
    GGML_ASSERT(k1 == s1); // only s = k supported

    ggml_compute_forward_pool_2d_sk_p0(params, op, src0, k0, k1, dst);
}
// ggml_compute_forward_flash_attn

static void ggml_compute_forward_flash_attn_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * q,
        const struct ggml_tensor * k,
        const struct ggml_tensor * v,
        const bool masked,
        struct ggml_tensor * dst) {
    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    GGML_TENSOR_LOCALS(int64_t, neq, q,   ne);
    GGML_TENSOR_LOCALS(size_t,  nbq, q,   nb);
    GGML_TENSOR_LOCALS(int64_t, nek, k,   ne);
    GGML_TENSOR_LOCALS(size_t,  nbk, k,   nb);
    GGML_TENSOR_LOCALS(int64_t, nev, v,   ne);
    GGML_TENSOR_LOCALS(size_t,  nbv, v,   nb);
    GGML_TENSOR_LOCALS(int64_t, ne,  dst, ne);
    GGML_TENSOR_LOCALS(size_t,  nb,  dst, nb);

    const int ith = params->ith;
    const int nth = params->nth;

    const int64_t D = neq0;
    const int64_t N = neq1;
    const int64_t P = nek1 - N;
    const int64_t M = P + N;

    const int Mup = ggml_up(M, GGML_SOFT_MAX_UNROLL);

    GGML_ASSERT(ne0 == D);
    GGML_ASSERT(ne1 == N);
    GGML_ASSERT(P >= 0);

    GGML_ASSERT(nbq0 == sizeof(float));
    GGML_ASSERT(nbk0 == sizeof(float));
    GGML_ASSERT(nbv0 == sizeof(float));

    GGML_ASSERT(neq0 == D);
    GGML_ASSERT(nek0 == D);
    GGML_ASSERT(nev1 == D);

    GGML_ASSERT(neq1 == N);
    GGML_ASSERT(nek1 == N + P);
    GGML_ASSERT(nev1 == D);

    // dst cannot be transposed or permuted
    GGML_ASSERT(nb0 == sizeof(float));
    GGML_ASSERT(nb0 <= nb1);
    GGML_ASSERT(nb1 <= nb2);
    GGML_ASSERT(nb2 <= nb3);

    if (params->type == GGML_TASK_INIT) {
        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // parallelize by q rows using ggml_vec_dot_f32

    // total rows in q
    const int nr = neq1*neq2*neq3;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    const float scale = 1.0f/sqrtf(D);
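
    // Per q row, the loop below computes (entirely in the thread-local scratch S):
    //   S   = (K . q) * 1/sqrt(D)   - attention scores against all M = P + N key positions
    //   S   = softmax(mask(S))      - the causal mask first sets future positions to -INF
    //   dst = V . S                 - weighted sum of the value rows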
    //printf("P=%d N=%d D=%d ir0=%d ir1=%d scale = %f\n", P, N, D, ir0, ir1, scale);

    for (int ir = ir0; ir < ir1; ++ir) {
        // q indices
        const int iq3 = ir/(neq2*neq1);
        const int iq2 = (ir - iq3*neq2*neq1)/neq1;
        const int iq1 = (ir - iq3*neq2*neq1 - iq2*neq1);

        float * S = (float *) params->wdata + ith*(Mup + CACHE_LINE_SIZE_F32);

        for (int i = M; i < Mup; ++i) {
            S[i] = -INFINITY;
        }

        for (int64_t ic = 0; ic < nek1; ++ic) {
            // k indices
            const int ik3 = iq3;
            const int ik2 = iq2;
            const int ik1 = ic;

            // S indices
            const int i1 = ik1;

            ggml_vec_dot_f32(neq0,
                    S + i1,
                    (float *) ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)),
                    (float *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)));
        }

        // scale
        ggml_vec_scale_f32(nek1, S, scale);

        if (masked) {
            for (int64_t i = P; i < M; i++) {
                if (i > P + iq1) {
                    S[i] = -INFINITY;
                }
            }
        }

        // softmax
        {
            float max = -INFINITY;
            ggml_vec_max_f32(M, &max, S);

            ggml_float sum = 0.0;
            {
#ifdef GGML_SOFT_MAX_ACCELERATE
                max = -max;
                vDSP_vsadd(S, 1, &max, S, 1, Mup);
                vvexpf(S, S, &Mup);
                ggml_vec_sum_f32(Mup, &sum, S);
#else
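                // Scalar fallback: expf() is approximated via table_exp_f16 - each argument is
                // rounded to fp16 and its 16-bit pattern indexes a precomputed table of
                // exponentials, trading one memory lookup for a libm call per element.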
                uint16_t   scvt[GGML_SOFT_MAX_UNROLL];
                ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 };

                for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) {
                    float * SS = S + i;

                    for (int j = 0; j < GGML_SOFT_MAX_UNROLL; ++j) {
                        if (SS[j] == -INFINITY) {
                            SS[j] = 0.0f;
                        } else {
                            ggml_fp16_t s = GGML_FP32_TO_FP16(SS[j] - max);
                            memcpy(&scvt[j], &s, sizeof(uint16_t));
                            const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt[j]]);
                            sump[j] += (ggml_float)val;
                            SS[j] = val;
                        }
                    }
                }

                for (int i = 0; i < GGML_SOFT_MAX_UNROLL; i++) {
                    sum += sump[i];
                }
#endif
            }

            assert(sum > 0.0);

            sum = 1.0/sum;
            ggml_vec_scale_f32(M, S, sum);

#ifndef NDEBUG
            for (int i = 0; i < M; ++i) {
                assert(!isnan(S[i]));
                assert(!isinf(S[i]));
            }
#endif
        }

        for (int64_t ic = 0; ic < nev1; ++ic) {
            // dst indices
            const int i1 = iq1;
            const int i2 = iq2;
            const int i3 = iq3;

            ggml_vec_dot_f32(nek1,
                    (float *) ((char *) dst->data + (ic*nb0 + i1*nb1  + i2*nb2  + i3*nb3)),
                    (float *) ((char *) v->data   + (         ic*nbv1 + i2*nbv2 + i3*nbv3)),
                    S);
        }
    }
}
static void ggml_compute_forward_flash_attn_f16(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * q,
        const struct ggml_tensor * k,
        const struct ggml_tensor * v,
        const bool masked,
        struct ggml_tensor * dst) {
    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    GGML_TENSOR_LOCALS(int64_t, neq, q,   ne);
    GGML_TENSOR_LOCALS(size_t,  nbq, q,   nb);
    GGML_TENSOR_LOCALS(int64_t, nek, k,   ne);
    GGML_TENSOR_LOCALS(size_t,  nbk, k,   nb);
    GGML_TENSOR_LOCALS(int64_t, nev, v,   ne);
    GGML_TENSOR_LOCALS(size_t,  nbv, v,   nb);
    GGML_TENSOR_LOCALS(int64_t, ne,  dst, ne);
    GGML_TENSOR_LOCALS(size_t,  nb,  dst, nb);

    const int ith = params->ith;
    const int nth = params->nth;

    const int64_t D = neq0;
    const int64_t N = neq1;
    const int64_t P = nek1 - N;
    const int64_t M = P + N;

    const int Mup = ggml_up(M, GGML_SOFT_MAX_UNROLL);

    GGML_ASSERT(ne0 == D);
    GGML_ASSERT(ne1 == N);
    GGML_ASSERT(P >= 0);

    GGML_ASSERT(nbq0 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nbk0 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nbv0 == sizeof(ggml_fp16_t));

    GGML_ASSERT(neq0 == D);
    GGML_ASSERT(nek0 == D);
    GGML_ASSERT(nev1 == D);

    GGML_ASSERT(neq1 == N);
    GGML_ASSERT(nek1 == N + P);
    GGML_ASSERT(nev1 == D);

    // dst cannot be transposed or permuted
    GGML_ASSERT(nb0 == sizeof(float));
    GGML_ASSERT(nb0 <= nb1);
    GGML_ASSERT(nb1 <= nb2);
    GGML_ASSERT(nb2 <= nb3);

    if (params->type == GGML_TASK_INIT) {
        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }
    // parallelize by q rows using ggml_vec_dot_f16
    // total rows in q
    const int nr = neq1*neq2*neq3;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    const float scale = 1.0f/sqrtf(D);

    //printf("P=%d N=%d D=%d ir0=%d ir1=%d scale = %f\n", P, N, D, ir0, ir1, scale);

    for (int ir = ir0; ir < ir1; ++ir) {
        // q indices
        const int iq3 = ir/(neq2*neq1);
        const int iq2 = (ir - iq3*neq2*neq1)/neq1;
        const int iq1 = (ir - iq3*neq2*neq1 - iq2*neq1);

        float * S = (float *) params->wdata + ith*(2*Mup + CACHE_LINE_SIZE_F32);

        for (int i = M; i < Mup; ++i) {
            S[i] = -INFINITY;
        }

        if (GGML_VEC_DOT_UNROLL > 2 || nek1 % GGML_VEC_DOT_UNROLL != 0) {
            for (int64_t ic = 0; ic < nek1; ++ic) {
                // k indices
                const int ik3 = iq3;
                const int ik2 = iq2;
                const int ik1 = ic;

                // S indices
                const int i1 = ik1;

                ggml_vec_dot_f16(neq0,
                        S + i1,
                        (ggml_fp16_t *) ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)),
                        (ggml_fp16_t *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)));
            }
        } else {
            for (int64_t ic = 0; ic < nek1; ic += GGML_VEC_DOT_UNROLL) {
                // k indices
                const int ik3 = iq3;
                const int ik2 = iq2;
                const int ik1 = ic;

                // S indices
                const int i1 = ik1;

                ggml_vec_dot_f16_unroll(neq0, nbk1,
                        S + i1,
                        ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)),
                        (ggml_fp16_t *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)));
            }
        }

        // scale
        ggml_vec_scale_f32(nek1, S, scale);

        if (masked) {
            for (int64_t i = P; i < M; i++) {
                if (i > P + iq1) {
                    S[i] = -INFINITY;
                }
            }
        }

        // softmax
        {
            float max = -INFINITY;
            ggml_vec_max_f32(M, &max, S);

            ggml_float sum = 0.0;
            {
#ifdef GGML_SOFT_MAX_ACCELERATE
                max = -max;
                vDSP_vsadd(S, 1, &max, S, 1, Mup);
                vvexpf(S, S, &Mup);
                ggml_vec_sum_f32(Mup, &sum, S);
#else
                uint16_t   scvt[GGML_SOFT_MAX_UNROLL];
                ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 };

                for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) {
                    float * SS = S + i;

                    for (int j = 0; j < GGML_SOFT_MAX_UNROLL; ++j) {
                        if (SS[j] == -INFINITY) {
                            SS[j] = 0.0f;
                        } else {
                            ggml_fp16_t s = GGML_FP32_TO_FP16(SS[j] - max);
                            memcpy(&scvt[j], &s, sizeof(uint16_t));
                            const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt[j]]);
                            sump[j] += (ggml_float)val;
                            SS[j] = val;
                        }
                    }
                }

                for (int i = 0; i < GGML_SOFT_MAX_UNROLL; i++) {
                    sum += sump[i];
                }
#endif
            }

            assert(sum > 0.0);

            sum = 1.0/sum;
            ggml_vec_scale_f32(M, S, sum);

#ifndef NDEBUG
            for (int i = 0; i < M; ++i) {
                assert(!isnan(S[i]));
                assert(!isinf(S[i]));
            }
#endif
        }

        ggml_fp16_t * S16 = (ggml_fp16_t *) ((float *) params->wdata + ith*(2*Mup + CACHE_LINE_SIZE_F32) + Mup);

        for (int64_t i = 0; i < M; i++) {
            S16[i] = GGML_FP32_TO_FP16(S[i]);
        }

        if (GGML_VEC_DOT_UNROLL == 1 || (nev1 % GGML_VEC_DOT_UNROLL != 0)) {
            for (int64_t ic = 0; ic < nev1; ++ic) {
                // dst indices
                const int i1 = iq1;
                const int i2 = iq2;
                const int i3 = iq3;

                ggml_vec_dot_f16(nek1,
                        (float *)       ((char *) dst->data + (ic*nb0 + i1*nb1  + i2*nb2  + i3*nb3)),
                        (ggml_fp16_t *) ((char *) v->data   + (         ic*nbv1 + i2*nbv2 + i3*nbv3)),
                        S16);
            }
        } else {
            for (int64_t ic = 0; ic < nev1; ic += GGML_VEC_DOT_UNROLL) {
                // dst indices
                const int i1 = iq1;
                const int i2 = iq2;
                const int i3 = iq3;

                ggml_vec_dot_f16_unroll(nek1, nbv1,
                        (float *) ((char *) dst->data + (ic*nb0 + i1*nb1  + i2*nb2  + i3*nb3)),
                        ((char *) v->data + (ic*nbv1 + i2*nbv2 + i3*nbv3)),
                        S16);
            }
        }
    }
}
static void ggml_compute_forward_flash_attn(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * q,
        const struct ggml_tensor * k,
        const struct ggml_tensor * v,
        const bool masked,
        struct ggml_tensor * dst) {
    switch (q->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_flash_attn_f16(params, q, k, v, masked, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_flash_attn_f32(params, q, k, v, masked, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_flash_ff

static void ggml_compute_forward_flash_ff_f16(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * a,  // F16
        const struct ggml_tensor * b0, // F16 fc_w
        const struct ggml_tensor * b1, // F32 fc_b
        const struct ggml_tensor * c0, // F16 proj_w
        const struct ggml_tensor * c1, // F32 proj_b
        struct ggml_tensor * dst) {
    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    GGML_TENSOR_LOCALS(int64_t, nea,  a,   ne);
    GGML_TENSOR_LOCALS(size_t,  nba,  a,   nb);
    GGML_TENSOR_LOCALS(int64_t, neb0, b0,  ne);
    GGML_TENSOR_LOCALS(size_t,  nbb0, b0,  nb);
    GGML_TENSOR_LOCALS(int64_t, neb1, b1,  ne);
    GGML_TENSOR_LOCALS(size_t,  nbb1, b1,  nb);
    GGML_TENSOR_LOCALS(int64_t, nec0, c0,  ne);
    GGML_TENSOR_LOCALS(size_t,  nbc0, c0,  nb);
    GGML_TENSOR_LOCALS(int64_t, nec1, c1,  ne);
    GGML_TENSOR_LOCALS(size_t,  nbc1, c1,  nb);
    GGML_TENSOR_LOCALS(int64_t, ne,   dst, ne);
    GGML_TENSOR_LOCALS(size_t,  nb,   dst, nb);

    const int ith = params->ith;
    const int nth = params->nth;

    const int64_t D = nea0;
    //const int64_t N = nea1;
    const int64_t M = neb01;
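
    // Fused feed-forward block; in matrix form this computes (a sketch, with D = input width
    // and M = hidden width):
    //   dst = GELU(a . b0 + b1) . c0 + c1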
    GGML_ASSERT(ne0 == nea0);
    GGML_ASSERT(ne1 == nea1);
    GGML_ASSERT(ne2 == nea2);

    GGML_ASSERT(nba0  == sizeof(ggml_fp16_t));
    GGML_ASSERT(nbb00 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nbb10 == sizeof(float));
    GGML_ASSERT(nbc00 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nbc10 == sizeof(float));

    GGML_ASSERT(neb00 == D);
    GGML_ASSERT(neb01 == M);
    GGML_ASSERT(neb10 == M);
    GGML_ASSERT(neb11 == 1);

    GGML_ASSERT(nec00 == M);
    GGML_ASSERT(nec01 == D);
    GGML_ASSERT(nec10 == D);
    GGML_ASSERT(nec11 == 1);

    // dst cannot be transposed or permuted
    GGML_ASSERT(nb0 == sizeof(float));
    GGML_ASSERT(nb0 <= nb1);
    GGML_ASSERT(nb1 <= nb2);
    GGML_ASSERT(nb2 <= nb3);

    if (params->type == GGML_TASK_INIT) {
        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }
    // parallelize by a rows using ggml_vec_dot_f16
    // total rows in a
    const int nr = nea1*nea2*nea3;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int ir = ir0; ir < ir1; ++ir) {
        // a indices
        const int ia3 = ir/(nea2*nea1);
        const int ia2 = (ir - ia3*nea2*nea1)/nea1;
        const int ia1 = (ir - ia3*nea2*nea1 - ia2*nea1);

        float * S = (float *) params->wdata + ith*(2*M + CACHE_LINE_SIZE_F32);

        for (int64_t ic = 0; ic < neb01; ++ic) {
            // b0 indices
            const int ib03 = ia3;
            const int ib02 = ia2;
            const int ib01 = ic;

            // S indices
            const int i1 = ib01;

            ggml_vec_dot_f16(nea0,
                    S + i1,
                    (ggml_fp16_t *) ((char *) b0->data + (ib01*nbb01 + ib02*nbb02 + ib03*nbb03)),
                    (ggml_fp16_t *) ((char *)  a->data + ( ia1*nba1  +  ia2*nba2  +  ia3*nba3)));
        }

        ggml_vec_add_f32(neb01, S, S, (float *) b1->data);
        //ggml_vec_gelu_f32(neb01, S, S);

        ggml_fp16_t * S16 = (ggml_fp16_t *) ((float *) params->wdata + ith*(2*M + CACHE_LINE_SIZE_F32) + M);

        for (int64_t i = 0; i < M; i++) {
            S16[i] = GGML_FP32_TO_FP16(S[i]);
        }

        ggml_vec_gelu_f16(neb01, S16, S16);

        {
            // dst indices
            const int i1 = ia1;
            const int i2 = ia2;
            const int i3 = ia3;

            for (int64_t ic = 0; ic < nec01; ++ic) {
                ggml_vec_dot_f16(neb01,
                        (float *)       ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)),
                        (ggml_fp16_t *) ((char *)  c0->data + (ic*nbc01 + i2*nbc02 + i3*nbc03)),
                        S16);
            }

            ggml_vec_add_f32(nec01,
                    (float *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3)),
                    (float *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3)),
                    (float *) c1->data);
        }
    }
}
  10958. static void ggml_compute_forward_flash_ff(
  10959. const struct ggml_compute_params * params,
  10960. const struct ggml_tensor * a,
  10961. const struct ggml_tensor * b0,
  10962. const struct ggml_tensor * b1,
  10963. const struct ggml_tensor * c0,
  10964. const struct ggml_tensor * c1,
  10965. struct ggml_tensor * dst) {
  10966. switch (b0->type) {
  10967. case GGML_TYPE_F16:
  10968. {
  10969. ggml_compute_forward_flash_ff_f16(params, a, b0, b1, c0, c1, dst);
  10970. } break;
  10971. case GGML_TYPE_F32:
  10972. {
  10973. GGML_ASSERT(false); // TODO
  10974. } break;
  10975. default:
  10976. {
  10977. GGML_ASSERT(false);
  10978. } break;
  10979. }
  10980. }
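
// [editor's note] per row of a, the f16 kernel above computes the classic two-matmul
// feed-forward: dst = proj_w @ gelu(fc_w @ a + fc_b) + proj_b, with the intermediate
// converted to fp16 so the second matmul can reuse ggml_vec_dot_f16. A minimal
// float-only sketch of the same math (illustrative only; names are hypothetical
// and not part of ggml; gelu uses the usual tanh approximation):
#if 0 // sketch, not compiled
static void example_flash_ff_row(
        int D, int M,
        const float * x,      // [D]    one row of a
        const float * fc_w,   // [M*D]  row i = fc_w[i,:]
        const float * fc_b,   // [M]
        const float * proj_w, // [D*M]  row i = proj_w[i,:]
        const float * proj_b, // [D]
        float * s,            // [M]    scratch, plays the role of S above
        float * y) {          // [D]    one row of dst
    for (int i = 0; i < M; ++i) {
        float dot = 0.0f;
        for (int j = 0; j < D; ++j) {
            dot += fc_w[i*D + j]*x[j];                  // S[i] = dot(fc_w[i,:], x)
        }
        s[i] = dot + fc_b[i];                           // S += fc_b
        const float t = s[i];
        s[i] = 0.5f*t*(1.0f + tanhf(0.79788456f*(t + 0.044715f*t*t*t))); // gelu(S)
    }
    for (int i = 0; i < D; ++i) {
        float dot = 0.0f;
        for (int j = 0; j < M; ++j) {
            dot += proj_w[i*M + j]*s[j];                // dst[i] = dot(proj_w[i,:], gelu(S))
        }
        y[i] = dot + proj_b[i];                         // dst += proj_b
    }
}
#endif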
// ggml_compute_forward_flash_attn_back

static void ggml_compute_forward_flash_attn_back_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * q,
        const struct ggml_tensor * k,
        const struct ggml_tensor * v,
        const struct ggml_tensor * d,
        const bool masked,
        struct ggml_tensor * dst) {
    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    GGML_TENSOR_LOCALS(int64_t, neq, q, ne);
    GGML_TENSOR_LOCALS(size_t, nbq, q, nb);
    GGML_TENSOR_LOCALS(int64_t, nek, k, ne);
    GGML_TENSOR_LOCALS(size_t, nbk, k, nb);
    GGML_TENSOR_LOCALS(int64_t, nev, v, ne);
    GGML_TENSOR_LOCALS(size_t, nbv, v, nb);
    GGML_TENSOR_LOCALS(int64_t, ned, d, ne);
    GGML_TENSOR_LOCALS(size_t, nbd, d, nb);
    GGML_TENSOR_LOCALS(int64_t, ne, dst, ne);
    GGML_TENSOR_LOCALS(size_t, nb, dst, nb);

    const int ith = params->ith;
    const int nth = params->nth;

    const int64_t D = neq0;
    const int64_t N = neq1;
    const int64_t P = nek1 - N;
    const int64_t M = P + N;

    const int Mup  = ggml_up(M, GGML_SOFT_MAX_UNROLL);
    const int mxDM = MAX(D, Mup);

    // GGML_ASSERT(ne0 == D);
    // GGML_ASSERT(ne1 == N);
    GGML_ASSERT(P >= 0);

    GGML_ASSERT(nbq0 == sizeof(float));
    GGML_ASSERT(nbk0 == sizeof(float));
    GGML_ASSERT(nbv0 == sizeof(float));

    GGML_ASSERT(neq0 == D);
    GGML_ASSERT(nek0 == D);
    GGML_ASSERT(nev1 == D);
    GGML_ASSERT(ned0 == D);

    GGML_ASSERT(neq1 == N);
    GGML_ASSERT(nek1 == N + P);
    GGML_ASSERT(nev1 == D);
    GGML_ASSERT(ned1 == N);

    // dst cannot be transposed or permuted
    GGML_ASSERT(nb0 == sizeof(float));
    GGML_ASSERT(nb0 <= nb1);
    GGML_ASSERT(nb1 <= nb2);
    GGML_ASSERT(nb2 <= nb3);

    if (params->type == GGML_TASK_INIT) {
        if (ith == 0) {
            memset(dst->data, 0, nb0*ne0*ne1*ne2*ne3);
        }
        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // parallelize by q rows using ggml_vec_dot_f32

    // total rows in q
    const int nr = neq2*neq3;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    const float scale = 1.0f/sqrtf(D);

    //printf("P=%d N=%d D=%d ir0=%d ir1=%d scale = %f\n", P, N, D, ir0, ir1, scale);

    for (int ir = ir0; ir < ir1; ++ir) {
        // q indices
        const int iq3 = ir/(neq2);
        const int iq2 = ir - iq3*neq2;
        for (int iq1 = 0; iq1 < neq1; ++iq1) {
            // not sure about CACHE_LINE_SIZE_F32..
            // - maybe it must not be multiplied by 2 and excluded from .. in SM 1*(..) offset?
            float * S  = (float *) params->wdata + ith*2*(mxDM + CACHE_LINE_SIZE_F32) + 0*(mxDM + CACHE_LINE_SIZE_F32);
            float * SM = (float *) params->wdata + ith*2*(mxDM + CACHE_LINE_SIZE_F32) + 1*(mxDM + CACHE_LINE_SIZE_F32);

            for (int i = M; i < Mup; ++i) {
                S[i] = -INFINITY;
            }

            for (int64_t ic = 0; ic < nek1; ++ic) {
                // k indices
                const int ik3 = iq3;
                const int ik2 = iq2;
                const int ik1 = ic;

                // S indices
                const int i1 = ik1;

                ggml_vec_dot_f32(neq0,
                        S + i1,
                        (float *) ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)),
                        (float *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)));
            }

            // scale
            ggml_vec_scale_f32(nek1, S, scale);

            if (masked) {
                for (int64_t i = P; i < M; i++) {
                    if (i > P + iq1) {
                        S[i] = -INFINITY;
                    }
                }
            }

            // softmax
            {
                float max = -INFINITY;
                ggml_vec_max_f32(M, &max, S);

                ggml_float sum = 0.0;
                {
#ifdef GGML_SOFT_MAX_ACCELERATE
                    max = -max;
                    vDSP_vsadd(SM, 1, &max, SM, 1, Mup);
                    vvexpf(SM, SM, &Mup);
                    ggml_vec_sum_f32(Mup, &sum, SM);
#else
                    uint16_t   scvt[GGML_SOFT_MAX_UNROLL];
                    ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 };

                    for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) {
                        float * SR = S  + i;
                        float * SW = SM + i;

                        for (int j = 0; j < GGML_SOFT_MAX_UNROLL; ++j) {
                            if (SR[j] == -INFINITY) {
                                SW[j] = 0.0f;
                            } else {
                                ggml_fp16_t s = GGML_FP32_TO_FP16(SR[j] - max);
                                memcpy(&scvt[j], &s, sizeof(uint16_t));
                                const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt[j]]);
                                sump[j] += (ggml_float)val;
                                SW[j] = val;
                            }
                        }
                    }

                    for (int i = 0; i < GGML_SOFT_MAX_UNROLL; i++) {
                        sum += sump[i];
                    }
#endif
                }

                assert(sum > 0.0);

                sum = 1.0/sum;
                ggml_vec_scale_f32(M, SM, sum);
            }

            // step-by-step explanation
            {
                // forward-process                   shape      grads from backward process
                // parallel_for iq2,iq3:
                //  k[:D,:M,:,:]                     [D,M,:,:]  grad[k][:D,:M,iq2,iq3]  += grad[kcur]
                //  q[:D,:N,:,:]                     [D,N,:,:]  grad[q][:D,iq1,iq2,iq3] += grad[qcur]
                //  v[:M,:D,:,:]                     [M,D,:,:]  grad[v][:M,:D,iq2,iq3]  += grad[vcur]
                //  for iq1:
                //   kcur = k[:D,:M,iq2,iq3]         [D,M,1,1]  grad[kcur] = grad[S1].T @ qcur
                //   qcur = q[:D,iq1,iq2,iq3]        [D,1,1,1]  grad[qcur] = grad[S1]   @ kcur
                //   vcur = v[:M,:D,iq2,iq3]         [M,D,1,1]  grad[vcur] = grad[S5].T @ S4
                //   S0 = -Inf                       [D,1,1,1]
                //  ~S1[i] = dot(kcur[:D,i], qcur)
                //   S1 = qcur @ kcur.T              [M,1,1,1]  grad[S1] = grad[S2] * scale
                //   S2 = S1 * scale                 [M,1,1,1]  grad[S2] = diag_mask_zero(grad[S3], P)
                //   S3 = diag_mask_inf(S2, P)       [M,1,1,1]  grad[S3] = S4 * (grad[S4] - dot(S4, grad[S4]))
                //   S4 = softmax(S3)                [M,1,1,1]  grad[S4] = grad[S5] @ vcur
                //  ~S5[i] = dot(vcur[:,i], S4)
                //   S5 = S4 @ vcur.T                [D,1,1,1]  grad[S5] = d[:D,iq1,iq2,iq3]
                //  ~dst[i,iq1,iq2,iq3]  = S5[i]                           ^
                //   dst[:D,iq1,iq2,iq3] = S5      | grad[dst[:D,iq1,iq2,iq3]] = d[:D,iq1,iq2,iq3]
                // dst                   backward-/   grad[dst]                = d
                //
                // output gradients with their dependencies:
                //
                // grad[kcur] = grad[S1].T @ qcur
                // grad[S1]   = diag_mask_zero(grad[S3], P) * scale
                // grad[S3]   = S4 * (grad[S4] - dot(S4, grad[S4]))
                // grad[S4]   = grad[S5] @ vcur
                // grad[S4]   = d[:D,iq1,iq2,iq3] @ vcur
                // grad[qcur] = grad[S1] @ kcur
                // grad[vcur] = grad[S5].T @ S4
                // grad[vcur] = d[:D,iq1,iq2,iq3].T @ S4
                //
                // in post-order:
                //
                // S1         = qcur @ kcur.T
                // S2         = S1 * scale
                // S3         = diag_mask_inf(S2, P)
                // S4         = softmax(S3)
                // grad[S4]   = d[:D,iq1,iq2,iq3] @ vcur
                // grad[S3]   = S4 * (grad[S4] - dot(S4, grad[S4]))
                // grad[S1]   = diag_mask_zero(grad[S3], P) * scale
                // grad[qcur] = grad[S1] @ kcur
                // grad[kcur] = grad[S1].T @ qcur
                // grad[vcur] = d[:D,iq1,iq2,iq3].T @ S4
                //
                // using less variables (SM=S4):
                //
                // S             = diag_mask_inf(qcur @ kcur.T * scale, P)
                // SM            = softmax(S)
                // S             = d[:D,iq1,iq2,iq3] @ vcur
                // dot_SM_gradSM = dot(SM, S)
                // S             = SM * (S - dot(SM, S))
                // S             = diag_mask_zero(S, P) * scale
                //
                // grad[q][:D,iq1,iq2,iq3] += S   @ kcur
                // grad[k][:D,:M,iq2,iq3]  += S.T @ qcur
                // grad[v][:M,:D,iq2,iq3]  += d[:D,iq1,iq2,iq3].T @ SM
            }

            // S = gradSM = d[:D,iq1,iq2,iq3] @ vcur
            // S = d[:D,iq1,iq2,iq3] @ vcur
            // S[:M] += vcur[:M,ic] * d[ic,iq1,iq2,iq3]
            ggml_vec_set_f32(M, S, 0);
            for (int64_t ic = 0; ic < D; ++ic) {
                // dst indices
                const int i1 = iq1;
                const int i2 = iq2;
                const int i3 = iq3;

                ggml_vec_mad_f32(M,
                        S,
                         (float *) ((char *) v->data + (          ic*nbv1 + i2*nbv2 + i3*nbv3)),
                        *(float *) ((char *) d->data + (ic*nbd0 + i1*nbd1 + i2*nbd2 + i3*nbd3)));
            }

            // S = SM * (S - dot(SM, S))
            float dot_SM_gradSM = 0;
            ggml_vec_dot_f32 (M, &dot_SM_gradSM, SM, S);
            ggml_vec_acc1_f32(M, S, -dot_SM_gradSM);
            ggml_vec_mul_f32 (M, S, S, SM);

            // S = diag_mask_zero(S, P) * scale
            if (masked) {
                // for (int64_t i = P + iq1 + 1; i < M; i++) {
                //     S[i] = 0;
                // }
                for (int64_t i = P; i < M; i++) {
                    if (i > P + iq1) {
                        S[i] = 0;
                    }
                }
            }
            ggml_vec_scale_f32(M, S, scale);

            void * grad_q = (char *) dst->data;
            void * grad_k = (char *) dst->data + nb0*D*N*neq2*neq3;
            void * grad_v = (char *) dst->data + nb0*D*N*neq2*neq3 + nb0*D*M*neq2*neq3;

            const size_t nbgq1 = nb0*neq0;
            const size_t nbgq2 = nb0*neq0*neq1;
            const size_t nbgq3 = nb0*neq0*neq1*neq2;

            const size_t nbgk1 = nb0*nek0;
            const size_t nbgk2 = nb0*nek0*nek1;
            const size_t nbgk3 = nb0*nek0*nek1*neq2;

            const size_t nbgv1 = nb0*nev0;
            const size_t nbgv2 = nb0*nev0*nev1;
            const size_t nbgv3 = nb0*nev0*nev1*neq2;

            // S    shape [M,1]
            // SM   shape [M,1]
            // kcur shape [D,M]
            // qcur shape [D,1]
            // vcur shape [M,D]
            //
            // grad[q][:D,iq1,iq2,iq3] += S @ kcur
            // grad[q][:D,iq1,iq2,iq3] += shape[M,1] @ shape[D,M]
            // grad[q][:D,iq1,iq2,iq3] += S[ic] * kcur[:D,ic]
            //
            //// grad[q][ic,iq1,iq2,iq3] += dot(kcur[:,ic],S.T)
            //// grad[q][ic,iq1,iq2,iq3] += dot(k[:D,ic,iq2,iq3],S.T)
            for (int64_t ic = 0; ic < M; ++ic) {
                // dst indices
                const int i1 = iq1;
                const int i2 = iq2;
                const int i3 = iq3;

                ggml_vec_mad_f32(D,
                        (float *) ((char *) grad_q  + (i1*nbgq1 + i2*nbgq2 + i3*nbgq3)),
                        (float *) ((char *) k->data + (ic*nbk1  + i2*nbk2  + i3*nbk3)),
                        S[ic]);
            }

            // grad[k][:D,:M,iq2,iq3] += S.T       @ qcur
            // grad[k][:D,ic,iq2,iq3] += S.T[0,ic] * qcur[:D,0]
            // grad[k][:D,ic,iq2,iq3] += S[ic]     * qcur[:D,0]
            for (int64_t ic = 0; ic < M; ++ic) {
                // dst indices
                const int i1 = iq1;
                const int i2 = iq2;
                const int i3 = iq3;

                // ggml_vec_set_f32(D,
                //         (float *) ((char *) grad_k + (ic*nbgk1 + i2*nbgk2 + i3*nbgk3)),
                //         0);
                ggml_vec_mad_f32(D,
                        (float *) ((char *) grad_k  + (ic*nbgk1 + i2*nbgk2 + i3*nbgk3)),
                        (float *) ((char *) q->data + (i1*nbq1  + i2*nbq2  + i3*nbq3)),
                        S[ic]);
            }

            // grad[v][:M,:D,iq2,iq3] += d[:D,iq1,iq2,iq3].T       @ SM
            // grad[v][:M,ic,iq2,iq3] += d[:D,iq1,iq2,iq3].T[0,ic] * SM[:M]
            // grad[v][:M,ic,iq2,iq3] += d[ic,iq1,iq2,iq3]         * SM[:M]
            for (int64_t ic = 0; ic < D; ++ic) {
                // dst indices
                const int i1 = iq1;
                const int i2 = iq2;
                const int i3 = iq3;

                // ggml_vec_set_f32(M,
                //         (float *) ((char *) grad_v + (ic*nbgv1 + i2*nbgv2 + i3*nbgv3)),
                //         0);
                ggml_vec_mad_f32(M,
                         (float *) ((char *) grad_v  + (          ic*nbgv1 + i2*nbgv2 + i3*nbgv3)),
                        SM,
                        *(float *) ((char *) d->data + (ic*nbd0 + i1*nbd1  + i2*nbd2  + i3*nbd3)));
            }
        }
    }
}

static void ggml_compute_forward_flash_attn_back(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * q,
        const struct ggml_tensor * k,
        const struct ggml_tensor * v,
        const struct ggml_tensor * d,
        const bool masked,
        struct ggml_tensor * dst) {
    switch (q->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_flash_attn_back_f32(params, q, k, v, d, masked, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
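
// [editor's note] the core of the derivation above is the softmax backward identity:
// given y = softmax(x) and upstream gradient dy, dx_i = y_i * (dy_i - dot(y, dy)).
// That is exactly the S = SM * (S - dot(SM, S)) step in the kernel, with y = SM and
// dy = S. A minimal standalone sketch (illustrative only, not part of ggml):
#if 0 // sketch, not compiled
static void example_softmax_backward(int n, float * dx, const float * y, const float * dy) {
    float dot_y_dy = 0.0f;
    for (int i = 0; i < n; ++i) {
        dot_y_dy += y[i]*dy[i];          // dot(y, dy)
    }
    for (int i = 0; i < n; ++i) {
        dx[i] = y[i]*(dy[i] - dot_y_dy); // dx = y * (dy - dot(y, dy))
    }
}
#endif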
// ggml_compute_forward_win_part

static void ggml_compute_forward_win_part_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne);
    GGML_TENSOR_LOCALS(int64_t, ne, dst, ne);

    const int32_t nep0 = ((const int32_t *)(dst->op_params))[0];
    const int32_t nep1 = ((const int32_t *)(dst->op_params))[1];
    const int32_t w    = ((const int32_t *)(dst->op_params))[2];

    assert(ne00 == ne0);
    assert(ne3  == nep0*nep1);

    // TODO: optimize / multi-thread
    for (int py = 0; py < nep1; ++py) {
        for (int px = 0; px < nep0; ++px) {
            const int64_t i3 = py*nep0 + px;
            for (int64_t i2 = 0; i2 < ne2; ++i2) {
                for (int64_t i1 = 0; i1 < ne1; ++i1) {
                    for (int64_t i0 = 0; i0 < ne0; ++i0) {
                        const int64_t i02 = py*w + i2;
                        const int64_t i01 = px*w + i1;
                        const int64_t i00 = i0;

                        const int64_t i = i3*ne2*ne1*ne0 + i2*ne1*ne0 + i1*ne0 + i0;
                        const int64_t j = i02*ne01*ne00  + i01*ne00   + i00;

                        if (py*w + i2 >= ne02 || px*w + i1 >= ne01) {
                            ((float *) dst->data)[i] = 0.0f;
                        } else {
                            ((float *) dst->data)[i] = ((float *) src0->data)[j];
                        }
                    }
                }
            }
        }
    }
}

static void ggml_compute_forward_win_part(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_win_part_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_win_unpart

static void ggml_compute_forward_win_unpart_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne);
    GGML_TENSOR_LOCALS(int64_t, ne, dst, ne);

    const int32_t w = ((const int32_t *)(dst->op_params))[0];

    // padding
    const int px = (w - ne1%w)%w;
    //const int py = (w - ne2%w)%w;

    const int npx = (px + ne1)/w;
    //const int npy = (py + ne2)/w;

    assert(ne0 == ne00);

    // TODO: optimize / multi-thread
    for (int64_t i2 = 0; i2 < ne2; ++i2) {
        for (int64_t i1 = 0; i1 < ne1; ++i1) {
            for (int64_t i0 = 0; i0 < ne0; ++i0) {
                const int ip2 = i2/w;
                const int ip1 = i1/w;

                const int64_t i02 = i2%w;
                const int64_t i01 = i1%w;
                const int64_t i00 = i0;

                const int64_t i = (ip2*npx + ip1)*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00 + i00;
                const int64_t j = i2*ne1*ne0 + i1*ne0 + i0;

                ((float *) dst->data)[j] = ((float *) src0->data)[i];
            }
        }
    }
}

static void ggml_compute_forward_win_unpart(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_win_unpart_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
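
// [editor's note] win_part/win_unpart implement a Swin/ViTDet-style window partition:
// a [C, W, H] feature map (ne0 = C fastest) is cut into nep0 x nep1 windows of size
// w x w, zero-padded at the right/bottom edge and stacked along dim 3; win_unpart
// inverts the mapping and drops the padding. A minimal index-level sketch of the
// round trip on flat arrays (illustrative only, not part of ggml):
#if 0 // sketch, not compiled
static void example_win_roundtrip(int C, int H, int W, int w,
                                  const float * x, float * tmp, float * y) {
    const int npx = (W + w - 1)/w; // windows along x, including padding
    const int npy = (H + w - 1)/w; // windows along y, including padding
    // partition: tmp[win][wy][wx][c] = x[py*w+wy][px*w+wx][c], zero outside the map
    for (int py = 0; py < npy; ++py)
    for (int px = 0; px < npx; ++px)
    for (int wy = 0; wy < w;   ++wy)
    for (int wx = 0; wx < w;   ++wx)
    for (int c  = 0; c  < C;   ++c) {
        const int win = py*npx + px;
        const int yy  = py*w + wy;
        const int xx  = px*w + wx;
        tmp[((win*w + wy)*w + wx)*C + c] =
            (yy < H && xx < W) ? x[(yy*W + xx)*C + c] : 0.0f;
    }
    // unpartition: inverse index mapping, padding discarded
    for (int yy = 0; yy < H; ++yy)
    for (int xx = 0; xx < W; ++xx)
    for (int c  = 0; c  < C; ++c) {
        const int win = (yy/w)*npx + (xx/w);
        y[(yy*W + xx)*C + c] = tmp[((win*w + yy%w)*w + xx%w)*C + c];
    }
}
#endif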
// ggml_compute_forward_unary

static void ggml_compute_forward_unary(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    const enum ggml_unary_op op = ggml_get_unary_op(dst);

    switch (op) {
        case GGML_UNARY_OP_ABS:
            {
                ggml_compute_forward_abs(params, src0, dst);
            } break;
        case GGML_UNARY_OP_SGN:
            {
                ggml_compute_forward_sgn(params, src0, dst);
            } break;
        case GGML_UNARY_OP_NEG:
            {
                ggml_compute_forward_neg(params, src0, dst);
            } break;
        case GGML_UNARY_OP_STEP:
            {
                ggml_compute_forward_step(params, src0, dst);
            } break;
        case GGML_UNARY_OP_TANH:
            {
                ggml_compute_forward_tanh(params, src0, dst);
            } break;
        case GGML_UNARY_OP_ELU:
            {
                ggml_compute_forward_elu(params, src0, dst);
            } break;
        case GGML_UNARY_OP_RELU:
            {
                ggml_compute_forward_relu(params, src0, dst);
            } break;
        case GGML_UNARY_OP_GELU:
            {
                ggml_compute_forward_gelu(params, src0, dst);
            } break;
        case GGML_UNARY_OP_GELU_QUICK:
            {
                ggml_compute_forward_gelu_quick(params, src0, dst);
            } break;
        case GGML_UNARY_OP_SILU:
            {
                ggml_compute_forward_silu(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_map_unary

static void ggml_compute_forward_map_unary_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst,
        const ggml_unary_op_f32_t fun) {
    GGML_ASSERT(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert( dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        fun(nc,
                (float *) ((char *) dst->data  + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

static void ggml_compute_forward_map_unary(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst,
        const ggml_unary_op_f32_t fun) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_map_unary_f32(params, src0, dst, fun);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
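
// [editor's note] map_unary applies a user callback row by row; the callback has the
// (n, dst_row, src_row) shape of ggml_unary_op_f32_t. A minimal example op
// (illustrative only; softplus is not a ggml builtin):
#if 0 // sketch, not compiled
static void example_softplus_f32(const int n, float * y, const float * x) {
    for (int i = 0; i < n; ++i) {
        y[i] = logf(1.0f + expf(x[i])); // softplus, applied elementwise to one row
    }
}
// built into a graph with: ggml_map_unary_f32(ctx, a, example_softplus_f32);
#endif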
// ggml_compute_forward_map_binary

static void ggml_compute_forward_map_binary_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst,
        const ggml_binary_op_f32_t fun) {
    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert( dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));
    assert(src1->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        fun(nc,
                (float *) ((char *) dst->data  + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])),
                (float *) ((char *) src1->data + i*(src1->nb[1])));
    }
}

static void ggml_compute_forward_map_binary(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst,
        const ggml_binary_op_f32_t fun) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_map_binary_f32(params, src0, src1, dst, fun);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_map_custom1

static void ggml_compute_forward_map_custom1_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * a,
        struct ggml_tensor * dst,
        const ggml_custom1_op_f32_t fun) {
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    fun(dst, a);
}

static void ggml_compute_forward_map_custom1(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * a,
        struct ggml_tensor * dst,
        const ggml_custom1_op_f32_t fun) {
    switch (a->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_map_custom1_f32(params, a, dst, fun);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_map_custom2

static void ggml_compute_forward_map_custom2_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * a,
        const struct ggml_tensor * b,
        struct ggml_tensor * dst,
        const ggml_custom2_op_f32_t fun) {
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    fun(dst, a, b);
}

static void ggml_compute_forward_map_custom2(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * a,
        const struct ggml_tensor * b,
        struct ggml_tensor * dst,
        const ggml_custom2_op_f32_t fun) {
    switch (a->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_map_custom2_f32(params, a, b, dst, fun);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_map_custom3

static void ggml_compute_forward_map_custom3_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * a,
        const struct ggml_tensor * b,
        const struct ggml_tensor * c,
        struct ggml_tensor * dst,
        const ggml_custom3_op_f32_t fun) {
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    fun(dst, a, b, c);
}

static void ggml_compute_forward_map_custom3(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * a,
        const struct ggml_tensor * b,
        const struct ggml_tensor * c,
        struct ggml_tensor * dst,
        const ggml_custom3_op_f32_t fun) {
    switch (a->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_map_custom3_f32(params, a, b, c, dst, fun);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
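
// [editor's note] the map_* ops receive the user callback by value: the graph builder
// memcpy's the raw function pointer into tensor->op_params, and the GGML_OP_MAP_*
// cases in ggml_compute_forward below memcpy it back out before calling these
// dispatchers. A minimal sketch of that round trip (illustrative only; the helper
// name is hypothetical):
#if 0 // sketch, not compiled
static void example_op_params_roundtrip(struct ggml_tensor * t, ggml_custom1_op_f32_t fun_in) {
    // store: done once at graph-build time
    memcpy(t->op_params, &fun_in, sizeof(fun_in));

    // load: done at compute time
    ggml_custom1_op_f32_t fun_out;
    memcpy(&fun_out, t->op_params, sizeof(fun_out));

    assert(fun_out == fun_in); // the same pointer comes back out
}
#endif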
// ggml_compute_forward_cross_entropy_loss

static void ggml_compute_forward_cross_entropy_loss_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_is_contiguous(src0));
    GGML_ASSERT(ggml_is_contiguous(src1));
    GGML_ASSERT(ggml_is_scalar(dst));
    GGML_ASSERT(ggml_are_same_shape(src0, src1));

    const int ith = params->ith;
    const int nth = params->nth;

    float * sums = (float *) params->wdata;

    // TODO: handle transposed/permuted matrices
    const int nc = src0->ne[0];
    const int nr = ggml_nrows(src0);

    if (params->type == GGML_TASK_INIT) {
        if (ith == 0) {
            memset(sums, 0, sizeof(float) * (nth + nth * nc));
        }
        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        if (ith == 0) {
            float * dp = (float *) dst->data;
            ggml_vec_sum_f32(nth, dp, sums);
            dp[0] *= -1.0f;
        }
        return;
    }

    const double eps = 1e-9;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        float * s0 = (float *)((char *) src0->data + i1*src0->nb[1]);
        float * s1 = (float *)((char *) src1->data + i1*src1->nb[1]);
        float * st = (float *) params->wdata + nth + ith*nc;

#ifndef NDEBUG
        for (int i = 0; i < nc; ++i) {
            //printf("p[%d] = %f\n", i, p[i]);
            assert(!isnan(s0[i]));
            assert(!isnan(s1[i]));
        }
#endif
        // soft_max
        ggml_float sum = 0.0;
        {
            float max = -INFINITY;
            ggml_vec_max_f32(nc, &max, s0);

            uint16_t scvt;
            for (int i = 0; i < nc; i++) {
                if (s0[i] == -INFINITY) {
                    st[i] = 0.0f;
                } else {
                    // const float val = (s0[i] == -INFINITY) ? 0.0 : exp(s0[i] - max);
                    ggml_fp16_t s = GGML_FP32_TO_FP16(s0[i] - max);
                    memcpy(&scvt, &s, sizeof(scvt));
                    const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt]);
                    sum += (ggml_float)val;
                    st[i] = val;
                }
            }

            assert(sum > 0.0);
            // sum = 1.0/sum;
        }

        // avoid log(0) by rescaling from [0..1] to [eps..1]
        sum = (1.0 - eps) / sum;
        ggml_vec_scale_f32(nc, st, sum);
        ggml_vec_add1_f32(nc, st, st, eps);
        ggml_vec_log_f32(nc, st, st);
        ggml_vec_mul_f32(nc, st, st, s1);
        ggml_vec_sum_f32(nc, sums + ith, st);

#ifndef NDEBUG
        for (int i = 0; i < nc; ++i) {
            assert(!isnan(st[i]));
            assert(!isinf(st[i]));
        }
#endif
    }
}

static void ggml_compute_forward_cross_entropy_loss(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_cross_entropy_loss_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
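
// [editor's note] per row, the kernel above accumulates
//   sums[ith] += sum_i s1[i] * log(eps + (1 - eps)*softmax(s0)[i])
// and FINALIZE negates the total; the max is subtracted inside the softmax for
// numerical stability and the [0..1] -> [eps..1] rescaling guards against log(0).
// A minimal scalar sketch of one row (illustrative only, not part of ggml):
#if 0 // sketch, not compiled
static float example_cross_entropy_row(int n, const float * logits, const float * labels) {
    const double eps = 1e-9;

    float max = -INFINITY;
    for (int i = 0; i < n; ++i) {
        max = logits[i] > max ? logits[i] : max; // subtract the max for stability
    }

    double sum = 0.0;
    for (int i = 0; i < n; ++i) {
        sum += exp((double) logits[i] - max);
    }

    double loss = 0.0;
    for (int i = 0; i < n; ++i) {
        const double p = exp((double) logits[i] - max)/sum;  // softmax(s0)[i]
        loss -= (double) labels[i]*log(eps + (1.0 - eps)*p); // rescaled to avoid log(0)
    }
    return (float) loss;
}
#endif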
// ggml_compute_forward_cross_entropy_loss_back

static void ggml_compute_forward_cross_entropy_loss_back_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        const struct ggml_tensor * opt0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_is_contiguous(dst));
    GGML_ASSERT(ggml_is_contiguous(src0));
    GGML_ASSERT(ggml_is_contiguous(src1));
    GGML_ASSERT(ggml_is_contiguous(opt0));
    GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));

    const int64_t ith = params->ith;
    const int64_t nth = params->nth;

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const float eps = 1e-9f;

    // TODO: handle transposed/permuted matrices
    const int64_t nc = src0->ne[0];
    const int64_t nr = ggml_nrows(src0);

    // rows per thread
    const int64_t dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int64_t ir0 = dr*ith;
    const int64_t ir1 = MIN(ir0 + dr, nr);

    float * d = (float *) opt0->data;

    for (int64_t i1 = ir0; i1 < ir1; i1++) {
        float * ds0 = (float *)((char *) dst->data  + i1*dst->nb[1]);
        float * s0  = (float *)((char *) src0->data + i1*src0->nb[1]);
        float * s1  = (float *)((char *) src1->data + i1*src1->nb[1]);
        float * sm  = (float *) params->wdata + ith*nc;

#ifndef NDEBUG
        for (int i = 0; i < nc; ++i) {
            //printf("p[%d] = %f\n", i, p[i]);
            assert(!isnan(s0[i]));
            assert(!isnan(s1[i]));
        }
#endif
        // step by step explanation:
        {
            //float * sums = (float *) params->wdata;

            // forward pass with annotated gradients from backward pass
            // (built by going in reverse operation order, adding to gradients of current operation args)
            // st0 = exp(s0-max(s0))                                                       grad[st0] = grad[st1]*(1.0 - eps)/sum
            //                      from softmax_back:                                     grad[s0]  = st1_k * (grad[st1]_k - dot(st1, grad[st1]))
            // ggml_vec_scale_f32(nc, st, sum);           // st1 = st0/sum = softmax(s0)   grad[st1] = grad[st2]*(1.0 - eps)
            // ggml_vec_scale_f32(nc, st, (1.0f - eps));  // st2 = st1*(1.0 - eps)         grad[st2] = grad[st3]
            // ggml_vec_add1_f32(nc, st, st, eps);        // st3 = st2 + eps               grad[st3] = grad[st4]/st3
            // ggml_vec_log_f32(nc, st, st);              // st4 = log(st3)                grad[st4] = grad[st5] * s1
            // ggml_vec_mul_f32(nc, st, st, s1);          // st5 = st4 * s1                grad[st5] = grad[sums[ith]]
            // ggml_vec_sum_f32(nc, sums + ith, st);      // sums[ith] = st5               grad[sums[ith]] = grad[cross_entropy_loss] = -grad[cel]

            // substitute into grad[st1], because we can reuse softmax_back from this point on
            // grad[st1] = -grad[cel]*s1*(1.0 - eps)/(eps + softmax(s0)*(1.0 - eps))
            // postorder:
            // grad[st1] := softmax(s0)
            // grad[st1] := grad[st1]*(1.0 - eps)
            // grad[st1] := grad[st1] + eps
            // grad[st1] := s1 / grad[st1]
            // grad[st1] := grad[st1]*(1.0-eps)*-grad[cel]

            // src0 gradients by going through softmax_back
            // grad[s0] = st1_k * (grad[st1]_k - dot(st1, grad[st1]))
            // from softmax_back:
            // dxk = yk * (dyk - dot(y, dy))
            // dot_y_dy := dot(y, dy)
            // dx := dy
            // dx := dx - dot_y_dy
            // dx := dx * y
            // postorder:
            // dot_st1_dst1 := dot(st1, grad[st1])
            // grad[s0] := grad[st1]
            // grad[s0] := grad[s0] - dot_st1_dst1
            // grad[s0] := grad[s0] * st1

            // prepend postorder from grad[st1] directly using grad[s0] as memory location, as we will grad[s0] := grad[st1]
            // sm           := softmax(s0)
            // grad[s0]     := sm*(1.0 - eps)
            // grad[s0]     := grad[s0] + eps
            // grad[s0]     := s1 / grad[s0]
            // grad[s0]     := grad[s0]*(1.0-eps)*-grad[cel]
            // dot_st1_dst1 := dot(sm, grad[s0])
            // grad[s0]     := grad[s0] - dot_st1_dst1
            // grad[s0]     := grad[s0] * sm
        }

        // soft_max
        ggml_float sum = 0.0;
        {
            float max = -INFINITY;
            ggml_vec_max_f32(nc, &max, s0);

            uint16_t scvt;
            for (int i = 0; i < nc; i++) {
                if (s0[i] == -INFINITY) {
                    sm[i] = 0.0f;
                } else {
                    // const float val = (s0[i] == -INFINITY) ? 0.0 : exp(s0[i] - max);
                    ggml_fp16_t s = GGML_FP32_TO_FP16(s0[i] - max);
                    memcpy(&scvt, &s, sizeof(scvt));
                    const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt]);
                    sum += (ggml_float)val;
                    sm[i] = val;
                }
            }

            assert(sum > 0.0);
            sum = 1.0/sum;
        }

        float dot_st1_dst1 = 0;
        ggml_vec_scale_f32(nc, sm, sum);
        ggml_vec_cpy_f32  (nc, ds0, sm);
        ggml_vec_scale_f32(nc, ds0, (1.0f - eps));
        ggml_vec_add1_f32 (nc, ds0, ds0, eps);
        ggml_vec_div_f32  (nc, ds0, s1, ds0);
        ggml_vec_scale_f32(nc, ds0, -(1.0f - eps)*d[0]);
        ggml_vec_dot_f32  (nc, &dot_st1_dst1, sm, ds0);
        ggml_vec_acc1_f32 (nc, ds0, -dot_st1_dst1);
        ggml_vec_mul_f32  (nc, ds0, ds0, sm);

#ifndef NDEBUG
        for (int i = 0; i < nc; ++i) {
            assert(!isnan(sm[i]));
            assert(!isinf(sm[i]));
            assert(!isnan(ds0[i]));
            assert(!isinf(ds0[i]));
        }
#endif
    }
}

static void ggml_compute_forward_cross_entropy_loss_back(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        const struct ggml_tensor * opt0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_cross_entropy_loss_back_f32(params, src0, src1, opt0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
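
// [editor's note] collapsing the postorder above into one formula: with sm = softmax(s0)
// and d0 = grad[cross_entropy_loss],
//   g_i   = -d0 * s1_i * (1 - eps) / (eps + (1 - eps)*sm_i)      // grad[st1]
//   ds0_i = sm_i * (g_i - dot(sm, g))                            // softmax backward
// A minimal scalar sketch mirroring the ggml_vec_* sequence above (illustrative only,
// not part of ggml):
#if 0 // sketch, not compiled
static void example_cel_backward_row(int n, float d0,
                                     const float * sm, const float * s1, float * ds0) {
    const float eps = 1e-9f;

    float dot_sm_g = 0.0f;
    for (int i = 0; i < n; ++i) {
        ds0[i] = -(1.0f - eps)*d0*s1[i]/(eps + (1.0f - eps)*sm[i]); // g = grad[st1]
        dot_sm_g += sm[i]*ds0[i];                                   // dot(sm, g)
    }
    for (int i = 0; i < n; ++i) {
        ds0[i] = sm[i]*(ds0[i] - dot_sm_g); // ds0 = sm * (g - dot(sm, g))
    }
}
#endif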
/////////////////////////////////

static void ggml_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor) {
    GGML_ASSERT(params);

#ifdef GGML_USE_CUBLAS
    bool skip_cpu = ggml_cuda_compute_forward(params, tensor);
    if (skip_cpu) {
        return;
    }
    GGML_ASSERT(tensor->src[0] == NULL || tensor->src[0]->backend == GGML_BACKEND_CPU);
    GGML_ASSERT(tensor->src[1] == NULL || tensor->src[1]->backend == GGML_BACKEND_CPU);
#endif // GGML_USE_CUBLAS

    switch (tensor->op) {
        case GGML_OP_DUP:
            {
                ggml_compute_forward_dup(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_ADD:
            {
                ggml_compute_forward_add(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_ADD1:
            {
                ggml_compute_forward_add1(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_ACC:
            {
                ggml_compute_forward_acc(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_SUB:
            {
                ggml_compute_forward_sub(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_MUL:
            {
                ggml_compute_forward_mul(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_DIV:
            {
                ggml_compute_forward_div(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_SQR:
            {
                ggml_compute_forward_sqr(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_SQRT:
            {
                ggml_compute_forward_sqrt(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_LOG:
            {
                ggml_compute_forward_log(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_SUM:
            {
                ggml_compute_forward_sum(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_SUM_ROWS:
            {
                ggml_compute_forward_sum_rows(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_MEAN:
            {
                ggml_compute_forward_mean(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_ARGMAX:
            {
                ggml_compute_forward_argmax(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_REPEAT:
            {
                ggml_compute_forward_repeat(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_REPEAT_BACK:
            {
                ggml_compute_forward_repeat_back(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_SILU_BACK:
            {
                ggml_compute_forward_silu_back(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_NORM:
            {
                ggml_compute_forward_norm(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_RMS_NORM:
            {
                ggml_compute_forward_rms_norm(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_RMS_NORM_BACK:
            {
                ggml_compute_forward_rms_norm_back(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_MUL_MAT:
            {
                ggml_compute_forward_mul_mat(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_OUT_PROD:
            {
                ggml_compute_forward_out_prod(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_SCALE:
            {
                ggml_compute_forward_scale(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_SET:
            {
                ggml_compute_forward_set(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_CPY:
            {
                ggml_compute_forward_cpy(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_CONT:
            {
                ggml_compute_forward_cont(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_RESHAPE:
            {
                ggml_compute_forward_reshape(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_VIEW:
            {
                ggml_compute_forward_view(params, tensor->src[0]);
            } break;
        case GGML_OP_PERMUTE:
            {
                ggml_compute_forward_permute(params, tensor->src[0]);
            } break;
        case GGML_OP_TRANSPOSE:
            {
                ggml_compute_forward_transpose(params, tensor->src[0]);
            } break;
        case GGML_OP_GET_ROWS:
            {
                ggml_compute_forward_get_rows(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_GET_ROWS_BACK:
            {
                ggml_compute_forward_get_rows_back(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor);
            } break;
        case GGML_OP_DIAG:
            {
                ggml_compute_forward_diag(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_DIAG_MASK_INF:
            {
                ggml_compute_forward_diag_mask_inf(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_DIAG_MASK_ZERO:
            {
                ggml_compute_forward_diag_mask_zero(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_SOFT_MAX:
            {
                ggml_compute_forward_soft_max(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_SOFT_MAX_BACK:
            {
                ggml_compute_forward_soft_max_back(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_ROPE:
            {
                ggml_compute_forward_rope(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_ROPE_BACK:
            {
                ggml_compute_forward_rope_back(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_ALIBI:
            {
                ggml_compute_forward_alibi(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_CLAMP:
            {
                ggml_compute_forward_clamp(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_CONV_1D:
            {
                ggml_compute_forward_conv_1d(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_CONV_2D:
            {
                ggml_compute_forward_conv_2d(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_POOL_1D:
            {
                ggml_compute_forward_pool_1d(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_POOL_2D:
            {
                ggml_compute_forward_pool_2d(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_FLASH_ATTN:
            {
                const int32_t t = ggml_get_op_params_i32(tensor, 0);
                GGML_ASSERT(t == 0 || t == 1);
                const bool masked = t != 0;
                ggml_compute_forward_flash_attn(params, tensor->src[0], tensor->src[1], tensor->src[2], masked, tensor);
            } break;
        case GGML_OP_FLASH_FF:
            {
                ggml_compute_forward_flash_ff(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor->src[3], tensor->src[4], tensor);
            } break;
        case GGML_OP_FLASH_ATTN_BACK:
            {
                int32_t t = ggml_get_op_params_i32(tensor, 0);
                GGML_ASSERT(t == 0 || t == 1);
                bool masked = t != 0;
                ggml_compute_forward_flash_attn_back(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor->src[3], masked, tensor);
            } break;
        case GGML_OP_WIN_PART:
            {
                ggml_compute_forward_win_part(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_WIN_UNPART:
            {
                ggml_compute_forward_win_unpart(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_UNARY:
            {
                ggml_compute_forward_unary(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_MAP_UNARY:
            {
                ggml_unary_op_f32_t fun;
                memcpy(&fun, tensor->op_params, sizeof(fun));
                ggml_compute_forward_map_unary(params, tensor->src[0], tensor, fun);
            } break;
        case GGML_OP_MAP_BINARY:
            {
                ggml_binary_op_f32_t fun;
                memcpy(&fun, tensor->op_params, sizeof(fun));
                ggml_compute_forward_map_binary(params, tensor->src[0], tensor->src[1], tensor, fun);
            } break;
        case GGML_OP_MAP_CUSTOM1:
            {
                ggml_custom1_op_f32_t fun;
                memcpy(&fun, tensor->op_params, sizeof(fun));
                ggml_compute_forward_map_custom1(params, tensor->src[0], tensor, fun);
            } break;
        case GGML_OP_MAP_CUSTOM2:
            {
                ggml_custom2_op_f32_t fun;
                memcpy(&fun, tensor->op_params, sizeof(fun));
                ggml_compute_forward_map_custom2(params, tensor->src[0], tensor->src[1], tensor, fun);
            } break;
        case GGML_OP_MAP_CUSTOM3:
            {
                ggml_custom3_op_f32_t fun;
                memcpy(&fun, tensor->op_params, sizeof(fun));
                ggml_compute_forward_map_custom3(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor, fun);
            } break;
        case GGML_OP_CROSS_ENTROPY_LOSS:
            {
                ggml_compute_forward_cross_entropy_loss(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_CROSS_ENTROPY_LOSS_BACK:
            {
                ggml_compute_forward_cross_entropy_loss_back(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor);
            } break;
        case GGML_OP_NONE:
            {
                // nop
            } break;
        case GGML_OP_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }
}
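
// [editor's note] every op dispatched above runs under the same three-phase protocol:
// each worker thread calls ggml_compute_forward once per phase with params->type set
// to GGML_TASK_INIT, GGML_TASK_COMPUTE, or GGML_TASK_FINALIZE, and splits work by
// (params->ith, params->nth). A minimal sketch of the idiom used throughout this
// file (illustrative only; the function name is hypothetical):
#if 0 // sketch, not compiled
static void example_task_phases(const struct ggml_compute_params * params, int nr) {
    if (params->type == GGML_TASK_INIT) {
        return; // one-time setup (often: thread 0 zeroes the output or scratch)
    }
    if (params->type == GGML_TASK_FINALIZE) {
        return; // cross-thread reduction, if the op needs one
    }

    // GGML_TASK_COMPUTE: static row partition
    const int dr  = (nr + params->nth - 1)/params->nth; // rows per thread, rounded up
    const int ir0 = dr*params->ith;                     // first row for this thread
    const int ir1 = MIN(ir0 + dr, nr);                  // one past the last row

    for (int ir = ir0; ir < ir1; ++ir) {
        // process row ir
    }
}
#endif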
////////////////////////////////////////////////////////////////////////////////

static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor * tensor, bool inplace) {
    struct ggml_tensor * src0 = tensor->src[0];
    struct ggml_tensor * src1 = tensor->src[1];

    switch (tensor->op) {
        case GGML_OP_DUP:
            {
                if (src0->grad) {
                    src0->grad = ggml_add_impl(ctx, src0->grad, tensor->grad, inplace);
                }
            } break;
        case GGML_OP_ADD:
            {
                if (src0->grad) {
                    src0->grad = ggml_add_impl(ctx, src0->grad, tensor->grad, inplace);
                }
                if (src1->grad) {
                    src1->grad = ggml_add_impl(ctx, src1->grad, tensor->grad, inplace);
                }
            } break;
        case GGML_OP_ADD1:
            {
                if (src0->grad) {
                    src0->grad = ggml_add_impl(ctx, src0->grad, tensor->grad, inplace);
                }
                if (src1->grad) {
                    src1->grad = ggml_add_impl(ctx,
                        src1->grad,
                        ggml_mean(ctx, tensor->grad), // TODO: should probably be sum instead of mean
                        inplace);
                }
            } break;
        case GGML_OP_ACC:
            {
                if (src0->grad) {
                    src0->grad = ggml_add_impl(ctx, src0->grad, tensor->grad, inplace);
                }
                if (src1->grad) {
                    const size_t nb1    = ((int32_t *) tensor->op_params)[0];
                    const size_t nb2    = ((int32_t *) tensor->op_params)[1];
                    const size_t nb3    = ((int32_t *) tensor->op_params)[2];
                    const size_t offset = ((int32_t *) tensor->op_params)[3];

                    struct ggml_tensor * tensor_grad_view = ggml_view_4d(ctx,
                        tensor->grad,
                        src1->grad->ne[0],
                        src1->grad->ne[1],
                        src1->grad->ne[2],
                        src1->grad->ne[3],
                        nb1, nb2, nb3, offset);

                    src1->grad =
                        ggml_add_impl(ctx,
                            src1->grad,
                            ggml_reshape(ctx,
                                ggml_cont(ctx, tensor_grad_view),
                                src1->grad),
                            inplace);
                }
            } break;
        case GGML_OP_SUB:
            {
                if (src0->grad) {
                    src0->grad = ggml_add_impl(ctx, src0->grad, tensor->grad, inplace);
                }
                if (src1->grad) {
                    src1->grad = ggml_sub_impl(ctx, src1->grad, tensor->grad, inplace);
                }
            } break;
        case GGML_OP_MUL:
            {
                if (src0->grad) {
                    src0->grad =
                        ggml_add_impl(ctx,
                            src0->grad,
                            ggml_mul(ctx, src1, tensor->grad),
                            inplace);
                }
                if (src1->grad) {
                    src1->grad =
                        ggml_add_impl(ctx,
                            src1->grad,
                            ggml_mul(ctx, src0, tensor->grad),
                            inplace);
                }
            } break;
        case GGML_OP_DIV:
            {
                if (src0->grad) {
                    src0->grad =
                        ggml_add_impl(ctx,
                            src0->grad,
                            ggml_div(ctx, tensor->grad, src1),
                            inplace);
                }
                if (src1->grad) {
                    src1->grad =
                        ggml_sub_impl(ctx,
                            src1->grad,
                            ggml_mul(ctx,
                                tensor->grad,
                                ggml_div(ctx, tensor, src1)),
                            inplace);
                }
            } break;
        case GGML_OP_SQR:
            {
                if (src0->grad) {
                    src0->grad =
                        ggml_add_impl(ctx,
                            src0->grad,
                            ggml_scale(ctx,
                                ggml_mul(ctx, src0, tensor->grad),
                                ggml_new_f32(ctx, 2.0f)),
                            inplace);
                }
            } break;
        case GGML_OP_SQRT:
            {
                if (src0->grad) {
                    src0->grad =
                        ggml_add_impl(ctx,
                            src0->grad,
                            ggml_scale(ctx,
                                ggml_div(ctx,
                                    tensor->grad,
                                    tensor),
                                ggml_new_f32(ctx, 0.5f)),
                            inplace);
                }
            } break;
        case GGML_OP_LOG:
            {
                if (src0->grad) {
                    src0->grad =
                        ggml_add_impl(ctx,
                            src0->grad,
                            ggml_div(ctx,
                                tensor->grad,
                                src0),
                            inplace);
                }
            } break;
        case GGML_OP_SUM:
            {
                if (src0->grad) {
                    src0->grad =
                        ggml_add1_impl(ctx,
                            src0->grad,
                            tensor->grad,
                            inplace);
                }
            } break;
        case GGML_OP_SUM_ROWS:
            {
                if (src0->grad) {
                    src0->grad =
                        ggml_add_impl(ctx,
                            src0->grad,
                            ggml_repeat(ctx,
                                tensor->grad,
                                src0->grad),
                            inplace);
                }
            } break;
        case GGML_OP_MEAN:
        case GGML_OP_ARGMAX:
            {
                GGML_ASSERT(false); // TODO: implement
            } break;
        case GGML_OP_REPEAT:
            {
                // necessary for llama
                if (src0->grad) {
                    src0->grad = ggml_add_impl(ctx,
                        src0->grad,
                        ggml_repeat_back(ctx, tensor->grad, src0->grad),
                        inplace);
                }
            } break;
        case GGML_OP_REPEAT_BACK:
            {
                if (src0->grad) {
                    // TODO: test this
                    src0->grad = ggml_add_impl(ctx,
                        src0->grad,
                        ggml_repeat(ctx, tensor->grad, src0->grad),
                        inplace);
                }
            } break;
        case GGML_OP_SILU_BACK:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_NORM:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_RMS_NORM:
            {
                // necessary for llama
                if (src0->grad) {
                    src0->grad = ggml_add_impl(ctx,
                        src0->grad,
                        ggml_rms_norm_back(ctx, src0, tensor->grad),
                        inplace);
                }
            } break;
        case GGML_OP_RMS_NORM_BACK:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_MUL_MAT:
            {
                // https://cs231n.github.io/optimization-2/#staged
                // # forward pass
                // s0 = np.random.randn(5, 10)
                // s1 = np.random.randn(10, 3)
                // t = s0.dot(s1)

                // # now suppose we had the gradient on t from above in the circuit
                // dt = np.random.randn(*t.shape) # same shape as t
                // ds0 = dt.dot(s1.T) #.T gives the transpose of the matrix
                // ds1 = s0.T.dot(dt)

                // tensor.shape [m,p]
                // src0.shape   [n,m]
                // src1.shape   [n,p]

                // necessary for llama
                if (src0->grad) {
                    src0->grad =
                        ggml_add_impl(ctx,
                            src0->grad,
                            ggml_out_prod(ctx, // [n,m]
                                src1,          // [n,p]
                                tensor->grad), // [m,p]
                            inplace);
                }
                if (src1->grad) {
                    src1->grad =
                        ggml_add_impl(ctx,
                            src1->grad,
                            // ggml_mul_mat(ctx,                   // [n,p]
                            //     ggml_cont(ctx,                  // [m,n]
                            //         ggml_transpose(ctx, src0)), // [m,n]
                            //     tensor->grad),                  // [m,p]

                            // when src0 is bigger than tensor->grad (this is mostly the case in llama),
                            // avoid transpose of src0, rather transpose smaller tensor->grad
                            // and then use ggml_out_prod
                            ggml_out_prod(ctx,      // [n,p]
                                src0,               // [n,m]
                                ggml_transpose(ctx, // [p,m]
                                    tensor->grad)), // [m,p]
                            inplace);
                }
            } break;
        case GGML_OP_OUT_PROD:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_SCALE:
            {
                // necessary for llama
                if (src0->grad) {
                    src0->grad =
                        ggml_add_impl(ctx,
                            src0->grad,
                            ggml_scale_impl(ctx, tensor->grad, src1, false),
                            inplace);
                }
                if (src1->grad) {
                    src1->grad =
                        ggml_add_impl(ctx,
                            src1->grad,
                            ggml_sum(ctx, ggml_mul_impl(ctx, tensor->grad, src0, false)),
                            inplace);
                }
            } break;
        case GGML_OP_SET:
            {
                const size_t nb1    = ((int32_t *) tensor->op_params)[0];
                const size_t nb2    = ((int32_t *) tensor->op_params)[1];
                const size_t nb3    = ((int32_t *) tensor->op_params)[2];
                const size_t offset = ((int32_t *) tensor->op_params)[3];

                struct ggml_tensor * tensor_grad_view = NULL;

                if (src0->grad || src1->grad) {
                    GGML_ASSERT(src0->type == tensor->type);
                    GGML_ASSERT(tensor->grad->type == tensor->type);
                    GGML_ASSERT(tensor->grad->type == src1->grad->type);

                    tensor_grad_view = ggml_view_4d(ctx,
                        tensor->grad,
                        src1->grad->ne[0],
                        src1->grad->ne[1],
                        src1->grad->ne[2],
                        src1->grad->ne[3],
                        nb1, nb2, nb3, offset);
                }

                if (src0->grad) {
                    src0->grad = ggml_add_impl(ctx,
                        src0->grad,
                        ggml_acc_impl(ctx,
                            tensor->grad,
                            ggml_neg(ctx, tensor_grad_view),
                            nb1, nb2, nb3, offset, false),
                        inplace);
                }

                if (src1->grad) {
                    src1->grad =
                        ggml_add_impl(ctx,
                            src1->grad,
                            ggml_reshape(ctx,
                                ggml_cont(ctx, tensor_grad_view),
                                src1->grad),
                            inplace);
                }
            } break;
        case GGML_OP_CPY:
            {
                // necessary for llama
                // cpy overwrites value of src1 by src0 and returns view(src1)
                // the overwriting is mathematically equivalent to:
                // tensor = src0 * 1 + src1 * 0
                if (src0->grad) {
                    // dsrc0 = dtensor * 1
                    src0->grad = ggml_add_impl(ctx, src0->grad, tensor->grad, inplace);
                }
                if (src1->grad) {
                    // dsrc1 = dtensor * 0 -> noop
                }
            } break;
        case GGML_OP_CONT:
            {
                // same as cpy
                if (src0->grad) {
                    GGML_ASSERT(ggml_is_contiguous(src0->grad));
                    GGML_ASSERT(ggml_is_contiguous(tensor->grad));
                    src0->grad = ggml_add_impl(ctx, src0->grad, tensor->grad, inplace);
                }
            } break;
        case GGML_OP_RESHAPE:
            {
                // necessary for llama
                if (src0->grad) {
                    src0->grad =
                        ggml_add_impl(ctx, src0->grad,
                            ggml_reshape(ctx, tensor->grad, src0->grad),
                            inplace);
                }
            } break;
        case GGML_OP_VIEW:
            {
                // necessary for llama
                if (src0->grad) {
                    size_t offset;

                    memcpy(&offset, tensor->op_params, sizeof(offset));

                    size_t nb1 = tensor->nb[1];
                    size_t nb2 = tensor->nb[2];
                    size_t nb3 = tensor->nb[3];

                    if (src0->type != src0->grad->type) {
                        // gradient is typically F32, but src0 could be other type
                        size_t ng = ggml_element_size(src0->grad);
                        size_t n0 = ggml_element_size(src0);
                        GGML_ASSERT(offset % n0 == 0);
                        GGML_ASSERT(nb1 % n0 == 0);
                        GGML_ASSERT(nb2 % n0 == 0);
                        GGML_ASSERT(nb3 % n0 == 0);
                        offset = (offset / n0) * ng;
                        nb1 = (nb1 / n0) * ng;
                        nb2 = (nb2 / n0) * ng;
                        nb3 = (nb3 / n0) * ng;
                    }

                    src0->grad = ggml_acc_impl(ctx, src0->grad, tensor->grad, nb1, nb2, nb3, offset, inplace);
                }
            } break;
        case GGML_OP_PERMUTE:
            {
                // necessary for llama
                if (src0->grad) {
                    int32_t * axes = (int32_t *) tensor->op_params;
                    int axis0 = axes[0] & 0x3;
                    int axis1 = axes[1] & 0x3;
                    int axis2 = axes[2] & 0x3;
                    int axis3 = axes[3] & 0x3;
                    int axes_backward[4] = {0,0,0,0};
                    axes_backward[axis0] = 0;
                    axes_backward[axis1] = 1;
                    axes_backward[axis2] = 2;
                    axes_backward[axis3] = 3;
                    src0->grad =
                        ggml_add_impl(ctx, src0->grad,
                            ggml_permute(ctx,
                                tensor->grad,
                                axes_backward[0],
                                axes_backward[1],
                                axes_backward[2],
                                axes_backward[3]),
                            inplace);
                }
            } break;
        case GGML_OP_TRANSPOSE:
            {
                // necessary for llama
                if (src0->grad) {
                    src0->grad =
                        ggml_add_impl(ctx, src0->grad,
                            ggml_transpose(ctx, tensor->grad),
                            inplace);
                }
            } break;
        case GGML_OP_GET_ROWS:
            {
                // necessary for llama (only for tokenizer)
                if (src0->grad) {
                    src0->grad =
                        ggml_add_impl(ctx, src0->grad,
                            ggml_get_rows_back(ctx, tensor->grad, src1, src0->grad),
                            inplace);
                }
                if (src1->grad) {
                    // noop
                }
            } break;
        case GGML_OP_GET_ROWS_BACK:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_DIAG:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_DIAG_MASK_INF:
            {
                // necessary for llama
                if (src0->grad) {
                    const int n_past = ((int32_t *) tensor->op_params)[0];
                    src0->grad =
                        ggml_add_impl(ctx, src0->grad,
                            ggml_diag_mask_zero_impl(ctx, tensor->grad, n_past, false),
                            inplace);
                }
            } break;
        case GGML_OP_DIAG_MASK_ZERO:
            {
                // necessary for llama
                if (src0->grad) {
                    const int n_past = ((int32_t *) tensor->op_params)[0];
                    src0->grad =
                        ggml_add_impl(ctx, src0->grad,
                            ggml_diag_mask_zero_impl(ctx, tensor->grad, n_past, false),
                            inplace);
                }
            } break;
        case GGML_OP_SOFT_MAX:
            {
                // necessary for llama
                if (src0->grad) {
                    src0->grad =
                        ggml_add_impl(ctx, src0->grad,
                            ggml_soft_max_back(ctx, tensor->grad, tensor),
                            inplace);
                }
            } break;
        case GGML_OP_SOFT_MAX_BACK:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_ROPE:
            {
                // necessary for llama
                if (src0->grad) {
                    const int n_past = ((int32_t *) tensor->op_params)[0];
                    const int n_dims = ((int32_t *) tensor->op_params)[1];
                    const int mode   = ((int32_t *) tensor->op_params)[2];
                    const int n_ctx  = ((int32_t *) tensor->op_params)[3];
                    src0->grad = ggml_add_impl(ctx,
                        src0->grad,
                        ggml_rope_back(ctx,
                            tensor->grad,
                            n_past,
                            n_dims,
                            mode,
                            n_ctx),
                        inplace);
                }
            } break;
        case GGML_OP_ROPE_BACK:
            {
                if (src0->grad) {
                    const int n_past = ((int32_t *) tensor->op_params)[0];
                    const int n_dims = ((int32_t *) tensor->op_params)[1];
                    const int mode   = ((int32_t *) tensor->op_params)[2];
                    const int n_ctx  = ((int32_t *) tensor->op_params)[3];
                    src0->grad = ggml_add_impl(ctx,
                        src0->grad,
                        ggml_rope(ctx,
                            tensor->grad,
                            n_past,
                            n_dims,
                            mode,
                            n_ctx),
                        inplace);
                }
            } break;
        case GGML_OP_ALIBI:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_CLAMP:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_CONV_1D:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_CONV_2D:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
  12651. case GGML_OP_POOL_1D:
  12652. {
  12653. GGML_ASSERT(false); // TODO: not implemented
  12654. } break;
  12655. case GGML_OP_POOL_2D:
  12656. {
  12657. GGML_ASSERT(false); // TODO: not implemented
  12658. } break;
  12659. case GGML_OP_FLASH_ATTN:
  12660. {
  12661. struct ggml_tensor * flash_grad = NULL;
  12662. if (src0->grad || src1->grad || tensor->src[2]->grad) {
  12663. int32_t t = ggml_get_op_params_i32(tensor, 0);
  12664. GGML_ASSERT(t == 0 || t == 1);
  12665. bool masked = t != 0;
  12666. flash_grad =
  12667. ggml_flash_attn_back(ctx,
  12668. src0,
  12669. src1,
  12670. tensor->src[2],
  12671. tensor->grad,
  12672. masked);
  12673. }
  12674. if (src0->grad) {
  12675. struct ggml_tensor * grad_q = NULL;
  12676. const size_t nb0 = flash_grad->nb[0];
  12677. const size_t offset = 0;
  12678. switch(src0->n_dims) {
  12679. case 2:
  12680. {
  12681. grad_q = ggml_view_2d(ctx,
  12682. flash_grad,
  12683. src0->ne[0],
  12684. src0->ne[1],
  12685. nb0*src0->ne[0],
  12686. offset);
  12687. } break;
  12688. case 3:
  12689. {
  12690. grad_q = ggml_view_3d(ctx,
  12691. flash_grad,
  12692. src0->ne[0],
  12693. src0->ne[1],
  12694. src0->ne[2],
  12695. nb0*src0->ne[0],
  12696. nb0*src0->ne[0]*src0->ne[1],
  12697. offset);
  12698. } break;
  12699. case 4:
  12700. {
  12701. grad_q = ggml_view_4d(ctx,
  12702. flash_grad,
  12703. src0->ne[0],
  12704. src0->ne[1],
  12705. src0->ne[2],
  12706. src0->ne[3],
  12707. nb0*src0->ne[0],
  12708. nb0*src0->ne[0]*src0->ne[1],
  12709. nb0*src0->ne[0]*src0->ne[1]*src0->ne[2],
  12710. offset);
  12711. } break;
  12712. }
  12713. src0->grad = ggml_add_impl(ctx,
  12714. src0->grad,
  12715. grad_q,
  12716. inplace);
  12717. }
  12718. if (src1->grad) {
  12719. struct ggml_tensor * grad_k = NULL;
  12720. const size_t nb0 = flash_grad->nb[0];
  12721. const size_t offset = nb0*src0->ne[0]*src0->ne[1]*src0->ne[2]*src0->ne[3];
  12722. switch(src1->n_dims) {
  12723. case 2:
  12724. {
  12725. grad_k = ggml_view_2d(ctx,
  12726. flash_grad,
  12727. src1->ne[0],
  12728. src1->ne[1],
  12729. nb0*src1->ne[0],
  12730. offset);
  12731. } break;
  12732. case 3:
  12733. {
  12734. grad_k = ggml_view_3d(ctx,
  12735. flash_grad,
  12736. src1->ne[0],
  12737. src1->ne[1],
  12738. src1->ne[2],
  12739. nb0*src1->ne[0],
  12740. nb0*src1->ne[0]*src1->ne[1],
  12741. offset);
  12742. } break;
  12743. case 4:
  12744. {
  12745. grad_k = ggml_view_4d(ctx,
  12746. flash_grad,
  12747. src1->ne[0],
  12748. src1->ne[1],
  12749. src1->ne[2],
  12750. src1->ne[3],
  12751. nb0*src1->ne[0],
  12752. nb0*src1->ne[0]*src1->ne[1],
  12753. nb0*src1->ne[0]*src1->ne[1]*src1->ne[2],
  12754. offset);
  12755. } break;
  12756. }
  12757. src1->grad = ggml_add_impl(ctx,
  12758. src1->grad,
  12759. grad_k,
  12760. inplace);
  12761. }
  12762. struct ggml_tensor * opt0 = tensor->src[2];
  12763. if (opt0->grad) {
  12764. struct ggml_tensor * grad_v = NULL;
  12765. const size_t nb0 = flash_grad->nb[0];
  12766. const size_t offset = nb0*src0->ne[0]*src0->ne[1]*src0->ne[2]*src0->ne[3]
  12767. + nb0*src1->ne[0]*src1->ne[1]*src1->ne[2]*src1->ne[3];
  12768. switch(opt0->n_dims) {
  12769. case 2:
  12770. {
  12771. grad_v = ggml_view_2d(ctx,
  12772. flash_grad,
  12773. opt0->ne[0],
  12774. opt0->ne[1],
  12775. nb0*opt0->ne[0],
  12776. offset);
  12777. } break;
  12778. case 3:
  12779. {
  12780. grad_v = ggml_view_3d(ctx,
  12781. flash_grad,
  12782. opt0->ne[0],
  12783. opt0->ne[1],
  12784. opt0->ne[2],
  12785. nb0*opt0->ne[0],
  12786. nb0*opt0->ne[0]*opt0->ne[1],
  12787. offset);
  12788. } break;
  12789. case 4:
  12790. {
  12791. grad_v = ggml_view_4d(ctx,
  12792. flash_grad,
  12793. opt0->ne[0],
  12794. opt0->ne[1],
  12795. opt0->ne[2],
  12796. opt0->ne[3],
  12797. nb0*opt0->ne[0],
  12798. nb0*opt0->ne[0]*opt0->ne[1],
  12799. nb0*opt0->ne[0]*opt0->ne[1]*opt0->ne[2],
  12800. offset);
  12801. } break;
  12802. }
  12803. opt0->grad = ggml_add_impl(ctx,
  12804. opt0->grad,
  12805. grad_v,
  12806. inplace);
  12807. }
  12808. } break;
  12809. case GGML_OP_FLASH_FF:
  12810. {
  12811. GGML_ASSERT(false); // not supported
  12812. } break;
  12813. case GGML_OP_FLASH_ATTN_BACK:
  12814. {
  12815. GGML_ASSERT(false); // not supported
  12816. } break;
  12817. case GGML_OP_WIN_PART:
  12818. case GGML_OP_WIN_UNPART:
  12819. case GGML_OP_UNARY:
  12820. {
  12821. switch (ggml_get_unary_op(tensor)) {
  12822. case GGML_UNARY_OP_ABS:
  12823. {
  12824. if (src0->grad) {
  12825. src0->grad =
  12826. ggml_add_impl(ctx,
  12827. src0->grad,
  12828. ggml_mul(ctx,
  12829. ggml_sgn(ctx, src0),
  12830. tensor->grad),
  12831. inplace);
  12832. }
  12833. } break;
  12834. case GGML_UNARY_OP_SGN:
  12835. {
  12836. if (src0->grad) {
  12837. // noop
  12838. }
  12839. } break;
  12840. case GGML_UNARY_OP_NEG:
  12841. {
  12842. if (src0->grad) {
  12843. src0->grad = ggml_sub_impl(ctx, src0->grad, tensor->grad, inplace);
  12844. }
  12845. } break;
  12846. case GGML_UNARY_OP_STEP:
  12847. {
  12848. if (src0->grad) {
  12849. // noop
  12850. }
  12851. } break;
  12852. case GGML_UNARY_OP_TANH:
  12853. {
  12854. GGML_ASSERT(false); // TODO: not implemented
  12855. } break;
  12856. case GGML_UNARY_OP_ELU:
  12857. {
  12858. GGML_ASSERT(false); // TODO: not implemented
  12859. } break;
  12860. case GGML_UNARY_OP_RELU:
  12861. {
  12862. if (src0->grad) {
  12863. src0->grad = ggml_add_impl(ctx,
  12864. src0->grad,
  12865. ggml_mul(ctx,
  12866. ggml_step(ctx, src0),
  12867. tensor->grad),
  12868. inplace);
  12869. }
  12870. } break;
  12871. case GGML_UNARY_OP_GELU:
  12872. {
  12873. GGML_ASSERT(false); // TODO: not implemented
  12874. } break;
  12875. case GGML_UNARY_OP_GELU_QUICK:
  12876. {
  12877. GGML_ASSERT(false); // TODO: not implemented
  12878. } break;
  12879. case GGML_UNARY_OP_SILU:
  12880. {
  12881. // necessary for llama
  12882. if (src0->grad) {
  12883. src0->grad = ggml_add_impl(ctx,
  12884. src0->grad,
  12885. ggml_silu_back(ctx, src0, tensor->grad),
  12886. inplace);
  12887. }
  12888. } break;
  12889. default:
  12890. GGML_ASSERT(false);
  12891. }
  12892. } break;
  12893. case GGML_OP_MAP_UNARY:
  12894. case GGML_OP_MAP_BINARY:
  12895. case GGML_OP_MAP_CUSTOM1:
  12896. case GGML_OP_MAP_CUSTOM2:
  12897. case GGML_OP_MAP_CUSTOM3:
  12898. {
  12899. GGML_ASSERT(false); // not supported
  12900. } break;
  12901. case GGML_OP_CROSS_ENTROPY_LOSS:
  12902. {
  12903. if (src0->grad) {
  12904. src0->grad = ggml_add_impl(ctx,
  12905. src0->grad,
  12906. ggml_cross_entropy_loss_back(ctx,
  12907. src0,
  12908. src1,
  12909. tensor->grad),
  12910. inplace);
  12911. }
  12912. } break;
  12913. case GGML_OP_CROSS_ENTROPY_LOSS_BACK:
  12914. {
  12915. GGML_ASSERT(false); // not supported
  12916. } break;
  12917. case GGML_OP_NONE:
  12918. {
  12919. // nop
  12920. } break;
  12921. case GGML_OP_COUNT:
  12922. {
  12923. GGML_ASSERT(false);
  12924. } break;
  12925. }
  12926. }

static_assert(GGML_GRAPH_HASHTABLE_SIZE > GGML_MAX_NODES * 2, "GGML_GRAPH_HT_SIZE is too small");

static size_t hash(void * p) {
    return (size_t)p % GGML_GRAPH_HASHTABLE_SIZE;
}

static bool hash_insert(void * hash_table[], void * p) {
    size_t h = hash(p);

    // linear probing
    size_t i = h;
    while (hash_table[i] != NULL && hash_table[i] != p) {
        i = (i + 1) % GGML_GRAPH_HASHTABLE_SIZE;
        if (i == h) {
            // hash table is full
            GGML_ASSERT(false);
        }
    }

    if (hash_table[i] == p) {
        return true;
    }

    // insert
    hash_table[i] = p;
    return false;
}
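
// usage sketch (illustrative comment, not part of the library): the table
// deduplicates tensor pointers while a graph is being built - inserting the
// same pointer twice leaves the table unchanged and reports it was seen
//
//   void * ht[GGML_GRAPH_HASHTABLE_SIZE] = { NULL };
//   bool seen = hash_insert(ht, t);  // false - first visit of tensor t
//   seen      = hash_insert(ht, t);  // true  - already in the table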

static void ggml_visit_parents(struct ggml_cgraph * cgraph, struct ggml_tensor * node) {
    if (node->grad == NULL) {
        // this usually happens when we generate intermediate nodes from constants in the backward pass
        // it can also happen during forward pass, if the user performs computations with constants
        if (node->op != GGML_OP_NONE) {
            //GGML_PRINT_DEBUG("%s: warning: node %p has no grad, but op %d\n", __func__, (void *) node, node->op);
        }
    }

    // check if already visited
    if (hash_insert(cgraph->visited_hash_table, node)) {
        return;
    }

    for (int i = 0; i < GGML_MAX_SRC; ++i) {
        if (node->src[i]) {
            ggml_visit_parents(cgraph, node->src[i]);
        }
    }

    if (node->op == GGML_OP_NONE && node->grad == NULL) {
        // reached a leaf node, not part of the gradient graph (e.g. a constant)
        GGML_ASSERT(cgraph->n_leafs < GGML_MAX_NODES);

        if (strlen(node->name) == 0) {
            ggml_format_name(node, "leaf_%d", cgraph->n_leafs);
        }

        cgraph->leafs[cgraph->n_leafs] = node;
        cgraph->n_leafs++;
    } else {
        GGML_ASSERT(cgraph->n_nodes < GGML_MAX_NODES);

        if (strlen(node->name) == 0) {
            ggml_format_name(node, "node_%d", cgraph->n_nodes);
        }

        cgraph->nodes[cgraph->n_nodes] = node;
        cgraph->grads[cgraph->n_nodes] = node->grad;
        cgraph->n_nodes++;
    }
}

static void ggml_build_forward_impl(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor, bool expand) {
    if (!expand) {
        cgraph->n_nodes = 0;
        cgraph->n_leafs = 0;
    }

    const int n0 = cgraph->n_nodes;
    UNUSED(n0);

    ggml_visit_parents(cgraph, tensor);

    const int n_new = cgraph->n_nodes - n0;
    GGML_PRINT_DEBUG("%s: visited %d new nodes\n", __func__, n_new);

    if (n_new > 0) {
        // the last added node should always be the starting point
        GGML_ASSERT(cgraph->nodes[cgraph->n_nodes - 1] == tensor);
    }
}

void ggml_build_forward_expand(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor) {
    ggml_build_forward_impl(cgraph, tensor, true);
}

struct ggml_cgraph ggml_build_forward(struct ggml_tensor * tensor) {
    struct ggml_cgraph result = {
        /*.n_nodes      =*/ 0,
        /*.n_leafs      =*/ 0,
        /*.nodes        =*/ { NULL },
        /*.grads        =*/ { NULL },
        /*.leafs        =*/ { NULL },
        /*.hash_table   =*/ { NULL },
        /*.perf_runs    =*/ 0,
        /*.perf_cycles  =*/ 0,
        /*.perf_time_us =*/ 0,
    };

    ggml_build_forward_impl(&result, tensor, false);

    return result;
}
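
// usage sketch (illustrative comment, not part of the library): build a
// forward graph from the final tensor of an expression, then evaluate it
//
//   struct ggml_tensor * a = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4);
//   struct ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4);
//   struct ggml_tensor * c = ggml_add(ctx, a, b);
//
//   struct ggml_cgraph gf = ggml_build_forward(c); // topologically sorted, ends in c
//   ggml_graph_compute_with_ctx(ctx, &gf, 4);      // c->data now holds a + b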

struct ggml_cgraph ggml_build_backward(struct ggml_context * ctx, struct ggml_cgraph * gf, bool keep) {
    struct ggml_cgraph result = *gf;

    GGML_ASSERT(gf->n_nodes > 0);

    // if we are keeping the gradient graph, we have to detach the gradient nodes from the original graph
    if (keep) {
        for (int i = 0; i < gf->n_nodes; i++) {
            struct ggml_tensor * node = gf->nodes[i];

            if (node->grad) {
                node->grad = ggml_dup_tensor(ctx, node);
                gf->grads[i] = node->grad;
            }
        }
    }

    for (int i = gf->n_nodes - 1; i >= 0; i--) {
        struct ggml_tensor * node = gf->nodes[i];

        // because we detached the grad nodes from the original graph, we can afford inplace operations
        if (node->grad) {
            ggml_compute_backward(ctx, node, keep);
        }
    }

    for (int i = gf->n_nodes - 1; i >= 0; i--) {
        struct ggml_tensor * node = gf->nodes[i];

        if (node->is_param) {
            GGML_PRINT_DEBUG("%s: found root node %p\n", __func__, (void *) node);
            ggml_build_forward_expand(&result, node->grad);
        }
    }

    return result;
}
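
// usage sketch (illustrative comment, not part of the library; build_loss is a
// hypothetical helper): one forward/backward round for a trainable tensor
//
//   struct ggml_tensor * x = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n);
//   ggml_set_param(ctx, x);                          // mark x as trainable
//   struct ggml_tensor * loss = build_loss(ctx, x);  // hypothetical expression
//
//   struct ggml_cgraph gf = ggml_build_forward (loss);
//   struct ggml_cgraph gb = ggml_build_backward(ctx, &gf, false);
//
//   ggml_graph_reset(&gf);                           // zero all gradients
//   ggml_set_f32(loss->grad, 1.0f);                  // seed dL/dL = 1
//   ggml_graph_compute_with_ctx(ctx, &gb, 4);        // x->grad now holds dL/dx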

//
// thread data
//
// synchronization is done via busy loops
// I tried using spin locks, but not sure how to use them correctly - the things I tried were slower than busy loops
//

#ifdef __APPLE__

//#include <os/lock.h>
//
//typedef os_unfair_lock ggml_lock_t;
//
//#define ggml_lock_init(x)    UNUSED(x)
//#define ggml_lock_destroy(x) UNUSED(x)
//#define ggml_lock_lock       os_unfair_lock_lock
//#define ggml_lock_unlock     os_unfair_lock_unlock
//
//#define GGML_LOCK_INITIALIZER OS_UNFAIR_LOCK_INIT

typedef int ggml_lock_t;

#define ggml_lock_init(x)    UNUSED(x)
#define ggml_lock_destroy(x) UNUSED(x)
#define ggml_lock_lock(x)    UNUSED(x)
#define ggml_lock_unlock(x)  UNUSED(x)

#define GGML_LOCK_INITIALIZER 0

typedef pthread_t ggml_thread_t;

#define ggml_thread_create pthread_create
#define ggml_thread_join   pthread_join

#else

//typedef pthread_spinlock_t ggml_lock_t;

//#define ggml_lock_init(x) pthread_spin_init(x, PTHREAD_PROCESS_PRIVATE)
//#define ggml_lock_destroy pthread_spin_destroy
//#define ggml_lock_lock    pthread_spin_lock
//#define ggml_lock_unlock  pthread_spin_unlock

typedef int ggml_lock_t;

#define ggml_lock_init(x)    UNUSED(x)
#define ggml_lock_destroy(x) UNUSED(x)
#if defined(__x86_64__) || (defined(_MSC_VER) && defined(_M_AMD64))
#define ggml_lock_lock(x)    _mm_pause()
#else
#define ggml_lock_lock(x)    UNUSED(x)
#endif
#define ggml_lock_unlock(x)  UNUSED(x)

#define GGML_LOCK_INITIALIZER 0

typedef pthread_t ggml_thread_t;

#define ggml_thread_create pthread_create
#define ggml_thread_join   pthread_join

#endif

// Android's libc implementation "bionic" does not support setting affinity
#if defined(__linux__) && !defined(__BIONIC__)
static void set_numa_thread_affinity(int thread_n, int n_threads) {
    if (!ggml_is_numa()) {
        return;
    }

    // run this thread on node number: thread_n / (threads per node)
    const int node_num = thread_n / ((n_threads + g_state.numa.n_nodes - 1) / g_state.numa.n_nodes);
    struct ggml_numa_node * node = &g_state.numa.nodes[node_num];
    size_t setsize = CPU_ALLOC_SIZE(g_state.numa.total_cpus);

    cpu_set_t * cpus = CPU_ALLOC(g_state.numa.total_cpus);
    CPU_ZERO_S(setsize, cpus);
    for (size_t i = 0; i < node->n_cpus; ++i) {
        CPU_SET_S(node->cpus[i], setsize, cpus);
    }

    int rv = pthread_setaffinity_np(pthread_self(), setsize, cpus);
    if (rv) {
        fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n",
                strerror(rv));
    }

    CPU_FREE(cpus);
}

static void clear_numa_thread_affinity(void) {
    if (!ggml_is_numa()) {
        return;
    }

    size_t setsize = CPU_ALLOC_SIZE(g_state.numa.total_cpus);

    cpu_set_t * cpus = CPU_ALLOC(g_state.numa.total_cpus);
    CPU_ZERO_S(setsize, cpus);
    for (unsigned i = 0; i < g_state.numa.total_cpus; ++i) {
        CPU_SET_S(i, setsize, cpus);
    }

    int rv = pthread_setaffinity_np(pthread_self(), setsize, cpus);
    if (rv) {
        fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n",
                strerror(rv));
    }

    CPU_FREE(cpus);
}
#else
// TODO: Windows etc.
// (the linux implementation may also work on BSD, someone should test)
static void set_numa_thread_affinity(int thread_n, int n_threads) { UNUSED(thread_n); UNUSED(n_threads); }
static void clear_numa_thread_affinity(void) {}
#endif

struct ggml_compute_state_shared {
    const struct ggml_cgraph * cgraph;
    const struct ggml_cplan  * cplan;

    int64_t perf_node_start_cycles;
    int64_t perf_node_start_time_us;

    const int n_threads;

    // synchronization primitives
    atomic_int n_active; // num active threads
    atomic_int node_n;   // active graph node

    bool (*abort_callback)(void * data); // abort ggml_graph_compute when true
    void * abort_callback_data;
};

struct ggml_compute_state {
    ggml_thread_t thrd;
    int ith;
    struct ggml_compute_state_shared * shared;
};

static void ggml_graph_compute_perf_stats_node(struct ggml_tensor * node, const struct ggml_compute_state_shared * st) {
    int64_t cycles_cur  = ggml_perf_cycles()  - st->perf_node_start_cycles;
    int64_t time_us_cur = ggml_perf_time_us() - st->perf_node_start_time_us;

    node->perf_runs++;
    node->perf_cycles  += cycles_cur;
    node->perf_time_us += time_us_cur;
}

static thread_ret_t ggml_graph_compute_thread(void * data) {
    struct ggml_compute_state * state = (struct ggml_compute_state *) data;

    const struct ggml_cgraph * cgraph = state->shared->cgraph;
    const struct ggml_cplan  * cplan  = state->shared->cplan;

    const int * n_tasks_arr = cplan->n_tasks;
    const int   n_threads   = state->shared->n_threads;

    set_numa_thread_affinity(state->ith, n_threads);

    int node_n = -1;

    while (true) {
        if (cplan->abort_callback && cplan->abort_callback(cplan->abort_callback_data)) {
            state->shared->node_n += 1;
            return (thread_ret_t) GGML_EXIT_ABORTED;
        }
        if (atomic_fetch_sub(&state->shared->n_active, 1) == 1) {
            // all other threads are finished and spinning
            // do finalize and init here so we don't have to synchronize again
            struct ggml_compute_params params = {
                /*.type  =*/ GGML_TASK_FINALIZE,
                /*.ith   =*/ 0,
                /*.nth   =*/ 0,
                /*.wsize =*/ cplan->work_size,
                /*.wdata =*/ cplan->work_data,
            };

            if (node_n != -1) {
                /* FINALIZE */
                struct ggml_tensor * node = state->shared->cgraph->nodes[node_n];
                if (GGML_OP_HAS_FINALIZE[node->op]) {
                    params.nth = n_tasks_arr[node_n];
                    ggml_compute_forward(&params, node);
                }
                ggml_graph_compute_perf_stats_node(node, state->shared);
            }

            // distribute new work or execute it directly if there is only one thread
            while (++node_n < cgraph->n_nodes) {
                GGML_PRINT_DEBUG_5("%s: %d/%d\n", __func__, node_n, cgraph->n_nodes);

                struct ggml_tensor * node = cgraph->nodes[node_n];
                const int n_tasks = n_tasks_arr[node_n];

                state->shared->perf_node_start_cycles  = ggml_perf_cycles();
                state->shared->perf_node_start_time_us = ggml_perf_time_us();

                params.nth = n_tasks;

                /* INIT */
                if (GGML_OP_HAS_INIT[node->op]) {
                    params.type = GGML_TASK_INIT;
                    ggml_compute_forward(&params, node);
                }

                if (n_tasks == 1) {
                    // TODO: maybe push node_n to the atomic but if other threads see n_tasks is 1,
                    // they do something more efficient than spinning (?)
                    params.type = GGML_TASK_COMPUTE;
                    ggml_compute_forward(&params, node);

                    if (GGML_OP_HAS_FINALIZE[node->op]) {
                        params.type = GGML_TASK_FINALIZE;
                        ggml_compute_forward(&params, node);
                    }

                    ggml_graph_compute_perf_stats_node(node, state->shared);
                } else {
                    break;
                }

                if (cplan->abort_callback && cplan->abort_callback(cplan->abort_callback_data)) {
                    break;
                }
            }

            atomic_store(&state->shared->n_active, n_threads);
            atomic_store(&state->shared->node_n,   node_n);
        } else {
            // wait for other threads to finish
            const int last = node_n;
            do {
                //sched_yield();
                node_n = atomic_load(&state->shared->node_n);
            } while (node_n == last);
        }

        // check if we should stop
        if (node_n >= cgraph->n_nodes) break;

        /* COMPUTE */
        struct ggml_tensor * node = cgraph->nodes[node_n];
        const int n_tasks = n_tasks_arr[node_n];

        struct ggml_compute_params params = {
            /*.type  =*/ GGML_TASK_COMPUTE,
            /*.ith   =*/ state->ith,
            /*.nth   =*/ n_tasks,
            /*.wsize =*/ cplan->work_size,
            /*.wdata =*/ cplan->work_data,
        };

        if (state->ith < n_tasks) {
            ggml_compute_forward(&params, node);
        }
    }

    return GGML_EXIT_SUCCESS;
}

struct ggml_cplan ggml_graph_plan(struct ggml_cgraph * cgraph, int n_threads) {
    if (n_threads <= 0) {
        n_threads = GGML_DEFAULT_N_THREADS;
    }

    size_t work_size = 0;

    struct ggml_cplan cplan;
    memset(&cplan, 0, sizeof(struct ggml_cplan));

    // thread scheduling for the different operations + work buffer size estimation
    for (int i = 0; i < cgraph->n_nodes; i++) {
        int n_tasks = 1;

        struct ggml_tensor * node = cgraph->nodes[i];

        switch (node->op) {
            case GGML_OP_CPY:
            case GGML_OP_DUP:
                {
                    n_tasks = n_threads;

                    size_t cur = 0;
                    if (ggml_is_quantized(node->type)) {
                        cur = GGML_TYPE_SIZE[GGML_TYPE_F32] * node->ne[0] * n_tasks;
                    }

                    work_size = MAX(work_size, cur);
                } break;
            case GGML_OP_ADD:
            case GGML_OP_ADD1:
                {
                    n_tasks = n_threads;

                    size_t cur = 0;
                    if (ggml_is_quantized(node->src[0]->type)) {
                        cur = GGML_TYPE_SIZE[GGML_TYPE_F32] * node->src[0]->ne[0] * n_tasks;
                    }

                    work_size = MAX(work_size, cur);
                } break;
            case GGML_OP_ACC:
                {
                    n_tasks = n_threads;

                    size_t cur = 0;
                    if (ggml_is_quantized(node->src[0]->type)) {
                        cur = GGML_TYPE_SIZE[GGML_TYPE_F32] * node->src[1]->ne[0] * n_tasks;
                    }

                    work_size = MAX(work_size, cur);
                } break;
            case GGML_OP_SUB:
            case GGML_OP_DIV:
            case GGML_OP_SQR:
            case GGML_OP_SQRT:
            case GGML_OP_LOG:
            case GGML_OP_SUM:
            case GGML_OP_SUM_ROWS:
            case GGML_OP_MEAN:
            case GGML_OP_ARGMAX:
            case GGML_OP_REPEAT:
            case GGML_OP_REPEAT_BACK:
                {
                    n_tasks = 1;
                } break;
            case GGML_OP_UNARY:
                {
                    switch (ggml_get_unary_op(node)) {
                        case GGML_UNARY_OP_ABS:
                        case GGML_UNARY_OP_SGN:
                        case GGML_UNARY_OP_NEG:
                        case GGML_UNARY_OP_STEP:
                        case GGML_UNARY_OP_TANH:
                        case GGML_UNARY_OP_ELU:
                        case GGML_UNARY_OP_RELU:
                            {
                                n_tasks = 1;
                            } break;
                        case GGML_UNARY_OP_GELU:
                        case GGML_UNARY_OP_GELU_QUICK:
                        case GGML_UNARY_OP_SILU:
                            {
                                n_tasks = n_threads;
                            } break;
                    }
                } break;
            case GGML_OP_SILU_BACK:
            case GGML_OP_MUL:
            case GGML_OP_NORM:
            case GGML_OP_RMS_NORM:
            case GGML_OP_RMS_NORM_BACK:
                {
                    n_tasks = n_threads;
                } break;
            case GGML_OP_MUL_MAT:
            case GGML_OP_OUT_PROD:
                {
                    n_tasks = n_threads;

                    // TODO: use different scheduling for different matrix sizes
                    //const int nr0 = ggml_nrows(node->src[0]);
                    //const int nr1 = ggml_nrows(node->src[1]);

                    //n_tasks = MIN(n_threads, MAX(1, nr0/128));
                    //printf("nr0 = %8d, nr1 = %8d, nr0*nr1 = %8d, n_tasks%d\n", nr0, nr1, nr0*nr1, n_tasks);

                    size_t cur = 0;
                    const enum ggml_type vec_dot_type = type_traits[node->src[0]->type].vec_dot_type;

#if defined(GGML_USE_CUBLAS)
                    if (ggml_cuda_can_mul_mat(node->src[0], node->src[1], node)) {
                        n_tasks = 1; // TODO: this actually is doing nothing
                                     //       the threads are still spinning
                    } else
#elif defined(GGML_USE_CLBLAST)
                    if (ggml_cl_can_mul_mat(node->src[0], node->src[1], node)) {
                        n_tasks = 1; // TODO: this actually is doing nothing
                                     //       the threads are still spinning
                        cur = ggml_cl_mul_mat_get_wsize(node->src[0], node->src[1], node);
                    } else
#endif
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
                    if (ggml_compute_forward_mul_mat_use_blas(node->src[0], node->src[1], node)) {
                        n_tasks = 1; // TODO: this actually is doing nothing
                                     //       the threads are still spinning
                        if (node->src[0]->type != GGML_TYPE_F32) {
                            // here we need memory just for single 2D matrix from src0
                            cur = GGML_TYPE_SIZE[GGML_TYPE_F32]*(node->src[0]->ne[0]*node->src[0]->ne[1]);
                        }
                    } else
#endif
                    if (node->src[1]->type != vec_dot_type) {
                        cur = GGML_TYPE_SIZE[vec_dot_type]*ggml_nelements(node->src[1])/GGML_BLCK_SIZE[vec_dot_type];
                    } else {
                        cur = 0;
                    }

                    work_size = MAX(work_size, cur);
                } break;
            case GGML_OP_SCALE:
                {
                    n_tasks = 1;
                } break;
            case GGML_OP_SET:
            case GGML_OP_CONT:
            case GGML_OP_RESHAPE:
            case GGML_OP_VIEW:
            case GGML_OP_PERMUTE:
            case GGML_OP_TRANSPOSE:
            case GGML_OP_GET_ROWS:
            case GGML_OP_GET_ROWS_BACK:
            case GGML_OP_DIAG:
                {
                    n_tasks = 1;
                } break;
            case GGML_OP_DIAG_MASK_ZERO:
            case GGML_OP_DIAG_MASK_INF:
            case GGML_OP_SOFT_MAX:
            case GGML_OP_SOFT_MAX_BACK:
            case GGML_OP_ROPE:
            case GGML_OP_ROPE_BACK:
                {
                    n_tasks = n_threads;
                } break;
            case GGML_OP_ALIBI:
                {
                    n_tasks = 1; //TODO
                } break;
            case GGML_OP_CLAMP:
                {
                    n_tasks = 1; //TODO
                } break;
            case GGML_OP_CONV_1D:
                {
                    n_tasks = n_threads;

                    GGML_ASSERT(node->src[0]->ne[3] == 1);
                    GGML_ASSERT(node->src[1]->ne[2] == 1);
                    GGML_ASSERT(node->src[1]->ne[3] == 1);

                    size_t cur = 0;
                    const int nk = node->src[0]->ne[0];

                    if (node->src[0]->type == GGML_TYPE_F16 &&
                        node->src[1]->type == GGML_TYPE_F32) {
                        cur = sizeof(ggml_fp16_t)*(
                                nk*ggml_up32(node->src[0]->ne[1])*node->src[0]->ne[2] +
                                ( 2*(nk/2) + node->src[1]->ne[0])*node->src[1]->ne[1]
                                );
                    } else if (node->src[0]->type == GGML_TYPE_F32 &&
                               node->src[1]->type == GGML_TYPE_F32) {
                        cur = sizeof(float)*(
                                nk*ggml_up32(node->src[0]->ne[1])*node->src[0]->ne[2] +
                                ( 2*(nk/2) + node->src[1]->ne[0])*node->src[1]->ne[1]
                                );
                    } else {
                        GGML_ASSERT(false);
                    }

                    work_size = MAX(work_size, cur);
                } break;
            case GGML_OP_CONV_2D:
                {
                    n_tasks = n_threads;

                    const int64_t ne00 = node->src[0]->ne[0]; // W
                    const int64_t ne01 = node->src[0]->ne[1]; // H
                    const int64_t ne02 = node->src[0]->ne[2]; // C
                    const int64_t ne03 = node->src[0]->ne[3]; // N

                    const int64_t ne10 = node->src[1]->ne[0]; // W
                    const int64_t ne11 = node->src[1]->ne[1]; // H
                    const int64_t ne12 = node->src[1]->ne[2]; // C

                    const int64_t ne0 = node->ne[0];
                    const int64_t ne1 = node->ne[1];
                    const int64_t ne2 = node->ne[2];
                    const int64_t nk  = ne00*ne01;
                    const int64_t ew0 = nk * ne02;

                    UNUSED(ne03);
                    UNUSED(ne2);

                    size_t cur = 0;

                    if (node->src[0]->type == GGML_TYPE_F16 &&
                        node->src[1]->type == GGML_TYPE_F32) {
                        cur = sizeof(ggml_fp16_t)*(ne0*ne1*ew0);
                    } else if (node->src[0]->type == GGML_TYPE_F32 &&
                               node->src[1]->type == GGML_TYPE_F32) {
                        cur = sizeof(float)* (ne10*ne11*ne12);
                    } else {
                        GGML_ASSERT(false);
                    }

                    work_size = MAX(work_size, cur);
                } break;
            case GGML_OP_POOL_1D:
            case GGML_OP_POOL_2D:
                {
                    n_tasks = 1;
                } break;
            case GGML_OP_FLASH_ATTN:
                {
                    n_tasks = n_threads;

                    size_t cur = 0;

                    const int64_t ne11 = ggml_up(node->src[1]->ne[1], GGML_SOFT_MAX_UNROLL);

                    if (node->src[1]->type == GGML_TYPE_F32) {
                        cur  = sizeof(float)*ne11*n_tasks; // TODO: this can become (n_tasks-1)
                        cur += sizeof(float)*ne11*n_tasks; // this is overestimated by x2
                    }

                    if (node->src[1]->type == GGML_TYPE_F16) {
                        cur  = sizeof(float)*ne11*n_tasks; // TODO: this can become (n_tasks-1)
                        cur += sizeof(float)*ne11*n_tasks; // this is overestimated by x2
                    }

                    work_size = MAX(work_size, cur);
                } break;
            case GGML_OP_FLASH_FF:
                {
                    n_tasks = n_threads;

                    size_t cur = 0;

                    if (node->src[1]->type == GGML_TYPE_F32) {
                        cur  = sizeof(float)*node->src[1]->ne[1]*n_tasks; // TODO: this can become (n_tasks-1)
                        cur += sizeof(float)*node->src[1]->ne[1]*n_tasks; // this is overestimated by x2
                    }

                    if (node->src[1]->type == GGML_TYPE_F16) {
                        cur  = sizeof(float)*node->src[1]->ne[1]*n_tasks; // TODO: this can become (n_tasks-1)
                        cur += sizeof(float)*node->src[1]->ne[1]*n_tasks; // this is overestimated by x2
                    }

                    work_size = MAX(work_size, cur);
                } break;
            case GGML_OP_FLASH_ATTN_BACK:
                {
                    n_tasks = n_threads;

                    size_t cur = 0;

                    const int64_t    D = node->src[0]->ne[0];
                    const int64_t ne11 = ggml_up(node->src[1]->ne[1], GGML_SOFT_MAX_UNROLL);
                    const int64_t mxDn = MAX(D, ne11) * 2; // *2 because of S and SM in ggml_compute_forward_flash_attn_back

                    if (node->src[1]->type == GGML_TYPE_F32) {
                        cur  = sizeof(float)*mxDn*n_tasks; // TODO: this can become (n_tasks-1)
                        cur += sizeof(float)*mxDn*n_tasks; // this is overestimated by x2
                    }

                    if (node->src[1]->type == GGML_TYPE_F16) {
                        cur  = sizeof(float)*mxDn*n_tasks; // TODO: this can become (n_tasks-1)
                        cur += sizeof(float)*mxDn*n_tasks; // this is overestimated by x2
                    }

                    work_size = MAX(work_size, cur);
                } break;
            case GGML_OP_WIN_PART:
            case GGML_OP_WIN_UNPART:
            case GGML_OP_MAP_UNARY:
            case GGML_OP_MAP_BINARY:
            case GGML_OP_MAP_CUSTOM1:
            case GGML_OP_MAP_CUSTOM2:
            case GGML_OP_MAP_CUSTOM3:
                {
                    n_tasks = 1;
                } break;
            case GGML_OP_CROSS_ENTROPY_LOSS:
                {
                    n_tasks = n_threads;

                    size_t cur = ggml_type_size(node->type)*(n_tasks + node->src[0]->ne[0]*n_tasks);

                    work_size = MAX(work_size, cur);
                } break;
            case GGML_OP_CROSS_ENTROPY_LOSS_BACK:
                {
                    n_tasks = n_threads;

                    size_t cur = ggml_type_size(node->type)*node->src[0]->ne[0]*n_tasks;

                    work_size = MAX(work_size, cur);
                } break;
            case GGML_OP_NONE:
                {
                    n_tasks = 1;
                } break;
            case GGML_OP_COUNT:
                {
                    GGML_ASSERT(false);
                } break;
        }

        cplan.n_tasks[i] = n_tasks;
    }

    if (work_size > 0) {
        work_size += CACHE_LINE_SIZE*(n_threads - 1);
    }

    cplan.n_threads = n_threads;
    cplan.work_size = work_size;
    cplan.work_data = NULL;

    return cplan;
}
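
// usage sketch (illustrative comment, not part of the library): the caller
// owns the work buffer - plan first, then provide work_size bytes, then compute
//
//   struct ggml_cplan cplan = ggml_graph_plan(&gf, n_threads);
//
//   void * work = NULL;
//   if (cplan.work_size > 0) {
//       work = malloc(cplan.work_size);
//       cplan.work_data = work;
//   }
//
//   ggml_graph_compute(&gf, &cplan);
//   free(work);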

int ggml_graph_compute(struct ggml_cgraph * cgraph, struct ggml_cplan * cplan) {
    {
        GGML_ASSERT(cplan);
        GGML_ASSERT(cplan->n_threads > 0);

        if (cplan->work_size > 0) {
            GGML_ASSERT(cplan->work_data);
        }

        for (int i = 0; i < cgraph->n_nodes; ++i) {
            if (cgraph->nodes[i]->op != GGML_OP_NONE) {
                GGML_ASSERT(cplan->n_tasks[i] > 0);
            }
        }
    }

    const int n_threads = cplan->n_threads;

    struct ggml_compute_state_shared state_shared = {
        /*.cgraph                  =*/ cgraph,
        /*.cplan                   =*/ cplan,
        /*.perf_node_start_cycles  =*/ 0,
        /*.perf_node_start_time_us =*/ 0,
        /*.n_threads               =*/ n_threads,
        /*.n_active                =*/ n_threads,
        /*.node_n                  =*/ -1,
        /*.abort_callback          =*/ NULL,
        /*.abort_callback_data     =*/ NULL,
    };
    struct ggml_compute_state * workers = alloca(sizeof(struct ggml_compute_state)*n_threads);

    // create thread pool
    if (n_threads > 1) {
        for (int j = 1; j < n_threads; ++j) {
            workers[j] = (struct ggml_compute_state) {
                .thrd   = 0,
                .ith    = j,
                .shared = &state_shared,
            };

            const int rc = ggml_thread_create(&workers[j].thrd, NULL, ggml_graph_compute_thread, &workers[j]);
            GGML_ASSERT(rc == 0);
        }
    }

    workers[0].ith    = 0;
    workers[0].shared = &state_shared;

    const int64_t perf_start_cycles  = ggml_perf_cycles();
    const int64_t perf_start_time_us = ggml_perf_time_us();

    // this is a work thread too
    int compute_status = (size_t) ggml_graph_compute_thread(&workers[0]);

    // don't leave affinity set on the main thread
    clear_numa_thread_affinity();

    // join or kill thread pool
    if (n_threads > 1) {
        for (int j = 1; j < n_threads; j++) {
            const int rc = ggml_thread_join(workers[j].thrd, NULL);
            GGML_ASSERT(rc == 0);
        }
    }

    // performance stats (graph)
    {
        int64_t perf_cycles_cur  = ggml_perf_cycles()  - perf_start_cycles;
        int64_t perf_time_us_cur = ggml_perf_time_us() - perf_start_time_us;

        cgraph->perf_runs++;
        cgraph->perf_cycles  += perf_cycles_cur;
        cgraph->perf_time_us += perf_time_us_cur;

        GGML_PRINT_DEBUG("%s: perf (%d) - cpu = %.3f / %.3f ms, wall = %.3f / %.3f ms\n",
                __func__, cgraph->perf_runs,
                (double) perf_cycles_cur      / (double) ggml_cycles_per_ms(),
                (double) cgraph->perf_cycles  / (double) ggml_cycles_per_ms() / (double) cgraph->perf_runs,
                (double) perf_time_us_cur     / 1000.0,
                (double) cgraph->perf_time_us / 1000.0 / cgraph->perf_runs);
    }

    return compute_status;
}

void ggml_graph_reset(struct ggml_cgraph * cgraph) {
    for (int i = 0; i < cgraph->n_nodes; i++) {
        struct ggml_tensor * grad = cgraph->grads[i];

        if (grad) {
            ggml_set_zero(grad);
        }
    }
}

void ggml_graph_compute_with_ctx(struct ggml_context * ctx, struct ggml_cgraph * cgraph, int n_threads) {
    struct ggml_cplan cplan = ggml_graph_plan(cgraph, n_threads);

    struct ggml_tensor * buf = ggml_new_tensor_1d(ctx, GGML_TYPE_I8, cplan.work_size);
    GGML_ASSERT(buf);

    cplan.work_data = buf->data;

    ggml_graph_compute(cgraph, &cplan);
}

struct ggml_tensor * ggml_graph_get_tensor(struct ggml_cgraph * cgraph, const char * name) {
    for (int i = 0; i < cgraph->n_leafs; i++) {
        struct ggml_tensor * leaf = cgraph->leafs[i];

        if (strcmp(leaf->name, name) == 0) {
            return leaf;
        }
    }

    for (int i = 0; i < cgraph->n_nodes; i++) {
        struct ggml_tensor * node = cgraph->nodes[i];

        if (strcmp(node->name, name) == 0) {
            return node;
        }
    }

    return NULL;
}
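
// usage sketch (illustrative comment, not part of the library): look up a
// tensor by the name previously assigned with ggml_set_name()
//
//   ggml_set_name(c, "result");
//   struct ggml_tensor * out = ggml_graph_get_tensor(&gf, "result");
//   if (out) {
//       const float v = ggml_get_f32_1d(out, 0);
//   }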

static void ggml_graph_export_leaf(const struct ggml_tensor * tensor, FILE * fout) {
    const int64_t * ne = tensor->ne;
    const size_t  * nb = tensor->nb;

    fprintf(fout, "%-6s %-12s %8d %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %16zu %16zu %16zu %16zu %16p %32s\n",
            ggml_type_name(tensor->type),
            ggml_op_name (tensor->op),
            tensor->n_dims,
            ne[0], ne[1], ne[2], ne[3],
            nb[0], nb[1], nb[2], nb[3],
            tensor->data,
            tensor->name);
}

static void ggml_graph_export_node(const struct ggml_tensor * tensor, const char * arg, FILE * fout) {
    const int64_t * ne = tensor->ne;
    const size_t  * nb = tensor->nb;

    fprintf(fout, "%-6s %-6s %-12s %8d %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %16zu %16zu %16zu %16zu %16p %32s\n",
            arg,
            ggml_type_name(tensor->type),
            ggml_op_name (tensor->op),
            tensor->n_dims,
            ne[0], ne[1], ne[2], ne[3],
            nb[0], nb[1], nb[2], nb[3],
            tensor->data,
            tensor->name);
}

void ggml_graph_export(const struct ggml_cgraph * cgraph, const char * fname) {
    uint64_t size_eval = 0;

    // compute size of intermediate results
    // TODO: does not take into account scratch buffers !!!!
    for (int i = 0; i < cgraph->n_nodes; ++i) {
        size_eval += ggml_nbytes(cgraph->nodes[i]);
    }

    // print
    {
        FILE * fout = stdout;

        fprintf(fout, "\n");
        fprintf(fout, "%-16s %8x\n", "magic",   GGML_FILE_MAGIC);
        fprintf(fout, "%-16s %8d\n", "version", GGML_FILE_VERSION);
        fprintf(fout, "%-16s %8d\n", "leafs",   cgraph->n_leafs);
        fprintf(fout, "%-16s %8d\n", "nodes",   cgraph->n_nodes);
        fprintf(fout, "%-16s %" PRIu64 "\n", "eval", size_eval);

        // header
        fprintf(fout, "\n");
        fprintf(fout, "%-6s %-12s %8s %8s %8s %8s %8s %16s %16s %16s %16s %16s %16s\n",
                "TYPE", "OP", "NDIMS", "NE0", "NE1", "NE2", "NE3", "NB0", "NB1", "NB2", "NB3", "DATA", "NAME");

        for (int i = 0; i < cgraph->n_leafs; ++i) {
            ggml_graph_export_leaf(cgraph->leafs[i], fout);

            GGML_ASSERT(cgraph->leafs[i]->op == GGML_OP_NONE);
            GGML_ASSERT(cgraph->leafs[i]->src[0] == NULL);
            GGML_ASSERT(cgraph->leafs[i]->src[1] == NULL);
        }

        // header
        fprintf(fout, "\n");
        fprintf(fout, "%-6s %-6s %-12s %8s %8s %8s %8s %8s %16s %16s %16s %16s %8s %16s %16s\n",
                "ARG", "TYPE", "OP", "NDIMS", "NE0", "NE1", "NE2", "NE3", "NB0", "NB1", "NB2", "NB3", "NTASKS", "DATA", "NAME");

        for (int i = 0; i < cgraph->n_nodes; ++i) {
            ggml_graph_export_node(cgraph->nodes[i], "DST", fout);

            for (int j = 0; j < GGML_MAX_SRC; ++j) {
                if (cgraph->nodes[i]->src[j]) {
                    ggml_graph_export_node(cgraph->nodes[i]->src[j], "SRC", fout);
                }
            }

            fprintf(fout, "\n");
        }

        fprintf(fout, "\n");
    }

    // write binary data
    {
        FILE * fout = fopen(fname, "wb");

        if (!fout) {
            fprintf(stderr, "%s: failed to open %s\n", __func__, fname);
            return;
        }

        // header
        {
            const uint32_t magic   = GGML_FILE_MAGIC;
            const uint32_t version = GGML_FILE_VERSION;
            const uint32_t n_leafs = cgraph->n_leafs;
            const uint32_t nodes   = cgraph->n_nodes;

            fwrite(&magic,     sizeof(uint32_t), 1, fout);
            fwrite(&version,   sizeof(uint32_t), 1, fout);
            fwrite(&n_leafs,   sizeof(uint32_t), 1, fout);
            fwrite(&nodes,     sizeof(uint32_t), 1, fout);
            fwrite(&size_eval, sizeof(uint64_t), 1, fout);
        }

        // leafs
        {
            for (int i = 0; i < cgraph->n_leafs; ++i) {
                const struct ggml_tensor * tensor = cgraph->leafs[i];

                const uint32_t type   = tensor->type;
                const uint32_t op     = tensor->op;
                const uint32_t n_dims = tensor->n_dims;

                fwrite(&type,   sizeof(uint32_t), 1, fout);
                fwrite(&op,     sizeof(uint32_t), 1, fout);
                fwrite(&n_dims, sizeof(uint32_t), 1, fout);

                for (int j = 0; j < GGML_MAX_DIMS; ++j) {
                    const uint64_t ne = tensor->ne[j];
                    const uint64_t nb = tensor->nb[j];

                    fwrite(&ne, sizeof(uint64_t), 1, fout);
                    fwrite(&nb, sizeof(uint64_t), 1, fout);
                }

                fwrite(tensor->name,      sizeof(char), GGML_MAX_NAME,      fout);
                fwrite(tensor->op_params, sizeof(char), GGML_MAX_OP_PARAMS, fout);

                // dump the data
                // TODO: pad this to 32 byte boundary
                {
                    const size_t size = ggml_nbytes(tensor);

                    fwrite(tensor->data, sizeof(char), size, fout);
                }
            }
        }

        // nodes
        {
            for (int i = 0; i < cgraph->n_nodes; ++i) {
                const struct ggml_tensor * tensor = cgraph->nodes[i];

                const uint32_t type   = tensor->type;
                const uint32_t op     = tensor->op;
                const uint32_t n_dims = tensor->n_dims;

                fwrite(&type,   sizeof(uint32_t), 1, fout);
                fwrite(&op,     sizeof(uint32_t), 1, fout);
                fwrite(&n_dims, sizeof(uint32_t), 1, fout);

                for (int j = 0; j < GGML_MAX_DIMS; ++j) {
                    const uint64_t ne = tensor->ne[j];
                    const uint64_t nb = tensor->nb[j];

                    fwrite(&ne, sizeof(uint64_t), 1, fout);
                    fwrite(&nb, sizeof(uint64_t), 1, fout);
                }

                fwrite(tensor->name,      sizeof(char), GGML_MAX_NAME,      fout);
                fwrite(tensor->op_params, sizeof(char), GGML_MAX_OP_PARAMS, fout);

                // output the op arguments
                {
                    struct ggml_tensor * args[GGML_MAX_SRC] = { NULL };

                    for (int j = 0; j < GGML_MAX_SRC; ++j) {
                        args[j] = tensor->src[j];
                    }

                    for (int j = 0; j < GGML_MAX_SRC; ++j) {
                        if (args[j]) {
                            int32_t idx = -1;

                            // check if leaf
                            {
                                for (int k = 0; k < cgraph->n_leafs; ++k) {
                                    if (args[j] == cgraph->leafs[k]) {
                                        idx = k;
                                        break;
                                    }
                                }
                            }

                            // check if node
                            if (idx == -1) {
                                for (int k = 0; k < cgraph->n_nodes; ++k) {
                                    if (args[j] == cgraph->nodes[k]) {
                                        idx = GGML_MAX_NODES + k;
                                        break;
                                    }
                                }
                            }

                            if (idx == -1) {
                                fprintf(stderr, "%s: failed to find tensor, arg = %d, node = %d\n", __func__, j, i);
                                fclose(fout); // don't leak the file handle on the error path
                                return;
                            }

                            fwrite(&idx, sizeof(int32_t), 1, fout);
                        } else {
                            const int32_t nul = -1;

                            fwrite(&nul, sizeof(int32_t), 1, fout);
                        }
                    }
                }
            }
        }

        fclose(fout);
    }
}
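
// usage sketch (illustrative comment, not part of the library): export a graph
// to disk and load it back, e.g. in another process
//
//   ggml_graph_export(&gf, "graph.ggml");
//
//   struct ggml_context * ctx_data = NULL;
//   struct ggml_context * ctx_eval = NULL;
//   struct ggml_cgraph gf2 = ggml_graph_import("graph.ggml", &ctx_data, &ctx_eval);
//   ggml_graph_compute_with_ctx(ctx_eval, &gf2, 1);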

struct ggml_cgraph ggml_graph_import(const char * fname, struct ggml_context ** ctx_data, struct ggml_context ** ctx_eval) {
    assert(*ctx_data == NULL);
    assert(*ctx_eval == NULL);

    struct ggml_cgraph result = { 0 };

    struct ggml_tensor * data = NULL;

    // read file into data
    {
        FILE * fin = fopen(fname, "rb");
        if (!fin) {
            fprintf(stderr, "%s: failed to open %s\n", __func__, fname);
            return result;
        }

        size_t fsize = 0;

        fseek(fin, 0, SEEK_END);
        fsize = ftell(fin);
        fseek(fin, 0, SEEK_SET);

        // create the data context
        {
            const size_t overhead = 1*ggml_tensor_overhead();

            struct ggml_init_params params = {
                .mem_size   = fsize + overhead,
                .mem_buffer = NULL,
                .no_alloc   = false,
            };

            *ctx_data = ggml_init(params);

            if (!*ctx_data) {
                fprintf(stderr, "%s: failed to create ggml context\n", __func__);
                fclose(fin);
                return result;
            }
        }

        data = ggml_new_tensor_1d(*ctx_data, GGML_TYPE_I8, fsize);

        {
            const size_t ret = fread(data->data, sizeof(char), fsize, fin);
            if (ret != fsize) {
                fprintf(stderr, "%s: failed to read %s\n", __func__, fname);
                fclose(fin);
                return result;
            }
        }

        fclose(fin);
    }

    // populate result
    {
        char * ptr = (char *) data->data;

        const uint32_t magic = *(const uint32_t *) ptr; ptr += sizeof(magic);

        if (magic != GGML_FILE_MAGIC) {
            fprintf(stderr, "%s: invalid magic number, got %08x\n", __func__, magic);
            return result;
        }

        const uint32_t version = *(const uint32_t *) ptr; ptr += sizeof(version);

        if (version != GGML_FILE_VERSION) {
            fprintf(stderr, "%s: invalid version number\n", __func__);
            return result;
        }

        const uint32_t n_leafs   = *(const uint32_t *) ptr; ptr += sizeof(n_leafs);
        const uint32_t n_nodes   = *(const uint32_t *) ptr; ptr += sizeof(n_nodes);
        const uint64_t size_eval = *(const uint64_t *) ptr; ptr += sizeof(size_eval);

        result.n_leafs = n_leafs;
        result.n_nodes = n_nodes;

        // create the data context
        {
            const size_t overhead = (n_leafs + n_nodes)*ggml_tensor_overhead();

            struct ggml_init_params params = {
                .mem_size   = size_eval + overhead,
                .mem_buffer = NULL,
                .no_alloc   = true,
            };

            *ctx_eval = ggml_init(params);

            if (!*ctx_eval) {
                fprintf(stderr, "%s: failed to create ggml context\n", __func__);
                return result;
            }
        }

        // leafs
        {
            uint32_t type;
            uint32_t op;
            uint32_t n_dims;

            for (uint32_t i = 0; i < n_leafs; ++i) {
                type   = *(const uint32_t *) ptr; ptr += sizeof(type);
                op     = *(const uint32_t *) ptr; ptr += sizeof(op);
                n_dims = *(const uint32_t *) ptr; ptr += sizeof(n_dims);

                int64_t ne[GGML_MAX_DIMS];
                size_t  nb[GGML_MAX_DIMS];

                for (int j = 0; j < GGML_MAX_DIMS; ++j) {
                    uint64_t ne_cur;
                    uint64_t nb_cur;

                    ne_cur = *(const uint64_t *) ptr; ptr += sizeof(ne_cur);
                    nb_cur = *(const uint64_t *) ptr; ptr += sizeof(nb_cur);

                    ne[j] = ne_cur;
                    nb[j] = nb_cur;
                }

                struct ggml_tensor * tensor = ggml_new_tensor(*ctx_eval, (enum ggml_type) type, n_dims, ne);

                tensor->op = (enum ggml_op) op;

                memcpy(tensor->name,      ptr, GGML_MAX_NAME);      ptr += GGML_MAX_NAME;
                memcpy(tensor->op_params, ptr, GGML_MAX_OP_PARAMS); ptr += GGML_MAX_OP_PARAMS;

                tensor->data = (void *) ptr;

                for (int j = 0; j < GGML_MAX_DIMS; ++j) {
                    tensor->nb[j] = nb[j];
                }

                result.leafs[i] = tensor;

                ptr += ggml_nbytes(tensor);

                fprintf(stderr, "%s: loaded leaf %d: '%16s', %3d dims, %9zu bytes\n", __func__, i, tensor->name, n_dims, ggml_nbytes(tensor));
            }
        }

        ggml_set_no_alloc(*ctx_eval, false);

        // nodes
        {
            uint32_t type;
            uint32_t op;
            uint32_t n_dims;

            for (uint32_t i = 0; i < n_nodes; ++i) {
                type   = *(const uint32_t *) ptr; ptr += sizeof(type);
                op     = *(const uint32_t *) ptr; ptr += sizeof(op);
                n_dims = *(const uint32_t *) ptr; ptr += sizeof(n_dims);

                enum ggml_op eop = (enum ggml_op) op;

                int64_t ne[GGML_MAX_DIMS];
                size_t  nb[GGML_MAX_DIMS];

                for (int j = 0; j < GGML_MAX_DIMS; ++j) {
                    uint64_t ne_cur;
                    uint64_t nb_cur;

                    ne_cur = *(const uint64_t *) ptr; ptr += sizeof(ne_cur);
                    nb_cur = *(const uint64_t *) ptr; ptr += sizeof(nb_cur);

                    ne[j] = ne_cur;
                    nb[j] = nb_cur;
                }

                const char * ptr_name      = ptr; ptr += GGML_MAX_NAME;
                const char * ptr_op_params = ptr; ptr += GGML_MAX_OP_PARAMS;

                const int32_t * ptr_arg_idx = (const int32_t *) ptr; ptr += GGML_MAX_SRC*sizeof(int32_t);

                struct ggml_tensor * args[GGML_MAX_SRC] = { NULL };

                // parse args
                for (int j = 0; j < GGML_MAX_SRC; ++j) {
                    const int32_t arg_idx = ptr_arg_idx[j];

                    if (arg_idx == -1) {
                        continue;
                    }

                    if (arg_idx < GGML_MAX_NODES) {
                        args[j] = result.leafs[arg_idx];
                    } else {
                        args[j] = result.nodes[arg_idx - GGML_MAX_NODES];
                    }
                }

                // create the tensor
                // "view" operations are handled differently
                // TODO: handle inplace ops - currently a copy is always made

                struct ggml_tensor * tensor = NULL;

                switch (eop) {
                    // TODO: implement other view ops
                    case GGML_OP_RESHAPE:
                        {
                            tensor = ggml_reshape_4d(*ctx_eval, args[0], ne[0], ne[1], ne[2], ne[3]);
                        } break;
                    case GGML_OP_VIEW:
                        {
                            tensor = ggml_view_4d(*ctx_eval, args[0], ne[0], ne[1], ne[2], ne[3], 0, 0, 0, 0);

                            size_t offs;
                            memcpy(&offs, ptr_op_params, sizeof(offs));

                            tensor->data = ((char *) tensor->data) + offs;
                        } break;
                    case GGML_OP_TRANSPOSE:
                        {
                            tensor = ggml_transpose(*ctx_eval, args[0]);
                        } break;
                    case GGML_OP_PERMUTE:
                        {
                            tensor = ggml_view_4d(*ctx_eval, args[0], ne[0], ne[1], ne[2], ne[3], 0, 0, 0, 0);
                        } break;
                    default:
                        {
                            tensor = ggml_new_tensor(*ctx_eval, (enum ggml_type) type, n_dims, ne);

                            tensor->op = eop;
                        } break;
                }

                memcpy(tensor->name,      ptr_name,      GGML_MAX_NAME);
                memcpy(tensor->op_params, ptr_op_params, GGML_MAX_OP_PARAMS);

                for (int j = 0; j < GGML_MAX_DIMS; ++j) {
                    tensor->nb[j] = nb[j];
                }

                for (int j = 0; j < GGML_MAX_SRC; ++j) {
                    tensor->src[j] = args[j];
                }

                result.nodes[i] = tensor;

                fprintf(stderr, "%s: loaded node %d: '%16s', %3d dims, %9zu bytes\n", __func__, i, tensor->name, n_dims, ggml_nbytes(tensor));
            }
        }
    }

    return result;
}

void ggml_graph_print(const struct ggml_cgraph * cgraph) {
    int64_t perf_total_per_op_us[GGML_OP_COUNT] = {0};

    GGML_PRINT("=== GRAPH ===\n");

    GGML_PRINT("n_nodes = %d\n", cgraph->n_nodes);
    for (int i = 0; i < cgraph->n_nodes; i++) {
        struct ggml_tensor * node = cgraph->nodes[i];

        perf_total_per_op_us[node->op] += MAX(1, node->perf_time_us);

        GGML_PRINT(" - %3d: [ %5" PRId64 ", %5" PRId64 ", %5" PRId64 "] %16s %s (%3d) cpu = %7.3f / %7.3f ms, wall = %7.3f / %7.3f ms\n",
                i,
                node->ne[0], node->ne[1], node->ne[2],
                ggml_op_name(node->op), node->is_param ? "x" : node->grad ? "g" : " ", node->perf_runs,
                (double) node->perf_cycles  / (double) ggml_cycles_per_ms(),
                (double) node->perf_cycles  / (double) ggml_cycles_per_ms() / (double) node->perf_runs,
                (double) node->perf_time_us / 1000.0,
                (double) node->perf_time_us / 1000.0 / node->perf_runs);
    }

    GGML_PRINT("n_leafs = %d\n", cgraph->n_leafs);
    for (int i = 0; i < cgraph->n_leafs; i++) {
        struct ggml_tensor * node = cgraph->leafs[i];

        GGML_PRINT(" - %3d: [ %5" PRId64 ", %5" PRId64 "] %8s\n",
                i,
                node->ne[0], node->ne[1],
                ggml_op_name(node->op));
    }

    for (int i = 0; i < GGML_OP_COUNT; i++) {
        if (perf_total_per_op_us[i] == 0) {
            continue;
        }

        GGML_PRINT("perf_total_per_op_us[%16s] = %7.3f ms\n", ggml_op_name(i), (double) perf_total_per_op_us[i] / 1000.0);
    }

    GGML_PRINT("========================================\n");
}

// check if node is part of the graph
static bool ggml_graph_find(const struct ggml_cgraph * cgraph, const struct ggml_tensor * node) {
    if (cgraph == NULL) {
        return true;
    }

    for (int i = 0; i < cgraph->n_nodes; i++) {
        if (cgraph->nodes[i] == node) {
            return true;
        }
    }

    return false;
}

static struct ggml_tensor * ggml_graph_get_parent(const struct ggml_cgraph * cgraph, const struct ggml_tensor * node) {
    for (int i = 0; i < cgraph->n_nodes; i++) {
        struct ggml_tensor * parent = cgraph->nodes[i];

        if (parent->grad == node) {
            return parent;
        }
    }

    return NULL;
}

static void ggml_graph_dump_dot_node_edge(FILE * fp, const struct ggml_cgraph * gb, struct ggml_tensor * node, struct ggml_tensor * parent, const char * label) {
    struct ggml_tensor * gparent  = ggml_graph_get_parent(gb, node);
    struct ggml_tensor * gparent0 = ggml_graph_get_parent(gb, parent);
    fprintf(fp, " \"%p\":%s -> \"%p\":%s [ arrowhead = %s; style = %s; label = \"%s\"; ]\n",
            gparent0 ? (void *) gparent0 : (void *) parent,
            gparent0 ? "g" : "x",
            gparent  ? (void *) gparent  : (void *) node,
            gparent  ? "g" : "x",
            gparent  ? "empty" : "vee",
            gparent  ? "dashed" : "solid",
            label);
}

static void ggml_graph_dump_dot_leaf_edge(FILE * fp, struct ggml_tensor * node, struct ggml_tensor * parent, const char * label) {
    fprintf(fp, " \"%p\":%s -> \"%p\":%s [ label = \"%s\"; ]\n",
            (void *) parent, "x",
            (void *) node, "x",
            label);
}

void ggml_graph_dump_dot(const struct ggml_cgraph * gb, const struct ggml_cgraph * gf, const char * filename) {
    char color[16];

    FILE * fp = fopen(filename, "w");
    GGML_ASSERT(fp);

    fprintf(fp, "digraph G {\n");
    fprintf(fp, " newrank = true;\n");
    fprintf(fp, " rankdir = LR;\n");

    for (int i = 0; i < gb->n_nodes; i++) {
        struct ggml_tensor * node = gb->nodes[i];

        if (ggml_graph_get_parent(gb, node) != NULL) {
            continue;
        }

        if (node->is_param) {
            snprintf(color, sizeof(color), "yellow");
        } else if (node->grad) {
            if (ggml_graph_find(gf, node)) {
                snprintf(color, sizeof(color), "green");
            } else {
                snprintf(color, sizeof(color), "lightblue");
            }
        } else {
            snprintf(color, sizeof(color), "white");
        }

        fprintf(fp, " \"%p\" [ "
                    "style = filled; fillcolor = %s; shape = record; "
                    "label=\"",
                (void *) node, color);

        if (strlen(node->name) > 0) {
            fprintf(fp, "%s (%s)|", node->name, ggml_type_name(node->type));
        } else {
            fprintf(fp, "(%s)|", ggml_type_name(node->type));
        }

        if (node->n_dims == 2) {
            fprintf(fp, "%d [%" PRId64 ", %" PRId64 "] | <x>%s", i, node->ne[0], node->ne[1], ggml_op_symbol(node->op));
        } else {
            fprintf(fp, "%d [%" PRId64 ", %" PRId64 ", %" PRId64 "] | <x>%s", i, node->ne[0], node->ne[1], node->ne[2], ggml_op_symbol(node->op));
        }

        if (node->grad) {
            fprintf(fp, " | <g>%s\"; ]\n", ggml_op_symbol(node->grad->op));
        } else {
            fprintf(fp, "\"; ]\n");
        }
    }

    for (int i = 0; i < gb->n_leafs; i++) {
        struct ggml_tensor * node = gb->leafs[i];

        snprintf(color, sizeof(color), "pink");

        fprintf(fp, " \"%p\" [ "
                    "style = filled; fillcolor = %s; shape = record; "
                    "label=\"<x>",
                (void *) node, color);

        if (strlen(node->name) > 0) {
            fprintf(fp, "%s (%s)|", node->name, ggml_type_name(node->type));
        } else {
            fprintf(fp, "(%s)|", ggml_type_name(node->type));
        }

        fprintf(fp, "CONST %d [%" PRId64 ", %" PRId64 "]", i, node->ne[0], node->ne[1]);
        if (ggml_nelements(node) < 5) {
            fprintf(fp, " | (");
            for (int j = 0; j < ggml_nelements(node); j++) {
                if (node->type == GGML_TYPE_I8 || node->type == GGML_TYPE_I16 || node->type == GGML_TYPE_I32) {
                    fprintf(fp, "%d", ggml_get_i32_1d(node, j));
                }
                else if (node->type == GGML_TYPE_F32 || node->type == GGML_TYPE_F16) {
                    fprintf(fp, "%.1e", (double)ggml_get_f32_1d(node, j));
                }
                else {
                    fprintf(fp, "#");
                }
                if (j < ggml_nelements(node) - 1) {
                    fprintf(fp, ", ");
                }
            }
            fprintf(fp, ")");
        }
        fprintf(fp, "\"; ]\n");
    }

    for (int i = 0; i < gb->n_nodes; i++) {
        struct ggml_tensor * node = gb->nodes[i];

        for (int j = 0; j < GGML_MAX_SRC; j++) {
            if (node->src[j]) {
                char label[16];
                snprintf(label, sizeof(label), "src %d", j);
                ggml_graph_dump_dot_node_edge(fp, gb, node, node->src[j], label);
            }
        }
    }

    for (int i = 0; i < gb->n_leafs; i++) {
        struct ggml_tensor * node = gb->leafs[i];

        for (int j = 0; j < GGML_MAX_SRC; j++) {
            if (node->src[j]) {
                char label[16];
                snprintf(label, sizeof(label), "src %d", j);
                ggml_graph_dump_dot_leaf_edge(fp, node, node->src[j], label);
            }
        }
    }

    fprintf(fp, "}\n");

    fclose(fp);

    GGML_PRINT("%s: dot -Tpng %s -o %s.png && open %s.png\n", __func__, filename, filename, filename);
  14184. }
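
// Example usage (illustrative sketch): visualize the backward graph of a
// scalar loss f; 'ctx' and 'f' are assumed to exist in the caller.
//
//   struct ggml_cgraph gf = ggml_build_forward (f);
//   struct ggml_cgraph gb = ggml_build_backward(ctx, &gf, true);
//   ggml_graph_dump_dot(&gb, &gf, "graph.dot");
//   // render with: dot -Tpng graph.dot -o graph.png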
////////////////////////////////////////////////////////////////////////////////

static void ggml_opt_set_params(int np, struct ggml_tensor * const ps[], const float * x) {
    int i = 0;
    for (int p = 0; p < np; ++p) {
        const int64_t ne = ggml_nelements(ps[p]);
        // TODO: add function to set tensor from array
        for (int64_t j = 0; j < ne; ++j) {
            ggml_set_f32_1d(ps[p], j, x[i++]);
        }
    }
}

static void ggml_opt_get_params(int np, struct ggml_tensor * const ps[], float * x) {
    int i = 0;
    for (int p = 0; p < np; ++p) {
        const int64_t ne = ggml_nelements(ps[p]);
        // TODO: add function to get all elements at once
        for (int64_t j = 0; j < ne; ++j) {
            x[i++] = ggml_get_f32_1d(ps[p], j);
        }
    }
}

static void ggml_opt_get_grad(int np, struct ggml_tensor * const ps[], float * g) {
    int i = 0;
    for (int p = 0; p < np; ++p) {
        const int64_t ne = ggml_nelements(ps[p]);
        // TODO: add function to get all elements at once
        for (int64_t j = 0; j < ne; ++j) {
            g[i++] = ggml_get_f32_1d(ps[p]->grad, j);
        }
    }
}
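
// Note (descriptive): the three helpers above flatten every trainable tensor
// (and its gradient) into one contiguous float vector of nx elements, and
// scatter updates back again. This lets the optimizers below operate with
// plain ggml_vec_* routines, regardless of how many tensors are trained.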
//
// ADAM
//
//   ref: https://arxiv.org/pdf/1412.6980.pdf
//
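// Note (descriptive, not from the paper): params.adam.sched is an extra
// schedule multiplier applied below to both the learning rate (alpha) and the
// weight decay, so a caller can scale the effective step size per run without
// re-creating the optimizer context.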
static enum ggml_opt_result ggml_opt_adam(
        struct ggml_context * ctx,
        struct ggml_opt_context * opt,
        struct ggml_opt_params params,
        struct ggml_tensor * f,
        struct ggml_cgraph * gf,
        struct ggml_cgraph * gb) {
    GGML_ASSERT(ggml_is_scalar(f));

    // these will store the parameters we want to optimize
    struct ggml_tensor * ps[GGML_MAX_PARAMS];

    int np = 0;
    int nx = 0;
    for (int i = 0; i < gf->n_nodes; ++i) {
        if (gf->nodes[i]->is_param) {
            GGML_PRINT_DEBUG("found param %d: grad->op = %d\n", np, gf->nodes[i]->grad->op);

            GGML_ASSERT(np < GGML_MAX_PARAMS);

            ps[np++] = gf->nodes[i];
            nx += ggml_nelements(gf->nodes[i]);
        }
    }

    if ((opt->params.type != params.type) || (opt->nx != nx) || (opt->params.past != params.past)) {
        int iter = opt->iter;
        ggml_opt_init(opt->ctx, opt, params, nx);
        opt->iter = iter;
    }

    // constants
    const float sched = params.adam.sched;
    const float decay = params.adam.decay * sched;
    const float alpha = params.adam.alpha * sched;
    const float beta1 = params.adam.beta1;
    const float beta2 = params.adam.beta2;
    const float eps   = params.adam.eps;

    float * x  = opt->adam.x->data;  // view of the parameters
    float * g1 = opt->adam.g1->data; // gradient
    float * g2 = opt->adam.g2->data; // gradient squared
    float * m  = opt->adam.m->data;  // first moment
    float * v  = opt->adam.v->data;  // second moment
    float * mh = opt->adam.mh->data; // first moment hat
    float * vh = opt->adam.vh->data; // second moment hat

    float * pf = params.past > 0 ? opt->adam.pf->data : NULL; // past function values

    // update view
    ggml_opt_get_params(np, ps, x);

    // compute the function value
    ggml_graph_reset(gf);
    ggml_set_f32    (f->grad, 1.0f);

    ggml_graph_compute_with_ctx(ctx, gb, params.n_threads);

    opt->adam.fx_prev = ggml_get_f32_1d(f, 0);
    opt->adam.fx_best = opt->adam.fx_prev;
    if (pf) {
        pf[opt->iter % params.past] = opt->adam.fx_prev;
    }

    // initialize
    if (opt->just_initialized) {
        opt->adam.n_no_improvement = 0;
        opt->just_initialized = false;
    }

    float * fx_best = &opt->adam.fx_best;
    float * fx_prev = &opt->adam.fx_prev;
    int * n_no_improvement = &opt->adam.n_no_improvement;

    int iter0 = opt->iter;

    // run the optimizer
    for (int t = 0; t < params.adam.n_iter; ++t) {
        opt->iter = iter0 + t + 1;
        GGML_PRINT_DEBUG  ("=== iter %d ===\n", t);

        GGML_PRINT_DEBUG  ("f      = %10.6f\n", ggml_get_f32_1d(f, 0));
        GGML_PRINT_DEBUG_5("df/dx0 = %10.6f\n", ggml_get_f32_1d(ps[0]->grad, 0));
        GGML_PRINT_DEBUG_5("df/dx1 = %10.6f\n", ggml_get_f32_1d(ps[1]->grad, 0));

        for (int i = 0; i < np; ++i) {
            GGML_PRINT_DEBUG("param %d: %10.6f, g = %10.6f\n", i,
                    ggml_get_f32_1d(ps[i], 0), ggml_get_f32_1d(ps[i]->grad, 0));
        }

        const int64_t t_start_wall = ggml_time_us();
        const int64_t t_start_cpu  = ggml_cycles();
        UNUSED(t_start_wall);
        UNUSED(t_start_cpu);

        {
            // update the gradient
            ggml_opt_get_grad(np, ps, g1);

            // m_t = beta1*m_t-1 + (1 - beta1)*g_t
            ggml_vec_scale_f32(nx, m, beta1);
            ggml_vec_mad_f32  (nx, m, g1, 1.0f - beta1);

            // g2 = g1^2
            ggml_vec_sqr_f32  (nx, g2, g1);

            // v_t = beta2*v_t-1 + (1 - beta2)*g_t^2
            ggml_vec_scale_f32(nx, v, beta2);
            ggml_vec_mad_f32  (nx, v, g2, 1.0f - beta2);

            // m^hat = m_t / (1 - beta1^t)
            // v^hat = v_t / (1 - beta2^t)
            // x_t = x_t-1 - sched*(alpha*m^hat/(sqrt(v^hat) + eps) + decay*x_t-1)
            // x_t = x_t-1 - sched*alpha*m^hat/(sqrt(v^hat) + eps) - sched*decay*x_t-1
            // x_t = x_t-1*(1-sched*decay) - sched*alpha*m^hat/(sqrt(v^hat) + eps)
            // x_t = x_t-1*(1-sched*decay) + sched*decay*(-alpha/decay)*m^hat/(sqrt(v^hat) + eps)
            // x_t = mix(x_t-1, (-alpha/decay)*m^hat/(sqrt(v^hat) + eps), sched*decay)
            ggml_vec_cpy_f32  (nx, mh, m);
            ggml_vec_cpy_f32  (nx, vh, v);

            ggml_vec_scale_f32(nx, mh, alpha/(1.0f - powf(beta1, opt->iter)));
            ggml_vec_scale_f32(nx, vh,  1.0f/(1.0f - powf(beta2, opt->iter)));

            ggml_vec_sqrt_f32 (nx, vh, vh);
            ggml_vec_acc1_f32 (nx, vh, eps);

            ggml_vec_div_f32  (nx, mh, mh, vh);
            ggml_vec_scale_f32(nx, x, 1.0f - decay);
            ggml_vec_sub_f32  (nx, x, x, mh);

            // update the parameters
            ggml_opt_set_params(np, ps, x);
        }

        ggml_graph_reset(gf);
        ggml_set_f32    (f->grad, 1.0f);

        ggml_graph_compute_with_ctx(ctx, gb, params.n_threads);

        const float fx = ggml_get_f32_1d(f, 0);

        // check convergence
        if (fabsf(fx - fx_prev[0])/fx < params.adam.eps_f) {
            GGML_PRINT_DEBUG("converged\n");

            return GGML_OPT_OK;
        }

        // delta-based convergence test
        if (pf != NULL) {
            // need at least params.past iterations to start checking for convergence
            if (params.past <= iter0 + t) {
                const float rate = (pf[(iter0 + t)%params.past] - fx)/fx;

                if (fabsf(rate) < params.delta) {
                    return GGML_OPT_OK;
                }
            }

            pf[(iter0 + t)%params.past] = fx;
        }

        // check for improvement
        if (params.max_no_improvement > 0) {
            if (fx_best[0] > fx) {
                fx_best[0] = fx;
                n_no_improvement[0] = 0;
            } else {
                ++n_no_improvement[0];

                if (n_no_improvement[0] >= params.max_no_improvement) {
                    return GGML_OPT_OK;
                }
            }
        }

        fx_prev[0] = fx;

        {
            const int64_t t_end_cpu = ggml_cycles();
            GGML_PRINT_DEBUG("time iter:      %5.3f s\n", ((float)(t_end_cpu - t_start_cpu))/CLOCKS_PER_SEC);
            UNUSED(t_end_cpu);

            const int64_t t_end_wall = ggml_time_us();
            GGML_PRINT_DEBUG("wall time iter: %5.3f s\n", (t_end_wall - t_start_wall)/1e6);
            UNUSED(t_end_wall);
        }
    }

    return GGML_OPT_DID_NOT_CONVERGE;
}
//
// L-BFGS
//
// the L-BFGS implementation below is based on the following implementation:
//
//   https://github.com/chokkan/liblbfgs
//

struct ggml_lbfgs_iteration_data {
    float alpha;
    float ys;
    float * s;
    float * y;
};
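
// Note (descriptive): the limited memory keeps, for each of the last
// params.lbfgs.m iterations, one correction pair s = x_{k+1} - x_k and
// y = g_{k+1} - g_k, together with the cached scalars ys = y.s (= 1/rho) and
// alpha used by the two-loop recursion. In ggml_opt_lbfgs below these live in
// the lms/lmy/lmys/lmal tensors of the opt context.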
static enum ggml_opt_result linesearch_backtracking(
        struct ggml_context * ctx,
        const struct ggml_opt_params * params,
        int nx,
        float * x,
        float * fx,
        float * g,
        float * d,
        float * step,
        const float * xp,
        struct ggml_tensor * f,
        struct ggml_cgraph * gf,
        struct ggml_cgraph * gb,
        const int np,
        struct ggml_tensor * ps[]) {
    int count = 0;

    float width  = 0.0f;
    float dg     = 0.0f;
    float finit  = 0.0f;
    float dginit = 0.0f;
    float dgtest = 0.0f;

    const float dec = 0.5f;
    const float inc = 2.1f;

    if (*step <= 0.f) {
        return GGML_LINESEARCH_INVALID_PARAMETERS;
    }

    // compute the initial gradient in the search direction
    ggml_vec_dot_f32(nx, &dginit, g, d);

    // make sure that d points to a descent direction
    if (0 < dginit) {
        return GGML_LINESEARCH_FAIL;
    }

    // initialize local variables
    finit  = *fx;
    dgtest = params->lbfgs.ftol*dginit;

    while (true) {
        ggml_vec_cpy_f32(nx, x, xp);
        ggml_vec_mad_f32(nx, x, d, *step);

        // evaluate the function and gradient values
        {
            ggml_opt_set_params(np, ps, x);

            ggml_graph_reset(gf);
            ggml_set_f32    (f->grad, 1.0f);

            ggml_graph_compute_with_ctx(ctx, gb, params->n_threads);

            ggml_opt_get_grad(np, ps, g);

            *fx = ggml_get_f32_1d(f, 0);
        }

        ++count;

        if (*fx > finit + (*step)*dgtest) {
            width = dec;
        } else {
            // Armijo condition is satisfied
            if (params->lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_ARMIJO) {
                return count;
            }

            ggml_vec_dot_f32(nx, &dg, g, d);

            // check the Wolfe condition
            if (dg < params->lbfgs.wolfe * dginit) {
                width = inc;
            } else {
                if (params->lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_WOLFE) {
                    // regular Wolfe conditions
                    return count;
                }
                // strong Wolfe condition (GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE)
                if (dg > -params->lbfgs.wolfe*dginit) {
                    // not yet satisfied - keep backtracking
                    width = dec;
                } else {
                    return count;
                }
            }
        }

        if (*step < params->lbfgs.min_step) {
            return GGML_LINESEARCH_MINIMUM_STEP;
        }
        if (*step > params->lbfgs.max_step) {
            return GGML_LINESEARCH_MAXIMUM_STEP;
        }
        if (params->lbfgs.max_linesearch <= count) {
            return GGML_LINESEARCH_MAXIMUM_ITERATIONS;
        }

        (*step) *= width;
    }

    return GGML_LINESEARCH_FAIL;
}
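
// Note (descriptive): on success the line search returns the number of
// function evaluations as a positive int, while failures are signalled with
// the negative GGML_LINESEARCH_* codes - this is why the caller below only
// needs to check for ls < 0.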
static enum ggml_opt_result ggml_opt_lbfgs(
        struct ggml_context * ctx,
        struct ggml_opt_context * opt,
        struct ggml_opt_params params,
        struct ggml_tensor * f,
        struct ggml_cgraph * gf,
        struct ggml_cgraph * gb) {
    if (params.lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_WOLFE ||
        params.lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE) {
        if (params.lbfgs.wolfe <= params.lbfgs.ftol || 1.f <= params.lbfgs.wolfe) {
            return GGML_OPT_INVALID_WOLFE;
        }
    }

    const int m = params.lbfgs.m;

    // these will store the parameters we want to optimize
    struct ggml_tensor * ps[GGML_MAX_PARAMS];

    int np = 0;
    int nx = 0;
    for (int i = 0; i < gf->n_nodes; ++i) {
        if (gf->nodes[i]->is_param) {
            GGML_PRINT_DEBUG("found param %d: grad->op = %d\n", np, gf->nodes[i]->grad->op);

            GGML_ASSERT(np < GGML_MAX_PARAMS);

            ps[np++] = gf->nodes[i];
            nx += ggml_nelements(gf->nodes[i]);
        }
    }

    if ((opt->params.type != params.type) || (opt->nx != nx) || (opt->params.past != params.past) || (opt->params.lbfgs.m != params.lbfgs.m)) {
        int iter = opt->iter;
        ggml_opt_init(ctx, opt, params, nx);
        opt->iter = iter;
    }

    float * x  = opt->lbfgs.x->data;  // current parameters
    float * xp = opt->lbfgs.xp->data; // previous parameters
    float * g  = opt->lbfgs.g->data;  // current gradient
    float * gp = opt->lbfgs.gp->data; // previous gradient
    float * d  = opt->lbfgs.d->data;  // search direction

    float * pf = params.past > 0 ? opt->lbfgs.pf->data : NULL; // past function values

    float fx    = 0.0f; // cost function value
    float xnorm = 0.0f; // ||x||
    float gnorm = 0.0f; // ||g||

    // initialize x from the graph nodes
    ggml_opt_get_params(np, ps, x);

    // the L-BFGS memory
    float * lm_alpha = opt->lbfgs.lmal->data;
    float * lm_ys    = opt->lbfgs.lmys->data;
    float * lm_s     = opt->lbfgs.lms->data;
    float * lm_y     = opt->lbfgs.lmy->data;

    // evaluate the function value and its gradient
    {
        ggml_opt_set_params(np, ps, x);

        ggml_graph_reset(gf);
        ggml_set_f32    (f->grad, 1.0f);

        ggml_graph_compute_with_ctx(ctx, gb, params.n_threads);

        ggml_opt_get_grad(np, ps, g);

        fx = ggml_get_f32_1d(f, 0);
    }

    // search direction = -gradient
    ggml_vec_neg_f32(nx, d, g);

    // ||x||, ||g||
    ggml_vec_norm_f32(nx, &xnorm, x);
    ggml_vec_norm_f32(nx, &gnorm, g);

    if (xnorm < 1.0f) {
        xnorm = 1.0f;
    }

    // already optimized
    if (gnorm/xnorm <= params.lbfgs.eps) {
        return GGML_OPT_OK;
    }

    if (opt->just_initialized) {
        if (pf) {
            pf[0] = fx;
        }
        opt->lbfgs.fx_best = fx;

        // initial step
        ggml_vec_norm_inv_f32(nx, &opt->lbfgs.step, d);
        opt->lbfgs.j                = 0;
        opt->lbfgs.k                = 1;
        opt->lbfgs.end              = 0;
        opt->lbfgs.n_no_improvement = 0;
        opt->just_initialized       = false;
    }

    float * fx_best        = &opt->lbfgs.fx_best;
    float * step           = &opt->lbfgs.step;
    int * j                = &opt->lbfgs.j;
    int * k                = &opt->lbfgs.k;
    int * end              = &opt->lbfgs.end;
    int * n_no_improvement = &opt->lbfgs.n_no_improvement;

    int ls    = 0;
    int bound = 0;

    float ys   = 0.0f;
    float yy   = 0.0f;
    float beta = 0.0f;

    int it = 0;

    while (true) {
        // store the current position and gradient vectors
        ggml_vec_cpy_f32(nx, xp, x);
        ggml_vec_cpy_f32(nx, gp, g);

        ls = linesearch_backtracking(ctx, &params, nx, x, &fx, g, d, step, xp, f, gf, gb, np, ps);

        if (ls < 0) {
            // linesearch failed - go back to the previous point and return
            ggml_vec_cpy_f32(nx, x, xp);
            ggml_vec_cpy_f32(nx, g, gp);

            return ls;
        }

        ggml_vec_norm_f32(nx, &xnorm, x);
        ggml_vec_norm_f32(nx, &gnorm, g);

        GGML_PRINT_DEBUG("f = %10.6f\n", ggml_get_f32_1d(f, 0));

        if (xnorm < 1.0f) {
            xnorm = 1.0f;
        }
        if (gnorm/xnorm <= params.lbfgs.eps) {
            // converged
            return GGML_OPT_OK;
        }

        // delta-based convergence test
        if (pf != NULL) {
            // need at least params.past iterations to start checking for convergence
            if (params.past <= k[0]) {
                const float rate = (pf[k[0]%params.past] - fx)/fx;

                if (fabsf(rate) < params.delta) {
                    return GGML_OPT_OK;
                }
            }

            pf[k[0]%params.past] = fx;
        }

        // check for improvement
        if (params.max_no_improvement > 0) {
            if (fx < fx_best[0]) {
                fx_best[0] = fx;
                n_no_improvement[0] = 0;
            } else {
                n_no_improvement[0]++;

                if (n_no_improvement[0] >= params.max_no_improvement) {
                    return GGML_OPT_OK;
                }
            }
        }

        if (params.lbfgs.n_iter != 0 && params.lbfgs.n_iter < it + 1) {
            // reached the maximum number of iterations
            return GGML_OPT_DID_NOT_CONVERGE;
        }

        // update vectors s and y:
        //   s_{k+1} = x_{k+1} - x_{k} = \step * d_{k}.
        //   y_{k+1} = g_{k+1} - g_{k}.
        //
        ggml_vec_sub_f32(nx, &lm_s[end[0]*nx], x, xp);
        ggml_vec_sub_f32(nx, &lm_y[end[0]*nx], g, gp);

        // compute scalars ys and yy:
        //   ys = y^t \cdot s -> 1 / \rho.
        //   yy = y^t \cdot y.
        //
        ggml_vec_dot_f32(nx, &ys, &lm_y[end[0]*nx], &lm_s[end[0]*nx]);
        ggml_vec_dot_f32(nx, &yy, &lm_y[end[0]*nx], &lm_y[end[0]*nx]);

        lm_ys[end[0]] = ys;

        // find new search direction
        //   ref: https://en.wikipedia.org/wiki/Limited-memory_BFGS
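        //
        // two-loop recursion (descriptive summary): starting from q = -g, the
        // first loop walks the history newest-to-oldest, computing
        // alpha_j = (s_j . q)/ys_j and subtracting alpha_j*y_j from q; the
        // scale by ys/yy then stands in for the initial Hessian approximation
        // H0; the second loop walks oldest-to-newest, adding
        // (alpha_j - beta_j)*s_j, leaving d ~ -H*g as the descent direction.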
        bound = (m <= k[0]) ? m : k[0];
        k[0]++;
        it++;
        end[0] = (end[0] + 1)%m;

        // initialize search direction with -g
        ggml_vec_neg_f32(nx, d, g);

        j[0] = end[0];
        for (int i = 0; i < bound; ++i) {
            j[0] = (j[0] + m - 1) % m;
            // \alpha_{j} = \rho_{j} s^{t}_{j} \cdot q_{k+1}
            ggml_vec_dot_f32(nx, &lm_alpha[j[0]], &lm_s[j[0]*nx], d);
            lm_alpha[j[0]] /= lm_ys[j[0]];
            // q_{i} = q_{i+1} - \alpha_{i} y_{i}
            ggml_vec_mad_f32(nx, d, &lm_y[j[0]*nx], -lm_alpha[j[0]]);
        }

        ggml_vec_scale_f32(nx, d, ys/yy);

        for (int i = 0; i < bound; ++i) {
            // \beta_{j} = \rho_{j} y^t_{j} \cdot \gamma_{i}
            ggml_vec_dot_f32(nx, &beta, &lm_y[j[0]*nx], d);
            beta /= lm_ys[j[0]];
            // \gamma_{i+1} = \gamma_{i} + (\alpha_{j} - \beta_{j}) s_{j}
            ggml_vec_mad_f32(nx, d, &lm_s[j[0]*nx], lm_alpha[j[0]] - beta);
            j[0] = (j[0] + 1)%m;
        }

        step[0] = 1.0;
    }

    return GGML_OPT_DID_NOT_CONVERGE;
}
struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type) {
    struct ggml_opt_params result;

    switch (type) {
        case GGML_OPT_ADAM:
            {
                result = (struct ggml_opt_params) {
                    .type      = GGML_OPT_ADAM,
                    .n_threads = 1,
                    .past      = 0,
                    .delta     = 1e-5f,

                    .max_no_improvement = 100,

                    .print_forward_graph  = true,
                    .print_backward_graph = true,

                    .adam = {
                        .n_iter = 10000,
                        .sched  = 1.000f,
                        .decay  = 0.001f,
                        .alpha  = 0.001f,
                        .beta1  = 0.9f,
                        .beta2  = 0.999f,
                        .eps    = 1e-8f,
                        .eps_f  = 1e-5f,
                        .eps_g  = 1e-3f,
                    },
                };
            } break;
        case GGML_OPT_LBFGS:
            {
                result = (struct ggml_opt_params) {
                    .type      = GGML_OPT_LBFGS,
                    .n_threads = 1,
                    .past      = 0,
                    .delta     = 1e-5f,

                    .max_no_improvement = 0,

                    .print_forward_graph  = true,
                    .print_backward_graph = true,

                    .lbfgs = {
                        .m              = 6,
                        .n_iter         = 100,
                        .max_linesearch = 20,

                        .eps      = 1e-5f,
                        .ftol     = 1e-4f,
                        .wolfe    = 0.9f,
                        .min_step = 1e-20f,
                        .max_step = 1e+20f,

                        .linesearch = GGML_LINESEARCH_DEFAULT,
                    },
                };
            } break;
    }

    return result;
}
GGML_API void ggml_opt_init(
        struct ggml_context * ctx,
        struct ggml_opt_context * opt,
        struct ggml_opt_params params,
        int64_t nx) {
    opt->ctx    = ctx;
    opt->params = params;
    opt->iter   = 0;
    opt->nx     = nx;
    opt->just_initialized = true;

    switch (opt->params.type) {
        case GGML_OPT_ADAM:
            {
                opt->adam.x  = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx);
                opt->adam.g1 = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx);
                opt->adam.g2 = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx);
                opt->adam.m  = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx);
                opt->adam.v  = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx);
                opt->adam.mh = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx);
                opt->adam.vh = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx);
                opt->adam.pf = params.past > 0
                    ? ggml_new_tensor_1d(ctx, GGML_TYPE_F32, params.past)
                    : NULL;
                ggml_set_zero(opt->adam.x);
                ggml_set_zero(opt->adam.g1);
                ggml_set_zero(opt->adam.g2);
                ggml_set_zero(opt->adam.m);
                ggml_set_zero(opt->adam.v);
                ggml_set_zero(opt->adam.mh);
                ggml_set_zero(opt->adam.vh);
                if (opt->adam.pf) {
                    ggml_set_zero(opt->adam.pf);
                }
            } break;
        case GGML_OPT_LBFGS:
            {
                opt->lbfgs.x  = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx);
                opt->lbfgs.xp = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx);
                opt->lbfgs.g  = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx);
                opt->lbfgs.gp = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx);
                opt->lbfgs.d  = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx);
                opt->lbfgs.pf = params.past > 0
                    ? ggml_new_tensor_1d(ctx, GGML_TYPE_F32, params.past)
                    : NULL;
                opt->lbfgs.lmal = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, params.lbfgs.m);
                opt->lbfgs.lmys = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, params.lbfgs.m);
                opt->lbfgs.lms  = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, nx, params.lbfgs.m);
                opt->lbfgs.lmy  = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, nx, params.lbfgs.m);
                ggml_set_zero(opt->lbfgs.x);
                ggml_set_zero(opt->lbfgs.xp);
                ggml_set_zero(opt->lbfgs.g);
                ggml_set_zero(opt->lbfgs.gp);
                ggml_set_zero(opt->lbfgs.d);
                if (opt->lbfgs.pf) {
                    ggml_set_zero(opt->lbfgs.pf);
                }
                ggml_set_zero(opt->lbfgs.lmal);
                ggml_set_zero(opt->lbfgs.lmys);
                ggml_set_zero(opt->lbfgs.lms);
                ggml_set_zero(opt->lbfgs.lmy);
            } break;
    }
}
enum ggml_opt_result ggml_opt(
        struct ggml_context * ctx,
        struct ggml_opt_params params,
        struct ggml_tensor * f) {
    bool free_ctx = false;
    if (ctx == NULL) {
        struct ggml_init_params params_ctx = {
            .mem_size   = 16*1024*1024,
            .mem_buffer = NULL,
            .no_alloc   = false,
        };

        ctx = ggml_init(params_ctx);
        if (ctx == NULL) {
            return GGML_OPT_NO_CONTEXT;
        }

        free_ctx = true;
    }

    enum ggml_opt_result result = GGML_OPT_OK;

    struct ggml_opt_context * opt = (struct ggml_opt_context *) alloca(sizeof(struct ggml_opt_context));

    ggml_opt_init(ctx, opt, params, 0);
    result = ggml_opt_resume(ctx, opt, f);

    if (free_ctx) {
        ggml_free(ctx);
    }

    return result;
}
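
// Example usage (illustrative sketch): minimize f = (a - 3)^2 over the single
// trainable scalar 'a'; 'ctx0' and the tensor names are hypothetical.
//
//   struct ggml_tensor * a = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
//   ggml_set_param(ctx0, a);                        // mark 'a' as trainable
//   struct ggml_tensor * c = ggml_new_f32(ctx0, 3.0f);
//   struct ggml_tensor * e = ggml_sub(ctx0, a, c);
//   struct ggml_tensor * f = ggml_mul(ctx0, e, e);  // scalar loss
//
//   struct ggml_opt_params opts = ggml_opt_default_params(GGML_OPT_ADAM);
//   enum ggml_opt_result res = ggml_opt(ctx0, opts, f);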
enum ggml_opt_result ggml_opt_resume(
        struct ggml_context * ctx,
        struct ggml_opt_context * opt,
        struct ggml_tensor * f) {
    // build forward + backward compute graphs
    struct ggml_tensor * gfbuf = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, sizeof(struct ggml_cgraph) / GGML_TYPE_SIZE[GGML_TYPE_I32] + (sizeof(struct ggml_cgraph) % GGML_TYPE_SIZE[GGML_TYPE_I32] ? 1 : 0));
    struct ggml_tensor * gbbuf = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, sizeof(struct ggml_cgraph) / GGML_TYPE_SIZE[GGML_TYPE_I32] + (sizeof(struct ggml_cgraph) % GGML_TYPE_SIZE[GGML_TYPE_I32] ? 1 : 0));

    struct ggml_cgraph * gf = (struct ggml_cgraph *) gfbuf->data;
    struct ggml_cgraph * gb = (struct ggml_cgraph *) gbbuf->data;

    *gf = ggml_build_forward (f);
    *gb = ggml_build_backward(ctx, gf, true);

    return ggml_opt_resume_g(ctx, opt, f, gf, gb);
}
enum ggml_opt_result ggml_opt_resume_g(
        struct ggml_context * ctx,
        struct ggml_opt_context * opt,
        struct ggml_tensor * f,
        struct ggml_cgraph * gf,
        struct ggml_cgraph * gb) {
    // run the optimizer on the caller-provided forward + backward graphs
    enum ggml_opt_result result = GGML_OPT_OK;

    switch (opt->params.type) {
        case GGML_OPT_ADAM:
            {
                result = ggml_opt_adam(ctx, opt, opt->params, f, gf, gb);
            } break;
        case GGML_OPT_LBFGS:
            {
                result = ggml_opt_lbfgs(ctx, opt, opt->params, f, gf, gb);
            } break;
    }

    if (opt->params.print_forward_graph) {
        ggml_graph_print   (gf);
        ggml_graph_dump_dot(gf, NULL, "opt-forward.dot");
    }

    if (opt->params.print_backward_graph) {
        ggml_graph_print   (gb);
        ggml_graph_dump_dot(gb, gf, "opt-backward.dot");
    }

    return result;
}
////////////////////////////////////////////////////////////////////////////////

size_t ggml_quantize_q4_0(const float * src, void * dst, int n, int k, int64_t * hist) {
    assert(k % QK4_0 == 0);
    const int nb = k / QK4_0;

    for (int b = 0; b < n; b += k) {
        block_q4_0 * restrict y = (block_q4_0 *) dst + b/QK4_0;

        quantize_row_q4_0_reference(src + b, y, k);

        for (int i = 0; i < nb; i++) {
            for (int j = 0; j < QK4_0; j += 2) {
                const uint8_t vi0 = y[i].qs[j/2] & 0x0F;
                const uint8_t vi1 = y[i].qs[j/2] >> 4;

                hist[vi0]++;
                hist[vi1]++;
            }
        }
    }

    return (n/QK4_0*sizeof(block_q4_0));
}
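
// Example usage (illustrative sketch): quantize a contiguous f32 buffer to
// Q4_0; the concrete sizes below are hypothetical. 'n' is the total number of
// floats, laid out as rows of 'k' elements, with k a multiple of QK4_0 (32).
//
//   int n = 4096, k = 4096;
//   float   src[4096];                         // filled by the caller
//   int64_t hist[16] = {0};                    // histogram of 4-bit values
//   void  * dst = malloc((n/QK4_0)*sizeof(block_q4_0));
//   size_t  written = ggml_quantize_q4_0(src, dst, n, k, hist);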
size_t ggml_quantize_q4_1(const float * src, void * dst, int n, int k, int64_t * hist) {
    assert(k % QK4_1 == 0);
    const int nb = k / QK4_1;

    for (int b = 0; b < n; b += k) {
        block_q4_1 * restrict y = (block_q4_1 *) dst + b/QK4_1;

        quantize_row_q4_1_reference(src + b, y, k);

        for (int i = 0; i < nb; i++) {
            for (int j = 0; j < QK4_1; j += 2) {
                const uint8_t vi0 = y[i].qs[j/2] & 0x0F;
                const uint8_t vi1 = y[i].qs[j/2] >> 4;

                hist[vi0]++;
                hist[vi1]++;
            }
        }
    }

    return (n/QK4_1*sizeof(block_q4_1));
}

size_t ggml_quantize_q5_0(const float * src, void * dst, int n, int k, int64_t * hist) {
    assert(k % QK5_0 == 0);
    const int nb = k / QK5_0;

    for (int b = 0; b < n; b += k) {
        block_q5_0 * restrict y = (block_q5_0 *)dst + b/QK5_0;

        quantize_row_q5_0_reference(src + b, y, k);

        for (int i = 0; i < nb; i++) {
            uint32_t qh;
            memcpy(&qh, &y[i].qh, sizeof(qh));

            for (int j = 0; j < QK5_0; j += 2) {
                const uint8_t vh0 = ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4;
                const uint8_t vh1 = ((qh & (1u << (j + 16))) >> (j + 12));

                // cast to 16 bins
                const uint8_t vi0 = ((y[i].qs[j/2] & 0x0F) | vh0) / 2;
                const uint8_t vi1 = ((y[i].qs[j/2] >>   4) | vh1) / 2;

                hist[vi0]++;
                hist[vi1]++;
            }
        }
    }

    return (n/QK5_0*sizeof(block_q5_0));
}

size_t ggml_quantize_q5_1(const float * src, void * dst, int n, int k, int64_t * hist) {
    assert(k % QK5_1 == 0);
    const int nb = k / QK5_1;

    for (int b = 0; b < n; b += k) {
        block_q5_1 * restrict y = (block_q5_1 *)dst + b/QK5_1;

        quantize_row_q5_1_reference(src + b, y, k);

        for (int i = 0; i < nb; i++) {
            uint32_t qh;
            memcpy(&qh, &y[i].qh, sizeof(qh));

            for (int j = 0; j < QK5_1; j += 2) {
                const uint8_t vh0 = ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4;
                const uint8_t vh1 = ((qh & (1u << (j + 16))) >> (j + 12));

                // cast to 16 bins
                const uint8_t vi0 = ((y[i].qs[j/2] & 0x0F) | vh0) / 2;
                const uint8_t vi1 = ((y[i].qs[j/2] >>   4) | vh1) / 2;

                hist[vi0]++;
                hist[vi1]++;
            }
        }
    }

    return (n/QK5_1*sizeof(block_q5_1));
}

size_t ggml_quantize_q8_0(const float * src, void * dst, int n, int k, int64_t * hist) {
    assert(k % QK8_0 == 0);
    const int nb = k / QK8_0;

    for (int b = 0; b < n; b += k) {
        block_q8_0 * restrict y = (block_q8_0 *)dst + b/QK8_0;

        quantize_row_q8_0_reference(src + b, y, k);

        for (int i = 0; i < nb; i++) {
            for (int j = 0; j < QK8_0; ++j) {
                const int8_t vi = y[i].qs[j];

                hist[vi/16 + 8]++;
            }
        }
    }

    return (n/QK8_0*sizeof(block_q8_0));
}

size_t ggml_quantize_chunk(enum ggml_type type, const float * src, void * dst, int start, int n, int64_t * hist) {
    size_t result = 0;
    switch (type) {
        case GGML_TYPE_Q4_0:
            {
                GGML_ASSERT(start % QK4_0 == 0);
                block_q4_0 * block = (block_q4_0*)dst + start / QK4_0;
                result = ggml_quantize_q4_0(src + start, block, n, n, hist);
            } break;
        case GGML_TYPE_Q4_1:
            {
                GGML_ASSERT(start % QK4_1 == 0);
                block_q4_1 * block = (block_q4_1*)dst + start / QK4_1;
                result = ggml_quantize_q4_1(src + start, block, n, n, hist);
            } break;
        case GGML_TYPE_Q5_0:
            {
                GGML_ASSERT(start % QK5_0 == 0);
                block_q5_0 * block = (block_q5_0*)dst + start / QK5_0;
                result = ggml_quantize_q5_0(src + start, block, n, n, hist);
            } break;
        case GGML_TYPE_Q5_1:
            {
                GGML_ASSERT(start % QK5_1 == 0);
                block_q5_1 * block = (block_q5_1*)dst + start / QK5_1;
                result = ggml_quantize_q5_1(src + start, block, n, n, hist);
            } break;
        case GGML_TYPE_Q8_0:
            {
                GGML_ASSERT(start % QK8_0 == 0);
                block_q8_0 * block = (block_q8_0*)dst + start / QK8_0;
                result = ggml_quantize_q8_0(src + start, block, n, n, hist);
            } break;
#ifdef GGML_USE_K_QUANTS
        case GGML_TYPE_Q2_K:
            {
                GGML_ASSERT(start % QK_K == 0);
                block_q2_K * block = (block_q2_K*)dst + start / QK_K;
                result = ggml_quantize_q2_K(src + start, block, n, n, hist);
            } break;
        case GGML_TYPE_Q3_K:
            {
                GGML_ASSERT(start % QK_K == 0);
                block_q3_K * block = (block_q3_K*)dst + start / QK_K;
                result = ggml_quantize_q3_K(src + start, block, n, n, hist);
            } break;
        case GGML_TYPE_Q4_K:
            {
                GGML_ASSERT(start % QK_K == 0);
                block_q4_K * block = (block_q4_K*)dst + start / QK_K;
                result = ggml_quantize_q4_K(src + start, block, n, n, hist);
            } break;
        case GGML_TYPE_Q5_K:
            {
                GGML_ASSERT(start % QK_K == 0);
                block_q5_K * block = (block_q5_K*)dst + start / QK_K;
                result = ggml_quantize_q5_K(src + start, block, n, n, hist);
            } break;
        case GGML_TYPE_Q6_K:
            {
                GGML_ASSERT(start % QK_K == 0);
                block_q6_K * block = (block_q6_K*)dst + start / QK_K;
                result = ggml_quantize_q6_K(src + start, block, n, n, hist);
            } break;
#endif
        case GGML_TYPE_F16:
            {
                int elemsize = sizeof(ggml_fp16_t);
                ggml_fp32_to_fp16_row(src + start, (ggml_fp16_t *)dst + start, n);
                result = n * elemsize;
            } break;
        case GGML_TYPE_F32:
            {
                int elemsize = sizeof(float);
                result = n * elemsize;
                memcpy((uint8_t *)dst + start * elemsize, src + start, result);
            } break;
        default:
            assert(false);
    }
    return result;
}
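
// Example usage (illustrative sketch): quantize 'n' floats starting at
// element offset 'start' of a larger pre-allocated destination buffer;
// the variable names are hypothetical.
//
//   int64_t hist[16] = {0};
//   size_t bytes = ggml_quantize_chunk(GGML_TYPE_Q5_1, src, dst, start, n, hist);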
////////////////////////////////////////////////////////////////////////////////

int ggml_cpu_has_avx(void) {
#if defined(__AVX__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_avx2(void) {
#if defined(__AVX2__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_avx512(void) {
#if defined(__AVX512F__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_avx512_vbmi(void) {
#if defined(__AVX512VBMI__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_avx512_vnni(void) {
#if defined(__AVX512VNNI__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_fma(void) {
#if defined(__FMA__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_neon(void) {
#if defined(__ARM_NEON)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_arm_fma(void) {
#if defined(__ARM_FEATURE_FMA)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_f16c(void) {
#if defined(__F16C__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_fp16_va(void) {
#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_wasm_simd(void) {
#if defined(__wasm_simd128__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_blas(void) {
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) || defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_cublas(void) {
#if defined(GGML_USE_CUBLAS)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_clblast(void) {
#if defined(GGML_USE_CLBLAST)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_gpublas(void) {
    return ggml_cpu_has_cublas() || ggml_cpu_has_clblast();
}

int ggml_cpu_has_sse3(void) {
#if defined(__SSE3__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_vsx(void) {
#if defined(__POWER9_VECTOR__)
    return 1;
#else
    return 0;
#endif
}
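
// Note (descriptive): these helpers report compile-time feature flags baked
// into this build, not runtime CPU detection.
//
// Example usage (illustrative sketch), e.g. at program startup:
//
//   printf("system info: AVX = %d | AVX2 = %d | NEON = %d | FMA = %d | BLAS = %d\n",
//          ggml_cpu_has_avx(), ggml_cpu_has_avx2(), ggml_cpu_has_neon(),
//          ggml_cpu_has_fma(), ggml_cpu_has_blas());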
////////////////////////////////////////////////////////////////////////////////