ggml.c 628 KB

430914310143111431214313143141431514316143171431814319143201432114322143231432414325143261432714328143291433014331143321433314334143351433614337143381433914340143411434214343143441434514346143471434814349143501435114352143531435414355143561435714358143591436014361143621436314364143651436614367143681436914370143711437214373143741437514376143771437814379143801438114382143831438414385143861438714388143891439014391143921439314394143951439614397143981439914400144011440214403144041440514406144071440814409144101441114412144131441414415144161441714418144191442014421144221442314424144251442614427144281442914430144311443214433144341443514436144371443814439144401444114442144431444414445144461444714448144491445014451144521445314454144551445614457144581445914460144611446214463144641446514466144671446814469144701447114472144731447414475144761447714478144791448014481144821448314484144851448614487144881448914490144911449214493144941449514496144971449814499145001450114502145031450414505145061450714508145091451014511145121451314514145151451614517145181451914520145211452214523145241452514526145271452814529145301453114532145331453414535145361453714538145391454014541145421454314544145451454614547145481454914550145511455214553145541455514556145571455814559145601456114562145631456414565145661456714568145691457014571145721457314574145751457614577145781457914580145811458214583145841458514586145871458814589145901459114592145931459414595145961459714598145991460014601146021460314604146051460614607146081460914610146111461214613146141461514616146171461814619146201462114622146231462414625146261462714628146291463014631146321463314634146351463614637146381463914640146411464214643146441464514646146471464814649146501465114652146531465414655146561465714658146591466014661146621466314664146651466614667146681466914670146711467214673146741467514676146771467814679146801468114682146831468414685146861468714688146891469014691146921469314694146951469614697146981469914700147011470214703147041470514706147071470814709147101471114712147131471414715147161471714718147191472014721147221472314724147251472614727147281472914730147311473214733147341473514736147371473814739147401474114742147431474414745147461474714748147491475014751147521475314754147551475614757147581475914760147611476214763147641476514766147671476814769147701477114772147731477414775147761477714778147791478014781147821478314784147851478614787147881478914790147911479214793147941479514796147971479814799148001480114802148031480414805148061480714808148091481014811148121481314814148151481614817148181481914820148211482214823148241482514826148271482814829148301483114832148331483414835148361483714838148391484014841148421484314844148451484614847148481484914850148511485214853148541485514856148571485814859148601486114862148631486414865148661486714868148691487014871148721487314874148751487614877148781487914880148811488214883148841488514886148871488814889148901489114892148931489414895148961489714898148991490014901149021490314904149051490614907149081490914910149111491214913149141491514916149171491814919149201492114922149231492414925149261492714928149291493014931149321493314934149351493614937149381493914940149411494214943149441494514946149471494814949149501495114952149531495414955149561495714958149591496014961149621496314964149651496614967149681496914970149711497214973149741497514976149771497814979149801498114982149831498414985149861498714988149891499014991149921499314994149951499614997149981499915000150011500215003150041500515006150071500815009150101501115012150131501415015150161501715018150191
502015021150221502315024150251502615027150281502915030150311503215033150341503515036150371503815039150401504115042150431504415045150461504715048150491505015051150521505315054150551505615057150581505915060150611506215063150641506515066150671506815069150701507115072150731507415075150761507715078150791508015081150821508315084150851508615087150881508915090150911509215093150941509515096150971509815099151001510115102151031510415105151061510715108151091511015111151121511315114151151511615117151181511915120151211512215123151241512515126151271512815129151301513115132151331513415135151361513715138151391514015141151421514315144151451514615147151481514915150151511515215153151541515515156151571515815159151601516115162151631516415165151661516715168151691517015171151721517315174151751517615177151781517915180151811518215183151841518515186151871518815189151901519115192151931519415195151961519715198151991520015201152021520315204152051520615207152081520915210152111521215213152141521515216152171521815219152201522115222152231522415225152261522715228152291523015231152321523315234152351523615237152381523915240152411524215243152441524515246152471524815249152501525115252152531525415255152561525715258152591526015261152621526315264152651526615267152681526915270152711527215273152741527515276152771527815279152801528115282152831528415285152861528715288152891529015291152921529315294152951529615297152981529915300153011530215303153041530515306153071530815309153101531115312153131531415315153161531715318153191532015321153221532315324153251532615327153281532915330153311533215333153341533515336153371533815339153401534115342153431534415345153461534715348153491535015351153521535315354153551535615357153581535915360153611536215363153641536515366153671536815369153701537115372153731537415375153761537715378153791538015381153821538315384153851538615387153881538915390153911539215393153941539515396153971539815399154001540115402154031540415405154061540715408154091541015411154121541315414154151541615417154181541915420154211542215423154241542515426154271542815429154301543115432154331543415435154361543715438154391544015441154421544315444154451544615447154481544915450154511545215453154541545515456154571545815459154601546115462154631546415465154661546715468154691547015471154721547315474154751547615477154781547915480154811548215483154841548515486154871548815489154901549115492154931549415495154961549715498154991550015501155021550315504155051550615507155081550915510155111551215513155141551515516155171551815519155201552115522155231552415525155261552715528155291553015531155321553315534155351553615537155381553915540155411554215543155441554515546155471554815549155501555115552155531555415555155561555715558155591556015561155621556315564155651556615567155681556915570155711557215573155741557515576155771557815579155801558115582155831558415585155861558715588155891559015591155921559315594155951559615597155981559915600156011560215603156041560515606156071560815609156101561115612156131561415615156161561715618156191562015621156221562315624156251562615627156281562915630156311563215633156341563515636156371563815639156401564115642156431564415645156461564715648156491565015651156521565315654156551565615657156581565915660156611566215663156641566515666156671566815669156701567115672156731567415675156761567715678156791568015681156821568315684156851568615687156881568915690156911569215693156941569515696156971569815699157001570115702157031570415705157061570715708157091571015711157121571315714157151571615717157181571915720157211572215723157241572515726157271572815729157301
573115732157331573415735157361573715738157391574015741157421574315744157451574615747157481574915750157511575215753157541575515756157571575815759157601576115762157631576415765157661576715768157691577015771157721577315774157751577615777157781577915780157811578215783157841578515786157871578815789157901579115792157931579415795157961579715798157991580015801158021580315804158051580615807158081580915810158111581215813158141581515816158171581815819158201582115822158231582415825158261582715828158291583015831158321583315834158351583615837158381583915840158411584215843158441584515846158471584815849158501585115852158531585415855158561585715858158591586015861158621586315864158651586615867158681586915870158711587215873158741587515876158771587815879158801588115882158831588415885158861588715888158891589015891158921589315894158951589615897158981589915900159011590215903159041590515906159071590815909159101591115912159131591415915159161591715918159191592015921159221592315924159251592615927159281592915930159311593215933159341593515936159371593815939159401594115942159431594415945159461594715948159491595015951159521595315954159551595615957159581595915960159611596215963159641596515966159671596815969159701597115972159731597415975159761597715978159791598015981159821598315984159851598615987159881598915990159911599215993159941599515996159971599815999160001600116002160031600416005160061600716008160091601016011160121601316014160151601616017160181601916020160211602216023160241602516026160271602816029160301603116032160331603416035160361603716038160391604016041160421604316044160451604616047160481604916050160511605216053160541605516056160571605816059160601606116062160631606416065160661606716068160691607016071160721607316074160751607616077160781607916080160811608216083160841608516086160871608816089160901609116092160931609416095160961609716098160991610016101161021610316104161051610616107161081610916110161111611216113161141611516116161171611816119161201612116122161231612416125161261612716128161291613016131161321613316134161351613616137161381613916140161411614216143161441614516146161471614816149161501615116152161531615416155161561615716158161591616016161161621616316164161651616616167161681616916170161711617216173161741617516176161771617816179161801618116182161831618416185161861618716188161891619016191161921619316194161951619616197161981619916200162011620216203162041620516206162071620816209162101621116212162131621416215162161621716218162191622016221162221622316224162251622616227162281622916230162311623216233162341623516236162371623816239162401624116242162431624416245162461624716248162491625016251162521625316254162551625616257162581625916260162611626216263162641626516266162671626816269162701627116272162731627416275162761627716278162791628016281162821628316284162851628616287162881628916290162911629216293162941629516296162971629816299163001630116302163031630416305163061630716308163091631016311163121631316314163151631616317163181631916320163211632216323163241632516326163271632816329163301633116332163331633416335163361633716338163391634016341163421634316344163451634616347163481634916350163511635216353163541635516356163571635816359163601636116362163631636416365163661636716368163691637016371163721637316374163751637616377163781637916380163811638216383163841638516386163871638816389163901639116392163931639416395163961639716398163991640016401164021640316404164051640616407164081640916410164111641216413164141641516416164171641816419164201642116422164231642416425164261642716428164291643016431164321643316434164351643616437164381643916440164411
644216443164441644516446164471644816449164501645116452164531645416455164561645716458164591646016461164621646316464164651646616467164681646916470164711647216473164741647516476164771647816479164801648116482164831648416485164861648716488164891649016491164921649316494164951649616497164981649916500165011650216503165041650516506165071650816509165101651116512165131651416515165161651716518165191652016521165221652316524165251652616527165281652916530165311653216533165341653516536165371653816539165401654116542165431654416545165461654716548165491655016551165521655316554165551655616557165581655916560165611656216563165641656516566165671656816569165701657116572165731657416575165761657716578165791658016581165821658316584165851658616587165881658916590165911659216593165941659516596165971659816599166001660116602166031660416605166061660716608166091661016611166121661316614166151661616617166181661916620166211662216623166241662516626166271662816629166301663116632166331663416635166361663716638166391664016641166421664316644166451664616647166481664916650166511665216653166541665516656166571665816659166601666116662166631666416665166661666716668166691667016671166721667316674166751667616677166781667916680166811668216683166841668516686166871668816689166901669116692166931669416695166961669716698166991670016701167021670316704167051670616707167081670916710167111671216713167141671516716167171671816719167201672116722167231672416725167261672716728167291673016731167321673316734167351673616737167381673916740167411674216743167441674516746167471674816749167501675116752167531675416755167561675716758167591676016761167621676316764167651676616767167681676916770167711677216773167741677516776167771677816779167801678116782167831678416785167861678716788167891679016791167921679316794167951679616797167981679916800168011680216803168041680516806168071680816809168101681116812168131681416815168161681716818168191682016821168221682316824168251682616827168281682916830168311683216833168341683516836168371683816839168401684116842168431684416845168461684716848168491685016851168521685316854168551685616857168581685916860168611686216863168641686516866168671686816869168701687116872168731687416875168761687716878168791688016881168821688316884168851688616887168881688916890168911689216893168941689516896168971689816899169001690116902169031690416905169061690716908169091691016911169121691316914169151691616917169181691916920169211692216923169241692516926169271692816929169301693116932169331693416935169361693716938169391694016941169421694316944169451694616947169481694916950169511695216953169541695516956169571695816959169601696116962169631696416965169661696716968169691697016971169721697316974169751697616977169781697916980169811698216983169841698516986169871698816989169901699116992169931699416995169961699716998169991700017001170021700317004170051700617007170081700917010170111701217013170141701517016170171701817019170201702117022170231702417025170261702717028170291703017031170321703317034170351703617037170381703917040170411704217043170441704517046170471704817049170501705117052170531705417055170561705717058170591706017061170621706317064170651706617067170681706917070170711707217073170741707517076170771707817079170801708117082170831708417085170861708717088170891709017091170921709317094170951709617097170981709917100171011710217103171041710517106171071710817109171101711117112171131711417115171161711717118171191712017121171221712317124171251712617127171281712917130171311713217133171341713517136171371713817139171401714117142171431714417145171461714717148171491715017151171521
715317154171551715617157171581715917160171611716217163171641716517166171671716817169171701717117172171731717417175171761717717178171791718017181171821718317184171851718617187171881718917190171911719217193171941719517196171971719817199172001720117202172031720417205172061720717208172091721017211172121721317214172151721617217172181721917220172211722217223172241722517226172271722817229172301723117232172331723417235172361723717238172391724017241172421724317244172451724617247172481724917250172511725217253172541725517256172571725817259172601726117262172631726417265172661726717268172691727017271172721727317274172751727617277172781727917280172811728217283172841728517286172871728817289172901729117292172931729417295172961729717298172991730017301173021730317304173051730617307173081730917310173111731217313173141731517316173171731817319173201732117322173231732417325173261732717328173291733017331173321733317334173351733617337173381733917340173411734217343173441734517346173471734817349173501735117352173531735417355173561735717358173591736017361173621736317364173651736617367173681736917370173711737217373173741737517376173771737817379173801738117382173831738417385173861738717388173891739017391173921739317394173951739617397173981739917400174011740217403174041740517406174071740817409174101741117412174131741417415174161741717418174191742017421174221742317424174251742617427174281742917430174311743217433174341743517436174371743817439174401744117442174431744417445174461744717448174491745017451174521745317454174551745617457174581745917460174611746217463174641746517466174671746817469174701747117472174731747417475174761747717478174791748017481174821748317484174851748617487174881748917490174911749217493174941749517496174971749817499175001750117502175031750417505175061750717508175091751017511175121751317514175151751617517175181751917520175211752217523175241752517526175271752817529175301753117532175331753417535175361753717538175391754017541175421754317544175451754617547175481754917550175511755217553175541755517556175571755817559175601756117562175631756417565175661756717568175691757017571175721757317574175751757617577175781757917580175811758217583175841758517586175871758817589175901759117592175931759417595175961759717598175991760017601176021760317604176051760617607176081760917610176111761217613176141761517616176171761817619176201762117622176231762417625176261762717628176291763017631176321763317634176351763617637176381763917640176411764217643176441764517646176471764817649176501765117652176531765417655176561765717658176591766017661176621766317664176651766617667176681766917670176711767217673176741767517676176771767817679176801768117682176831768417685176861768717688176891769017691176921769317694176951769617697176981769917700177011770217703177041770517706177071770817709177101771117712177131771417715177161771717718177191772017721177221772317724177251772617727177281772917730177311773217733177341773517736177371773817739177401774117742177431774417745177461774717748177491775017751177521775317754177551775617757177581775917760177611776217763177641776517766177671776817769177701777117772177731777417775177761777717778177791778017781177821778317784177851778617787177881778917790177911779217793177941779517796177971779817799178001780117802178031780417805178061780717808178091781017811178121781317814178151781617817178181781917820178211782217823178241782517826178271782817829178301783117832178331783417835178361783717838178391784017841178421784317844178451784617847178481784917850178511785217853178541785517856178571785817859178601786117862178631
786417865178661786717868178691787017871178721787317874178751787617877178781787917880178811788217883178841788517886178871788817889178901789117892178931789417895178961789717898178991790017901179021790317904179051790617907179081790917910179111791217913179141791517916179171791817919179201792117922179231792417925179261792717928179291793017931179321793317934179351793617937179381793917940179411794217943179441794517946179471794817949179501795117952179531795417955179561795717958179591796017961179621796317964179651796617967179681796917970179711797217973179741797517976179771797817979179801798117982179831798417985179861798717988179891799017991179921799317994179951799617997179981799918000180011800218003180041800518006180071800818009180101801118012180131801418015180161801718018180191802018021180221802318024180251802618027180281802918030180311803218033180341803518036180371803818039180401804118042180431804418045180461804718048180491805018051180521805318054180551805618057180581805918060180611806218063180641806518066180671806818069180701807118072180731807418075180761807718078180791808018081180821808318084180851808618087180881808918090180911809218093180941809518096180971809818099181001810118102181031810418105181061810718108181091811018111181121811318114181151811618117181181811918120181211812218123181241812518126181271812818129181301813118132181331813418135181361813718138181391814018141181421814318144181451814618147181481814918150181511815218153181541815518156181571815818159181601816118162181631816418165181661816718168181691817018171181721817318174181751817618177181781817918180181811818218183181841818518186181871818818189181901819118192181931819418195181961819718198181991820018201182021820318204182051820618207182081820918210182111821218213182141821518216182171821818219182201822118222182231822418225182261822718228182291823018231182321823318234182351823618237182381823918240182411824218243182441824518246182471824818249182501825118252182531825418255182561825718258182591826018261182621826318264182651826618267182681826918270182711827218273182741827518276182771827818279182801828118282182831828418285182861828718288182891829018291182921829318294182951829618297182981829918300183011830218303183041830518306183071830818309183101831118312183131831418315183161831718318183191832018321183221832318324183251832618327183281832918330183311833218333183341833518336183371833818339183401834118342183431834418345183461834718348183491835018351183521835318354183551835618357183581835918360183611836218363183641836518366183671836818369183701837118372183731837418375183761837718378183791838018381183821838318384183851838618387183881838918390183911839218393183941839518396183971839818399184001840118402184031840418405184061840718408184091841018411184121841318414184151841618417184181841918420184211842218423184241842518426184271842818429184301843118432184331843418435184361843718438184391844018441184421844318444184451844618447184481844918450184511845218453184541845518456184571845818459184601846118462184631846418465184661846718468184691847018471184721847318474184751847618477184781847918480184811848218483184841848518486184871848818489184901849118492184931849418495184961849718498184991850018501185021850318504185051850618507185081850918510185111851218513185141851518516185171851818519185201852118522185231852418525185261852718528185291853018531185321853318534185351853618537185381853918540185411854218543185441854518546185471854818549185501855118552185531855418555185561855718558185591856018561185621856318564185651856618567185681856918570185711857218573185741
857518576185771857818579185801858118582185831858418585185861858718588185891859018591185921859318594185951859618597185981859918600186011860218603186041860518606186071860818609186101861118612186131861418615186161861718618186191862018621186221862318624186251862618627186281862918630186311863218633186341863518636186371863818639186401864118642186431864418645186461864718648186491865018651186521865318654186551865618657186581865918660186611866218663186641866518666186671866818669186701867118672186731867418675186761867718678186791868018681186821868318684186851868618687186881868918690186911869218693186941869518696186971869818699187001870118702187031870418705187061870718708187091871018711187121871318714187151871618717187181871918720187211872218723187241872518726187271872818729187301873118732187331873418735187361873718738187391874018741187421874318744187451874618747187481874918750187511875218753187541875518756187571875818759187601876118762187631876418765187661876718768187691877018771187721877318774187751877618777187781877918780187811878218783187841878518786187871878818789187901879118792187931879418795187961879718798187991880018801188021880318804188051880618807188081880918810188111881218813188141881518816188171881818819188201882118822188231882418825188261882718828188291883018831188321883318834188351883618837188381883918840188411884218843188441884518846188471884818849188501885118852188531885418855188561885718858188591886018861188621886318864188651886618867188681886918870188711887218873188741887518876188771887818879188801888118882188831888418885188861888718888188891889018891188921889318894188951889618897188981889918900189011890218903189041890518906189071890818909189101891118912189131891418915189161891718918189191892018921189221892318924189251892618927189281892918930189311893218933189341893518936189371893818939189401894118942189431894418945189461894718948189491895018951189521895318954189551895618957189581895918960189611896218963189641896518966189671896818969189701897118972189731897418975189761897718978189791898018981189821898318984189851898618987189881898918990189911899218993189941899518996189971899818999190001900119002190031900419005190061900719008190091901019011190121901319014190151901619017190181901919020190211902219023190241902519026190271902819029190301903119032190331903419035190361903719038190391904019041190421904319044190451904619047190481904919050190511905219053190541905519056190571905819059190601906119062190631906419065190661906719068190691907019071190721907319074190751907619077190781907919080190811908219083190841908519086190871908819089190901909119092190931909419095190961909719098190991910019101191021910319104191051910619107191081910919110191111911219113191141911519116191171911819119191201912119122191231912419125191261912719128191291913019131191321913319134191351913619137191381913919140191411914219143191441914519146191471914819149191501915119152191531915419155191561915719158191591916019161191621916319164191651916619167191681916919170191711917219173191741917519176191771917819179191801918119182191831918419185191861918719188191891919019191191921919319194191951919619197191981919919200192011920219203192041920519206192071920819209192101921119212192131921419215192161921719218192191922019221192221922319224192251922619227192281922919230192311923219233192341923519236192371923819239192401924119242192431924419245192461924719248192491925019251192521925319254192551925619257192581925919260192611926219263192641926519266192671926819269192701927119272192731927419275192761927719278192791928019281192821928319284192851
92861928719288192891929019291192921929319294192951929619297192981929919300193011930219303193041930519306193071930819309193101931119312193131931419315193161931719318193191932019321193221932319324193251932619327193281932919330193311933219333193341933519336193371933819339193401934119342193431934419345193461934719348193491935019351193521935319354193551935619357193581935919360193611936219363193641936519366193671936819369193701937119372193731937419375193761937719378193791938019381193821938319384193851938619387193881938919390193911939219393193941939519396193971939819399194001940119402194031940419405194061940719408194091941019411194121941319414194151941619417194181941919420194211942219423194241942519426194271942819429194301943119432194331943419435194361943719438194391944019441194421944319444194451944619447194481944919450194511945219453194541945519456194571945819459194601946119462194631946419465194661946719468194691947019471194721947319474194751947619477194781947919480194811948219483194841948519486194871948819489194901949119492194931949419495194961949719498194991950019501195021950319504195051950619507195081950919510195111951219513195141951519516195171951819519195201952119522195231952419525195261952719528195291953019531195321953319534195351953619537195381953919540195411954219543195441954519546195471954819549195501955119552195531955419555195561955719558195591956019561195621956319564195651956619567195681956919570195711957219573195741957519576195771957819579195801958119582195831958419585195861958719588195891959019591195921959319594195951959619597195981959919600196011960219603196041960519606196071960819609
#define _CRT_SECURE_NO_DEPRECATE // Disables ridiculous "unsafe" warnings on Windows
#define _USE_MATH_DEFINES // For M_PI on MSVC

#include "ggml-impl.h"
#include "ggml-quants.h"

#if defined(_MSC_VER) || defined(__MINGW32__)
#include <malloc.h> // using malloc.h with MSC/MINGW
#elif !defined(__FreeBSD__) && !defined(__NetBSD__) && !defined(__OpenBSD__)
#include <alloca.h>
#endif

#include <assert.h>
#include <errno.h>
#include <time.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>
#include <float.h>
#include <limits.h>
#include <stdarg.h>
#include <signal.h>

#ifdef GGML_USE_METAL
#include <unistd.h>
#endif

#if defined(_MSC_VER)
// disable "possible loss of data" to avoid hundreds of casts
// we should just be careful :)
#pragma warning(disable: 4244 4267)

// disable POSIX deprecation warnings
// these functions are never going away, anyway
#pragma warning(disable: 4996)
#endif

#if defined(_WIN32)

#include <windows.h>

typedef volatile LONG atomic_int;
typedef atomic_int atomic_bool;

static void atomic_store(atomic_int * ptr, LONG val) {
    InterlockedExchange(ptr, val);
}
static LONG atomic_load(atomic_int * ptr) {
    return InterlockedCompareExchange(ptr, 0, 0);
}
static LONG atomic_fetch_add(atomic_int * ptr, LONG inc) {
    return InterlockedExchangeAdd(ptr, inc);
}
static LONG atomic_fetch_sub(atomic_int * ptr, LONG dec) {
    return atomic_fetch_add(ptr, -(dec));
}

typedef HANDLE pthread_t;

typedef DWORD thread_ret_t;
static int pthread_create(pthread_t * out, void * unused, thread_ret_t(*func)(void *), void * arg) {
    (void) unused;
    HANDLE handle = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE) func, arg, 0, NULL);
    if (handle == NULL)
    {
        return EAGAIN;
    }

    *out = handle;
    return 0;
}

static int pthread_join(pthread_t thread, void * unused) {
    (void) unused;
    int ret = (int) WaitForSingleObject(thread, INFINITE);
    CloseHandle(thread);
    return ret;
}

static int sched_yield (void) {
    Sleep (0);
    return 0;
}
#else
#include <pthread.h>
#include <stdatomic.h>

typedef void * thread_ret_t;

#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

#endif
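
// usage sketch (the worker function is a placeholder, not part of ggml): the
// compute threads later in this file are created and joined through this same
// minimal interface, via the shim above on Windows and <pthread.h> elsewhere
//
//     static thread_ret_t worker(void * arg) { (void) arg; return 0; }
//     ...
//     pthread_t th;
//     if (pthread_create(&th, NULL, worker, NULL) == 0) {
//         pthread_join(th, NULL);
//     }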
#ifdef GGML_USE_CPU_HBM
#include <hbwmalloc.h>
#endif

#if defined(__APPLE__)
#include <TargetConditionals.h>
#endif

#if (defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)) && \
    (!defined(TARGET_OS_TV) && !defined(TARGET_OS_WATCH))

#include <sys/wait.h>

void ggml_print_backtrace(void) {
    /*
    #include <execinfo.h>
    #include <dlfcn.h>
    void * trace[100];
    int nptrs = backtrace(trace, sizeof(trace)/sizeof(trace[0]));
    backtrace_symbols_fd(trace, nptrs, STDERR_FILENO);
    */

    // backtrace_symbols does not show line numbers, use gdb instead
    char attach[32];
    snprintf(attach, sizeof(attach), "attach %d", getpid());
    int pid = fork();
    if (pid == 0) {
        execlp("gdb", "gdb", "--batch",
            "-ex", "set style enabled on",
            "-ex", attach,
            "-ex", "bt -frame-info source-and-location",
            "-ex", "detach",
            "-ex", "quit",
            NULL);
    } else {
        waitpid(pid, NULL, 0);
    }
}
#else
void ggml_print_backtrace(void) {
    // platform not supported
}
#endif
/*#define GGML_PERF*/
#define GGML_DEBUG 0
#define GGML_GELU_FP16
#define GGML_GELU_QUICK_FP16
#define GGML_SILU_FP16
// #define GGML_CROSS_ENTROPY_EXP_FP16
// #define GGML_FLASH_ATTN_EXP_FP16

#define GGML_SOFT_MAX_UNROLL 4
#define GGML_VEC_DOT_UNROLL  2
#define GGML_VEC_MAD_UNROLL  32

//
// logging
//

#if (GGML_DEBUG >= 1)
#define GGML_PRINT_DEBUG(...) printf(__VA_ARGS__)
#else
#define GGML_PRINT_DEBUG(...)
#endif

#if (GGML_DEBUG >= 5)
#define GGML_PRINT_DEBUG_5(...) printf(__VA_ARGS__)
#else
#define GGML_PRINT_DEBUG_5(...)
#endif

#if (GGML_DEBUG >= 10)
#define GGML_PRINT_DEBUG_10(...) printf(__VA_ARGS__)
#else
#define GGML_PRINT_DEBUG_10(...)
#endif

#define GGML_PRINT(...) printf(__VA_ARGS__)

//
// end of logging block
//
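
// usage sketch (the format string and values are placeholders, not a real call
// site): with GGML_DEBUG set to 0 above, the *_DEBUG variants expand to nothing,
// while GGML_PRINT always prints
//
//     GGML_PRINT_DEBUG("%s: n = %d\n", __func__, 42);
//     GGML_PRINT("something worth always printing\n");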
#ifdef GGML_USE_ACCELERATE
// uncomment to use vDSP for soft max computation
// note: not sure if it is actually faster
//#define GGML_SOFT_MAX_ACCELERATE
#endif

#if defined(_MSC_VER) || defined(__MINGW32__)
#define GGML_ALIGNED_MALLOC(size) _aligned_malloc(size, GGML_MEM_ALIGN)
#define GGML_ALIGNED_FREE(ptr)    _aligned_free(ptr)
#else
inline static void * ggml_aligned_malloc(size_t size) {
    if (size == 0) {
        GGML_PRINT("WARNING: Behavior may be unexpected when allocating 0 bytes for ggml_aligned_malloc!\n");
        return NULL;
    }
    void * aligned_memory = NULL;
#ifdef GGML_USE_CPU_HBM
    int result = hbw_posix_memalign(&aligned_memory, 16, size);
#elif GGML_USE_METAL
    int result = posix_memalign(&aligned_memory, sysconf(_SC_PAGESIZE), size);
#else
    int result = posix_memalign(&aligned_memory, GGML_MEM_ALIGN, size);
#endif
    if (result != 0) {
        // Handle allocation failure
        const char *error_desc = "unknown allocation error";
        switch (result) {
            case EINVAL:
                error_desc = "invalid alignment value";
                break;
            case ENOMEM:
                error_desc = "insufficient memory";
                break;
        }
        GGML_PRINT("%s: %s (attempted to allocate %6.2f MB)\n", __func__, error_desc, size/(1024.0*1024.0));
        return NULL;
    }
    return aligned_memory;
}
#define GGML_ALIGNED_MALLOC(size) ggml_aligned_malloc(size)
#ifdef GGML_USE_CPU_HBM
#define GGML_ALIGNED_FREE(ptr)    if(NULL != ptr) hbw_free(ptr)
#else
#define GGML_ALIGNED_FREE(ptr)    free(ptr)
#endif
#endif
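
// usage sketch (the size is arbitrary): both branches above end up defining the
// same two macros, so internal callers allocate and release aligned buffers as
//
//     void * buf = GGML_ALIGNED_MALLOC(16u*1024u*1024u);
//     if (buf == NULL) {
//         // allocation failed
//     }
//     // ... use buf ...
//     GGML_ALIGNED_FREE(buf);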
#define UNUSED GGML_UNUSED

#define SWAP(x, y, T) do { T SWAP = x; x = y; y = SWAP; } while (0)

#if defined(GGML_USE_ACCELERATE)
#include <Accelerate/Accelerate.h>
#if defined(GGML_USE_CLBLAST) // allow usage of CLBlast alongside Accelerate functions
#include "ggml-opencl.h"
#endif
#elif defined(GGML_USE_OPENBLAS)
#if defined(GGML_BLAS_USE_MKL)
#include <mkl.h>
#else
#include <cblas.h>
#endif
#elif defined(GGML_USE_CUBLAS)
#include "ggml-cuda.h"
#elif defined(GGML_USE_CLBLAST)
#include "ggml-opencl.h"
#endif

// floating point type used to accumulate sums
typedef double ggml_float;

#undef MIN
#undef MAX

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

//
// global data
//

// precomputed gelu table for f16 (128 KB)
static ggml_fp16_t ggml_table_gelu_f16[1 << 16];

// precomputed quick gelu table for f16 (128 KB)
static ggml_fp16_t ggml_table_gelu_quick_f16[1 << 16];

// precomputed silu table for f16 (128 KB)
static ggml_fp16_t ggml_table_silu_f16[1 << 16];

// precomputed exp table for f16 (128 KB)
static ggml_fp16_t ggml_table_exp_f16[1 << 16];

// precomputed f32 table for f16 (256 KB) (ggml-impl.h)
float ggml_table_f32_f16[1 << 16];

// note: do not use these inside ggml.c
// these are meant to be used via the ggml.h API
float ggml_fp16_to_fp32(ggml_fp16_t x) {
    return (float) GGML_FP16_TO_FP32(x);
}

ggml_fp16_t ggml_fp32_to_fp16(float x) {
    return GGML_FP32_TO_FP16(x);
}

void ggml_fp16_to_fp32_row(const ggml_fp16_t * x, float * y, int n) {
    for (int i = 0; i < n; i++) {
        y[i] = GGML_FP16_TO_FP32(x[i]);
    }
}

void ggml_fp32_to_fp16_row(const float * x, ggml_fp16_t * y, int n) {
    int i = 0;
#if defined(__F16C__)
    for (; i + 7 < n; i += 8) {
        __m256 x_vec = _mm256_loadu_ps(x + i);
        __m128i y_vec = _mm256_cvtps_ph(x_vec, _MM_FROUND_TO_NEAREST_INT);
        _mm_storeu_si128((__m128i *)(y + i), y_vec);
    }
    for (; i + 3 < n; i += 4) {
        __m128 x_vec = _mm_loadu_ps(x + i);
        __m128i y_vec = _mm_cvtps_ph(x_vec, _MM_FROUND_TO_NEAREST_INT);
        _mm_storel_epi64((__m128i *)(y + i), y_vec);
    }
#endif
    for (; i < n; i++) {
        y[i] = GGML_FP32_TO_FP16(x[i]);
    }
}
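
// usage sketch (values are arbitrary): round-tripping a small row through FP16
// with the two row helpers above; values within FP16 range come back with at
// most half-precision rounding error
//
//     float       src[4] = { 0.5f, -1.25f, 3.0f, 100.0f };
//     ggml_fp16_t h16[4];
//     float       dst[4];
//     ggml_fp32_to_fp16_row(src, h16, 4);
//     ggml_fp16_to_fp32_row(h16, dst, 4);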
//
// timing
//

#if defined(_MSC_VER) || defined(__MINGW32__)
static int64_t timer_freq, timer_start;
void ggml_time_init(void) {
    LARGE_INTEGER t;
    QueryPerformanceFrequency(&t);
    timer_freq = t.QuadPart;

    // The multiplication by 1000 or 1000000 below can cause an overflow if timer_freq
    // and the uptime are high enough.
    // We subtract the program start time to reduce the likelihood of that happening.
    QueryPerformanceCounter(&t);
    timer_start = t.QuadPart;
}
int64_t ggml_time_ms(void) {
    LARGE_INTEGER t;
    QueryPerformanceCounter(&t);
    return ((t.QuadPart-timer_start) * 1000) / timer_freq;
}
int64_t ggml_time_us(void) {
    LARGE_INTEGER t;
    QueryPerformanceCounter(&t);
    return ((t.QuadPart-timer_start) * 1000000) / timer_freq;
}
#else
void ggml_time_init(void) {}
int64_t ggml_time_ms(void) {
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (int64_t)ts.tv_sec*1000 + (int64_t)ts.tv_nsec/1000000;
}

int64_t ggml_time_us(void) {
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (int64_t)ts.tv_sec*1000000 + (int64_t)ts.tv_nsec/1000;
}
#endif

int64_t ggml_cycles(void) {
    return clock();
}

int64_t ggml_cycles_per_ms(void) {
    return CLOCKS_PER_SEC/1000;
}

#ifdef GGML_PERF
#define ggml_perf_time_ms()       ggml_time_ms()
#define ggml_perf_time_us()       ggml_time_us()
#define ggml_perf_cycles()        ggml_cycles()
#define ggml_perf_cycles_per_ms() ggml_cycles_per_ms()
#else
#define ggml_perf_time_ms()       0
#define ggml_perf_time_us()       0
#define ggml_perf_cycles()        0
#define ggml_perf_cycles_per_ms() 0
#endif
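
// usage sketch: ggml_time_init() is called once at startup (ggml_init() does
// this), after which wall-clock intervals can be measured in microseconds;
// the ggml_perf_* wrappers above evaluate to 0 unless GGML_PERF is defined
//
//     ggml_time_init();
//     const int64_t t0 = ggml_time_us();
//     // ... work to be timed ...
//     const int64_t dt_us = ggml_time_us() - t0;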
//
// cache line
//

#if defined(__cpp_lib_hardware_interference_size)
#define CACHE_LINE_SIZE hardware_destructive_interference_size
#else
#if defined(__POWER9_VECTOR__)
#define CACHE_LINE_SIZE 128
#else
#define CACHE_LINE_SIZE 64
#endif
#endif

static const size_t CACHE_LINE_SIZE_F32 = CACHE_LINE_SIZE/sizeof(float);

static void ggml_vec_dot_f32(const int n, float * restrict s, const float * restrict x, const float * restrict y);
static void ggml_vec_dot_f16(const int n, float * restrict s, ggml_fp16_t * restrict x, ggml_fp16_t * restrict y);
static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = {
    [GGML_TYPE_I8] = {
        .type_name = "i8",
        .blck_size = 1,
        .type_size = sizeof(int8_t),
        .is_quantized = false,
    },
    [GGML_TYPE_I16] = {
        .type_name = "i16",
        .blck_size = 1,
        .type_size = sizeof(int16_t),
        .is_quantized = false,
    },
    [GGML_TYPE_I32] = {
        .type_name = "i32",
        .blck_size = 1,
        .type_size = sizeof(int32_t),
        .is_quantized = false,
    },
    [GGML_TYPE_F32] = {
        .type_name = "f32",
        .blck_size = 1,
        .type_size = sizeof(float),
        .is_quantized = false,
        .vec_dot = (ggml_vec_dot_t) ggml_vec_dot_f32,
        .vec_dot_type = GGML_TYPE_F32,
    },
    [GGML_TYPE_F16] = {
        .type_name = "f16",
        .blck_size = 1,
        .type_size = sizeof(ggml_fp16_t),
        .is_quantized = false,
        .to_float = (ggml_to_float_t) ggml_fp16_to_fp32_row,
        .from_float = (ggml_from_float_t) ggml_fp32_to_fp16_row,
        .from_float_reference = (ggml_from_float_t) ggml_fp32_to_fp16_row,
        .vec_dot = (ggml_vec_dot_t) ggml_vec_dot_f16,
        .vec_dot_type = GGML_TYPE_F16,
    },
    [GGML_TYPE_Q4_0] = {
        .type_name = "q4_0",
        .blck_size = QK4_0,
        .type_size = sizeof(block_q4_0),
        .is_quantized = true,
        .to_float = (ggml_to_float_t) dequantize_row_q4_0,
        .from_float = quantize_row_q4_0,
        .from_float_reference = (ggml_from_float_t) quantize_row_q4_0_reference,
        .vec_dot = ggml_vec_dot_q4_0_q8_0,
        .vec_dot_type = GGML_TYPE_Q8_0,
    },
    [GGML_TYPE_Q4_1] = {
        .type_name = "q4_1",
        .blck_size = QK4_1,
        .type_size = sizeof(block_q4_1),
        .is_quantized = true,
        .to_float = (ggml_to_float_t) dequantize_row_q4_1,
        .from_float = quantize_row_q4_1,
        .from_float_reference = (ggml_from_float_t) quantize_row_q4_1_reference,
        .vec_dot = ggml_vec_dot_q4_1_q8_1,
        .vec_dot_type = GGML_TYPE_Q8_1,
    },
    [4] = { // GGML_TYPE_Q4_2
        .type_name = "DEPRECATED",
        .blck_size = 0,
        .type_size = 0,
        .is_quantized = false,
        .to_float = NULL,
        .from_float = NULL,
        .from_float_reference = NULL,
        .vec_dot = NULL,
        .vec_dot_type = GGML_TYPE_COUNT,
    },
    [5] = { // GGML_TYPE_Q4_3
        .type_name = "DEPRECATED",
        .blck_size = 0,
        .type_size = 0,
        .is_quantized = false,
        .to_float = NULL,
        .from_float = NULL,
        .from_float_reference = NULL,
        .vec_dot = NULL,
        .vec_dot_type = GGML_TYPE_COUNT,
    },
    [GGML_TYPE_Q5_0] = {
        .type_name = "q5_0",
        .blck_size = QK5_0,
        .type_size = sizeof(block_q5_0),
        .is_quantized = true,
        .to_float = (ggml_to_float_t) dequantize_row_q5_0,
        .from_float = quantize_row_q5_0,
        .from_float_reference = (ggml_from_float_t) quantize_row_q5_0_reference,
        .vec_dot = ggml_vec_dot_q5_0_q8_0,
        .vec_dot_type = GGML_TYPE_Q8_0,
    },
    [GGML_TYPE_Q5_1] = {
        .type_name = "q5_1",
        .blck_size = QK5_1,
        .type_size = sizeof(block_q5_1),
        .is_quantized = true,
        .to_float = (ggml_to_float_t) dequantize_row_q5_1,
        .from_float = quantize_row_q5_1,
        .from_float_reference = (ggml_from_float_t) quantize_row_q5_1_reference,
        .vec_dot = ggml_vec_dot_q5_1_q8_1,
        .vec_dot_type = GGML_TYPE_Q8_1,
    },
    [GGML_TYPE_Q8_0] = {
        .type_name = "q8_0",
        .blck_size = QK8_0,
        .type_size = sizeof(block_q8_0),
        .is_quantized = true,
        .to_float = (ggml_to_float_t) dequantize_row_q8_0,
        .from_float = quantize_row_q8_0,
        .from_float_reference = (ggml_from_float_t) quantize_row_q8_0_reference,
        .vec_dot = ggml_vec_dot_q8_0_q8_0,
        .vec_dot_type = GGML_TYPE_Q8_0,
    },
    [GGML_TYPE_Q8_1] = {
        .type_name = "q8_1",
        .blck_size = QK8_1,
        .type_size = sizeof(block_q8_1),
        .is_quantized = true,
        .from_float = quantize_row_q8_1,
        .from_float_reference = (ggml_from_float_t) quantize_row_q8_1_reference,
        .vec_dot_type = GGML_TYPE_Q8_1,
    },
    [GGML_TYPE_Q2_K] = {
        .type_name = "q2_K",
        .blck_size = QK_K,
        .type_size = sizeof(block_q2_K),
        .is_quantized = true,
        .to_float = (ggml_to_float_t) dequantize_row_q2_K,
        .from_float = quantize_row_q2_K,
        .from_float_reference = (ggml_from_float_t) quantize_row_q2_K_reference,
        .vec_dot = ggml_vec_dot_q2_K_q8_K,
        .vec_dot_type = GGML_TYPE_Q8_K,
    },
    [GGML_TYPE_Q3_K] = {
        .type_name = "q3_K",
        .blck_size = QK_K,
        .type_size = sizeof(block_q3_K),
        .is_quantized = true,
        .to_float = (ggml_to_float_t) dequantize_row_q3_K,
        .from_float = quantize_row_q3_K,
        .from_float_reference = (ggml_from_float_t) quantize_row_q3_K_reference,
        .vec_dot = ggml_vec_dot_q3_K_q8_K,
        .vec_dot_type = GGML_TYPE_Q8_K,
    },
    [GGML_TYPE_Q4_K] = {
        .type_name = "q4_K",
        .blck_size = QK_K,
        .type_size = sizeof(block_q4_K),
        .is_quantized = true,
        .to_float = (ggml_to_float_t) dequantize_row_q4_K,
        .from_float = quantize_row_q4_K,
        .from_float_reference = (ggml_from_float_t) quantize_row_q4_K_reference,
        .vec_dot = ggml_vec_dot_q4_K_q8_K,
        .vec_dot_type = GGML_TYPE_Q8_K,
    },
    [GGML_TYPE_Q5_K] = {
        .type_name = "q5_K",
        .blck_size = QK_K,
        .type_size = sizeof(block_q5_K),
        .is_quantized = true,
        .to_float = (ggml_to_float_t) dequantize_row_q5_K,
        .from_float = quantize_row_q5_K,
        .from_float_reference = (ggml_from_float_t) quantize_row_q5_K_reference,
        .vec_dot = ggml_vec_dot_q5_K_q8_K,
        .vec_dot_type = GGML_TYPE_Q8_K,
    },
    [GGML_TYPE_Q6_K] = {
        .type_name = "q6_K",
        .blck_size = QK_K,
        .type_size = sizeof(block_q6_K),
        .is_quantized = true,
        .to_float = (ggml_to_float_t) dequantize_row_q6_K,
        .from_float = quantize_row_q6_K,
        .from_float_reference = (ggml_from_float_t) quantize_row_q6_K_reference,
        .vec_dot = ggml_vec_dot_q6_K_q8_K,
        .vec_dot_type = GGML_TYPE_Q8_K,
    },
    [GGML_TYPE_Q8_K] = {
        .type_name = "q8_K",
        .blck_size = QK_K,
        .type_size = sizeof(block_q8_K),
        .is_quantized = true,
        .from_float = quantize_row_q8_K,
    }
};

// For internal test use
ggml_type_traits_t ggml_internal_get_type_traits(enum ggml_type type) {
    GGML_ASSERT(type < GGML_TYPE_COUNT);
    return type_traits[type];
}
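
// usage sketch (one q8_0 block; QK8_0 is 32, so the row length must be a
// multiple of 32): the traits expose quantization through plain function
// pointers, so a row can be quantized and dequantized without naming the
// q8_0 routines directly
//
//     const ggml_type_traits_t tt = ggml_internal_get_type_traits(GGML_TYPE_Q8_0);
//     float      src[QK8_0] = { 0 };
//     block_q8_0 blk;
//     float      dst[QK8_0];
//     tt.from_float(src, &blk, QK8_0); // quantize_row_q8_0
//     tt.to_float(&blk, dst, QK8_0);   // dequantize_row_q8_0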
//
// simd mappings
//

#if defined(__ARM_NEON)
#if !defined(__aarch64__)

// 64-bit compatibility: vaddvq_f32 is only available on AArch64, so provide a scalar fallback for 32-bit ARM

inline static float vaddvq_f32(float32x4_t v) {
    return vgetq_lane_f32(v, 0) + vgetq_lane_f32(v, 1) + vgetq_lane_f32(v, 2) + vgetq_lane_f32(v, 3);
}

#endif
#endif

// we define a common set of C macros which map to specific intrinsics based on the current architecture
// we then implement the fundamental computation operations below using only these macros
// adding support for new architectures requires defining the corresponding SIMD macros
//
// GGML_F32_STEP / GGML_F16_STEP
//   number of elements to process in a single step
//
// GGML_F32_EPR / GGML_F16_EPR
//   number of elements to fit in a single register
//
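// sketch of how the kernels later in this file compose these macros (it mirrors
// the structure of ggml_vec_dot_f32; GGML_F32_ARR, the number of accumulators,
// is GGML_F32_STEP/GGML_F32_EPR):
//
//     const int np = (n & ~(GGML_F32_STEP - 1));
//     GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO };
//     GGML_F32_VEC ax, ay;
//     for (int i = 0; i < np; i += GGML_F32_STEP) {
//         for (int j = 0; j < GGML_F32_ARR; j++) {
//             ax     = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR);
//             ay     = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
//             sum[j] = GGML_F32_VEC_FMA(sum[j], ax, ay);
//         }
//     }
//     float sumf;
//     GGML_F32_VEC_REDUCE(sumf, sum);
//     for (int i = np; i < n; ++i) {
//         sumf += x[i]*y[i]; // scalar tail for the leftover elements
//     }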
#if defined(__ARM_NEON) && defined(__ARM_FEATURE_FMA)

#define GGML_SIMD

// F32 NEON

#define GGML_F32_STEP 16
#define GGML_F32_EPR  4

#define GGML_F32x4               float32x4_t
#define GGML_F32x4_ZERO          vdupq_n_f32(0.0f)
#define GGML_F32x4_SET1(x)       vdupq_n_f32(x)
#define GGML_F32x4_LOAD          vld1q_f32
#define GGML_F32x4_STORE         vst1q_f32
#define GGML_F32x4_FMA(a, b, c)  vfmaq_f32(a, b, c)
#define GGML_F32x4_ADD           vaddq_f32
#define GGML_F32x4_MUL           vmulq_f32
#define GGML_F32x4_REDUCE_ONE(x) vaddvq_f32(x)
#define GGML_F32x4_REDUCE(res, x)            \
{                                            \
    int offset = GGML_F32_ARR >> 1;          \
    for (int i = 0; i < offset; ++i) {       \
        x[i] = vaddq_f32(x[i], x[offset+i]); \
    }                                        \
    offset >>= 1;                            \
    for (int i = 0; i < offset; ++i) {       \
        x[i] = vaddq_f32(x[i], x[offset+i]); \
    }                                        \
    offset >>= 1;                            \
    for (int i = 0; i < offset; ++i) {       \
        x[i] = vaddq_f32(x[i], x[offset+i]); \
    }                                        \
    res = GGML_F32x4_REDUCE_ONE(x[0]);       \
}

#define GGML_F32_VEC        GGML_F32x4
#define GGML_F32_VEC_ZERO   GGML_F32x4_ZERO
#define GGML_F32_VEC_SET1   GGML_F32x4_SET1
#define GGML_F32_VEC_LOAD   GGML_F32x4_LOAD
#define GGML_F32_VEC_STORE  GGML_F32x4_STORE
#define GGML_F32_VEC_FMA    GGML_F32x4_FMA
#define GGML_F32_VEC_ADD    GGML_F32x4_ADD
#define GGML_F32_VEC_MUL    GGML_F32x4_MUL
#define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE

// F16 NEON

#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
#define GGML_F16_STEP 32
#define GGML_F16_EPR  8

#define GGML_F16x8              float16x8_t
#define GGML_F16x8_ZERO         vdupq_n_f16(0.0f)
#define GGML_F16x8_SET1(x)      vdupq_n_f16(x)
#define GGML_F16x8_LOAD         vld1q_f16
#define GGML_F16x8_STORE        vst1q_f16
#define GGML_F16x8_FMA(a, b, c) vfmaq_f16(a, b, c)
#define GGML_F16x8_ADD          vaddq_f16
#define GGML_F16x8_MUL          vmulq_f16
#define GGML_F16x8_REDUCE(res, x)                                 \
do {                                                              \
    int offset = GGML_F16_ARR >> 1;                               \
    for (int i = 0; i < offset; ++i) {                            \
        x[i] = vaddq_f16(x[i], x[offset+i]);                      \
    }                                                             \
    offset >>= 1;                                                 \
    for (int i = 0; i < offset; ++i) {                            \
        x[i] = vaddq_f16(x[i], x[offset+i]);                      \
    }                                                             \
    offset >>= 1;                                                 \
    for (int i = 0; i < offset; ++i) {                            \
        x[i] = vaddq_f16(x[i], x[offset+i]);                      \
    }                                                             \
    const float32x4_t t0 = vcvt_f32_f16(vget_low_f16 (x[0]));     \
    const float32x4_t t1 = vcvt_f32_f16(vget_high_f16(x[0]));     \
    res = (ggml_float) vaddvq_f32(vaddq_f32(t0, t1));             \
} while (0)

#define GGML_F16_VEC                GGML_F16x8
#define GGML_F16_VEC_ZERO           GGML_F16x8_ZERO
#define GGML_F16_VEC_SET1           GGML_F16x8_SET1
#define GGML_F16_VEC_LOAD(p, i)     GGML_F16x8_LOAD(p)
#define GGML_F16_VEC_STORE(p, r, i) GGML_F16x8_STORE(p, r[i])
#define GGML_F16_VEC_FMA            GGML_F16x8_FMA
#define GGML_F16_VEC_ADD            GGML_F16x8_ADD
#define GGML_F16_VEC_MUL            GGML_F16x8_MUL
#define GGML_F16_VEC_REDUCE         GGML_F16x8_REDUCE
#else
// if FP16 vector arithmetic is not supported, we use FP32 instead
// and take advantage of the vcvt_ functions to convert to/from FP16

#define GGML_F16_STEP 16
#define GGML_F16_EPR  4

#define GGML_F32Cx4              float32x4_t
#define GGML_F32Cx4_ZERO         vdupq_n_f32(0.0f)
#define GGML_F32Cx4_SET1(x)      vdupq_n_f32(x)
#define GGML_F32Cx4_LOAD(x)      vcvt_f32_f16(vld1_f16(x))
#define GGML_F32Cx4_STORE(x, y)  vst1_f16(x, vcvt_f16_f32(y))
#define GGML_F32Cx4_FMA(a, b, c) vfmaq_f32(a, b, c)
#define GGML_F32Cx4_ADD          vaddq_f32
#define GGML_F32Cx4_MUL          vmulq_f32
#define GGML_F32Cx4_REDUCE       GGML_F32x4_REDUCE

#define GGML_F16_VEC                GGML_F32Cx4
#define GGML_F16_VEC_ZERO           GGML_F32Cx4_ZERO
#define GGML_F16_VEC_SET1           GGML_F32Cx4_SET1
#define GGML_F16_VEC_LOAD(p, i)     GGML_F32Cx4_LOAD(p)
#define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx4_STORE(p, r[i])
#define GGML_F16_VEC_FMA            GGML_F32Cx4_FMA
#define GGML_F16_VEC_ADD            GGML_F32Cx4_ADD
#define GGML_F16_VEC_MUL            GGML_F32Cx4_MUL
#define GGML_F16_VEC_REDUCE         GGML_F32Cx4_REDUCE
#endif
#elif defined(__AVX__)

#define GGML_SIMD

// F32 AVX

#define GGML_F32_STEP 32
#define GGML_F32_EPR  8

#define GGML_F32x8         __m256
#define GGML_F32x8_ZERO    _mm256_setzero_ps()
#define GGML_F32x8_SET1(x) _mm256_set1_ps(x)
#define GGML_F32x8_LOAD    _mm256_loadu_ps
#define GGML_F32x8_STORE   _mm256_storeu_ps
#if defined(__FMA__)
#define GGML_F32x8_FMA(a, b, c) _mm256_fmadd_ps(b, c, a)
#else
#define GGML_F32x8_FMA(a, b, c) _mm256_add_ps(_mm256_mul_ps(b, c), a)
#endif
#define GGML_F32x8_ADD _mm256_add_ps
#define GGML_F32x8_MUL _mm256_mul_ps
#define GGML_F32x8_REDUCE(res, x)                                 \
do {                                                              \
    int offset = GGML_F32_ARR >> 1;                               \
    for (int i = 0; i < offset; ++i) {                            \
        x[i] = _mm256_add_ps(x[i], x[offset+i]);                  \
    }                                                             \
    offset >>= 1;                                                 \
    for (int i = 0; i < offset; ++i) {                            \
        x[i] = _mm256_add_ps(x[i], x[offset+i]);                  \
    }                                                             \
    offset >>= 1;                                                 \
    for (int i = 0; i < offset; ++i) {                            \
        x[i] = _mm256_add_ps(x[i], x[offset+i]);                  \
    }                                                             \
    const __m128 t0 = _mm_add_ps(_mm256_castps256_ps128(x[0]),    \
                                 _mm256_extractf128_ps(x[0], 1)); \
    const __m128 t1 = _mm_hadd_ps(t0, t0);                        \
    res = _mm_cvtss_f32(_mm_hadd_ps(t1, t1));                     \
} while (0)
// TODO: is this optimal ?

#define GGML_F32_VEC        GGML_F32x8
#define GGML_F32_VEC_ZERO   GGML_F32x8_ZERO
#define GGML_F32_VEC_SET1   GGML_F32x8_SET1
#define GGML_F32_VEC_LOAD   GGML_F32x8_LOAD
#define GGML_F32_VEC_STORE  GGML_F32x8_STORE
#define GGML_F32_VEC_FMA    GGML_F32x8_FMA
#define GGML_F32_VEC_ADD    GGML_F32x8_ADD
#define GGML_F32_VEC_MUL    GGML_F32x8_MUL
#define GGML_F32_VEC_REDUCE GGML_F32x8_REDUCE

// F16 AVX

#define GGML_F16_STEP 32
#define GGML_F16_EPR  8

// F16 arithmetic is not supported by AVX, so we use F32 instead

#define GGML_F32Cx8         __m256
#define GGML_F32Cx8_ZERO    _mm256_setzero_ps()
#define GGML_F32Cx8_SET1(x) _mm256_set1_ps(x)

#if defined(__F16C__)
// the _mm256_cvt intrinsics require F16C
#define GGML_F32Cx8_LOAD(x)     _mm256_cvtph_ps(_mm_loadu_si128((__m128i *)(x)))
#define GGML_F32Cx8_STORE(x, y) _mm_storeu_si128((__m128i *)(x), _mm256_cvtps_ph(y, 0))
#else
static inline __m256 __avx_f32cx8_load(ggml_fp16_t *x) {
    float tmp[8];

    for (int i = 0; i < 8; i++) {
        tmp[i] = GGML_FP16_TO_FP32(x[i]);
    }

    return _mm256_loadu_ps(tmp);
}
static inline void __avx_f32cx8_store(ggml_fp16_t *x, __m256 y) {
    float arr[8];

    _mm256_storeu_ps(arr, y);

    for (int i = 0; i < 8; i++)
        x[i] = GGML_FP32_TO_FP16(arr[i]);
}
#define GGML_F32Cx8_LOAD(x)     __avx_f32cx8_load(x)
#define GGML_F32Cx8_STORE(x, y) __avx_f32cx8_store(x, y)
#endif

#define GGML_F32Cx8_FMA    GGML_F32x8_FMA
#define GGML_F32Cx8_ADD    _mm256_add_ps
#define GGML_F32Cx8_MUL    _mm256_mul_ps
#define GGML_F32Cx8_REDUCE GGML_F32x8_REDUCE

#define GGML_F16_VEC                GGML_F32Cx8
#define GGML_F16_VEC_ZERO           GGML_F32Cx8_ZERO
#define GGML_F16_VEC_SET1           GGML_F32Cx8_SET1
#define GGML_F16_VEC_LOAD(p, i)     GGML_F32Cx8_LOAD(p)
#define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx8_STORE(p, r[i])
#define GGML_F16_VEC_FMA            GGML_F32Cx8_FMA
#define GGML_F16_VEC_ADD            GGML_F32Cx8_ADD
#define GGML_F16_VEC_MUL            GGML_F32Cx8_MUL
#define GGML_F16_VEC_REDUCE         GGML_F32Cx8_REDUCE
  735. #elif defined(__POWER9_VECTOR__)
  736. #define GGML_SIMD
  737. // F32 POWER9
  738. #define GGML_F32_STEP 32
  739. #define GGML_F32_EPR 4
  740. #define GGML_F32x4 vector float
  741. #define GGML_F32x4_ZERO 0.0f
  742. #define GGML_F32x4_SET1 vec_splats
  743. #define GGML_F32x4_LOAD(p) vec_xl(0, p)
  744. #define GGML_F32x4_STORE(p, r) vec_xst(r, 0, p)
  745. #define GGML_F32x4_FMA(a, b, c) vec_madd(b, c, a)
  746. #define GGML_F32x4_ADD vec_add
  747. #define GGML_F32x4_MUL vec_mul
  748. #define GGML_F32x4_REDUCE(res, x) \
  749. { \
  750. int offset = GGML_F32_ARR >> 1; \
  751. for (int i = 0; i < offset; ++i) { \
  752. x[i] = vec_add(x[i], x[offset+i]); \
  753. } \
  754. offset >>= 1; \
  755. for (int i = 0; i < offset; ++i) { \
  756. x[i] = vec_add(x[i], x[offset+i]); \
  757. } \
  758. offset >>= 1; \
  759. for (int i = 0; i < offset; ++i) { \
  760. x[i] = vec_add(x[i], x[offset+i]); \
  761. } \
  762. res = vec_extract(x[0], 0) + \
  763. vec_extract(x[0], 1) + \
  764. vec_extract(x[0], 2) + \
  765. vec_extract(x[0], 3); \
  766. }
  767. #define GGML_F32_VEC GGML_F32x4
  768. #define GGML_F32_VEC_ZERO GGML_F32x4_ZERO
  769. #define GGML_F32_VEC_SET1 GGML_F32x4_SET1
  770. #define GGML_F32_VEC_LOAD GGML_F32x4_LOAD
  771. #define GGML_F32_VEC_STORE GGML_F32x4_STORE
  772. #define GGML_F32_VEC_FMA GGML_F32x4_FMA
  773. #define GGML_F32_VEC_ADD GGML_F32x4_ADD
  774. #define GGML_F32_VEC_MUL GGML_F32x4_MUL
  775. #define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE
  776. // F16 POWER9
  777. #define GGML_F16_STEP GGML_F32_STEP
  778. #define GGML_F16_EPR GGML_F32_EPR
  779. #define GGML_F16_VEC GGML_F32x4
  780. #define GGML_F16_VEC_ZERO GGML_F32x4_ZERO
  781. #define GGML_F16_VEC_SET1 GGML_F32x4_SET1
  782. #define GGML_F16_VEC_FMA GGML_F32x4_FMA
  783. #define GGML_F16_VEC_REDUCE GGML_F32x4_REDUCE
  784. // Use vec_xl, not vec_ld, in case the load address is not aligned.
  785. #define GGML_F16_VEC_LOAD(p, i) (i & 0x1) ? \
  786. vec_extract_fp32_from_shorth(vec_xl(0, p - GGML_F16_EPR)) : \
  787. vec_extract_fp32_from_shortl(vec_xl(0, p))
  788. #define GGML_ENDIAN_BYTE(i) ((unsigned char *)&(uint16_t){1})[i]
  789. #define GGML_F16_VEC_STORE(p, r, i) \
  790. if (i & 0x1) \
  791. vec_xst(vec_pack_to_short_fp32(r[i - GGML_ENDIAN_BYTE(1)], \
  792. r[i - GGML_ENDIAN_BYTE(0)]), \
  793. 0, p - GGML_F16_EPR)
  794. #elif defined(__wasm_simd128__)
  795. #define GGML_SIMD
  796. // F32 WASM
  797. #define GGML_F32_STEP 16
  798. #define GGML_F32_EPR 4
  799. #define GGML_F32x4 v128_t
  800. #define GGML_F32x4_ZERO wasm_f32x4_splat(0.0f)
  801. #define GGML_F32x4_SET1(x) wasm_f32x4_splat(x)
  802. #define GGML_F32x4_LOAD wasm_v128_load
  803. #define GGML_F32x4_STORE wasm_v128_store
  804. #define GGML_F32x4_FMA(a, b, c) wasm_f32x4_add(wasm_f32x4_mul(b, c), a)
  805. #define GGML_F32x4_ADD wasm_f32x4_add
  806. #define GGML_F32x4_MUL wasm_f32x4_mul
  807. #define GGML_F32x4_REDUCE(res, x) \
  808. { \
  809. int offset = GGML_F32_ARR >> 1; \
  810. for (int i = 0; i < offset; ++i) { \
  811. x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
  812. } \
  813. offset >>= 1; \
  814. for (int i = 0; i < offset; ++i) { \
  815. x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
  816. } \
  817. offset >>= 1; \
  818. for (int i = 0; i < offset; ++i) { \
  819. x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
  820. } \
  821. res = wasm_f32x4_extract_lane(x[0], 0) + \
  822. wasm_f32x4_extract_lane(x[0], 1) + \
  823. wasm_f32x4_extract_lane(x[0], 2) + \
  824. wasm_f32x4_extract_lane(x[0], 3); \
  825. }
  826. #define GGML_F32_VEC GGML_F32x4
  827. #define GGML_F32_VEC_ZERO GGML_F32x4_ZERO
  828. #define GGML_F32_VEC_SET1 GGML_F32x4_SET1
  829. #define GGML_F32_VEC_LOAD GGML_F32x4_LOAD
  830. #define GGML_F32_VEC_STORE GGML_F32x4_STORE
  831. #define GGML_F32_VEC_FMA GGML_F32x4_FMA
  832. #define GGML_F32_VEC_ADD GGML_F32x4_ADD
  833. #define GGML_F32_VEC_MUL GGML_F32x4_MUL
  834. #define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE
  835. // F16 WASM
  836. #define GGML_F16_STEP 16
  837. #define GGML_F16_EPR 4
  838. inline static v128_t __wasm_f16x4_load(const ggml_fp16_t * p) {
  839. float tmp[4];
  840. tmp[0] = GGML_FP16_TO_FP32(p[0]);
  841. tmp[1] = GGML_FP16_TO_FP32(p[1]);
  842. tmp[2] = GGML_FP16_TO_FP32(p[2]);
  843. tmp[3] = GGML_FP16_TO_FP32(p[3]);
  844. return wasm_v128_load(tmp);
  845. }
  846. inline static void __wasm_f16x4_store(ggml_fp16_t * p, v128_t x) {
  847. float tmp[4];
  848. wasm_v128_store(tmp, x);
  849. p[0] = GGML_FP32_TO_FP16(tmp[0]);
  850. p[1] = GGML_FP32_TO_FP16(tmp[1]);
  851. p[2] = GGML_FP32_TO_FP16(tmp[2]);
  852. p[3] = GGML_FP32_TO_FP16(tmp[3]);
  853. }
  854. #define GGML_F16x4 v128_t
  855. #define GGML_F16x4_ZERO wasm_f32x4_splat(0.0f)
  856. #define GGML_F16x4_SET1(x) wasm_f32x4_splat(x)
  857. #define GGML_F16x4_LOAD(x) __wasm_f16x4_load(x)
  858. #define GGML_F16x4_STORE(x, y) __wasm_f16x4_store(x, y)
  859. #define GGML_F16x4_FMA GGML_F32x4_FMA
  860. #define GGML_F16x4_ADD wasm_f32x4_add
  861. #define GGML_F16x4_MUL wasm_f32x4_mul
  862. #define GGML_F16x4_REDUCE(res, x) \
  863. { \
  864. int offset = GGML_F16_ARR >> 1; \
  865. for (int i = 0; i < offset; ++i) { \
  866. x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
  867. } \
  868. offset >>= 1; \
  869. for (int i = 0; i < offset; ++i) { \
  870. x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
  871. } \
  872. offset >>= 1; \
  873. for (int i = 0; i < offset; ++i) { \
  874. x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
  875. } \
  876. res = wasm_f32x4_extract_lane(x[0], 0) + \
  877. wasm_f32x4_extract_lane(x[0], 1) + \
  878. wasm_f32x4_extract_lane(x[0], 2) + \
  879. wasm_f32x4_extract_lane(x[0], 3); \
  880. }
  881. #define GGML_F16_VEC GGML_F16x4
  882. #define GGML_F16_VEC_ZERO GGML_F16x4_ZERO
  883. #define GGML_F16_VEC_SET1 GGML_F16x4_SET1
  884. #define GGML_F16_VEC_LOAD(p, i) GGML_F16x4_LOAD(p)
  885. #define GGML_F16_VEC_STORE(p, r, i) GGML_F16x4_STORE(p, r[i])
  886. #define GGML_F16_VEC_FMA GGML_F16x4_FMA
  887. #define GGML_F16_VEC_ADD GGML_F16x4_ADD
  888. #define GGML_F16_VEC_MUL GGML_F16x4_MUL
  889. #define GGML_F16_VEC_REDUCE GGML_F16x4_REDUCE
  890. #elif defined(__SSE3__)
  891. #define GGML_SIMD
  892. // F32 SSE
  893. #define GGML_F32_STEP 32
  894. #define GGML_F32_EPR 4
  895. #define GGML_F32x4 __m128
  896. #define GGML_F32x4_ZERO _mm_setzero_ps()
  897. #define GGML_F32x4_SET1(x) _mm_set1_ps(x)
  898. #define GGML_F32x4_LOAD _mm_loadu_ps
  899. #define GGML_F32x4_STORE _mm_storeu_ps
  900. #if defined(__FMA__)
  901. // TODO: Does this work?
  902. #define GGML_F32x4_FMA(a, b, c) _mm_fmadd_ps(b, c, a)
  903. #else
  904. #define GGML_F32x4_FMA(a, b, c) _mm_add_ps(_mm_mul_ps(b, c), a)
  905. #endif
  906. #define GGML_F32x4_ADD _mm_add_ps
  907. #define GGML_F32x4_MUL _mm_mul_ps
  908. #define GGML_F32x4_REDUCE(res, x) \
  909. { \
  910. int offset = GGML_F32_ARR >> 1; \
  911. for (int i = 0; i < offset; ++i) { \
  912. x[i] = _mm_add_ps(x[i], x[offset+i]); \
  913. } \
  914. offset >>= 1; \
  915. for (int i = 0; i < offset; ++i) { \
  916. x[i] = _mm_add_ps(x[i], x[offset+i]); \
  917. } \
  918. offset >>= 1; \
  919. for (int i = 0; i < offset; ++i) { \
  920. x[i] = _mm_add_ps(x[i], x[offset+i]); \
  921. } \
  922. const __m128 t0 = _mm_hadd_ps(x[0], x[0]); \
  923. res = _mm_cvtss_f32(_mm_hadd_ps(t0, t0)); \
  924. }
  925. // TODO: is this optimal ?
  926. #define GGML_F32_VEC GGML_F32x4
  927. #define GGML_F32_VEC_ZERO GGML_F32x4_ZERO
  928. #define GGML_F32_VEC_SET1 GGML_F32x4_SET1
  929. #define GGML_F32_VEC_LOAD GGML_F32x4_LOAD
  930. #define GGML_F32_VEC_STORE GGML_F32x4_STORE
  931. #define GGML_F32_VEC_FMA GGML_F32x4_FMA
  932. #define GGML_F32_VEC_ADD GGML_F32x4_ADD
  933. #define GGML_F32_VEC_MUL GGML_F32x4_MUL
  934. #define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE
  935. // F16 SSE
  936. #define GGML_F16_STEP 32
  937. #define GGML_F16_EPR 4
  938. static inline __m128 __sse_f16x4_load(ggml_fp16_t *x) {
  939. float tmp[4];
  940. tmp[0] = GGML_FP16_TO_FP32(x[0]);
  941. tmp[1] = GGML_FP16_TO_FP32(x[1]);
  942. tmp[2] = GGML_FP16_TO_FP32(x[2]);
  943. tmp[3] = GGML_FP16_TO_FP32(x[3]);
  944. return _mm_loadu_ps(tmp);
  945. }
  946. static inline void __sse_f16x4_store(ggml_fp16_t *x, __m128 y) {
  947. float arr[4];
  948. _mm_storeu_ps(arr, y);
  949. x[0] = GGML_FP32_TO_FP16(arr[0]);
  950. x[1] = GGML_FP32_TO_FP16(arr[1]);
  951. x[2] = GGML_FP32_TO_FP16(arr[2]);
  952. x[3] = GGML_FP32_TO_FP16(arr[3]);
  953. }
  954. #define GGML_F32Cx4 __m128
  955. #define GGML_F32Cx4_ZERO _mm_setzero_ps()
  956. #define GGML_F32Cx4_SET1(x) _mm_set1_ps(x)
  957. #define GGML_F32Cx4_LOAD(x) __sse_f16x4_load(x)
  958. #define GGML_F32Cx4_STORE(x, y) __sse_f16x4_store(x, y)
  959. #define GGML_F32Cx4_FMA GGML_F32x4_FMA
  960. #define GGML_F32Cx4_ADD _mm_add_ps
  961. #define GGML_F32Cx4_MUL _mm_mul_ps
  962. #define GGML_F32Cx4_REDUCE GGML_F32x4_REDUCE
  963. #define GGML_F16_VEC GGML_F32Cx4
  964. #define GGML_F16_VEC_ZERO GGML_F32Cx4_ZERO
  965. #define GGML_F16_VEC_SET1 GGML_F32Cx4_SET1
  966. #define GGML_F16_VEC_LOAD(p, i) GGML_F32Cx4_LOAD(p)
  967. #define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx4_STORE(p, r[i])
  968. #define GGML_F16_VEC_FMA GGML_F32Cx4_FMA
  969. #define GGML_F16_VEC_ADD GGML_F32Cx4_ADD
  970. #define GGML_F16_VEC_MUL GGML_F32Cx4_MUL
  971. #define GGML_F16_VEC_REDUCE GGML_F32Cx4_REDUCE
  972. #endif
  973. // GGML_F32_ARR / GGML_F16_ARR
  974. // number of registers to use per step
  975. #ifdef GGML_SIMD
  976. #define GGML_F32_ARR (GGML_F32_STEP/GGML_F32_EPR)
  977. #define GGML_F16_ARR (GGML_F16_STEP/GGML_F16_EPR)
  978. #endif
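// e.g. with AVX (GGML_F32_STEP = 32, GGML_F32_EPR = 8) this gives GGML_F32_ARR = 4,
// i.e. 4 vector accumulators are kept in flight per loop iteration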
  979. //
  980. // fundamental operations
  981. //
  982. inline static void ggml_vec_set_i8(const int n, int8_t * x, const int8_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
  983. inline static void ggml_vec_set_i16(const int n, int16_t * x, const int16_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
  984. inline static void ggml_vec_set_i32(const int n, int32_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
  985. inline static void ggml_vec_set_f16(const int n, ggml_fp16_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
  986. inline static void ggml_vec_add_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i] + y[i]; }
  987. inline static void ggml_vec_add1_f32(const int n, float * z, const float * x, const float v) { for (int i = 0; i < n; ++i) z[i] = x[i] + v; }
  988. inline static void ggml_vec_acc_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] += x[i]; }
  989. inline static void ggml_vec_acc1_f32(const int n, float * y, const float v) { for (int i = 0; i < n; ++i) y[i] += v; }
  990. inline static void ggml_vec_sub_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i] - y[i]; }
  991. inline static void ggml_vec_set_f32 (const int n, float * x, const float v) { for (int i = 0; i < n; ++i) x[i] = v; }
  992. inline static void ggml_vec_cpy_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i]; }
  993. inline static void ggml_vec_neg_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = -x[i]; }
  994. inline static void ggml_vec_mul_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]*y[i]; }
  995. inline static void ggml_vec_div_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]/y[i]; }
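// dot product of two f32 vectors: *s = sum_i x[i]*y[i]
// the SIMD path consumes GGML_F32_STEP elements per iteration using GGML_F32_ARR accumulators,
// reduces them into a single scalar, and finishes the remaining n % GGML_F32_STEP elements with a scalar loop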
  996. static void ggml_vec_dot_f32(const int n, float * restrict s, const float * restrict x, const float * restrict y) {
  997. #ifdef GGML_SIMD
  998. float sumf = 0.0f;
  999. const int np = (n & ~(GGML_F32_STEP - 1));
  1000. GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO };
  1001. GGML_F32_VEC ax[GGML_F32_ARR];
  1002. GGML_F32_VEC ay[GGML_F32_ARR];
  1003. for (int i = 0; i < np; i += GGML_F32_STEP) {
  1004. for (int j = 0; j < GGML_F32_ARR; j++) {
  1005. ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR);
  1006. ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
  1007. sum[j] = GGML_F32_VEC_FMA(sum[j], ax[j], ay[j]);
  1008. }
  1009. }
// reduce the GGML_F32_ARR partial sums into sumf
  1011. GGML_F32_VEC_REDUCE(sumf, sum);
  1012. // leftovers
  1013. for (int i = np; i < n; ++i) {
  1014. sumf += x[i]*y[i];
  1015. }
  1016. #else
  1017. // scalar
  1018. ggml_float sumf = 0.0;
  1019. for (int i = 0; i < n; ++i) {
  1020. sumf += (ggml_float)(x[i]*y[i]);
  1021. }
  1022. #endif
  1023. *s = sumf;
  1024. }
  1025. static void ggml_vec_dot_f16(const int n, float * restrict s, ggml_fp16_t * restrict x, ggml_fp16_t * restrict y) {
  1026. ggml_float sumf = 0.0;
  1027. #if defined(GGML_SIMD)
  1028. const int np = (n & ~(GGML_F16_STEP - 1));
  1029. GGML_F16_VEC sum[GGML_F16_ARR] = { GGML_F16_VEC_ZERO };
  1030. GGML_F16_VEC ax[GGML_F16_ARR];
  1031. GGML_F16_VEC ay[GGML_F16_ARR];
  1032. for (int i = 0; i < np; i += GGML_F16_STEP) {
  1033. for (int j = 0; j < GGML_F16_ARR; j++) {
  1034. ax[j] = GGML_F16_VEC_LOAD(x + i + j*GGML_F16_EPR, j);
  1035. ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j);
  1036. sum[j] = GGML_F16_VEC_FMA(sum[j], ax[j], ay[j]);
  1037. }
  1038. }
// reduce the GGML_F16_ARR partial sums into sumf
  1040. GGML_F16_VEC_REDUCE(sumf, sum);
  1041. // leftovers
  1042. for (int i = np; i < n; ++i) {
  1043. sumf += (ggml_float)(GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i]));
  1044. }
  1045. #else
  1046. for (int i = 0; i < n; ++i) {
  1047. sumf += (ggml_float)(GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i]));
  1048. }
  1049. #endif
  1050. *s = sumf;
  1051. }
  1052. // compute GGML_VEC_DOT_UNROLL dot products at once
  1053. // xs - x row stride in bytes
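// i.e. s[k] = dot(x_k, y) for k = 0..GGML_VEC_DOT_UNROLL-1, where x_k starts at (char *) xv + k*xs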
  1054. inline static void ggml_vec_dot_f16_unroll(const int n, const int xs, float * restrict s, void * restrict xv, ggml_fp16_t * restrict y) {
  1055. ggml_float sumf[GGML_VEC_DOT_UNROLL] = { 0.0 };
  1056. ggml_fp16_t * restrict x[GGML_VEC_DOT_UNROLL];
  1057. for (int i = 0; i < GGML_VEC_DOT_UNROLL; ++i) {
  1058. x[i] = (ggml_fp16_t *) ((char *) xv + i*xs);
  1059. }
  1060. #if defined(GGML_SIMD)
  1061. const int np = (n & ~(GGML_F16_STEP - 1));
  1062. GGML_F16_VEC sum[GGML_VEC_DOT_UNROLL][GGML_F16_ARR] = { { GGML_F16_VEC_ZERO } };
  1063. GGML_F16_VEC ax[GGML_F16_ARR];
  1064. GGML_F16_VEC ay[GGML_F16_ARR];
  1065. for (int i = 0; i < np; i += GGML_F16_STEP) {
  1066. for (int j = 0; j < GGML_F16_ARR; j++) {
  1067. ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j);
  1068. for (int k = 0; k < GGML_VEC_DOT_UNROLL; ++k) {
  1069. ax[j] = GGML_F16_VEC_LOAD(x[k] + i + j*GGML_F16_EPR, j);
  1070. sum[k][j] = GGML_F16_VEC_FMA(sum[k][j], ax[j], ay[j]);
  1071. }
  1072. }
  1073. }
// reduce the partial sums of each unrolled dot product into sumf[k]
  1075. for (int k = 0; k < GGML_VEC_DOT_UNROLL; ++k) {
  1076. GGML_F16_VEC_REDUCE(sumf[k], sum[k]);
  1077. }
  1078. // leftovers
  1079. for (int i = np; i < n; ++i) {
  1080. for (int j = 0; j < GGML_VEC_DOT_UNROLL; ++j) {
  1081. sumf[j] += (ggml_float)(GGML_FP16_TO_FP32(x[j][i])*GGML_FP16_TO_FP32(y[i]));
  1082. }
  1083. }
  1084. #else
  1085. for (int i = 0; i < n; ++i) {
  1086. for (int j = 0; j < GGML_VEC_DOT_UNROLL; ++j) {
  1087. sumf[j] += (ggml_float)(GGML_FP16_TO_FP32(x[j][i])*GGML_FP16_TO_FP32(y[i]));
  1088. }
  1089. }
  1090. #endif
  1091. for (int i = 0; i < GGML_VEC_DOT_UNROLL; ++i) {
  1092. s[i] = sumf[i];
  1093. }
  1094. }
  1095. inline static void ggml_vec_mad_f32(const int n, float * restrict y, const float * restrict x, const float v) {
  1096. #if defined(GGML_SIMD)
  1097. const int np = (n & ~(GGML_F32_STEP - 1));
  1098. GGML_F32_VEC vx = GGML_F32_VEC_SET1(v);
  1099. GGML_F32_VEC ax[GGML_F32_ARR];
  1100. GGML_F32_VEC ay[GGML_F32_ARR];
  1101. for (int i = 0; i < np; i += GGML_F32_STEP) {
  1102. for (int j = 0; j < GGML_F32_ARR; j++) {
  1103. ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR);
  1104. ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
  1105. ay[j] = GGML_F32_VEC_FMA(ay[j], ax[j], vx);
  1106. GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]);
  1107. }
  1108. }
  1109. // leftovers
  1110. for (int i = np; i < n; ++i) {
  1111. y[i] += x[i]*v;
  1112. }
  1113. #else
  1114. // scalar
  1115. for (int i = 0; i < n; ++i) {
  1116. y[i] += x[i]*v;
  1117. }
  1118. #endif
  1119. }
  1120. // xs and vs are byte strides of x and v
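// i.e. y[i] += sum over k of x_k[i]*v_k[0], where x_k = xv + k*xs and v_k = vv + k*vs (byte offsets)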
  1121. inline static void ggml_vec_mad_f32_unroll(const int n, const int xs, const int vs, float * restrict y, const float * restrict xv, const float * restrict vv) {
  1122. const float * restrict x[GGML_VEC_MAD_UNROLL];
  1123. const float * restrict v[GGML_VEC_MAD_UNROLL];
  1124. for (int i = 0; i < GGML_VEC_MAD_UNROLL; ++i) {
  1125. x[i] = (const float *) ((const char *) xv + i*xs);
  1126. v[i] = (const float *) ((const char *) vv + i*vs);
  1127. }
  1128. #if defined(GGML_SIMD)
  1129. const int np = (n & ~(GGML_F32_STEP - 1));
  1130. GGML_F32_VEC vx[GGML_VEC_MAD_UNROLL];
  1131. for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) {
  1132. vx[k] = GGML_F32_VEC_SET1(v[k][0]);
  1133. }
  1134. GGML_F32_VEC ax[GGML_VEC_MAD_UNROLL][GGML_F32_ARR];
  1135. GGML_F32_VEC ay[GGML_F32_ARR];
  1136. for (int i = 0; i < np; i += GGML_F32_STEP) {
  1137. for (int j = 0; j < GGML_F32_ARR; j++) {
  1138. ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
  1139. for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) {
  1140. ax[k][j] = GGML_F32_VEC_LOAD(x[k] + i + j*GGML_F32_EPR);
  1141. ay[j] = GGML_F32_VEC_FMA(ay[j], ax[k][j], vx[k]);
  1142. }
  1143. GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]);
  1144. }
  1145. }
  1146. // leftovers
  1147. for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) {
  1148. for (int i = np; i < n; ++i) {
  1149. y[i] += x[k][i]*v[k][0];
  1150. }
  1151. }
  1152. #else
  1153. // scalar
  1154. for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) {
  1155. for (int i = 0; i < n; ++i) {
  1156. y[i] += x[k][i]*v[k][0];
  1157. }
  1158. }
  1159. #endif
  1160. }
  1161. //inline static void ggml_vec_scale_f32(const int n, float * y, const float v) { for (int i = 0; i < n; ++i) y[i] *= v; }
  1162. inline static void ggml_vec_scale_f32(const int n, float * y, const float v) {
  1163. #if defined(GGML_USE_ACCELERATE)
  1164. vDSP_vsmul(y, 1, &v, y, 1, n);
  1165. #elif defined(GGML_SIMD)
  1166. const int np = (n & ~(GGML_F32_STEP - 1));
  1167. GGML_F32_VEC vx = GGML_F32_VEC_SET1(v);
  1168. GGML_F32_VEC ay[GGML_F32_ARR];
  1169. for (int i = 0; i < np; i += GGML_F32_STEP) {
  1170. for (int j = 0; j < GGML_F32_ARR; j++) {
  1171. ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
  1172. ay[j] = GGML_F32_VEC_MUL(ay[j], vx);
  1173. GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]);
  1174. }
  1175. }
  1176. // leftovers
  1177. for (int i = np; i < n; ++i) {
  1178. y[i] *= v;
  1179. }
  1180. #else
  1181. // scalar
  1182. for (int i = 0; i < n; ++i) {
  1183. y[i] *= v;
  1184. }
  1185. #endif
  1186. }
  1187. inline static void ggml_vec_norm_f32 (const int n, float * s, const float * x) { ggml_vec_dot_f32(n, s, x, x); *s = sqrtf(*s); }
  1188. inline static void ggml_vec_sqr_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i]*x[i]; }
  1189. inline static void ggml_vec_sqrt_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = sqrtf(x[i]); }
  1190. inline static void ggml_vec_log_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = logf(x[i]); }
  1191. inline static void ggml_vec_abs_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = fabsf(x[i]); }
  1192. inline static void ggml_vec_sgn_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 1.f : ((x[i] < 0.f) ? -1.f : 0.f); }
  1193. inline static void ggml_vec_step_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 1.f : 0.f; }
  1194. inline static void ggml_vec_tanh_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = tanhf(x[i]); }
  1195. inline static void ggml_vec_elu_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? x[i] : expf(x[i])-1; }
  1196. inline static void ggml_vec_relu_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? x[i] : 0.f; }
  1197. inline static void ggml_vec_leaky_relu_f32 (const int n, float * y, const float * x, const float ns) { for (int i = 0; i < n; ++i) y[i] = ((x[i] > 0.f) ? x[i] : 0.f) + ns * ((x[i] < 0.0f) ? x[i] : 0.f); }
  1198. static const float GELU_COEF_A = 0.044715f;
  1199. static const float GELU_QUICK_COEF = -1.702f;
  1200. static const float SQRT_2_OVER_PI = 0.79788456080286535587989211986876f;
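// GELU, tanh approximation: 0.5*x*(1 + tanh(sqrt(2/pi)*(x + 0.044715*x^3)))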
  1201. inline static float ggml_gelu_f32(float x) {
  1202. return 0.5f*x*(1.0f + tanhf(SQRT_2_OVER_PI*x*(1.0f + GELU_COEF_A*x*x)));
  1203. }
  1204. inline static void ggml_vec_gelu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
  1205. const uint16_t * i16 = (const uint16_t *) x;
  1206. for (int i = 0; i < n; ++i) {
  1207. y[i] = ggml_table_gelu_f16[i16[i]];
  1208. }
  1209. }
  1210. #ifdef GGML_GELU_FP16
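// with GGML_GELU_FP16, x[i] is rounded to fp16 and its 16-bit pattern indexes the precomputed
// ggml_table_gelu_f16 (1 << 16 entries, filled in ggml_init), trading a little accuracy for speed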
  1211. inline static void ggml_vec_gelu_f32(const int n, float * y, const float * x) {
  1212. uint16_t t;
  1213. for (int i = 0; i < n; ++i) {
  1214. ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
  1215. memcpy(&t, &fp16, sizeof(uint16_t));
  1216. y[i] = GGML_FP16_TO_FP32(ggml_table_gelu_f16[t]);
  1217. }
  1218. }
  1219. #else
  1220. inline static void ggml_vec_gelu_f32(const int n, float * y, const float * x) {
  1221. for (int i = 0; i < n; ++i) {
  1222. y[i] = ggml_gelu_f32(x[i]);
  1223. }
  1224. }
  1225. #endif
  1226. inline static float ggml_gelu_quick_f32(float x) {
  1227. return x*(1.0f/(1.0f+expf(GELU_QUICK_COEF*x)));
  1228. }
  1229. //inline static void ggml_vec_gelu_quick_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
  1230. // const uint16_t * i16 = (const uint16_t *) x;
  1231. // for (int i = 0; i < n; ++i) {
  1232. // y[i] = ggml_table_gelu_quick_f16[i16[i]];
  1233. // }
  1234. //}
  1235. #ifdef GGML_GELU_QUICK_FP16
  1236. inline static void ggml_vec_gelu_quick_f32(const int n, float * y, const float * x) {
  1237. uint16_t t;
  1238. for (int i = 0; i < n; ++i) {
  1239. ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
  1240. memcpy(&t, &fp16, sizeof(uint16_t));
  1241. y[i] = GGML_FP16_TO_FP32(ggml_table_gelu_quick_f16[t]);
  1242. }
  1243. }
  1244. #else
  1245. inline static void ggml_vec_gelu_quick_f32(const int n, float * y, const float * x) {
  1246. for (int i = 0; i < n; ++i) {
  1247. y[i] = ggml_gelu_quick_f32(x[i]);
  1248. }
  1249. }
  1250. #endif
  1251. // Sigmoid Linear Unit (SiLU) function
  1252. inline static float ggml_silu_f32(float x) {
  1253. return x/(1.0f + expf(-x));
  1254. }
  1255. //inline static void ggml_vec_silu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
  1256. // const uint16_t * i16 = (const uint16_t *) x;
  1257. // for (int i = 0; i < n; ++i) {
  1258. // y[i] = ggml_table_silu_f16[i16[i]];
  1259. // }
  1260. //}
  1261. #ifdef GGML_SILU_FP16
  1262. inline static void ggml_vec_silu_f32(const int n, float * y, const float * x) {
  1263. uint16_t t;
  1264. for (int i = 0; i < n; ++i) {
  1265. ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
  1266. memcpy(&t, &fp16, sizeof(uint16_t));
  1267. y[i] = GGML_FP16_TO_FP32(ggml_table_silu_f16[t]);
  1268. }
  1269. }
  1270. #else
  1271. inline static void ggml_vec_silu_f32(const int n, float * y, const float * x) {
  1272. for (int i = 0; i < n; ++i) {
  1273. y[i] = ggml_silu_f32(x[i]);
  1274. }
  1275. }
  1276. #endif
  1277. inline static float ggml_silu_backward_f32(float x, float dy) {
  1278. const float s = 1.0f/(1.0f + expf(-x));
  1279. return dy*s*(1.0f + x*(1.0f - s));
  1280. }
  1281. #ifdef GGML_SILU_FP16
  1282. inline static void ggml_vec_silu_backward_f32(const int n, float * dx, const float * x, const float * dy) {
  1283. for (int i = 0; i < n; ++i) {
// the forward silu was computed from the f16-rounded value of x[i], not from x[i] itself,
// so take the derivative at that f16 value:
  1286. ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
  1287. float usedx = GGML_FP16_TO_FP32(fp16);
  1288. dx[i] = ggml_silu_backward_f32(usedx, dy[i]);
  1289. }
  1290. }
  1291. #else
  1292. inline static void ggml_vec_silu_backward_f32(const int n, float * dx, const float * x, const float * dy) {
  1293. for (int i = 0; i < n; ++i) {
  1294. dx[i] = ggml_silu_backward_f32(x[i], dy[i]);
  1295. }
  1296. }
  1297. #endif
  1298. inline static void ggml_vec_sum_f32(const int n, float * s, const float * x) {
  1299. #ifndef GGML_USE_ACCELERATE
  1300. ggml_float sum = 0.0;
  1301. for (int i = 0; i < n; ++i) {
  1302. sum += (ggml_float)x[i];
  1303. }
  1304. *s = sum;
  1305. #else
  1306. vDSP_sve(x, 1, s, n);
  1307. #endif
  1308. }
  1309. inline static void ggml_vec_sum_f32_ggf(const int n, ggml_float * s, const float * x) {
  1310. ggml_float sum = 0.0;
  1311. for (int i = 0; i < n; ++i) {
  1312. sum += (ggml_float)x[i];
  1313. }
  1314. *s = sum;
  1315. }
  1316. inline static void ggml_vec_sum_f16_ggf(const int n, float * s, const ggml_fp16_t * x) {
  1317. float sum = 0.0f;
  1318. for (int i = 0; i < n; ++i) {
  1319. sum += GGML_FP16_TO_FP32(x[i]);
  1320. }
  1321. *s = sum;
  1322. }
  1323. inline static void ggml_vec_max_f32(const int n, float * s, const float * x) {
  1324. #ifndef GGML_USE_ACCELERATE
  1325. float max = -INFINITY;
  1326. for (int i = 0; i < n; ++i) {
  1327. max = MAX(max, x[i]);
  1328. }
  1329. *s = max;
  1330. #else
  1331. vDSP_maxv(x, 1, s, n);
  1332. #endif
  1333. }
  1334. inline static void ggml_vec_norm_inv_f32(const int n, float * s, const float * x) {
  1335. ggml_vec_norm_f32(n, s, x);
  1336. *s = 1.f/(*s);
  1337. }
  1338. inline static void ggml_vec_argmax_f32(const int n, int * s, const float * x) {
  1339. float max = -INFINITY;
  1340. int idx = 0;
  1341. for (int i = 0; i < n; ++i) {
  1342. max = MAX(max, x[i]);
  1343. if (max == x[i]) { idx = i; }
  1344. }
  1345. *s = idx;
  1346. }
  1347. //
  1348. // data types
  1349. //
  1350. static const char * GGML_OP_NAME[GGML_OP_COUNT] = {
  1351. "NONE",
  1352. "DUP",
  1353. "ADD",
  1354. "ADD1",
  1355. "ACC",
  1356. "SUB",
  1357. "MUL",
  1358. "DIV",
  1359. "SQR",
  1360. "SQRT",
  1361. "LOG",
  1362. "SUM",
  1363. "SUM_ROWS",
  1364. "MEAN",
  1365. "ARGMAX",
  1366. "REPEAT",
  1367. "REPEAT_BACK",
  1368. "CONCAT",
  1369. "SILU_BACK",
  1370. "NORM",
  1371. "RMS_NORM",
  1372. "RMS_NORM_BACK",
  1373. "GROUP_NORM",
  1374. "MUL_MAT",
  1375. "MUL_MAT_ID",
  1376. "OUT_PROD",
  1377. "SCALE",
  1378. "SET",
  1379. "CPY",
  1380. "CONT",
  1381. "RESHAPE",
  1382. "VIEW",
  1383. "PERMUTE",
  1384. "TRANSPOSE",
  1385. "GET_ROWS",
  1386. "GET_ROWS_BACK",
  1387. "DIAG",
  1388. "DIAG_MASK_INF",
  1389. "DIAG_MASK_ZERO",
  1390. "SOFT_MAX",
  1391. "SOFT_MAX_BACK",
  1392. "ROPE",
  1393. "ROPE_BACK",
  1394. "ALIBI",
  1395. "CLAMP",
  1396. "CONV_TRANSPOSE_1D",
  1397. "IM2COL",
  1398. "CONV_TRANSPOSE_2D",
  1399. "POOL_1D",
  1400. "POOL_2D",
  1401. "UPSCALE",
  1402. "PAD",
  1403. "ARGSORT",
  1404. "LEAKY_RELU",
  1405. "FLASH_ATTN",
  1406. "FLASH_FF",
  1407. "FLASH_ATTN_BACK",
  1408. "WIN_PART",
  1409. "WIN_UNPART",
  1410. "GET_REL_POS",
  1411. "ADD_REL_POS",
  1412. "UNARY",
  1413. "MAP_UNARY",
  1414. "MAP_BINARY",
  1415. "MAP_CUSTOM1_F32",
  1416. "MAP_CUSTOM2_F32",
  1417. "MAP_CUSTOM3_F32",
  1418. "MAP_CUSTOM1",
  1419. "MAP_CUSTOM2",
  1420. "MAP_CUSTOM3",
  1421. "CROSS_ENTROPY_LOSS",
  1422. "CROSS_ENTROPY_LOSS_BACK",
  1423. };
  1424. static_assert(GGML_OP_COUNT == 72, "GGML_OP_COUNT != 72");
  1425. static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = {
  1426. "none",
  1427. "x",
  1428. "x+y",
  1429. "x+y",
  1430. "view(x,nb,offset)+=y->x",
  1431. "x-y",
  1432. "x*y",
  1433. "x/y",
  1434. "x^2",
  1435. "√x",
  1436. "log(x)",
  1437. "Σx",
  1438. "Σx_k",
  1439. "Σx/n",
  1440. "argmax(x)",
  1441. "repeat(x)",
  1442. "repeat_back(x)",
  1443. "concat(x, y)",
  1444. "silu_back(x)",
  1445. "norm(x)",
  1446. "rms_norm(x)",
  1447. "rms_norm_back(x)",
  1448. "group_norm(x)",
  1449. "X*Y",
  1450. "X[i]*Y",
  1451. "X*Y",
  1452. "x*v",
  1453. "y-\\>view(x)",
  1454. "x-\\>y",
  1455. "cont(x)",
  1456. "reshape(x)",
  1457. "view(x)",
  1458. "permute(x)",
  1459. "transpose(x)",
  1460. "get_rows(x)",
  1461. "get_rows_back(x)",
  1462. "diag(x)",
  1463. "diag_mask_inf(x)",
  1464. "diag_mask_zero(x)",
  1465. "soft_max(x)",
  1466. "soft_max_back(x)",
  1467. "rope(x)",
  1468. "rope_back(x)",
  1469. "alibi(x)",
  1470. "clamp(x)",
  1471. "conv_transpose_1d(x)",
  1472. "im2col(x)",
  1473. "conv_transpose_2d(x)",
  1474. "pool_1d(x)",
  1475. "pool_2d(x)",
  1476. "upscale(x)",
  1477. "pad(x)",
  1478. "argsort(x)",
  1479. "leaky_relu(x)",
  1480. "flash_attn(x)",
  1481. "flash_ff(x)",
  1482. "flash_attn_back(x)",
  1483. "win_part(x)",
  1484. "win_unpart(x)",
  1485. "get_rel_pos(x)",
  1486. "add_rel_pos(x)",
  1487. "unary(x)",
  1488. "f(x)",
  1489. "f(x,y)",
  1490. "custom_f32(x)",
  1491. "custom_f32(x,y)",
  1492. "custom_f32(x,y,z)",
  1493. "custom(x)",
  1494. "custom(x,y)",
  1495. "custom(x,y,z)",
  1496. "cross_entropy_loss(x,y)",
  1497. "cross_entropy_loss_back(x,y)",
  1498. };
  1499. static_assert(GGML_OP_COUNT == 72, "GGML_OP_COUNT != 72");
  1500. static_assert(GGML_OP_POOL_COUNT == 2, "GGML_OP_POOL_COUNT != 2");
  1501. static const char * GGML_UNARY_OP_NAME[GGML_UNARY_OP_COUNT] = {
  1502. "ABS",
  1503. "SGN",
  1504. "NEG",
  1505. "STEP",
  1506. "TANH",
  1507. "ELU",
  1508. "RELU",
  1509. "GELU",
  1510. "GELU_QUICK",
  1511. "SILU",
  1512. };
  1513. static_assert(GGML_UNARY_OP_COUNT == 10, "GGML_UNARY_OP_COUNT != 10");
  1514. static_assert(sizeof(struct ggml_object)%GGML_MEM_ALIGN == 0, "ggml_object size must be a multiple of GGML_MEM_ALIGN");
  1515. static_assert(sizeof(struct ggml_tensor)%GGML_MEM_ALIGN == 0, "ggml_tensor size must be a multiple of GGML_MEM_ALIGN");
// WARN:
// Misconfiguration can lead to problems that are hard to reason about:
// * At best it crashes or talks nonsense.
// * At worst it produces subtly different results that are hard to perceive.
//
// An op has to enable INIT or FINALIZE when any of its branches needs that pass.
// Take care with compile options (e.g., GGML_USE_xxx).
  1523. static bool GGML_OP_HAS_INIT [GGML_OP_COUNT] = { 0 };
  1524. static bool GGML_OP_HAS_FINALIZE[GGML_OP_COUNT] = { 0 };
  1525. static void ggml_setup_op_has_task_pass(void) {
  1526. { // INIT
  1527. bool * p = GGML_OP_HAS_INIT;
  1528. p[GGML_OP_ACC ] = true;
  1529. p[GGML_OP_MUL_MAT ] = true;
  1530. p[GGML_OP_MUL_MAT_ID ] = true;
  1531. p[GGML_OP_OUT_PROD ] = true;
  1532. p[GGML_OP_SET ] = true;
  1533. p[GGML_OP_GET_ROWS_BACK ] = true;
  1534. p[GGML_OP_DIAG_MASK_INF ] = true;
  1535. p[GGML_OP_DIAG_MASK_ZERO ] = true;
  1536. p[GGML_OP_CONV_TRANSPOSE_1D ] = true;
  1537. p[GGML_OP_CONV_TRANSPOSE_2D ] = true;
  1538. p[GGML_OP_FLASH_ATTN_BACK ] = true;
  1539. p[GGML_OP_CROSS_ENTROPY_LOSS ] = true;
  1540. p[GGML_OP_ADD_REL_POS ] = true;
  1541. }
  1542. { // FINALIZE
  1543. bool * p = GGML_OP_HAS_FINALIZE;
  1544. p[GGML_OP_CROSS_ENTROPY_LOSS ] = true;
  1545. }
  1546. }
  1547. //
  1548. // ggml context
  1549. //
  1550. struct ggml_context {
  1551. size_t mem_size;
  1552. void * mem_buffer;
  1553. bool mem_buffer_owned;
  1554. bool no_alloc;
  1555. bool no_alloc_save; // this is used to save the no_alloc state when using scratch buffers
  1556. int n_objects;
  1557. struct ggml_object * objects_begin;
  1558. struct ggml_object * objects_end;
  1559. struct ggml_scratch scratch;
  1560. struct ggml_scratch scratch_save;
  1561. };
  1562. struct ggml_context_container {
  1563. bool used;
  1564. struct ggml_context context;
  1565. };
  1566. //
  1567. // NUMA support
  1568. //
  1569. #define GGML_NUMA_MAX_NODES 8
  1570. #define GGML_NUMA_MAX_CPUS 512
  1571. struct ggml_numa_node {
  1572. uint32_t cpus[GGML_NUMA_MAX_CPUS]; // hardware threads on this node
  1573. uint32_t n_cpus;
  1574. };
  1575. struct ggml_numa_nodes {
  1576. struct ggml_numa_node nodes[GGML_NUMA_MAX_NODES];
  1577. uint32_t n_nodes;
  1578. uint32_t total_cpus; // hardware threads on system
  1579. };
  1580. //
  1581. // ggml state
  1582. //
  1583. struct ggml_state {
  1584. struct ggml_context_container contexts[GGML_MAX_CONTEXTS];
  1585. struct ggml_numa_nodes numa;
  1586. };
  1587. // global state
  1588. static struct ggml_state g_state;
  1589. static atomic_int g_state_barrier = 0;
  1590. // barrier via spin lock
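// atomic_fetch_add returns the previous counter value: 0 means the section was free and the
// caller now holds it; any other value means another thread is inside (or contending), so the
// caller backs off (decrements the counter), yields, and retries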
  1591. inline static void ggml_critical_section_start(void) {
  1592. int processing = atomic_fetch_add(&g_state_barrier, 1);
  1593. while (processing > 0) {
  1594. // wait for other threads to finish
  1595. atomic_fetch_sub(&g_state_barrier, 1);
  1596. sched_yield(); // TODO: reconsider this
  1597. processing = atomic_fetch_add(&g_state_barrier, 1);
  1598. }
  1599. }
// TODO: make this execute automatically somehow
// e.g. via some sort of "sentry" mechanism
  1602. inline static void ggml_critical_section_end(void) {
  1603. atomic_fetch_sub(&g_state_barrier, 1);
  1604. }
  1605. void ggml_numa_init(void) {
  1606. if (g_state.numa.n_nodes > 0) {
  1607. fprintf(stderr, "ggml_numa_init: NUMA already initialized\n");
  1608. return;
  1609. }
  1610. #ifdef __linux__
  1611. struct stat st;
  1612. char path[256];
  1613. int rv;
  1614. // enumerate nodes
  1615. while (g_state.numa.n_nodes < GGML_NUMA_MAX_NODES) {
  1616. rv = snprintf(path, sizeof(path), "/sys/devices/system/node/node%u", g_state.numa.n_nodes);
  1617. GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path));
  1618. if (stat(path, &st) != 0) { break; }
  1619. ++g_state.numa.n_nodes;
  1620. }
  1621. // enumerate CPUs
  1622. while (g_state.numa.total_cpus < GGML_NUMA_MAX_CPUS) {
  1623. rv = snprintf(path, sizeof(path), "/sys/devices/system/cpu/cpu%u", g_state.numa.total_cpus);
  1624. GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path));
  1625. if (stat(path, &st) != 0) { break; }
  1626. ++g_state.numa.total_cpus;
  1627. }
  1628. GGML_PRINT_DEBUG("found %u numa nodes, %u CPUs\n", g_state.numa.n_nodes, g_state.numa.total_cpus);
  1629. if (g_state.numa.n_nodes < 1 || g_state.numa.total_cpus < 1) {
  1630. g_state.numa.n_nodes = 0;
  1631. return;
  1632. }
  1633. for (uint32_t n = 0; n < g_state.numa.n_nodes; ++n) {
  1634. struct ggml_numa_node * node = &g_state.numa.nodes[n];
  1635. GGML_PRINT_DEBUG("CPUs on node %u:", n);
  1636. node->n_cpus = 0;
  1637. for (uint32_t c = 0; c < g_state.numa.total_cpus; ++c) {
  1638. rv = snprintf(path, sizeof(path), "/sys/devices/system/node/node%u/cpu%u", n, c);
  1639. GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path));
  1640. if (stat(path, &st) == 0) {
  1641. node->cpus[node->n_cpus++] = c;
  1642. GGML_PRINT_DEBUG(" %u", c);
  1643. }
  1644. }
  1645. GGML_PRINT_DEBUG("\n");
  1646. }
  1647. if (ggml_is_numa()) {
  1648. FILE *fptr = fopen("/proc/sys/kernel/numa_balancing", "r");
  1649. if (fptr != NULL) {
  1650. char buf[42];
  1651. if (fgets(buf, sizeof(buf), fptr) && strncmp(buf, "0\n", sizeof(buf)) != 0) {
  1652. GGML_PRINT("WARNING: /proc/sys/kernel/numa_balancing is enabled, this has been observed to impair performance\n");
  1653. }
  1654. fclose(fptr);
  1655. }
  1656. }
  1657. #else
  1658. // TODO
  1659. #endif
  1660. }
  1661. bool ggml_is_numa(void) {
  1662. return g_state.numa.n_nodes > 1;
  1663. }
  1664. ////////////////////////////////////////////////////////////////////////////////
  1665. void ggml_print_object(const struct ggml_object * obj) {
  1666. GGML_PRINT(" - ggml_object: type = %d, offset = %zu, size = %zu, next = %p\n",
  1667. obj->type, obj->offs, obj->size, (const void *) obj->next);
  1668. }
  1669. void ggml_print_objects(const struct ggml_context * ctx) {
  1670. struct ggml_object * obj = ctx->objects_begin;
  1671. GGML_PRINT("%s: objects in context %p:\n", __func__, (const void *) ctx);
  1672. while (obj != NULL) {
  1673. ggml_print_object(obj);
  1674. obj = obj->next;
  1675. }
  1676. GGML_PRINT("%s: --- end ---\n", __func__);
  1677. }
  1678. int64_t ggml_nelements(const struct ggml_tensor * tensor) {
  1679. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  1680. return tensor->ne[0]*tensor->ne[1]*tensor->ne[2]*tensor->ne[3];
  1681. }
  1682. int64_t ggml_nrows(const struct ggml_tensor * tensor) {
  1683. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  1684. return tensor->ne[1]*tensor->ne[2]*tensor->ne[3];
  1685. }
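// total tensor size in bytes, derived from the strides (nb) so that padded or otherwise
// non-contiguous tensors are accounted for; for block-quantized types the first dimension
// is converted from elements to blocks via ggml_blck_size(type)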
  1686. size_t ggml_nbytes(const struct ggml_tensor * tensor) {
  1687. size_t nbytes;
  1688. size_t blck_size = ggml_blck_size(tensor->type);
  1689. if (blck_size == 1) {
  1690. nbytes = ggml_type_size(tensor->type);
  1691. for (int i = 0; i < GGML_MAX_DIMS; ++i) {
  1692. nbytes += (tensor->ne[i] - 1)*tensor->nb[i];
  1693. }
  1694. }
  1695. else {
  1696. nbytes = tensor->ne[0]*tensor->nb[0]/blck_size;
  1697. for (int i = 1; i < GGML_MAX_DIMS; ++i) {
  1698. nbytes += (tensor->ne[i] - 1)*tensor->nb[i];
  1699. }
  1700. }
  1701. return nbytes;
  1702. }
  1703. size_t ggml_nbytes_pad(const struct ggml_tensor * tensor) {
  1704. return GGML_PAD(ggml_nbytes(tensor), GGML_MEM_ALIGN);
  1705. }
  1706. int ggml_blck_size(enum ggml_type type) {
  1707. return type_traits[type].blck_size;
  1708. }
  1709. size_t ggml_type_size(enum ggml_type type) {
  1710. return type_traits[type].type_size;
  1711. }
  1712. size_t ggml_row_size(enum ggml_type type, int64_t ne) {
  1713. assert(ne % ggml_blck_size(type) == 0);
  1714. return ggml_type_size(type)*ne/ggml_blck_size(type);
  1715. }
  1716. double ggml_type_sizef(enum ggml_type type) {
  1717. return ((double)(type_traits[type].type_size))/type_traits[type].blck_size;
  1718. }
  1719. const char * ggml_type_name(enum ggml_type type) {
  1720. return type_traits[type].type_name;
  1721. }
  1722. bool ggml_is_quantized(enum ggml_type type) {
  1723. return type_traits[type].is_quantized;
  1724. }
  1725. const char * ggml_op_name(enum ggml_op op) {
  1726. return GGML_OP_NAME[op];
  1727. }
  1728. const char * ggml_op_symbol(enum ggml_op op) {
  1729. return GGML_OP_SYMBOL[op];
  1730. }
  1731. const char * ggml_unary_op_name(enum ggml_unary_op op) {
  1732. return GGML_UNARY_OP_NAME[op];
  1733. }
  1734. const char * ggml_op_desc(const struct ggml_tensor * t) {
  1735. if (t->op == GGML_OP_UNARY) {
  1736. enum ggml_unary_op uop = ggml_get_unary_op(t);
  1737. return ggml_unary_op_name(uop);
  1738. }
  1739. else {
  1740. return ggml_op_name(t->op);
  1741. }
  1742. }
  1743. size_t ggml_element_size(const struct ggml_tensor * tensor) {
  1744. return ggml_type_size(tensor->type);
  1745. }
  1746. bool ggml_is_scalar(const struct ggml_tensor * tensor) {
  1747. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  1748. return tensor->ne[0] == 1 && tensor->ne[1] == 1 && tensor->ne[2] == 1 && tensor->ne[3] == 1;
  1749. }
  1750. bool ggml_is_vector(const struct ggml_tensor * tensor) {
  1751. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  1752. return tensor->ne[1] == 1 && tensor->ne[2] == 1 && tensor->ne[3] == 1;
  1753. }
  1754. bool ggml_is_matrix(const struct ggml_tensor * tensor) {
  1755. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  1756. return tensor->ne[2] == 1 && tensor->ne[3] == 1;
  1757. }
  1758. bool ggml_is_3d(const struct ggml_tensor * tensor) {
  1759. return tensor->ne[3] == 1;
  1760. }
  1761. int ggml_n_dims(const struct ggml_tensor * tensor) {
  1762. for (int i = GGML_MAX_DIMS - 1; i >= 1; --i) {
  1763. if (tensor->ne[i] > 1) {
  1764. return i + 1;
  1765. }
  1766. }
  1767. return 1;
  1768. }
  1769. static inline bool ggml_can_mul_mat(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
  1770. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  1771. return (t0->ne[0] == t1->ne[0]) &&
  1772. (t1->ne[2]%t0->ne[2] == 0) && // verify t0 is broadcastable
  1773. (t1->ne[3]%t0->ne[3] == 0);
  1774. }
  1775. static inline bool ggml_can_out_prod(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
  1776. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  1777. return (t0->ne[1] == t1->ne[1]) &&
  1778. (t1->ne[2]%t0->ne[2] == 0) && // verify t0 is broadcastable
  1779. (t1->ne[3]%t0->ne[3] == 0);
  1780. }
  1781. enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype) {
  1782. enum ggml_type wtype = GGML_TYPE_COUNT;
  1783. switch (ftype) {
  1784. case GGML_FTYPE_ALL_F32: wtype = GGML_TYPE_F32; break;
  1785. case GGML_FTYPE_MOSTLY_F16: wtype = GGML_TYPE_F16; break;
  1786. case GGML_FTYPE_MOSTLY_Q4_0: wtype = GGML_TYPE_Q4_0; break;
  1787. case GGML_FTYPE_MOSTLY_Q4_1: wtype = GGML_TYPE_Q4_1; break;
  1788. case GGML_FTYPE_MOSTLY_Q5_0: wtype = GGML_TYPE_Q5_0; break;
  1789. case GGML_FTYPE_MOSTLY_Q5_1: wtype = GGML_TYPE_Q5_1; break;
  1790. case GGML_FTYPE_MOSTLY_Q8_0: wtype = GGML_TYPE_Q8_0; break;
  1791. case GGML_FTYPE_MOSTLY_Q2_K: wtype = GGML_TYPE_Q2_K; break;
  1792. case GGML_FTYPE_MOSTLY_Q3_K: wtype = GGML_TYPE_Q3_K; break;
  1793. case GGML_FTYPE_MOSTLY_Q4_K: wtype = GGML_TYPE_Q4_K; break;
  1794. case GGML_FTYPE_MOSTLY_Q5_K: wtype = GGML_TYPE_Q5_K; break;
  1795. case GGML_FTYPE_MOSTLY_Q6_K: wtype = GGML_TYPE_Q6_K; break;
  1796. case GGML_FTYPE_UNKNOWN: wtype = GGML_TYPE_COUNT; break;
  1797. case GGML_FTYPE_MOSTLY_Q4_1_SOME_F16: wtype = GGML_TYPE_COUNT; break;
  1798. }
  1799. GGML_ASSERT(wtype != GGML_TYPE_COUNT);
  1800. return wtype;
  1801. }
  1802. size_t ggml_tensor_overhead(void) {
  1803. return GGML_OBJECT_SIZE + GGML_TENSOR_SIZE;
  1804. }
  1805. bool ggml_is_transposed(const struct ggml_tensor * tensor) {
  1806. return tensor->nb[0] > tensor->nb[1];
  1807. }
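// ne[i] is the number of elements along dimension i and nb[i] is its stride in bytes;
// a tensor is contiguous when the strides describe a densely packed layout with
// dimension 0 varying fastest and no padding between higher dimensions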
  1808. bool ggml_is_contiguous(const struct ggml_tensor * tensor) {
  1809. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  1810. return
  1811. tensor->nb[0] == ggml_type_size(tensor->type) &&
  1812. tensor->nb[1] == (tensor->nb[0]*tensor->ne[0])/ggml_blck_size(tensor->type) &&
  1813. tensor->nb[2] == tensor->nb[1]*tensor->ne[1] &&
  1814. tensor->nb[3] == tensor->nb[2]*tensor->ne[2];
  1815. }
  1816. static inline bool ggml_is_contiguous_except_dim_1(const struct ggml_tensor * tensor) {
  1817. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  1818. return
  1819. tensor->nb[0] == ggml_type_size(tensor->type) &&
  1820. tensor->nb[2] == tensor->nb[1]*tensor->ne[1] &&
  1821. tensor->nb[3] == tensor->nb[2]*tensor->ne[2];
  1822. }
  1823. bool ggml_is_permuted(const struct ggml_tensor * tensor) {
  1824. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  1825. return tensor->nb[0] > tensor->nb[1] || tensor->nb[1] > tensor->nb[2] || tensor->nb[2] > tensor->nb[3];
  1826. }
  1827. static inline bool ggml_is_padded_1d(const struct ggml_tensor * tensor) {
  1828. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  1829. return
  1830. tensor->nb[0] == ggml_type_size(tensor->type) &&
  1831. tensor->nb[2] == tensor->nb[1]*tensor->ne[1] &&
  1832. tensor->nb[3] == tensor->nb[2]*tensor->ne[2];
  1833. }
  1834. bool ggml_are_same_shape(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
  1835. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  1836. return
  1837. (t0->ne[0] == t1->ne[0] ) &&
  1838. (t0->ne[1] == t1->ne[1] ) &&
  1839. (t0->ne[2] == t1->ne[2] ) &&
  1840. (t0->ne[3] == t1->ne[3] );
  1841. }
// check if t1 can be represented as a repetition of t0
  1843. static inline bool ggml_can_repeat(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
  1844. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  1845. return
  1846. (t1->ne[0]%t0->ne[0] == 0) &&
  1847. (t1->ne[1]%t0->ne[1] == 0) &&
  1848. (t1->ne[2]%t0->ne[2] == 0) &&
  1849. (t1->ne[3]%t0->ne[3] == 0);
  1850. }
  1851. static inline bool ggml_can_repeat_rows(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
  1852. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  1853. return (t0->ne[0] == t1->ne[0]) && ggml_can_repeat(t0, t1);
  1854. }
  1855. static inline int ggml_up32(int n) {
  1856. return (n + 31) & ~31;
  1857. }
  1858. //static inline int ggml_up64(int n) {
  1859. // return (n + 63) & ~63;
  1860. //}
  1861. static inline int ggml_up(int n, int m) {
  1862. // assert m is a power of 2
  1863. GGML_ASSERT((m & (m - 1)) == 0);
  1864. return (n + m - 1) & ~(m - 1);
  1865. }
  1866. // assert that pointer is aligned to GGML_MEM_ALIGN
  1867. #define ggml_assert_aligned(ptr) \
  1868. GGML_ASSERT(((uintptr_t) (ptr))%GGML_MEM_ALIGN == 0)
  1869. ////////////////////////////////////////////////////////////////////////////////
  1870. struct ggml_context * ggml_init(struct ggml_init_params params) {
  1871. // make this function thread safe
  1872. ggml_critical_section_start();
  1873. static bool is_first_call = true;
  1874. if (is_first_call) {
  1875. // initialize time system (required on Windows)
  1876. ggml_time_init();
  1877. // initialize GELU, Quick GELU, SILU and EXP F32 tables
  1878. {
  1879. const uint64_t t_start = ggml_time_us(); UNUSED(t_start);
  1880. ggml_fp16_t ii;
  1881. for (int i = 0; i < (1 << 16); ++i) {
  1882. uint16_t ui = i;
  1883. memcpy(&ii, &ui, sizeof(ii));
  1884. const float f = ggml_table_f32_f16[i] = GGML_COMPUTE_FP16_TO_FP32(ii);
  1885. ggml_table_gelu_f16[i] = GGML_FP32_TO_FP16(ggml_gelu_f32(f));
  1886. ggml_table_gelu_quick_f16[i] = GGML_FP32_TO_FP16(ggml_gelu_quick_f32(f));
  1887. ggml_table_silu_f16[i] = GGML_FP32_TO_FP16(ggml_silu_f32(f));
  1888. ggml_table_exp_f16[i] = GGML_FP32_TO_FP16(expf(f));
  1889. }
  1890. const uint64_t t_end = ggml_time_us(); UNUSED(t_end);
  1891. GGML_PRINT_DEBUG("%s: GELU, Quick GELU, SILU and EXP tables initialized in %f ms\n", __func__, (t_end - t_start)/1000.0f);
  1892. }
  1893. // initialize g_state
  1894. {
  1895. const uint64_t t_start = ggml_time_us(); UNUSED(t_start);
  1896. g_state = (struct ggml_state) {
  1897. /*.contexts =*/ { { 0 } },
  1898. /*.numa =*/ {
  1899. .n_nodes = 0,
  1900. .total_cpus = 0,
  1901. },
  1902. };
  1903. for (int i = 0; i < GGML_MAX_CONTEXTS; ++i) {
  1904. g_state.contexts[i].used = false;
  1905. }
  1906. const uint64_t t_end = ggml_time_us(); UNUSED(t_end);
  1907. GGML_PRINT_DEBUG("%s: g_state initialized in %f ms\n", __func__, (t_end - t_start)/1000.0f);
  1908. }
  1909. #if defined(GGML_USE_CUBLAS)
  1910. ggml_init_cublas();
  1911. #elif defined(GGML_USE_CLBLAST)
  1912. ggml_cl_init();
  1913. #endif
  1914. ggml_setup_op_has_task_pass();
  1915. is_first_call = false;
  1916. }
  1917. // find non-used context in g_state
  1918. struct ggml_context * ctx = NULL;
  1919. for (int i = 0; i < GGML_MAX_CONTEXTS; i++) {
  1920. if (!g_state.contexts[i].used) {
  1921. g_state.contexts[i].used = true;
  1922. ctx = &g_state.contexts[i].context;
  1923. GGML_PRINT_DEBUG("%s: found unused context %d\n", __func__, i);
  1924. break;
  1925. }
  1926. }
  1927. if (ctx == NULL) {
  1928. GGML_PRINT_DEBUG("%s: no unused context found\n", __func__);
  1929. ggml_critical_section_end();
  1930. return NULL;
  1931. }
// allow calling ggml_init with 0 size
  1933. if (params.mem_size == 0) {
  1934. params.mem_size = GGML_MEM_ALIGN;
  1935. }
  1936. const size_t mem_size = params.mem_buffer ? params.mem_size : GGML_PAD(params.mem_size, GGML_MEM_ALIGN);
  1937. *ctx = (struct ggml_context) {
  1938. /*.mem_size =*/ mem_size,
  1939. /*.mem_buffer =*/ params.mem_buffer ? params.mem_buffer : GGML_ALIGNED_MALLOC(mem_size),
  1940. /*.mem_buffer_owned =*/ params.mem_buffer ? false : true,
  1941. /*.no_alloc =*/ params.no_alloc,
  1942. /*.no_alloc_save =*/ params.no_alloc,
  1943. /*.n_objects =*/ 0,
  1944. /*.objects_begin =*/ NULL,
  1945. /*.objects_end =*/ NULL,
  1946. /*.scratch =*/ { 0, 0, NULL, },
  1947. /*.scratch_save =*/ { 0, 0, NULL, },
  1948. };
  1949. GGML_ASSERT(ctx->mem_buffer != NULL);
  1950. ggml_assert_aligned(ctx->mem_buffer);
  1951. GGML_PRINT_DEBUG("%s: context initialized\n", __func__);
  1952. ggml_critical_section_end();
  1953. return ctx;
  1954. }
  1955. void ggml_free(struct ggml_context * ctx) {
  1956. // make this function thread safe
  1957. ggml_critical_section_start();
  1958. bool found = false;
  1959. for (int i = 0; i < GGML_MAX_CONTEXTS; i++) {
  1960. if (&g_state.contexts[i].context == ctx) {
  1961. g_state.contexts[i].used = false;
  1962. GGML_PRINT_DEBUG("%s: context %d has been freed. memory used = %zu\n",
  1963. __func__, i, ggml_used_mem(ctx));
  1964. if (ctx->mem_buffer_owned) {
  1965. GGML_ALIGNED_FREE(ctx->mem_buffer);
  1966. }
  1967. found = true;
  1968. break;
  1969. }
  1970. }
  1971. if (!found) {
  1972. GGML_PRINT_DEBUG("%s: context not found\n", __func__);
  1973. }
  1974. ggml_critical_section_end();
  1975. }
  1976. size_t ggml_used_mem(const struct ggml_context * ctx) {
  1977. return ctx->objects_end == NULL ? 0 : ctx->objects_end->offs + ctx->objects_end->size;
  1978. }
  1979. size_t ggml_set_scratch(struct ggml_context * ctx, struct ggml_scratch scratch) {
  1980. const size_t result = ctx->scratch.data ? ctx->scratch.offs : 0;
  1981. ctx->scratch = scratch;
  1982. return result;
  1983. }
  1984. bool ggml_get_no_alloc(struct ggml_context * ctx) {
  1985. return ctx->no_alloc;
  1986. }
  1987. void ggml_set_no_alloc(struct ggml_context * ctx, bool no_alloc) {
  1988. ctx->no_alloc = no_alloc;
  1989. }
  1990. void * ggml_get_mem_buffer(const struct ggml_context * ctx) {
  1991. return ctx->mem_buffer;
  1992. }
  1993. size_t ggml_get_mem_size(const struct ggml_context * ctx) {
  1994. return ctx->mem_size;
  1995. }
  1996. size_t ggml_get_max_tensor_size(const struct ggml_context * ctx) {
  1997. size_t max_size = 0;
  1998. struct ggml_object * obj = ctx->objects_begin;
  1999. while (obj != NULL) {
  2000. if (obj->type == GGML_OBJECT_TENSOR) {
  2001. struct ggml_tensor * tensor = (struct ggml_tensor *) ((char *) ctx->mem_buffer + obj->offs);
  2002. const size_t size = ggml_nbytes(tensor);
  2003. if (max_size < size) {
  2004. max_size = size;
  2005. }
  2006. }
  2007. obj = obj->next;
  2008. }
  2009. return max_size;
  2010. }
  2011. // IMPORTANT:
  2012. // when creating "opt" tensors, always save and load the scratch buffer
// this is an error-prone process, but it is necessary to support inplace
  2014. // operators when using scratch buffers
  2015. // TODO: implement a better way
  2016. static void ggml_scratch_save(struct ggml_context * ctx) {
  2017. // this is needed to allow opt tensors to store their data
  2018. // TODO: again, need to find a better way
  2019. ctx->no_alloc_save = ctx->no_alloc;
  2020. ctx->no_alloc = false;
  2021. ctx->scratch_save = ctx->scratch;
  2022. ctx->scratch.data = NULL;
  2023. }
  2024. static void ggml_scratch_load(struct ggml_context * ctx) {
  2025. ctx->no_alloc = ctx->no_alloc_save;
  2026. ctx->scratch = ctx->scratch_save;
  2027. }
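// typical usage around the creation of small "opt" tensors (see ggml_new_i32 / ggml_new_f32 below):
//   ggml_scratch_save(ctx);   // bypass the scratch buffer and no_alloc for the next allocation
//   struct ggml_tensor * t = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
//   ggml_scratch_load(ctx);   // restore the previous scratch buffer and no_alloc state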
  2028. ////////////////////////////////////////////////////////////////////////////////
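// objects are laid out linearly in the context's memory pool (a simple bump allocator):
// each allocation is a struct ggml_object header immediately followed by its payload,
// padded to GGML_MEM_ALIGN and linked to the previous object via ->next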
  2029. static struct ggml_object * ggml_new_object(struct ggml_context * ctx, enum ggml_object_type type, size_t size) {
  2030. // always insert objects at the end of the context's memory pool
  2031. struct ggml_object * obj_cur = ctx->objects_end;
  2032. const size_t cur_offs = obj_cur == NULL ? 0 : obj_cur->offs;
  2033. const size_t cur_size = obj_cur == NULL ? 0 : obj_cur->size;
  2034. const size_t cur_end = cur_offs + cur_size;
  2035. // align to GGML_MEM_ALIGN
  2036. size_t size_needed = GGML_PAD(size, GGML_MEM_ALIGN);
  2037. char * const mem_buffer = ctx->mem_buffer;
  2038. struct ggml_object * const obj_new = (struct ggml_object *)(mem_buffer + cur_end);
  2039. if (cur_end + size_needed + GGML_OBJECT_SIZE > ctx->mem_size) {
  2040. GGML_PRINT("%s: not enough space in the context's memory pool (needed %zu, available %zu)\n",
  2041. __func__, cur_end + size_needed, ctx->mem_size);
  2042. assert(false);
  2043. return NULL;
  2044. }
  2045. *obj_new = (struct ggml_object) {
  2046. .offs = cur_end + GGML_OBJECT_SIZE,
  2047. .size = size_needed,
  2048. .next = NULL,
  2049. .type = type,
  2050. };
  2051. ggml_assert_aligned(mem_buffer + obj_new->offs);
  2052. if (obj_cur != NULL) {
  2053. obj_cur->next = obj_new;
  2054. } else {
  2055. // this is the first object in this context
  2056. ctx->objects_begin = obj_new;
  2057. }
  2058. ctx->objects_end = obj_new;
  2059. //printf("%s: inserted new object at %zu, size = %zu\n", __func__, cur_end, obj_new->size);
  2060. return obj_new;
  2061. }
  2062. static struct ggml_tensor * ggml_new_tensor_impl(
  2063. struct ggml_context * ctx,
  2064. enum ggml_type type,
  2065. int n_dims,
  2066. const int64_t * ne,
  2067. struct ggml_tensor * view_src,
  2068. size_t view_offs) {
  2069. assert(n_dims >= 1 && n_dims <= GGML_MAX_DIMS);
  2070. // find the base tensor and absolute offset
  2071. if (view_src != NULL && view_src->view_src != NULL) {
  2072. view_offs += view_src->view_offs;
  2073. view_src = view_src->view_src;
  2074. }
  2075. size_t data_size = ggml_row_size(type, ne[0]);
  2076. for (int i = 1; i < n_dims; i++) {
  2077. data_size *= ne[i];
  2078. }
  2079. GGML_ASSERT(view_src == NULL || data_size + view_offs <= ggml_nbytes(view_src));
  2080. void * data = view_src != NULL ? view_src->data : NULL;
  2081. if (data != NULL) {
  2082. data = (char *) data + view_offs;
  2083. }
  2084. size_t obj_alloc_size = 0;
  2085. if (view_src == NULL && !ctx->no_alloc) {
  2086. if (ctx->scratch.data != NULL) {
  2087. // allocate tensor data in the scratch buffer
  2088. if (ctx->scratch.offs + data_size > ctx->scratch.size) {
  2089. GGML_PRINT("%s: not enough space in the scratch memory pool (needed %zu, available %zu)\n",
  2090. __func__, ctx->scratch.offs + data_size, ctx->scratch.size);
  2091. assert(false);
  2092. return NULL;
  2093. }
  2094. data = (char * const) ctx->scratch.data + ctx->scratch.offs;
  2095. ctx->scratch.offs += data_size;
  2096. } else {
  2097. // allocate tensor data in the context's memory pool
  2098. obj_alloc_size = data_size;
  2099. }
  2100. }
  2101. struct ggml_object * const obj_new = ggml_new_object(ctx, GGML_OBJECT_TENSOR, GGML_TENSOR_SIZE + obj_alloc_size);
  2102. // TODO: for recoverable errors, we would need to free the data allocated from the scratch buffer here
  2103. struct ggml_tensor * const result = (struct ggml_tensor *)((char *)ctx->mem_buffer + obj_new->offs);
  2104. *result = (struct ggml_tensor) {
  2105. /*.type =*/ type,
  2106. /*.backend =*/ GGML_BACKEND_CPU,
  2107. /*.buffer =*/ NULL,
  2108. /*.ne =*/ { 1, 1, 1, 1 },
  2109. /*.nb =*/ { 0, 0, 0, 0 },
  2110. /*.op =*/ GGML_OP_NONE,
  2111. /*.op_params =*/ { 0 },
  2112. /*.is_param =*/ false,
  2113. /*.grad =*/ NULL,
  2114. /*.src =*/ { NULL },
  2115. /*.perf_runs =*/ 0,
  2116. /*.perf_cycles =*/ 0,
  2117. /*.perf_time_us =*/ 0,
  2118. /*.view_src =*/ view_src,
  2119. /*.view_offs =*/ view_offs,
  2120. /*.data =*/ obj_alloc_size > 0 ? (void *)(result + 1) : data,
  2121. /*.name =*/ { 0 },
  2122. /*.extra =*/ NULL,
  2123. /*.padding =*/ { 0 },
  2124. };
  2125. // TODO: this should not be needed as long as we don't rely on aligned SIMD loads
  2126. //ggml_assert_aligned(result->data);
  2127. for (int i = 0; i < n_dims; i++) {
  2128. result->ne[i] = ne[i];
  2129. }
  2130. result->nb[0] = ggml_type_size(type);
  2131. result->nb[1] = result->nb[0]*(result->ne[0]/ggml_blck_size(type));
  2132. for (int i = 2; i < GGML_MAX_DIMS; i++) {
  2133. result->nb[i] = result->nb[i - 1]*result->ne[i - 1];
  2134. }
  2135. ctx->n_objects++;
  2136. return result;
  2137. }
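// Stride bookkeeping above: nb[0] is the element (or block) size, nb[1] spans
// one row of ne[0] elements, and each higher stride is the previous stride
// times the previous dimension. Worked example for a contiguous F32 tensor
// with ne = { 4, 3, 1, 1 } (block size 1, 4 bytes per element):
//
//   nb[0] = 4
//   nb[1] = 4 * (4 / 1) = 16
//   nb[2] = 16 * 3      = 48
//   nb[3] = 48 * 1      = 48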
  2138. struct ggml_tensor * ggml_new_tensor(
  2139. struct ggml_context * ctx,
  2140. enum ggml_type type,
  2141. int n_dims,
  2142. const int64_t * ne) {
  2143. return ggml_new_tensor_impl(ctx, type, n_dims, ne, NULL, 0);
  2144. }
  2145. struct ggml_tensor * ggml_new_tensor_1d(
  2146. struct ggml_context * ctx,
  2147. enum ggml_type type,
  2148. int64_t ne0) {
  2149. return ggml_new_tensor(ctx, type, 1, &ne0);
  2150. }
  2151. struct ggml_tensor * ggml_new_tensor_2d(
  2152. struct ggml_context * ctx,
  2153. enum ggml_type type,
  2154. int64_t ne0,
  2155. int64_t ne1) {
  2156. const int64_t ne[2] = { ne0, ne1 };
  2157. return ggml_new_tensor(ctx, type, 2, ne);
  2158. }
  2159. struct ggml_tensor * ggml_new_tensor_3d(
  2160. struct ggml_context * ctx,
  2161. enum ggml_type type,
  2162. int64_t ne0,
  2163. int64_t ne1,
  2164. int64_t ne2) {
  2165. const int64_t ne[3] = { ne0, ne1, ne2 };
  2166. return ggml_new_tensor(ctx, type, 3, ne);
  2167. }
  2168. struct ggml_tensor * ggml_new_tensor_4d(
  2169. struct ggml_context * ctx,
  2170. enum ggml_type type,
  2171. int64_t ne0,
  2172. int64_t ne1,
  2173. int64_t ne2,
  2174. int64_t ne3) {
  2175. const int64_t ne[4] = { ne0, ne1, ne2, ne3 };
  2176. return ggml_new_tensor(ctx, type, 4, ne);
  2177. }
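// Illustrative usage sketch for the constructors above (the identifiers ctx,
// w and x are hypothetical; ctx is assumed to come from an earlier ggml_init):
//
//   struct ggml_tensor * w = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 64, 32); // 32 rows of 64
//   struct ggml_tensor * x = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 64);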
  2178. struct ggml_tensor * ggml_new_i32(struct ggml_context * ctx, int32_t value) {
  2179. ggml_scratch_save(ctx);
  2180. struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 1);
  2181. ggml_scratch_load(ctx);
  2182. ggml_set_i32(result, value);
  2183. return result;
  2184. }
  2185. struct ggml_tensor * ggml_new_f32(struct ggml_context * ctx, float value) {
  2186. ggml_scratch_save(ctx);
  2187. struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
  2188. ggml_scratch_load(ctx);
  2189. ggml_set_f32(result, value);
  2190. return result;
  2191. }
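// ggml_new_i32/ggml_new_f32 wrap the allocation in ggml_scratch_save/_load so
// the 1-element constant is taken from the context's main pool rather than the
// scratch buffer, whose space is typically reused between graph nodes and is
// not a safe home for a long-lived value.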
  2192. struct ggml_tensor * ggml_dup_tensor(struct ggml_context * ctx, const struct ggml_tensor * src) {
  2193. return ggml_new_tensor(ctx, src->type, GGML_MAX_DIMS, src->ne);
  2194. }
  2195. static void ggml_set_op_params(struct ggml_tensor * tensor, const void * params, size_t params_size) {
  2196. GGML_ASSERT(tensor != NULL); // silence -Warray-bounds warnings
  2197. assert(params_size <= GGML_MAX_OP_PARAMS);
  2198. memcpy(tensor->op_params, params, params_size);
  2199. }
  2200. static int32_t ggml_get_op_params_i32(const struct ggml_tensor * tensor, uint32_t i) {
  2201. assert(i < GGML_MAX_OP_PARAMS / sizeof(int32_t));
  2202. return ((const int32_t *)(tensor->op_params))[i];
  2203. }
  2204. static void ggml_set_op_params_i32(struct ggml_tensor * tensor, uint32_t i, int32_t value) {
  2205. assert(i < GGML_MAX_OP_PARAMS / sizeof(int32_t));
  2206. ((int32_t *)(tensor->op_params))[i] = value;
  2207. }
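// op_params is a small fixed-size blob stored directly on the tensor; ops use
// it to pass per-node scalars (eps, strides, offsets, flags) without having to
// allocate extra source tensors. The helpers above give raw-bytes and
// int32-slot access to it.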
  2208. struct ggml_tensor * ggml_set_zero(struct ggml_tensor * tensor) {
  2209. memset(tensor->data, 0, ggml_nbytes(tensor));
  2210. return tensor;
  2211. }
  2212. struct ggml_tensor * ggml_set_i32 (struct ggml_tensor * tensor, int32_t value) {
  2213. const int n = ggml_nrows(tensor);
  2214. const int nc = tensor->ne[0];
  2215. const size_t n1 = tensor->nb[1];
  2216. char * const data = tensor->data;
  2217. switch (tensor->type) {
  2218. case GGML_TYPE_I8:
  2219. {
  2220. assert(tensor->nb[0] == sizeof(int8_t));
  2221. for (int i = 0; i < n; i++) {
  2222. ggml_vec_set_i8(nc, (int8_t *)(data + i*n1), value);
  2223. }
  2224. } break;
  2225. case GGML_TYPE_I16:
  2226. {
  2227. assert(tensor->nb[0] == sizeof(int16_t));
  2228. for (int i = 0; i < n; i++) {
  2229. ggml_vec_set_i16(nc, (int16_t *)(data + i*n1), value);
  2230. }
  2231. } break;
  2232. case GGML_TYPE_I32:
  2233. {
  2234. assert(tensor->nb[0] == sizeof(int32_t));
  2235. for (int i = 0; i < n; i++) {
  2236. ggml_vec_set_i32(nc, (int32_t *)(data + i*n1), value);
  2237. }
  2238. } break;
  2239. case GGML_TYPE_F16:
  2240. {
  2241. assert(tensor->nb[0] == sizeof(ggml_fp16_t));
  2242. for (int i = 0; i < n; i++) {
  2243. ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), GGML_FP32_TO_FP16(value));
  2244. }
  2245. } break;
  2246. case GGML_TYPE_F32:
  2247. {
  2248. assert(tensor->nb[0] == sizeof(float));
  2249. for (int i = 0; i < n; i++) {
  2250. ggml_vec_set_f32(nc, (float *)(data + i*n1), value);
  2251. }
  2252. } break;
  2253. default:
  2254. {
  2255. GGML_ASSERT(false);
  2256. } break;
  2257. }
  2258. return tensor;
  2259. }
  2260. struct ggml_tensor * ggml_set_f32(struct ggml_tensor * tensor, float value) {
  2261. const int n = ggml_nrows(tensor);
  2262. const int nc = tensor->ne[0];
  2263. const size_t n1 = tensor->nb[1];
  2264. char * const data = tensor->data;
  2265. switch (tensor->type) {
  2266. case GGML_TYPE_I8:
  2267. {
  2268. assert(tensor->nb[0] == sizeof(int8_t));
  2269. for (int i = 0; i < n; i++) {
  2270. ggml_vec_set_i8(nc, (int8_t *)(data + i*n1), value);
  2271. }
  2272. } break;
  2273. case GGML_TYPE_I16:
  2274. {
  2275. assert(tensor->nb[0] == sizeof(int16_t));
  2276. for (int i = 0; i < n; i++) {
  2277. ggml_vec_set_i16(nc, (int16_t *)(data + i*n1), value);
  2278. }
  2279. } break;
  2280. case GGML_TYPE_I32:
  2281. {
  2282. assert(tensor->nb[0] == sizeof(int32_t));
  2283. for (int i = 0; i < n; i++) {
  2284. ggml_vec_set_i32(nc, (int32_t *)(data + i*n1), value);
  2285. }
  2286. } break;
  2287. case GGML_TYPE_F16:
  2288. {
  2289. assert(tensor->nb[0] == sizeof(ggml_fp16_t));
  2290. for (int i = 0; i < n; i++) {
  2291. ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), GGML_FP32_TO_FP16(value));
  2292. }
  2293. } break;
  2294. case GGML_TYPE_F32:
  2295. {
  2296. assert(tensor->nb[0] == sizeof(float));
  2297. for (int i = 0; i < n; i++) {
  2298. ggml_vec_set_f32(nc, (float *)(data + i*n1), value);
  2299. }
  2300. } break;
  2301. default:
  2302. {
  2303. GGML_ASSERT(false);
  2304. } break;
  2305. }
  2306. return tensor;
  2307. }
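// Illustrative sketch of filling a tensor (identifiers are hypothetical):
//
//   struct ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 10);
//   ggml_set_f32(b, 1.0f); // every one of the 10 elements becomes 1.0f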
  2308. void ggml_unravel_index(const struct ggml_tensor * tensor, int64_t i, int64_t * i0, int64_t * i1, int64_t * i2, int64_t * i3) {
  2309. const int64_t ne2 = tensor->ne[2];
  2310. const int64_t ne1 = tensor->ne[1];
  2311. const int64_t ne0 = tensor->ne[0];
  2312. const int64_t i3_ = (i/(ne2*ne1*ne0));
  2313. const int64_t i2_ = (i - i3_*ne2*ne1*ne0)/(ne1*ne0);
  2314. const int64_t i1_ = (i - i3_*ne2*ne1*ne0 - i2_*ne1*ne0)/ne0;
  2315. const int64_t i0_ = (i - i3_*ne2*ne1*ne0 - i2_*ne1*ne0 - i1_*ne0);
  2316. if (i0) {
  2317. * i0 = i0_;
  2318. }
  2319. if (i1) {
  2320. * i1 = i1_;
  2321. }
  2322. if (i2) {
  2323. * i2 = i2_;
  2324. }
  2325. if (i3) {
  2326. * i3 = i3_;
  2327. }
  2328. }
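// Worked example: for ne = { 4, 3, 2, 1 } and flat index i = 17,
//   i3 = 17 / 24 = 0, i2 = 17 / 12 = 1,
//   i1 = (17 - 12) / 4 = 1, i0 = 17 - 12 - 4 = 1,
// and indeed 1 + 1*4 + 1*12 + 0*24 == 17.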
  2329. int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i) {
  2330. if (!ggml_is_contiguous(tensor)) {
  2331. int64_t id[4] = { 0, 0, 0, 0 };
  2332. ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]);
  2333. return ggml_get_i32_nd(tensor, id[0], id[1], id[2], id[3]);
  2334. }
  2335. switch (tensor->type) {
  2336. case GGML_TYPE_I8:
  2337. {
  2338. GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
  2339. return ((int8_t *)(tensor->data))[i];
  2340. }
  2341. case GGML_TYPE_I16:
  2342. {
  2343. GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
  2344. return ((int16_t *)(tensor->data))[i];
  2345. }
  2346. case GGML_TYPE_I32:
  2347. {
  2348. GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
  2349. return ((int32_t *)(tensor->data))[i];
  2350. }
  2351. case GGML_TYPE_F16:
  2352. {
  2353. GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
  2354. return GGML_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]);
  2355. }
  2356. case GGML_TYPE_F32:
  2357. {
  2358. GGML_ASSERT(tensor->nb[0] == sizeof(float));
  2359. return ((float *)(tensor->data))[i];
  2360. }
  2361. default:
  2362. {
  2363. GGML_ASSERT(false);
  2364. }
  2365. }
2366. return 0;
  2367. }
  2368. void ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value) {
  2369. if (!ggml_is_contiguous(tensor)) {
  2370. int64_t id[4] = { 0, 0, 0, 0 };
  2371. ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]);
  2372. ggml_set_i32_nd(tensor, id[0], id[1], id[2], id[3], value);
  2373. return;
  2374. }
  2375. switch (tensor->type) {
  2376. case GGML_TYPE_I8:
  2377. {
  2378. GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
  2379. ((int8_t *)(tensor->data))[i] = value;
  2380. } break;
  2381. case GGML_TYPE_I16:
  2382. {
  2383. GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
  2384. ((int16_t *)(tensor->data))[i] = value;
  2385. } break;
  2386. case GGML_TYPE_I32:
  2387. {
  2388. GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
  2389. ((int32_t *)(tensor->data))[i] = value;
  2390. } break;
  2391. case GGML_TYPE_F16:
  2392. {
  2393. GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
  2394. ((ggml_fp16_t *)(tensor->data))[i] = GGML_FP32_TO_FP16(value);
  2395. } break;
  2396. case GGML_TYPE_F32:
  2397. {
  2398. GGML_ASSERT(tensor->nb[0] == sizeof(float));
  2399. ((float *)(tensor->data))[i] = value;
  2400. } break;
  2401. default:
  2402. {
  2403. GGML_ASSERT(false);
  2404. } break;
  2405. }
  2406. }
  2407. int32_t ggml_get_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3) {
  2408. void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3];
  2409. switch (tensor->type) {
  2410. case GGML_TYPE_I8:
  2411. return ((int8_t *) data)[0];
  2412. case GGML_TYPE_I16:
  2413. return ((int16_t *) data)[0];
  2414. case GGML_TYPE_I32:
  2415. return ((int32_t *) data)[0];
  2416. case GGML_TYPE_F16:
  2417. return GGML_FP16_TO_FP32(((ggml_fp16_t *) data)[0]);
  2418. case GGML_TYPE_F32:
  2419. return ((float *) data)[0];
  2420. default:
  2421. GGML_ASSERT(false);
  2422. }
2423. return 0;
  2424. }
  2425. void ggml_set_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, int32_t value) {
  2426. void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3];
  2427. switch (tensor->type) {
  2428. case GGML_TYPE_I8:
  2429. {
  2430. ((int8_t *)(data))[0] = value;
  2431. } break;
  2432. case GGML_TYPE_I16:
  2433. {
  2434. ((int16_t *)(data))[0] = value;
  2435. } break;
  2436. case GGML_TYPE_I32:
  2437. {
  2438. ((int32_t *)(data))[0] = value;
  2439. } break;
  2440. case GGML_TYPE_F16:
  2441. {
  2442. ((ggml_fp16_t *)(data))[0] = GGML_FP32_TO_FP16(value);
  2443. } break;
  2444. case GGML_TYPE_F32:
  2445. {
  2446. ((float *)(data))[0] = value;
  2447. } break;
  2448. default:
  2449. {
  2450. GGML_ASSERT(false);
  2451. } break;
  2452. }
  2453. }
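// The *_nd accessors compute the byte address from the strides nb[0..3], so
// they also work on permuted and otherwise non-contiguous tensors; the *_1d
// accessors fall back to them (via ggml_unravel_index) whenever the tensor is
// not contiguous.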
  2454. float ggml_get_f32_1d(const struct ggml_tensor * tensor, int i) {
  2455. if (!ggml_is_contiguous(tensor)) {
  2456. int64_t id[4] = { 0, 0, 0, 0 };
  2457. ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]);
  2458. return ggml_get_f32_nd(tensor, id[0], id[1], id[2], id[3]);
  2459. }
  2460. switch (tensor->type) {
  2461. case GGML_TYPE_I8:
  2462. {
  2463. GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
  2464. return ((int8_t *)(tensor->data))[i];
  2465. }
  2466. case GGML_TYPE_I16:
  2467. {
  2468. GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
  2469. return ((int16_t *)(tensor->data))[i];
  2470. }
  2471. case GGML_TYPE_I32:
  2472. {
  2473. GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
  2474. return ((int32_t *)(tensor->data))[i];
  2475. }
  2476. case GGML_TYPE_F16:
  2477. {
  2478. GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
  2479. return GGML_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]);
  2480. }
  2481. case GGML_TYPE_F32:
  2482. {
  2483. GGML_ASSERT(tensor->nb[0] == sizeof(float));
  2484. return ((float *)(tensor->data))[i];
  2485. }
  2486. default:
  2487. {
  2488. GGML_ASSERT(false);
  2489. }
  2490. }
  2491. return 0.0f;
  2492. }
  2493. void ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value) {
  2494. if (!ggml_is_contiguous(tensor)) {
  2495. int64_t id[4] = { 0, 0, 0, 0 };
  2496. ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]);
  2497. ggml_set_f32_nd(tensor, id[0], id[1], id[2], id[3], value);
  2498. return;
  2499. }
  2500. switch (tensor->type) {
  2501. case GGML_TYPE_I8:
  2502. {
  2503. GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
  2504. ((int8_t *)(tensor->data))[i] = value;
  2505. } break;
  2506. case GGML_TYPE_I16:
  2507. {
  2508. GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
  2509. ((int16_t *)(tensor->data))[i] = value;
  2510. } break;
  2511. case GGML_TYPE_I32:
  2512. {
  2513. GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
  2514. ((int32_t *)(tensor->data))[i] = value;
  2515. } break;
  2516. case GGML_TYPE_F16:
  2517. {
  2518. GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
  2519. ((ggml_fp16_t *)(tensor->data))[i] = GGML_FP32_TO_FP16(value);
  2520. } break;
  2521. case GGML_TYPE_F32:
  2522. {
  2523. GGML_ASSERT(tensor->nb[0] == sizeof(float));
  2524. ((float *)(tensor->data))[i] = value;
  2525. } break;
  2526. default:
  2527. {
  2528. GGML_ASSERT(false);
  2529. } break;
  2530. }
  2531. }
  2532. float ggml_get_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3) {
  2533. void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3];
  2534. switch (tensor->type) {
  2535. case GGML_TYPE_I8:
  2536. return ((int8_t *) data)[0];
  2537. case GGML_TYPE_I16:
  2538. return ((int16_t *) data)[0];
  2539. case GGML_TYPE_I32:
  2540. return ((int32_t *) data)[0];
  2541. case GGML_TYPE_F16:
  2542. return GGML_FP16_TO_FP32(((ggml_fp16_t *) data)[0]);
  2543. case GGML_TYPE_F32:
  2544. return ((float *) data)[0];
  2545. default:
  2546. GGML_ASSERT(false);
  2547. }
  2548. return 0.0f;
  2549. }
  2550. void ggml_set_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, float value) {
  2551. void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3];
  2552. switch (tensor->type) {
  2553. case GGML_TYPE_I8:
  2554. {
  2555. ((int8_t *)(data))[0] = value;
  2556. } break;
  2557. case GGML_TYPE_I16:
  2558. {
  2559. ((int16_t *)(data))[0] = value;
  2560. } break;
  2561. case GGML_TYPE_I32:
  2562. {
  2563. ((int32_t *)(data))[0] = value;
  2564. } break;
  2565. case GGML_TYPE_F16:
  2566. {
  2567. ((ggml_fp16_t *)(data))[0] = GGML_FP32_TO_FP16(value);
  2568. } break;
  2569. case GGML_TYPE_F32:
  2570. {
  2571. ((float *)(data))[0] = value;
  2572. } break;
  2573. default:
  2574. {
  2575. GGML_ASSERT(false);
  2576. } break;
  2577. }
  2578. }
  2579. void * ggml_get_data(const struct ggml_tensor * tensor) {
  2580. return tensor->data;
  2581. }
  2582. float * ggml_get_data_f32(const struct ggml_tensor * tensor) {
  2583. assert(tensor->type == GGML_TYPE_F32);
  2584. return (float *)(tensor->data);
  2585. }
  2586. enum ggml_unary_op ggml_get_unary_op(const struct ggml_tensor * tensor) {
  2587. GGML_ASSERT(tensor->op == GGML_OP_UNARY);
  2588. return (enum ggml_unary_op) ggml_get_op_params_i32(tensor, 0);
  2589. }
  2590. const char * ggml_get_name(const struct ggml_tensor * tensor) {
  2591. return tensor->name;
  2592. }
  2593. struct ggml_tensor * ggml_set_name(struct ggml_tensor * tensor, const char * name) {
  2594. strncpy(tensor->name, name, sizeof(tensor->name));
  2595. tensor->name[sizeof(tensor->name) - 1] = '\0';
  2596. return tensor;
  2597. }
  2598. struct ggml_tensor * ggml_format_name(struct ggml_tensor * tensor, const char * fmt, ...) {
  2599. va_list args;
  2600. va_start(args, fmt);
  2601. vsnprintf(tensor->name, sizeof(tensor->name), fmt, args);
  2602. va_end(args);
  2603. return tensor;
  2604. }
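// Naming sketch (illustrative; `cur` and `il` are hypothetical):
//
//   ggml_set_name(w, "blk.0.attn_q.weight");
//   ggml_format_name(cur, "ffn_out-%d", il);
//
// Names are truncated to sizeof(tensor->name) - 1 characters and are always
// NUL-terminated.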
  2605. struct ggml_tensor * ggml_view_tensor(
  2606. struct ggml_context * ctx,
  2607. struct ggml_tensor * src) {
  2608. struct ggml_tensor * result = ggml_new_tensor_impl(ctx, src->type, GGML_MAX_DIMS, src->ne, src, 0);
  2609. ggml_format_name(result, "%s (view)", src->name);
  2610. for (int i = 0; i < GGML_MAX_DIMS; i++) {
  2611. result->nb[i] = src->nb[i];
  2612. }
  2613. return result;
  2614. }
  2615. struct ggml_tensor * ggml_get_first_tensor(struct ggml_context * ctx) {
  2616. struct ggml_object * obj = ctx->objects_begin;
  2617. char * const mem_buffer = ctx->mem_buffer;
  2618. while (obj != NULL) {
  2619. if (obj->type == GGML_OBJECT_TENSOR) {
  2620. return (struct ggml_tensor *)(mem_buffer + obj->offs);
  2621. }
  2622. obj = obj->next;
  2623. }
  2624. return NULL;
  2625. }
  2626. struct ggml_tensor * ggml_get_next_tensor(struct ggml_context * ctx, struct ggml_tensor * tensor) {
  2627. struct ggml_object * obj = (struct ggml_object *) ((char *)tensor - GGML_OBJECT_SIZE);
  2628. obj = obj->next;
  2629. char * const mem_buffer = ctx->mem_buffer;
  2630. while (obj != NULL) {
  2631. if (obj->type == GGML_OBJECT_TENSOR) {
  2632. return (struct ggml_tensor *)(mem_buffer + obj->offs);
  2633. }
  2634. obj = obj->next;
  2635. }
  2636. return NULL;
  2637. }
  2638. struct ggml_tensor * ggml_get_tensor(struct ggml_context * ctx, const char * name) {
  2639. struct ggml_object * obj = ctx->objects_begin;
  2640. char * const mem_buffer = ctx->mem_buffer;
  2641. while (obj != NULL) {
  2642. if (obj->type == GGML_OBJECT_TENSOR) {
  2643. struct ggml_tensor * cur = (struct ggml_tensor *)(mem_buffer + obj->offs);
  2644. if (strcmp(cur->name, name) == 0) {
  2645. return cur;
  2646. }
  2647. }
  2648. obj = obj->next;
  2649. }
  2650. return NULL;
  2651. }
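// Iteration sketch over every tensor allocated in a context (ctx is hypothetical):
//
//   for (struct ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL;
//        t = ggml_get_next_tensor(ctx, t)) {
//       printf("%-32s %8.2f KB\n", ggml_get_name(t), ggml_nbytes(t)/1024.0);
//   }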
  2652. ////////////////////////////////////////////////////////////////////////////////
  2653. // ggml_dup
  2654. static struct ggml_tensor * ggml_dup_impl(
  2655. struct ggml_context * ctx,
  2656. struct ggml_tensor * a,
  2657. bool inplace) {
  2658. bool is_node = false;
  2659. if (!inplace && (a->grad)) {
  2660. is_node = true;
  2661. }
  2662. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  2663. result->op = GGML_OP_DUP;
  2664. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  2665. result->src[0] = a;
  2666. return result;
  2667. }
  2668. struct ggml_tensor * ggml_dup(
  2669. struct ggml_context * ctx,
  2670. struct ggml_tensor * a) {
  2671. return ggml_dup_impl(ctx, a, false);
  2672. }
  2673. struct ggml_tensor * ggml_dup_inplace(
  2674. struct ggml_context * ctx,
  2675. struct ggml_tensor * a) {
  2676. return ggml_dup_impl(ctx, a, true);
  2677. }
  2678. // ggml_add
  2679. static struct ggml_tensor * ggml_add_impl(
  2680. struct ggml_context * ctx,
  2681. struct ggml_tensor * a,
  2682. struct ggml_tensor * b,
  2683. bool inplace) {
  2684. GGML_ASSERT(ggml_can_repeat(b, a));
  2685. bool is_node = false;
  2686. if (!inplace && (a->grad || b->grad)) {
  2687. // TODO: support backward pass for broadcasting
  2688. GGML_ASSERT(ggml_are_same_shape(a, b));
  2689. is_node = true;
  2690. }
  2691. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  2692. result->op = GGML_OP_ADD;
  2693. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  2694. result->src[0] = a;
  2695. result->src[1] = b;
  2696. return result;
  2697. }
  2698. struct ggml_tensor * ggml_add(
  2699. struct ggml_context * ctx,
  2700. struct ggml_tensor * a,
  2701. struct ggml_tensor * b) {
  2702. return ggml_add_impl(ctx, a, b, false);
  2703. }
  2704. struct ggml_tensor * ggml_add_inplace(
  2705. struct ggml_context * ctx,
  2706. struct ggml_tensor * a,
  2707. struct ggml_tensor * b) {
  2708. return ggml_add_impl(ctx, a, b, true);
  2709. }
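// The op constructors below do not compute anything: they allocate the result
// tensor, record the op type and its source tensors, and (when a gradient is
// needed) a grad tensor, leaving the actual work to the graph compute step.
// Illustrative sketch of chaining them (w, x and b are hypothetical F32 tensors):
//
//   struct ggml_tensor * cur = ggml_add(ctx, ggml_mul_mat(ctx, w, x), b);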
  2710. // ggml_add_cast
  2711. static struct ggml_tensor * ggml_add_cast_impl(
  2712. struct ggml_context * ctx,
  2713. struct ggml_tensor * a,
  2714. struct ggml_tensor * b,
  2715. enum ggml_type type) {
  2716. // TODO: support less-strict constraint
  2717. // GGML_ASSERT(ggml_can_repeat(b, a));
  2718. GGML_ASSERT(ggml_can_repeat_rows(b, a));
  2719. GGML_ASSERT(ggml_is_quantized(a->type) || a->type == GGML_TYPE_F16); // currently only supported for quantized input and f16
  2720. bool is_node = false;
  2721. if (a->grad || b->grad) {
  2722. // TODO: support backward pass for broadcasting
  2723. GGML_ASSERT(ggml_are_same_shape(a, b));
  2724. is_node = true;
  2725. }
  2726. struct ggml_tensor * result = ggml_new_tensor(ctx, type, GGML_MAX_DIMS, a->ne);
  2727. result->op = GGML_OP_ADD;
  2728. result->grad = is_node ? ggml_new_tensor(ctx, GGML_TYPE_F32, GGML_MAX_DIMS, a->ne) : NULL;
  2729. result->src[0] = a;
  2730. result->src[1] = b;
  2731. return result;
  2732. }
  2733. struct ggml_tensor * ggml_add_cast(
  2734. struct ggml_context * ctx,
  2735. struct ggml_tensor * a,
  2736. struct ggml_tensor * b,
  2737. enum ggml_type type) {
  2738. return ggml_add_cast_impl(ctx, a, b, type);
  2739. }
  2740. // ggml_add1
  2741. static struct ggml_tensor * ggml_add1_impl(
  2742. struct ggml_context * ctx,
  2743. struct ggml_tensor * a,
  2744. struct ggml_tensor * b,
  2745. bool inplace) {
  2746. GGML_ASSERT(ggml_is_scalar(b));
  2747. GGML_ASSERT(ggml_is_padded_1d(a));
  2748. bool is_node = false;
  2749. if (a->grad || b->grad) {
  2750. is_node = true;
  2751. }
  2752. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  2753. result->op = GGML_OP_ADD1;
  2754. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  2755. result->src[0] = a;
  2756. result->src[1] = b;
  2757. return result;
  2758. }
  2759. struct ggml_tensor * ggml_add1(
  2760. struct ggml_context * ctx,
  2761. struct ggml_tensor * a,
  2762. struct ggml_tensor * b) {
  2763. return ggml_add1_impl(ctx, a, b, false);
  2764. }
  2765. struct ggml_tensor * ggml_add1_inplace(
  2766. struct ggml_context * ctx,
  2767. struct ggml_tensor * a,
  2768. struct ggml_tensor * b) {
  2769. return ggml_add1_impl(ctx, a, b, true);
  2770. }
  2771. // ggml_acc
  2772. static struct ggml_tensor * ggml_acc_impl(
  2773. struct ggml_context * ctx,
  2774. struct ggml_tensor * a,
  2775. struct ggml_tensor * b,
  2776. size_t nb1,
  2777. size_t nb2,
  2778. size_t nb3,
  2779. size_t offset,
  2780. bool inplace) {
  2781. GGML_ASSERT(ggml_nelements(b) <= ggml_nelements(a));
  2782. GGML_ASSERT(ggml_is_contiguous(a));
  2783. GGML_ASSERT(a->type == GGML_TYPE_F32);
  2784. GGML_ASSERT(b->type == GGML_TYPE_F32);
  2785. bool is_node = false;
  2786. if (!inplace && (a->grad || b->grad)) {
  2787. is_node = true;
  2788. }
  2789. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  2790. int32_t params[] = { nb1, nb2, nb3, offset, inplace ? 1 : 0 };
  2791. ggml_set_op_params(result, params, sizeof(params));
  2792. result->op = GGML_OP_ACC;
  2793. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  2794. result->src[0] = a;
  2795. result->src[1] = b;
  2796. return result;
  2797. }
  2798. struct ggml_tensor * ggml_acc(
  2799. struct ggml_context * ctx,
  2800. struct ggml_tensor * a,
  2801. struct ggml_tensor * b,
  2802. size_t nb1,
  2803. size_t nb2,
  2804. size_t nb3,
  2805. size_t offset) {
  2806. return ggml_acc_impl(ctx, a, b, nb1, nb2, nb3, offset, false);
  2807. }
  2808. struct ggml_tensor * ggml_acc_inplace(
  2809. struct ggml_context * ctx,
  2810. struct ggml_tensor * a,
  2811. struct ggml_tensor * b,
  2812. size_t nb1,
  2813. size_t nb2,
  2814. size_t nb3,
  2815. size_t offset) {
  2816. return ggml_acc_impl(ctx, a, b, nb1, nb2, nb3, offset, true);
  2817. }
  2818. // ggml_sub
  2819. static struct ggml_tensor * ggml_sub_impl(
  2820. struct ggml_context * ctx,
  2821. struct ggml_tensor * a,
  2822. struct ggml_tensor * b,
  2823. bool inplace) {
  2824. GGML_ASSERT(ggml_are_same_shape(a, b));
  2825. bool is_node = false;
  2826. if (!inplace && (a->grad || b->grad)) {
  2827. is_node = true;
  2828. }
  2829. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  2830. result->op = GGML_OP_SUB;
  2831. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  2832. result->src[0] = a;
  2833. result->src[1] = b;
  2834. return result;
  2835. }
  2836. struct ggml_tensor * ggml_sub(
  2837. struct ggml_context * ctx,
  2838. struct ggml_tensor * a,
  2839. struct ggml_tensor * b) {
  2840. return ggml_sub_impl(ctx, a, b, false);
  2841. }
  2842. struct ggml_tensor * ggml_sub_inplace(
  2843. struct ggml_context * ctx,
  2844. struct ggml_tensor * a,
  2845. struct ggml_tensor * b) {
  2846. return ggml_sub_impl(ctx, a, b, true);
  2847. }
  2848. // ggml_mul
  2849. static struct ggml_tensor * ggml_mul_impl(
  2850. struct ggml_context * ctx,
  2851. struct ggml_tensor * a,
  2852. struct ggml_tensor * b,
  2853. bool inplace) {
  2854. GGML_ASSERT(ggml_can_repeat(b, a));
  2855. bool is_node = false;
  2856. if (!inplace && (a->grad || b->grad)) {
  2857. // TODO: support backward pass for broadcasting
  2858. GGML_ASSERT(ggml_are_same_shape(a, b));
  2859. is_node = true;
  2860. }
  2861. if (inplace) {
  2862. GGML_ASSERT(!is_node);
  2863. }
  2864. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  2865. result->op = GGML_OP_MUL;
  2866. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  2867. result->src[0] = a;
  2868. result->src[1] = b;
  2869. return result;
  2870. }
  2871. struct ggml_tensor * ggml_mul(
  2872. struct ggml_context * ctx,
  2873. struct ggml_tensor * a,
  2874. struct ggml_tensor * b) {
  2875. return ggml_mul_impl(ctx, a, b, false);
  2876. }
  2877. struct ggml_tensor * ggml_mul_inplace(
  2878. struct ggml_context * ctx,
  2879. struct ggml_tensor * a,
  2880. struct ggml_tensor * b) {
  2881. return ggml_mul_impl(ctx, a, b, true);
  2882. }
  2883. // ggml_div
  2884. static struct ggml_tensor * ggml_div_impl(
  2885. struct ggml_context * ctx,
  2886. struct ggml_tensor * a,
  2887. struct ggml_tensor * b,
  2888. bool inplace) {
  2889. GGML_ASSERT(ggml_can_repeat(b, a));
  2890. bool is_node = false;
  2891. if (!inplace && (a->grad || b->grad)) {
  2892. is_node = true;
  2893. }
  2894. if (inplace) {
  2895. GGML_ASSERT(!is_node);
  2896. }
  2897. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  2898. result->op = GGML_OP_DIV;
  2899. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  2900. result->src[0] = a;
  2901. result->src[1] = b;
  2902. return result;
  2903. }
  2904. struct ggml_tensor * ggml_div(
  2905. struct ggml_context * ctx,
  2906. struct ggml_tensor * a,
  2907. struct ggml_tensor * b) {
  2908. return ggml_div_impl(ctx, a, b, false);
  2909. }
  2910. struct ggml_tensor * ggml_div_inplace(
  2911. struct ggml_context * ctx,
  2912. struct ggml_tensor * a,
  2913. struct ggml_tensor * b) {
  2914. return ggml_div_impl(ctx, a, b, true);
  2915. }
  2916. // ggml_sqr
  2917. static struct ggml_tensor * ggml_sqr_impl(
  2918. struct ggml_context * ctx,
  2919. struct ggml_tensor * a,
  2920. bool inplace) {
  2921. bool is_node = false;
  2922. if (!inplace && (a->grad)) {
  2923. is_node = true;
  2924. }
  2925. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  2926. result->op = GGML_OP_SQR;
  2927. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  2928. result->src[0] = a;
  2929. return result;
  2930. }
  2931. struct ggml_tensor * ggml_sqr(
  2932. struct ggml_context * ctx,
  2933. struct ggml_tensor * a) {
  2934. return ggml_sqr_impl(ctx, a, false);
  2935. }
  2936. struct ggml_tensor * ggml_sqr_inplace(
  2937. struct ggml_context * ctx,
  2938. struct ggml_tensor * a) {
  2939. return ggml_sqr_impl(ctx, a, true);
  2940. }
  2941. // ggml_sqrt
  2942. static struct ggml_tensor * ggml_sqrt_impl(
  2943. struct ggml_context * ctx,
  2944. struct ggml_tensor * a,
  2945. bool inplace) {
  2946. bool is_node = false;
  2947. if (!inplace && (a->grad)) {
  2948. is_node = true;
  2949. }
  2950. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  2951. result->op = GGML_OP_SQRT;
  2952. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  2953. result->src[0] = a;
  2954. return result;
  2955. }
  2956. struct ggml_tensor * ggml_sqrt(
  2957. struct ggml_context * ctx,
  2958. struct ggml_tensor * a) {
  2959. return ggml_sqrt_impl(ctx, a, false);
  2960. }
  2961. struct ggml_tensor * ggml_sqrt_inplace(
  2962. struct ggml_context * ctx,
  2963. struct ggml_tensor * a) {
  2964. return ggml_sqrt_impl(ctx, a, true);
  2965. }
  2966. // ggml_log
  2967. static struct ggml_tensor * ggml_log_impl(
  2968. struct ggml_context * ctx,
  2969. struct ggml_tensor * a,
  2970. bool inplace) {
  2971. bool is_node = false;
  2972. if (!inplace && (a->grad)) {
  2973. is_node = true;
  2974. }
  2975. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  2976. result->op = GGML_OP_LOG;
  2977. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  2978. result->src[0] = a;
  2979. return result;
  2980. }
  2981. struct ggml_tensor * ggml_log(
  2982. struct ggml_context * ctx,
  2983. struct ggml_tensor * a) {
  2984. return ggml_log_impl(ctx, a, false);
  2985. }
  2986. struct ggml_tensor * ggml_log_inplace(
  2987. struct ggml_context * ctx,
  2988. struct ggml_tensor * a) {
  2989. return ggml_log_impl(ctx, a, true);
  2990. }
  2991. // ggml_sum
  2992. struct ggml_tensor * ggml_sum(
  2993. struct ggml_context * ctx,
  2994. struct ggml_tensor * a) {
  2995. bool is_node = false;
  2996. if (a->grad) {
  2997. is_node = true;
  2998. }
  2999. struct ggml_tensor * result = ggml_new_tensor_1d(ctx, a->type, 1);
  3000. result->op = GGML_OP_SUM;
  3001. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3002. result->src[0] = a;
  3003. return result;
  3004. }
  3005. // ggml_sum_rows
  3006. struct ggml_tensor * ggml_sum_rows(
  3007. struct ggml_context * ctx,
  3008. struct ggml_tensor * a) {
  3009. bool is_node = false;
  3010. if (a->grad) {
  3011. is_node = true;
  3012. }
  3013. int64_t ne[GGML_MAX_DIMS] = { 1 };
  3014. for (int i = 1; i < GGML_MAX_DIMS; ++i) {
  3015. ne[i] = a->ne[i];
  3016. }
  3017. struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, GGML_MAX_DIMS, ne);
  3018. result->op = GGML_OP_SUM_ROWS;
  3019. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3020. result->src[0] = a;
  3021. return result;
  3022. }
  3023. // ggml_mean
  3024. struct ggml_tensor * ggml_mean(
  3025. struct ggml_context * ctx,
  3026. struct ggml_tensor * a) {
  3027. bool is_node = false;
  3028. if (a->grad) {
  3029. GGML_ASSERT(false); // TODO: implement
  3030. is_node = true;
  3031. }
  3032. int64_t ne[4] = { 1, a->ne[1], a->ne[2], a->ne[3] };
  3033. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);
  3034. result->op = GGML_OP_MEAN;
  3035. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3036. result->src[0] = a;
  3037. return result;
  3038. }
  3039. // ggml_argmax
  3040. struct ggml_tensor * ggml_argmax(
  3041. struct ggml_context * ctx,
  3042. struct ggml_tensor * a) {
  3043. GGML_ASSERT(ggml_is_matrix(a));
  3044. bool is_node = false;
  3045. if (a->grad) {
  3046. GGML_ASSERT(false);
  3047. is_node = true;
  3048. }
  3049. struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, a->ne[1]);
  3050. result->op = GGML_OP_ARGMAX;
  3051. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3052. result->src[0] = a;
  3053. return result;
  3054. }
  3055. // ggml_repeat
  3056. struct ggml_tensor * ggml_repeat(
  3057. struct ggml_context * ctx,
  3058. struct ggml_tensor * a,
  3059. struct ggml_tensor * b) {
  3060. GGML_ASSERT(ggml_can_repeat(a, b));
  3061. bool is_node = false;
  3062. if (a->grad) {
  3063. is_node = true;
  3064. }
  3065. struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, GGML_MAX_DIMS, b->ne);
  3066. result->op = GGML_OP_REPEAT;
  3067. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3068. result->src[0] = a;
  3069. return result;
  3070. }
  3071. // ggml_repeat_back
  3072. struct ggml_tensor * ggml_repeat_back(
  3073. struct ggml_context * ctx,
  3074. struct ggml_tensor * a,
  3075. struct ggml_tensor * b) {
  3076. GGML_ASSERT(ggml_can_repeat(b, a));
  3077. bool is_node = false;
  3078. if (a->grad) {
  3079. is_node = true;
  3080. }
  3081. if (ggml_are_same_shape(a, b) && !is_node) {
  3082. return a;
  3083. }
  3084. struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, GGML_MAX_DIMS, b->ne);
  3085. result->op = GGML_OP_REPEAT_BACK;
  3086. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3087. result->src[0] = a;
  3088. return result;
  3089. }
  3090. // ggml_concat
  3091. struct ggml_tensor * ggml_concat(
  3092. struct ggml_context* ctx,
  3093. struct ggml_tensor* a,
  3094. struct ggml_tensor* b) {
  3095. GGML_ASSERT(a->ne[0] == b->ne[0] && a->ne[1] == b->ne[1] && a->ne[3] == b->ne[3]);
  3096. bool is_node = false;
  3097. if (a->grad || b->grad) {
  3098. is_node = true;
  3099. }
  3100. struct ggml_tensor * result = ggml_new_tensor_4d(ctx, a->type, a->ne[0], a->ne[1], a->ne[2] + b->ne[2], a->ne[3]);
  3101. result->op = GGML_OP_CONCAT;
  3102. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3103. result->src[0] = a;
  3104. result->src[1] = b;
  3105. return result;
  3106. }
  3107. // ggml_abs
  3108. struct ggml_tensor * ggml_abs(
  3109. struct ggml_context * ctx,
  3110. struct ggml_tensor * a) {
  3111. return ggml_unary(ctx, a, GGML_UNARY_OP_ABS);
  3112. }
  3113. struct ggml_tensor * ggml_abs_inplace(
  3114. struct ggml_context * ctx,
  3115. struct ggml_tensor * a) {
  3116. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_ABS);
  3117. }
  3118. // ggml_sgn
  3119. struct ggml_tensor * ggml_sgn(
  3120. struct ggml_context * ctx,
  3121. struct ggml_tensor * a) {
  3122. return ggml_unary(ctx, a, GGML_UNARY_OP_SGN);
  3123. }
  3124. struct ggml_tensor * ggml_sgn_inplace(
  3125. struct ggml_context * ctx,
  3126. struct ggml_tensor * a) {
  3127. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_SGN);
  3128. }
  3129. // ggml_neg
  3130. struct ggml_tensor * ggml_neg(
  3131. struct ggml_context * ctx,
  3132. struct ggml_tensor * a) {
  3133. return ggml_unary(ctx, a, GGML_UNARY_OP_NEG);
  3134. }
  3135. struct ggml_tensor * ggml_neg_inplace(
  3136. struct ggml_context * ctx,
  3137. struct ggml_tensor * a) {
  3138. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_NEG);
  3139. }
  3140. // ggml_step
  3141. struct ggml_tensor * ggml_step(
  3142. struct ggml_context * ctx,
  3143. struct ggml_tensor * a) {
  3144. return ggml_unary(ctx, a, GGML_UNARY_OP_STEP);
  3145. }
  3146. struct ggml_tensor * ggml_step_inplace(
  3147. struct ggml_context * ctx,
  3148. struct ggml_tensor * a) {
  3149. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_STEP);
  3150. }
  3151. // ggml_tanh
  3152. struct ggml_tensor * ggml_tanh(
  3153. struct ggml_context * ctx,
  3154. struct ggml_tensor * a) {
  3155. return ggml_unary(ctx, a, GGML_UNARY_OP_TANH);
  3156. }
  3157. struct ggml_tensor * ggml_tanh_inplace(
  3158. struct ggml_context * ctx,
  3159. struct ggml_tensor * a) {
  3160. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_TANH);
  3161. }
  3162. // ggml_elu
  3163. struct ggml_tensor * ggml_elu(
  3164. struct ggml_context * ctx,
  3165. struct ggml_tensor * a) {
  3166. return ggml_unary(ctx, a, GGML_UNARY_OP_ELU);
  3167. }
  3168. struct ggml_tensor * ggml_elu_inplace(
  3169. struct ggml_context * ctx,
  3170. struct ggml_tensor * a) {
  3171. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_ELU);
  3172. }
  3173. // ggml_relu
  3174. struct ggml_tensor * ggml_relu(
  3175. struct ggml_context * ctx,
  3176. struct ggml_tensor * a) {
  3177. return ggml_unary(ctx, a, GGML_UNARY_OP_RELU);
  3178. }
  3179. struct ggml_tensor * ggml_relu_inplace(
  3180. struct ggml_context * ctx,
  3181. struct ggml_tensor * a) {
  3182. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_RELU);
  3183. }
  3184. // ggml_leaky_relu
  3185. struct ggml_tensor * ggml_leaky_relu(
  3186. struct ggml_context * ctx,
  3187. struct ggml_tensor * a, float negative_slope, bool inplace) {
  3188. bool is_node = false;
  3189. if (!inplace && (a->grad)) {
  3190. is_node = true;
  3191. }
  3192. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3193. ggml_set_op_params(result, &negative_slope, sizeof(negative_slope));
  3194. result->op = GGML_OP_LEAKY_RELU;
  3195. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3196. result->src[0] = a;
  3197. return result;
  3198. }
  3199. // ggml_gelu
  3200. struct ggml_tensor * ggml_gelu(
  3201. struct ggml_context * ctx,
  3202. struct ggml_tensor * a) {
  3203. return ggml_unary(ctx, a, GGML_UNARY_OP_GELU);
  3204. }
  3205. struct ggml_tensor * ggml_gelu_inplace(
  3206. struct ggml_context * ctx,
  3207. struct ggml_tensor * a) {
  3208. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_GELU);
  3209. }
  3210. // ggml_gelu_quick
  3211. struct ggml_tensor * ggml_gelu_quick(
  3212. struct ggml_context * ctx,
  3213. struct ggml_tensor * a) {
  3214. return ggml_unary(ctx, a, GGML_UNARY_OP_GELU_QUICK);
  3215. }
  3216. struct ggml_tensor * ggml_gelu_quick_inplace(
  3217. struct ggml_context * ctx,
  3218. struct ggml_tensor * a) {
  3219. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_GELU_QUICK);
  3220. }
  3221. // ggml_silu
  3222. struct ggml_tensor * ggml_silu(
  3223. struct ggml_context * ctx,
  3224. struct ggml_tensor * a) {
  3225. return ggml_unary(ctx, a, GGML_UNARY_OP_SILU);
  3226. }
  3227. struct ggml_tensor * ggml_silu_inplace(
  3228. struct ggml_context * ctx,
  3229. struct ggml_tensor * a) {
  3230. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_SILU);
  3231. }
  3232. // ggml_silu_back
  3233. struct ggml_tensor * ggml_silu_back(
  3234. struct ggml_context * ctx,
  3235. struct ggml_tensor * a,
  3236. struct ggml_tensor * b) {
  3237. bool is_node = false;
  3238. if (a->grad || b->grad) {
  3239. // TODO: implement backward
  3240. is_node = true;
  3241. }
  3242. struct ggml_tensor * result = ggml_dup_tensor(ctx, a);
  3243. result->op = GGML_OP_SILU_BACK;
  3244. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3245. result->src[0] = a;
  3246. result->src[1] = b;
  3247. return result;
  3248. }
  3249. // ggml_norm
  3250. static struct ggml_tensor * ggml_norm_impl(
  3251. struct ggml_context * ctx,
  3252. struct ggml_tensor * a,
  3253. float eps,
  3254. bool inplace) {
  3255. bool is_node = false;
  3256. if (!inplace && (a->grad)) {
  3257. GGML_ASSERT(false); // TODO: implement backward
  3258. is_node = true;
  3259. }
  3260. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3261. ggml_set_op_params(result, &eps, sizeof(eps));
  3262. result->op = GGML_OP_NORM;
  3263. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3264. result->src[0] = a;
  3265. return result;
  3266. }
  3267. struct ggml_tensor * ggml_norm(
  3268. struct ggml_context * ctx,
  3269. struct ggml_tensor * a,
  3270. float eps) {
  3271. return ggml_norm_impl(ctx, a, eps, false);
  3272. }
  3273. struct ggml_tensor * ggml_norm_inplace(
  3274. struct ggml_context * ctx,
  3275. struct ggml_tensor * a,
  3276. float eps) {
  3277. return ggml_norm_impl(ctx, a, eps, true);
  3278. }
  3279. // ggml_rms_norm
  3280. static struct ggml_tensor * ggml_rms_norm_impl(
  3281. struct ggml_context * ctx,
  3282. struct ggml_tensor * a,
  3283. float eps,
  3284. bool inplace) {
  3285. bool is_node = false;
  3286. if (!inplace && (a->grad)) {
  3287. is_node = true;
  3288. }
  3289. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3290. ggml_set_op_params(result, &eps, sizeof(eps));
  3291. result->op = GGML_OP_RMS_NORM;
  3292. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3293. result->src[0] = a;
  3294. return result;
  3295. }
  3296. struct ggml_tensor * ggml_rms_norm(
  3297. struct ggml_context * ctx,
  3298. struct ggml_tensor * a,
  3299. float eps) {
  3300. return ggml_rms_norm_impl(ctx, a, eps, false);
  3301. }
  3302. struct ggml_tensor * ggml_rms_norm_inplace(
  3303. struct ggml_context * ctx,
  3304. struct ggml_tensor * a,
  3305. float eps) {
  3306. return ggml_rms_norm_impl(ctx, a, eps, true);
  3307. }
  3308. // ggml_rms_norm_back
  3309. struct ggml_tensor * ggml_rms_norm_back(
  3310. struct ggml_context * ctx,
  3311. struct ggml_tensor * a,
  3312. struct ggml_tensor * b,
  3313. float eps) {
  3314. bool is_node = false;
  3315. if (a->grad) {
  3316. // TODO: implement backward
  3317. is_node = true;
  3318. }
  3319. struct ggml_tensor * result = ggml_dup_tensor(ctx, a);
  3320. ggml_set_op_params(result, &eps, sizeof(eps));
  3321. result->op = GGML_OP_RMS_NORM_BACK;
  3322. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3323. result->src[0] = a;
  3324. result->src[1] = b;
  3325. return result;
  3326. }
  3327. // ggml_group_norm
  3328. static struct ggml_tensor * ggml_group_norm_impl(
  3329. struct ggml_context * ctx,
  3330. struct ggml_tensor * a,
  3331. int n_groups,
  3332. bool inplace) {
  3333. bool is_node = false;
  3334. if (!inplace && (a->grad)) {
  3335. GGML_ASSERT(false); // TODO: implement backward
  3336. is_node = true;
  3337. }
  3338. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3339. result->op_params[0] = n_groups;
  3340. result->op = GGML_OP_GROUP_NORM;
  3341. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3342. result->src[0] = a;
  3343. result->src[1] = NULL; // TODO: maybe store epsilon here?
  3344. return result;
  3345. }
  3346. struct ggml_tensor * ggml_group_norm(
  3347. struct ggml_context * ctx,
  3348. struct ggml_tensor * a,
  3349. int n_groups) {
  3350. return ggml_group_norm_impl(ctx, a, n_groups, false);
  3351. }
  3352. struct ggml_tensor * ggml_group_norm_inplace(
  3353. struct ggml_context * ctx,
  3354. struct ggml_tensor * a,
  3355. int n_groups) {
  3356. return ggml_group_norm_impl(ctx, a, n_groups, true);
  3357. }
  3358. // ggml_mul_mat
  3359. struct ggml_tensor * ggml_mul_mat(
  3360. struct ggml_context * ctx,
  3361. struct ggml_tensor * a,
  3362. struct ggml_tensor * b) {
  3363. GGML_ASSERT(ggml_can_mul_mat(a, b));
  3364. GGML_ASSERT(!ggml_is_transposed(a));
  3365. bool is_node = false;
  3366. if (a->grad || b->grad) {
  3367. is_node = true;
  3368. }
  3369. const int64_t ne[4] = { a->ne[1], b->ne[1], b->ne[2], b->ne[3] };
  3370. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);
  3371. result->op = GGML_OP_MUL_MAT;
  3372. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3373. result->src[0] = a;
  3374. result->src[1] = b;
  3375. return result;
  3376. }
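// Shape convention: for a with ne = { k, m, ... } and b with ne = { k, n, ... }
// the result has ne = { m, n, b->ne[2], b->ne[3] }, i.e. every row of b (length
// k) is reduced against every row of a. Example: a 4096x4096 F32 weight times a
// batch b with ne = { 4096, 8 } yields a result with ne = { 4096, 8 }.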
  3377. // ggml_mul_mat_id
  3378. struct ggml_tensor * ggml_mul_mat_id(
  3379. struct ggml_context * ctx,
  3380. struct ggml_tensor * const as[],
  3381. int n_as,
  3382. struct ggml_tensor * ids,
  3383. int id,
  3384. struct ggml_tensor * b) {
  3385. GGML_ASSERT(ids->type == GGML_TYPE_I32);
  3386. GGML_ASSERT(ids->ne[2] == 1 && ids->ne[3] == 1);
  3387. GGML_ASSERT(ids->ne[1] == b->ne[1]);
  3388. GGML_ASSERT(ids->ne[2] == b->ne[2] && ids->ne[3] == b->ne[3]);
  3389. GGML_ASSERT(n_as > 0 && n_as <= GGML_MAX_SRC - 2);
  3390. GGML_ASSERT(id >= 0 && id < ids->ne[0]);
  3391. bool is_node = false;
  3392. if (as[0]->grad || b->grad) {
  3393. is_node = true;
  3394. }
  3395. const int64_t ne[4] = { as[0]->ne[1], b->ne[1], b->ne[2], b->ne[3] };
  3396. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);
  3397. ggml_set_op_params_i32(result, 0, id);
  3398. ggml_set_op_params_i32(result, 1, n_as);
  3399. result->op = GGML_OP_MUL_MAT_ID;
  3400. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3401. result->src[0] = ids;
  3402. result->src[1] = b;
  3403. for (int i = 0; i < n_as; i++) {
  3404. struct ggml_tensor * a = as[i];
  3405. GGML_ASSERT(ggml_are_same_shape(as[0], a));
  3406. GGML_ASSERT(ggml_can_mul_mat(a, b));
  3407. GGML_ASSERT(!ggml_is_transposed(a));
  3408. result->src[i + 2] = a;
  3409. }
  3410. return result;
  3411. }
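// ggml_mul_mat_id is the mixture-of-experts variant: as[] holds n_as expert
// matrices of identical shape, ids is an I32 tensor of candidate expert indices
// per column of b, and `id` selects which candidate slot applies to this call.
// The chosen expert is multiplied with the corresponding column of b, so the
// result has the same shape as a plain ggml_mul_mat with as[0].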
  3412. // ggml_out_prod
  3413. struct ggml_tensor * ggml_out_prod(
  3414. struct ggml_context * ctx,
  3415. struct ggml_tensor * a,
  3416. struct ggml_tensor * b) {
  3417. GGML_ASSERT(ggml_can_out_prod(a, b));
  3418. GGML_ASSERT(!ggml_is_transposed(a));
  3419. bool is_node = false;
  3420. if (a->grad || b->grad) {
  3421. is_node = true;
  3422. }
  3423. // a is broadcastable to b for ne[2] and ne[3] -> use b->ne[2] and b->ne[3]
  3424. const int64_t ne[4] = { a->ne[0], b->ne[0], b->ne[2], b->ne[3] };
  3425. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);
  3426. result->op = GGML_OP_OUT_PROD;
  3427. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3428. result->src[0] = a;
  3429. result->src[1] = b;
  3430. return result;
  3431. }
  3432. // ggml_scale
  3433. static struct ggml_tensor * ggml_scale_impl(
  3434. struct ggml_context * ctx,
  3435. struct ggml_tensor * a,
  3436. struct ggml_tensor * b,
  3437. bool inplace) {
  3438. GGML_ASSERT(ggml_is_scalar(b));
  3439. GGML_ASSERT(ggml_is_padded_1d(a));
  3440. bool is_node = false;
  3441. if (a->grad || b->grad) {
  3442. is_node = true;
  3443. }
  3444. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3445. result->op = GGML_OP_SCALE;
  3446. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3447. result->src[0] = a;
  3448. result->src[1] = b;
  3449. return result;
  3450. }
  3451. struct ggml_tensor * ggml_scale(
  3452. struct ggml_context * ctx,
  3453. struct ggml_tensor * a,
  3454. struct ggml_tensor * b) {
  3455. return ggml_scale_impl(ctx, a, b, false);
  3456. }
  3457. struct ggml_tensor * ggml_scale_inplace(
  3458. struct ggml_context * ctx,
  3459. struct ggml_tensor * a,
  3460. struct ggml_tensor * b) {
  3461. return ggml_scale_impl(ctx, a, b, true);
  3462. }
  3463. // ggml_set
  3464. static struct ggml_tensor * ggml_set_impl(
  3465. struct ggml_context * ctx,
  3466. struct ggml_tensor * a,
  3467. struct ggml_tensor * b,
  3468. size_t nb1,
  3469. size_t nb2,
  3470. size_t nb3,
  3471. size_t offset,
  3472. bool inplace) {
  3473. GGML_ASSERT(ggml_nelements(a) >= ggml_nelements(b));
  3474. bool is_node = false;
  3475. if (a->grad || b->grad) {
  3476. is_node = true;
  3477. }
  3478. // make a view of the destination
  3479. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3480. int32_t params[] = { nb1, nb2, nb3, offset, inplace ? 1 : 0 };
  3481. ggml_set_op_params(result, params, sizeof(params));
  3482. result->op = GGML_OP_SET;
  3483. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3484. result->src[0] = a;
  3485. result->src[1] = b;
  3486. return result;
  3487. }
  3488. struct ggml_tensor * ggml_set(
  3489. struct ggml_context * ctx,
  3490. struct ggml_tensor * a,
  3491. struct ggml_tensor * b,
  3492. size_t nb1,
  3493. size_t nb2,
  3494. size_t nb3,
  3495. size_t offset) {
  3496. return ggml_set_impl(ctx, a, b, nb1, nb2, nb3, offset, false);
  3497. }
  3498. struct ggml_tensor * ggml_set_inplace(
  3499. struct ggml_context * ctx,
  3500. struct ggml_tensor * a,
  3501. struct ggml_tensor * b,
  3502. size_t nb1,
  3503. size_t nb2,
  3504. size_t nb3,
  3505. size_t offset) {
  3506. return ggml_set_impl(ctx, a, b, nb1, nb2, nb3, offset, true);
  3507. }
  3508. struct ggml_tensor * ggml_set_1d(
  3509. struct ggml_context * ctx,
  3510. struct ggml_tensor * a,
  3511. struct ggml_tensor * b,
  3512. size_t offset) {
  3513. return ggml_set_impl(ctx, a, b, a->nb[1], a->nb[2], a->nb[3], offset, false);
  3514. }
  3515. struct ggml_tensor * ggml_set_1d_inplace(
  3516. struct ggml_context * ctx,
  3517. struct ggml_tensor * a,
  3518. struct ggml_tensor * b,
  3519. size_t offset) {
  3520. return ggml_set_impl(ctx, a, b, a->nb[1], a->nb[2], a->nb[3], offset, true);
  3521. }
  3522. struct ggml_tensor * ggml_set_2d(
  3523. struct ggml_context * ctx,
  3524. struct ggml_tensor * a,
  3525. struct ggml_tensor * b,
  3526. size_t nb1,
  3527. size_t offset) {
  3528. return ggml_set_impl(ctx, a, b, nb1, a->nb[2], a->nb[3], offset, false);
  3529. }
  3530. struct ggml_tensor * ggml_set_2d_inplace(
  3531. struct ggml_context * ctx,
  3532. struct ggml_tensor * a,
  3533. struct ggml_tensor * b,
  3534. size_t nb1,
  3535. size_t offset) {
  3536. return ggml_set_impl(ctx, a, b, nb1, a->nb[2], a->nb[3], offset, true);
  3537. }
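// The ggml_set_* family writes b into a region of a described by the strides
// nb1/nb2/nb3 and a byte offset; the non-inplace versions produce a new tensor
// instead of modifying a. Sketch: overwrite row r of a 2-D F32 tensor a with a
// 1-D tensor `row` (identifiers are hypothetical, offset is in bytes):
//
//   struct ggml_tensor * out = ggml_set_1d(ctx, a, row, r * a->nb[1]);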
  3538. // ggml_cpy
  3539. static struct ggml_tensor * ggml_cpy_impl(
  3540. struct ggml_context * ctx,
  3541. struct ggml_tensor * a,
  3542. struct ggml_tensor * b,
  3543. bool inplace) {
  3544. GGML_ASSERT(ggml_nelements(a) == ggml_nelements(b));
  3545. bool is_node = false;
  3546. if (!inplace && (a->grad || b->grad)) {
  3547. is_node = true;
  3548. }
  3549. // make a view of the destination
  3550. struct ggml_tensor * result = ggml_view_tensor(ctx, b);
  3551. if (strlen(b->name) > 0) {
  3552. ggml_format_name(result, "%s (copy of %s)", b->name, a->name);
  3553. } else {
  3554. ggml_format_name(result, "%s (copy)", a->name);
  3555. }
  3556. result->op = GGML_OP_CPY;
  3557. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3558. result->src[0] = a;
  3559. result->src[1] = b;
  3560. return result;
  3561. }
  3562. struct ggml_tensor * ggml_cpy(
  3563. struct ggml_context * ctx,
  3564. struct ggml_tensor * a,
  3565. struct ggml_tensor * b) {
  3566. return ggml_cpy_impl(ctx, a, b, false);
  3567. }
  3568. struct ggml_tensor * ggml_cpy_inplace(
  3569. struct ggml_context * ctx,
  3570. struct ggml_tensor * a,
  3571. struct ggml_tensor * b) {
  3572. return ggml_cpy_impl(ctx, a, b, true);
  3573. }
  3574. // ggml_cont
  3575. static struct ggml_tensor * ggml_cont_impl(
  3576. struct ggml_context * ctx,
  3577. struct ggml_tensor * a,
  3578. bool inplace) {
  3579. bool is_node = false;
  3580. if (!inplace && a->grad) {
  3581. is_node = true;
  3582. }
  3583. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3584. ggml_format_name(result, "%s (cont)", a->name);
  3585. result->op = GGML_OP_CONT;
  3586. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3587. result->src[0] = a;
  3588. return result;
  3589. }
  3590. struct ggml_tensor * ggml_cont(
  3591. struct ggml_context * ctx,
  3592. struct ggml_tensor * a) {
  3593. return ggml_cont_impl(ctx, a, false);
  3594. }
  3595. struct ggml_tensor * ggml_cont_inplace(
  3596. struct ggml_context * ctx,
  3597. struct ggml_tensor * a) {
  3598. return ggml_cont_impl(ctx, a, true);
  3599. }
  3600. // make contiguous, with new shape
  3601. GGML_API struct ggml_tensor * ggml_cont_1d(
  3602. struct ggml_context * ctx,
  3603. struct ggml_tensor * a,
  3604. int64_t ne0) {
  3605. return ggml_cont_4d(ctx, a, ne0, 1, 1, 1);
  3606. }
  3607. GGML_API struct ggml_tensor * ggml_cont_2d(
  3608. struct ggml_context * ctx,
  3609. struct ggml_tensor * a,
  3610. int64_t ne0,
  3611. int64_t ne1) {
  3612. return ggml_cont_4d(ctx, a, ne0, ne1, 1, 1);
  3613. }
  3614. GGML_API struct ggml_tensor * ggml_cont_3d(
  3615. struct ggml_context * ctx,
  3616. struct ggml_tensor * a,
  3617. int64_t ne0,
  3618. int64_t ne1,
  3619. int64_t ne2) {
  3620. return ggml_cont_4d(ctx, a, ne0, ne1, ne2, 1);
  3621. }
  3622. struct ggml_tensor * ggml_cont_4d(
  3623. struct ggml_context * ctx,
  3624. struct ggml_tensor * a,
  3625. int64_t ne0,
  3626. int64_t ne1,
  3627. int64_t ne2,
  3628. int64_t ne3) {
  3629. GGML_ASSERT(ggml_nelements(a) == (ne0*ne1*ne2*ne3));
  3630. bool is_node = false;
  3631. struct ggml_tensor * result = ggml_new_tensor_4d(ctx, a->type, ne0, ne1, ne2, ne3);
  3632. ggml_format_name(result, "%s (cont)", a->name);
  3633. result->op = GGML_OP_CONT;
  3634. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3635. result->src[0] = a;
  3636. return result;
  3637. }
  3638. // ggml_reshape
  3639. struct ggml_tensor * ggml_reshape(
  3640. struct ggml_context * ctx,
  3641. struct ggml_tensor * a,
  3642. struct ggml_tensor * b) {
  3643. GGML_ASSERT(ggml_is_contiguous(a));
3644. // only the shape of b matters, not its memory layout, so b is allowed to be non-contiguous.
  3645. GGML_ASSERT(ggml_nelements(a) == ggml_nelements(b));
  3646. bool is_node = false;
  3647. if (a->grad) {
  3648. is_node = true;
  3649. }
  3650. if (b->grad) {
  3651. // gradient propagation is not supported
  3652. //GGML_ASSERT(false);
  3653. }
  3654. struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, GGML_MAX_DIMS, b->ne, a, 0);
  3655. ggml_format_name(result, "%s (reshaped)", a->name);
  3656. result->op = GGML_OP_RESHAPE;
  3657. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3658. result->src[0] = a;
  3659. return result;
  3660. }
  3661. struct ggml_tensor * ggml_reshape_1d(
  3662. struct ggml_context * ctx,
  3663. struct ggml_tensor * a,
  3664. int64_t ne0) {
  3665. GGML_ASSERT(ggml_is_contiguous(a));
  3666. GGML_ASSERT(ggml_nelements(a) == ne0);
  3667. bool is_node = false;
  3668. if (a->grad) {
  3669. is_node = true;
  3670. }
  3671. const int64_t ne[1] = { ne0 };
  3672. struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 1, ne, a, 0);
  3673. ggml_format_name(result, "%s (reshaped)", a->name);
  3674. result->op = GGML_OP_RESHAPE;
  3675. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3676. result->src[0] = a;
  3677. return result;
  3678. }
  3679. struct ggml_tensor * ggml_reshape_2d(
  3680. struct ggml_context * ctx,
  3681. struct ggml_tensor * a,
  3682. int64_t ne0,
  3683. int64_t ne1) {
  3684. GGML_ASSERT(ggml_is_contiguous(a));
  3685. GGML_ASSERT(ggml_nelements(a) == ne0*ne1);
  3686. bool is_node = false;
  3687. if (a->grad) {
  3688. is_node = true;
  3689. }
  3690. const int64_t ne[2] = { ne0, ne1 };
  3691. struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 2, ne, a, 0);
  3692. ggml_format_name(result, "%s (reshaped)", a->name);
  3693. result->op = GGML_OP_RESHAPE;
  3694. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3695. result->src[0] = a;
  3696. return result;
  3697. }
  3698. struct ggml_tensor * ggml_reshape_3d(
  3699. struct ggml_context * ctx,
  3700. struct ggml_tensor * a,
  3701. int64_t ne0,
  3702. int64_t ne1,
  3703. int64_t ne2) {
  3704. GGML_ASSERT(ggml_is_contiguous(a));
  3705. GGML_ASSERT(ggml_nelements(a) == ne0*ne1*ne2);
  3706. bool is_node = false;
  3707. if (a->grad) {
  3708. is_node = true;
  3709. }
  3710. const int64_t ne[3] = { ne0, ne1, ne2 };
  3711. struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 3, ne, a, 0);
  3712. ggml_format_name(result, "%s (reshaped)", a->name);
  3713. result->op = GGML_OP_RESHAPE;
  3714. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3715. result->src[0] = a;
  3716. return result;
  3717. }
  3718. struct ggml_tensor * ggml_reshape_4d(
  3719. struct ggml_context * ctx,
  3720. struct ggml_tensor * a,
  3721. int64_t ne0,
  3722. int64_t ne1,
  3723. int64_t ne2,
  3724. int64_t ne3) {
  3725. GGML_ASSERT(ggml_is_contiguous(a));
  3726. GGML_ASSERT(ggml_nelements(a) == ne0*ne1*ne2*ne3);
  3727. bool is_node = false;
  3728. if (a->grad) {
  3729. is_node = true;
  3730. }
  3731. const int64_t ne[4] = { ne0, ne1, ne2, ne3 };
  3732. struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 4, ne, a, 0);
  3733. ggml_format_name(result, "%s (reshaped)", a->name);
  3734. result->op = GGML_OP_RESHAPE;
  3735. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3736. result->src[0] = a;
  3737. return result;
  3738. }
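// All reshape variants return views (ggml_new_tensor_impl is called with a as
// view_src), so no data is copied; a must already be contiguous. Sketch
// (identifiers are hypothetical):
//
//   // view a { 64, 32 } matrix w as a flat vector of 2048 elements
//   struct ggml_tensor * flat = ggml_reshape_1d(ctx, w, 64*32);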
  3739. static struct ggml_tensor * ggml_view_impl(
  3740. struct ggml_context * ctx,
  3741. struct ggml_tensor * a,
  3742. int n_dims,
  3743. const int64_t * ne,
  3744. size_t offset) {
  3745. bool is_node = false;
  3746. if (a->grad) {
  3747. is_node = true;
  3748. }
  3749. struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, n_dims, ne, a, offset);
  3750. ggml_format_name(result, "%s (view)", a->name);
  3751. ggml_set_op_params(result, &offset, sizeof(offset));
  3752. result->op = GGML_OP_VIEW;
  3753. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3754. result->src[0] = a;
  3755. return result;
  3756. }
  3757. // ggml_view_1d
  3758. struct ggml_tensor * ggml_view_1d(
  3759. struct ggml_context * ctx,
  3760. struct ggml_tensor * a,
  3761. int64_t ne0,
  3762. size_t offset) {
  3763. struct ggml_tensor * result = ggml_view_impl(ctx, a, 1, &ne0, offset);
  3764. return result;
  3765. }
  3766. // ggml_view_2d
  3767. struct ggml_tensor * ggml_view_2d(
  3768. struct ggml_context * ctx,
  3769. struct ggml_tensor * a,
  3770. int64_t ne0,
  3771. int64_t ne1,
  3772. size_t nb1,
  3773. size_t offset) {
  3774. const int64_t ne[2] = { ne0, ne1 };
  3775. struct ggml_tensor * result = ggml_view_impl(ctx, a, 2, ne, offset);
  3776. result->nb[1] = nb1;
  3777. result->nb[2] = result->nb[1]*ne1;
  3778. result->nb[3] = result->nb[2];
  3779. return result;
  3780. }
  3781. // ggml_view_3d
  3782. struct ggml_tensor * ggml_view_3d(
  3783. struct ggml_context * ctx,
  3784. struct ggml_tensor * a,
  3785. int64_t ne0,
  3786. int64_t ne1,
  3787. int64_t ne2,
  3788. size_t nb1,
  3789. size_t nb2,
  3790. size_t offset) {
  3791. const int64_t ne[3] = { ne0, ne1, ne2 };
  3792. struct ggml_tensor * result = ggml_view_impl(ctx, a, 3, ne, offset);
  3793. result->nb[1] = nb1;
  3794. result->nb[2] = nb2;
  3795. result->nb[3] = result->nb[2]*ne2;
  3796. return result;
  3797. }
  3798. // ggml_view_4d
  3799. struct ggml_tensor * ggml_view_4d(
  3800. struct ggml_context * ctx,
  3801. struct ggml_tensor * a,
  3802. int64_t ne0,
  3803. int64_t ne1,
  3804. int64_t ne2,
  3805. int64_t ne3,
  3806. size_t nb1,
  3807. size_t nb2,
  3808. size_t nb3,
  3809. size_t offset) {
  3810. const int64_t ne[4] = { ne0, ne1, ne2, ne3 };
  3811. struct ggml_tensor * result = ggml_view_impl(ctx, a, 4, ne, offset);
  3812. result->nb[1] = nb1;
  3813. result->nb[2] = nb2;
  3814. result->nb[3] = nb3;
  3815. return result;
  3816. }
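// NOTE (added, illustrative only): for the view helpers above, the ne* arguments
// count elements while nb* strides and `offset` are in bytes. A common pattern,
// assuming a 2-D F32 tensor `t` created elsewhere, is to take row `i` as a 1-D view:
//
//     struct ggml_tensor * row = ggml_view_1d(ctx, t, t->ne[0], i*t->nb[1]);
//
// The 2-D/3-D/4-D variants additionally let the caller override the row strides
// (nb1/nb2/nb3) of the view, e.g. to step through a larger parent buffer.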
  3817. // ggml_permute
  3818. struct ggml_tensor * ggml_permute(
  3819. struct ggml_context * ctx,
  3820. struct ggml_tensor * a,
  3821. int axis0,
  3822. int axis1,
  3823. int axis2,
  3824. int axis3) {
  3825. GGML_ASSERT(axis0 >= 0 && axis0 < GGML_MAX_DIMS);
  3826. GGML_ASSERT(axis1 >= 0 && axis1 < GGML_MAX_DIMS);
  3827. GGML_ASSERT(axis2 >= 0 && axis2 < GGML_MAX_DIMS);
  3828. GGML_ASSERT(axis3 >= 0 && axis3 < GGML_MAX_DIMS);
  3829. GGML_ASSERT(axis0 != axis1);
  3830. GGML_ASSERT(axis0 != axis2);
  3831. GGML_ASSERT(axis0 != axis3);
  3832. GGML_ASSERT(axis1 != axis2);
  3833. GGML_ASSERT(axis1 != axis3);
  3834. GGML_ASSERT(axis2 != axis3);
  3835. bool is_node = false;
  3836. if (a->grad) {
  3837. is_node = true;
  3838. }
  3839. struct ggml_tensor * result = ggml_view_tensor(ctx, a);
  3840. ggml_format_name(result, "%s (permuted)", a->name);
  3841. int ne[GGML_MAX_DIMS];
  3842. int nb[GGML_MAX_DIMS];
  3843. ne[axis0] = a->ne[0];
  3844. ne[axis1] = a->ne[1];
  3845. ne[axis2] = a->ne[2];
  3846. ne[axis3] = a->ne[3];
  3847. nb[axis0] = a->nb[0];
  3848. nb[axis1] = a->nb[1];
  3849. nb[axis2] = a->nb[2];
  3850. nb[axis3] = a->nb[3];
  3851. result->ne[0] = ne[0];
  3852. result->ne[1] = ne[1];
  3853. result->ne[2] = ne[2];
  3854. result->ne[3] = ne[3];
  3855. result->nb[0] = nb[0];
  3856. result->nb[1] = nb[1];
  3857. result->nb[2] = nb[2];
  3858. result->nb[3] = nb[3];
  3859. result->op = GGML_OP_PERMUTE;
  3860. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3861. result->src[0] = a;
  3862. int32_t params[] = { axis0, axis1, axis2, axis3 };
  3863. ggml_set_op_params(result, params, sizeof(params));
  3864. return result;
  3865. }
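// NOTE (added, illustrative only): axisN says where source dimension N ends up in
// the result, so swapping the first two dimensions of a tensor `t` is
//
//     struct ggml_tensor * tt = ggml_permute(ctx, t, 1, 0, 2, 3);
//
// which is equivalent to ggml_transpose(ctx, t) below. The result is a strided
// view (the nb[] values are carried over unchanged), so it is generally not
// contiguous and may need to be copied before ops that require contiguous input.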
  3866. // ggml_transpose
  3867. struct ggml_tensor * ggml_transpose(
  3868. struct ggml_context * ctx,
  3869. struct ggml_tensor * a) {
  3870. bool is_node = false;
  3871. if (a->grad) {
  3872. is_node = true;
  3873. }
  3874. struct ggml_tensor * result = ggml_view_tensor(ctx, a);
  3875. ggml_format_name(result, "%s (transposed)", a->name);
  3876. result->ne[0] = a->ne[1];
  3877. result->ne[1] = a->ne[0];
  3878. result->nb[0] = a->nb[1];
  3879. result->nb[1] = a->nb[0];
  3880. result->op = GGML_OP_TRANSPOSE;
  3881. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3882. result->src[0] = a;
  3883. return result;
  3884. }
  3885. // ggml_get_rows
  3886. struct ggml_tensor * ggml_get_rows(
  3887. struct ggml_context * ctx,
  3888. struct ggml_tensor * a,
  3889. struct ggml_tensor * b) {
  3890. GGML_ASSERT(a->ne[2] == b->ne[1]);
  3891. GGML_ASSERT(b->ne[3] == 1);
  3892. GGML_ASSERT(b->type == GGML_TYPE_I32);
  3893. bool is_node = false;
  3894. if (a->grad || b->grad) {
  3895. is_node = true;
  3896. }
  3897. // TODO: implement non F32 return
  3898. //struct ggml_tensor * result = ggml_new_tensor_2d(ctx, a->type, a->ne[0], b->ne[0]);
  3899. struct ggml_tensor * result = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, a->ne[0], b->ne[0], b->ne[1], b->ne[2]);
  3900. result->op = GGML_OP_GET_ROWS;
  3901. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3902. result->src[0] = a;
  3903. result->src[1] = b;
  3904. return result;
  3905. }
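// NOTE (added, illustrative only): ggml_get_rows gathers rows of `a` selected by the
// I32 index tensor `b`; a typical use is a token-embedding lookup, assuming a 2-D
// embedding matrix `tok_embeddings` of shape [n_embd, n_vocab] created elsewhere:
//
//     struct ggml_tensor * ids = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, n_tokens);
//     // ... fill ids->data with vocabulary indices ...
//     struct ggml_tensor * emb = ggml_get_rows(ctx, tok_embeddings, ids); // [n_embd, n_tokens]
//
// The result is always F32 for now (see the TODO above about non-F32 return types).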
  3906. // ggml_get_rows_back
  3907. struct ggml_tensor * ggml_get_rows_back(
  3908. struct ggml_context * ctx,
  3909. struct ggml_tensor * a,
  3910. struct ggml_tensor * b,
  3911. struct ggml_tensor * c) {
  3912. GGML_ASSERT(ggml_is_matrix(a) && ggml_is_vector(b) && b->type == GGML_TYPE_I32);
  3913. GGML_ASSERT(ggml_is_matrix(c) && (a->ne[0] == c->ne[0]));
  3914. bool is_node = false;
  3915. if (a->grad || b->grad) {
  3916. is_node = true;
  3917. }
  3918. // TODO: implement non F32 return
  3919. //struct ggml_tensor * result = ggml_new_tensor_2d(ctx, a->type, a->ne[0], b->ne[0]);
  3920. struct ggml_tensor * result = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, c->ne[0], c->ne[1]);
  3921. result->op = GGML_OP_GET_ROWS_BACK;
  3922. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3923. result->src[0] = a;
  3924. result->src[1] = b;
  3925. return result;
  3926. }
  3927. // ggml_diag
  3928. struct ggml_tensor * ggml_diag(
  3929. struct ggml_context * ctx,
  3930. struct ggml_tensor * a) {
  3931. GGML_ASSERT(a->ne[1] == 1);
  3932. bool is_node = false;
  3933. if (a->grad) {
  3934. is_node = true;
  3935. }
  3936. const int64_t ne[4] = { a->ne[0], a->ne[0], a->ne[2], a->ne[3] };
  3937. struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, 4, ne);
  3938. result->op = GGML_OP_DIAG;
  3939. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3940. result->src[0] = a;
  3941. return result;
  3942. }
  3943. // ggml_diag_mask_inf
  3944. static struct ggml_tensor * ggml_diag_mask_inf_impl(
  3945. struct ggml_context * ctx,
  3946. struct ggml_tensor * a,
  3947. int n_past,
  3948. bool inplace) {
  3949. bool is_node = false;
  3950. if (a->grad) {
  3951. is_node = true;
  3952. }
  3953. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3954. int32_t params[] = { n_past };
  3955. ggml_set_op_params(result, params, sizeof(params));
  3956. result->op = GGML_OP_DIAG_MASK_INF;
  3957. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3958. result->src[0] = a;
  3959. return result;
  3960. }
  3961. struct ggml_tensor * ggml_diag_mask_inf(
  3962. struct ggml_context * ctx,
  3963. struct ggml_tensor * a,
  3964. int n_past) {
  3965. return ggml_diag_mask_inf_impl(ctx, a, n_past, false);
  3966. }
  3967. struct ggml_tensor * ggml_diag_mask_inf_inplace(
  3968. struct ggml_context * ctx,
  3969. struct ggml_tensor * a,
  3970. int n_past) {
  3971. return ggml_diag_mask_inf_impl(ctx, a, n_past, true);
  3972. }
  3973. // ggml_diag_mask_zero
  3974. static struct ggml_tensor * ggml_diag_mask_zero_impl(
  3975. struct ggml_context * ctx,
  3976. struct ggml_tensor * a,
  3977. int n_past,
  3978. bool inplace) {
  3979. bool is_node = false;
  3980. if (a->grad) {
  3981. is_node = true;
  3982. }
  3983. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3984. int32_t params[] = { n_past };
  3985. ggml_set_op_params(result, params, sizeof(params));
  3986. result->op = GGML_OP_DIAG_MASK_ZERO;
  3987. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3988. result->src[0] = a;
  3989. return result;
  3990. }
  3991. struct ggml_tensor * ggml_diag_mask_zero(
  3992. struct ggml_context * ctx,
  3993. struct ggml_tensor * a,
  3994. int n_past) {
  3995. return ggml_diag_mask_zero_impl(ctx, a, n_past, false);
  3996. }
  3997. struct ggml_tensor * ggml_diag_mask_zero_inplace(
  3998. struct ggml_context * ctx,
  3999. struct ggml_tensor * a,
  4000. int n_past) {
  4001. return ggml_diag_mask_zero_impl(ctx, a, n_past, true);
  4002. }
  4003. // ggml_soft_max
  4004. static struct ggml_tensor * ggml_soft_max_impl(
  4005. struct ggml_context * ctx,
  4006. struct ggml_tensor * a,
  4007. struct ggml_tensor * mask,
  4008. float scale,
  4009. bool inplace) {
  4010. GGML_ASSERT(ggml_is_contiguous(a));
  4011. if (mask) {
  4012. GGML_ASSERT(ggml_is_contiguous(mask));
  4013. GGML_ASSERT(mask->ne[2] == 1);
  4014. GGML_ASSERT(mask->ne[3] == 1);
  4015. GGML_ASSERT(ggml_can_repeat_rows(mask, a));
  4016. }
  4017. bool is_node = false;
  4018. if (a->grad) {
  4019. is_node = true;
  4020. }
  4021. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4022. float params[] = { scale };
  4023. ggml_set_op_params(result, params, sizeof(params));
  4024. result->op = GGML_OP_SOFT_MAX;
  4025. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4026. result->src[0] = a;
  4027. result->src[1] = mask;
  4028. return result;
  4029. }
  4030. struct ggml_tensor * ggml_soft_max(
  4031. struct ggml_context * ctx,
  4032. struct ggml_tensor * a) {
  4033. return ggml_soft_max_impl(ctx, a, NULL, 1.0f, false);
  4034. }
  4035. struct ggml_tensor * ggml_soft_max_inplace(
  4036. struct ggml_context * ctx,
  4037. struct ggml_tensor * a) {
  4038. return ggml_soft_max_impl(ctx, a, NULL, 1.0f, true);
  4039. }
  4040. struct ggml_tensor * ggml_soft_max_ext(
  4041. struct ggml_context * ctx,
  4042. struct ggml_tensor * a,
  4043. struct ggml_tensor * mask,
  4044. float scale) {
  4045. return ggml_soft_max_impl(ctx, a, mask, scale, false);
  4046. }
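// NOTE (added, illustrative only): ggml_soft_max_ext is the scaled, optionally masked
// row-wise softmax used for attention scores. Assuming placeholder tensors `kq`
// (raw scores) and `kq_mask` (broadcast across the rows of `kq`, may be NULL):
//
//     struct ggml_tensor * p  = ggml_soft_max_ext(ctx, kq, kq_mask, 1.0f/sqrtf(n_embd_head));
//     struct ggml_tensor * p2 = ggml_soft_max(ctx, kq);   // unscaled, unmasked form
//
// The scale is applied to `a` before the mask is added and the softmax is taken.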
  4047. // ggml_soft_max_back
  4048. static struct ggml_tensor * ggml_soft_max_back_impl(
  4049. struct ggml_context * ctx,
  4050. struct ggml_tensor * a,
  4051. struct ggml_tensor * b,
  4052. bool inplace) {
  4053. bool is_node = false;
  4054. if (a->grad || b->grad) {
4055. is_node = true; // TODO: implement backward pass
  4056. }
  4057. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4058. result->op = GGML_OP_SOFT_MAX_BACK;
  4059. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4060. result->src[0] = a;
  4061. result->src[1] = b;
  4062. return result;
  4063. }
  4064. struct ggml_tensor * ggml_soft_max_back(
  4065. struct ggml_context * ctx,
  4066. struct ggml_tensor * a,
  4067. struct ggml_tensor * b) {
  4068. return ggml_soft_max_back_impl(ctx, a, b, false);
  4069. }
  4070. struct ggml_tensor * ggml_soft_max_back_inplace(
  4071. struct ggml_context * ctx,
  4072. struct ggml_tensor * a,
  4073. struct ggml_tensor * b) {
  4074. return ggml_soft_max_back_impl(ctx, a, b, true);
  4075. }
  4076. // ggml_rope
  4077. static struct ggml_tensor * ggml_rope_impl(
  4078. struct ggml_context * ctx,
  4079. struct ggml_tensor * a,
  4080. struct ggml_tensor * b,
  4081. int n_dims,
  4082. int mode,
  4083. int n_ctx,
  4084. int n_orig_ctx,
  4085. float freq_base,
  4086. float freq_scale,
  4087. float ext_factor,
  4088. float attn_factor,
  4089. float beta_fast,
  4090. float beta_slow,
  4091. float xpos_base,
  4092. bool xpos_down,
  4093. bool inplace) {
  4094. GGML_ASSERT(ggml_is_vector(b));
  4095. GGML_ASSERT(b->type == GGML_TYPE_I32);
  4096. GGML_ASSERT(a->ne[2] == b->ne[0]);
  4097. bool is_node = false;
  4098. if (a->grad) {
  4099. is_node = true;
  4100. }
  4101. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4102. int32_t params[13] = { /*n_past*/ 0, n_dims, mode, n_ctx, n_orig_ctx };
  4103. memcpy(params + 5, &freq_base, sizeof(float));
  4104. memcpy(params + 6, &freq_scale, sizeof(float));
  4105. memcpy(params + 7, &ext_factor, sizeof(float));
  4106. memcpy(params + 8, &attn_factor, sizeof(float));
  4107. memcpy(params + 9, &beta_fast, sizeof(float));
  4108. memcpy(params + 10, &beta_slow, sizeof(float));
  4109. memcpy(params + 11, &xpos_base, sizeof(float));
  4110. memcpy(params + 12, &xpos_down, sizeof(bool));
  4111. ggml_set_op_params(result, params, sizeof(params));
  4112. result->op = GGML_OP_ROPE;
  4113. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4114. result->src[0] = a;
  4115. result->src[1] = b;
  4116. return result;
  4117. }
  4118. struct ggml_tensor * ggml_rope(
  4119. struct ggml_context * ctx,
  4120. struct ggml_tensor * a,
  4121. struct ggml_tensor * b,
  4122. int n_dims,
  4123. int mode,
  4124. int n_ctx) {
  4125. return ggml_rope_impl(
  4126. ctx, a, b, n_dims, mode, n_ctx, 0, 10000.0f, 1.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, false, false
  4127. );
  4128. }
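// NOTE (added, illustrative only): `b` holds one I32 position per row along dim 2 of
// `a` (hence the assert a->ne[2] == b->ne[0] above). Assuming placeholder names
// `cur`, `pos`, `n_rot`, `n_ctx` from a caller:
//
//     struct ggml_tensor * pos = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, n_tokens);
//     // ... fill pos->data with absolute token positions ...
//     cur = ggml_rope(ctx, cur, pos, n_rot, /*mode=*/0, n_ctx);
//
// ggml_rope_custom exposes the frequency/extrapolation parameters that the plain
// ggml_rope call above fixes at their defaults (freq_base 10000, freq_scale 1).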
  4129. struct ggml_tensor * ggml_rope_inplace(
  4130. struct ggml_context * ctx,
  4131. struct ggml_tensor * a,
  4132. struct ggml_tensor * b,
  4133. int n_dims,
  4134. int mode,
  4135. int n_ctx) {
  4136. return ggml_rope_impl(
  4137. ctx, a, b, n_dims, mode, n_ctx, 0, 10000.0f, 1.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, false, true
  4138. );
  4139. }
  4140. struct ggml_tensor * ggml_rope_custom(
  4141. struct ggml_context * ctx,
  4142. struct ggml_tensor * a,
  4143. struct ggml_tensor * b,
  4144. int n_dims,
  4145. int mode,
  4146. int n_ctx,
  4147. int n_orig_ctx,
  4148. float freq_base,
  4149. float freq_scale,
  4150. float ext_factor,
  4151. float attn_factor,
  4152. float beta_fast,
  4153. float beta_slow) {
  4154. return ggml_rope_impl(
  4155. ctx, a, b, n_dims, mode, n_ctx, n_orig_ctx, freq_base, freq_scale,
  4156. ext_factor, attn_factor, beta_fast, beta_slow, 0.0f, false, false
  4157. );
  4158. }
  4159. struct ggml_tensor * ggml_rope_custom_inplace(
  4160. struct ggml_context * ctx,
  4161. struct ggml_tensor * a,
  4162. struct ggml_tensor * b,
  4163. int n_dims,
  4164. int mode,
  4165. int n_ctx,
  4166. int n_orig_ctx,
  4167. float freq_base,
  4168. float freq_scale,
  4169. float ext_factor,
  4170. float attn_factor,
  4171. float beta_fast,
  4172. float beta_slow) {
  4173. return ggml_rope_impl(
  4174. ctx, a, b, n_dims, mode, n_ctx, n_orig_ctx, freq_base, freq_scale,
  4175. ext_factor, attn_factor, beta_fast, beta_slow, 0.0f, false, true
  4176. );
  4177. }
  4178. struct ggml_tensor * ggml_rope_xpos_inplace(
  4179. struct ggml_context * ctx,
  4180. struct ggml_tensor * a,
  4181. struct ggml_tensor * b,
  4182. int n_dims,
  4183. float base,
  4184. bool down) {
  4185. return ggml_rope_impl(ctx, a, b, n_dims, 0, 0, 0, 10000.0f, 1.0f, 0.0f, 1.0f, 0.0f, 0.0f, base, down, true);
  4186. }
  4187. // ggml_rope_back
  4188. struct ggml_tensor * ggml_rope_back(
  4189. struct ggml_context * ctx,
  4190. struct ggml_tensor * a,
  4191. struct ggml_tensor * b,
  4192. int n_dims,
  4193. int mode,
  4194. int n_ctx,
  4195. int n_orig_ctx,
  4196. float freq_base,
  4197. float freq_scale,
  4198. float ext_factor,
  4199. float attn_factor,
  4200. float beta_fast,
  4201. float beta_slow,
  4202. float xpos_base,
  4203. bool xpos_down) {
  4204. GGML_ASSERT(ggml_is_vector(b));
  4205. GGML_ASSERT(b->type == GGML_TYPE_I32);
  4206. GGML_ASSERT(a->ne[2] == b->ne[0]);
  4207. GGML_ASSERT((mode & 4) == 0 && "ggml_rope_back() for ChatGLM not implemented yet");
  4208. bool is_node = false;
  4209. if (a->grad) {
  4210. is_node = false; // TODO: implement backward
  4211. }
  4212. struct ggml_tensor * result = ggml_dup_tensor(ctx, a);
  4213. int32_t params[13] = { /*n_past*/ 0, n_dims, mode, n_ctx, n_orig_ctx };
  4214. memcpy(params + 5, &freq_base, sizeof(float));
  4215. memcpy(params + 6, &freq_scale, sizeof(float));
  4216. memcpy(params + 7, &ext_factor, sizeof(float));
  4217. memcpy(params + 8, &attn_factor, sizeof(float));
  4218. memcpy(params + 9, &beta_fast, sizeof(float));
  4219. memcpy(params + 10, &beta_slow, sizeof(float));
  4220. memcpy(params + 11, &xpos_base, sizeof(float));
  4221. memcpy(params + 12, &xpos_down, sizeof(bool));
  4222. ggml_set_op_params(result, params, sizeof(params));
  4223. result->op = GGML_OP_ROPE_BACK;
  4224. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4225. result->src[0] = a;
  4226. result->src[1] = b;
  4227. return result;
  4228. }
  4229. // ggml_alibi
  4230. struct ggml_tensor * ggml_alibi(
  4231. struct ggml_context * ctx,
  4232. struct ggml_tensor * a,
  4233. int n_past,
  4234. int n_head,
  4235. float bias_max) {
  4236. GGML_ASSERT(n_past >= 0);
  4237. bool is_node = false;
  4238. if (a->grad) {
  4239. GGML_ASSERT(false); // TODO: implement backward
  4240. is_node = true;
  4241. }
4242. // TODO: when implementing backward, fix this:
  4243. //struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4244. struct ggml_tensor * result = ggml_view_tensor(ctx, a);
  4245. int32_t op_params[3] = { n_past, n_head };
  4246. memcpy(op_params + 2, &bias_max, sizeof(float));
  4247. ggml_set_op_params(result, op_params, sizeof(op_params));
  4248. result->op = GGML_OP_ALIBI;
  4249. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4250. result->src[0] = a;
  4251. return result;
  4252. }
  4253. // ggml_clamp
  4254. struct ggml_tensor * ggml_clamp(
  4255. struct ggml_context * ctx,
  4256. struct ggml_tensor * a,
  4257. float min,
  4258. float max) {
  4259. bool is_node = false;
  4260. if (a->grad) {
  4261. GGML_ASSERT(false); // TODO: implement backward
  4262. is_node = true;
  4263. }
4264. // TODO: when implementing backward, fix this:
  4265. struct ggml_tensor * result = ggml_view_tensor(ctx, a);
  4266. float params[] = { min, max };
  4267. ggml_set_op_params(result, params, sizeof(params));
  4268. result->op = GGML_OP_CLAMP;
  4269. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4270. result->src[0] = a;
  4271. return result;
  4272. }
  4273. // ggml_conv_1d
  4274. static int64_t ggml_calc_conv_output_size(int64_t ins, int64_t ks, int s, int p, int d) {
  4275. return (ins + 2 * p - d * (ks - 1) - 1) / s + 1;
  4276. }
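// Worked example of the formula above (added): with ins = 10, ks = 3, s = 1, p = 1,
// d = 1 the output length is (10 + 2*1 - 1*(3-1) - 1)/1 + 1 = 10, i.e. "same"
// padding preserves the length; with s = 2 the same inputs give
// (10 + 2 - 2 - 1)/2 + 1 = 5 (integer division).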
  4277. GGML_API struct ggml_tensor * ggml_conv_1d(
  4278. struct ggml_context * ctx,
  4279. struct ggml_tensor * a,
  4280. struct ggml_tensor * b,
  4281. int s0,
  4282. int p0,
  4283. int d0) {
  4284. struct ggml_tensor * im2col = ggml_im2col(ctx, a, b, s0, 0, p0, 0, d0, 0, false); // [N, OL, IC * K]
  4285. struct ggml_tensor * result =
  4286. ggml_mul_mat(ctx,
  4287. ggml_reshape_2d(ctx, im2col, im2col->ne[0], (im2col->ne[2] * im2col->ne[1])), // [N, OL, IC * K] => [N*OL, IC * K]
  4288. ggml_reshape_2d(ctx, a, (a->ne[0] * a->ne[1]), a->ne[2])); // [OC,IC, K] => [OC, IC * K]
  4289. result = ggml_reshape_3d(ctx, result, im2col->ne[1], a->ne[2], im2col->ne[2]); // [N, OC, OL]
  4290. return result;
  4291. }
  4292. // ggml_conv_1d_ph
  4293. struct ggml_tensor* ggml_conv_1d_ph(
  4294. struct ggml_context * ctx,
  4295. struct ggml_tensor * a,
  4296. struct ggml_tensor * b,
  4297. int s,
  4298. int d) {
  4299. return ggml_conv_1d(ctx, a, b, s, a->ne[0] / 2, d);
  4300. }
  4301. // ggml_conv_transpose_1d
  4302. static int64_t ggml_calc_conv_transpose_1d_output_size(int64_t ins, int64_t ks, int s, int p, int d) {
  4303. return (ins - 1) * s - 2 * p + d * (ks - 1) + 1;
  4304. }
  4305. GGML_API struct ggml_tensor * ggml_conv_transpose_1d(
  4306. struct ggml_context * ctx,
  4307. struct ggml_tensor * a,
  4308. struct ggml_tensor * b,
  4309. int s0,
  4310. int p0,
  4311. int d0) {
  4312. GGML_ASSERT(ggml_is_matrix(b));
  4313. GGML_ASSERT(a->ne[2] == b->ne[1]);
  4314. GGML_ASSERT(a->ne[3] == 1);
  4315. GGML_ASSERT(p0 == 0);
  4316. GGML_ASSERT(d0 == 1);
  4317. bool is_node = false;
  4318. if (a->grad || b->grad) {
  4319. GGML_ASSERT(false); // TODO: implement backward
  4320. is_node = true;
  4321. }
  4322. const int64_t ne[4] = {
  4323. ggml_calc_conv_transpose_1d_output_size(b->ne[0], a->ne[0], s0, 0 /*p0*/, 1 /*d0*/),
  4324. a->ne[1], b->ne[2], 1,
  4325. };
  4326. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);
  4327. int32_t params[] = { s0, p0, d0 };
  4328. ggml_set_op_params(result, params, sizeof(params));
  4329. result->op = GGML_OP_CONV_TRANSPOSE_1D;
  4330. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4331. result->src[0] = a;
  4332. result->src[1] = b;
  4333. return result;
  4334. }
  4335. // ggml_conv_2d
  4336. // im2col: [N, IC, IH, IW] => [N, OH, OW, IC*KH*KW]
  4337. // a: [OC,IC, KH, KW]
  4338. // b: [N, IC, IH, IW]
  4339. // result: [N, OH, OW, IC*KH*KW]
  4340. struct ggml_tensor * ggml_im2col(
  4341. struct ggml_context * ctx,
  4342. struct ggml_tensor * a,
  4343. struct ggml_tensor * b,
  4344. int s0,
  4345. int s1,
  4346. int p0,
  4347. int p1,
  4348. int d0,
  4349. int d1,
  4350. bool is_2D) {
4351. if (is_2D) {
  4352. GGML_ASSERT(a->ne[2] == b->ne[2]);
  4353. } else {
  4354. GGML_ASSERT(a->ne[1] == b->ne[1]);
  4355. }
  4356. bool is_node = false;
  4357. if (a->grad || b->grad) {
  4358. GGML_ASSERT(false); // TODO: implement backward
  4359. is_node = true;
  4360. }
  4361. const int64_t OH = is_2D ? ggml_calc_conv_output_size(b->ne[1], a->ne[1], s1, p1, d1) : 0;
  4362. const int64_t OW = ggml_calc_conv_output_size(b->ne[0], a->ne[0], s0, p0, d0);
  4363. const int64_t ne[4] = {
  4364. is_2D ? (a->ne[2] * a->ne[1] * a->ne[0]) : a->ne[1] * a->ne[0],
  4365. OW,
  4366. is_2D ? OH : b->ne[2],
  4367. is_2D ? b->ne[3] : 1,
  4368. };
  4369. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F16, 4, ne);
  4370. int32_t params[] = { s0, s1, p0, p1, d0, d1, (is_2D ? 1 : 0) };
  4371. ggml_set_op_params(result, params, sizeof(params));
  4372. result->op = GGML_OP_IM2COL;
  4373. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4374. result->src[0] = a;
  4375. result->src[1] = b;
  4376. return result;
  4377. }
  4378. // a: [OC,IC, KH, KW]
  4379. // b: [N, IC, IH, IW]
  4380. // result: [N, OC, OH, OW]
  4381. struct ggml_tensor * ggml_conv_2d(
  4382. struct ggml_context * ctx,
  4383. struct ggml_tensor * a,
  4384. struct ggml_tensor * b,
  4385. int s0,
  4386. int s1,
  4387. int p0,
  4388. int p1,
  4389. int d0,
  4390. int d1) {
  4391. struct ggml_tensor * im2col = ggml_im2col(ctx, a, b, s0, s1, p0, p1, d0, d1, true); // [N, OH, OW, IC * KH * KW]
  4392. struct ggml_tensor * result =
  4393. ggml_mul_mat(ctx,
  4394. ggml_reshape_2d(ctx, im2col, im2col->ne[0], im2col->ne[3] * im2col->ne[2] * im2col->ne[1]), // [N, OH, OW, IC * KH * KW] => [N*OH*OW, IC * KH * KW]
  4395. ggml_reshape_2d(ctx, a, (a->ne[0] * a->ne[1] * a->ne[2]), a->ne[3])); // [OC,IC, KH, KW] => [OC, IC * KH * KW]
  4396. result = ggml_reshape_4d(ctx, result, im2col->ne[1], im2col->ne[2], a->ne[3], im2col->ne[3]); // [N, OC, OH, OW]
  4397. return result;
  4398. }
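// NOTE (added, illustrative only): ggml_conv_2d is expressed as im2col followed by a
// single mat-mul and a reshape, as seen above. A 3x3 convolution with stride 1,
// "same" padding and no dilation, assuming placeholder tensors `kernel` (the weight
// tensor `a`) and `inp` (the input `b`):
//
//     struct ggml_tensor * out = ggml_conv_2d(ctx, kernel, inp, 1, 1, 1, 1, 1, 1);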
  4399. // ggml_conv_2d_sk_p0
  4400. struct ggml_tensor * ggml_conv_2d_sk_p0(
  4401. struct ggml_context * ctx,
  4402. struct ggml_tensor * a,
  4403. struct ggml_tensor * b) {
  4404. return ggml_conv_2d(ctx, a, b, a->ne[0], a->ne[1], 0, 0, 1, 1);
  4405. }
  4406. // ggml_conv_2d_s1_ph
  4407. struct ggml_tensor * ggml_conv_2d_s1_ph(
  4408. struct ggml_context * ctx,
  4409. struct ggml_tensor * a,
  4410. struct ggml_tensor * b) {
  4411. return ggml_conv_2d(ctx, a, b, 1, 1, a->ne[0] / 2, a->ne[1] / 2, 1, 1);
  4412. }
  4413. // ggml_conv_transpose_2d_p0
  4414. static int64_t ggml_calc_conv_transpose_output_size(int64_t ins, int64_t ks, int s, int p) {
  4415. return (ins - 1) * s - 2 * p + ks;
  4416. }
  4417. struct ggml_tensor * ggml_conv_transpose_2d_p0(
  4418. struct ggml_context * ctx,
  4419. struct ggml_tensor * a,
  4420. struct ggml_tensor * b,
  4421. int stride) {
  4422. GGML_ASSERT(a->ne[3] == b->ne[2]);
  4423. bool is_node = false;
  4424. if (a->grad || b->grad) {
  4425. GGML_ASSERT(false); // TODO: implement backward
  4426. is_node = true;
  4427. }
  4428. const int64_t ne[4] = {
  4429. ggml_calc_conv_transpose_output_size(b->ne[0], a->ne[0], stride, 0 /*p0*/),
  4430. ggml_calc_conv_transpose_output_size(b->ne[1], a->ne[1], stride, 0 /*p1*/),
  4431. a->ne[2], b->ne[3],
  4432. };
  4433. struct ggml_tensor* result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);
  4434. ggml_set_op_params_i32(result, 0, stride);
  4435. result->op = GGML_OP_CONV_TRANSPOSE_2D;
  4436. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4437. result->src[0] = a;
  4438. result->src[1] = b;
  4439. return result;
  4440. }
  4441. // ggml_pool_*
  4442. static int64_t ggml_calc_pool_output_size(int64_t ins, int ks, int s, float p) {
  4443. return (ins + 2 * p - ks) / s + 1;
  4444. }
  4445. // ggml_pool_1d
  4446. struct ggml_tensor * ggml_pool_1d(
  4447. struct ggml_context * ctx,
  4448. struct ggml_tensor * a,
  4449. enum ggml_op_pool op,
  4450. int k0,
  4451. int s0,
  4452. int p0) {
  4453. bool is_node = false;
  4454. if (a->grad) {
  4455. GGML_ASSERT(false); // TODO: implement backward
  4456. is_node = true;
  4457. }
  4458. const int64_t ne[2] = {
  4459. ggml_calc_pool_output_size(a->ne[0], k0, s0, p0),
  4460. a->ne[1],
  4461. };
  4462. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 2, ne);
  4463. int32_t params[] = { op, k0, s0, p0 };
  4464. ggml_set_op_params(result, params, sizeof(params));
  4465. result->op = GGML_OP_POOL_1D;
  4466. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4467. result->src[0] = a;
  4468. return result;
  4469. }
  4470. // ggml_pool_2d
  4471. struct ggml_tensor * ggml_pool_2d(
  4472. struct ggml_context * ctx,
  4473. struct ggml_tensor * a,
  4474. enum ggml_op_pool op,
  4475. int k0,
  4476. int k1,
  4477. int s0,
  4478. int s1,
  4479. float p0,
  4480. float p1) {
  4481. bool is_node = false;
  4482. if (a->grad) {
  4483. GGML_ASSERT(false); // TODO: implement backward
  4484. is_node = true;
  4485. }
  4486. const int64_t ne[3] = {
  4487. ggml_calc_pool_output_size(a->ne[0], k0, s0, p0),
  4488. ggml_calc_pool_output_size(a->ne[1], k1, s1, p1),
  4489. a->ne[2],
  4490. };
  4491. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 3, ne);
  4492. int32_t params[] = { op, k0, k1, s0, s1, p0, p1 };
  4493. ggml_set_op_params(result, params, sizeof(params));
  4494. result->op = GGML_OP_POOL_2D;
  4495. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4496. result->src[0] = a;
  4497. return result;
  4498. }
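// NOTE (added, illustrative only): 2x2 max pooling with stride 2 and no padding over
// a placeholder input `x`:
//
//     struct ggml_tensor * pooled = ggml_pool_2d(ctx, x, GGML_OP_POOL_MAX, 2, 2, 2, 2, 0, 0);
//
// The output sizes follow ggml_calc_pool_output_size above; GGML_OP_POOL_AVG selects
// average pooling instead.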
  4499. // ggml_upscale
  4500. static struct ggml_tensor * ggml_upscale_impl(
  4501. struct ggml_context * ctx,
  4502. struct ggml_tensor * a,
  4503. int scale_factor) {
  4504. bool is_node = false;
  4505. if (a->grad) {
  4506. GGML_ASSERT(false); // TODO: implement backward
  4507. is_node = true;
  4508. }
  4509. struct ggml_tensor * result = ggml_new_tensor_4d(ctx, a->type,
  4510. a->ne[0] * scale_factor,
  4511. a->ne[1] * scale_factor,
  4512. a->ne[2], a->ne[3]);
  4513. result->op = GGML_OP_UPSCALE;
  4514. result->op_params[0] = scale_factor;
  4515. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4516. result->src[0] = a;
  4517. result->src[1] = NULL;
  4518. return result;
  4519. }
  4520. struct ggml_tensor * ggml_pad(
  4521. struct ggml_context * ctx,
  4522. struct ggml_tensor * a,
  4523. int p0, int p1, int p2, int p3) {
  4524. bool is_node = false;
  4525. if (a->grad) {
  4526. GGML_ASSERT(false); // TODO: implement backward
  4527. is_node = true;
  4528. }
  4529. struct ggml_tensor * result = ggml_new_tensor_4d(ctx, a->type,
  4530. a->ne[0] + p0,
  4531. a->ne[1] + p1,
  4532. a->ne[2] + p2,
  4533. a->ne[3] + p3);
  4534. result->op = GGML_OP_PAD;
  4535. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4536. result->src[0] = a;
  4537. return result;
  4538. }
  4539. struct ggml_tensor * ggml_upscale(
  4540. struct ggml_context * ctx,
  4541. struct ggml_tensor * a,
  4542. int scale_factor) {
  4543. return ggml_upscale_impl(ctx, a, scale_factor);
  4544. }
  4545. // ggml_argsort
  4546. struct ggml_tensor * ggml_argsort(
  4547. struct ggml_context * ctx,
  4548. struct ggml_tensor * a,
  4549. enum ggml_sort_order order) {
  4550. bool is_node = false;
  4551. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_I32, GGML_MAX_DIMS, a->ne);
  4552. ggml_set_op_params_i32(result, 0, (int32_t) order);
  4553. result->op = GGML_OP_ARGSORT;
  4554. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4555. result->src[0] = a;
  4556. return result;
  4557. }
  4558. // ggml_top_k
  4559. struct ggml_tensor * ggml_top_k(
  4560. struct ggml_context * ctx,
  4561. struct ggml_tensor * a,
  4562. int k) {
  4563. GGML_ASSERT(a->ne[0] >= k);
  4564. struct ggml_tensor * result = ggml_argsort(ctx, a, GGML_SORT_DESC);
  4565. result = ggml_view_4d(ctx, result,
  4566. k, result->ne[1], result->ne[2], result->ne[3],
  4567. result->nb[1], result->nb[2], result->nb[3],
  4568. 0);
  4569. return result;
  4570. }
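// NOTE (added, illustrative only): ggml_top_k is implemented as a descending argsort
// followed by a k-wide view, so the result holds the I32 *indices* of the k largest
// values per row, not the values themselves:
//
//     struct ggml_tensor * best = ggml_top_k(ctx, logits, 4);   // I32, ne[0] == 4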
  4571. // ggml_flash_attn
  4572. struct ggml_tensor * ggml_flash_attn(
  4573. struct ggml_context * ctx,
  4574. struct ggml_tensor * q,
  4575. struct ggml_tensor * k,
  4576. struct ggml_tensor * v,
  4577. bool masked) {
  4578. GGML_ASSERT(ggml_can_mul_mat(k, q));
  4579. // TODO: check if vT can be multiplied by (k*qT)
  4580. bool is_node = false;
  4581. if (q->grad || k->grad || v->grad) {
  4582. is_node = true;
  4583. }
  4584. //struct ggml_tensor * result = ggml_dup_tensor(ctx, q);
  4585. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, GGML_MAX_DIMS, q->ne);
  4586. int32_t t = masked ? 1 : 0;
  4587. ggml_set_op_params(result, &t, sizeof(t));
  4588. result->op = GGML_OP_FLASH_ATTN;
  4589. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4590. result->src[0] = q;
  4591. result->src[1] = k;
  4592. result->src[2] = v;
  4593. return result;
  4594. }
  4595. // ggml_flash_ff
  4596. struct ggml_tensor * ggml_flash_ff(
  4597. struct ggml_context * ctx,
  4598. struct ggml_tensor * a,
  4599. struct ggml_tensor * b0,
  4600. struct ggml_tensor * b1,
  4601. struct ggml_tensor * c0,
  4602. struct ggml_tensor * c1) {
  4603. GGML_ASSERT(ggml_can_mul_mat(b0, a));
  4604. // TODO: more checks
  4605. bool is_node = false;
  4606. if (a->grad || b0->grad || b1->grad || c0->grad || c1->grad) {
  4607. is_node = true;
  4608. }
  4609. //struct ggml_tensor * result = ggml_dup_tensor(ctx, a);
  4610. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, GGML_MAX_DIMS, a->ne);
  4611. result->op = GGML_OP_FLASH_FF;
  4612. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4613. result->src[0] = a;
  4614. result->src[1] = b0;
  4615. result->src[2] = b1;
  4616. result->src[3] = c0;
  4617. result->src[4] = c1;
  4618. return result;
  4619. }
  4620. // ggml_flash_attn_back
  4621. struct ggml_tensor * ggml_flash_attn_back(
  4622. struct ggml_context * ctx,
  4623. struct ggml_tensor * q,
  4624. struct ggml_tensor * k,
  4625. struct ggml_tensor * v,
  4626. struct ggml_tensor * d,
  4627. bool masked) {
  4628. GGML_ASSERT(ggml_can_mul_mat(k, q));
  4629. // TODO: check if vT can be multiplied by (k*qT)
  4630. // d shape [D,N,ne2,ne3]
  4631. // q shape [D,N,ne2,ne3]
  4632. // k shape [D,M,kvne2,ne3]
  4633. // v shape [M,D,kvne2,ne3]
  4634. const int64_t D = q->ne[0];
  4635. const int64_t N = q->ne[1];
  4636. const int64_t M = k->ne[1];
  4637. const int64_t ne2 = q->ne[2];
  4638. const int64_t ne3 = q->ne[3];
  4639. const int64_t kvne2 = k->ne[2];
  4640. GGML_ASSERT(k->ne[0] == D);
  4641. GGML_ASSERT(v->ne[0] == M);
  4642. GGML_ASSERT(v->ne[1] == D);
  4643. GGML_ASSERT(d->ne[0] == D);
  4644. GGML_ASSERT(d->ne[1] == N);
  4645. GGML_ASSERT(k->ne[2] == kvne2);
  4646. GGML_ASSERT(k->ne[3] == ne3);
  4647. GGML_ASSERT(v->ne[2] == kvne2);
  4648. GGML_ASSERT(v->ne[3] == ne3);
  4649. GGML_ASSERT(d->ne[2] == ne2);
  4650. GGML_ASSERT(d->ne[3] == ne3);
  4651. GGML_ASSERT(ne2 % kvne2 == 0);
  4652. bool is_node = false;
  4653. if (q->grad || k->grad || v->grad) {
  4654. // when using this operation (in backwards pass) these grads are set.
  4655. // we don't want to create (big) grad of our result, so is_node is false.
  4656. is_node = false;
  4657. }
4658. // store gradients of q, k and v as contiguous tensors concatenated in result.
  4659. // note: v and gradv are actually transposed, i.e. v->ne[0] != D.
  4660. const int64_t elem_q = ggml_nelements(q);
  4661. const int64_t elem_k = ggml_nelements(k);
  4662. const int64_t elem_v = ggml_nelements(v);
  4663. enum ggml_type result_type = GGML_TYPE_F32;
  4664. GGML_ASSERT(ggml_blck_size(result_type) == 1);
  4665. const size_t tsize = ggml_type_size(result_type);
  4666. const size_t offs_q = 0;
  4667. const size_t offs_k = offs_q + GGML_PAD(elem_q * tsize, GGML_MEM_ALIGN);
  4668. const size_t offs_v = offs_k + GGML_PAD(elem_k * tsize, GGML_MEM_ALIGN);
  4669. const size_t end = offs_v + GGML_PAD(elem_v * tsize, GGML_MEM_ALIGN);
  4670. const size_t nelements = (end + tsize - 1)/tsize;
  4671. struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nelements);
  4672. int32_t masked_i = masked ? 1 : 0;
  4673. ggml_set_op_params(result, &masked_i, sizeof(masked_i));
  4674. result->op = GGML_OP_FLASH_ATTN_BACK;
  4675. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4676. result->src[0] = q;
  4677. result->src[1] = k;
  4678. result->src[2] = v;
  4679. result->src[3] = d;
  4680. return result;
  4681. }
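// Layout note (added): the 1-D result above packs the three gradients back to back,
// each padded to GGML_MEM_ALIGN:
//
//     [ grad q | grad k | grad v ]   at byte offsets offs_q, offs_k, offs_v
//
// For example, with elem_q = elem_k = elem_v = 1024 F32 values and a 16-byte
// alignment this gives offs_q = 0, offs_k = 4096, offs_v = 8192 and
// nelements = 12288/4 = 3072. The backward pass that consumes this op views into
// the packed buffer at these offsets to recover the individual gradients.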
  4682. // ggml_win_part
  4683. struct ggml_tensor * ggml_win_part(
  4684. struct ggml_context * ctx,
  4685. struct ggml_tensor * a,
  4686. int w) {
  4687. GGML_ASSERT(a->ne[3] == 1);
  4688. GGML_ASSERT(a->type == GGML_TYPE_F32);
  4689. bool is_node = false;
  4690. if (a->grad) {
  4691. GGML_ASSERT(false); // TODO: implement backward
  4692. is_node = true;
  4693. }
  4694. // padding
  4695. const int px = (w - a->ne[1]%w)%w;
  4696. const int py = (w - a->ne[2]%w)%w;
  4697. const int npx = (px + a->ne[1])/w;
  4698. const int npy = (py + a->ne[2])/w;
  4699. const int np = npx*npy;
  4700. const int64_t ne[4] = { a->ne[0], w, w, np, };
  4701. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);
  4702. int32_t params[] = { npx, npy, w };
  4703. ggml_set_op_params(result, params, sizeof(params));
  4704. result->op = GGML_OP_WIN_PART;
  4705. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4706. result->src[0] = a;
  4707. return result;
  4708. }
  4709. // ggml_win_unpart
  4710. struct ggml_tensor * ggml_win_unpart(
  4711. struct ggml_context * ctx,
  4712. struct ggml_tensor * a,
  4713. int w0,
  4714. int h0,
  4715. int w) {
  4716. GGML_ASSERT(a->type == GGML_TYPE_F32);
  4717. bool is_node = false;
  4718. if (a->grad) {
  4719. GGML_ASSERT(false); // TODO: implement backward
  4720. is_node = true;
  4721. }
  4722. const int64_t ne[4] = { a->ne[0], w0, h0, 1, };
  4723. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 3, ne);
  4724. int32_t params[] = { w };
  4725. ggml_set_op_params(result, params, sizeof(params));
  4726. result->op = GGML_OP_WIN_UNPART;
  4727. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4728. result->src[0] = a;
  4729. return result;
  4730. }
  4731. // ggml_get_rel_pos
  4732. struct ggml_tensor * ggml_get_rel_pos(
  4733. struct ggml_context * ctx,
  4734. struct ggml_tensor * a,
  4735. int qh,
  4736. int kh) {
  4737. GGML_ASSERT(qh == kh);
  4738. GGML_ASSERT(2*MAX(qh, kh) - 1 == a->ne[1]);
  4739. bool is_node = false;
  4740. if (a->grad) {
  4741. GGML_ASSERT(false); // TODO: implement backward
  4742. is_node = true;
  4743. }
  4744. const int64_t ne[4] = { a->ne[0], kh, qh, 1, };
  4745. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F16, 3, ne);
  4746. result->op = GGML_OP_GET_REL_POS;
  4747. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4748. result->src[0] = a;
  4749. result->src[1] = NULL;
  4750. return result;
  4751. }
  4752. // ggml_add_rel_pos
  4753. static struct ggml_tensor * ggml_add_rel_pos_impl(
  4754. struct ggml_context * ctx,
  4755. struct ggml_tensor * a,
  4756. struct ggml_tensor * pw,
  4757. struct ggml_tensor * ph,
  4758. bool inplace) {
  4759. GGML_ASSERT(ggml_are_same_shape(pw, ph));
  4760. GGML_ASSERT(ggml_is_contiguous(a));
  4761. GGML_ASSERT(ggml_is_contiguous(pw));
  4762. GGML_ASSERT(ggml_is_contiguous(ph));
  4763. GGML_ASSERT(ph->type == GGML_TYPE_F32);
  4764. GGML_ASSERT(pw->type == GGML_TYPE_F32);
  4765. GGML_ASSERT(pw->ne[3] == a->ne[2]);
  4766. GGML_ASSERT(pw->ne[0]*pw->ne[0] == a->ne[0]);
  4767. GGML_ASSERT(pw->ne[1]*pw->ne[2] == a->ne[1]);
  4768. bool is_node = false;
  4769. if (!inplace && (a->grad || pw->grad || ph->grad)) {
  4770. is_node = true;
  4771. }
  4772. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4773. ggml_set_op_params_i32(result, 0, inplace ? 1 : 0);
  4774. result->op = GGML_OP_ADD_REL_POS;
  4775. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4776. result->src[0] = a;
  4777. result->src[1] = pw;
  4778. result->src[2] = ph;
  4779. return result;
  4780. }
  4781. struct ggml_tensor * ggml_add_rel_pos(
  4782. struct ggml_context * ctx,
  4783. struct ggml_tensor * a,
  4784. struct ggml_tensor * pw,
  4785. struct ggml_tensor * ph) {
  4786. return ggml_add_rel_pos_impl(ctx, a, pw, ph, false);
  4787. }
  4788. struct ggml_tensor * ggml_add_rel_pos_inplace(
  4789. struct ggml_context * ctx,
  4790. struct ggml_tensor * a,
  4791. struct ggml_tensor * pw,
  4792. struct ggml_tensor * ph) {
  4793. return ggml_add_rel_pos_impl(ctx, a, pw, ph, true);
  4794. }
4795. // ggml_unary
  4796. static struct ggml_tensor * ggml_unary_impl(
  4797. struct ggml_context * ctx,
  4798. struct ggml_tensor * a,
  4799. enum ggml_unary_op op,
  4800. bool inplace) {
  4801. bool is_node = false;
  4802. if (!inplace && (a->grad)) {
  4803. is_node = true;
  4804. }
  4805. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4806. ggml_set_op_params_i32(result, 0, (int32_t) op);
  4807. result->op = GGML_OP_UNARY;
  4808. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4809. result->src[0] = a;
  4810. return result;
  4811. }
  4812. struct ggml_tensor * ggml_unary(
  4813. struct ggml_context * ctx,
  4814. struct ggml_tensor * a,
  4815. enum ggml_unary_op op) {
  4816. return ggml_unary_impl(ctx, a, op, false);
  4817. }
  4818. struct ggml_tensor * ggml_unary_inplace(
  4819. struct ggml_context * ctx,
  4820. struct ggml_tensor * a,
  4821. enum ggml_unary_op op) {
  4822. return ggml_unary_impl(ctx, a, op, true);
  4823. }
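// NOTE (added, illustrative only): ggml_unary dispatches on enum ggml_unary_op, so a
// GELU followed by an in-place ReLU on a placeholder tensor `cur` looks like:
//
//     cur = ggml_unary(ctx, cur, GGML_UNARY_OP_GELU);           // new tensor
//     cur = ggml_unary_inplace(ctx, cur, GGML_UNARY_OP_RELU);   // reuses the buffer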
  4824. // ggml_map_unary
  4825. static struct ggml_tensor * ggml_map_unary_impl_f32(
  4826. struct ggml_context * ctx,
  4827. struct ggml_tensor * a,
  4828. const ggml_unary_op_f32_t fun,
  4829. bool inplace) {
  4830. bool is_node = false;
  4831. if (!inplace && a->grad) {
  4832. is_node = true;
  4833. }
  4834. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4835. ggml_set_op_params(result, (const void *) &fun, sizeof(fun));
  4836. result->op = GGML_OP_MAP_UNARY;
  4837. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4838. result->src[0] = a;
  4839. return result;
  4840. }
  4841. struct ggml_tensor * ggml_map_unary_f32(
  4842. struct ggml_context * ctx,
  4843. struct ggml_tensor * a,
  4844. const ggml_unary_op_f32_t fun) {
  4845. return ggml_map_unary_impl_f32(ctx, a, fun, false);
  4846. }
  4847. struct ggml_tensor * ggml_map_unary_inplace_f32(
  4848. struct ggml_context * ctx,
  4849. struct ggml_tensor * a,
  4850. const ggml_unary_op_f32_t fun) {
  4851. return ggml_map_unary_impl_f32(ctx, a, fun, true);
  4852. }
  4853. // ggml_map_binary
  4854. static struct ggml_tensor * ggml_map_binary_impl_f32(
  4855. struct ggml_context * ctx,
  4856. struct ggml_tensor * a,
  4857. struct ggml_tensor * b,
  4858. const ggml_binary_op_f32_t fun,
  4859. bool inplace) {
  4860. GGML_ASSERT(ggml_are_same_shape(a, b));
  4861. bool is_node = false;
  4862. if (!inplace && (a->grad || b->grad)) {
  4863. is_node = true;
  4864. }
  4865. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4866. ggml_set_op_params(result, (const void *) &fun, sizeof(fun));
  4867. result->op = GGML_OP_MAP_BINARY;
  4868. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4869. result->src[0] = a;
  4870. result->src[1] = b;
  4871. return result;
  4872. }
  4873. struct ggml_tensor * ggml_map_binary_f32(
  4874. struct ggml_context * ctx,
  4875. struct ggml_tensor * a,
  4876. struct ggml_tensor * b,
  4877. const ggml_binary_op_f32_t fun) {
  4878. return ggml_map_binary_impl_f32(ctx, a, b, fun, false);
  4879. }
  4880. struct ggml_tensor * ggml_map_binary_inplace_f32(
  4881. struct ggml_context * ctx,
  4882. struct ggml_tensor * a,
  4883. struct ggml_tensor * b,
  4884. const ggml_binary_op_f32_t fun) {
  4885. return ggml_map_binary_impl_f32(ctx, a, b, fun, true);
  4886. }
  4887. // ggml_map_custom1_f32
  4888. static struct ggml_tensor * ggml_map_custom1_impl_f32(
  4889. struct ggml_context * ctx,
  4890. struct ggml_tensor * a,
  4891. const ggml_custom1_op_f32_t fun,
  4892. bool inplace) {
  4893. bool is_node = false;
  4894. if (!inplace && a->grad) {
  4895. is_node = true;
  4896. }
  4897. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4898. ggml_set_op_params(result, (const void *) &fun, sizeof(fun));
  4899. result->op = GGML_OP_MAP_CUSTOM1_F32;
  4900. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4901. result->src[0] = a;
  4902. return result;
  4903. }
  4904. struct ggml_tensor * ggml_map_custom1_f32(
  4905. struct ggml_context * ctx,
  4906. struct ggml_tensor * a,
  4907. const ggml_custom1_op_f32_t fun) {
  4908. return ggml_map_custom1_impl_f32(ctx, a, fun, false);
  4909. }
  4910. struct ggml_tensor * ggml_map_custom1_inplace_f32(
  4911. struct ggml_context * ctx,
  4912. struct ggml_tensor * a,
  4913. const ggml_custom1_op_f32_t fun) {
  4914. return ggml_map_custom1_impl_f32(ctx, a, fun, true);
  4915. }
  4916. // ggml_map_custom2_f32
  4917. static struct ggml_tensor * ggml_map_custom2_impl_f32(
  4918. struct ggml_context * ctx,
  4919. struct ggml_tensor * a,
  4920. struct ggml_tensor * b,
  4921. const ggml_custom2_op_f32_t fun,
  4922. bool inplace) {
  4923. bool is_node = false;
  4924. if (!inplace && (a->grad || b->grad)) {
  4925. is_node = true;
  4926. }
  4927. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4928. ggml_set_op_params(result, (const void *) &fun, sizeof(fun));
  4929. result->op = GGML_OP_MAP_CUSTOM2_F32;
  4930. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4931. result->src[0] = a;
  4932. result->src[1] = b;
  4933. return result;
  4934. }
  4935. struct ggml_tensor * ggml_map_custom2_f32(
  4936. struct ggml_context * ctx,
  4937. struct ggml_tensor * a,
  4938. struct ggml_tensor * b,
  4939. const ggml_custom2_op_f32_t fun) {
  4940. return ggml_map_custom2_impl_f32(ctx, a, b, fun, false);
  4941. }
  4942. struct ggml_tensor * ggml_map_custom2_inplace_f32(
  4943. struct ggml_context * ctx,
  4944. struct ggml_tensor * a,
  4945. struct ggml_tensor * b,
  4946. const ggml_custom2_op_f32_t fun) {
  4947. return ggml_map_custom2_impl_f32(ctx, a, b, fun, true);
  4948. }
  4949. // ggml_map_custom3_f32
  4950. static struct ggml_tensor * ggml_map_custom3_impl_f32(
  4951. struct ggml_context * ctx,
  4952. struct ggml_tensor * a,
  4953. struct ggml_tensor * b,
  4954. struct ggml_tensor * c,
  4955. const ggml_custom3_op_f32_t fun,
  4956. bool inplace) {
  4957. bool is_node = false;
  4958. if (!inplace && (a->grad || b->grad || c->grad)) {
  4959. is_node = true;
  4960. }
  4961. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4962. ggml_set_op_params(result, (const void *) &fun, sizeof(fun));
  4963. result->op = GGML_OP_MAP_CUSTOM3_F32;
  4964. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4965. result->src[0] = a;
  4966. result->src[1] = b;
  4967. result->src[2] = c;
  4968. return result;
  4969. }
  4970. struct ggml_tensor * ggml_map_custom3_f32(
  4971. struct ggml_context * ctx,
  4972. struct ggml_tensor * a,
  4973. struct ggml_tensor * b,
  4974. struct ggml_tensor * c,
  4975. const ggml_custom3_op_f32_t fun) {
  4976. return ggml_map_custom3_impl_f32(ctx, a, b, c, fun, false);
  4977. }
  4978. struct ggml_tensor * ggml_map_custom3_inplace_f32(
  4979. struct ggml_context * ctx,
  4980. struct ggml_tensor * a,
  4981. struct ggml_tensor * b,
  4982. struct ggml_tensor * c,
  4983. const ggml_custom3_op_f32_t fun) {
  4984. return ggml_map_custom3_impl_f32(ctx, a, b, c, fun, true);
  4985. }
  4986. // ggml_map_custom1
  4987. struct ggml_map_custom1_op_params {
  4988. ggml_custom1_op_t fun;
  4989. int n_tasks;
  4990. void * userdata;
  4991. };
  4992. static struct ggml_tensor * ggml_map_custom1_impl(
  4993. struct ggml_context * ctx,
  4994. struct ggml_tensor * a,
  4995. const ggml_custom1_op_t fun,
  4996. int n_tasks,
  4997. void * userdata,
  4998. bool inplace) {
  4999. GGML_ASSERT(n_tasks == GGML_N_TASKS_MAX || n_tasks > 0);
  5000. bool is_node = false;
  5001. if (!inplace && a->grad) {
  5002. is_node = true;
  5003. }
  5004. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  5005. struct ggml_map_custom1_op_params params = {
  5006. /*.fun =*/ fun,
  5007. /*.n_tasks =*/ n_tasks,
  5008. /*.userdata =*/ userdata
  5009. };
  5010. ggml_set_op_params(result, (const void *) &params, sizeof(params));
  5011. result->op = GGML_OP_MAP_CUSTOM1;
  5012. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5013. result->src[0] = a;
  5014. return result;
  5015. }
  5016. struct ggml_tensor * ggml_map_custom1(
  5017. struct ggml_context * ctx,
  5018. struct ggml_tensor * a,
  5019. const ggml_custom1_op_t fun,
  5020. int n_tasks,
  5021. void * userdata) {
  5022. return ggml_map_custom1_impl(ctx, a, fun, n_tasks, userdata, false);
  5023. }
  5024. struct ggml_tensor * ggml_map_custom1_inplace(
  5025. struct ggml_context * ctx,
  5026. struct ggml_tensor * a,
  5027. const ggml_custom1_op_t fun,
  5028. int n_tasks,
  5029. void * userdata) {
  5030. return ggml_map_custom1_impl(ctx, a, fun, n_tasks, userdata, true);
  5031. }
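// NOTE (added, illustrative only): sketch of a custom unary op run over n_tasks
// threads. The callback type is ggml_custom1_op_t (declared in ggml.h); the
// signature shown here is an assumption for illustration, and `my_op` / `userdata`
// are placeholder names:
//
//     static void my_op(struct ggml_tensor * dst, const struct ggml_tensor * a,
//                       int ith, int nth, void * userdata) {
//         // each thread processes the slice of rows it derives from ith/nth
//     }
//     ...
//     cur = ggml_map_custom1(ctx, cur, my_op, GGML_N_TASKS_MAX, NULL);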
  5032. // ggml_map_custom2
  5033. struct ggml_map_custom2_op_params {
  5034. ggml_custom2_op_t fun;
  5035. int n_tasks;
  5036. void * userdata;
  5037. };
  5038. static struct ggml_tensor * ggml_map_custom2_impl(
  5039. struct ggml_context * ctx,
  5040. struct ggml_tensor * a,
  5041. struct ggml_tensor * b,
  5042. const ggml_custom2_op_t fun,
  5043. int n_tasks,
  5044. void * userdata,
  5045. bool inplace) {
  5046. GGML_ASSERT(n_tasks == GGML_N_TASKS_MAX || n_tasks > 0);
  5047. bool is_node = false;
  5048. if (!inplace && (a->grad || b->grad)) {
  5049. is_node = true;
  5050. }
  5051. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  5052. struct ggml_map_custom2_op_params params = {
  5053. /*.fun =*/ fun,
  5054. /*.n_tasks =*/ n_tasks,
  5055. /*.userdata =*/ userdata
  5056. };
  5057. ggml_set_op_params(result, (const void *) &params, sizeof(params));
  5058. result->op = GGML_OP_MAP_CUSTOM2;
  5059. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5060. result->src[0] = a;
  5061. result->src[1] = b;
  5062. return result;
  5063. }
  5064. struct ggml_tensor * ggml_map_custom2(
  5065. struct ggml_context * ctx,
  5066. struct ggml_tensor * a,
  5067. struct ggml_tensor * b,
  5068. const ggml_custom2_op_t fun,
  5069. int n_tasks,
  5070. void * userdata) {
  5071. return ggml_map_custom2_impl(ctx, a, b, fun, n_tasks, userdata, false);
  5072. }
  5073. struct ggml_tensor * ggml_map_custom2_inplace(
  5074. struct ggml_context * ctx,
  5075. struct ggml_tensor * a,
  5076. struct ggml_tensor * b,
  5077. const ggml_custom2_op_t fun,
  5078. int n_tasks,
  5079. void * userdata) {
  5080. return ggml_map_custom2_impl(ctx, a, b, fun, n_tasks, userdata, true);
  5081. }
  5082. // ggml_map_custom3
  5083. struct ggml_map_custom3_op_params {
  5084. ggml_custom3_op_t fun;
  5085. int n_tasks;
  5086. void * userdata;
  5087. };
  5088. static struct ggml_tensor * ggml_map_custom3_impl(
  5089. struct ggml_context * ctx,
  5090. struct ggml_tensor * a,
  5091. struct ggml_tensor * b,
  5092. struct ggml_tensor * c,
  5093. const ggml_custom3_op_t fun,
  5094. int n_tasks,
  5095. void * userdata,
  5096. bool inplace) {
  5097. GGML_ASSERT(n_tasks == GGML_N_TASKS_MAX || n_tasks > 0);
  5098. bool is_node = false;
  5099. if (!inplace && (a->grad || b->grad || c->grad)) {
  5100. is_node = true;
  5101. }
  5102. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  5103. struct ggml_map_custom3_op_params params = {
  5104. /*.fun =*/ fun,
  5105. /*.n_tasks =*/ n_tasks,
  5106. /*.userdata =*/ userdata
  5107. };
  5108. ggml_set_op_params(result, (const void *) &params, sizeof(params));
  5109. result->op = GGML_OP_MAP_CUSTOM3;
  5110. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5111. result->src[0] = a;
  5112. result->src[1] = b;
  5113. result->src[2] = c;
  5114. return result;
  5115. }
  5116. struct ggml_tensor * ggml_map_custom3(
  5117. struct ggml_context * ctx,
  5118. struct ggml_tensor * a,
  5119. struct ggml_tensor * b,
  5120. struct ggml_tensor * c,
  5121. const ggml_custom3_op_t fun,
  5122. int n_tasks,
  5123. void * userdata) {
  5124. return ggml_map_custom3_impl(ctx, a, b, c, fun, n_tasks, userdata, false);
  5125. }
  5126. struct ggml_tensor * ggml_map_custom3_inplace(
  5127. struct ggml_context * ctx,
  5128. struct ggml_tensor * a,
  5129. struct ggml_tensor * b,
  5130. struct ggml_tensor * c,
  5131. const ggml_custom3_op_t fun,
  5132. int n_tasks,
  5133. void * userdata) {
  5134. return ggml_map_custom3_impl(ctx, a, b, c, fun, n_tasks, userdata, true);
  5135. }
  5136. // ggml_cross_entropy_loss
  5137. struct ggml_tensor * ggml_cross_entropy_loss(
  5138. struct ggml_context * ctx,
  5139. struct ggml_tensor * a,
  5140. struct ggml_tensor * b) {
  5141. GGML_ASSERT(ggml_are_same_shape(a, b));
  5142. bool is_node = false;
  5143. if (a->grad || b->grad) {
  5144. is_node = true;
  5145. }
  5146. struct ggml_tensor * result = ggml_new_tensor_1d(ctx, a->type, 1);
  5147. result->op = GGML_OP_CROSS_ENTROPY_LOSS;
  5148. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5149. result->src[0] = a;
  5150. result->src[1] = b;
  5151. return result;
  5152. }
  5153. // ggml_cross_entropy_loss_back
  5154. struct ggml_tensor * ggml_cross_entropy_loss_back(
  5155. struct ggml_context * ctx,
  5156. struct ggml_tensor * a,
  5157. struct ggml_tensor * b,
  5158. struct ggml_tensor * c) {
  5159. GGML_ASSERT(ggml_are_same_shape(a, b));
  5160. GGML_ASSERT(ggml_is_scalar(c));
  5161. struct ggml_tensor * result = ggml_dup_tensor(ctx, a);
  5162. result->op = GGML_OP_CROSS_ENTROPY_LOSS_BACK;
  5163. result->grad = NULL;
  5164. result->src[0] = a;
  5165. result->src[1] = b;
  5166. result->src[2] = c;
  5167. return result;
  5168. }
  5169. ////////////////////////////////////////////////////////////////////////////////
  5170. void ggml_set_param(
  5171. struct ggml_context * ctx,
  5172. struct ggml_tensor * tensor) {
  5173. tensor->is_param = true;
  5174. GGML_ASSERT(tensor->grad == NULL);
  5175. tensor->grad = ggml_dup_tensor(ctx, tensor);
  5176. ggml_format_name(tensor->grad, "%s (grad)", tensor->name);
  5177. }
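// NOTE (added, illustrative only): marking a tensor as a trainable parameter
// allocates its gradient and makes it a leaf that the optimizer updates:
//
//     struct ggml_tensor * w = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_in, n_out);
//     ggml_set_param(ctx, w);   // w->grad is allocated; w participates in backward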
  5178. // ggml_compute_forward_dup
  5179. static void ggml_compute_forward_dup_same_cont(
  5180. const struct ggml_compute_params * params,
  5181. const struct ggml_tensor * src0,
  5182. struct ggml_tensor * dst) {
  5183. GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));
  5184. GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));
  5185. GGML_ASSERT(src0->type == dst->type);
  5186. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  5187. return;
  5188. }
  5189. const size_t nb00 = src0->nb[0];
  5190. const size_t nb0 = dst->nb[0];
  5191. const int ith = params->ith; // thread index
  5192. const int nth = params->nth; // number of threads
  5193. // parallelize by elements
  5194. const int ne = ggml_nelements(dst);
  5195. const int dr = (ne + nth - 1) / nth;
  5196. const int ie0 = dr * ith;
  5197. const int ie1 = MIN(ie0 + dr, ne);
  5198. if (ie0 < ie1) {
  5199. memcpy(
  5200. ((char *) dst->data + ie0*nb0),
  5201. ((char *) src0->data + ie0*nb00),
  5202. (ie1 - ie0) * ggml_type_size(src0->type));
  5203. }
  5204. }
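// Threading note (added): the contiguous same-type copy above is split by elements,
// not by rows. With ne = 10 and nth = 4 threads, dr = (10 + 3)/4 = 3, so the threads
// copy element ranges [0,3), [3,6), [6,9) and [9,10); the last range is clamped by
// MIN(ie0 + dr, ne), and threads whose range is empty simply skip the memcpy.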
static void ggml_compute_forward_dup_f16(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    GGML_TENSOR_UNARY_OP_LOCALS

    const int ith = params->ith; // thread index
    const int nth = params->nth; // number of threads

    if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst) && src0->type == dst->type) {
        ggml_compute_forward_dup_same_cont(params, src0, dst);
        return;
    }

    // parallelize by rows
    const int nr = ne01;
    // number of rows per thread
    const int dr = (nr + nth - 1) / nth;
    // row range for this thread
    const int ir0 = dr * ith;
    const int ir1 = MIN(ir0 + dr, nr);

    if (src0->type == dst->type &&
        ne00 == ne0 &&
        nb00 == ggml_type_size(src0->type) && nb0 == ggml_type_size(dst->type)) {
        // copy by rows
        const size_t rs = ne00*nb00;
        for (int64_t i03 = 0; i03 < ne03; i03++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
                for (int64_t i01 = ir0; i01 < ir1; i01++) {
                    memcpy(
                        ((char *)  dst->data + i01*nb1  + i02*nb2  + i03*nb3),
                        ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03),
                        rs);
                }
            }
        }
        return;
    }

    // TODO: add more special-case implementations for tensor shapes/strides that can benefit from memcpy

    if (ggml_is_contiguous(dst)) {
        if (nb00 == sizeof(ggml_fp16_t)) {
            if (dst->type == GGML_TYPE_F16) {
                size_t id = 0;
                const size_t rs = ne00 * nb00;
                char * dst_ptr = (char *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += rs * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            const char * src0_ptr = (char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03;
                            memcpy(dst_ptr + id, src0_ptr, rs);
                            id += rs;
                        }
                        id += rs * (ne01 - ir1);
                    }
                }
            } else if (dst->type == GGML_TYPE_F32) {
                size_t id = 0;
                float * dst_ptr = (float *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += ne00 * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
                            for (int i00 = 0; i00 < ne00; i00++) {
                                dst_ptr[id] = GGML_FP16_TO_FP32(src0_ptr[i00]);
                                id++;
                            }
                        }
                        id += ne00 * (ne01 - ir1);
                    }
                }
            } else if (type_traits[dst->type].from_float) {
                ggml_from_float_t const quantize_row_q = type_traits[dst->type].from_float;
                float * src0_f32 = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith;

                size_t id = 0;
                size_t rs = nb0 * (ne00 / ggml_blck_size(dst->type));
                char * dst_ptr = (char *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += rs * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);

                            for (int i00 = 0; i00 < ne00; i00++) {
                                src0_f32[i00] = GGML_FP16_TO_FP32(src0_ptr[i00]);
                            }

                            quantize_row_q(src0_f32, dst_ptr + id, ne00);
                            id += rs;
                        }
                        id += rs * (ne01 - ir1);
                    }
                }
            } else {
                GGML_ASSERT(false); // TODO: implement
            }
        } else {
            //printf("%s: this is not optimal - fix me\n", __func__);

            if (dst->type == GGML_TYPE_F32) {
                size_t id = 0;
                float * dst_ptr = (float *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += ne00 * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            for (int i00 = 0; i00 < ne00; i00++) {
                                const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);

                                dst_ptr[id] = GGML_FP16_TO_FP32(*src0_ptr);
                                id++;
                            }
                        }
                        id += ne00 * (ne01 - ir1);
                    }
                }
            } else if (dst->type == GGML_TYPE_F16) {
                size_t id = 0;
                ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += ne00 * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            for (int i00 = 0; i00 < ne00; i00++) {
                                const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);

                                dst_ptr[id] = *src0_ptr;
                                id++;
                            }
                        }
                        id += ne00 * (ne01 - ir1);
                    }
                }
            } else {
                GGML_ASSERT(false); // TODO: implement
            }
        }
        return;
    }

    // dst counters
    int64_t i10 = 0;
    int64_t i11 = 0;
    int64_t i12 = 0;
    int64_t i13 = 0;
    if (dst->type == GGML_TYPE_F16) {
        for (int64_t i03 = 0; i03 < ne03; i03++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
                i10 += ne00 * ir0;
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
                for (int64_t i01 = ir0; i01 < ir1; i01++) {
                    for (int64_t i00 = 0; i00 < ne00; i00++) {
                        const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
                        char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3);

                        memcpy(dst_ptr, src0_ptr, sizeof(ggml_fp16_t));

                        // wrap the dst counters against the dst dimensions (ne0..ne3),
                        // same as the F32 branch below
                        if (++i10 == ne0) {
                            i10 = 0;
                            if (++i11 == ne1) {
                                i11 = 0;
                                if (++i12 == ne2) {
                                    i12 = 0;
                                    if (++i13 == ne3) {
                                        i13 = 0;
                                    }
                                }
                            }
                        }
                    }
                }
                i10 += ne00 * (ne01 - ir1);
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
            }
        }
    } else if (dst->type == GGML_TYPE_F32) {
        for (int64_t i03 = 0; i03 < ne03; i03++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
                i10 += ne00 * ir0;
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
                for (int64_t i01 = ir0; i01 < ir1; i01++) {
                    for (int64_t i00 = 0; i00 < ne00; i00++) {
                        const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
                        char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3);

                        *(float *) dst_ptr = GGML_FP16_TO_FP32(*(const ggml_fp16_t *) src0_ptr);

                        if (++i10 == ne0) {
                            i10 = 0;
                            if (++i11 == ne1) {
                                i11 = 0;
                                if (++i12 == ne2) {
                                    i12 = 0;
                                    if (++i13 == ne3) {
                                        i13 = 0;
                                    }
                                }
                            }
                        }
                    }
                }
                i10 += ne00 * (ne01 - ir1);
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
            }
        }
    } else {
        GGML_ASSERT(false); // TODO: implement
    }
}
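
// same as above, but for an F32 src0: F32 -> F32/F16/quantized dst types, with the
// same contiguous fast paths and a generic element-by-element fallback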
static void ggml_compute_forward_dup_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    GGML_TENSOR_UNARY_OP_LOCALS

    const int ith = params->ith; // thread index
    const int nth = params->nth; // number of threads

    if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst) && src0->type == dst->type) {
        ggml_compute_forward_dup_same_cont(params, src0, dst);
        return;
    }

    // parallelize by rows
    const int nr = ne01;
    // number of rows per thread
    const int dr = (nr + nth - 1) / nth;
    // row range for this thread
    const int ir0 = dr * ith;
    const int ir1 = MIN(ir0 + dr, nr);

    if (src0->type == dst->type &&
        ne00 == ne0 &&
        nb00 == ggml_type_size(src0->type) && nb0 == ggml_type_size(dst->type)) {
        // copy by rows
        const size_t rs = ne00*nb00;
        for (int64_t i03 = 0; i03 < ne03; i03++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
                for (int64_t i01 = ir0; i01 < ir1; i01++) {
                    memcpy(
                        ((char *)  dst->data + i01*nb1  + i02*nb2  + i03*nb3),
                        ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03),
                        rs);
                }
            }
        }
        return;
    }

    if (ggml_is_contiguous(dst)) {
        // TODO: simplify
        if (nb00 == sizeof(float)) {
            if (dst->type == GGML_TYPE_F32) {
                size_t id = 0;
                const size_t rs = ne00 * nb00;
                char * dst_ptr = (char *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += rs * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            const char * src0_ptr = (char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03;
                            memcpy(dst_ptr + id, src0_ptr, rs);
                            id += rs;
                        }
                        id += rs * (ne01 - ir1);
                    }
                }
            } else if (type_traits[dst->type].from_float) {
                ggml_from_float_t const quantize_row_q = type_traits[dst->type].from_float;

                size_t id = 0;
                size_t rs = nb0 * (ne00 / ggml_blck_size(dst->type));
                char * dst_ptr = (char *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += rs * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            const float * src0_ptr = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
                            quantize_row_q(src0_ptr, dst_ptr + id, ne00);
                            id += rs;
                        }
                        id += rs * (ne01 - ir1);
                    }
                }
            } else {
                GGML_ASSERT(false); // TODO: implement
            }
        } else {
            //printf("%s: this is not optimal - fix me\n", __func__);

            if (dst->type == GGML_TYPE_F32) {
                size_t id = 0;
                float * dst_ptr = (float *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += ne00 * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            for (int i00 = 0; i00 < ne00; i00++) {
                                const float * src0_ptr = (float *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);

                                dst_ptr[id] = *src0_ptr;
                                id++;
                            }
                        }
                        id += ne00 * (ne01 - ir1);
                    }
                }
            } else if (dst->type == GGML_TYPE_F16) {
                size_t id = 0;
                ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += ne00 * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            for (int i00 = 0; i00 < ne00; i00++) {
                                const float * src0_ptr = (float *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);

                                dst_ptr[id] = GGML_FP32_TO_FP16(*src0_ptr);
                                id++;
                            }
                        }
                        id += ne00 * (ne01 - ir1);
                    }
                }
            } else {
                GGML_ASSERT(false); // TODO: implement
            }
        }

        return;
    }

    // dst counters
    int64_t i10 = 0;
    int64_t i11 = 0;
    int64_t i12 = 0;
    int64_t i13 = 0;

    if (dst->type == GGML_TYPE_F32) {
        for (int64_t i03 = 0; i03 < ne03; i03++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
                i10 += ne00 * ir0;
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
                for (int64_t i01 = ir0; i01 < ir1; i01++) {
                    for (int64_t i00 = 0; i00 < ne00; i00++) {
                        const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
                        char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3);

                        memcpy(dst_ptr, src0_ptr, sizeof(float));

                        if (++i10 == ne0) {
                            i10 = 0;
                            if (++i11 == ne1) {
                                i11 = 0;
                                if (++i12 == ne2) {
                                    i12 = 0;
                                    if (++i13 == ne3) {
                                        i13 = 0;
                                    }
                                }
                            }
                        }
                    }
                }
                i10 += ne00 * (ne01 - ir1);
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
            }
        }
    } else if (dst->type == GGML_TYPE_F16) {
        for (int64_t i03 = 0; i03 < ne03; i03++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
                i10 += ne00 * ir0;
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
                for (int64_t i01 = ir0; i01 < ir1; i01++) {
                    for (int64_t i00 = 0; i00 < ne00; i00++) {
                        const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
                        char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3);

                        *(ggml_fp16_t *) dst_ptr = GGML_FP32_TO_FP16(*(const float *) src0_ptr);

                        if (++i10 == ne0) {
                            i10 = 0;
                            if (++i11 == ne1) {
                                i11 = 0;
                                if (++i12 == ne2) {
                                    i12 = 0;
                                    if (++i13 == ne3) {
                                        i13 = 0;
                                    }
                                }
                            }
                        }
                    }
                }
                i10 += ne00 * (ne01 - ir1);
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
            }
        }
    } else {
        GGML_ASSERT(false); // TODO: implement
    }
}
static void ggml_compute_forward_dup(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst) && src0->type == dst->type) {
        ggml_compute_forward_dup_same_cont(params, src0, dst);
        return;
    }
    switch (src0->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_dup_f16(params, src0, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_dup_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_add
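// dst = src0 + src1, with src1 broadcast across src0 in dims 1..3 and repeated along
// dim 0 (ne00/ne10 chunks per row); rows of src0 are distributed across threads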
static void ggml_compute_forward_add_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_can_repeat(src1, src0) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(src0);

    GGML_TENSOR_BINARY_OP_LOCALS

    GGML_ASSERT( nb0 == sizeof(float));
    GGML_ASSERT(nb00 == sizeof(float));

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    if (nb10 == sizeof(float)) {
        for (int ir = ir0; ir < ir1; ++ir) {
            // src1 is broadcastable across src0 and dst in i1, i2, i3
            const int64_t i03 = ir/(ne02*ne01);
            const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
            const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);

            const int64_t i13 = i03 % ne13;
            const int64_t i12 = i02 % ne12;
            const int64_t i11 = i01 % ne11;

            const int64_t nr0 = ne00 / ne10;

            float * dst_ptr  = (float *) ((char *) dst->data  + i03*nb3  + i02*nb2  + i01*nb1 );
            float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);
            float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11);

            for (int64_t r = 0; r < nr0; ++r) {
#ifdef GGML_USE_ACCELERATE
                vDSP_vadd(src0_ptr + r*ne10, 1, src1_ptr, 1, dst_ptr + r*ne10, 1, ne10);
#else
                ggml_vec_add_f32(ne10, dst_ptr + r*ne10, src0_ptr + r*ne10, src1_ptr);
#endif
            }
        }
    } else {
        // src1 is not contiguous
        for (int ir = ir0; ir < ir1; ++ir) {
            // src1 is broadcastable across src0 and dst in i1, i2, i3
            const int64_t i03 = ir/(ne02*ne01);
            const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
            const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);

            const int64_t i13 = i03 % ne13;
            const int64_t i12 = i02 % ne12;
            const int64_t i11 = i01 % ne11;

            float * dst_ptr  = (float *) ((char *) dst->data  + i03*nb3  + i02*nb2  + i01*nb1 );
            float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);

            for (int64_t i0 = 0; i0 < ne0; ++i0) {
                const int64_t i10 = i0 % ne10;
                float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + i10*nb10);

                dst_ptr[i0] = src0_ptr[i0] + *src1_ptr;
            }
        }
    }
}
static void ggml_compute_forward_add_f16_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(src0);

    GGML_TENSOR_BINARY_OP_LOCALS

    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);

    if (dst->type == GGML_TYPE_F32) {
        GGML_ASSERT( nb0 == sizeof(float));
    }
    else {
        GGML_ASSERT(dst->type == GGML_TYPE_F16);
        GGML_ASSERT( nb0 == sizeof(ggml_fp16_t));
    }

    GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    if (nb10 == sizeof(float)) {
        if (dst->type == GGML_TYPE_F16) {
            for (int ir = ir0; ir < ir1; ++ir) {
                // src0, src1 and dst are same shape => same indices
                const int i3 = ir/(ne2*ne1);
                const int i2 = (ir - i3*ne2*ne1)/ne1;
                const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

                ggml_fp16_t * dst_ptr  = (ggml_fp16_t *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1);
                ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
                float *       src1_ptr = (float *)       ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11);

                for (int i = 0; i < ne0; i++) {
                    dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + src1_ptr[i]);
                }
            }
        } else {
            for (int ir = ir0; ir < ir1; ++ir) {
                // src0, src1 and dst are same shape => same indices
                const int i3 = ir/(ne2*ne1);
                const int i2 = (ir - i3*ne2*ne1)/ne1;
                const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

                float *       dst_ptr  = (float *)       ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1);
                ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
                float *       src1_ptr = (float *)       ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11);

                for (int i = 0; i < ne0; i++) {
                    dst_ptr[i] = GGML_FP16_TO_FP32(src0_ptr[i]) + src1_ptr[i];
                }
            }
        }
    }
    else {
        // src1 is not contiguous
        GGML_ASSERT(false);
    }
}
static void ggml_compute_forward_add_f16_f16(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(src0);

    GGML_TENSOR_BINARY_OP_LOCALS

    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F16);
    GGML_ASSERT(dst->type  == GGML_TYPE_F16);

    GGML_ASSERT( nb0 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    if (nb10 == sizeof(ggml_fp16_t)) {
        for (int ir = ir0; ir < ir1; ++ir) {
            // src0, src1 and dst are same shape => same indices
            const int i3 = ir/(ne2*ne1);
            const int i2 = (ir - i3*ne2*ne1)/ne1;
            const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

            ggml_fp16_t * dst_ptr  = (ggml_fp16_t *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1);
            ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
            ggml_fp16_t * src1_ptr = (ggml_fp16_t *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11);

            for (int i = 0; i < ne0; i++) {
                dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + GGML_FP16_TO_FP32(src1_ptr[i]));
            }
        }
    }
    else {
        // src1 is not contiguous
        GGML_ASSERT(false);
    }
}
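
// quantized src0 + F32 src1: each src0 row is dequantized into the per-thread wdata
// buffer, src1 is accumulated into it, and the row is re-quantized into dst
// (or copied as-is when dst->type has no from_float handler)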
static void ggml_compute_forward_add_q_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int nr = ggml_nrows(src0);

    GGML_TENSOR_BINARY_OP_LOCALS

    const int ith = params->ith;
    const int nth = params->nth;

    const enum ggml_type type  = src0->type;
    const enum ggml_type dtype = dst->type;
    ggml_to_float_t   const dequantize_row_q = type_traits[type].to_float;
    ggml_from_float_t const quantize_row_q   = type_traits[dtype].from_float;

    // we don't support permuted src0 or src1
    GGML_ASSERT(nb00 == ggml_type_size(type));
    GGML_ASSERT(nb10 == sizeof(float));

    // dst cannot be transposed or permuted
    GGML_ASSERT(nb0 <= nb1);
    GGML_ASSERT(nb1 <= nb2);
    GGML_ASSERT(nb2 <= nb3);

    GGML_ASSERT(ggml_is_quantized(src0->type));
    GGML_ASSERT(src1->type == GGML_TYPE_F32);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    float * wdata = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith;

    for (int ir = ir0; ir < ir1; ++ir) {
        // src0 indices
        const int i03 = ir/(ne02*ne01);
        const int i02 = (ir - i03*ne02*ne01)/ne01;
        const int i01 = (ir - i03*ne02*ne01 - i02*ne01);

        // src1 and dst are same shape as src0 => same indices
        const int i13 = i03;
        const int i12 = i02;
        const int i11 = i01;

        const int i3 = i03;
        const int i2 = i02;
        const int i1 = i01;

        void  * src0_row = (void *) ((char *) src0->data + (i01*nb01 + i02*nb02 + i03*nb03));
        float * src1_row = (float *)((char *) src1->data + (i11*nb11 + i12*nb12 + i13*nb13));
        void  * dst_row  = (void *) ((char *)  dst->data + ( i1*nb1  +  i2*nb2  +  i3*nb3));

        assert(ne00 % 32 == 0);

        // unquantize row from src0 to temp buffer
        dequantize_row_q(src0_row, wdata, ne00);
        // add src1
        ggml_vec_acc_f32(ne00, wdata, src1_row);
        // quantize row to dst
        if (quantize_row_q != NULL) {
            quantize_row_q(wdata, dst_row, ne00);
        } else {
            memcpy(dst_row, wdata, ne0*nb0);
        }
    }
}
static void ggml_compute_forward_add(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_add_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F16:
            {
                if (src1->type == GGML_TYPE_F16) {
                    ggml_compute_forward_add_f16_f16(params, src0, src1, dst);
                }
                else if (src1->type == GGML_TYPE_F32) {
                    ggml_compute_forward_add_f16_f32(params, src0, src1, dst);
                }
                else {
                    GGML_ASSERT(false);
                }
            } break;
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
            {
                ggml_compute_forward_add_q_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_add1
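// add1: dst = src0 + v, where src1 is a single scalar broadcast over all elements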
static void ggml_compute_forward_add1_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, dst));
    GGML_ASSERT(ggml_is_scalar(src1));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(src0);

    GGML_TENSOR_UNARY_OP_LOCALS

    GGML_ASSERT( nb0 == sizeof(float));
    GGML_ASSERT(nb00 == sizeof(float));

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int ir = ir0; ir < ir1; ++ir) {
        // src0 and dst are same shape => same indices
        const int i3 = ir/(ne2*ne1);
        const int i2 = (ir - i3*ne2*ne1)/ne1;
        const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

#ifdef GGML_USE_ACCELERATE
        UNUSED(ggml_vec_add1_f32);

        vDSP_vadd(
                (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01), 1,
                (float *) ((char *) src1->data), 0,
                (float *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1 ), 1,
                ne0);
#else
        ggml_vec_add1_f32(ne0,
                (float *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1 ),
                (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01),
               *(float *) src1->data);
#endif
    }
}
static void ggml_compute_forward_add1_f16_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, dst));
    GGML_ASSERT(ggml_is_scalar(src1));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // scalar to add
    const float v = *(float *) src1->data;

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(src0);

    GGML_TENSOR_UNARY_OP_LOCALS

    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT(dst->type  == GGML_TYPE_F16);

    GGML_ASSERT( nb0 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int ir = ir0; ir < ir1; ++ir) {
        // src0 and dst are same shape => same indices
        const int i3 = ir/(ne2*ne1);
        const int i2 = (ir - i3*ne2*ne1)/ne1;
        const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

        ggml_fp16_t * dst_ptr  = (ggml_fp16_t *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1 );
        ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
        for (int i = 0; i < ne0; i++) {
            dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + v);
        }
    }
}
static void ggml_compute_forward_add1_f16_f16(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, dst));
    GGML_ASSERT(ggml_is_scalar(src1));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // scalar to add
    const float v = GGML_FP16_TO_FP32(*(ggml_fp16_t *) src1->data);

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(src0);

    GGML_TENSOR_UNARY_OP_LOCALS

    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F16);
    GGML_ASSERT(dst->type  == GGML_TYPE_F16);

    GGML_ASSERT( nb0 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int ir = ir0; ir < ir1; ++ir) {
        // src0 and dst are same shape => same indices
        const int i3 = ir/(ne2*ne1);
        const int i2 = (ir - i3*ne2*ne1)/ne1;
        const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

        ggml_fp16_t * dst_ptr  = (ggml_fp16_t *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1 );
        ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
        for (int i = 0; i < ne0; i++) {
            dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + v);
        }
    }
}
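
// quantized variant of add1: dequantize each src0 row into wdata, add the scalar,
// then quantize the row back into dst (dst must use the same quantized type as src0)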
static void ggml_compute_forward_add1_q_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, dst));
    GGML_ASSERT(ggml_is_scalar(src1));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // scalar to add
    const float v = *(float *) src1->data;

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(src0);

    GGML_TENSOR_UNARY_OP_LOCALS

    const enum ggml_type type = src0->type;
    ggml_to_float_t   const dequantize_row_q = type_traits[type].to_float;
    ggml_from_float_t const quantize_row_q   = type_traits[type].from_float;

    // we don't support permuted src0
    GGML_ASSERT(nb00 == ggml_type_size(type));

    // dst cannot be transposed or permuted
    GGML_ASSERT(nb0 <= nb1);
    GGML_ASSERT(nb1 <= nb2);
    GGML_ASSERT(nb2 <= nb3);

    GGML_ASSERT(ggml_is_quantized(src0->type));
    GGML_ASSERT(dst->type == src0->type);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    float * wdata = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32) * ith;

    for (int ir = ir0; ir < ir1; ++ir) {
        // src0 and dst are same shape => same indices
        const int i3 = ir/(ne2*ne1);
        const int i2 = (ir - i3*ne2*ne1)/ne1;
        const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

        void * src0_row = (void *) ((char *) src0->data + (i1*nb01 + i2*nb02 + i3*nb03));
        void * dst_row  = (void *) ((char *)  dst->data + (i1*nb1  + i2*nb2  + i3*nb3 ));
        assert(ne0 % 32 == 0);

        // unquantize row from src0 to temp buffer
        dequantize_row_q(src0_row, wdata, ne0);
        // add src1
        ggml_vec_acc1_f32(ne0, wdata, v);
        // quantize row to dst
        quantize_row_q(wdata, dst_row, ne0);
    }
}
static void ggml_compute_forward_add1(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_add1_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F16:
            {
                if (src1->type == GGML_TYPE_F16) {
                    ggml_compute_forward_add1_f16_f16(params, src0, src1, dst);
                }
                else if (src1->type == GGML_TYPE_F32) {
                    ggml_compute_forward_add1_f16_f32(params, src0, src1, dst);
                }
                else {
                    GGML_ASSERT(false);
                }
            } break;
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q8_1:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
            {
                ggml_compute_forward_add1_q_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_acc
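// acc: accumulate src1 into a view of dst described by the strides/offset stored in
// dst->op_params; unless the op is in-place, src0 is first copied into dst during INIT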
static void ggml_compute_forward_acc_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, dst));
    GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));

    // view src0 and dst with these strides and data offset inbytes during acc
    // nb0 is implicitly element_size because src0 and dst are contiguous
    size_t nb1     = ((int32_t *) dst->op_params)[0];
    size_t nb2     = ((int32_t *) dst->op_params)[1];
    size_t nb3     = ((int32_t *) dst->op_params)[2];
    size_t offset  = ((int32_t *) dst->op_params)[3];
    bool   inplace = (bool) ((int32_t *) dst->op_params)[4];

    if (!inplace && (params->type == GGML_TASK_INIT)) {
        // memcpy needs to be synchronized across threads to avoid race conditions.
        // => do it in INIT phase
        memcpy(
            ((char *)  dst->data),
            ((char *) src0->data),
            ggml_nbytes(dst));
    }

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(src1);
    const int nc = src1->ne[0];

    GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne)
    GGML_TENSOR_LOCALS(size_t,  nb1, src1, nb)

    // src0 and dst as viewed during acc
    const size_t nb0 = ggml_element_size(src0);

    const size_t nb00 = nb0;
    const size_t nb01 = nb1;
    const size_t nb02 = nb2;
    const size_t nb03 = nb3;

    GGML_ASSERT(offset + (ne10 == 0 ? 0 : ne10-1)*nb0  + (ne11 == 0 ? 0 : ne11-1)*nb1  + (ne12 == 0 ? 0 : ne12-1)*nb2  + (ne13 == 0 ? 0 : ne13-1)*nb3  < ggml_nbytes(dst));
    GGML_ASSERT(offset + (ne10 == 0 ? 0 : ne10-1)*nb00 + (ne11 == 0 ? 0 : ne11-1)*nb01 + (ne12 == 0 ? 0 : ne12-1)*nb02 + (ne13 == 0 ? 0 : ne13-1)*nb03 < ggml_nbytes(src0));

    GGML_ASSERT(nb10 == sizeof(float));

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int ir = ir0; ir < ir1; ++ir) {
        // src0 and dst are viewed with shape of src1 and offset
        // => same indices
        const int i3 = ir/(ne12*ne11);
        const int i2 = (ir - i3*ne12*ne11)/ne11;
        const int i1 = (ir - i3*ne12*ne11 - i2*ne11);

#ifdef GGML_USE_ACCELERATE
        vDSP_vadd(
                (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + offset), 1,
                (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11), 1,
                (float *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1  + offset), 1, nc);
#else
        ggml_vec_add_f32(nc,
                (float *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1  + offset),
                (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + offset),
                (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11));
#endif
    }
}
static void ggml_compute_forward_acc(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_acc_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F16:
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q8_1:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_sub
static void ggml_compute_forward_sub_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int nr = ggml_nrows(src0);

    GGML_TENSOR_BINARY_OP_LOCALS

    GGML_ASSERT( nb0 == sizeof(float));
    GGML_ASSERT(nb00 == sizeof(float));

    if (nb10 == sizeof(float)) {
        for (int ir = 0; ir < nr; ++ir) {
            // src0, src1 and dst are same shape => same indices
            const int i3 = ir/(ne2*ne1);
            const int i2 = (ir - i3*ne2*ne1)/ne1;
            const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

#ifdef GGML_USE_ACCELERATE
            vDSP_vsub(
                    (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11), 1,
                    (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01), 1,
                    (float *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1 ), 1,
                    ne0);
#else
            ggml_vec_sub_f32(ne0,
                    (float *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1 ),
                    (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01),
                    (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11));
#endif
        }
    } else {
        // src1 is not contiguous
        for (int ir = 0; ir < nr; ++ir) {
            // src0, src1 and dst are same shape => same indices
            const int i3 = ir/(ne2*ne1);
            const int i2 = (ir - i3*ne2*ne1)/ne1;
            const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

            float * dst_ptr  = (float *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1 );
            float * src0_ptr = (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);

            for (int i0 = 0; i0 < ne0; i0++) {
                float * src1_ptr = (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11 + i0*nb10);

                dst_ptr[i0] = src0_ptr[i0] - *src1_ptr;
            }
        }
    }
}
static void ggml_compute_forward_sub(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_sub_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_mul
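// dst = src0 * src1, element-wise, with the same broadcasting rules as add; with
// GGML_USE_CLBLAST, a GPU-resident src1 is handled by ggml_cl_mul on thread 0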
static void ggml_compute_forward_mul_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_can_repeat(src1, src0) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

#ifdef GGML_USE_CLBLAST
    if (src1->backend == GGML_BACKEND_GPU) {
        // TODO: OpenCL kernel support full broadcast
        GGML_ASSERT(ggml_can_repeat_rows(src1, src0));
        if (ith == 0) {
            ggml_cl_mul(src0, src1, dst);
        }
        return;
    }
#endif

    const int64_t nr = ggml_nrows(src0);

    GGML_TENSOR_BINARY_OP_LOCALS

    GGML_ASSERT( nb0 == sizeof(float));
    GGML_ASSERT(nb00 == sizeof(float));

    if (nb10 == sizeof(float)) {
        for (int64_t ir = ith; ir < nr; ir += nth) {
            // src0 and dst are same shape => same indices
            const int64_t i03 = ir/(ne02*ne01);
            const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
            const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);

            const int64_t i13 = i03 % ne13;
            const int64_t i12 = i02 % ne12;
            const int64_t i11 = i01 % ne11;

            const int64_t nr0 = ne00 / ne10;

            float * dst_ptr  = (float *) ((char *) dst->data  + i03*nb3  + i02*nb2  + i01*nb1 );
            float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);
            float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11);

            for (int64_t r = 0 ; r < nr0; ++r) {
#ifdef GGML_USE_ACCELERATE
                UNUSED(ggml_vec_mul_f32);

                vDSP_vmul(src0_ptr + r*ne10, 1, src1_ptr, 1, dst_ptr + r*ne10, 1, ne10);
#else
                ggml_vec_mul_f32(ne10, dst_ptr + r*ne10, src0_ptr + r*ne10, src1_ptr);
#endif
            }
        }
    } else {
        // src1 is not contiguous
        for (int64_t ir = ith; ir < nr; ir += nth) {
            // src0 and dst are same shape => same indices
            // src1 is broadcastable across src0 and dst in i1, i2, i3
            const int64_t i03 = ir/(ne02*ne01);
            const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
            const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);

            const int64_t i13 = i03 % ne13;
            const int64_t i12 = i02 % ne12;
            const int64_t i11 = i01 % ne11;

            float * dst_ptr  = (float *) ((char *) dst->data  + i03*nb3  + i02*nb2  + i01*nb1 );
            float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);

            for (int64_t i0 = 0; i0 < ne00; ++i0) {
                const int64_t i10 = i0 % ne10;
                float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + i10*nb10);

                dst_ptr[i0] = src0_ptr[i0] * (*src1_ptr);
            }
        }
    }
}
static void ggml_compute_forward_mul(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(src1->type == GGML_TYPE_F32 && "only f32 src1 supported for now");

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_mul_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_div
static void ggml_compute_forward_div_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_can_repeat(src1, src0) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int64_t nr = ggml_nrows(src0);

    GGML_TENSOR_BINARY_OP_LOCALS

    GGML_ASSERT( nb0 == sizeof(float));
    GGML_ASSERT(nb00 == sizeof(float));

    if (nb10 == sizeof(float)) {
        for (int64_t ir = ith; ir < nr; ir += nth) {
            // src0 and dst are same shape => same indices
            const int64_t i03 = ir/(ne02*ne01);
            const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
            const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);

            const int64_t i13 = i03 % ne13;
            const int64_t i12 = i02 % ne12;
            const int64_t i11 = i01 % ne11;

            const int64_t nr0 = ne00 / ne10;

            float * dst_ptr  = (float *) ((char *) dst->data  + i03*nb3  + i02*nb2  + i01*nb1 );
            float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);
            float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11);

            for (int64_t r = 0; r < nr0; ++r) {
#ifdef GGML_USE_ACCELERATE
                UNUSED(ggml_vec_div_f32);

                vDSP_vdiv(src1_ptr, 1, src0_ptr + r*ne10, 1, dst_ptr + r*ne10, 1, ne10);
#else
                ggml_vec_div_f32(ne10, dst_ptr + r*ne10, src0_ptr + r*ne10, src1_ptr);
#endif
            }
        }
    } else {
        // src1 is not contiguous
        for (int64_t ir = ith; ir < nr; ir += nth) {
            // src0 and dst are same shape => same indices
            // src1 is broadcastable across src0 and dst in i1, i2, i3
            const int64_t i03 = ir/(ne02*ne01);
            const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
            const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);

            const int64_t i13 = i03 % ne13;
            const int64_t i12 = i02 % ne12;
            const int64_t i11 = i01 % ne11;

            float * dst_ptr  = (float *) ((char *) dst->data  + i03*nb3  + i02*nb2  + i01*nb1 );
            float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);

            for (int64_t i0 = 0; i0 < ne00; ++i0) {
                const int64_t i10 = i0 % ne10;
                float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + i10*nb10);

                dst_ptr[i0] = src0_ptr[i0] / (*src1_ptr);
            }
        }
    }
}
static void ggml_compute_forward_div(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_div_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_sqr
static void ggml_compute_forward_sqr_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert( dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_sqr_f32(nc,
                (float *) ((char *) dst->data  + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

static void ggml_compute_forward_sqr(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_sqr_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_sqrt
static void ggml_compute_forward_sqrt_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert( dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_sqrt_f32(nc,
                (float *) ((char *) dst->data  + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

static void ggml_compute_forward_sqrt(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_sqrt_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_log
static void ggml_compute_forward_log_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(params->ith == 0);
    GGML_ASSERT(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    GGML_ASSERT( dst->nb[0] == sizeof(float));
    GGML_ASSERT(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_log_f32(nc,
                (float *) ((char *) dst->data  + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

static void ggml_compute_forward_log(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_log_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_sum
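// sum: reduce all elements of src0 into a single scalar; the F32 path accumulates
// row sums in a higher-precision ggml_float via ggml_vec_sum_f32_ggf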
static void ggml_compute_forward_sum_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_is_scalar(dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    assert(src0->nb[0] == sizeof(float));

    GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne)
    GGML_TENSOR_LOCALS(size_t,  nb0, src0, nb)

    ggml_float sum     = 0;
    ggml_float row_sum = 0;

    for (int64_t i03 = 0; i03 < ne03; i03++) {
        for (int64_t i02 = 0; i02 < ne02; i02++) {
            for (int64_t i01 = 0; i01 < ne01; i01++) {
                ggml_vec_sum_f32_ggf(ne00,
                        &row_sum,
                        (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03));
                sum += row_sum;
            }
        }
    }
    ((float *) dst->data)[0] = sum;
}
static void ggml_compute_forward_sum_f16(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_is_scalar(dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    assert(src0->nb[0] == sizeof(ggml_fp16_t));

    GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne)
    GGML_TENSOR_LOCALS(size_t,  nb0, src0, nb)

    float sum     = 0;
    float row_sum = 0;

    for (int64_t i03 = 0; i03 < ne03; i03++) {
        for (int64_t i02 = 0; i02 < ne02; i02++) {
            for (int64_t i01 = 0; i01 < ne01; i01++) {
                ggml_vec_sum_f16_ggf(ne00,
                        &row_sum,
                        (ggml_fp16_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03));
                sum += row_sum;
            }
        }
    }
    ((ggml_fp16_t *) dst->data)[0] = GGML_FP32_TO_FP16(sum);
}

static void ggml_compute_forward_sum(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_sum_f32(params, src0, dst);
            } break;
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_sum_f16(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_sum_rows
static void ggml_compute_forward_sum_rows_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    GGML_ASSERT(src0->nb[0] == sizeof(float));
    GGML_ASSERT(dst->nb[0]  == sizeof(float));

    GGML_TENSOR_UNARY_OP_LOCALS

    GGML_ASSERT(ne0 == 1);
    GGML_ASSERT(ne1 == ne01);
    GGML_ASSERT(ne2 == ne02);
    GGML_ASSERT(ne3 == ne03);

    for (int64_t i3 = 0; i3 < ne03; i3++) {
        for (int64_t i2 = 0; i2 < ne02; i2++) {
            for (int64_t i1 = 0; i1 < ne01; i1++) {
                float * src_row = (float *) ((char *) src0->data + i1*nb01 + i2*nb02 + i3*nb03);
                float * dst_row = (float *) ((char *) dst->data  + i1*nb1  + i2*nb2  + i3*nb3);
                float row_sum = 0;
                ggml_vec_sum_f32(ne00, &row_sum, src_row);
                dst_row[0] = row_sum;
            }
        }
    }
}

static void ggml_compute_forward_sum_rows(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_sum_rows_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_mean
static void ggml_compute_forward_mean_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    assert(src0->nb[0] == sizeof(float));

    GGML_TENSOR_UNARY_OP_LOCALS

    assert(ne0 == 1);
    assert(ne1 == ne01);
    assert(ne2 == ne02);
    assert(ne3 == ne03);

    UNUSED(ne0);
    UNUSED(ne1);
    UNUSED(ne2);
    UNUSED(ne3);

    for (int64_t i03 = 0; i03 < ne03; i03++) {
        for (int64_t i02 = 0; i02 < ne02; i02++) {
            for (int64_t i01 = 0; i01 < ne01; i01++) {
                ggml_vec_sum_f32(ne00,
                        (float *) ((char *) dst->data  + i01*nb1  + i02*nb2  + i03*nb3),
                        (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03));

                *(float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3) /= (float) ne00;
            }
        }
    }
}

static void ggml_compute_forward_mean(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_mean_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_argmax
static void ggml_compute_forward_argmax_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    assert(src0->nb[0] == sizeof(float));
    assert(dst->nb[0]  == sizeof(float));

    const int64_t ne00 = src0->ne[0];
    const int64_t ne01 = src0->ne[1];

    const size_t nb01 = src0->nb[1];
    const size_t nb0  = dst->nb[0];

    for (int64_t i1 = 0; i1 < ne01; i1++) {
        float   * src  = (float *)   ((char *) src0->data + i1*nb01);
        int32_t * dst_ = (int32_t *) ((char *) dst->data  + i1*nb0);
        int v = 0;
        ggml_vec_argmax_f32(ne00, &v, src);
        dst_[0] = v;
    }
}

static void ggml_compute_forward_argmax(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_argmax_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_repeat
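// repeat: tile src0 across dst along each dimension; ggml_can_repeat guarantees that
// every dst extent is an integer multiple of the corresponding src0 extent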
static void ggml_compute_forward_repeat_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(params->ith == 0);
    GGML_ASSERT(ggml_can_repeat(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    GGML_TENSOR_UNARY_OP_LOCALS

    // guaranteed to be an integer due to the check in ggml_can_repeat
    const int nr0 = (int)(ne0/ne00);
    const int nr1 = (int)(ne1/ne01);
    const int nr2 = (int)(ne2/ne02);
    const int nr3 = (int)(ne3/ne03);

    // TODO: support for transposed / permuted tensors
    GGML_ASSERT(nb0  == sizeof(float));
    GGML_ASSERT(nb00 == sizeof(float));

    // TODO: maybe this is not optimal?
    for (int i3 = 0; i3 < nr3; i3++) {
        for (int k3 = 0; k3 < ne03; k3++) {
            for (int i2 = 0; i2 < nr2; i2++) {
                for (int k2 = 0; k2 < ne02; k2++) {
                    for (int i1 = 0; i1 < nr1; i1++) {
                        for (int k1 = 0; k1 < ne01; k1++) {
                            for (int i0 = 0; i0 < nr0; i0++) {
                                ggml_vec_cpy_f32(ne00,
                                        (float *) ((char *)  dst->data + (i3*ne03 + k3)*nb3  + (i2*ne02 + k2)*nb2  + (i1*ne01 + k1)*nb1  + (i0*ne00)*nb0),
                                        (float *) ((char *) src0->data + (          k3)*nb03 + (          k2)*nb02 + (          k1)*nb01));
                            }
                        }
                    }
                }
            }
        }
    }
}
  6839. static void ggml_compute_forward_repeat_f16(
  6840. const struct ggml_compute_params * params,
  6841. const struct ggml_tensor * src0,
  6842. struct ggml_tensor * dst) {
  6843. GGML_ASSERT(params->ith == 0);
  6844. GGML_ASSERT(ggml_can_repeat(src0, dst));
  6845. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  6846. return;
  6847. }
  6848. GGML_TENSOR_UNARY_OP_LOCALS
  6849. // guaranteed to be an integer due to the check in ggml_can_repeat
  6850. const int nr0 = (int)(ne0/ne00);
  6851. const int nr1 = (int)(ne1/ne01);
  6852. const int nr2 = (int)(ne2/ne02);
  6853. const int nr3 = (int)(ne3/ne03);
  6854. // TODO: support for transposed / permuted tensors
  6855. GGML_ASSERT(nb0 == sizeof(ggml_fp16_t));
  6856. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  6857. // TODO: maybe this is not optimal?
  6858. for (int i3 = 0; i3 < nr3; i3++) {
  6859. for (int k3 = 0; k3 < ne03; k3++) {
  6860. for (int i2 = 0; i2 < nr2; i2++) {
  6861. for (int k2 = 0; k2 < ne02; k2++) {
  6862. for (int i1 = 0; i1 < nr1; i1++) {
  6863. for (int k1 = 0; k1 < ne01; k1++) {
  6864. for (int i0 = 0; i0 < nr0; i0++) {
  6865. ggml_fp16_t * y = (ggml_fp16_t *) ((char *) dst->data + (i3*ne03 + k3)*nb3 + (i2*ne02 + k2)*nb2 + (i1*ne01 + k1)*nb1 + (i0*ne00)*nb0);
  6866. ggml_fp16_t * x = (ggml_fp16_t *) ((char *) src0->data + ( k3)*nb03 + ( k2)*nb02 + ( k1)*nb01);
  6867. // ggml_vec_cpy_f16(ne00, y, x)
  6868. for (int i = 0; i < ne00; ++i) {
  6869. y[i] = x[i];
  6870. }
  6871. }
  6872. }
  6873. }
  6874. }
  6875. }
  6876. }
  6877. }
  6878. }
  6879. static void ggml_compute_forward_repeat(
  6880. const struct ggml_compute_params * params,
  6881. const struct ggml_tensor * src0,
  6882. struct ggml_tensor * dst) {
  6883. switch (src0->type) {
  6884. case GGML_TYPE_F16:
  6885. {
  6886. ggml_compute_forward_repeat_f16(params, src0, dst);
  6887. } break;
  6888. case GGML_TYPE_F32:
  6889. {
  6890. ggml_compute_forward_repeat_f32(params, src0, dst);
  6891. } break;
  6892. default:
  6893. {
  6894. GGML_ASSERT(false);
  6895. } break;
  6896. }
  6897. }
  6898. // ggml_compute_forward_repeat_back
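// reverse of repeat, used for the backward pass: dst is zeroed first, then every tile of
// src0 is accumulated into it with ggml_vec_acc_f32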
  6899. static void ggml_compute_forward_repeat_back_f32(
  6900. const struct ggml_compute_params * params,
  6901. const struct ggml_tensor * src0,
  6902. struct ggml_tensor * dst) {
  6903. GGML_ASSERT(params->ith == 0);
  6904. GGML_ASSERT(ggml_can_repeat(dst, src0));
  6905. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  6906. return;
  6907. }
  6908. GGML_TENSOR_UNARY_OP_LOCALS
  6909. // guaranteed to be an integer due to the check in ggml_can_repeat
  6910. const int nr0 = (int)(ne00/ne0);
  6911. const int nr1 = (int)(ne01/ne1);
  6912. const int nr2 = (int)(ne02/ne2);
  6913. const int nr3 = (int)(ne03/ne3);
  6914. // TODO: support for transposed / permuted tensors
  6915. GGML_ASSERT(nb0 == sizeof(float));
  6916. GGML_ASSERT(nb00 == sizeof(float));
  6917. if (ggml_is_contiguous(dst)) {
  6918. ggml_vec_set_f32(ne0*ne1*ne2*ne3, dst->data, 0);
  6919. } else {
  6920. for (int k3 = 0; k3 < ne3; k3++) {
  6921. for (int k2 = 0; k2 < ne2; k2++) {
  6922. for (int k1 = 0; k1 < ne1; k1++) {
  6923. ggml_vec_set_f32(ne0,
  6924. (float *) ((char *) dst->data + k1*nb1 + k2*nb2 + k3*nb3),
  6925. 0);
  6926. }
  6927. }
  6928. }
  6929. }
  6930. // TODO: maybe this is not optimal?
  6931. for (int i3 = 0; i3 < nr3; i3++) {
  6932. for (int k3 = 0; k3 < ne3; k3++) {
  6933. for (int i2 = 0; i2 < nr2; i2++) {
  6934. for (int k2 = 0; k2 < ne2; k2++) {
  6935. for (int i1 = 0; i1 < nr1; i1++) {
  6936. for (int k1 = 0; k1 < ne1; k1++) {
  6937. for (int i0 = 0; i0 < nr0; i0++) {
  6938. ggml_vec_acc_f32(ne0,
  6939. (float *) ((char *) dst->data + ( k3)*nb3 + ( k2)*nb2 + ( k1)*nb1),
  6940. (float *) ((char *) src0->data + (i3*ne3 + k3)*nb03 + (i2*ne2 + k2)*nb02 + (i1*ne1 + k1)*nb01 + (i0*ne0)*nb00));
  6941. }
  6942. }
  6943. }
  6944. }
  6945. }
  6946. }
  6947. }
  6948. }
  6949. static void ggml_compute_forward_repeat_back(
  6950. const struct ggml_compute_params * params,
  6951. const struct ggml_tensor * src0,
  6952. struct ggml_tensor * dst) {
  6953. switch (src0->type) {
  6954. case GGML_TYPE_F32:
  6955. {
  6956. ggml_compute_forward_repeat_back_f32(params, src0, dst);
  6957. } break;
  6958. default:
  6959. {
  6960. GGML_ASSERT(false);
  6961. } break;
  6962. }
  6963. }
  6964. // ggml_compute_forward_concat
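// concatenation along dim 2: slices with i2 < ne02 are copied from src0, the remaining
// slices from src1 (indexed with i2 - ne02)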
  6965. static void ggml_compute_forward_concat_f32(
  6966. const struct ggml_compute_params * params,
  6967. const struct ggml_tensor * src0,
  6968. const struct ggml_tensor * src1,
  6969. struct ggml_tensor * dst) {
  6970. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  6971. return;
  6972. }
  6973. GGML_ASSERT(src0->nb[0] == sizeof(float));
  6974. const int ith = params->ith;
  6975. const int nth = params->nth;
  6976. GGML_TENSOR_BINARY_OP_LOCALS
  6977. // TODO: support for transposed / permuted tensors
  6978. GGML_ASSERT(nb0 == sizeof(float));
  6979. GGML_ASSERT(nb00 == sizeof(float));
  6980. GGML_ASSERT(nb10 == sizeof(float));
  6981. for (int i3 = 0; i3 < ne3; i3++) {
  6982. for (int i2 = ith; i2 < ne2; i2 += nth) {
  6983. if (i2 < ne02) { // src0
  6984. for (int i1 = 0; i1 < ne1; i1++) {
  6985. for (int i0 = 0; i0 < ne0; i0++) {
  6986. const float * x = (float *)((char *) src0->data + i0 * nb00 + i1 * nb01 + i2 * nb02 + i3 * nb03);
  6987. float * y = (float *)((char *)dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3);
  6988. *y = *x;
  6989. }
  6990. }
6991. } // i2 >= ne02 -> src1
6992. else {
  6993. for (int i1 = 0; i1 < ne1; i1++) {
  6994. for (int i0 = 0; i0 < ne0; i0++) {
  6995. const float * x = (float *)((char *) src1->data + i0 * nb10 + i1 * nb11 + (i2 - ne02) * nb12 + i3 * nb13);
  6996. float * y = (float *)((char *)dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3);
  6997. *y = *x;
  6998. }
  6999. }
  7000. }
  7001. }
  7002. }
  7003. }
  7004. static void ggml_compute_forward_concat(
  7005. const struct ggml_compute_params* params,
  7006. const struct ggml_tensor* src0,
  7007. const struct ggml_tensor* src1,
  7008. struct ggml_tensor* dst) {
  7009. switch (src0->type) {
  7010. case GGML_TYPE_F32:
  7011. {
  7012. ggml_compute_forward_concat_f32(params, src0, src1, dst);
  7013. } break;
  7014. default:
  7015. {
  7016. GGML_ASSERT(false);
  7017. } break;
  7018. }
  7019. }
  7020. // ggml_compute_forward_abs
  7021. static void ggml_compute_forward_abs_f32(
  7022. const struct ggml_compute_params * params,
  7023. const struct ggml_tensor * src0,
  7024. struct ggml_tensor * dst) {
  7025. assert(params->ith == 0);
  7026. assert(ggml_are_same_shape(src0, dst));
  7027. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7028. return;
  7029. }
  7030. const int n = ggml_nrows(src0);
  7031. const int nc = src0->ne[0];
  7032. assert(dst->nb[0] == sizeof(float));
  7033. assert(src0->nb[0] == sizeof(float));
  7034. for (int i = 0; i < n; i++) {
  7035. ggml_vec_abs_f32(nc,
  7036. (float *) ((char *) dst->data + i*( dst->nb[1])),
  7037. (float *) ((char *) src0->data + i*(src0->nb[1])));
  7038. }
  7039. }
  7040. static void ggml_compute_forward_abs(
  7041. const struct ggml_compute_params * params,
  7042. const struct ggml_tensor * src0,
  7043. struct ggml_tensor * dst) {
  7044. switch (src0->type) {
  7045. case GGML_TYPE_F32:
  7046. {
  7047. ggml_compute_forward_abs_f32(params, src0, dst);
  7048. } break;
  7049. default:
  7050. {
  7051. GGML_ASSERT(false);
  7052. } break;
  7053. }
  7054. }
  7055. // ggml_compute_forward_sgn
  7056. static void ggml_compute_forward_sgn_f32(
  7057. const struct ggml_compute_params * params,
  7058. const struct ggml_tensor * src0,
  7059. struct ggml_tensor * dst) {
  7060. assert(params->ith == 0);
  7061. assert(ggml_are_same_shape(src0, dst));
  7062. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7063. return;
  7064. }
  7065. const int n = ggml_nrows(src0);
  7066. const int nc = src0->ne[0];
  7067. assert(dst->nb[0] == sizeof(float));
  7068. assert(src0->nb[0] == sizeof(float));
  7069. for (int i = 0; i < n; i++) {
  7070. ggml_vec_sgn_f32(nc,
  7071. (float *) ((char *) dst->data + i*( dst->nb[1])),
  7072. (float *) ((char *) src0->data + i*(src0->nb[1])));
  7073. }
  7074. }
  7075. static void ggml_compute_forward_sgn(
  7076. const struct ggml_compute_params * params,
  7077. const struct ggml_tensor * src0,
  7078. struct ggml_tensor * dst) {
  7079. switch (src0->type) {
  7080. case GGML_TYPE_F32:
  7081. {
  7082. ggml_compute_forward_sgn_f32(params, src0, dst);
  7083. } break;
  7084. default:
  7085. {
  7086. GGML_ASSERT(false);
  7087. } break;
  7088. }
  7089. }
  7090. // ggml_compute_forward_neg
  7091. static void ggml_compute_forward_neg_f32(
  7092. const struct ggml_compute_params * params,
  7093. const struct ggml_tensor * src0,
  7094. struct ggml_tensor * dst) {
  7095. assert(params->ith == 0);
  7096. assert(ggml_are_same_shape(src0, dst));
  7097. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7098. return;
  7099. }
  7100. const int n = ggml_nrows(src0);
  7101. const int nc = src0->ne[0];
  7102. assert(dst->nb[0] == sizeof(float));
  7103. assert(src0->nb[0] == sizeof(float));
  7104. for (int i = 0; i < n; i++) {
  7105. ggml_vec_neg_f32(nc,
  7106. (float *) ((char *) dst->data + i*( dst->nb[1])),
  7107. (float *) ((char *) src0->data + i*(src0->nb[1])));
  7108. }
  7109. }
  7110. static void ggml_compute_forward_neg(
  7111. const struct ggml_compute_params * params,
  7112. const struct ggml_tensor * src0,
  7113. struct ggml_tensor * dst) {
  7114. switch (src0->type) {
  7115. case GGML_TYPE_F32:
  7116. {
  7117. ggml_compute_forward_neg_f32(params, src0, dst);
  7118. } break;
  7119. default:
  7120. {
  7121. GGML_ASSERT(false);
  7122. } break;
  7123. }
  7124. }
  7125. // ggml_compute_forward_step
  7126. static void ggml_compute_forward_step_f32(
  7127. const struct ggml_compute_params * params,
  7128. const struct ggml_tensor * src0,
  7129. struct ggml_tensor * dst) {
  7130. assert(params->ith == 0);
  7131. assert(ggml_are_same_shape(src0, dst));
  7132. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7133. return;
  7134. }
  7135. const int n = ggml_nrows(src0);
  7136. const int nc = src0->ne[0];
  7137. assert(dst->nb[0] == sizeof(float));
  7138. assert(src0->nb[0] == sizeof(float));
  7139. for (int i = 0; i < n; i++) {
  7140. ggml_vec_step_f32(nc,
  7141. (float *) ((char *) dst->data + i*( dst->nb[1])),
  7142. (float *) ((char *) src0->data + i*(src0->nb[1])));
  7143. }
  7144. }
  7145. static void ggml_compute_forward_step(
  7146. const struct ggml_compute_params * params,
  7147. const struct ggml_tensor * src0,
  7148. struct ggml_tensor * dst) {
  7149. switch (src0->type) {
  7150. case GGML_TYPE_F32:
  7151. {
  7152. ggml_compute_forward_step_f32(params, src0, dst);
  7153. } break;
  7154. default:
  7155. {
  7156. GGML_ASSERT(false);
  7157. } break;
  7158. }
  7159. }
  7160. // ggml_compute_forward_tanh
  7161. static void ggml_compute_forward_tanh_f32(
  7162. const struct ggml_compute_params * params,
  7163. const struct ggml_tensor * src0,
  7164. struct ggml_tensor * dst) {
  7165. assert(params->ith == 0);
  7166. assert(ggml_are_same_shape(src0, dst));
  7167. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7168. return;
  7169. }
  7170. const int n = ggml_nrows(src0);
  7171. const int nc = src0->ne[0];
  7172. assert(dst->nb[0] == sizeof(float));
  7173. assert(src0->nb[0] == sizeof(float));
  7174. for (int i = 0; i < n; i++) {
  7175. ggml_vec_tanh_f32(nc,
  7176. (float *) ((char *) dst->data + i*( dst->nb[1])),
  7177. (float *) ((char *) src0->data + i*(src0->nb[1])));
  7178. }
  7179. }
  7180. static void ggml_compute_forward_tanh(
  7181. const struct ggml_compute_params * params,
  7182. const struct ggml_tensor * src0,
  7183. struct ggml_tensor * dst) {
  7184. switch (src0->type) {
  7185. case GGML_TYPE_F32:
  7186. {
  7187. ggml_compute_forward_tanh_f32(params, src0, dst);
  7188. } break;
  7189. default:
  7190. {
  7191. GGML_ASSERT(false);
  7192. } break;
  7193. }
  7194. }
  7195. // ggml_compute_forward_elu
  7196. static void ggml_compute_forward_elu_f32(
  7197. const struct ggml_compute_params * params,
  7198. const struct ggml_tensor * src0,
  7199. struct ggml_tensor * dst) {
  7200. assert(params->ith == 0);
  7201. assert(ggml_are_same_shape(src0, dst));
  7202. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7203. return;
  7204. }
  7205. const int n = ggml_nrows(src0);
  7206. const int nc = src0->ne[0];
  7207. assert(dst->nb[0] == sizeof(float));
  7208. assert(src0->nb[0] == sizeof(float));
  7209. for (int i = 0; i < n; i++) {
  7210. ggml_vec_elu_f32(nc,
  7211. (float *) ((char *) dst->data + i*( dst->nb[1])),
  7212. (float *) ((char *) src0->data + i*(src0->nb[1])));
  7213. }
  7214. }
  7215. static void ggml_compute_forward_elu(
  7216. const struct ggml_compute_params * params,
  7217. const struct ggml_tensor * src0,
  7218. struct ggml_tensor * dst) {
  7219. switch (src0->type) {
  7220. case GGML_TYPE_F32:
  7221. {
  7222. ggml_compute_forward_elu_f32(params, src0, dst);
  7223. } break;
  7224. default:
  7225. {
  7226. GGML_ASSERT(false);
  7227. } break;
  7228. }
  7229. }
  7230. // ggml_compute_forward_relu
  7231. static void ggml_compute_forward_relu_f32(
  7232. const struct ggml_compute_params * params,
  7233. const struct ggml_tensor * src0,
  7234. struct ggml_tensor * dst) {
  7235. assert(params->ith == 0);
  7236. assert(ggml_are_same_shape(src0, dst));
  7237. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7238. return;
  7239. }
  7240. const int n = ggml_nrows(src0);
  7241. const int nc = src0->ne[0];
  7242. assert(dst->nb[0] == sizeof(float));
  7243. assert(src0->nb[0] == sizeof(float));
  7244. for (int i = 0; i < n; i++) {
  7245. ggml_vec_relu_f32(nc,
  7246. (float *) ((char *) dst->data + i*( dst->nb[1])),
  7247. (float *) ((char *) src0->data + i*(src0->nb[1])));
  7248. }
  7249. }
  7250. static void ggml_compute_forward_relu(
  7251. const struct ggml_compute_params * params,
  7252. const struct ggml_tensor * src0,
  7253. struct ggml_tensor * dst) {
  7254. switch (src0->type) {
  7255. case GGML_TYPE_F32:
  7256. {
  7257. ggml_compute_forward_relu_f32(params, src0, dst);
  7258. } break;
  7259. default:
  7260. {
  7261. GGML_ASSERT(false);
  7262. } break;
  7263. }
  7264. }
  7265. // ggml_compute_forward_gelu
  7266. static void ggml_compute_forward_gelu_f32(
  7267. const struct ggml_compute_params * params,
  7268. const struct ggml_tensor * src0,
  7269. struct ggml_tensor * dst) {
  7270. GGML_ASSERT(ggml_is_contiguous_except_dim_1(src0));
  7271. GGML_ASSERT(ggml_is_contiguous_except_dim_1(dst));
  7272. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  7273. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7274. return;
  7275. }
  7276. const int ith = params->ith;
  7277. const int nth = params->nth;
  7278. const int nc = src0->ne[0];
  7279. const int nr = ggml_nrows(src0);
  7280. // rows per thread
  7281. const int dr = (nr + nth - 1)/nth;
  7282. // row range for this thread
  7283. const int ir0 = dr*ith;
  7284. const int ir1 = MIN(ir0 + dr, nr);
  7285. for (int i1 = ir0; i1 < ir1; i1++) {
  7286. ggml_vec_gelu_f32(nc,
  7287. (float *) ((char *) dst->data + i1*( dst->nb[1])),
  7288. (float *) ((char *) src0->data + i1*(src0->nb[1])));
  7289. #ifndef NDEBUG
  7290. for (int k = 0; k < nc; k++) {
  7291. const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k];
  7292. UNUSED(x);
  7293. assert(!isnan(x));
  7294. assert(!isinf(x));
  7295. }
  7296. #endif
  7297. }
  7298. }
  7299. static void ggml_compute_forward_gelu(
  7300. const struct ggml_compute_params * params,
  7301. const struct ggml_tensor * src0,
  7302. struct ggml_tensor * dst) {
  7303. switch (src0->type) {
  7304. case GGML_TYPE_F32:
  7305. {
  7306. ggml_compute_forward_gelu_f32(params, src0, dst);
  7307. } break;
  7308. default:
  7309. {
  7310. GGML_ASSERT(false);
  7311. } break;
  7312. }
  7313. }
  7314. // ggml_compute_forward_gelu_quick
  7315. static void ggml_compute_forward_gelu_quick_f32(
  7316. const struct ggml_compute_params * params,
  7317. const struct ggml_tensor * src0,
  7318. struct ggml_tensor * dst) {
  7319. GGML_ASSERT(ggml_is_contiguous_except_dim_1(src0));
  7320. GGML_ASSERT(ggml_is_contiguous_except_dim_1(dst));
  7321. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  7322. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7323. return;
  7324. }
  7325. const int ith = params->ith;
  7326. const int nth = params->nth;
  7327. const int nc = src0->ne[0];
  7328. const int nr = ggml_nrows(src0);
  7329. // rows per thread
  7330. const int dr = (nr + nth - 1)/nth;
  7331. // row range for this thread
  7332. const int ir0 = dr*ith;
  7333. const int ir1 = MIN(ir0 + dr, nr);
  7334. for (int i1 = ir0; i1 < ir1; i1++) {
  7335. ggml_vec_gelu_quick_f32(nc,
  7336. (float *) ((char *) dst->data + i1*( dst->nb[1])),
  7337. (float *) ((char *) src0->data + i1*(src0->nb[1])));
  7338. #ifndef NDEBUG
  7339. for (int k = 0; k < nc; k++) {
  7340. const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k];
  7341. UNUSED(x);
  7342. assert(!isnan(x));
  7343. assert(!isinf(x));
  7344. }
  7345. #endif
  7346. }
  7347. }
  7348. static void ggml_compute_forward_gelu_quick(
  7349. const struct ggml_compute_params * params,
  7350. const struct ggml_tensor * src0,
  7351. struct ggml_tensor * dst) {
  7352. switch (src0->type) {
  7353. case GGML_TYPE_F32:
  7354. {
  7355. ggml_compute_forward_gelu_quick_f32(params, src0, dst);
  7356. } break;
  7357. default:
  7358. {
  7359. GGML_ASSERT(false);
  7360. } break;
  7361. }
  7362. }
  7363. // ggml_compute_forward_silu
  7364. static void ggml_compute_forward_silu_f32(
  7365. const struct ggml_compute_params * params,
  7366. const struct ggml_tensor * src0,
  7367. struct ggml_tensor * dst) {
  7368. GGML_ASSERT(ggml_is_contiguous_except_dim_1(src0));
  7369. GGML_ASSERT(ggml_is_contiguous_except_dim_1(dst));
  7370. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  7371. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7372. return;
  7373. }
  7374. const int ith = params->ith;
  7375. const int nth = params->nth;
  7376. const int nc = src0->ne[0];
  7377. const int nr = ggml_nrows(src0);
  7378. // rows per thread
  7379. const int dr = (nr + nth - 1)/nth;
  7380. // row range for this thread
  7381. const int ir0 = dr*ith;
  7382. const int ir1 = MIN(ir0 + dr, nr);
  7383. for (int i1 = ir0; i1 < ir1; i1++) {
  7384. ggml_vec_silu_f32(nc,
  7385. (float *) ((char *) dst->data + i1*( dst->nb[1])),
  7386. (float *) ((char *) src0->data + i1*(src0->nb[1])));
  7387. #ifndef NDEBUG
  7388. for (int k = 0; k < nc; k++) {
  7389. const float x = ((float *) ((char *) dst->data + i1*(dst->nb[1])))[k];
  7390. UNUSED(x);
  7391. assert(!isnan(x));
  7392. assert(!isinf(x));
  7393. }
  7394. #endif
  7395. }
  7396. }
  7397. static void ggml_compute_forward_silu(
  7398. const struct ggml_compute_params * params,
  7399. const struct ggml_tensor * src0,
  7400. struct ggml_tensor * dst) {
  7401. switch (src0->type) {
  7402. case GGML_TYPE_F32:
  7403. {
  7404. ggml_compute_forward_silu_f32(params, src0, dst);
  7405. } break;
  7406. default:
  7407. {
  7408. GGML_ASSERT(false);
  7409. } break;
  7410. }
  7411. }
  7412. // ggml_compute_forward_leaky_relu
  7413. static void ggml_compute_forward_leaky_relu_f32(
  7414. const struct ggml_compute_params * params,
  7415. const struct ggml_tensor * src0,
  7416. struct ggml_tensor * dst) {
  7417. assert(params->ith == 0);
  7418. assert(ggml_are_same_shape(src0, dst));
  7419. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7420. return;
  7421. }
  7422. const int n = ggml_nrows(src0);
  7423. const int nc = src0->ne[0];
  7424. float negative_slope;
  7425. memcpy(&negative_slope, dst->op_params, sizeof(float));
  7426. assert(dst->nb[0] == sizeof(float));
  7427. assert(src0->nb[0] == sizeof(float));
  7428. for (int i = 0; i < n; i++) {
  7429. ggml_vec_leaky_relu_f32(nc,
  7430. (float *) ((char *) dst->data + i*( dst->nb[1])),
  7431. (float *) ((char *) src0->data + i*(src0->nb[1])), negative_slope);
  7432. }
  7433. }
  7434. static void ggml_compute_forward_leaky_relu(
  7435. const struct ggml_compute_params * params,
  7436. const struct ggml_tensor * src0,
  7437. struct ggml_tensor * dst) {
  7438. switch (src0->type) {
  7439. case GGML_TYPE_F32:
  7440. {
  7441. ggml_compute_forward_leaky_relu_f32(params, src0, dst);
  7442. } break;
  7443. default:
  7444. {
  7445. GGML_ASSERT(false);
  7446. } break;
  7447. }
  7448. }
  7449. // ggml_compute_forward_silu_back
  7450. static void ggml_compute_forward_silu_back_f32(
  7451. const struct ggml_compute_params * params,
  7452. const struct ggml_tensor * src0,
  7453. const struct ggml_tensor * grad,
  7454. struct ggml_tensor * dst) {
  7455. GGML_ASSERT(ggml_is_contiguous_except_dim_1(grad));
  7456. GGML_ASSERT(ggml_is_contiguous_except_dim_1(src0));
  7457. GGML_ASSERT(ggml_is_contiguous_except_dim_1(dst));
  7458. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  7459. GGML_ASSERT(ggml_are_same_shape(src0, grad));
  7460. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7461. return;
  7462. }
  7463. const int ith = params->ith;
  7464. const int nth = params->nth;
  7465. const int nc = src0->ne[0];
  7466. const int nr = ggml_nrows(src0);
  7467. // rows per thread
  7468. const int dr = (nr + nth - 1)/nth;
  7469. // row range for this thread
  7470. const int ir0 = dr*ith;
  7471. const int ir1 = MIN(ir0 + dr, nr);
  7472. for (int i1 = ir0; i1 < ir1; i1++) {
  7473. ggml_vec_silu_backward_f32(nc,
  7474. (float *) ((char *) dst->data + i1*( dst->nb[1])),
  7475. (float *) ((char *) src0->data + i1*(src0->nb[1])),
  7476. (float *) ((char *) grad->data + i1*(grad->nb[1])));
  7477. #ifndef NDEBUG
  7478. for (int k = 0; k < nc; k++) {
  7479. const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k];
  7480. UNUSED(x);
  7481. assert(!isnan(x));
  7482. assert(!isinf(x));
  7483. }
  7484. #endif
  7485. }
  7486. }
  7487. static void ggml_compute_forward_silu_back(
  7488. const struct ggml_compute_params * params,
  7489. const struct ggml_tensor * src0,
  7490. const struct ggml_tensor * grad,
  7491. struct ggml_tensor * dst) {
  7492. switch (src0->type) {
  7493. case GGML_TYPE_F32:
  7494. {
  7495. ggml_compute_forward_silu_back_f32(params, src0, grad, dst);
  7496. } break;
  7497. default:
  7498. {
  7499. GGML_ASSERT(false);
  7500. } break;
  7501. }
  7502. }
  7503. // ggml_compute_forward_norm
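// normalizes each row to zero mean and unit variance: y = (x - mean) / sqrt(variance + eps),
// with eps read from op_params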
  7504. static void ggml_compute_forward_norm_f32(
  7505. const struct ggml_compute_params * params,
  7506. const struct ggml_tensor * src0,
  7507. struct ggml_tensor * dst) {
  7508. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  7509. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7510. return;
  7511. }
  7512. GGML_ASSERT(src0->nb[0] == sizeof(float));
  7513. const int ith = params->ith;
  7514. const int nth = params->nth;
  7515. GGML_TENSOR_UNARY_OP_LOCALS
  7516. float eps;
  7517. memcpy(&eps, dst->op_params, sizeof(float));
  7518. // TODO: optimize
  7519. for (int64_t i03 = 0; i03 < ne03; i03++) {
  7520. for (int64_t i02 = 0; i02 < ne02; i02++) {
  7521. for (int64_t i01 = ith; i01 < ne01; i01 += nth) {
  7522. const float * x = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
  7523. ggml_float sum = 0.0;
  7524. for (int64_t i00 = 0; i00 < ne00; i00++) {
  7525. sum += (ggml_float)x[i00];
  7526. }
  7527. float mean = sum/ne00;
  7528. float * y = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3);
  7529. ggml_float sum2 = 0.0;
  7530. for (int64_t i00 = 0; i00 < ne00; i00++) {
  7531. float v = x[i00] - mean;
  7532. y[i00] = v;
  7533. sum2 += (ggml_float)(v*v);
  7534. }
  7535. float variance = sum2/ne00;
  7536. const float scale = 1.0f/sqrtf(variance + eps);
  7537. ggml_vec_scale_f32(ne00, y, scale);
  7538. }
  7539. }
  7540. }
  7541. }
  7542. static void ggml_compute_forward_norm(
  7543. const struct ggml_compute_params * params,
  7544. const struct ggml_tensor * src0,
  7545. struct ggml_tensor * dst) {
  7546. switch (src0->type) {
  7547. case GGML_TYPE_F32:
  7548. {
  7549. ggml_compute_forward_norm_f32(params, src0, dst);
  7550. } break;
  7551. default:
  7552. {
  7553. GGML_ASSERT(false);
  7554. } break;
  7555. }
  7556. }
7557. // ggml_compute_forward_rms_norm
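// RMS norm: y = x / sqrt(mean(x^2) + eps); unlike ggml_compute_forward_norm above,
// the mean is not subtracted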
  7558. static void ggml_compute_forward_rms_norm_f32(
  7559. const struct ggml_compute_params * params,
  7560. const struct ggml_tensor * src0,
  7561. struct ggml_tensor * dst) {
  7562. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  7563. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7564. return;
  7565. }
  7566. GGML_ASSERT(src0->nb[0] == sizeof(float));
  7567. const int ith = params->ith;
  7568. const int nth = params->nth;
  7569. GGML_TENSOR_UNARY_OP_LOCALS
  7570. float eps;
  7571. memcpy(&eps, dst->op_params, sizeof(float));
  7572. // TODO: optimize
  7573. for (int64_t i03 = 0; i03 < ne03; i03++) {
  7574. for (int64_t i02 = 0; i02 < ne02; i02++) {
  7575. for (int64_t i01 = ith; i01 < ne01; i01 += nth) {
  7576. const float * x = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
  7577. ggml_float sum = 0.0;
  7578. for (int64_t i00 = 0; i00 < ne00; i00++) {
  7579. sum += (ggml_float)(x[i00] * x[i00]);
  7580. }
  7581. const float mean = sum/ne00;
  7582. float * y = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3);
  7583. memcpy(y, x, ne00 * sizeof(float));
  7584. // for (int i00 = 0; i00 < ne00; i00++) {
  7585. // y[i00] = x[i00];
  7586. // }
  7587. const float scale = 1.0f/sqrtf(mean + eps);
  7588. ggml_vec_scale_f32(ne00, y, scale);
  7589. }
  7590. }
  7591. }
  7592. }
  7593. static void ggml_compute_forward_rms_norm(
  7594. const struct ggml_compute_params * params,
  7595. const struct ggml_tensor * src0,
  7596. struct ggml_tensor * dst) {
  7597. switch (src0->type) {
  7598. case GGML_TYPE_F32:
  7599. {
  7600. ggml_compute_forward_rms_norm_f32(params, src0, dst);
  7601. } break;
  7602. default:
  7603. {
  7604. GGML_ASSERT(false);
  7605. } break;
  7606. }
  7607. }
  7608. static void ggml_compute_forward_rms_norm_back_f32(
  7609. const struct ggml_compute_params * params,
  7610. const struct ggml_tensor * src0,
  7611. const struct ggml_tensor * src1,
  7612. struct ggml_tensor * dst) {
  7613. GGML_ASSERT(ggml_are_same_shape(src0, dst) && ggml_are_same_shape(src0, src1));
  7614. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7615. return;
  7616. }
  7617. GGML_ASSERT(src0->nb[0] == sizeof(float));
  7618. const int ith = params->ith;
  7619. const int nth = params->nth;
  7620. GGML_TENSOR_BINARY_OP_LOCALS
  7621. float eps;
  7622. memcpy(&eps, dst->op_params, sizeof(float));
  7623. // TODO: optimize
  7624. for (int64_t i03 = 0; i03 < ne03; i03++) {
  7625. for (int64_t i02 = 0; i02 < ne02; i02++) {
  7626. for (int64_t i01 = ith; i01 < ne01; i01 += nth) {
  7627. // src1 is same shape as src0 => same indices
  7628. const int64_t i11 = i01;
  7629. const int64_t i12 = i02;
  7630. const int64_t i13 = i03;
  7631. const float * x = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
  7632. const float * dz = (float *) ((char *) src1->data + i11*nb11 + i12*nb12 + i13*nb13);
  7633. ggml_float sum_xx = 0.0;
  7634. ggml_float sum_xdz = 0.0;
  7635. for (int64_t i00 = 0; i00 < ne00; i00++) {
  7636. sum_xx += (ggml_float)(x[i00] * x[i00]);
  7637. sum_xdz += (ggml_float)(x[i00] * dz[i00]);
  7638. }
  7639. //const float mean = (float)(sum_xx)/ne00;
  7640. const float mean_eps = (float)(sum_xx)/ne00 + eps;
  7641. const float sum_eps = (float)(sum_xx) + eps*ne00;
  7642. //const float mean_xdz = (float)(sum_xdz)/ne00;
  7643. // we could cache rms from forward pass to improve performance.
  7644. // to do this implement ggml_rms and compose ggml_rms_norm using ggml_rms.
  7645. //const float rms = sqrtf(mean_eps);
  7646. const float rrms = 1.0f / sqrtf(mean_eps);
  7647. //const float scale = -rrms/(ne00 * mean_eps); // -1/(n*rms**3)
  7648. {
  7649. // z = rms_norm(x)
  7650. //
  7651. // rms_norm(src0) =
  7652. // scale(
  7653. // src0,
  7654. // div(
  7655. // 1,
  7656. // sqrt(
  7657. // add(
  7658. // scale(
  7659. // sum(
  7660. // sqr(
  7661. // src0)),
  7662. // (1.0/N)),
  7663. // eps))));
  7664. // postorder:
  7665. // ## op args grad
  7666. // 00 param src0 grad[#00]
  7667. // 01 const 1
  7668. // 02 sqr (#00) grad[#02]
  7669. // 03 sum (#02) grad[#03]
  7670. // 04 const 1/N
  7671. // 05 scale (#03, #04) grad[#05]
  7672. // 06 const eps
  7673. // 07 add (#05, #06) grad[#07]
  7674. // 08 sqrt (#07) grad[#08]
  7675. // 09 div (#01,#08) grad[#09]
  7676. // 10 scale (#00,#09) grad[#10]
  7677. //
  7678. // backward pass, given grad[#10]
  7679. // #10: scale
  7680. // grad[#00] += scale(grad[#10],#09)
  7681. // grad[#09] += sum(mul(grad[#10],#00))
  7682. // #09: div
  7683. // grad[#08] += neg(mul(grad[#09], div(#09,#08)))
  7684. // #08: sqrt
  7685. // grad[#07] += mul(grad[#08], div(0.5, #08))
  7686. // #07: add
  7687. // grad[#05] += grad[#07]
  7688. // #05: scale
  7689. // grad[#03] += scale(grad[#05],#04)
  7690. // #03: sum
  7691. // grad[#02] += repeat(grad[#03], #02)
  7692. // #02:
  7693. // grad[#00] += scale(mul(#00, grad[#02]), 2.0)
  7694. //
  7695. // substitute and simplify:
  7696. // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, grad[#02]), 2.0)
  7697. // grad[#02] = repeat(grad[#03], #02)
  7698. // grad[#02] = repeat(scale(grad[#05],#04), #02)
  7699. // grad[#02] = repeat(scale(grad[#07],#04), #02)
  7700. // grad[#02] = repeat(scale(mul(grad[#08], div(0.5, #08)),#04), #02)
  7701. // grad[#02] = repeat(scale(mul(neg(mul(grad[#09], div(#09,#08))), div(0.5, #08)),#04), #02)
  7702. // grad[#02] = repeat(scale(mul(neg(mul(sum(mul(grad[#10],#00)), div(#09,#08))), div(0.5, #08)),#04), #02)
  7703. // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(#09,#08) * div(0.5, #08) * (1/N)), #02)
  7704. // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(div(#01,#08),#08) * div(0.5, #08) * (1/N)), #02)
  7705. // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(1,#08*#08) * div(0.5, #08) * (1/N)), #02)
  7706. // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N)), #02)
  7707. // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, grad[#02]), 2.0)
  7708. // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, repeat(-(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N)), #02)), 2.0)
  7709. // grad[#00] = scale(grad(#10), #09) + scale(scale(#00, -(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N))), 2.0)
  7710. // grad[#00] = scale(grad(#10), #09) + scale(#00, -(sum(mul(grad[#10],#00)) * div(1,#07) * div(1,#08) * (1/N)))
  7711. // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(1,#07*#08) * (-1/N))
  7712. // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(1,#07*#08) * (-1/N))
  7713. // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(1,mean_eps*rms) * (-1/N))
  7714. // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*mean_eps))
  7715. // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*(sum_xx/N+eps)))
  7716. // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*sum_xx+rms*N*eps))
  7717. // grad[#00] = scale(dz, rrms) + scale(x, sum(mul(dz,x)) * div(-1,rms*N*mean_eps))
  7718. // grad[#00] = scale(dz, rrms) + scale(x, sum_xdz * div(-1,rms*N*mean_eps))
  7719. // a = b*c + d*e
  7720. // a = b*c*f/f + d*e*f/f
  7721. // a = (b*c*f + d*e*f)*(1/f)
  7722. // a = (b*c*(1/c) + d*e*(1/c))*(1/(1/c))
  7723. // a = (b + d*e/c)*c
  7724. // b = dz, c = rrms, d = x, e = sum_xdz * div(-1,rms*N*mean_eps)
  7725. // a = (dz + x*sum_xdz * div(-1,rms*N*mean_eps)/rrms)*rrms
  7726. // a = (dz + x*sum_xdz * div(-1,rms*N*mean_eps)*rms)*rrms
  7727. // a = (dz + x*sum_xdz * div(-rms,rms*N*mean_eps))*rrms
  7728. // a = (dz + x*sum_xdz * div(-1,N*mean_eps))*rrms
  7729. // a = (dz + x*div(-sum_xdz,N*mean_eps))*rrms
  7730. // a = (dz + x*div(-mean_xdz,mean_eps))*rrms
  7731. // grad[#00] = scale(dz + scale(x, div(-mean_xdz,mean_eps)),rrms)
  7732. // grad[#00] = scale(dz + scale(x, -mean_xdz/mean_eps),rrms)
  7733. // dx = scale(dz + scale(x, -mean_xdz/mean_eps),rrms)
  7734. }
  7735. // dx = scale(dz + scale(x, -mean_xdz/mean_eps),rrms)
  7736. // post-order:
  7737. // dx := x
  7738. // dx := scale(dx,-mean_xdz/mean_eps)
  7739. // dx := add(dx, dz)
  7740. // dx := scale(dx, rrms)
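// equivalently, in plain math (with N = ne00):
//   dx = rrms * (dz - x * sum_xdz / (sum_xx + N*eps))
// which is exactly what the four vector ops below evaluate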
  7741. float * dx = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3);
  7742. ggml_vec_cpy_f32 (ne00, dx, x);
  7743. // ggml_vec_scale_f32(ne00, dx, -mean_xdz/mean_eps);
  7744. ggml_vec_scale_f32(ne00, dx, (float)(-sum_xdz)/sum_eps);
  7745. ggml_vec_acc_f32 (ne00, dx, dz);
  7746. ggml_vec_scale_f32(ne00, dx, rrms);
  7747. }
  7748. }
  7749. }
  7750. }
  7751. static void ggml_compute_forward_rms_norm_back(
  7752. const struct ggml_compute_params * params,
  7753. const struct ggml_tensor * src0,
  7754. const struct ggml_tensor * src1,
  7755. struct ggml_tensor * dst) {
  7756. switch (src0->type) {
  7757. case GGML_TYPE_F32:
  7758. {
  7759. ggml_compute_forward_rms_norm_back_f32(params, src0, src1, dst);
  7760. } break;
  7761. default:
  7762. {
  7763. GGML_ASSERT(false);
  7764. } break;
  7765. }
  7766. }
  7767. // ggml_compute_forward_group_norm
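// group norm over the channel dim (ne2): channels are split into op_params[0] groups and each
// group is normalized over its ne00*ne01*step elements for every i03, with eps fixed at 1e-6f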
  7768. static void ggml_compute_forward_group_norm_f32(
  7769. const struct ggml_compute_params * params,
  7770. const struct ggml_tensor * src0,
  7771. struct ggml_tensor * dst) {
  7772. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  7773. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7774. return;
  7775. }
  7776. GGML_ASSERT(src0->nb[0] == sizeof(float));
  7777. const int ith = params->ith;
  7778. const int nth = params->nth;
  7779. GGML_TENSOR_UNARY_OP_LOCALS
  7780. const float eps = 1e-6f; // TODO: make this a parameter
  7781. // TODO: optimize
  7782. int n_channels = src0->ne[2];
  7783. int n_groups = dst->op_params[0];
  7784. int n_channels_per_group = (n_channels + n_groups - 1) / n_groups;
  7785. for (int i = ith; i < n_groups; i+=nth) {
  7786. int start = i * n_channels_per_group;
  7787. int end = start + n_channels_per_group;
  7788. if (end > n_channels) {
  7789. end = n_channels;
  7790. }
  7791. int step = end - start;
  7792. for (int64_t i03 = 0; i03 < ne03; i03++) {
  7793. ggml_float sum = 0.0;
  7794. for (int64_t i02 = start; i02 < end; i02++) {
  7795. for (int64_t i01 = 0; i01 < ne01; i01++) {
  7796. const float * x = (float *)((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03);
  7797. for (int64_t i00 = 0; i00 < ne00; i00++) {
  7798. sum += (ggml_float)x[i00];
  7799. }
  7800. }
  7801. }
  7802. float mean = sum / (ne00 * ne01 * step);
  7803. ggml_float sum2 = 0.0;
  7804. for (int64_t i02 = start; i02 < end; i02++) {
  7805. for (int64_t i01 = 0; i01 < ne01; i01++) {
  7806. const float * x = (float *)((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03);
  7807. float * y = (float *)((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3);
  7808. for (int64_t i00 = 0; i00 < ne00; i00++) {
  7809. float v = x[i00] - mean;
  7810. y[i00] = v;
  7811. sum2 += (ggml_float)(v * v);
  7812. }
  7813. }
  7814. }
  7815. float variance = sum2 / (ne00 * ne01 * step);
  7816. const float scale = 1.0f / sqrtf(variance + eps);
  7817. for (int64_t i02 = start; i02 < end; i02++) {
  7818. for (int64_t i01 = 0; i01 < ne01; i01++) {
  7819. float * y = (float *)((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3);
  7820. ggml_vec_scale_f32(ne00, y, scale);
  7821. }
  7822. }
  7823. }
  7824. }
  7825. }
  7826. static void ggml_compute_forward_group_norm(
  7827. const struct ggml_compute_params * params,
  7828. const struct ggml_tensor * src0,
  7829. struct ggml_tensor * dst) {
  7830. switch (src0->type) {
  7831. case GGML_TYPE_F32:
  7832. {
  7833. ggml_compute_forward_group_norm_f32(params, src0, dst);
  7834. } break;
  7835. default:
  7836. {
  7837. GGML_ASSERT(false);
  7838. } break;
  7839. }
  7840. }
  7841. // ggml_compute_forward_mul_mat
  7842. #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
  7843. // helper function to determine if it is better to use BLAS or not
  7844. // for large matrices, BLAS is faster
  7845. static bool ggml_compute_forward_mul_mat_use_blas(
  7846. const struct ggml_tensor * src0,
  7847. const struct ggml_tensor * src1,
  7848. struct ggml_tensor * dst) {
  7849. //const int64_t ne00 = src0->ne[0];
  7850. //const int64_t ne01 = src0->ne[1];
  7851. const int64_t ne10 = src1->ne[0];
  7852. const int64_t ne0 = dst->ne[0];
  7853. const int64_t ne1 = dst->ne[1];
  7854. // NOTE: with GGML_OP_MUL_MAT_ID we don't want to go through the BLAS branch because it will dequantize (to_float)
  7855. // all the experts for each batch element and the processing would become incredibly slow
  7856. // TODO: find the optimal values for these
  7857. if (dst->op != GGML_OP_MUL_MAT_ID &&
  7858. ggml_is_contiguous(src0) &&
  7859. ggml_is_contiguous(src1) &&
  7860. //src0->type == GGML_TYPE_F32 &&
  7861. src1->type == GGML_TYPE_F32 &&
  7862. (ne0 >= 32 && ne1 >= 32 && ne10 >= 32)) {
  7863. /*printf("BLAS: %d %d %d %d %d\n", ne0, ne1, ne10, ne00, ne01);*/
  7864. return true;
  7865. }
  7866. return false;
  7867. }
  7868. #endif
  7869. // off1 = offset in i11 and i1
  7870. // cne1 = ne11 and ne1
  7871. // in a normal matrix multiplication, off1 = 0 and cne1 = ne1
  7872. // during GGML_TASK_INIT, the full src1 is converted regardless of off1 and cne1
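// e.g. a regular mat-mul uses off1 = 0 and cne1 = ne1, while ggml_compute_forward_mul_mat_id
// below calls this once per routed row with off1 = i01 and cne1 = 1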
  7873. static void ggml_compute_forward_mul_mat(
  7874. const struct ggml_compute_params * params,
  7875. const struct ggml_tensor * src0,
  7876. const struct ggml_tensor * src1,
  7877. struct ggml_tensor * dst,
  7878. int64_t off1, int64_t cne1) {
  7879. int64_t t0 = ggml_perf_time_us();
  7880. UNUSED(t0);
  7881. GGML_TENSOR_BINARY_OP_LOCALS
  7882. const int ith = params->ith;
  7883. const int nth = params->nth;
  7884. const enum ggml_type type = src0->type;
  7885. const bool src1_cont = ggml_is_contiguous(src1);
  7886. ggml_vec_dot_t const vec_dot = type_traits[type].vec_dot;
  7887. enum ggml_type const vec_dot_type = type_traits[type].vec_dot_type;
  7888. ggml_from_float_t const from_float_to_vec_dot = type_traits[vec_dot_type].from_float;
  7889. GGML_ASSERT(ne0 == ne01);
  7890. GGML_ASSERT(ne1 == ne11);
  7891. GGML_ASSERT(ne2 == ne12);
  7892. GGML_ASSERT(ne3 == ne13);
  7893. // we don't support permuted src0 or src1
  7894. GGML_ASSERT(nb00 == ggml_type_size(type));
  7895. GGML_ASSERT(nb10 == ggml_type_size(src1->type));
  7896. // dst cannot be transposed or permuted
  7897. GGML_ASSERT(nb0 == sizeof(float));
  7898. GGML_ASSERT(nb0 <= nb1);
  7899. GGML_ASSERT(nb1 <= nb2);
  7900. GGML_ASSERT(nb2 <= nb3);
  7901. // broadcast factors
  7902. const int64_t r2 = ne12/ne02;
  7903. const int64_t r3 = ne13/ne03;
  7904. // nb01 >= nb00 - src0 is not transposed
  7905. // compute by src0 rows
  7906. #if defined(GGML_USE_CLBLAST)
  7907. if (ggml_cl_can_mul_mat(src0, src1, dst)) {
  7908. if (params->ith == 0 && params->type == GGML_TASK_COMPUTE) {
  7909. ggml_cl_mul_mat(src0, src1, dst, params->wdata, params->wsize);
  7910. }
  7911. return;
  7912. }
  7913. #endif
  7914. #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
  7915. if (ggml_compute_forward_mul_mat_use_blas(src0, src1, dst)) {
  7916. if (params->ith != 0) {
  7917. return;
  7918. }
  7919. if (params->type == GGML_TASK_INIT) {
  7920. return;
  7921. }
  7922. if (params->type == GGML_TASK_FINALIZE) {
  7923. return;
  7924. }
  7925. for (int64_t i13 = 0; i13 < ne13; i13++) {
  7926. for (int64_t i12 = 0; i12 < ne12; i12++) {
  7927. // broadcast src0 into src1 across 2nd,3rd dimension
  7928. const int64_t i03 = i13/r3;
  7929. const int64_t i02 = i12/r2;
  7930. const void * x = (char *) src0->data + i02*nb02 + i03*nb03;
  7931. const float * y = (float *) ((char *) src1->data + off1*nb11 + i12*nb12 + i13*nb13);
  7932. float * d = (float *) ((char *) dst->data + off1*nb1 + i12*nb2 + i13*nb3);
  7933. if (type != GGML_TYPE_F32) {
  7934. float * const wdata = params->wdata;
  7935. ggml_to_float_t const to_float = type_traits[type].to_float;
  7936. size_t id = 0;
  7937. for (int64_t i01 = 0; i01 < ne01; ++i01) {
  7938. to_float((const char *) x + i01*nb01, wdata + id, ne00);
  7939. id += ne00;
  7940. }
  7941. assert(id*sizeof(float) <= params->wsize);
  7942. x = wdata;
  7943. }
  7944. cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans,
  7945. cne1, ne01, ne10,
  7946. 1.0f, y, ne10,
  7947. x, ne00,
  7948. 0.0f, d, ne01);
  7949. }
  7950. }
  7951. //printf("CBLAS = %f ms, %d x %d x %d x %d\n", (ggml_perf_time_us() - t0)/1000.0, ne0, ne1, ne2, ne3);
  7952. return;
  7953. }
  7954. #endif
  7955. if (params->type == GGML_TASK_INIT) {
  7956. if (src1->type != vec_dot_type) {
  7957. char * wdata = params->wdata;
  7958. const size_t row_size = ggml_row_size(vec_dot_type, ne10);
  7959. assert(params->wsize >= ne11*ne12*ne13*row_size);
  7960. assert(src1->type == GGML_TYPE_F32);
  7961. for (int64_t i13 = 0; i13 < ne13; ++i13) {
  7962. for (int64_t i12 = 0; i12 < ne12; ++i12) {
  7963. for (int64_t i11 = 0; i11 < ne11; ++i11) {
  7964. from_float_to_vec_dot((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11), (void *) wdata, ne10);
  7965. wdata += row_size;
  7966. }
  7967. }
  7968. }
  7969. }
  7970. return;
  7971. }
  7972. if (params->type == GGML_TASK_FINALIZE) {
  7973. return;
  7974. }
  7975. const void * wdata = (src1->type == vec_dot_type) ? src1->data : params->wdata;
  7976. const size_t row_size = ggml_row_size(vec_dot_type, ne10);
  7977. const int64_t nr0 = ne01; // src0 rows
  7978. const int64_t nr1 = cne1*ne12*ne13; // src1 rows
  7979. //printf("nr0 = %lld, nr1 = %lld\n", nr0, nr1);
  7980. // distribute the thread work across the inner or outer loop based on which one is larger
  7981. const int64_t nth0 = nr0 > nr1 ? nth : 1; // parallelize by src0 rows
  7982. const int64_t nth1 = nr0 > nr1 ? 1 : nth; // parallelize by src1 rows
  7983. const int64_t ith0 = ith % nth0;
  7984. const int64_t ith1 = ith / nth0;
  7985. const int64_t dr0 = (nr0 + nth0 - 1)/nth0;
  7986. const int64_t dr1 = (nr1 + nth1 - 1)/nth1;
  7987. const int64_t ir010 = dr0*ith0;
  7988. const int64_t ir011 = MIN(ir010 + dr0, nr0);
  7989. const int64_t ir110 = dr1*ith1;
  7990. const int64_t ir111 = MIN(ir110 + dr1, nr1);
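// the nth threads form an nth0 x nth1 grid: either nth x 1 (split over src0 rows) or 1 x nth
// (split over src1 rows); this thread processes the [ir010, ir011) x [ir110, ir111) tile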
  7991. //printf("ir010 = %6lld, ir011 = %6lld, ir110 = %6lld, ir111 = %6lld\n", ir010, ir011, ir110, ir111);
  7992. // threads with no work simply yield (not sure if it helps)
  7993. if (ir010 >= ir011 || ir110 >= ir111) {
  7994. sched_yield();
  7995. return;
  7996. }
  7997. assert(ne12 % ne02 == 0);
  7998. assert(ne13 % ne03 == 0);
  7999. // block-tiling attempt
  8000. const int64_t blck_0 = 16;
  8001. const int64_t blck_1 = 16;
  8002. // attempt to reduce false-sharing (does not seem to make a difference)
  8003. float tmp[16];
  8004. for (int64_t iir1 = ir110; iir1 < ir111; iir1 += blck_1) {
  8005. for (int64_t iir0 = ir010; iir0 < ir011; iir0 += blck_0) {
  8006. for (int64_t ir1 = iir1; ir1 < iir1 + blck_1 && ir1 < ir111; ++ir1) {
  8007. const int64_t i13 = (ir1/(ne12*cne1));
  8008. const int64_t i12 = (ir1 - i13*ne12*cne1)/cne1;
  8009. const int64_t i11 = (ir1 - i13*ne12*cne1 - i12*cne1) + off1;
  8010. // broadcast src0 into src1
  8011. const int64_t i03 = i13/r3;
  8012. const int64_t i02 = i12/r2;
  8013. const int64_t i1 = i11;
  8014. const int64_t i2 = i12;
  8015. const int64_t i3 = i13;
  8016. const char * src0_row = (const char *) src0->data + (0 + i02*nb02 + i03*nb03);
  8017. // desc: when src1 is not a contiguous memory block we have to calculate the offset using the strides
  8018. // if it is, then we have either copied the data to params->wdata and made it contiguous or we are using
  8019. // the original src1 data pointer, so we should index using the indices directly
  8020. // TODO: this is a bit of a hack, we should probably have a better way to handle this
  8021. const char * src1_col = (const char *) wdata +
  8022. (src1_cont || src1->type != vec_dot_type
  8023. ? (i11 + i12*ne11 + i13*ne12*ne11)*row_size
  8024. : (i11*nb11 + i12*nb12 + i13*nb13));
  8025. float * dst_col = (float *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3));
  8026. //for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir011; ++ir0) {
  8027. // vec_dot(ne00, &dst_col[ir0], src0_row + ir0*nb01, src1_col);
  8028. //}
  8029. for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir011; ++ir0) {
  8030. vec_dot(ne00, &tmp[ir0 - iir0], src0_row + ir0*nb01, src1_col);
  8031. }
  8032. memcpy(&dst_col[iir0], tmp, (MIN(iir0 + blck_0, ir011) - iir0)*sizeof(float));
  8033. }
  8034. }
  8035. }
  8036. }
  8037. // ggml_compute_forward_mul_mat_id
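// src0 holds the expert indices: for each row i01, entry `id` (op_params[0]) selects the expert
// matrix dst->src[row_id + 2], which is then multiplied against just that one row of src1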
  8038. static void ggml_compute_forward_mul_mat_id(
  8039. const struct ggml_compute_params * params,
  8040. const struct ggml_tensor * src0,
  8041. const struct ggml_tensor * src1,
  8042. struct ggml_tensor * dst) {
  8043. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8044. // during GGML_TASK_INIT the entire src1 is converted to vec_dot_type
  8045. ggml_compute_forward_mul_mat(params, dst->src[2], src1, dst, 0, dst->ne[1]);
  8046. return;
  8047. }
  8048. const struct ggml_tensor * ids = src0;
  8049. const int id = ggml_get_op_params_i32(dst, 0);
  8050. const int n_as = ggml_get_op_params_i32(dst, 1);
  8051. for (int64_t i01 = 0; i01 < ids->ne[1]; i01++) {
  8052. const int32_t row_id = *(const int32_t *) ((const char *) ids->data + i01*ids->nb[1] + id*ids->nb[0]);
  8053. GGML_ASSERT(row_id >= 0 && row_id < n_as);
  8054. const struct ggml_tensor * src0_row = dst->src[row_id + 2];
  8055. ggml_compute_forward_mul_mat(params, src0_row, src1, dst, i01, 1);
  8056. }
  8057. }
  8058. // ggml_compute_forward_out_prod
  8059. static void ggml_compute_forward_out_prod_f32(
  8060. const struct ggml_compute_params * params,
  8061. const struct ggml_tensor * src0,
  8062. const struct ggml_tensor * src1,
  8063. struct ggml_tensor * dst) {
  8064. // int64_t t0 = ggml_perf_time_us();
  8065. // UNUSED(t0);
  8066. GGML_TENSOR_BINARY_OP_LOCALS
  8067. const int ith = params->ith;
  8068. const int nth = params->nth;
  8069. GGML_ASSERT(ne0 == ne00);
  8070. GGML_ASSERT(ne1 == ne10);
  8071. GGML_ASSERT(ne2 == ne02);
  8072. GGML_ASSERT(ne02 == ne12);
  8073. GGML_ASSERT(ne3 == ne13);
  8074. GGML_ASSERT(ne03 == ne13);
  8075. // we don't support permuted src0 or src1
  8076. GGML_ASSERT(nb00 == sizeof(float));
  8077. // dst cannot be transposed or permuted
  8078. GGML_ASSERT(nb0 == sizeof(float));
  8079. // GGML_ASSERT(nb0 <= nb1);
  8080. // GGML_ASSERT(nb1 <= nb2);
  8081. // GGML_ASSERT(nb2 <= nb3);
  8082. // nb01 >= nb00 - src0 is not transposed
  8083. // compute by src0 rows
  8084. // TODO: #if defined(GGML_USE_CUBLAS) ggml_cuda_out_prod
  8085. // TODO: #if defined(GGML_USE_CLBLAST)
  8086. #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
  8087. bool use_blas = ggml_is_matrix(src0) &&
  8088. ggml_is_matrix(src1) &&
  8089. ggml_is_contiguous(src0) &&
  8090. (ggml_is_contiguous(src1) || ggml_is_transposed(src1));
  8091. #endif
  8092. if (params->type == GGML_TASK_INIT) {
  8093. #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) // gemm beta will zero dst
  8094. if (use_blas) {
  8095. return;
  8096. }
  8097. #endif
  8098. ggml_vec_set_f32(ne0*ne1*ne2*ne3, dst->data, 0);
  8099. return;
  8100. }
  8101. if (params->type == GGML_TASK_FINALIZE) {
  8102. return;
  8103. }
  8104. #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
  8105. if (use_blas) {
  8106. if (params->ith != 0) { // All threads other than the first do no work.
  8107. return;
  8108. }
  8109. // Arguments to ggml_compute_forward_out_prod (expressed as major,minor)
  8110. // src0: (k,n)
  8111. // src1: (k,m)
  8112. // dst: (m,n)
  8113. //
  8114. // Arguments to sgemm (see https://github.com/Reference-LAPACK/lapack/blob/master/BLAS/SRC/sgemm.f)
  8115. // Also expressed as (major,minor)
  8116. // a: (m,k): so src1 transposed
  8117. // b: (k,n): so src0
  8118. // c: (m,n)
  8119. //
  8120. // However, if ggml_is_transposed(src1) is true, then
  8121. // src1->data already contains a transposed version, so sgemm mustn't
  8122. // transpose it further.
  8123. int n = src0->ne[0];
  8124. int k = src0->ne[1];
  8125. int m = src1->ne[0];
  8126. int transposeA, lda;
  8127. if (!ggml_is_transposed(src1)) {
  8128. transposeA = CblasTrans;
  8129. lda = m;
  8130. } else {
  8131. transposeA = CblasNoTrans;
  8132. lda = k;
  8133. }
  8134. float * a = (float *) ((char *) src1->data);
  8135. float * b = (float *) ((char *) src0->data);
  8136. float * c = (float *) ((char *) dst->data);
  8137. cblas_sgemm(CblasRowMajor, transposeA, CblasNoTrans, m, n, k, 1.0, a, lda, b, n, 0.0, c, n);
  8138. return;
  8139. }
  8140. #endif
  8141. // dst[:,:,:,:] = 0
  8142. // for i2,i3:
  8143. // for i1:
  8144. // for i01:
  8145. // for i0:
  8146. // dst[i0,i1,i2,i3] += src0[i0,i01,i2,i3] * src1[i1,i01,i2,i3]
  8147. // parallelize by last three dimensions
  8148. // total rows in dst
  8149. const int64_t nr = ne1*ne2*ne3;
  8150. // rows per thread
  8151. const int64_t dr = (nr + nth - 1)/nth;
  8152. // row range for this thread
  8153. const int64_t ir0 = dr*ith;
  8154. const int64_t ir1 = MIN(ir0 + dr, nr);
  8155. // block-tiling attempt
  8156. const int64_t blck_0 = MAX(GGML_VEC_MAD_UNROLL, 32);
  8157. const int64_t blck_1 = 16;
  8158. for (int64_t bir = ir0; bir < ir1; bir += blck_1) {
  8159. const int64_t bir1 = MIN(bir + blck_1, ir1);
  8160. for (int64_t bi01 = 0; bi01 < ne01; bi01 += blck_0) {
  8161. const int64_t bne01 = MIN(bi01 + blck_0, ne01);
  8162. for (int64_t ir = bir; ir < bir1; ++ir) {
  8163. // dst indices
  8164. const int64_t i3 = ir/(ne2*ne1);
  8165. const int64_t i2 = (ir - i3*ne2*ne1)/ne1;
  8166. const int64_t i1 = (ir - i3*ne2*ne1 - i2*ne1);
  8167. const int64_t i02 = i2;
  8168. const int64_t i03 = i3;
  8169. //const int64_t i10 = i1;
  8170. const int64_t i12 = i2;
  8171. const int64_t i13 = i3;
  8172. #if GGML_VEC_MAD_UNROLL > 2
  8173. const int64_t bne01_unroll = bne01 - (bne01 % GGML_VEC_MAD_UNROLL);
  8174. for (int64_t i01 = bi01; i01 < bne01_unroll; i01 += GGML_VEC_MAD_UNROLL) {
  8175. const int64_t i11 = i01;
  8176. float * s0 = (float *) ((char *) src0->data + ( i01*nb01 + i02*nb02 + i03*nb03));
  8177. float * s1 = (float *) ((char *) src1->data + (i1*nb10 + i11*nb11 + i12*nb12 + i13*nb13));
  8178. float * d = (float *) ((char *) dst->data + ( i1*nb1 + i2*nb2 + i3*nb3));
  8179. ggml_vec_mad_f32_unroll(ne0, nb01, nb11, d, s0, s1);
  8180. }
  8181. for (int64_t i01 = bne01_unroll; i01 < bne01; ++i01) {
  8182. const int64_t i11 = i01;
  8183. float * s0 = (float *) ((char *) src0->data + ( i01*nb01 + i02*nb02 + i03*nb03));
  8184. float * s1 = (float *) ((char *) src1->data + (i1*nb10 + i11*nb11 + i12*nb12 + i13*nb13));
  8185. float * d = (float *) ((char *) dst->data + ( i1*nb1 + i2*nb2 + i3*nb3));
  8186. ggml_vec_mad_f32(ne0, d, s0, *s1);
  8187. }
  8188. #else
  8189. for (int64_t i01 = bi01; i01 < bne01; ++i01) {
  8190. const int64_t i11 = i01;
  8191. float * s0 = (float *) ((char *) src0->data + ( i01*nb01 + i02*nb02 + i03*nb03));
  8192. float * s1 = (float *) ((char *) src1->data + (i1*nb10 + i11*nb11 + i12*nb12 + i13*nb13));
  8193. float * d = (float *) ((char *) dst->data + ( i1*nb1 + i2*nb2 + i3*nb3));
  8194. ggml_vec_mad_f32(ne0, d, s0, *s1);
  8195. }
  8196. #endif
  8197. }
  8198. }
  8199. }
  8200. //int64_t t1 = ggml_perf_time_us();
  8201. //static int64_t acc = 0;
  8202. //acc += t1 - t0;
  8203. //if (t1 - t0 > 10) {
  8204. // printf("\n");
  8205. // printf("ne00 = %5d, ne01 = %5d, ne02 = %5d, ne03 = %5d\n", ne00, ne01, ne02, ne03);
  8206. // printf("nb00 = %5d, nb01 = %5d, nb02 = %5d, nb03 = %5d\n", nb00, nb01, nb02, nb03);
  8207. // printf("ne10 = %5d, ne11 = %5d, ne12 = %5d, ne13 = %5d\n", ne10, ne11, ne12, ne13);
  8208. // printf("nb10 = %5d, nb11 = %5d, nb12 = %5d, nb13 = %5d\n", nb10, nb11, nb12, nb13);
  8209. // printf("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX task %d/%d: %d us, acc = %d\n", ith, nth, (int) (t1 - t0), (int) acc);
  8210. //}
  8211. }
  8212. static void ggml_compute_forward_out_prod_q_f32(
  8213. const struct ggml_compute_params * params,
  8214. const struct ggml_tensor * src0,
  8215. const struct ggml_tensor * src1,
  8216. struct ggml_tensor * dst) {
  8217. // int64_t t0 = ggml_perf_time_us();
  8218. // UNUSED(t0);
  8219. GGML_TENSOR_BINARY_OP_LOCALS;
  8220. const int ith = params->ith;
  8221. const int nth = params->nth;
  8222. const enum ggml_type type = src0->type;
  8223. ggml_to_float_t const dequantize_row_q = type_traits[type].to_float;
  8224. GGML_ASSERT(ne02 == ne12);
  8225. GGML_ASSERT(ne03 == ne13);
  8226. GGML_ASSERT(ne2 == ne12);
  8227. GGML_ASSERT(ne3 == ne13);
  8228. // we don't support permuted src0 dim0
  8229. GGML_ASSERT(nb00 == ggml_type_size(type));
  8230. // dst dim0 cannot be transposed or permuted
  8231. GGML_ASSERT(nb0 == sizeof(float));
  8232. // GGML_ASSERT(nb0 <= nb1);
  8233. // GGML_ASSERT(nb1 <= nb2);
  8234. // GGML_ASSERT(nb2 <= nb3);
  8235. GGML_ASSERT(ne0 == ne00);
  8236. GGML_ASSERT(ne1 == ne10);
  8237. GGML_ASSERT(ne2 == ne02);
  8238. GGML_ASSERT(ne3 == ne03);
  8239. // nb01 >= nb00 - src0 is not transposed
  8240. // compute by src0 rows
  8241. // TODO: #if defined(GGML_USE_CUBLAS) ggml_cuda_out_prod
  8242. // TODO: #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) || defined(GGML_USE_CLBLAST)
  8243. if (params->type == GGML_TASK_INIT) {
  8244. ggml_vec_set_f32(ne0*ne1*ne2*ne3, dst->data, 0);
  8245. return;
  8246. }
  8247. if (params->type == GGML_TASK_FINALIZE) {
  8248. return;
  8249. }
  8250. // parallelize by last three dimensions
  8251. // total rows in dst
  8252. const int64_t nr = ne1*ne2*ne3;
  8253. // rows per thread
  8254. const int64_t dr = (nr + nth - 1)/nth;
  8255. // row range for this thread
  8256. const int64_t ir0 = dr*ith;
  8257. const int64_t ir1 = MIN(ir0 + dr, nr);
  8258. // dst[:,:,:,:] = 0
  8259. // for i2,i3:
  8260. // for i1:
  8261. // for i01:
  8262. // for i0:
  8263. // dst[i0,i1,i2,i3] += src0[i0,i01,i2,i3] * src1[i1,i01,i2,i3]
  8264. float * wdata = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32) * ith;
  8265. for (int64_t ir = ir0; ir < ir1; ++ir) {
  8266. // dst indices
  8267. const int64_t i3 = ir/(ne2*ne1);
  8268. const int64_t i2 = (ir - i3*ne2*ne1)/ne1;
  8269. const int64_t i1 = (ir - i3*ne2*ne1 - i2*ne1);
  8270. const int64_t i02 = i2;
  8271. const int64_t i03 = i3;
  8272. //const int64_t i10 = i1;
  8273. const int64_t i12 = i2;
  8274. const int64_t i13 = i3;
  8275. for (int64_t i01 = 0; i01 < ne01; ++i01) {
  8276. const int64_t i11 = i01;
  8277. float * s0 = (float *) ((char *) src0->data + ( i01*nb01 + i02*nb02 + i03*nb03));
  8278. float * s1 = (float *) ((char *) src1->data + (i1*nb10 + i11*nb11 + i12*nb12 + i13*nb13));
  8279. float * d = (float *) ((char *) dst->data + ( i1*nb1 + i2*nb2 + i3*nb3));
  8280. dequantize_row_q(s0, wdata, ne0);
  8281. ggml_vec_mad_f32(ne0, d, wdata, *s1);
  8282. }
  8283. }
  8284. //int64_t t1 = ggml_perf_time_us();
  8285. //static int64_t acc = 0;
  8286. //acc += t1 - t0;
  8287. //if (t1 - t0 > 10) {
  8288. // printf("\n");
  8289. // printf("ne00 = %5d, ne01 = %5d, ne02 = %5d, ne03 = %5d\n", ne00, ne01, ne02, ne03);
  8290. // printf("nb00 = %5d, nb01 = %5d, nb02 = %5d, nb03 = %5d\n", nb00, nb01, nb02, nb03);
  8291. // printf("ne10 = %5d, ne11 = %5d, ne12 = %5d, ne13 = %5d\n", ne10, ne11, ne12, ne13);
  8292. // printf("nb10 = %5d, nb11 = %5d, nb12 = %5d, nb13 = %5d\n", nb10, nb11, nb12, nb13);
  8293. // printf("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX task %d/%d: %d us, acc = %d\n", ith, nth, (int) (t1 - t0), (int) acc);
  8294. //}
  8295. }
  8296. static void ggml_compute_forward_out_prod(
  8297. const struct ggml_compute_params * params,
  8298. const struct ggml_tensor * src0,
  8299. const struct ggml_tensor * src1,
  8300. struct ggml_tensor * dst) {
  8301. switch (src0->type) {
  8302. case GGML_TYPE_Q4_0:
  8303. case GGML_TYPE_Q4_1:
  8304. case GGML_TYPE_Q5_0:
  8305. case GGML_TYPE_Q5_1:
  8306. case GGML_TYPE_Q8_0:
  8307. case GGML_TYPE_Q2_K:
  8308. case GGML_TYPE_Q3_K:
  8309. case GGML_TYPE_Q4_K:
  8310. case GGML_TYPE_Q5_K:
  8311. case GGML_TYPE_Q6_K:
  8312. {
  8313. ggml_compute_forward_out_prod_q_f32(params, src0, src1, dst);
  8314. } break;
  8315. case GGML_TYPE_F16:
  8316. {
  8317. GGML_ASSERT(false); // todo
  8318. // ggml_compute_forward_out_prod_f16_f32(params, src0, src1, dst);
  8319. } break;
  8320. case GGML_TYPE_F32:
  8321. {
  8322. ggml_compute_forward_out_prod_f32(params, src0, src1, dst);
  8323. } break;
  8324. default:
  8325. {
  8326. GGML_ASSERT(false);
  8327. } break;
  8328. }
  8329. }
  8330. // ggml_compute_forward_scale
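// multiplies every element of src0 by the scalar stored in src1; rows are split across
// threads and, when dst does not alias src0, each row is copied first and then scaled in place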
  8331. static void ggml_compute_forward_scale_f32(
  8332. const struct ggml_compute_params * params,
  8333. const struct ggml_tensor * src0,
  8334. const struct ggml_tensor * src1,
  8335. struct ggml_tensor * dst) {
  8336. GGML_ASSERT(ggml_is_contiguous(src0));
  8337. GGML_ASSERT(ggml_is_contiguous(dst));
  8338. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  8339. GGML_ASSERT(ggml_is_scalar(src1));
  8340. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8341. return;
  8342. }
  8343. // scale factor
  8344. const float v = *(float *) src1->data;
  8345. const int ith = params->ith;
  8346. const int nth = params->nth;
  8347. const int nc = src0->ne[0];
  8348. const int nr = ggml_nrows(src0);
  8349. // rows per thread
  8350. const int dr = (nr + nth - 1)/nth;
  8351. // row range for this thread
  8352. const int ir0 = dr*ith;
  8353. const int ir1 = MIN(ir0 + dr, nr);
  8354. const size_t nb01 = src0->nb[1];
  8355. const size_t nb1 = dst->nb[1];
  8356. for (int i1 = ir0; i1 < ir1; i1++) {
  8357. if (dst->data != src0->data) {
  8358. // src0 is same shape as dst => same indices
  8359. memcpy((char *)dst->data + i1*nb1, (char *)src0->data + i1*nb01, nc * sizeof(float));
  8360. }
  8361. ggml_vec_scale_f32(nc, (float *) ((char *) dst->data + i1*nb1), v);
  8362. }
  8363. }
  8364. static void ggml_compute_forward_scale(
  8365. const struct ggml_compute_params * params,
  8366. const struct ggml_tensor * src0,
  8367. const struct ggml_tensor * src1,
  8368. struct ggml_tensor * dst) {
  8369. switch (src0->type) {
  8370. case GGML_TYPE_F32:
  8371. {
  8372. ggml_compute_forward_scale_f32(params, src0, src1, dst);
  8373. } break;
  8374. default:
  8375. {
  8376. GGML_ASSERT(false);
  8377. } break;
  8378. }
  8379. }
  8380. // ggml_compute_forward_set
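// copies src0 into dst (in the INIT phase, unless the op is marked inplace) and then writes
// src1 into the sub-region described by op_params (strides nb1/nb2/nb3 and a byte offset),
// i.e. dst is addressed with src1's shape at that offset during the write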
  8381. static void ggml_compute_forward_set_f32(
  8382. const struct ggml_compute_params * params,
  8383. const struct ggml_tensor * src0,
  8384. const struct ggml_tensor * src1,
  8385. struct ggml_tensor * dst) {
  8386. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  8387. GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));
8388. // view src0 and dst with these strides and data offset in bytes during set
  8389. // nb0 is implicitly element_size because src0 and dst are contiguous
  8390. size_t nb1 = ((int32_t *) dst->op_params)[0];
  8391. size_t nb2 = ((int32_t *) dst->op_params)[1];
  8392. size_t nb3 = ((int32_t *) dst->op_params)[2];
  8393. size_t offset = ((int32_t *) dst->op_params)[3];
  8394. bool inplace = (bool) ((int32_t *) dst->op_params)[4];
  8395. if (!inplace && (params->type == GGML_TASK_INIT)) {
  8396. // memcpy needs to be synchronized across threads to avoid race conditions.
  8397. // => do it in INIT phase
  8398. memcpy(
  8399. ((char *) dst->data),
  8400. ((char *) src0->data),
  8401. ggml_nbytes(dst));
  8402. }
  8403. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8404. return;
  8405. }
  8406. const int ith = params->ith;
  8407. const int nth = params->nth;
  8408. const int nr = ggml_nrows(src1);
  8409. const int nc = src1->ne[0];
  8410. GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne)
  8411. GGML_TENSOR_LOCALS(size_t, nb1, src1, nb)
  8412. // src0 and dst as viewed during set
  8413. const size_t nb0 = ggml_element_size(src0);
  8414. const int im0 = (ne10 == 0 ? 0 : ne10-1);
  8415. const int im1 = (ne11 == 0 ? 0 : ne11-1);
  8416. const int im2 = (ne12 == 0 ? 0 : ne12-1);
  8417. const int im3 = (ne13 == 0 ? 0 : ne13-1);
  8418. GGML_ASSERT(offset + im0*nb0 + im1*nb1 + im2*nb2 + im3*nb3 <= ggml_nbytes(dst));
  8419. GGML_ASSERT(nb10 == sizeof(float));
  8420. // rows per thread
  8421. const int dr = (nr + nth - 1)/nth;
  8422. // row range for this thread
  8423. const int ir0 = dr*ith;
  8424. const int ir1 = MIN(ir0 + dr, nr);
  8425. for (int ir = ir0; ir < ir1; ++ir) {
  8426. // src0 and dst are viewed with shape of src1 and offset
  8427. // => same indices
  8428. const int i3 = ir/(ne12*ne11);
  8429. const int i2 = (ir - i3*ne12*ne11)/ne11;
  8430. const int i1 = (ir - i3*ne12*ne11 - i2*ne11);
  8431. ggml_vec_cpy_f32(nc,
  8432. (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + offset),
  8433. (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11));
  8434. }
  8435. }
  8436. static void ggml_compute_forward_set(
  8437. const struct ggml_compute_params * params,
  8438. const struct ggml_tensor * src0,
  8439. const struct ggml_tensor * src1,
  8440. struct ggml_tensor * dst) {
  8441. switch (src0->type) {
  8442. case GGML_TYPE_F32:
  8443. {
  8444. ggml_compute_forward_set_f32(params, src0, src1, dst);
  8445. } break;
  8446. case GGML_TYPE_F16:
  8447. case GGML_TYPE_Q4_0:
  8448. case GGML_TYPE_Q4_1:
  8449. case GGML_TYPE_Q5_0:
  8450. case GGML_TYPE_Q5_1:
  8451. case GGML_TYPE_Q8_0:
  8452. case GGML_TYPE_Q8_1:
  8453. case GGML_TYPE_Q2_K:
  8454. case GGML_TYPE_Q3_K:
  8455. case GGML_TYPE_Q4_K:
  8456. case GGML_TYPE_Q5_K:
  8457. case GGML_TYPE_Q6_K:
  8458. default:
  8459. {
  8460. GGML_ASSERT(false);
  8461. } break;
  8462. }
  8463. }
  8464. // ggml_compute_forward_cpy
  8465. static void ggml_compute_forward_cpy(
  8466. const struct ggml_compute_params * params,
  8467. const struct ggml_tensor * src0,
  8468. struct ggml_tensor * dst) {
  8469. ggml_compute_forward_dup(params, src0, dst);
  8470. }
  8471. // ggml_compute_forward_cont
  8472. static void ggml_compute_forward_cont(
  8473. const struct ggml_compute_params * params,
  8474. const struct ggml_tensor * src0,
  8475. struct ggml_tensor * dst) {
  8476. ggml_compute_forward_dup(params, src0, dst);
  8477. }
  8478. // ggml_compute_forward_reshape
  8479. static void ggml_compute_forward_reshape(
  8480. const struct ggml_compute_params * params,
  8481. const struct ggml_tensor * src0,
  8482. struct ggml_tensor * dst) {
  8483. // NOP
  8484. UNUSED(params);
  8485. UNUSED(src0);
  8486. UNUSED(dst);
  8487. }
  8488. // ggml_compute_forward_view
  8489. static void ggml_compute_forward_view(
  8490. const struct ggml_compute_params * params,
  8491. const struct ggml_tensor * src0) {
  8492. // NOP
  8493. UNUSED(params);
  8494. UNUSED(src0);
  8495. }
  8496. // ggml_compute_forward_permute
  8497. static void ggml_compute_forward_permute(
  8498. const struct ggml_compute_params * params,
  8499. const struct ggml_tensor * src0) {
  8500. // NOP
  8501. UNUSED(params);
  8502. UNUSED(src0);
  8503. }
  8504. // ggml_compute_forward_transpose
  8505. static void ggml_compute_forward_transpose(
  8506. const struct ggml_compute_params * params,
  8507. const struct ggml_tensor * src0) {
  8508. // NOP
  8509. UNUSED(params);
  8510. UNUSED(src0);
  8511. }
  8512. // ggml_compute_forward_get_rows
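// src1 holds int32 row indices; each selected row of src0 is converted to f32 (dequantized
// for quantized types, fp16 -> fp32 for F16, plain copy for F32) and written to the
// corresponding row of dst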
  8513. static void ggml_compute_forward_get_rows_q(
  8514. const struct ggml_compute_params * params,
  8515. const struct ggml_tensor * src0,
  8516. const struct ggml_tensor * src1,
  8517. struct ggml_tensor * dst) {
  8518. assert(params->ith == 0);
  8519. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8520. return;
  8521. }
  8522. GGML_TENSOR_BINARY_OP_LOCALS
  8523. const int64_t nc = ne00;
  8524. const int64_t nr = ggml_nelements(src1); GGML_UNUSED(nr);
  8525. const enum ggml_type type = src0->type;
  8526. ggml_to_float_t const dequantize_row_q = type_traits[type].to_float;
  8527. assert(ne0 == nc);
  8528. assert(ne02 == ne11);
  8529. assert(nb00 == ggml_type_size(type));
  8530. assert(ggml_nrows(dst) == nr);
  8531. // TODO: multi-thread
  8532. for (int64_t i12 = 0; i12 < ne12; ++i12) {
  8533. for (int64_t i11 = 0; i11 < ne11; ++i11) {
  8534. for (int64_t i10 = 0; i10 < ne10; ++i10) {
  8535. const int64_t i01 = *(int32_t *) ((char *) src1->data + i10*nb10 + i11*nb11 + i12*nb12);
  8536. dequantize_row_q(
  8537. (const void *) ((char *) src0->data + i01*nb01 + i11*nb02 + i12*nb03),
  8538. (float *) ((char *) dst->data + i10*nb1 + i11*nb2 + i12*nb3), nc);
  8539. }
  8540. }
  8541. }
  8542. }
  8543. static void ggml_compute_forward_get_rows_f16(
  8544. const struct ggml_compute_params * params,
  8545. const struct ggml_tensor * src0,
  8546. const struct ggml_tensor * src1,
  8547. struct ggml_tensor * dst) {
  8548. assert(params->ith == 0);
  8549. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8550. return;
  8551. }
  8552. GGML_TENSOR_BINARY_OP_LOCALS
  8553. const int64_t nc = ne00;
  8554. const int64_t nr = ggml_nelements(src1); GGML_UNUSED(nr);
  8555. assert(ne0 == nc);
  8556. assert(ne02 == ne11);
  8557. assert(nb00 == sizeof(ggml_fp16_t));
  8558. assert(ggml_nrows(dst) == nr);
  8559. // TODO: multi-thread
  8560. for (int64_t i12 = 0; i12 < ne12; ++i12) {
  8561. for (int64_t i11 = 0; i11 < ne11; ++i11) {
  8562. for (int64_t i10 = 0; i10 < ne10; ++i10) {
  8563. const int64_t i01 = *(int32_t *) ((char *) src1->data + i10*nb10 + i11*nb11 + i12*nb12);
  8564. ggml_fp16_to_fp32_row(
  8565. (const void *) ((char *) src0->data + i01*nb01 + i11*nb02 + i12*nb03),
  8566. (float *) ((char *) dst->data + i10*nb1 + i11*nb2 + i12*nb3), nc);
  8567. }
  8568. }
  8569. }
  8570. }
  8571. static void ggml_compute_forward_get_rows_f32(
  8572. const struct ggml_compute_params * params,
  8573. const struct ggml_tensor * src0,
  8574. const struct ggml_tensor * src1,
  8575. struct ggml_tensor * dst) {
  8576. assert(params->ith == 0);
  8577. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8578. return;
  8579. }
  8580. GGML_TENSOR_BINARY_OP_LOCALS
  8581. const int64_t nc = ne00;
  8582. const int64_t nr = ggml_nelements(src1); GGML_UNUSED(nr);
  8583. assert(ne0 == nc);
  8584. assert(ne02 == ne11);
  8585. assert(nb00 == sizeof(float));
  8586. assert(ggml_nrows(dst) == nr);
  8587. // TODO: multi-thread
  8588. for (int64_t i12 = 0; i12 < ne12; ++i12) {
  8589. for (int64_t i11 = 0; i11 < ne11; ++i11) {
  8590. for (int64_t i10 = 0; i10 < ne10; ++i10) {
  8591. const int64_t i01 = *(int32_t *) ((char *) src1->data + i10*nb10 + i11*nb11 + i12*nb12);
  8592. ggml_vec_cpy_f32(nc,
  8593. (float *) ((char *) dst->data + i10*nb1 + i11*nb2 + i12*nb3),
  8594. (float *) ((char *) src0->data + i01*nb01 + i11*nb02 + i12*nb03));
  8595. }
  8596. }
  8597. }
  8598. }
  8599. static void ggml_compute_forward_get_rows(
  8600. const struct ggml_compute_params * params,
  8601. const struct ggml_tensor * src0,
  8602. const struct ggml_tensor * src1,
  8603. struct ggml_tensor * dst) {
  8604. switch (src0->type) {
  8605. case GGML_TYPE_Q4_0:
  8606. case GGML_TYPE_Q4_1:
  8607. case GGML_TYPE_Q5_0:
  8608. case GGML_TYPE_Q5_1:
  8609. case GGML_TYPE_Q8_0:
  8610. case GGML_TYPE_Q8_1:
  8611. case GGML_TYPE_Q2_K:
  8612. case GGML_TYPE_Q3_K:
  8613. case GGML_TYPE_Q4_K:
  8614. case GGML_TYPE_Q5_K:
  8615. case GGML_TYPE_Q6_K:
  8616. {
  8617. ggml_compute_forward_get_rows_q(params, src0, src1, dst);
  8618. } break;
  8619. case GGML_TYPE_F16:
  8620. {
  8621. ggml_compute_forward_get_rows_f16(params, src0, src1, dst);
  8622. } break;
  8623. case GGML_TYPE_F32:
  8624. {
  8625. ggml_compute_forward_get_rows_f32(params, src0, src1, dst);
  8626. } break;
  8627. default:
  8628. {
  8629. GGML_ASSERT(false);
  8630. } break;
  8631. }
  8632. //static bool first = true;
  8633. //printf("ne0 = %d, ne1 = %d, ne2 = %d\n", dst->ne[0], dst->ne[1], dst->ne[2]);
  8634. //if (first) {
  8635. // first = false;
  8636. //} else {
  8637. // for (int k = 0; k < dst->ne[1]; ++k) {
  8638. // for (int j = 0; j < dst->ne[0]/16; ++j) {
  8639. // for (int i = 0; i < 16; ++i) {
  8640. // printf("%8.4f ", ((float *) dst->data)[k*dst->ne[0] + j*16 + i]);
  8641. // }
  8642. // printf("\n");
  8643. // }
  8644. // printf("\n");
  8645. // }
  8646. // printf("\n");
  8647. // exit(0);
  8648. //}
  8649. }
  8650. // ggml_compute_forward_get_rows_back
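// backward of get_rows: dst is zeroed in the INIT phase and each row of src0 (the incoming
// gradient) is accumulated into dst at the row index given by src1; repeated indices sum
// their contributions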
  8651. static void ggml_compute_forward_get_rows_back_f32_f16(
  8652. const struct ggml_compute_params * params,
  8653. const struct ggml_tensor * src0,
  8654. const struct ggml_tensor * src1,
  8655. struct ggml_tensor * dst) {
  8656. GGML_ASSERT(params->ith == 0);
  8657. GGML_ASSERT(ggml_is_contiguous(dst));
  8658. // ggml_compute_forward_dup_same_cont(params, opt0, dst);
  8659. if (params->type == GGML_TASK_INIT) {
  8660. memset(dst->data, 0, ggml_nbytes(dst));
  8661. }
  8662. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8663. return;
  8664. }
  8665. const int nc = src0->ne[0];
  8666. const int nr = ggml_nelements(src1);
  8667. GGML_ASSERT( dst->ne[0] == nc);
  8668. GGML_ASSERT(src0->nb[0] == sizeof(ggml_fp16_t));
  8669. for (int i = 0; i < nr; ++i) {
  8670. const int r = ((int32_t *) src1->data)[i];
  8671. for (int j = 0; j < nc; ++j) {
  8672. ggml_fp16_t v = ((ggml_fp16_t *) ((char *) src0->data + i*src0->nb[1]))[j];
  8673. ((float *) ((char *) dst->data + r*dst->nb[1]))[j] += GGML_FP16_TO_FP32(v);
  8674. }
  8675. }
  8676. }
  8677. static void ggml_compute_forward_get_rows_back_f32(
  8678. const struct ggml_compute_params * params,
  8679. const struct ggml_tensor * src0,
  8680. const struct ggml_tensor * src1,
  8681. struct ggml_tensor * dst) {
  8682. GGML_ASSERT(params->ith == 0);
  8683. GGML_ASSERT(ggml_is_contiguous(dst));
  8684. // ggml_compute_forward_dup_same_cont(params, opt0, dst);
  8685. if (params->type == GGML_TASK_INIT) {
  8686. memset(dst->data, 0, ggml_nbytes(dst));
  8687. }
  8688. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8689. return;
  8690. }
  8691. const int nc = src0->ne[0];
  8692. const int nr = ggml_nelements(src1);
  8693. GGML_ASSERT( dst->ne[0] == nc);
  8694. GGML_ASSERT(src0->nb[0] == sizeof(float));
  8695. for (int i = 0; i < nr; ++i) {
  8696. const int r = ((int32_t *) src1->data)[i];
  8697. ggml_vec_add_f32(nc,
  8698. (float *) ((char *) dst->data + r*dst->nb[1]),
  8699. (float *) ((char *) dst->data + r*dst->nb[1]),
  8700. (float *) ((char *) src0->data + i*src0->nb[1]));
  8701. }
  8702. }
  8703. static void ggml_compute_forward_get_rows_back(
  8704. const struct ggml_compute_params * params,
  8705. const struct ggml_tensor * src0,
  8706. const struct ggml_tensor * src1,
  8707. struct ggml_tensor * dst) {
  8708. switch (src0->type) {
  8709. case GGML_TYPE_F16:
  8710. {
  8711. ggml_compute_forward_get_rows_back_f32_f16(params, src0, src1, dst);
  8712. } break;
  8713. case GGML_TYPE_F32:
  8714. {
  8715. ggml_compute_forward_get_rows_back_f32(params, src0, src1, dst);
  8716. } break;
  8717. default:
  8718. {
  8719. GGML_ASSERT(false);
  8720. } break;
  8721. }
  8722. //static bool first = true;
  8723. //printf("ne0 = %d, ne1 = %d, ne2 = %d\n", dst->ne[0], dst->ne[1], dst->ne[2]);
  8724. //if (first) {
  8725. // first = false;
  8726. //} else {
  8727. // for (int k = 0; k < dst->ne[1]; ++k) {
  8728. // for (int j = 0; j < dst->ne[0]/16; ++j) {
  8729. // for (int i = 0; i < 16; ++i) {
  8730. // printf("%8.4f ", ((float *) dst->data)[k*dst->ne[0] + j*16 + i]);
  8731. // }
  8732. // printf("\n");
  8733. // }
  8734. // printf("\n");
  8735. // }
  8736. // printf("\n");
  8737. // exit(0);
  8738. //}
  8739. }
  8740. // ggml_compute_forward_diag
  8741. static void ggml_compute_forward_diag_f32(
  8742. const struct ggml_compute_params * params,
  8743. const struct ggml_tensor * src0,
  8744. struct ggml_tensor * dst) {
  8745. GGML_ASSERT(params->ith == 0);
  8746. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8747. return;
  8748. }
  8749. // TODO: handle transposed/permuted matrices
  8750. GGML_TENSOR_UNARY_OP_LOCALS
  8751. GGML_ASSERT(ne00 == ne0);
  8752. GGML_ASSERT(ne00 == ne1);
  8753. GGML_ASSERT(ne01 == 1);
  8754. GGML_ASSERT(ne02 == ne2);
  8755. GGML_ASSERT(ne03 == ne3);
  8756. GGML_ASSERT(nb00 == sizeof(float));
  8757. GGML_ASSERT(nb0 == sizeof(float));
  8758. for (int i3 = 0; i3 < ne3; i3++) {
  8759. for (int i2 = 0; i2 < ne2; i2++) {
  8760. for (int i1 = 0; i1 < ne1; i1++) {
  8761. float * d = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1);
  8762. float * s = (float *)((char *) src0->data + i3*nb03 + i2*nb02);
  8763. for (int i0 = 0; i0 < i1; i0++) {
  8764. d[i0] = 0;
  8765. }
  8766. d[i1] = s[i1];
  8767. for (int i0 = i1+1; i0 < ne0; i0++) {
  8768. d[i0] = 0;
  8769. }
  8770. }
  8771. }
  8772. }
  8773. }
  8774. static void ggml_compute_forward_diag(
  8775. const struct ggml_compute_params * params,
  8776. const struct ggml_tensor * src0,
  8777. struct ggml_tensor * dst) {
  8778. switch (src0->type) {
  8779. case GGML_TYPE_F32:
  8780. {
  8781. ggml_compute_forward_diag_f32(params, src0, dst);
  8782. } break;
  8783. default:
  8784. {
  8785. GGML_ASSERT(false);
  8786. } break;
  8787. }
  8788. }
  8789. // ggml_compute_forward_diag_mask_inf
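// masks the upper-triangular part of each row: for row j, columns i > n_past + j are set to
// `value` (-INFINITY here, 0 in the _zero variant below), which is typically how causal
// attention masking is implemented; rows are interleaved across threads (j starts at ith and
// steps by nth)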
  8790. static void ggml_compute_forward_diag_mask_f32(
  8791. const struct ggml_compute_params * params,
  8792. const struct ggml_tensor * src0,
  8793. struct ggml_tensor * dst,
  8794. const float value) {
  8795. const int ith = params->ith;
  8796. const int nth = params->nth;
  8797. const int n_past = ((int32_t *) dst->op_params)[0];
  8798. const bool inplace = src0->data == dst->data;
  8799. GGML_ASSERT(n_past >= 0);
  8800. if (!inplace && (params->type == GGML_TASK_INIT)) {
  8801. // memcpy needs to be synchronized across threads to avoid race conditions.
  8802. // => do it in INIT phase
  8803. GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));
  8804. GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));
  8805. memcpy(
  8806. ((char *) dst->data),
  8807. ((char *) src0->data),
  8808. ggml_nbytes(dst));
  8809. }
  8810. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8811. return;
  8812. }
  8813. // TODO: handle transposed/permuted matrices
  8814. const int n = ggml_nrows(src0);
  8815. const int nc = src0->ne[0];
  8816. const int nr = src0->ne[1];
  8817. const int nz = n/nr;
  8818. GGML_ASSERT( dst->nb[0] == sizeof(float));
  8819. GGML_ASSERT(src0->nb[0] == sizeof(float));
  8820. for (int k = 0; k < nz; k++) {
  8821. for (int j = ith; j < nr; j += nth) {
  8822. for (int i = n_past; i < nc; i++) {
  8823. if (i > n_past + j) {
  8824. *(float *)((char *) dst->data + k*dst->nb[2] + j*dst->nb[1] + i*dst->nb[0]) = value;
  8825. }
  8826. }
  8827. }
  8828. }
  8829. }
  8830. static void ggml_compute_forward_diag_mask_inf(
  8831. const struct ggml_compute_params * params,
  8832. const struct ggml_tensor * src0,
  8833. struct ggml_tensor * dst) {
  8834. switch (src0->type) {
  8835. case GGML_TYPE_F32:
  8836. {
  8837. ggml_compute_forward_diag_mask_f32(params, src0, dst, -INFINITY);
  8838. } break;
  8839. default:
  8840. {
  8841. GGML_ASSERT(false);
  8842. } break;
  8843. }
  8844. }
  8845. static void ggml_compute_forward_diag_mask_zero(
  8846. const struct ggml_compute_params * params,
  8847. const struct ggml_tensor * src0,
  8848. struct ggml_tensor * dst) {
  8849. switch (src0->type) {
  8850. case GGML_TYPE_F32:
  8851. {
  8852. ggml_compute_forward_diag_mask_f32(params, src0, dst, 0);
  8853. } break;
  8854. default:
  8855. {
  8856. GGML_ASSERT(false);
  8857. } break;
  8858. }
  8859. }
  8860. // ggml_compute_forward_soft_max
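// numerically stable softmax over each row of src0:
//   w_i = scale*x_i + mask_i   (mask from src1, broadcast across rows, optional)
//   y_i = exp(w_i - max_j w_j) / sum_k exp(w_k - max_j w_j)
// the scaled/masked row is staged in a per-thread scratch buffer and exp() is taken via the
// precomputed fp16 lookup table ggml_table_exp_f16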
  8861. static void ggml_compute_forward_soft_max_f32(
  8862. const struct ggml_compute_params * params,
  8863. const struct ggml_tensor * src0,
  8864. const struct ggml_tensor * src1,
  8865. struct ggml_tensor * dst) {
  8866. assert(ggml_is_contiguous(dst));
  8867. assert(ggml_are_same_shape(src0, dst));
  8868. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8869. return;
  8870. }
  8871. float scale = 1.0f;
  8872. memcpy(&scale, (float *) dst->op_params + 0, sizeof(float));
  8873. // TODO: handle transposed/permuted matrices
  8874. const int ith = params->ith;
  8875. const int nth = params->nth;
  8876. const int64_t ne11 = src1 ? src1->ne[1] : 1;
  8877. const int nc = src0->ne[0];
  8878. const int nr = ggml_nrows(src0);
  8879. // rows per thread
  8880. const int dr = (nr + nth - 1)/nth;
  8881. // row range for this thread
  8882. const int ir0 = dr*ith;
  8883. const int ir1 = MIN(ir0 + dr, nr);
  8884. float * wp = (float *) params->wdata + (nc + CACHE_LINE_SIZE_F32) * ith;
  8885. for (int i1 = ir0; i1 < ir1; i1++) {
  8886. float * sp = (float *)((char *) src0->data + i1*src0->nb[1]);
  8887. float * dp = (float *)((char *) dst->data + i1*dst->nb[1]);
  8888. // broadcast the mask across rows
  8889. float * mp = src1 ? (float *)((char *) src1->data + (i1%ne11)*src1->nb[1]) : NULL;
  8890. ggml_vec_cpy_f32 (nc, wp, sp);
  8891. ggml_vec_scale_f32(nc, wp, scale);
  8892. if (mp) {
  8893. ggml_vec_acc_f32(nc, wp, mp);
  8894. }
  8895. #ifndef NDEBUG
  8896. for (int i = 0; i < nc; ++i) {
  8897. //printf("p[%d] = %f\n", i, p[i]);
  8898. assert(!isnan(wp[i]));
  8899. }
  8900. #endif
  8901. float max = -INFINITY;
  8902. ggml_vec_max_f32(nc, &max, wp);
  8903. ggml_float sum = 0.0;
  8904. uint16_t scvt;
  8905. for (int i = 0; i < nc; i++) {
  8906. if (wp[i] == -INFINITY) {
  8907. dp[i] = 0.0f;
  8908. } else {
  8909. // const float val = (wp[i] == -INFINITY) ? 0.0 : exp(wp[i] - max);
  8910. ggml_fp16_t s = GGML_FP32_TO_FP16(wp[i] - max);
  8911. memcpy(&scvt, &s, sizeof(scvt));
  8912. const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt]);
  8913. sum += (ggml_float)val;
  8914. dp[i] = val;
  8915. }
  8916. }
  8917. assert(sum > 0.0);
  8918. sum = 1.0/sum;
  8919. ggml_vec_scale_f32(nc, dp, sum);
  8920. #ifndef NDEBUG
  8921. for (int i = 0; i < nc; ++i) {
  8922. assert(!isnan(dp[i]));
  8923. assert(!isinf(dp[i]));
  8924. }
  8925. #endif
  8926. }
  8927. }
  8928. static void ggml_compute_forward_soft_max(
  8929. const struct ggml_compute_params * params,
  8930. const struct ggml_tensor * src0,
  8931. const struct ggml_tensor * src1,
  8932. struct ggml_tensor * dst) {
  8933. switch (src0->type) {
  8934. case GGML_TYPE_F32:
  8935. {
  8936. ggml_compute_forward_soft_max_f32(params, src0, src1, dst);
  8937. } break;
  8938. default:
  8939. {
  8940. GGML_ASSERT(false);
  8941. } break;
  8942. }
  8943. }
  8944. // ggml_compute_forward_soft_max_back
  8945. static void ggml_compute_forward_soft_max_back_f32(
  8946. const struct ggml_compute_params * params,
  8947. const struct ggml_tensor * src0,
  8948. const struct ggml_tensor * src1,
  8949. struct ggml_tensor * dst) {
  8950. GGML_ASSERT(ggml_is_contiguous(src0));
  8951. GGML_ASSERT(ggml_is_contiguous(src1));
  8952. GGML_ASSERT(ggml_is_contiguous(dst));
  8953. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  8954. GGML_ASSERT(ggml_are_same_shape(src1, dst));
  8955. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8956. return;
  8957. }
  8958. // TODO: handle transposed/permuted matrices
  8959. const int ith = params->ith;
  8960. const int nth = params->nth;
  8961. const int nc = src0->ne[0];
  8962. const int nr = ggml_nrows(src0);
  8963. // rows per thread
  8964. const int dr = (nr + nth - 1)/nth;
  8965. // row range for this thread
  8966. const int ir0 = dr*ith;
  8967. const int ir1 = MIN(ir0 + dr, nr);
  8968. for (int i1 = ir0; i1 < ir1; i1++) {
  8969. float *dy = (float *)((char *) src0->data + i1*src0->nb[1]);
  8970. float *y = (float *)((char *) src1->data + i1*src1->nb[1]);
  8971. float *dx = (float *)((char *) dst->data + i1*dst->nb[1]);
  8972. #ifndef NDEBUG
  8973. for (int i = 0; i < nc; ++i) {
  8974. //printf("p[%d] = %f\n", i, p[i]);
  8975. assert(!isnan(dy[i]));
  8976. assert(!isnan(y[i]));
  8977. }
  8978. #endif
  8979. // Jii = yi - yi*yi
  8980. // Jij = -yi*yj
  8981. // J = diag(y)-y.T*y
  8982. // dx = J * dy
  8983. // dxk = sum_i(Jki * dyi)
  8984. // dxk = sum_i(-yk*yi * dyi) - (-yk*yk)*dyk + (yk - yk*yk)*dyk
  8985. // dxk = sum_i(-yk*yi * dyi) + yk*yk*dyk + yk*dyk - yk*yk*dyk
  8986. // dxk = sum_i(-yk*yi * dyi) + yk*dyk
  8987. // dxk = -yk * sum_i(yi * dyi) + yk*dyk
  8988. // dxk = -yk * dot(y, dy) + yk*dyk
  8989. // dxk = yk * (- dot(y, dy) + dyk)
  8990. // dxk = yk * (dyk - dot(y, dy))
  8991. //
  8992. // post-order:
  8993. // dot_y_dy := dot(y, dy)
  8994. // dx := dy
  8995. // dx := dx - dot_y_dy
  8996. // dx := dx * y
  8997. // linear runtime, no additional memory
  8998. float dot_y_dy = 0;
  8999. ggml_vec_dot_f32 (nc, &dot_y_dy, y, dy);
  9000. ggml_vec_cpy_f32 (nc, dx, dy);
  9001. ggml_vec_acc1_f32(nc, dx, -dot_y_dy);
  9002. ggml_vec_mul_f32 (nc, dx, dx, y);
  9003. #ifndef NDEBUG
  9004. for (int i = 0; i < nc; ++i) {
  9005. assert(!isnan(dx[i]));
  9006. assert(!isinf(dx[i]));
  9007. }
  9008. #endif
  9009. }
  9010. }
  9011. static void ggml_compute_forward_soft_max_back(
  9012. const struct ggml_compute_params * params,
  9013. const struct ggml_tensor * src0,
  9014. const struct ggml_tensor * src1,
  9015. struct ggml_tensor * dst) {
  9016. switch (src0->type) {
  9017. case GGML_TYPE_F32:
  9018. {
  9019. ggml_compute_forward_soft_max_back_f32(params, src0, src1, dst);
  9020. } break;
  9021. default:
  9022. {
  9023. GGML_ASSERT(false);
  9024. } break;
  9025. }
  9026. }
  9027. // ggml_compute_forward_alibi
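// adds the ALiBi positional bias to the attention scores (KQ_scaled): element i of head k
// gets i*m_k added, where the per-head slope m_k follows the geometric schedule of the ALiBi
// paper: m_k = m0^(k+1) with m0 = 2^(-max_bias/n) for the first n = 2^floor(log2(n_head))
// heads, and odd powers of m1 = 2^(-max_bias/2/n) for any remaining heads
// illustrative example (values not taken from this file): n_head = 8, max_bias = 8.0f gives
// n = 8, m0 = 0.5f and slopes 1/2, 1/4, ..., 1/256 for heads 0..7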
  9028. static void ggml_compute_forward_alibi_f32(
  9029. const struct ggml_compute_params * params,
  9030. const struct ggml_tensor * src0,
  9031. struct ggml_tensor * dst) {
  9032. assert(params->ith == 0);
  9033. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9034. return;
  9035. }
  9036. //const int n_past = ((int32_t *) dst->op_params)[0];
  9037. const int n_head = ((int32_t *) dst->op_params)[1];
  9038. float max_bias;
  9039. memcpy(&max_bias, (int32_t *) dst->op_params + 2, sizeof(float));
  9040. const int64_t ne0 = src0->ne[0]; // all_seq_len = n_past + ne1
  9041. const int64_t ne1 = src0->ne[1]; // seq_len_without_past
  9042. const int64_t ne2 = src0->ne[2]; // n_head -> this is k
  9043. //const int64_t ne3 = src0->ne[3]; // 1 -> bsz
  9044. const int64_t n = ggml_nrows(src0);
  9045. const int64_t ne2_ne3 = n/ne1; // ne2*ne3
  9046. const size_t nb0 = src0->nb[0];
  9047. const size_t nb1 = src0->nb[1];
  9048. const size_t nb2 = src0->nb[2];
  9049. //const int nb3 = src0->nb[3];
  9050. GGML_ASSERT(nb0 == sizeof(float));
  9051. GGML_ASSERT(n_head == ne2);
  9052. // add alibi to src0 (KQ_scaled)
  9053. const int n_heads_log2_floor = 1 << (int) floor(log2(n_head));
  9054. const float m0 = powf(2.0f, -(max_bias) / n_heads_log2_floor);
  9055. const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_heads_log2_floor);
  9056. for (int64_t i = 0; i < ne0; i++) {
  9057. for (int64_t j = 0; j < ne1; j++) {
  9058. for (int64_t k = 0; k < ne2_ne3; k++) {
  9059. float * const src = (float *)((char *) src0->data + i*nb0 + j*nb1 + k*nb2);
  9060. float * pdst = (float *)((char *) dst->data + i*nb0 + j*nb1 + k*nb2);
  9061. // TODO: k*nb2 or k*nb3
  9062. float m_k;
  9063. if (k < n_heads_log2_floor) {
  9064. m_k = powf(m0, k + 1);
  9065. } else {
  9066. m_k = powf(m1, 2 * (k - n_heads_log2_floor) + 1);
  9067. }
  9068. pdst[0] = i * m_k + src[0];
  9069. }
  9070. }
  9071. }
  9072. }
  9073. static void ggml_compute_forward_alibi_f16(
  9074. const struct ggml_compute_params * params,
  9075. const struct ggml_tensor * src0,
  9076. struct ggml_tensor * dst) {
  9077. assert(params->ith == 0);
  9078. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9079. return;
  9080. }
  9081. //const int n_past = ((int32_t *) dst->op_params)[0];
  9082. const int n_head = ((int32_t *) dst->op_params)[1];
  9083. float max_bias;
  9084. memcpy(&max_bias, (int32_t *) dst->op_params + 2, sizeof(float));
  9085. const int ne0 = src0->ne[0]; // all_seq_len = n_past + ne1
  9086. const int ne1 = src0->ne[1]; // seq_len_without_past
  9087. const int ne2 = src0->ne[2]; // n_head -> this is k
  9088. //const int ne3 = src0->ne[3]; // 1 -> bsz
  9089. const int n = ggml_nrows(src0);
  9090. const int ne2_ne3 = n/ne1; // ne2*ne3
  9091. const int nb0 = src0->nb[0];
  9092. const int nb1 = src0->nb[1];
  9093. const int nb2 = src0->nb[2];
  9094. //const int nb3 = src0->nb[3];
  9095. GGML_ASSERT(nb0 == sizeof(ggml_fp16_t));
  9096. //GGML_ASSERT(ne1 + n_past == ne0); (void) n_past;
  9097. GGML_ASSERT(n_head == ne2);
  9098. // add alibi to src0 (KQ_scaled)
  9099. const int n_heads_log2_floor = 1 << (int) floor(log2(n_head));
  9100. const float m0 = powf(2.0f, -(max_bias) / n_heads_log2_floor);
  9101. const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_heads_log2_floor);
  9102. for (int i = 0; i < ne0; i++) {
  9103. for (int j = 0; j < ne1; j++) {
  9104. for (int k = 0; k < ne2_ne3; k++) {
  9105. ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i*nb0 + j*nb1 + k*nb2);
  9106. float * pdst = (float *)((char *) dst->data + i*nb0 + j*nb1 + k*nb2);
  9107. // TODO: k*nb2 or k*nb3
  9108. float m_k;
  9109. if (k < n_heads_log2_floor) {
  9110. m_k = powf(m0, k + 1);
  9111. } else {
  9112. m_k = powf(m1, 2 * (k - n_heads_log2_floor) + 1);
  9113. }
  9114. // we return F32
  9115. pdst[0] = i * m_k + GGML_FP16_TO_FP32(src[0]);
  9116. }
  9117. }
  9118. }
  9119. }
  9120. static void ggml_compute_forward_alibi(
  9121. const struct ggml_compute_params * params,
  9122. const struct ggml_tensor * src0,
  9123. struct ggml_tensor * dst) {
  9124. switch (src0->type) {
  9125. case GGML_TYPE_F16:
  9126. {
  9127. ggml_compute_forward_alibi_f16(params, src0, dst);
  9128. } break;
  9129. case GGML_TYPE_F32:
  9130. {
  9131. ggml_compute_forward_alibi_f32(params, src0, dst);
  9132. } break;
  9133. case GGML_TYPE_Q4_0:
  9134. case GGML_TYPE_Q4_1:
  9135. case GGML_TYPE_Q5_0:
  9136. case GGML_TYPE_Q5_1:
  9137. case GGML_TYPE_Q8_0:
  9138. case GGML_TYPE_Q8_1:
  9139. case GGML_TYPE_Q2_K:
  9140. case GGML_TYPE_Q3_K:
  9141. case GGML_TYPE_Q4_K:
  9142. case GGML_TYPE_Q5_K:
  9143. case GGML_TYPE_Q6_K:
  9144. case GGML_TYPE_Q8_K:
  9145. case GGML_TYPE_I8:
  9146. case GGML_TYPE_I16:
  9147. case GGML_TYPE_I32:
  9148. case GGML_TYPE_COUNT:
  9149. {
  9150. GGML_ASSERT(false);
  9151. } break;
  9152. }
  9153. }
  9154. // ggml_compute_forward_clamp
  9155. static void ggml_compute_forward_clamp_f32(
  9156. const struct ggml_compute_params * params,
  9157. const struct ggml_tensor * src0,
  9158. struct ggml_tensor * dst) {
  9159. assert(params->ith == 0);
  9160. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9161. return;
  9162. }
  9163. float min;
  9164. float max;
  9165. memcpy(&min, (float *) dst->op_params + 0, sizeof(float));
  9166. memcpy(&max, (float *) dst->op_params + 1, sizeof(float));
  9167. const int ith = params->ith;
  9168. const int nth = params->nth;
  9169. const int n = ggml_nrows(src0);
  9170. const int nc = src0->ne[0];
  9171. const size_t nb00 = src0->nb[0];
  9172. const size_t nb01 = src0->nb[1];
  9173. const size_t nb0 = dst->nb[0];
  9174. const size_t nb1 = dst->nb[1];
  9175. GGML_ASSERT( nb0 == sizeof(float));
  9176. GGML_ASSERT(nb00 == sizeof(float));
  9177. for (int j = ith; j < n; j += nth) {
  9178. float * dst_ptr = (float *) ((char *) dst->data + j*nb1);
  9179. float * src0_ptr = (float *) ((char *) src0->data + j*nb01);
  9180. for (int i = 0; i < nc; i++) {
  9181. dst_ptr[i] = MAX(MIN(src0_ptr[i], max), min);
  9182. }
  9183. }
  9184. }
  9185. static void ggml_compute_forward_clamp(
  9186. const struct ggml_compute_params * params,
  9187. const struct ggml_tensor * src0,
  9188. struct ggml_tensor * dst) {
  9189. switch (src0->type) {
  9190. case GGML_TYPE_F32:
  9191. {
  9192. ggml_compute_forward_clamp_f32(params, src0, dst);
  9193. } break;
  9194. case GGML_TYPE_F16:
  9195. case GGML_TYPE_Q4_0:
  9196. case GGML_TYPE_Q4_1:
  9197. case GGML_TYPE_Q5_0:
  9198. case GGML_TYPE_Q5_1:
  9199. case GGML_TYPE_Q8_0:
  9200. case GGML_TYPE_Q8_1:
  9201. case GGML_TYPE_Q2_K:
  9202. case GGML_TYPE_Q3_K:
  9203. case GGML_TYPE_Q4_K:
  9204. case GGML_TYPE_Q5_K:
  9205. case GGML_TYPE_Q6_K:
  9206. case GGML_TYPE_Q8_K:
  9207. case GGML_TYPE_I8:
  9208. case GGML_TYPE_I16:
  9209. case GGML_TYPE_I32:
  9210. case GGML_TYPE_COUNT:
  9211. {
  9212. GGML_ASSERT(false);
  9213. } break;
  9214. }
  9215. }
  9216. // ggml_compute_forward_rope
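// RoPE rotates consecutive element pairs of each row by a position-dependent angle; in the
// standard (non-neox, non-glm) path below, with theta_scale = freq_base^(-2/n_dims), pair j
// of the token at position p is rotated by theta_j = p * theta_scale^j
// (illustrative example, actual values come from op_params: n_dims = 128, freq_base = 10000.0f
// gives theta_j = p * 10000^(-2*j/128))
// rope_yarn() additionally blends interpolated (freq_scale * theta) and extrapolated angles
// according to the YaRN ramp and applies a magnitude correction to cos/sin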
  9217. static float rope_yarn_ramp(const float low, const float high, const int i0) {
  9218. const float y = (i0 / 2 - low) / MAX(0.001f, high - low);
  9219. return 1 - MIN(1, MAX(0, y));
  9220. }
  9221. // YaRN algorithm based on LlamaYaRNScaledRotaryEmbedding.py from https://github.com/jquesnelle/yarn
  9222. // MIT licensed. Copyright (c) 2023 Jeffrey Quesnelle and Bowen Peng.
  9223. static void rope_yarn(
  9224. float theta_extrap, float freq_scale, float corr_dims[2], int64_t i0, float ext_factor, float mscale,
  9225. float * cos_theta, float * sin_theta
  9226. ) {
  9227. // Get n-d rotational scaling corrected for extrapolation
  9228. float theta_interp = freq_scale * theta_extrap;
  9229. float theta = theta_interp;
  9230. if (ext_factor != 0.0f) {
  9231. float ramp_mix = rope_yarn_ramp(corr_dims[0], corr_dims[1], i0) * ext_factor;
  9232. theta = theta_interp * (1 - ramp_mix) + theta_extrap * ramp_mix;
  9233. // Get n-d magnitude scaling corrected for interpolation
  9234. mscale *= 1.0f + 0.1f * logf(1.0f / freq_scale);
  9235. }
  9236. *cos_theta = cosf(theta) * mscale;
  9237. *sin_theta = sinf(theta) * mscale;
  9238. }
  9239. // Apparently solving `n_rot = 2pi * x * base^((2 * max_pos_emb) / n_dims)` for x, we get
  9240. // `corr_dim(n_rot) = n_dims * log(max_pos_emb / (n_rot * 2pi)) / (2 * log(base))`
  9241. static float ggml_rope_yarn_corr_dim(int n_dims, int n_orig_ctx, float n_rot, float base) {
  9242. return n_dims * logf(n_orig_ctx / (n_rot * 2 * (float)M_PI)) / (2 * logf(base));
  9243. }
  9244. void ggml_rope_yarn_corr_dims(
  9245. int n_dims, int n_orig_ctx, float freq_base, float beta_fast, float beta_slow, float dims[2]
  9246. ) {
  9247. // start and end correction dims
  9248. dims[0] = MAX(0, floorf(ggml_rope_yarn_corr_dim(n_dims, n_orig_ctx, beta_fast, freq_base)));
  9249. dims[1] = MIN(n_dims - 1, ceilf(ggml_rope_yarn_corr_dim(n_dims, n_orig_ctx, beta_slow, freq_base)));
  9250. }
  9251. static void ggml_compute_forward_rope_f32(
  9252. const struct ggml_compute_params * params,
  9253. const struct ggml_tensor * src0,
  9254. const struct ggml_tensor * src1,
  9255. struct ggml_tensor * dst,
  9256. const bool forward) {
  9257. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9258. return;
  9259. }
  9260. float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow;
  9261. // these two only relevant for xPos RoPE:
  9262. float xpos_base;
  9263. bool xpos_down;
  9264. //const int n_past = ((int32_t *) dst->op_params)[0];
  9265. const int n_dims = ((int32_t *) dst->op_params)[1];
  9266. const int mode = ((int32_t *) dst->op_params)[2];
  9267. const int n_ctx = ((int32_t *) dst->op_params)[3];
  9268. const int n_orig_ctx = ((int32_t *) dst->op_params)[4];
  9269. memcpy(&freq_base, (int32_t *) dst->op_params + 5, sizeof(float));
  9270. memcpy(&freq_scale, (int32_t *) dst->op_params + 6, sizeof(float));
  9271. memcpy(&ext_factor, (int32_t *) dst->op_params + 7, sizeof(float));
  9272. memcpy(&attn_factor, (int32_t *) dst->op_params + 8, sizeof(float));
  9273. memcpy(&beta_fast, (int32_t *) dst->op_params + 9, sizeof(float));
  9274. memcpy(&beta_slow, (int32_t *) dst->op_params + 10, sizeof(float));
  9275. memcpy(&xpos_base, (int32_t *) dst->op_params + 11, sizeof(float));
  9276. memcpy(&xpos_down, (int32_t *) dst->op_params + 12, sizeof(bool));
  9277. GGML_TENSOR_UNARY_OP_LOCALS
  9278. //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
  9279. //printf("n_past = %d, ne2 = %d\n", n_past, ne2);
  9280. GGML_ASSERT(nb00 == sizeof(float));
  9281. const int ith = params->ith;
  9282. const int nth = params->nth;
  9283. const int nr = ggml_nrows(dst);
  9284. GGML_ASSERT(n_dims <= ne0);
  9285. GGML_ASSERT(n_dims % 2 == 0);
  9286. // rows per thread
  9287. const int dr = (nr + nth - 1)/nth;
  9288. // row range for this thread
  9289. const int ir0 = dr*ith;
  9290. const int ir1 = MIN(ir0 + dr, nr);
  9291. // row index used to determine which thread to use
  9292. int ir = 0;
  9293. const float theta_scale = powf(freq_base, -2.0f/n_dims);
  9294. const float inv_ndims = -1.f/n_dims;
  9295. float corr_dims[2];
  9296. ggml_rope_yarn_corr_dims(n_dims, n_orig_ctx, freq_base, beta_fast, beta_slow, corr_dims);
  9297. const bool is_neox = mode & 2;
  9298. const bool is_glm = mode & 4;
  9299. // backward process uses inverse rotation by cos and sin.
  9300. // cos and sin build a rotation matrix, where the inverse is the transpose.
  9301. // this essentially just switches the sign of sin.
  9302. const float sin_sign = forward ? 1.0f : -1.0f;
  9303. const int32_t * pos = (const int32_t *) src1->data;
  9304. for (int64_t i3 = 0; i3 < ne3; i3++) {
  9305. for (int64_t i2 = 0; i2 < ne2; i2++) {
  9306. const int64_t p = pos[i2];
  9307. for (int64_t i1 = 0; i1 < ne1; i1++) {
  9308. if (ir++ < ir0) continue;
  9309. if (ir > ir1) break;
  9310. float theta_base = (float)p;
  9311. if (is_glm) {
  9312. theta_base = MIN(p, n_ctx - 2);
  9313. float block_theta = MAX(p - (n_ctx - 2), 0);
  9314. for (int64_t i0 = 0; i0 < ne0 / 4; i0++) {
  9315. const float cos_theta = cosf(theta_base);
  9316. const float sin_theta = sinf(theta_base) * sin_sign;
  9317. const float cos_block_theta = cosf(block_theta);
  9318. const float sin_block_theta = sinf(block_theta) * sin_sign;
  9319. theta_base *= theta_scale;
  9320. block_theta *= theta_scale;
  9321. const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  9322. float * dst_data = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  9323. const float x0 = src[0];
  9324. const float x1 = src[n_dims/2];
  9325. const float x2 = src[n_dims];
  9326. const float x3 = src[n_dims/2*3];
  9327. dst_data[0] = x0*cos_theta - x1*sin_theta;
  9328. dst_data[n_dims/2] = x0*sin_theta + x1*cos_theta;
  9329. dst_data[n_dims] = x2*cos_block_theta - x3*sin_block_theta;
  9330. dst_data[n_dims/2*3] = x2*sin_block_theta + x3*cos_block_theta;
  9331. }
  9332. } else if (!is_neox) {
  9333. for (int64_t i0 = 0; i0 < ne0; i0 += 2) {
  9334. float cos_theta, sin_theta;
  9335. rope_yarn(
  9336. theta_base, freq_scale, corr_dims, i0, ext_factor, attn_factor, &cos_theta, &sin_theta
  9337. );
  9338. sin_theta *= sin_sign;
  9339. // zeta scaling for xPos only:
  9340. float zeta = xpos_base != 0.0f ? powf((i0 + 0.4f * ne0) / (1.4f * ne0), p / xpos_base) : 1.0f;
  9341. if (xpos_down) zeta = 1.0f / zeta;
  9342. theta_base *= theta_scale;
  9343. const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  9344. float * dst_data = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  9345. const float x0 = src[0];
  9346. const float x1 = src[1];
  9347. dst_data[0] = x0*cos_theta*zeta - x1*sin_theta*zeta;
  9348. dst_data[1] = x0*sin_theta*zeta + x1*cos_theta*zeta;
  9349. }
  9350. } else {
  9351. // TODO: this might be wrong for ne0 != n_dims - need double check
  9352. // ref: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt_neox/modeling_gpt_neox.py#LL251C1-L294C28
  9353. theta_base *= freq_scale;
  9354. for (int64_t ib = 0; ib < ne0/n_dims; ++ib) {
  9355. for (int64_t ic = 0; ic < n_dims; ic += 2) {
  9356. // simplified from `(ib * n_dims + ic) * inv_ndims`
  9357. float cur_rot = inv_ndims * ic - ib;
  9358. float cos_theta, sin_theta;
  9359. rope_yarn(
  9360. theta_base, freq_scale, corr_dims, cur_rot, ext_factor, attn_factor,
  9361. &cos_theta, &sin_theta
  9362. );
  9363. sin_theta *= sin_sign;
  9364. theta_base *= theta_scale;
  9365. const int64_t i0 = ib*n_dims + ic/2;
  9366. const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  9367. float * dst_data = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  9368. const float x0 = src[0];
  9369. const float x1 = src[n_dims/2];
  9370. dst_data[0] = x0*cos_theta - x1*sin_theta;
  9371. dst_data[n_dims/2] = x0*sin_theta + x1*cos_theta;
  9372. }
  9373. }
  9374. }
  9375. }
  9376. }
  9377. }
  9378. }
  9379. static void ggml_compute_forward_rope_f16(
  9380. const struct ggml_compute_params * params,
  9381. const struct ggml_tensor * src0,
  9382. const struct ggml_tensor * src1,
  9383. struct ggml_tensor * dst,
  9384. const bool forward) {
  9385. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9386. return;
  9387. }
  9388. float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow;
  9389. //const int n_past = ((int32_t *) dst->op_params)[0];
  9390. const int n_dims = ((int32_t *) dst->op_params)[1];
  9391. const int mode = ((int32_t *) dst->op_params)[2];
  9392. const int n_ctx = ((int32_t *) dst->op_params)[3];
  9393. const int n_orig_ctx = ((int32_t *) dst->op_params)[4];
  9394. memcpy(&freq_base, (int32_t *) dst->op_params + 5, sizeof(float));
  9395. memcpy(&freq_scale, (int32_t *) dst->op_params + 6, sizeof(float));
  9396. memcpy(&ext_factor, (int32_t *) dst->op_params + 7, sizeof(float));
  9397. memcpy(&attn_factor, (int32_t *) dst->op_params + 8, sizeof(float));
  9398. memcpy(&beta_fast, (int32_t *) dst->op_params + 9, sizeof(float));
  9399. memcpy(&beta_slow, (int32_t *) dst->op_params + 10, sizeof(float));
  9400. GGML_TENSOR_UNARY_OP_LOCALS
  9401. //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
  9402. //printf("n_past = %d, ne2 = %d\n", n_past, ne2);
  9403. GGML_ASSERT(nb0 == sizeof(ggml_fp16_t));
  9404. const int ith = params->ith;
  9405. const int nth = params->nth;
  9406. const int nr = ggml_nrows(dst);
  9407. GGML_ASSERT(n_dims <= ne0);
  9408. GGML_ASSERT(n_dims % 2 == 0);
  9409. // rows per thread
  9410. const int dr = (nr + nth - 1)/nth;
  9411. // row range for this thread
  9412. const int ir0 = dr*ith;
  9413. const int ir1 = MIN(ir0 + dr, nr);
  9414. // row index used to determine which thread to use
  9415. int ir = 0;
  9416. const float theta_scale = powf(freq_base, -2.0f/n_dims);
  9417. const float inv_ndims = -1.f/n_dims;
  9418. float corr_dims[2];
  9419. ggml_rope_yarn_corr_dims(n_dims, n_orig_ctx, freq_base, beta_fast, beta_slow, corr_dims);
  9420. const bool is_neox = mode & 2;
  9421. const bool is_glm = mode & 4;
  9422. // backward process uses inverse rotation by cos and sin.
  9423. // cos and sin build a rotation matrix, where the inverse is the transpose.
  9424. // this essentially just switches the sign of sin.
  9425. const float sin_sign = forward ? 1.0f : -1.0f;
  9426. const int32_t * pos = (const int32_t *) src1->data;
  9427. for (int64_t i3 = 0; i3 < ne3; i3++) {
  9428. for (int64_t i2 = 0; i2 < ne2; i2++) {
  9429. const int64_t p = pos[i2];
  9430. for (int64_t i1 = 0; i1 < ne1; i1++) {
  9431. if (ir++ < ir0) continue;
  9432. if (ir > ir1) break;
  9433. float theta_base = (float)p;
  9434. if (is_glm) {
  9435. theta_base = MIN(p, n_ctx - 2);
  9436. float block_theta = MAX(p - (n_ctx - 2), 0);
  9437. for (int64_t i0 = 0; i0 < ne0 / 4; i0++) {
  9438. const float cos_theta = cosf(theta_base);
  9439. const float sin_theta = sinf(theta_base) * sin_sign;
  9440. const float cos_block_theta = cosf(block_theta);
  9441. const float sin_block_theta = sinf(block_theta) * sin_sign;
  9442. theta_base *= theta_scale;
  9443. block_theta *= theta_scale;
  9444. const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  9445. ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  9446. const float x0 = GGML_FP16_TO_FP32(src[0]);
  9447. const float x1 = GGML_FP16_TO_FP32(src[n_dims/2]);
  9448. const float x2 = GGML_FP16_TO_FP32(src[n_dims]);
  9449. const float x3 = GGML_FP16_TO_FP32(src[n_dims/2*3]);
  9450. dst_data[0] = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta);
  9451. dst_data[n_dims/2] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta);
  9452. dst_data[n_dims] = GGML_FP32_TO_FP16(x2*cos_block_theta - x3*sin_block_theta);
  9453. dst_data[n_dims/2*3] = GGML_FP32_TO_FP16(x2*sin_block_theta + x3*cos_block_theta);
  9454. }
  9455. } else if (!is_neox) {
  9456. for (int64_t i0 = 0; i0 < ne0; i0 += 2) {
  9457. float cos_theta, sin_theta;
  9458. rope_yarn(
  9459. theta_base, freq_scale, corr_dims, i0, ext_factor, attn_factor, &cos_theta, &sin_theta
  9460. );
  9461. sin_theta *= sin_sign;
  9462. theta_base *= theta_scale;
  9463. const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  9464. ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  9465. const float x0 = GGML_FP16_TO_FP32(src[0]);
  9466. const float x1 = GGML_FP16_TO_FP32(src[1]);
  9467. dst_data[0] = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta);
  9468. dst_data[1] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta);
  9469. }
  9470. } else {
  9471. // TODO: this might be wrong for ne0 != n_dims - need double check
  9472. // ref: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt_neox/modeling_gpt_neox.py#LL251C1-L294C28
  9473. theta_base *= freq_scale;
  9474. for (int64_t ib = 0; ib < ne0/n_dims; ++ib) {
  9475. for (int64_t ic = 0; ic < n_dims; ic += 2) {
  9476. // simplified from `(ib * n_dims + ic) * inv_ndims`
  9477. float cur_rot = inv_ndims * ic - ib;
  9478. float cos_theta, sin_theta;
  9479. rope_yarn(
  9480. theta_base, freq_scale, corr_dims, cur_rot, ext_factor, attn_factor,
  9481. &cos_theta, &sin_theta
  9482. );
  9483. sin_theta *= sin_sign;
  9484. theta_base *= theta_scale;
  9485. const int64_t i0 = ib*n_dims + ic/2;
  9486. const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  9487. ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  9488. const float x0 = GGML_FP16_TO_FP32(src[0]);
  9489. const float x1 = GGML_FP16_TO_FP32(src[n_dims/2]);
  9490. dst_data[0] = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta);
  9491. dst_data[n_dims/2] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta);
  9492. }
  9493. }
  9494. }
  9495. }
  9496. }
  9497. }
  9498. }
  9499. static void ggml_compute_forward_rope(
  9500. const struct ggml_compute_params * params,
  9501. const struct ggml_tensor * src0,
  9502. const struct ggml_tensor * src1,
  9503. struct ggml_tensor * dst) {
  9504. switch (src0->type) {
  9505. case GGML_TYPE_F16:
  9506. {
  9507. ggml_compute_forward_rope_f16(params, src0, src1, dst, true);
  9508. } break;
  9509. case GGML_TYPE_F32:
  9510. {
  9511. ggml_compute_forward_rope_f32(params, src0, src1, dst, true);
  9512. } break;
  9513. default:
  9514. {
  9515. GGML_ASSERT(false);
  9516. } break;
  9517. }
  9518. }
  9519. // ggml_compute_forward_rope_back
  9520. static void ggml_compute_forward_rope_back(
  9521. const struct ggml_compute_params * params,
  9522. const struct ggml_tensor * src0,
  9523. const struct ggml_tensor * src1,
  9524. struct ggml_tensor * dst) {
  9525. switch (src0->type) {
  9526. case GGML_TYPE_F16:
  9527. {
  9528. ggml_compute_forward_rope_f16(params, src0, src1, dst, false);
  9529. } break;
  9530. case GGML_TYPE_F32:
  9531. {
  9532. ggml_compute_forward_rope_f32(params, src0, src1, dst, false);
  9533. } break;
  9534. default:
  9535. {
  9536. GGML_ASSERT(false);
  9537. } break;
  9538. }
  9539. }
  9540. // ggml_compute_forward_conv_transpose_1d
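// transposed 1d convolution, done in two phases: the INIT phase repacks the kernel (src0)
// from (K x Cout x Cin) to (Cin x K x Cout) and the input (src1) from (L x Cin) to (Cin x L)
// inside params->wdata and zeroes dst; the main phase splits the Cout output rows across
// threads and, for each input position i10 and kernel tap i00, accumulates a dot product
// over Cin into dst_data[i10*s0 + i00]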
  9541. static void ggml_compute_forward_conv_transpose_1d_f16_f32(
  9542. const struct ggml_compute_params * params,
  9543. const struct ggml_tensor * src0,
  9544. const struct ggml_tensor * src1,
  9545. struct ggml_tensor * dst) {
  9546. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  9547. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  9548. GGML_ASSERT( dst->type == GGML_TYPE_F32);
  9549. int64_t t0 = ggml_perf_time_us();
  9550. UNUSED(t0);
  9551. GGML_TENSOR_BINARY_OP_LOCALS
  9552. const int ith = params->ith;
  9553. const int nth = params->nth;
  9554. const int nk = ne00*ne01*ne02;
  9555. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  9556. GGML_ASSERT(nb10 == sizeof(float));
  9557. if (params->type == GGML_TASK_INIT) {
  9558. memset(params->wdata, 0, params->wsize);
  9559. // permute kernel data (src0) from (K x Cout x Cin) to (Cin x K x Cout)
  9560. {
  9561. ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;
  9562. for (int64_t i02 = 0; i02 < ne02; i02++) {
  9563. for (int64_t i01 = 0; i01 < ne01; i01++) {
  9564. const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i02*nb02 + i01*nb01);
  9565. ggml_fp16_t * dst_data = wdata + i01*ne00*ne02;
  9566. for (int64_t i00 = 0; i00 < ne00; i00++) {
  9567. dst_data[i00*ne02 + i02] = src[i00];
  9568. }
  9569. }
  9570. }
  9571. }
  9572. // permute source data (src1) from (L x Cin) to (Cin x L)
  9573. {
  9574. ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + nk;
  9575. ggml_fp16_t * dst_data = wdata;
  9576. for (int64_t i11 = 0; i11 < ne11; i11++) {
  9577. const float * const src = (float *)((char *) src1->data + i11*nb11);
  9578. for (int64_t i10 = 0; i10 < ne10; i10++) {
  9579. dst_data[i10*ne11 + i11] = GGML_FP32_TO_FP16(src[i10]);
  9580. }
  9581. }
  9582. }
  9583. // need to zero dst since we are accumulating into it
  9584. memset(dst->data, 0, ggml_nbytes(dst));
  9585. return;
  9586. }
  9587. if (params->type == GGML_TASK_FINALIZE) {
  9588. return;
  9589. }
  9590. const int32_t s0 = ((const int32_t*)(dst->op_params))[0];
  9591. // total rows in dst
  9592. const int nr = ne1;
  9593. // rows per thread
  9594. const int dr = (nr + nth - 1)/nth;
  9595. // row range for this thread
  9596. const int ir0 = dr*ith;
  9597. const int ir1 = MIN(ir0 + dr, nr);
  9598. ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;
  9599. ggml_fp16_t * const wdata_src = wdata + nk;
  9600. for (int i1 = ir0; i1 < ir1; i1++) {
  9601. float * dst_data = (float *)((char *) dst->data + i1*nb1);
  9602. ggml_fp16_t * wdata_kernel = wdata + i1*ne02*ne00;
  9603. for (int i10 = 0; i10 < ne10; i10++) {
  9604. const int i1n = i10*ne11;
  9605. for (int i00 = 0; i00 < ne00; i00++) {
  9606. float v = 0;
  9607. ggml_vec_dot_f16(ne02, &v,
  9608. (ggml_fp16_t *) wdata_src + i1n,
  9609. (ggml_fp16_t *) wdata_kernel + i00*ne02);
  9610. dst_data[i10*s0 + i00] += v;
  9611. }
  9612. }
  9613. }
  9614. }
  9615. static void ggml_compute_forward_conv_transpose_1d_f32(
  9616. const struct ggml_compute_params * params,
  9617. const struct ggml_tensor * src0,
  9618. const struct ggml_tensor * src1,
  9619. struct ggml_tensor * dst) {
  9620. GGML_ASSERT(src0->type == GGML_TYPE_F32);
  9621. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  9622. GGML_ASSERT( dst->type == GGML_TYPE_F32);
  9623. int64_t t0 = ggml_perf_time_us();
  9624. UNUSED(t0);
  9625. GGML_TENSOR_BINARY_OP_LOCALS
  9626. const int ith = params->ith;
  9627. const int nth = params->nth;
  9628. const int nk = ne00*ne01*ne02;
  9629. GGML_ASSERT(nb00 == sizeof(float));
  9630. GGML_ASSERT(nb10 == sizeof(float));
  9631. if (params->type == GGML_TASK_INIT) {
  9632. memset(params->wdata, 0, params->wsize);
  9633. // prepare kernel data (src0) from (K x Cout x Cin) to (Cin x K x Cout)
  9634. {
  9635. float * const wdata = (float *) params->wdata + 0;
  9636. for (int64_t i02 = 0; i02 < ne02; i02++) {
  9637. for (int64_t i01 = 0; i01 < ne01; i01++) {
  9638. const float * const src = (float *)((char *) src0->data + i02*nb02 + i01*nb01);
  9639. float * dst_data = wdata + i01*ne00*ne02;
  9640. for (int64_t i00 = 0; i00 < ne00; i00++) {
  9641. dst_data[i00*ne02 + i02] = src[i00];
  9642. }
  9643. }
  9644. }
  9645. }
  9646. // prepare source data (src1)
  9647. {
  9648. float * const wdata = (float *) params->wdata + nk;
  9649. float * dst_data = wdata;
  9650. for (int64_t i11 = 0; i11 < ne11; i11++) {
  9651. const float * const src = (float *)((char *) src1->data + i11*nb11);
  9652. for (int64_t i10 = 0; i10 < ne10; i10++) {
  9653. dst_data[i10*ne11 + i11] = src[i10];
  9654. }
  9655. }
  9656. }
  9657. // need to zero dst since we are accumulating into it
  9658. memset(dst->data, 0, ggml_nbytes(dst));
  9659. return;
  9660. }
  9661. if (params->type == GGML_TASK_FINALIZE) {
  9662. return;
  9663. }
  9664. const int32_t s0 = ((const int32_t*)(dst->op_params))[0];
  9665. // total rows in dst
  9666. const int nr = ne1;
  9667. // rows per thread
  9668. const int dr = (nr + nth - 1)/nth;
  9669. // row range for this thread
  9670. const int ir0 = dr*ith;
  9671. const int ir1 = MIN(ir0 + dr, nr);
  9672. float * const wdata = (float *) params->wdata + 0;
  9673. float * const wdata_src = wdata + nk;
  9674. for (int i1 = ir0; i1 < ir1; i1++) {
  9675. float * dst_data = (float *)((char *) dst->data + i1*nb1);
  9676. float * wdata_kernel = wdata + i1*ne02*ne00;
  9677. for (int i10 = 0; i10 < ne10; i10++) {
  9678. const int i1n = i10*ne11;
  9679. for (int i00 = 0; i00 < ne00; i00++) {
  9680. float v = 0;
  9681. ggml_vec_dot_f32(ne02, &v,
  9682. wdata_src + i1n,
  9683. wdata_kernel + i00*ne02);
  9684. dst_data[i10*s0 + i00] += v;
  9685. }
  9686. }
  9687. }
  9688. }
  9689. static void ggml_compute_forward_conv_transpose_1d(
  9690. const struct ggml_compute_params * params,
  9691. const struct ggml_tensor * src0,
  9692. const struct ggml_tensor * src1,
  9693. struct ggml_tensor * dst) {
  9694. switch (src0->type) {
  9695. case GGML_TYPE_F16:
  9696. {
  9697. ggml_compute_forward_conv_transpose_1d_f16_f32(params, src0, src1, dst);
  9698. } break;
  9699. case GGML_TYPE_F32:
  9700. {
  9701. ggml_compute_forward_conv_transpose_1d_f32(params, src0, src1, dst);
  9702. } break;
  9703. default:
  9704. {
  9705. GGML_ASSERT(false);
  9706. } break;
  9707. }
  9708. }
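// im2col unfolds image patches so that a convolution can be expressed as a matrix
// multiplication (in ggml, conv_2d is typically lowered to im2col followed by mul_mat
// with the reshaped kernel).
// op_params = (s0, s1, p0, p1, d0, d1, is_2D): strides, paddings and dilations along
// width/height plus a flag selecting the 1D or 2D case. Taps that fall into the
// padding are written as 0.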
  9709. // src0: kernel [OC, IC, KH, KW]
  9710. // src1: image [N, IC, IH, IW]
  9711. // dst: result [N, OH, OW, IC*KH*KW]
  9712. static void ggml_compute_forward_im2col_f16(
  9713. const struct ggml_compute_params * params,
  9714. const struct ggml_tensor * src0,
  9715. const struct ggml_tensor * src1,
  9716. struct ggml_tensor * dst) {
  9717. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  9718. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  9719. GGML_ASSERT( dst->type == GGML_TYPE_F16);
  9720. int64_t t0 = ggml_perf_time_us();
  9721. UNUSED(t0);
  9722. GGML_TENSOR_BINARY_OP_LOCALS;
  9723. const int32_t s0 = ((const int32_t *)(dst->op_params))[0];
  9724. const int32_t s1 = ((const int32_t *)(dst->op_params))[1];
  9725. const int32_t p0 = ((const int32_t *)(dst->op_params))[2];
  9726. const int32_t p1 = ((const int32_t *)(dst->op_params))[3];
  9727. const int32_t d0 = ((const int32_t *)(dst->op_params))[4];
  9728. const int32_t d1 = ((const int32_t *)(dst->op_params))[5];
  9729. const bool is_2D = ((const int32_t *)(dst->op_params))[6] == 1;
  9730. const int ith = params->ith;
  9731. const int nth = params->nth;
  9732. const int64_t N = is_2D ? ne13 : ne12;
  9733. const int64_t IC = is_2D ? ne12 : ne11;
  9734. const int64_t IH = is_2D ? ne11 : 1;
  9735. const int64_t IW = ne10;
  9736. const int64_t KH = is_2D ? ne01 : 1;
  9737. const int64_t KW = ne00;
  9738. const int64_t OH = is_2D ? ne2 : 1;
  9739. const int64_t OW = ne1;
  9740. int ofs0 = is_2D ? nb13 : nb12;
  9741. int ofs1 = is_2D ? nb12 : nb11;
  9742. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  9743. GGML_ASSERT(nb10 == sizeof(float));
  9744. if (params->type == GGML_TASK_INIT) {
  9745. return;
  9746. }
  9747. if (params->type == GGML_TASK_FINALIZE) {
  9748. return;
  9749. }
  9750. // im2col: [N, IC, IH, IW] => [N, OH, OW, IC*KH*KW]
  9751. {
  9752. ggml_fp16_t * const wdata = (ggml_fp16_t *) dst->data;
  9753. for (int64_t in = 0; in < N; in++) {
  9754. for (int64_t ioh = 0; ioh < OH; ioh++) { // 1
  9755. for (int64_t iow = 0; iow < OW; iow++) {
  9756. for (int64_t iic = ith; iic < IC; iic += nth) {
  9757. // micro kernel
  9758. ggml_fp16_t * dst_data = wdata + (in*OH*OW + ioh*OW + iow)*(IC*KH*KW); // [IC, KH, KW]
  9759. const float * const src_data = (float *)((char *) src1->data + in*ofs0 + iic*ofs1); // [IH, IW]
  9760. for (int64_t ikh = 0; ikh < KH; ikh++) { // 1
  9761. for (int64_t ikw = 0; ikw < KW; ikw++) {
  9762. const int64_t iiw = iow*s0 + ikw*d0 - p0;
  9763. const int64_t iih = ioh*s1 + ikh*d1 - p1;
  9764. if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) {
  9765. dst_data[iic*(KH*KW) + ikh*KW + ikw] = 0;
  9766. } else {
  9767. dst_data[iic*(KH*KW) + ikh*KW + ikw] = GGML_FP32_TO_FP16(src_data[iih*IW + iiw]);
  9768. }
  9769. }
  9770. }
  9771. }
  9772. }
  9773. }
  9774. }
  9775. }
  9776. }
  9777. static void ggml_compute_forward_im2col(
  9778. const struct ggml_compute_params * params,
  9779. const struct ggml_tensor * src0,
  9780. const struct ggml_tensor * src1,
  9781. struct ggml_tensor * dst) {
  9782. switch (src0->type) {
  9783. case GGML_TYPE_F16:
  9784. {
  9785. ggml_compute_forward_im2col_f16(params, src0, src1, dst);
  9786. } break;
  9787. case GGML_TYPE_F32:
  9788. {
  9789. GGML_ASSERT(false);
  9790. } break;
  9791. default:
  9792. {
  9793. GGML_ASSERT(false);
  9794. } break;
  9795. }
  9796. }
  9797. // ggml_compute_forward_conv_transpose_2d
  9798. static void ggml_compute_forward_conv_transpose_2d(
  9799. const struct ggml_compute_params * params,
  9800. const struct ggml_tensor * src0,
  9801. const struct ggml_tensor * src1,
  9802. struct ggml_tensor * dst) {
  9803. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  9804. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  9805. GGML_ASSERT( dst->type == GGML_TYPE_F32);
  9806. int64_t t0 = ggml_perf_time_us();
  9807. UNUSED(t0);
  9808. GGML_TENSOR_BINARY_OP_LOCALS
  9809. const int ith = params->ith;
  9810. const int nth = params->nth;
  9811. const int nk = ne00*ne01*ne02*ne03;
  9812. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  9813. GGML_ASSERT(nb10 == sizeof(float));
  9814. if (params->type == GGML_TASK_INIT) {
  9815. memset(params->wdata, 0, params->wsize);
  9816. // permute kernel data (src0) from (Kw x Kh x Cout x Cin) to (Cin x Kw x Kh x Cout)
  9817. {
  9818. ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;
  9819. for (int64_t i03 = 0; i03 < ne03; i03++) {
  9820. for (int64_t i02 = 0; i02 < ne02; i02++) {
  9821. const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i03*nb03 + i02*nb02);
  9822. ggml_fp16_t * dst_data = wdata + i02*ne01*ne00*ne03;
  9823. for (int64_t i01 = 0; i01 < ne01; i01++) {
  9824. for (int64_t i00 = 0; i00 < ne00; i00++) {
  9825. dst_data[i01*ne00*ne03 + i00*ne03 + i03] = src[i01 * ne00 + i00];
  9826. }
  9827. }
  9828. }
  9829. }
  9830. }
  9831. // permute source data (src1) from (Sw x Sh x Cin) to (Cin x Sw x Sh)
  9832. {
  9833. ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + nk;
  9834. for (int i12 = 0; i12 < ne12; i12++) {
  9835. for (int i11 = 0; i11 < ne11; i11++) {
  9836. const float * const src = (float *)((char *) src1->data + i12*nb12 + i11*nb11);
  9837. ggml_fp16_t * dst_data = wdata + i11*ne10*ne12;
  9838. for (int i10 = 0; i10 < ne10; i10++) {
  9839. dst_data[i10*ne12 + i12] = GGML_FP32_TO_FP16(src[i10]);
  9840. }
  9841. }
  9842. }
  9843. }
  9844. memset(dst->data, 0, ggml_nbytes(dst));
  9845. return;
  9846. }
  9847. if (params->type == GGML_TASK_FINALIZE) {
  9848. return;
  9849. }
  9850. const int32_t stride = ggml_get_op_params_i32(dst, 0);
  9851. // total patches in dst
  9852. const int np = ne2;
  9853. // patches per thread
  9854. const int dp = (np + nth - 1)/nth;
  9855. // patch range for this thread
  9856. const int ip0 = dp*ith;
  9857. const int ip1 = MIN(ip0 + dp, np);
  9858. ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;
  9859. ggml_fp16_t * const wdata_src = wdata + nk;
  9860. for (int i2 = ip0; i2 < ip1; i2++) { // Cout
  9861. float * dst_data = (float *)((char *) dst->data + i2*nb2);
  9862. ggml_fp16_t * wdata_kernel = wdata + i2*ne01*ne00*ne03;
  9863. for (int i11 = 0; i11 < ne11; i11++) {
  9864. for (int i10 = 0; i10 < ne10; i10++) {
  9865. const int i1n = i11*ne10*ne12 + i10*ne12;
  9866. for (int i01 = 0; i01 < ne01; i01++) {
  9867. for (int i00 = 0; i00 < ne00; i00++) {
  9868. float v = 0;
  9869. ggml_vec_dot_f16(ne03, &v,
  9870. wdata_src + i1n,
  9871. wdata_kernel + i01*ne00*ne03 + i00*ne03);
  9872. dst_data[(i11*stride + i01)*ne0 + i10*stride + i00] += v;
  9873. }
  9874. }
  9875. }
  9876. }
  9877. }
  9878. }
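// 1D pooling specialized for stride == kernel size and zero padding: each output
// element reduces k consecutive input elements (average or max), row by row.
// Expected to run on a single thread (params->ith must be 0).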
  9879. // ggml_compute_forward_pool_1d_sk_p0
  9880. static void ggml_compute_forward_pool_1d_sk_p0(
  9881. const struct ggml_compute_params * params,
  9882. const enum ggml_op_pool op,
  9883. const struct ggml_tensor * src,
  9884. const int k,
  9885. struct ggml_tensor * dst) {
  9886. assert(src->type == GGML_TYPE_F32);
  9887. assert(params->ith == 0);
  9888. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9889. return;
  9890. }
  9891. const char * cdata = (const char *)src->data;
  9892. const char * const data_end = cdata + ggml_nbytes(src);
  9893. float * drow = (float *)dst->data;
  9894. const int64_t rs = dst->ne[0];
  9895. while (cdata < data_end) {
  9896. const float * const srow = (const float *)cdata;
  9897. int j = 0;
  9898. for (int64_t i = 0; i < rs; ++i) {
  9899. switch (op) {
  9900. case GGML_OP_POOL_AVG: drow[i] = 0; break;
  9901. case GGML_OP_POOL_MAX: drow[i] = -FLT_MAX; break;
  9902. case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
  9903. }
  9904. for (int ki = 0; ki < k; ++ki) {
  9905. switch (op) {
  9906. case GGML_OP_POOL_AVG: drow[i] += srow[j]; break;
  9907. case GGML_OP_POOL_MAX: if (srow[j] > drow[i]) drow[i] = srow[j]; break;
  9908. case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
  9909. }
  9910. ++j;
  9911. }
  9912. switch (op) {
  9913. case GGML_OP_POOL_AVG: drow[i] /= k; break;
  9914. case GGML_OP_POOL_MAX: break;
  9915. case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
  9916. }
  9917. }
  9918. cdata += src->nb[1];
  9919. drow += rs;
  9920. }
  9921. }
  9922. // ggml_compute_forward_pool_1d
  9923. static void ggml_compute_forward_pool_1d(
  9924. const struct ggml_compute_params * params,
  9925. const struct ggml_tensor * src0,
  9926. struct ggml_tensor * dst) {
  9927. const int32_t * opts = (const int32_t *)dst->op_params;
  9928. enum ggml_op_pool op = opts[0];
  9929. const int k0 = opts[1];
  9930. const int s0 = opts[2];
  9931. const int p0 = opts[3];
  9932. GGML_ASSERT(p0 == 0); // padding not supported
  9933. GGML_ASSERT(k0 == s0); // only s = k supported
  9934. ggml_compute_forward_pool_1d_sk_p0(params, op, src0, k0, dst);
  9935. }
  9936. // ggml_compute_forward_pool_2d
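// general 2D pooling with arbitrary kernel, stride and padding.
// op_params = (op, k0, k1, s0, s1, p0, p1). Window positions outside the source plane
// are skipped; note that AVG still divides by the full window area k0*k1, so padded
// cells effectively count as zeros.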
  9937. static void ggml_compute_forward_pool_2d(
  9938. const struct ggml_compute_params * params,
  9939. const struct ggml_tensor * src,
  9940. struct ggml_tensor * dst) {
  9941. assert(src->type == GGML_TYPE_F32);
  9942. assert(params->ith == 0);
  9943. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9944. return;
  9945. }
  9946. const int32_t * opts = (const int32_t *)dst->op_params;
  9947. enum ggml_op_pool op = opts[0];
  9948. const int k0 = opts[1];
  9949. const int k1 = opts[2];
  9950. const int s0 = opts[3];
  9951. const int s1 = opts[4];
  9952. const int p0 = opts[5];
  9953. const int p1 = opts[6];
  9954. const char * cdata = (const char*)src->data;
  9955. const char * const data_end = cdata + ggml_nbytes(src);
  9956. const int64_t px = dst->ne[0];
  9957. const int64_t py = dst->ne[1];
  9958. const int64_t pa = px * py;
  9959. float * dplane = (float *)dst->data;
  9960. const int ka = k0 * k1;
  9961. const int offset0 = -p0;
  9962. const int offset1 = -p1;
  9963. while (cdata < data_end) {
  9964. for (int oy = 0; oy < py; ++oy) {
  9965. float * const drow = dplane + oy * px;
  9966. for (int ox = 0; ox < px; ++ox) {
  9967. float * const out = drow + ox;
  9968. switch (op) {
  9969. case GGML_OP_POOL_AVG: *out = 0; break;
  9970. case GGML_OP_POOL_MAX: *out = -FLT_MAX; break;
  9971. case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
  9972. }
  9973. const int ix = offset0 + ox * s0;
  9974. const int iy = offset1 + oy * s1;
  9975. for (int ky = 0; ky < k1; ++ky) {
  9976. if (iy + ky < 0 || iy + ky >= src->ne[1]) continue;
  9977. const float * const srow = (const float *)(cdata + src->nb[1] * (iy + ky));
  9978. for (int kx = 0; kx < k0; ++kx) {
  9979. int j = ix + kx;
  9980. if (j < 0 || j >= src->ne[0]) continue;
  9981. switch (op) {
  9982. case GGML_OP_POOL_AVG: *out += srow[j]; break;
  9983. case GGML_OP_POOL_MAX: if (srow[j] > *out) *out = srow[j]; break;
  9984. case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
  9985. }
  9986. }
  9987. }
  9988. switch (op) {
  9989. case GGML_OP_POOL_AVG: *out /= ka; break;
  9990. case GGML_OP_POOL_MAX: break;
  9991. case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
  9992. }
  9993. }
  9994. }
  9995. cdata += src->nb[2];
  9996. dplane += pa;
  9997. }
  9998. }
  9999. // ggml_compute_forward_upscale
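// nearest-neighbour upscaling by the integer factor in op_params[0]: every source
// element is replicated scale_factor times along dims 0 and 1; dims 2 and 3 are
// copied unchanged.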
  10000. static void ggml_compute_forward_upscale_f32(
  10001. const struct ggml_compute_params * params,
  10002. const struct ggml_tensor * src0,
  10003. struct ggml_tensor * dst) {
  10004. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  10005. return;
  10006. }
  10007. GGML_ASSERT(src0->nb[0] == sizeof(float));
  10008. const int ith = params->ith;
  10009. const int nth = params->nth;
  10010. GGML_TENSOR_UNARY_OP_LOCALS
  10011. const int scale_factor = dst->op_params[0];
  10012. // TODO: optimize
  10013. for (int64_t i3 = 0; i3 < ne3; i3++) {
  10014. const int64_t i03 = i3;
  10015. for (int64_t i2 = ith; i2 < ne2; i2 += nth) {
  10016. const int64_t i02 = i2;
  10017. for (int64_t i1 = 0; i1 < ne1; i1++) {
  10018. const int64_t i01 = i1 / scale_factor;
  10019. for (int64_t i0 = 0; i0 < ne0; i0++) {
  10020. const int64_t i00 = i0 / scale_factor;
  10021. const float * x = (float *)((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  10022. float * y = (float *)((char *) dst->data + i0*nb0 + i1*nb1 + i2*nb2 + i3*nb3);
  10023. *y = *x;
  10024. }
  10025. }
  10026. }
  10027. }
  10028. }
  10029. static void ggml_compute_forward_upscale(
  10030. const struct ggml_compute_params * params,
  10031. const struct ggml_tensor * src0,
  10032. struct ggml_tensor * dst) {
  10033. switch (src0->type) {
  10034. case GGML_TYPE_F32:
  10035. {
  10036. ggml_compute_forward_upscale_f32(params, src0, dst);
  10037. } break;
  10038. default:
  10039. {
  10040. GGML_ASSERT(false);
  10041. } break;
  10042. }
  10043. }
  10044. // ggml_compute_forward_pad
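// copies src0 into the (larger) dst and zero-fills the remainder.
// dst must be contiguous f32 - it is indexed below as a flat array.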
  10045. static void ggml_compute_forward_pad_f32(
  10046. const struct ggml_compute_params * params,
  10047. const struct ggml_tensor * src0,
  10048. struct ggml_tensor * dst) {
  10049. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  10050. return;
  10051. }
  10052. GGML_ASSERT(src0->nb[0] == sizeof(float));
  10053. GGML_ASSERT( dst->nb[0] == sizeof(float));
  10054. const int ith = params->ith;
  10055. const int nth = params->nth;
  10056. GGML_TENSOR_UNARY_OP_LOCALS
  10057. float * dst_ptr = (float *) dst->data;
  10058. // TODO: optimize
  10059. for (int64_t i2 = 0; i2 < ne2; ++i2) {
  10060. for (int64_t i1 = ith; i1 < ne1; i1 += nth) {
  10061. for (int64_t i0 = 0; i0 < ne0; ++i0) {
  10062. for (int64_t i3 = 0; i3 < ne3; ++i3) {
  10063. const int64_t dst_idx = i3*(ne0*ne1*ne2) + i2*(ne0*ne1) + i1*ne0 + i0;
  10064. const float * src_ptr = (const float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  10065. if (i0 < ne00 && i1 < ne01 && i2 < ne02 && i3 < ne03) {
  10066. dst_ptr[dst_idx] = *src_ptr;
  10067. } else {
  10068. dst_ptr[dst_idx] = 0;
  10069. }
  10070. }
  10071. }
  10072. }
  10073. }
  10074. }
  10075. static void ggml_compute_forward_pad(
  10076. const struct ggml_compute_params * params,
  10077. const struct ggml_tensor * src0,
  10078. struct ggml_tensor * dst) {
  10079. switch (src0->type) {
  10080. case GGML_TYPE_F32:
  10081. {
  10082. ggml_compute_forward_pad_f32(params, src0, dst);
  10083. } break;
  10084. default:
  10085. {
  10086. GGML_ASSERT(false);
  10087. } break;
  10088. }
  10089. }
  10090. // ggml_compute_forward_argsort
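// per-row argsort: dst receives int32 indices that order the corresponding src0 row
// ascending or descending (op_params[0]); rows are distributed across threads.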
  10091. static void ggml_compute_forward_argsort_f32(
  10092. const struct ggml_compute_params * params,
  10093. const struct ggml_tensor * src0,
  10094. struct ggml_tensor * dst) {
  10095. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  10096. return;
  10097. }
  10098. GGML_TENSOR_UNARY_OP_LOCALS
  10099. GGML_ASSERT(nb0 == sizeof(float));
  10100. const int ith = params->ith;
  10101. const int nth = params->nth;
  10102. const int64_t nr = ggml_nrows(src0);
  10103. enum ggml_sort_order order = (enum ggml_sort_order) ggml_get_op_params_i32(dst, 0);
  10104. for (int64_t i = ith; i < nr; i += nth) {
  10105. int32_t * dst_data = (int32_t *)((char *) dst->data + i*nb1);
  10106. const float * src_data = (float *)((char *) src0->data + i*nb01);
  10107. for (int64_t j = 0; j < ne0; j++) {
  10108. dst_data[j] = j;
  10109. }
// C has no portable sort that takes a context pointer, so use a simple O(n^2) exchange sort on the index array instead
  10111. for (int64_t j = 0; j < ne0; j++) {
  10112. for (int64_t k = j + 1; k < ne0; k++) {
  10113. if ((order == GGML_SORT_ASC && src_data[dst_data[j]] > src_data[dst_data[k]]) ||
  10114. (order == GGML_SORT_DESC && src_data[dst_data[j]] < src_data[dst_data[k]])) {
  10115. int32_t tmp = dst_data[j];
  10116. dst_data[j] = dst_data[k];
  10117. dst_data[k] = tmp;
  10118. }
  10119. }
  10120. }
  10121. }
  10122. }
  10123. static void ggml_compute_forward_argsort(
  10124. const struct ggml_compute_params * params,
  10125. const struct ggml_tensor * src0,
  10126. struct ggml_tensor * dst) {
  10127. switch (src0->type) {
  10128. case GGML_TYPE_F32:
  10129. {
  10130. ggml_compute_forward_argsort_f32(params, src0, dst);
  10131. } break;
  10132. default:
  10133. {
  10134. GGML_ASSERT(false);
  10135. } break;
  10136. }
  10137. }
  10138. // ggml_compute_forward_flash_attn
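// fused attention forward pass: for each query row compute S = softmax(scale * q.k)
// with scale = 1/sqrt(D), optionally causally masked, then accumulate S against v
// directly into dst. Each thread processes its own slice of query rows using a
// private scratch buffer inside params->wdata.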
  10139. static void ggml_compute_forward_flash_attn_f32(
  10140. const struct ggml_compute_params * params,
  10141. const struct ggml_tensor * q,
  10142. const struct ggml_tensor * k,
  10143. const struct ggml_tensor * v,
  10144. const bool masked,
  10145. struct ggml_tensor * dst) {
  10146. int64_t t0 = ggml_perf_time_us();
  10147. UNUSED(t0);
  10148. GGML_TENSOR_LOCALS(int64_t, neq, q, ne)
  10149. GGML_TENSOR_LOCALS(size_t, nbq, q, nb)
  10150. GGML_TENSOR_LOCALS(int64_t, nek, k, ne)
  10151. GGML_TENSOR_LOCALS(size_t, nbk, k, nb)
  10152. GGML_TENSOR_LOCALS(int64_t, nev, v, ne)
  10153. GGML_TENSOR_LOCALS(size_t, nbv, v, nb)
  10154. GGML_TENSOR_LOCALS(int64_t, ne, dst, ne)
  10155. GGML_TENSOR_LOCALS(size_t, nb, dst, nb)
  10156. const int ith = params->ith;
  10157. const int nth = params->nth;
  10158. const int64_t D = neq0;
  10159. const int64_t N = neq1;
  10160. const int64_t P = nek1 - N;
  10161. const int64_t M = P + N;
  10162. const int Mup = ggml_up(M, GGML_SOFT_MAX_UNROLL);
  10163. GGML_ASSERT(ne0 == D);
  10164. GGML_ASSERT(ne1 == N);
  10165. GGML_ASSERT(P >= 0);
  10166. GGML_ASSERT(nbq0 == sizeof(float));
  10167. GGML_ASSERT(nbk0 == sizeof(float));
  10168. GGML_ASSERT(nbv0 == sizeof(float));
  10169. GGML_ASSERT(neq0 == D);
  10170. GGML_ASSERT(nek0 == D);
  10171. GGML_ASSERT(nev1 == D);
  10172. GGML_ASSERT(neq1 == N);
  10173. GGML_ASSERT(nek1 == N + P);
  10174. GGML_ASSERT(nev1 == D);
  10175. // dst cannot be transposed or permuted
  10176. GGML_ASSERT(nb0 == sizeof(float));
  10177. GGML_ASSERT(nb0 <= nb1);
  10178. GGML_ASSERT(nb1 <= nb2);
  10179. GGML_ASSERT(nb2 <= nb3);
  10180. if (params->type == GGML_TASK_INIT) {
  10181. return;
  10182. }
  10183. if (params->type == GGML_TASK_FINALIZE) {
  10184. return;
  10185. }
  10186. // parallelize by q rows using ggml_vec_dot_f32
  10187. // total rows in q
  10188. const int nr = neq1*neq2*neq3;
  10189. // rows per thread
  10190. const int dr = (nr + nth - 1)/nth;
  10191. // row range for this thread
  10192. const int ir0 = dr*ith;
  10193. const int ir1 = MIN(ir0 + dr, nr);
  10194. const float scale = 1.0f/sqrtf(D);
  10195. //printf("P=%d N=%d D=%d ir0=%d ir1=%d scale = %f\n", P, N, D, ir0, ir1, scale);
  10196. for (int ir = ir0; ir < ir1; ++ir) {
  10197. // q indices
  10198. const int iq3 = ir/(neq2*neq1);
  10199. const int iq2 = (ir - iq3*neq2*neq1)/neq1;
  10200. const int iq1 = (ir - iq3*neq2*neq1 - iq2*neq1);
  10201. float * S = (float *) params->wdata + ith*(Mup + CACHE_LINE_SIZE_F32);
  10202. for (int i = M; i < Mup; ++i) {
  10203. S[i] = -INFINITY;
  10204. }
  10205. const int64_t masked_begin = masked ? (P + iq1 + 1) : M;
  10206. for (int64_t ic = 0; ic < masked_begin; ++ic) {
  10207. // k indices
  10208. const int ik3 = iq3;
  10209. const int ik2 = iq2 % nek2;
  10210. const int ik1 = ic;
  10211. // S indices
  10212. const int i1 = ik1;
  10213. ggml_vec_dot_f32(neq0,
  10214. S + i1,
  10215. (float *) ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)),
  10216. (float *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)));
  10217. }
  10218. // scale
  10219. ggml_vec_scale_f32(masked_begin, S, scale);
  10220. for (int64_t i = masked_begin; i < M; i++) {
  10221. S[i] = -INFINITY;
  10222. }
  10223. // softmax
// exclude known -INF S[..] values from the max and the loop
// don't forget to set their S values to zero
  10226. {
  10227. float max = -INFINITY;
  10228. ggml_vec_max_f32(masked_begin, &max, S);
  10229. ggml_float sum = 0.0;
  10230. {
  10231. #ifdef GGML_SOFT_MAX_ACCELERATE
  10232. max = -max;
  10233. vDSP_vsadd(S, 1, &max, S, 1, Mup);
  10234. vvexpf(S, S, &Mup);
  10235. ggml_vec_sum_f32(Mup, &sum, S);
  10236. #else
  10237. uint16_t scvt[GGML_SOFT_MAX_UNROLL]; UNUSED(scvt);
  10238. ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 };
  10239. for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) {
  10240. if (i >= masked_begin) {
  10241. break;
  10242. }
  10243. float * SS = S + i;
  10244. for (int j = 0; j < GGML_SOFT_MAX_UNROLL; ++j) {
  10245. if (i + j >= masked_begin) {
  10246. break;
  10247. } else if (SS[j] == -INFINITY) {
  10248. SS[j] = 0.0f;
  10249. } else {
  10250. #ifndef GGML_FLASH_ATTN_EXP_FP16
  10251. const float val = expf(SS[j] - max);
  10252. #else
  10253. ggml_fp16_t s = GGML_FP32_TO_FP16(SS[j] - max);
  10254. memcpy(&scvt[j], &s, sizeof(uint16_t));
  10255. const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt[j]]);
  10256. #endif
  10257. sump[j] += (ggml_float)val;
  10258. SS[j] = val;
  10259. }
  10260. }
  10261. }
  10262. for (int i = 0; i < GGML_SOFT_MAX_UNROLL; i++) {
  10263. sum += sump[i];
  10264. }
  10265. #endif
  10266. }
  10267. assert(sum > 0.0);
  10268. sum = 1.0/sum;
  10269. ggml_vec_scale_f32(masked_begin, S, sum);
  10270. #ifndef NDEBUG
  10271. for (int i = 0; i < masked_begin; ++i) {
  10272. assert(!isnan(S[i]));
  10273. assert(!isinf(S[i]));
  10274. }
  10275. #endif
  10276. }
  10277. for (int64_t ic = 0; ic < nev1; ++ic) {
  10278. // dst indices
  10279. const int i1 = iq1;
  10280. const int i2 = iq2;
  10281. const int i3 = iq3;
  10282. // v indices
  10283. const int iv2 = iq2 % nev2;
  10284. const int iv3 = iq3;
  10285. ggml_vec_dot_f32(masked_begin,
  10286. (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)),
  10287. (float *) ((char *) v->data + ( ic*nbv1 + iv2*nbv2 + iv3*nbv3)),
  10288. S);
  10289. }
  10290. }
  10291. }
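// f16 variant of the attention forward pass. Differences from the f32 path:
// k/q/v are half precision, the q.k dot products use ggml_vec_dot_f16 (or the
// unrolled variant when the sizes allow it), and the softmax result is converted
// to fp16 (S16) before the final accumulation against v.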
  10292. static void ggml_compute_forward_flash_attn_f16(
  10293. const struct ggml_compute_params * params,
  10294. const struct ggml_tensor * q,
  10295. const struct ggml_tensor * k,
  10296. const struct ggml_tensor * v,
  10297. const bool masked,
  10298. struct ggml_tensor * dst) {
  10299. int64_t t0 = ggml_perf_time_us();
  10300. UNUSED(t0);
  10301. GGML_TENSOR_LOCALS(int64_t, neq, q, ne)
  10302. GGML_TENSOR_LOCALS(size_t, nbq, q, nb)
  10303. GGML_TENSOR_LOCALS(int64_t, nek, k, ne)
  10304. GGML_TENSOR_LOCALS(size_t, nbk, k, nb)
  10305. GGML_TENSOR_LOCALS(int64_t, nev, v, ne)
  10306. GGML_TENSOR_LOCALS(size_t, nbv, v, nb)
  10307. GGML_TENSOR_LOCALS(int64_t, ne, dst, ne)
  10308. GGML_TENSOR_LOCALS(size_t, nb, dst, nb)
  10309. const int ith = params->ith;
  10310. const int nth = params->nth;
  10311. const int64_t D = neq0;
  10312. const int64_t N = neq1;
  10313. const int64_t P = nek1 - N;
  10314. const int64_t M = P + N;
  10315. const int Mup = ggml_up(M, GGML_SOFT_MAX_UNROLL);
  10316. GGML_ASSERT(ne0 == D);
  10317. GGML_ASSERT(ne1 == N);
  10318. GGML_ASSERT(P >= 0);
  10319. GGML_ASSERT(nbq0 == sizeof(ggml_fp16_t));
  10320. GGML_ASSERT(nbk0 == sizeof(ggml_fp16_t));
  10321. GGML_ASSERT(nbv0 == sizeof(ggml_fp16_t));
  10322. GGML_ASSERT(neq0 == D);
  10323. GGML_ASSERT(nek0 == D);
  10324. GGML_ASSERT(nev1 == D);
  10325. GGML_ASSERT(neq1 == N);
  10326. GGML_ASSERT(nek1 == N + P);
  10327. GGML_ASSERT(nev1 == D);
  10328. // dst cannot be transposed or permuted
  10329. GGML_ASSERT(nb0 == sizeof(float));
  10330. GGML_ASSERT(nb0 <= nb1);
  10331. GGML_ASSERT(nb1 <= nb2);
  10332. GGML_ASSERT(nb2 <= nb3);
  10333. if (params->type == GGML_TASK_INIT) {
  10334. return;
  10335. }
  10336. if (params->type == GGML_TASK_FINALIZE) {
  10337. return;
  10338. }
// parallelize by q rows using ggml_vec_dot_f16
  10340. // total rows in q
  10341. const int nr = neq1*neq2*neq3;
  10342. // rows per thread
  10343. const int dr = (nr + nth - 1)/nth;
  10344. // row range for this thread
  10345. const int ir0 = dr*ith;
  10346. const int ir1 = MIN(ir0 + dr, nr);
  10347. const float scale = 1.0f/sqrtf(D);
  10348. //printf("P=%d N=%d D=%d ir0=%d ir1=%d scale = %f\n", P, N, D, ir0, ir1, scale);
  10349. for (int ir = ir0; ir < ir1; ++ir) {
  10350. // q indices
  10351. const int iq3 = ir/(neq2*neq1);
  10352. const int iq2 = (ir - iq3*neq2*neq1)/neq1;
  10353. const int iq1 = (ir - iq3*neq2*neq1 - iq2*neq1);
  10354. float * S = (float *) params->wdata + ith*(2*Mup + CACHE_LINE_SIZE_F32);
  10355. for (int i = M; i < Mup; ++i) {
  10356. S[i] = -INFINITY;
  10357. }
  10358. if (GGML_VEC_DOT_UNROLL > 2 || nek1 % GGML_VEC_DOT_UNROLL != 0) {
  10359. for (int64_t ic = 0; ic < nek1; ++ic) {
  10360. // k indices
  10361. const int ik3 = iq3;
  10362. const int ik2 = iq2 % nek2;
  10363. const int ik1 = ic;
  10364. // S indices
  10365. const int i1 = ik1;
  10366. ggml_vec_dot_f16(neq0,
  10367. S + i1,
  10368. (ggml_fp16_t *) ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)),
  10369. (ggml_fp16_t *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)));
  10370. }
  10371. } else {
  10372. for (int64_t ic = 0; ic < nek1; ic += GGML_VEC_DOT_UNROLL) {
  10373. // k indices
  10374. const int ik3 = iq3;
  10375. const int ik2 = iq2 % nek2;
  10376. const int ik1 = ic;
  10377. // S indices
  10378. const int i1 = ik1;
  10379. ggml_vec_dot_f16_unroll(neq0, nbk1,
  10380. S + i1,
  10381. ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)),
  10382. (ggml_fp16_t *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)));
  10383. }
  10384. }
  10385. // scale
  10386. ggml_vec_scale_f32(nek1, S, scale);
  10387. if (masked) {
  10388. for (int64_t i = P; i < M; i++) {
  10389. if (i > P + iq1) {
  10390. S[i] = -INFINITY;
  10391. }
  10392. }
  10393. }
  10394. // softmax
// todo: exclude known -INF S[..] values from the max and the loop, assuming their results to be zero.
// don't forget to set their S values to zero
  10397. {
  10398. float max = -INFINITY;
  10399. ggml_vec_max_f32(M, &max, S);
  10400. ggml_float sum = 0.0;
  10401. {
  10402. #ifdef GGML_SOFT_MAX_ACCELERATE
  10403. max = -max;
  10404. vDSP_vsadd(S, 1, &max, S, 1, Mup);
  10405. vvexpf(S, S, &Mup);
  10406. ggml_vec_sum_f32(Mup, &sum, S);
  10407. #else
  10408. uint16_t scvt[GGML_SOFT_MAX_UNROLL];
  10409. ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 };
  10410. for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) {
  10411. float * SS = S + i;
  10412. for (int j = 0; j < GGML_SOFT_MAX_UNROLL; ++j) {
  10413. if (SS[j] == -INFINITY) {
  10414. SS[j] = 0.0f;
  10415. } else {
  10416. ggml_fp16_t s = GGML_FP32_TO_FP16(SS[j] - max);
  10417. memcpy(&scvt[j], &s, sizeof(uint16_t));
  10418. const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt[j]]);
  10419. sump[j] += (ggml_float)val;
  10420. SS[j] = val;
  10421. }
  10422. }
  10423. }
  10424. for (int i = 0; i < GGML_SOFT_MAX_UNROLL; i++) {
  10425. sum += sump[i];
  10426. }
  10427. #endif
  10428. }
  10429. assert(sum > 0.0);
  10430. sum = 1.0/sum;
  10431. ggml_vec_scale_f32(M, S, sum);
  10432. #ifndef NDEBUG
  10433. for (int i = 0; i < M; ++i) {
  10434. assert(!isnan(S[i]));
  10435. assert(!isinf(S[i]));
  10436. }
  10437. #endif
  10438. }
  10439. ggml_fp16_t * S16 = (ggml_fp16_t *) ((float *) params->wdata + ith*(2*Mup + CACHE_LINE_SIZE_F32) + Mup);
  10440. for (int64_t i = 0; i < M; i++) {
  10441. S16[i] = GGML_FP32_TO_FP16(S[i]);
  10442. }
// todo: exclude known zero S[..] values from the dot product (reducing nev0 and advancing the start of v and S16).
  10444. if (GGML_VEC_DOT_UNROLL == 1 || (nev1 % GGML_VEC_DOT_UNROLL != 0)) {
  10445. for (int64_t ic = 0; ic < nev1; ++ic) {
  10446. // dst indices
  10447. const int i1 = iq1;
  10448. const int i2 = iq2;
  10449. const int i3 = iq3;
  10450. // v indices
  10451. const int iv2 = iq2 % nev2;
  10452. const int iv3 = iq3;
  10453. ggml_vec_dot_f16(nev0,
  10454. (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)),
  10455. (ggml_fp16_t *) ((char *) v->data + ( ic*nbv1 + iv2*nbv2 + iv3*nbv3)),
  10456. S16);
  10457. }
  10458. } else {
  10459. for (int64_t ic = 0; ic < nev1; ic += GGML_VEC_DOT_UNROLL) {
  10460. // dst indices
  10461. const int i1 = iq1;
  10462. const int i2 = iq2;
  10463. const int i3 = iq3;
  10464. // v indices
  10465. const int iv2 = iq2 % nev2;
  10466. const int iv3 = iq3;
  10467. ggml_vec_dot_f16_unroll(nev0, nbv1,
  10468. (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)),
  10469. ((char *) v->data + ( ic*nbv1 + iv2*nbv2 + iv3*nbv3)),
  10470. S16);
  10471. }
  10472. }
  10473. }
  10474. }
  10475. static void ggml_compute_forward_flash_attn(
  10476. const struct ggml_compute_params * params,
  10477. const struct ggml_tensor * q,
  10478. const struct ggml_tensor * k,
  10479. const struct ggml_tensor * v,
  10480. const bool masked,
  10481. struct ggml_tensor * dst) {
  10482. switch (q->type) {
  10483. case GGML_TYPE_F16:
  10484. {
  10485. ggml_compute_forward_flash_attn_f16(params, q, k, v, masked, dst);
  10486. } break;
  10487. case GGML_TYPE_F32:
  10488. {
  10489. ggml_compute_forward_flash_attn_f32(params, q, k, v, masked, dst);
  10490. } break;
  10491. default:
  10492. {
  10493. GGML_ASSERT(false);
  10494. } break;
  10495. }
  10496. }
  10497. // ggml_compute_forward_flash_ff
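// fused feed-forward block: per input row compute x = gelu(b0.a + b1) with the
// intermediate kept in fp16, then project with c0 and add the bias c1 into dst.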
  10498. static void ggml_compute_forward_flash_ff_f16(
  10499. const struct ggml_compute_params * params,
  10500. const struct ggml_tensor * a, // F16
  10501. const struct ggml_tensor * b0, // F16 fc_w
  10502. const struct ggml_tensor * b1, // F32 fc_b
  10503. const struct ggml_tensor * c0, // F16 proj_w
  10504. const struct ggml_tensor * c1, // F32 proj_b
  10505. struct ggml_tensor * dst) {
  10506. int64_t t0 = ggml_perf_time_us();
  10507. UNUSED(t0);
  10508. GGML_TENSOR_LOCALS(int64_t, nea, a, ne)
  10509. GGML_TENSOR_LOCALS(size_t, nba, a, nb)
  10510. GGML_TENSOR_LOCALS(int64_t, neb0, b0, ne)
  10511. GGML_TENSOR_LOCALS(size_t, nbb0, b0, nb)
  10512. GGML_TENSOR_LOCALS(int64_t, neb1, b1, ne)
  10513. GGML_TENSOR_LOCALS(size_t, nbb1, b1, nb)
  10514. GGML_TENSOR_LOCALS(int64_t, nec0, c0, ne)
  10515. GGML_TENSOR_LOCALS(size_t, nbc0, c0, nb)
  10516. GGML_TENSOR_LOCALS(int64_t, nec1, c1, ne)
  10517. GGML_TENSOR_LOCALS(size_t, nbc1, c1, nb)
  10518. GGML_TENSOR_LOCALS(int64_t, ne, dst, ne)
  10519. GGML_TENSOR_LOCALS(size_t, nb, dst, nb)
  10520. const int ith = params->ith;
  10521. const int nth = params->nth;
  10522. const int64_t D = nea0;
  10523. //const int64_t N = nea1;
  10524. const int64_t M = neb01;
  10525. GGML_ASSERT(ne0 == nea0);
  10526. GGML_ASSERT(ne1 == nea1);
  10527. GGML_ASSERT(ne2 == nea2);
  10528. GGML_ASSERT(nba0 == sizeof(ggml_fp16_t));
  10529. GGML_ASSERT(nbb00 == sizeof(ggml_fp16_t));
  10530. GGML_ASSERT(nbb10 == sizeof(float));
  10531. GGML_ASSERT(nbc00 == sizeof(ggml_fp16_t));
  10532. GGML_ASSERT(nbc10 == sizeof(float));
  10533. GGML_ASSERT(neb00 == D);
  10534. GGML_ASSERT(neb01 == M);
  10535. GGML_ASSERT(neb10 == M);
  10536. GGML_ASSERT(neb11 == 1);
  10537. GGML_ASSERT(nec00 == M);
  10538. GGML_ASSERT(nec01 == D);
  10539. GGML_ASSERT(nec10 == D);
  10540. GGML_ASSERT(nec11 == 1);
  10541. // dst cannot be transposed or permuted
  10542. GGML_ASSERT(nb0 == sizeof(float));
  10543. GGML_ASSERT(nb0 <= nb1);
  10544. GGML_ASSERT(nb1 <= nb2);
  10545. GGML_ASSERT(nb2 <= nb3);
  10546. if (params->type == GGML_TASK_INIT) {
  10547. return;
  10548. }
  10549. if (params->type == GGML_TASK_FINALIZE) {
  10550. return;
  10551. }
// parallelize by a rows using ggml_vec_dot_f16
  10553. // total rows in a
  10554. const int nr = nea1*nea2*nea3;
  10555. // rows per thread
  10556. const int dr = (nr + nth - 1)/nth;
  10557. // row range for this thread
  10558. const int ir0 = dr*ith;
  10559. const int ir1 = MIN(ir0 + dr, nr);
  10560. for (int ir = ir0; ir < ir1; ++ir) {
  10561. // a indices
  10562. const int ia3 = ir/(nea2*nea1);
  10563. const int ia2 = (ir - ia3*nea2*nea1)/nea1;
  10564. const int ia1 = (ir - ia3*nea2*nea1 - ia2*nea1);
  10565. float * S = (float *) params->wdata + ith*(2*M + CACHE_LINE_SIZE_F32);
  10566. for (int64_t ic = 0; ic < neb01; ++ic) {
  10567. // b0 indices
  10568. const int ib03 = ia3;
  10569. const int ib02 = ia2;
  10570. const int ib01 = ic;
  10571. // S indices
  10572. const int i1 = ib01;
  10573. ggml_vec_dot_f16(nea0,
  10574. S + i1,
  10575. (ggml_fp16_t *) ((char *) b0->data + (ib01*nbb01 + ib02*nbb02 + ib03*nbb03)),
  10576. (ggml_fp16_t *) ((char *) a->data + ( ia1*nba1 + ia2*nba2 + ia3*nba3)));
  10577. }
  10578. ggml_vec_add_f32(neb01, S, S, (float *) b1->data);
  10579. //ggml_vec_gelu_f32(neb01, S, S);
  10580. ggml_fp16_t * S16 = (ggml_fp16_t *) ((float *) params->wdata + ith*(2*M + CACHE_LINE_SIZE_F32) + M);
  10581. for (int64_t i = 0; i < M; i++) {
  10582. S16[i] = GGML_FP32_TO_FP16(S[i]);
  10583. }
  10584. ggml_vec_gelu_f16(neb01, S16, S16);
  10585. {
  10586. // dst indices
  10587. const int i1 = ia1;
  10588. const int i2 = ia2;
  10589. const int i3 = ia3;
  10590. for (int64_t ic = 0; ic < nec01; ++ic) {
  10591. ggml_vec_dot_f16(neb01,
  10592. (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)),
  10593. (ggml_fp16_t *) ((char *) c0->data + ( ic*nbc01 + i2*nbc02 + i3*nbc03)),
  10594. S16);
  10595. }
  10596. ggml_vec_add_f32(nec01,
  10597. (float *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3)),
  10598. (float *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3)),
  10599. (float *) c1->data);
  10600. }
  10601. }
  10602. }
  10603. static void ggml_compute_forward_flash_ff(
  10604. const struct ggml_compute_params * params,
  10605. const struct ggml_tensor * a,
  10606. const struct ggml_tensor * b0,
  10607. const struct ggml_tensor * b1,
  10608. const struct ggml_tensor * c0,
  10609. const struct ggml_tensor * c1,
  10610. struct ggml_tensor * dst) {
  10611. switch (b0->type) {
  10612. case GGML_TYPE_F16:
  10613. {
  10614. ggml_compute_forward_flash_ff_f16(params, a, b0, b1, c0, c1, dst);
  10615. } break;
  10616. case GGML_TYPE_F32:
  10617. {
  10618. GGML_ASSERT(false); // TODO
  10619. } break;
  10620. default:
  10621. {
  10622. GGML_ASSERT(false);
  10623. } break;
  10624. }
  10625. }
  10626. // ggml_compute_forward_flash_attn_back
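// backward pass of flash attention. dst packs the three gradients contiguously:
// grad[q] at offset 0, grad[k] at offs_k and grad[v] at offs_v (computed below).
// INIT zeroes dst (thread 0 only); the compute phase re-runs the forward softmax
// per query row and accumulates the gradients with ggml_vec_mad_f32.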
  10627. static void ggml_compute_forward_flash_attn_back_f32(
  10628. const struct ggml_compute_params * params,
  10629. const struct ggml_tensor * q,
  10630. const struct ggml_tensor * k,
  10631. const struct ggml_tensor * v,
  10632. const struct ggml_tensor * d,
  10633. const bool masked,
  10634. struct ggml_tensor * dst) {
  10635. int64_t t0 = ggml_perf_time_us();
  10636. UNUSED(t0);
  10637. GGML_TENSOR_LOCALS(int64_t, neq, q, ne)
  10638. GGML_TENSOR_LOCALS(size_t, nbq, q, nb)
  10639. GGML_TENSOR_LOCALS(int64_t, nek, k, ne)
  10640. GGML_TENSOR_LOCALS(size_t, nbk, k, nb)
  10641. GGML_TENSOR_LOCALS(int64_t, nev, v, ne)
  10642. GGML_TENSOR_LOCALS(size_t, nbv, v, nb)
  10643. GGML_TENSOR_LOCALS(int64_t, ned, d, ne)
  10644. GGML_TENSOR_LOCALS(size_t, nbd, d, nb)
  10645. GGML_TENSOR_LOCALS(int64_t, ne, dst, ne)
  10646. GGML_TENSOR_LOCALS(size_t, nb, dst, nb)
  10647. const int ith = params->ith;
  10648. const int nth = params->nth;
  10649. const int64_t D = neq0;
  10650. const int64_t N = neq1;
  10651. const int64_t P = nek1 - N;
  10652. const int64_t M = P + N;
  10653. const int Mup = ggml_up(M, GGML_SOFT_MAX_UNROLL);
  10654. const int mxDM = MAX(D, Mup);
  10655. // GGML_ASSERT(ne0 == D);
  10656. // GGML_ASSERT(ne1 == N);
  10657. GGML_ASSERT(P >= 0);
  10658. GGML_ASSERT(nbq0 == sizeof(float));
  10659. GGML_ASSERT(nbk0 == sizeof(float));
  10660. GGML_ASSERT(nbv0 == sizeof(float));
  10661. GGML_ASSERT(neq0 == D);
  10662. GGML_ASSERT(nek0 == D);
  10663. GGML_ASSERT(nev1 == D);
  10664. GGML_ASSERT(ned0 == D);
  10665. GGML_ASSERT(neq1 == N);
  10666. GGML_ASSERT(nek1 == N + P);
  10667. GGML_ASSERT(nev1 == D);
  10668. GGML_ASSERT(ned1 == N);
  10669. // dst cannot be transposed or permuted
  10670. GGML_ASSERT(nb0 == sizeof(float));
  10671. GGML_ASSERT(nb0 <= nb1);
  10672. GGML_ASSERT(nb1 <= nb2);
  10673. GGML_ASSERT(nb2 <= nb3);
  10674. if (params->type == GGML_TASK_INIT) {
  10675. if (ith == 0) {
  10676. memset(dst->data, 0, nb0*ne0*ne1*ne2*ne3);
  10677. }
  10678. return;
  10679. }
  10680. if (params->type == GGML_TASK_FINALIZE) {
  10681. return;
  10682. }
  10683. const int64_t elem_q = ggml_nelements(q);
  10684. const int64_t elem_k = ggml_nelements(k);
  10685. enum ggml_type result_type = dst->type;
  10686. GGML_ASSERT(ggml_blck_size(result_type) == 1);
  10687. const size_t tsize = ggml_type_size(result_type);
  10688. const size_t offs_q = 0;
  10689. const size_t offs_k = offs_q + GGML_PAD(elem_q * tsize, GGML_MEM_ALIGN);
  10690. const size_t offs_v = offs_k + GGML_PAD(elem_k * tsize, GGML_MEM_ALIGN);
  10691. void * grad_q = (char *) dst->data;
  10692. void * grad_k = (char *) dst->data + offs_k;
  10693. void * grad_v = (char *) dst->data + offs_v;
  10694. const size_t nbgq1 = nb0*neq0;
  10695. const size_t nbgq2 = nb0*neq0*neq1;
  10696. const size_t nbgq3 = nb0*neq0*neq1*neq2;
  10697. const size_t nbgk1 = nb0*nek0;
  10698. const size_t nbgk2 = nb0*nek0*nek1;
  10699. const size_t nbgk3 = nb0*nek0*nek1*neq2;
  10700. const size_t nbgv1 = nb0*nev0;
  10701. const size_t nbgv2 = nb0*nev0*nev1;
  10702. const size_t nbgv3 = nb0*nev0*nev1*neq2;
  10703. // parallelize by k rows using ggml_vec_dot_f32
  10704. // total rows in k
  10705. const int nr = nek2*nek3;
  10706. // rows per thread
  10707. const int dr = (nr + nth - 1)/nth;
  10708. // row range for this thread
  10709. const int ir0 = dr*ith;
  10710. const int ir1 = MIN(ir0 + dr, nr);
  10711. const float scale = 1.0f/sqrtf(D);
  10712. //printf("P=%d N=%d D=%d ir0=%d ir1=%d scale = %f\n", P, N, D, ir0, ir1, scale);
  10713. // how often k2 (and v2) is repeated in q2
  10714. int nrep = neq2/nek2;
  10715. for (int ir = ir0; ir < ir1; ++ir) {
  10716. // q indices
  10717. const int ik3 = ir/(nek2);
  10718. const int ik2 = ir - ik3*nek2;
  10719. const int iq3 = ik3;
  10720. const int id3 = ik3;
  10721. const int iv3 = ik3;
  10722. const int iv2 = ik2;
  10723. for (int irep = 0; irep < nrep; ++irep) {
  10724. const int iq2 = ik2 + irep*nek2;
  10725. const int id2 = iq2;
  10726. // (ik2 + irep*nek2) % nek2 == ik2
  10727. for (int iq1 = 0; iq1 < neq1; ++iq1) {
  10728. const int id1 = iq1;
  10729. // not sure about CACHE_LINE_SIZE_F32..
  10730. // - maybe it must not be multiplied by 2 and excluded from .. in SM 1*(..) offset?
  10731. float * S = (float *) params->wdata + ith*2*(mxDM + CACHE_LINE_SIZE_F32) + 0*(mxDM+CACHE_LINE_SIZE_F32);
  10732. float * SM = (float *) params->wdata + ith*2*(mxDM + CACHE_LINE_SIZE_F32) + 1*(mxDM+CACHE_LINE_SIZE_F32);
  10733. for (int i = M; i < Mup; ++i) {
  10734. S[i] = -INFINITY;
  10735. }
  10736. const int64_t masked_begin = masked ? (P + iq1 + 1) : M;
  10737. for (int64_t ic = 0; ic < masked_begin; ++ic) {
  10738. // k indices
  10739. const int ik1 = ic;
  10740. // S indices
  10741. const int i1 = ik1;
  10742. ggml_vec_dot_f32(neq0,
  10743. S + i1,
  10744. (float *) ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)),
  10745. (float *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)));
  10746. }
  10747. // scale
  10748. ggml_vec_scale_f32(masked_begin, S, scale);
  10749. for (int64_t i = masked_begin; i < M; i++) {
  10750. S[i] = -INFINITY;
  10751. }
  10752. // softmax
// exclude known -INF S[..] values from the max and the loop
// don't forget to set their SM values to zero
  10755. {
  10756. float max = -INFINITY;
  10757. ggml_vec_max_f32(masked_begin, &max, S);
  10758. ggml_float sum = 0.0;
  10759. {
  10760. #ifdef GGML_SOFT_MAX_ACCELERATE
  10761. max = -max;
  10762. vDSP_vsadd(SM, 1, &max, SM, 1, Mup);
  10763. vvexpf(SM, SM, &Mup);
  10764. ggml_vec_sum_f32(Mup, &sum, SM);
  10765. #else
  10766. uint16_t scvt[GGML_SOFT_MAX_UNROLL]; UNUSED(scvt);
  10767. ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 };
  10768. for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) {
  10769. if (i >= masked_begin) {
  10770. break;
  10771. }
  10772. float * SR = S + i;
  10773. float * SW = SM + i;
  10774. for (int j = 0; j < GGML_SOFT_MAX_UNROLL; ++j) {
  10775. if (i + j >= masked_begin) {
  10776. break;
  10777. } else if (SR[j] == -INFINITY) {
  10778. SW[j] = 0.0f;
  10779. } else {
  10780. #ifndef GGML_FLASH_ATTN_EXP_FP16
  10781. const float val = expf(SR[j] - max);
  10782. #else
  10783. ggml_fp16_t s = GGML_FP32_TO_FP16(SR[j] - max);
  10784. memcpy(&scvt[j], &s, sizeof(uint16_t));
  10785. const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt[j]]);
  10786. #endif
  10787. sump[j] += (ggml_float)val;
  10788. SW[j] = val;
  10789. }
  10790. }
  10791. }
  10792. for (int i = 0; i < GGML_SOFT_MAX_UNROLL; i++) {
  10793. sum += sump[i];
  10794. }
  10795. #endif
  10796. }
  10797. assert(sum > 0.0);
  10798. sum = 1.0/sum;
  10799. ggml_vec_scale_f32(masked_begin, SM, sum);
  10800. }
  10801. // step-by-step explanation
  10802. {
  10803. // forward-process shape grads from backward process
  10804. // parallel_for ik2,ik3:
  10805. // for irep:
  10806. // iq2 = ik2 + irep*nek2
  10807. // k[:D,:M,:,:] [D,M,:,:] grad[k][:D,:M,ik2,ik3] += grad[kcur]
  10808. // q[:D,:N,:,:] [D,N,:,:] grad[q][:D,iq1,iq2,iq3] += grad[qcur]
  10809. // v[:M,:D,:,:] [M,D,:,:] grad[v][:M,:D,iv2,iv3] += grad[vcur]
  10810. // for iq1:
  10811. // kcur = k[:D,:M,ik2,ik3] [D,M,1,1] grad[kcur] = grad[S1].T @ qcur
  10812. // qcur = q[:D,iq1,iq2,iq3] [D,1,1,1] grad[qcur] = grad[S1] @ kcur
  10813. // vcur = v[:M,:D,iv2,iv3] [M,D,1,1] grad[vcur] = grad[S5].T @ S4
  10814. // S0 = -Inf [D,1,1,1]
  10815. // ~S1[i] = dot(kcur[:D,i], qcur)
  10816. // S1 = qcur @ kcur.T [M,1,1,1] grad[S1] = grad[S2] * scale
  10817. // S2 = S1 * scale [M,1,1,1] grad[S2] = diag_mask_zero(grad[S3], P)
  10818. // S3 = diag_mask_inf(S2, P) [M,1,1,1] grad[S3] = S4 * (grad[S4] - dot(S4, grad[S4]))
  10819. // S4 = softmax(S3) [M,1,1,1] grad[S4] = grad[S5] @ vcur
  10820. // ~S5[i] = dot(vcur[:,i], S4)
  10821. // S5 = S4 @ vcur.T [D,1,1,1] grad[S5] = d[:D,id1,id2,id3]
  10822. // ~dst[i,iq1,iq2,iq3] = S5[i] ^
  10823. // dst[:D,iq1,iq2,iq3] = S5 | grad[dst[:D,iq1,iq2,iq3]] = d[:D,id1,id2,id3]
  10824. // dst backward-/ grad[dst] = d
  10825. //
  10826. // output gradients with their dependencies:
  10827. //
  10828. // grad[kcur] = grad[S1].T @ qcur
  10829. // grad[S1] = diag_mask_zero(grad[S3], P) * scale
  10830. // grad[S3] = S4 * (grad[S4] - dot(S4, grad[S4]))
  10831. // grad[S4] = grad[S5] @ vcur
  10832. // grad[S4] = d[:D,id1,id2,id3] @ vcur
  10833. // grad[qcur] = grad[S1] @ kcur
  10834. // grad[vcur] = grad[S5].T @ S4
  10835. // grad[vcur] = d[:D,id1,id2,id3].T @ S4
  10836. //
  10837. // in post-order:
  10838. //
  10839. // S1 = qcur @ kcur.T
  10840. // S2 = S1 * scale
  10841. // S3 = diag_mask_inf(S2, P)
  10842. // S4 = softmax(S3)
  10843. // grad[S4] = d[:D,id1,id2,id3] @ vcur
  10844. // grad[S3] = S4 * (grad[S4] - dot(S4, grad[S4]))
  10845. // grad[S1] = diag_mask_zero(grad[S3], P) * scale
  10846. // grad[qcur] = grad[S1] @ kcur
  10847. // grad[kcur] = grad[S1].T @ qcur
  10848. // grad[vcur] = d[:D,id1,id2,id3].T @ S4
  10849. //
  10850. // using less variables (SM=S4):
  10851. //
  10852. // S = diag_mask_inf(qcur @ kcur.T * scale, P)
  10853. // SM = softmax(S)
  10854. // S = d[:D,iq1,iq2,iq3] @ vcur
  10855. // dot_SM_gradSM = dot(SM, S)
  10856. // S = SM * (S - dot(SM, S))
  10857. // S = diag_mask_zero(S, P) * scale
  10858. //
  10859. // grad[q][:D,iq1,iq2,iq3] += S @ kcur
  10860. // grad[k][:D,:M,ik2,ik3] += S.T @ qcur
  10861. // grad[v][:M,:D,iv2,iv3] += d[:D,id1,id2,id3].T @ SM
  10862. }
  10863. // S = gradSM = d[:D,id1,id2,id3] @ vcur[:,:,iv2,iv3]
  10864. // S = d[:D,id1,id2,id3] @ vcur[:,:,iv2,iv3]
  10865. // for ic:
  10866. // S[:M] += vcur[:M,ic,iv2,iv3] * d[ic,id1,id2,id3]
  10867. // exclude known future zero S[..] values from operation
  10868. ggml_vec_set_f32(masked_begin, S, 0);
  10869. for (int64_t ic = 0; ic < D; ++ic) {
  10870. ggml_vec_mad_f32(masked_begin,
  10871. S,
  10872. (float *) ((char *) v->data + ( ic*nbv1 + iv2*nbv2 + iv3*nbv3)),
  10873. *(float *) ((char *) d->data + (ic*nbd0 + id1*nbd1 + id2*nbd2 + id3*nbd3)));
  10874. }
  10875. // S = SM * (S - dot(SM, S))
  10876. float dot_SM_gradSM = 0;
  10877. ggml_vec_dot_f32 (masked_begin, &dot_SM_gradSM, SM, S);
  10878. ggml_vec_acc1_f32(M, S, -dot_SM_gradSM);
  10879. ggml_vec_mul_f32 (masked_begin, S, S, SM);
  10880. // S = diag_mask_zero(S, P) * scale
  10881. // already done by above ggml_vec_set_f32
  10882. // exclude known zero S[..] values from operation
  10883. ggml_vec_scale_f32(masked_begin, S, scale);
  10884. // S shape [M,1]
  10885. // SM shape [M,1]
  10886. // kcur shape [D,M]
  10887. // qcur shape [D,1]
  10888. // vcur shape [M,D]
  10889. // grad[q][:D,iq1,iq2,iq3] += S @ kcur
  10890. // grad[q][:D,iq1,iq2,iq3] += shape[M,1] @ shape[D,M]
  10891. // for ic:
  10892. // grad[q][:D,iq1,iq2,iq3] += S[ic] * kcur[:D,ic,ik2,ik3]
  10893. // exclude known zero S[..] values from loop
  10894. for (int64_t ic = 0; ic < masked_begin; ++ic) {
  10895. ggml_vec_mad_f32(D,
  10896. (float *) ((char *) grad_q + (iq1*nbgq1 + iq2*nbgq2 + iq3*nbgq3)),
  10897. (float *) ((char *) k->data + (ic*nbk1 + ik2*nbk2 + ik3*nbk3)),
  10898. S[ic]);
  10899. }
  10900. // grad[k][:D,:M,iq2,iq3] += S.T @ qcur
  10901. // for ic:
  10902. // grad[k][:D,ic,iq2,iq3] += S.T[0,ic] * qcur[:D,0]
  10903. // grad[k][:D,ic,iq2,iq3] += S[ic] * qcur[:D,0]
  10904. // exclude known zero S[..] values from loop
  10905. for (int64_t ic = 0; ic < masked_begin; ++ic) {
  10906. ggml_vec_mad_f32(D,
  10907. (float *) ((char *) grad_k + (ic*nbgk1 + ik2*nbgk2 + ik3*nbgk3)),
  10908. (float *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)),
  10909. S[ic]);
  10910. }
  10911. // grad[v][:M,:D,iv2,iv3] += d[:D,id1,id2,id3].T @ SM
  10912. // for ic:
  10913. // grad[v][:M,ic,iv2,iv3] += d[:D,id1,id2,id3].T[0,ic] * SM[:M]
  10914. // grad[v][:M,ic,iv2,iv3] += d[ic,id1,id2,id3] * SM[:M]
  10915. // exclude known zero SM[..] values from mad
  10916. for (int64_t ic = 0; ic < D; ++ic) {
  10917. ggml_vec_mad_f32(masked_begin,
  10918. (float *) ((char *) grad_v + ( ic*nbgv1 + iv2*nbgv2 + iv3*nbgv3)),
  10919. SM,
  10920. *(float *) ((char *) d->data + (ic*nbd0 + id1*nbd1 + id2*nbd2 + id3*nbd3)));
  10921. }
  10922. }
  10923. }
  10924. }
  10925. }
  10926. static void ggml_compute_forward_flash_attn_back(
  10927. const struct ggml_compute_params * params,
  10928. const struct ggml_tensor * q,
  10929. const struct ggml_tensor * k,
  10930. const struct ggml_tensor * v,
  10931. const struct ggml_tensor * d,
  10932. const bool masked,
  10933. struct ggml_tensor * dst) {
  10934. switch (q->type) {
  10935. case GGML_TYPE_F32:
  10936. {
  10937. ggml_compute_forward_flash_attn_back_f32(params, q, k, v, d, masked, dst);
  10938. } break;
  10939. default:
  10940. {
  10941. GGML_ASSERT(false);
  10942. } break;
  10943. }
  10944. }
  10945. // ggml_compute_forward_win_part
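// window partition: splits the input into nep0*nep1 non-overlapping w x w windows
// along dims 1 and 2 (op_params = {nep0, nep1, w}); the window index becomes dim 3
// of dst and positions past the border are zero-padded.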
  10946. static void ggml_compute_forward_win_part_f32(
  10947. const struct ggml_compute_params * params,
  10948. const struct ggml_tensor * src0,
  10949. struct ggml_tensor * dst) {
  10950. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  10951. return;
  10952. }
  10953. GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne)
  10954. GGML_TENSOR_LOCALS(int64_t, ne, dst, ne)
  10955. const int32_t nep0 = ((const int32_t *)(dst->op_params))[0];
  10956. const int32_t nep1 = ((const int32_t *)(dst->op_params))[1];
  10957. const int32_t w = ((const int32_t *)(dst->op_params))[2];
  10958. assert(ne00 == ne0);
  10959. assert(ne3 == nep0*nep1);
  10960. // TODO: optimize / multi-thread
  10961. for (int py = 0; py < nep1; ++py) {
  10962. for (int px = 0; px < nep0; ++px) {
  10963. const int64_t i3 = py*nep0 + px;
  10964. for (int64_t i2 = 0; i2 < ne2; ++i2) {
  10965. for (int64_t i1 = 0; i1 < ne1; ++i1) {
  10966. for (int64_t i0 = 0; i0 < ne0; ++i0) {
  10967. const int64_t i02 = py*w + i2;
  10968. const int64_t i01 = px*w + i1;
  10969. const int64_t i00 = i0;
  10970. const int64_t i = i3*ne2*ne1*ne0 + i2*ne1*ne0 + i1*ne0 + i0;
  10971. const int64_t j = i02*ne01*ne00 + i01*ne00 + i00;
  10972. if (py*w + i2 >= ne02 || px*w + i1 >= ne01) {
  10973. ((float *) dst->data)[i] = 0.0f;
  10974. } else {
  10975. ((float *) dst->data)[i] = ((float *) src0->data)[j];
  10976. }
  10977. }
  10978. }
  10979. }
  10980. }
  10981. }
  10982. }
  10983. static void ggml_compute_forward_win_part(
  10984. const struct ggml_compute_params * params,
  10985. const struct ggml_tensor * src0,
  10986. struct ggml_tensor * dst) {
  10987. switch (src0->type) {
  10988. case GGML_TYPE_F32:
  10989. {
  10990. ggml_compute_forward_win_part_f32(params, src0, dst);
  10991. } break;
  10992. default:
  10993. {
  10994. GGML_ASSERT(false);
  10995. } break;
  10996. }
  10997. }
  10998. // ggml_compute_forward_win_unpart
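// window un-partition: the inverse of win_part - reassembles the windows into a
// single tensor and drops the padding introduced during partitioning.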
  10999. static void ggml_compute_forward_win_unpart_f32(
  11000. const struct ggml_compute_params * params,
  11001. const struct ggml_tensor * src0,
  11002. struct ggml_tensor * dst) {
  11003. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  11004. return;
  11005. }
  11006. GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne)
  11007. GGML_TENSOR_LOCALS(int64_t, ne, dst, ne)
  11008. const int32_t w = ((const int32_t *)(dst->op_params))[0];
  11009. // padding
  11010. const int px = (w - ne1%w)%w;
  11011. //const int py = (w - ne2%w)%w;
  11012. const int npx = (px + ne1)/w;
  11013. //const int npy = (py + ne2)/w;
  11014. assert(ne0 == ne00);
  11015. // TODO: optimize / multi-thread
  11016. for (int64_t i2 = 0; i2 < ne2; ++i2) {
  11017. for (int64_t i1 = 0; i1 < ne1; ++i1) {
  11018. for (int64_t i0 = 0; i0 < ne0; ++i0) {
  11019. const int ip2 = i2/w;
  11020. const int ip1 = i1/w;
  11021. const int64_t i02 = i2%w;
  11022. const int64_t i01 = i1%w;
  11023. const int64_t i00 = i0;
  11024. const int64_t i = (ip2*npx + ip1)*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00 + i00;
  11025. const int64_t j = i2*ne1*ne0 + i1*ne0 + i0;
  11026. ((float *) dst->data)[j] = ((float *) src0->data)[i];
  11027. }
  11028. }
  11029. }
  11030. }
  11031. static void ggml_compute_forward_win_unpart(
  11032. const struct ggml_compute_params * params,
  11033. const struct ggml_tensor * src0,
  11034. struct ggml_tensor * dst) {
  11035. switch (src0->type) {
  11036. case GGML_TYPE_F32:
  11037. {
  11038. ggml_compute_forward_win_unpart_f32(params, src0, dst);
  11039. } break;
  11040. default:
  11041. {
  11042. GGML_ASSERT(false);
  11043. } break;
  11044. }
  11045. }
// ggml_compute_forward_unary
  11047. static void ggml_compute_forward_unary(
  11048. const struct ggml_compute_params * params,
  11049. const struct ggml_tensor * src0,
  11050. struct ggml_tensor * dst) {
  11051. const enum ggml_unary_op op = ggml_get_unary_op(dst);
  11052. switch (op) {
  11053. case GGML_UNARY_OP_ABS:
  11054. {
  11055. ggml_compute_forward_abs(params, src0, dst);
  11056. } break;
  11057. case GGML_UNARY_OP_SGN:
  11058. {
  11059. ggml_compute_forward_sgn(params, src0, dst);
  11060. } break;
  11061. case GGML_UNARY_OP_NEG:
  11062. {
  11063. ggml_compute_forward_neg(params, src0, dst);
  11064. } break;
  11065. case GGML_UNARY_OP_STEP:
  11066. {
  11067. ggml_compute_forward_step(params, src0, dst);
  11068. } break;
  11069. case GGML_UNARY_OP_TANH:
  11070. {
  11071. ggml_compute_forward_tanh(params, src0, dst);
  11072. } break;
  11073. case GGML_UNARY_OP_ELU:
  11074. {
  11075. ggml_compute_forward_elu(params, src0, dst);
  11076. } break;
  11077. case GGML_UNARY_OP_RELU:
  11078. {
  11079. ggml_compute_forward_relu(params, src0, dst);
  11080. } break;
  11081. case GGML_UNARY_OP_GELU:
  11082. {
  11083. ggml_compute_forward_gelu(params, src0, dst);
  11084. } break;
  11085. case GGML_UNARY_OP_GELU_QUICK:
  11086. {
  11087. ggml_compute_forward_gelu_quick(params, src0, dst);
  11088. } break;
  11089. case GGML_UNARY_OP_SILU:
  11090. {
  11091. ggml_compute_forward_silu(params, src0, dst);
  11092. } break;
  11093. default:
  11094. {
  11095. GGML_ASSERT(false);
  11096. } break;
  11097. }
  11098. }
  11099. // ggml_compute_forward_get_rel_pos
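// gathers rows of the fp16 relative-position table (src0) using the index
// pos = (w - i1 - 1) + i2, mirroring get_rel_pos in the referenced
// segment-anything image encoder.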
  11100. static void ggml_compute_forward_get_rel_pos_f16(
  11101. const struct ggml_compute_params * params,
  11102. const struct ggml_tensor * src0,
  11103. struct ggml_tensor * dst) {
  11104. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  11105. return;
  11106. }
  11107. // ref: https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/modeling/image_encoder.py#L292-L322
  11108. GGML_TENSOR_UNARY_OP_LOCALS
  11109. const int64_t w = ne1;
  11110. ggml_fp16_t * src0_data = (ggml_fp16_t *) src0->data;
  11111. ggml_fp16_t * dst_data = (ggml_fp16_t *) dst->data;
  11112. for (int64_t i2 = 0; i2 < ne2; ++i2) {
  11113. for (int64_t i1 = 0; i1 < ne1; ++i1) {
  11114. const int64_t pos = (w - i1 - 1) + i2;
  11115. for (int64_t i0 = 0; i0 < ne0; ++i0) {
  11116. dst_data[i2*ne1*ne0 + i1*ne0 + i0] = src0_data[pos*ne00 + i0];
  11117. }
  11118. }
  11119. }
  11120. }
  11121. static void ggml_compute_forward_get_rel_pos(
  11122. const struct ggml_compute_params * params,
  11123. const struct ggml_tensor * src0,
  11124. struct ggml_tensor * dst) {
  11125. switch (src0->type) {
  11126. case GGML_TYPE_F16:
  11127. {
  11128. ggml_compute_forward_get_rel_pos_f16(params, src0, dst);
  11129. } break;
  11130. default:
  11131. {
  11132. GGML_ASSERT(false);
  11133. } break;
  11134. }
  11135. }
  11136. // ggml_compute_forward_add_rel_pos
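// adds the two decomposed relative-position tensors (src1, src2) into the attention
// scores in dst, following the referenced segment-anything implementation.
// op_params[0] selects the in-place variant; otherwise dst is first initialized
// from src0 during INIT.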
  11137. static void ggml_compute_forward_add_rel_pos_f32(
  11138. const struct ggml_compute_params * params,
  11139. const struct ggml_tensor * src0,
  11140. const struct ggml_tensor * src1,
  11141. const struct ggml_tensor * src2,
  11142. struct ggml_tensor * dst) {
  11143. const bool inplace = (bool) ((int32_t *) dst->op_params)[0];
  11144. if (!inplace && params->type == GGML_TASK_INIT) {
  11145. memcpy((char *) dst->data, (char *) src0->data, ggml_nbytes(dst));
  11146. return;
  11147. }
  11148. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  11149. return;
  11150. }
  11151. int64_t t0 = ggml_perf_time_us();
  11152. UNUSED(t0);
  11153. // ref: https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/modeling/image_encoder.py#L357-L359
  11154. float * src1_data = (float *) src1->data;
  11155. float * src2_data = (float *) src2->data;
  11156. float * dst_data = (float *) dst->data;
  11157. const int64_t ne10 = src1->ne[0];
  11158. const int64_t ne11 = src1->ne[1];
  11159. const int64_t ne12 = src1->ne[2];
  11160. const int64_t ne13 = src1->ne[3];
  11161. const int ith = params->ith;
  11162. const int nth = params->nth;
  11163. // total patches in dst
  11164. const int np = ne13;
  11165. // patches per thread
  11166. const int dp = (np + nth - 1)/nth;
  11167. // patch range for this thread
  11168. const int ip0 = dp*ith;
  11169. const int ip1 = MIN(ip0 + dp, np);
  11170. for (int64_t i13 = ip0; i13 < ip1; ++i13) {
  11171. for (int64_t i12 = 0; i12 < ne12; ++i12) {
  11172. for (int64_t i11 = 0; i11 < ne11; ++i11) {
  11173. const int64_t jp1 = i13*ne12*ne11*ne10 + i12*ne11*ne10 + i11*ne10;
  11174. for (int64_t i10 = 0; i10 < ne10; ++i10) {
  11175. const int64_t jp0 = jp1 + i10;
  11176. const float src1_e = src1_data[jp0];
  11177. const float src2_e = src2_data[jp0];
  11178. const int64_t jdh = jp0 * ne10;
  11179. const int64_t jdw = jdh - (ne10 - 1) * i10;
  11180. for (int64_t j = 0; j < ne10; ++j) {
  11181. dst_data[jdh + j ] += src2_e;
  11182. dst_data[jdw + j*ne10] += src1_e;
  11183. }
  11184. }
  11185. }
  11186. }
  11187. }
  11188. }
  11189. static void ggml_compute_forward_add_rel_pos(
  11190. const struct ggml_compute_params * params,
  11191. const struct ggml_tensor * src0,
  11192. const struct ggml_tensor * src1,
  11193. const struct ggml_tensor * src2,
  11194. struct ggml_tensor * dst) {
  11195. switch (src0->type) {
  11196. case GGML_TYPE_F32:
  11197. {
  11198. ggml_compute_forward_add_rel_pos_f32(params, src0, src1, src2, dst);
  11199. } break;
  11200. default:
  11201. {
  11202. GGML_ASSERT(false);
  11203. } break;
  11204. }
  11205. }
  11206. // ggml_compute_forward_map_unary
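// Applies a user-supplied callback independently to each row of src0 and writes the result
// into dst (same shape). The callback uses the ggml_unary_op_f32_t signature
//   void fun(const int n, float * dst, const float * src);
// where n is the row length. Illustrative usage (hypothetical user code built on the public
// ggml_map_unary_f32() API):
//   static void my_sqr(const int n, float * y, const float * x) {
//       for (int i = 0; i < n; ++i) { y[i] = x[i]*x[i]; }   // element-wise square of one row
//   }
//   // struct ggml_tensor * b = ggml_map_unary_f32(ctx, a, my_sqr);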
  11207. static void ggml_compute_forward_map_unary_f32(
  11208. const struct ggml_compute_params * params,
  11209. const struct ggml_tensor * src0,
  11210. struct ggml_tensor * dst,
  11211. const ggml_unary_op_f32_t fun) {
  11212. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  11213. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  11214. return;
  11215. }
  11216. const int n = ggml_nrows(src0);
  11217. const int nc = src0->ne[0];
  11218. assert( dst->nb[0] == sizeof(float));
  11219. assert(src0->nb[0] == sizeof(float));
  11220. for (int i = 0; i < n; i++) {
  11221. fun(nc,
  11222. (float *) ((char *) dst->data + i*( dst->nb[1])),
  11223. (float *) ((char *) src0->data + i*(src0->nb[1])));
  11224. }
  11225. }
  11226. static void ggml_compute_forward_map_unary(
  11227. const struct ggml_compute_params * params,
  11228. const struct ggml_tensor * src0,
  11229. struct ggml_tensor * dst,
  11230. const ggml_unary_op_f32_t fun) {
  11231. switch (src0->type) {
  11232. case GGML_TYPE_F32:
  11233. {
  11234. ggml_compute_forward_map_unary_f32(params, src0, dst, fun);
  11235. } break;
  11236. default:
  11237. {
  11238. GGML_ASSERT(false);
  11239. } break;
  11240. }
  11241. }
  11242. // ggml_compute_forward_map_binary
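// Same row-wise mapping as above, but with two inputs of identical shape; the callback uses
// the ggml_binary_op_f32_t signature
//   void fun(const int n, float * dst, const float * src0, const float * src1);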
  11243. static void ggml_compute_forward_map_binary_f32(
  11244. const struct ggml_compute_params * params,
  11245. const struct ggml_tensor * src0,
  11246. const struct ggml_tensor * src1,
  11247. struct ggml_tensor * dst,
  11248. const ggml_binary_op_f32_t fun) {
  11249. assert(params->ith == 0);
  11250. assert(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
  11251. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  11252. return;
  11253. }
  11254. const int n = ggml_nrows(src0);
  11255. const int nc = src0->ne[0];
  11256. assert( dst->nb[0] == sizeof(float));
  11257. assert(src0->nb[0] == sizeof(float));
  11258. assert(src1->nb[0] == sizeof(float));
  11259. for (int i = 0; i < n; i++) {
  11260. fun(nc,
  11261. (float *) ((char *) dst->data + i*( dst->nb[1])),
  11262. (float *) ((char *) src0->data + i*(src0->nb[1])),
  11263. (float *) ((char *) src1->data + i*(src1->nb[1])));
  11264. }
  11265. }
  11266. static void ggml_compute_forward_map_binary(
  11267. const struct ggml_compute_params * params,
  11268. const struct ggml_tensor * src0,
  11269. const struct ggml_tensor * src1,
  11270. struct ggml_tensor * dst,
  11271. const ggml_binary_op_f32_t fun) {
  11272. switch (src0->type) {
  11273. case GGML_TYPE_F32:
  11274. {
  11275. ggml_compute_forward_map_binary_f32(params, src0, src1, dst, fun);
  11276. } break;
  11277. default:
  11278. {
  11279. GGML_ASSERT(false);
  11280. } break;
  11281. }
  11282. }
  11283. // ggml_compute_forward_map_custom1
  11284. static void ggml_compute_forward_map_custom1_f32(
  11285. const struct ggml_compute_params * params,
  11286. const struct ggml_tensor * a,
  11287. struct ggml_tensor * dst,
  11288. const ggml_custom1_op_f32_t fun) {
  11289. assert(params->ith == 0);
  11290. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  11291. return;
  11292. }
  11293. fun(dst, a);
  11294. }
  11295. // ggml_compute_forward_map_custom2
  11296. static void ggml_compute_forward_map_custom2_f32(
  11297. const struct ggml_compute_params * params,
  11298. const struct ggml_tensor * a,
  11299. const struct ggml_tensor * b,
  11300. struct ggml_tensor * dst,
  11301. const ggml_custom2_op_f32_t fun) {
  11302. assert(params->ith == 0);
  11303. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  11304. return;
  11305. }
  11306. fun(dst, a, b);
  11307. }
  11308. // ggml_compute_forward_map_custom3
  11309. static void ggml_compute_forward_map_custom3_f32(
  11310. const struct ggml_compute_params * params,
  11311. const struct ggml_tensor * a,
  11312. const struct ggml_tensor * b,
  11313. const struct ggml_tensor * c,
  11314. struct ggml_tensor * dst,
  11315. const ggml_custom3_op_f32_t fun) {
  11316. assert(params->ith == 0);
  11317. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  11318. return;
  11319. }
  11320. fun(dst, a, b, c);
  11321. }
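// The *_f32 map_custom kernels above are the legacy variants: they run on a single thread
// (params->ith == 0 is asserted) and the callback receives only the tensors themselves.
// The map_custom1/2/3 kernels below are the newer variants: the callback pointer and a
// userdata pointer are stored in dst->op_params (struct ggml_map_customN_op_params), and the
// callback additionally receives params->ith / params->nth so it can split the work across
// threads itself.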
  11322. // ggml_compute_forward_map_custom1
  11323. static void ggml_compute_forward_map_custom1(
  11324. const struct ggml_compute_params * params,
  11325. const struct ggml_tensor * a,
  11326. struct ggml_tensor * dst) {
  11327. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  11328. return;
  11329. }
  11330. struct ggml_map_custom1_op_params * p = (struct ggml_map_custom1_op_params *) dst->op_params;
  11331. p->fun(dst, a, params->ith, params->nth, p->userdata);
  11332. }
  11333. // ggml_compute_forward_map_custom2
  11334. static void ggml_compute_forward_map_custom2(
  11335. const struct ggml_compute_params * params,
  11336. const struct ggml_tensor * a,
  11337. const struct ggml_tensor * b,
  11338. struct ggml_tensor * dst) {
  11339. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  11340. return;
  11341. }
  11342. struct ggml_map_custom2_op_params * p = (struct ggml_map_custom2_op_params *) dst->op_params;
  11343. p->fun(dst, a, b, params->ith, params->nth, p->userdata);
  11344. }
  11345. // ggml_compute_forward_map_custom3
  11346. static void ggml_compute_forward_map_custom3(
  11347. const struct ggml_compute_params * params,
  11348. const struct ggml_tensor * a,
  11349. const struct ggml_tensor * b,
  11350. const struct ggml_tensor * c,
  11351. struct ggml_tensor * dst) {
  11352. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  11353. return;
  11354. }
  11355. struct ggml_map_custom3_op_params * p = (struct ggml_map_custom3_op_params *) dst->op_params;
  11356. p->fun(dst, a, b, c, params->ith, params->nth, p->userdata);
  11357. }
  11358. // ggml_compute_forward_cross_entropy_loss
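// Computes a scalar cross-entropy loss between src0 (logits) and src1 (target probabilities):
//   loss = -1/nr * sum over rows of sum_i src1[i] * log(eps + (1 - eps) * softmax(src0)[i])
// where nr is the number of rows and eps avoids log(0). Each thread accumulates the partial
// sum for its rows into sums[ith] (the first nth floats of params->wdata, zeroed during INIT);
// the FINALIZE phase reduces the partial sums and applies the -1/nr scaling.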
  11359. static void ggml_compute_forward_cross_entropy_loss_f32(
  11360. const struct ggml_compute_params * params,
  11361. const struct ggml_tensor * src0,
  11362. const struct ggml_tensor * src1,
  11363. struct ggml_tensor * dst) {
  11364. GGML_ASSERT(ggml_is_contiguous(src0));
  11365. GGML_ASSERT(ggml_is_contiguous(src1));
  11366. GGML_ASSERT(ggml_is_scalar(dst));
  11367. GGML_ASSERT(ggml_are_same_shape(src0, src1));
  11368. const int ith = params->ith;
  11369. const int nth = params->nth;
  11370. float * sums = (float *) params->wdata;
  11371. // TODO: handle transposed/permuted matrices
  11372. const int nc = src0->ne[0];
  11373. const int nr = ggml_nrows(src0);
  11374. GGML_ASSERT(params->wsize >= sizeof(float) * (nth + nth * nc));
  11375. if (params->type == GGML_TASK_INIT) {
  11376. if (ith == 0) {
  11377. memset(sums, 0, sizeof(float) * (nth + nth * nc));
  11378. }
  11379. return;
  11380. }
  11381. if (params->type == GGML_TASK_FINALIZE) {
  11382. if (ith == 0) {
  11383. float * dp = (float *) dst->data;
  11384. ggml_vec_sum_f32(nth, dp, sums);
  11385. dp[0] *= -1.0f / (float) nr;
  11386. }
  11387. return;
  11388. }
  11389. const double eps = 1e-9;
  11390. // rows per thread
  11391. const int dr = (nr + nth - 1)/nth;
  11392. // row range for this thread
  11393. const int ir0 = dr*ith;
  11394. const int ir1 = MIN(ir0 + dr, nr);
  11395. for (int i1 = ir0; i1 < ir1; i1++) {
  11396. float * s0 = (float *)((char *) src0->data + i1*src0->nb[1]);
  11397. float * s1 = (float *)((char *) src1->data + i1*src1->nb[1]);
  11398. float * st = ((float *) params->wdata) + nth + ith*nc;
  11399. #ifndef NDEBUG
  11400. for (int i = 0; i < nc; ++i) {
  11401. //printf("p[%d] = %f\n", i, p[i]);
  11402. assert(!isnan(s0[i]));
  11403. assert(!isnan(s1[i]));
  11404. }
  11405. #endif
  11406. // soft_max
  11407. ggml_float sum = 0.0;
  11408. {
  11409. float max = -INFINITY;
  11410. ggml_vec_max_f32(nc, &max, s0);
  11411. uint16_t scvt; UNUSED(scvt);
  11412. for (int i = 0; i < nc; i++) {
  11413. if (s0[i] == -INFINITY) {
  11414. st[i] = 0.0f;
  11415. } else {
  11416. #ifndef GGML_CROSS_ENTROPY_EXP_FP16
  11417. const float s = s0[i] - max;
  11418. const float val = expf(s);
  11419. #else
  11420. ggml_fp16_t s = GGML_FP32_TO_FP16(s0[i] - max);
  11421. memcpy(&scvt, &s, sizeof(scvt));
  11422. const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt]);
  11423. #endif
  11424. sum += (ggml_float)val;
  11425. st[i] = val;
  11426. }
  11427. }
  11428. assert(sum > 0.0);
  11429. // sum = 1.0/sum;
  11430. }
  11431. // avoid log(0) by rescaling from [0..1] to [eps..1]
  11432. sum = (1.0 - eps) / sum;
  11433. ggml_vec_scale_f32(nc, st, sum);
  11434. ggml_vec_add1_f32(nc, st, st, eps);
  11435. ggml_vec_log_f32(nc, st, st);
  11436. ggml_vec_mul_f32(nc, st, st, s1);
  11437. float st_sum = 0;
  11438. ggml_vec_sum_f32(nc, &st_sum, st);
  11439. sums[ith] += st_sum;
  11440. #ifndef NDEBUG
  11441. for (int i = 0; i < nc; ++i) {
  11442. assert(!isnan(st[i]));
  11443. assert(!isinf(st[i]));
  11444. }
  11445. #endif
  11446. }
  11447. }
  11448. static void ggml_compute_forward_cross_entropy_loss(
  11449. const struct ggml_compute_params * params,
  11450. const struct ggml_tensor * src0,
  11451. const struct ggml_tensor * src1,
  11452. struct ggml_tensor * dst) {
  11453. switch (src0->type) {
  11454. case GGML_TYPE_F32:
  11455. {
  11456. ggml_compute_forward_cross_entropy_loss_f32(params, src0, src1, dst);
  11457. } break;
  11458. default:
  11459. {
  11460. GGML_ASSERT(false);
  11461. } break;
  11462. }
  11463. }
  11464. // ggml_compute_forward_cross_entropy_loss_back
  11465. static void ggml_compute_forward_cross_entropy_loss_back_f32(
  11466. const struct ggml_compute_params * params,
  11467. const struct ggml_tensor * src0,
  11468. const struct ggml_tensor * src1,
  11469. const struct ggml_tensor * opt0,
  11470. struct ggml_tensor * dst) {
  11471. GGML_ASSERT(ggml_is_contiguous(dst));
  11472. GGML_ASSERT(ggml_is_contiguous(src0));
  11473. GGML_ASSERT(ggml_is_contiguous(src1));
  11474. GGML_ASSERT(ggml_is_contiguous(opt0));
  11475. GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
  11476. const int64_t ith = params->ith;
  11477. const int64_t nth = params->nth;
  11478. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  11479. return;
  11480. }
  11481. const double eps = 1e-9;
  11482. // TODO: handle transposed/permuted matrices
  11483. const int64_t nc = src0->ne[0];
  11484. const int64_t nr = ggml_nrows(src0);
  11485. // rows per thread
  11486. const int64_t dr = (nr + nth - 1)/nth;
  11487. // row range for this thread
  11488. const int64_t ir0 = dr*ith;
  11489. const int64_t ir1 = MIN(ir0 + dr, nr);
  11490. float * d = (float *) opt0->data;
  11491. for (int64_t i1 = ir0; i1 < ir1; i1++) {
  11492. float * ds0 = (float *)((char *) dst->data + i1*dst->nb[1]);
  11493. float * s0 = (float *)((char *) src0->data + i1*src0->nb[1]);
  11494. float * s1 = (float *)((char *) src1->data + i1*src1->nb[1]);
  11495. #ifndef NDEBUG
  11496. for (int i = 0; i < nc; ++i) {
  11497. //printf("p[%d] = %f\n", i, p[i]);
  11498. assert(!isnan(s0[i]));
  11499. assert(!isnan(s1[i]));
  11500. }
  11501. #endif
  11502. // soft_max
  11503. ggml_float sum = 0.0;
  11504. {
  11505. float max = -INFINITY;
  11506. ggml_vec_max_f32(nc, &max, s0);
  11507. uint16_t scvt; UNUSED(scvt);
  11508. for (int i = 0; i < nc; i++) {
  11509. if (s0[i] == -INFINITY) {
  11510. ds0[i] = 0.0f;
  11511. } else {
  11512. #ifndef GGML_CROSS_ENTROPY_EXP_FP16
  11513. const float s = s0[i] - max;
  11514. const float val = expf(s);
  11515. #else
  11516. ggml_fp16_t s = GGML_FP32_TO_FP16(s0[i] - max);
  11517. memcpy(&scvt, &s, sizeof(scvt));
  11518. const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt]);
  11519. #endif
  11520. sum += (ggml_float)val;
  11521. ds0[i] = val;
  11522. }
  11523. }
  11524. assert(sum > 0.0);
  11525. sum = (1.0 - eps)/sum;
  11526. }
  11527. // grad(src0) = (softmax(src0) - src1) * grad(cross_entropy_loss(src0, src1)) / nr
  11528. ggml_vec_scale_f32(nc, ds0, sum);
  11529. ggml_vec_add1_f32(nc, ds0, ds0, eps);
  11530. ggml_vec_sub_f32(nc, ds0, ds0, s1);
  11531. ggml_vec_scale_f32(nc, ds0, d[0] / (float) nr);
  11532. #ifndef NDEBUG
  11533. for (int i = 0; i < nc; ++i) {
  11534. assert(!isnan(ds0[i]));
  11535. assert(!isinf(ds0[i]));
  11536. }
  11537. #endif
  11538. }
  11539. }
  11540. static void ggml_compute_forward_cross_entropy_loss_back(
  11541. const struct ggml_compute_params * params,
  11542. const struct ggml_tensor * src0,
  11543. const struct ggml_tensor * src1,
  11544. const struct ggml_tensor * opt0,
  11545. struct ggml_tensor * dst) {
  11546. switch (src0->type) {
  11547. case GGML_TYPE_F32:
  11548. {
  11549. ggml_compute_forward_cross_entropy_loss_back_f32(params, src0, src1, opt0, dst);
  11550. } break;
  11551. default:
  11552. {
  11553. GGML_ASSERT(false);
  11554. } break;
  11555. }
  11556. }
  11557. /////////////////////////////////
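// Dispatches the forward computation of a single graph node to the kernel matching its op.
// Each kernel inspects params->type (GGML_TASK_INIT / GGML_TASK_COMPUTE / GGML_TASK_FINALIZE)
// to decide which phase of the work to do and, during COMPUTE, splits its rows across threads
// using params->ith / params->nth.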
  11558. static void ggml_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor) {
  11559. GGML_ASSERT(params);
  11560. if (tensor->op == GGML_OP_NONE) {
  11561. return;
  11562. }
  11563. #ifdef GGML_USE_CUBLAS
  11564. bool skip_cpu = ggml_cuda_compute_forward(params, tensor);
  11565. if (skip_cpu) {
  11566. return;
  11567. }
  11568. GGML_ASSERT(tensor->src[0] == NULL || tensor->src[0]->backend == GGML_BACKEND_CPU);
  11569. GGML_ASSERT(tensor->src[1] == NULL || tensor->src[1]->backend == GGML_BACKEND_CPU);
  11570. #endif // GGML_USE_CUBLAS
  11571. switch (tensor->op) {
  11572. case GGML_OP_DUP:
  11573. {
  11574. ggml_compute_forward_dup(params, tensor->src[0], tensor);
  11575. } break;
  11576. case GGML_OP_ADD:
  11577. {
  11578. ggml_compute_forward_add(params, tensor->src[0], tensor->src[1], tensor);
  11579. } break;
  11580. case GGML_OP_ADD1:
  11581. {
  11582. ggml_compute_forward_add1(params, tensor->src[0], tensor->src[1], tensor);
  11583. } break;
  11584. case GGML_OP_ACC:
  11585. {
  11586. ggml_compute_forward_acc(params, tensor->src[0], tensor->src[1], tensor);
  11587. } break;
  11588. case GGML_OP_SUB:
  11589. {
  11590. ggml_compute_forward_sub(params, tensor->src[0], tensor->src[1], tensor);
  11591. } break;
  11592. case GGML_OP_MUL:
  11593. {
  11594. ggml_compute_forward_mul(params, tensor->src[0], tensor->src[1], tensor);
  11595. } break;
  11596. case GGML_OP_DIV:
  11597. {
  11598. ggml_compute_forward_div(params, tensor->src[0], tensor->src[1], tensor);
  11599. } break;
  11600. case GGML_OP_SQR:
  11601. {
  11602. ggml_compute_forward_sqr(params, tensor->src[0], tensor);
  11603. } break;
  11604. case GGML_OP_SQRT:
  11605. {
  11606. ggml_compute_forward_sqrt(params, tensor->src[0], tensor);
  11607. } break;
  11608. case GGML_OP_LOG:
  11609. {
  11610. ggml_compute_forward_log(params, tensor->src[0], tensor);
  11611. } break;
  11612. case GGML_OP_SUM:
  11613. {
  11614. ggml_compute_forward_sum(params, tensor->src[0], tensor);
  11615. } break;
  11616. case GGML_OP_SUM_ROWS:
  11617. {
  11618. ggml_compute_forward_sum_rows(params, tensor->src[0], tensor);
  11619. } break;
  11620. case GGML_OP_MEAN:
  11621. {
  11622. ggml_compute_forward_mean(params, tensor->src[0], tensor);
  11623. } break;
  11624. case GGML_OP_ARGMAX:
  11625. {
  11626. ggml_compute_forward_argmax(params, tensor->src[0], tensor);
  11627. } break;
  11628. case GGML_OP_REPEAT:
  11629. {
  11630. ggml_compute_forward_repeat(params, tensor->src[0], tensor);
  11631. } break;
  11632. case GGML_OP_REPEAT_BACK:
  11633. {
  11634. ggml_compute_forward_repeat_back(params, tensor->src[0], tensor);
  11635. } break;
  11636. case GGML_OP_CONCAT:
  11637. {
  11638. ggml_compute_forward_concat(params, tensor->src[0], tensor->src[1], tensor);
  11639. } break;
  11640. case GGML_OP_SILU_BACK:
  11641. {
  11642. ggml_compute_forward_silu_back(params, tensor->src[0], tensor->src[1], tensor);
  11643. } break;
  11644. case GGML_OP_NORM:
  11645. {
  11646. ggml_compute_forward_norm(params, tensor->src[0], tensor);
  11647. } break;
  11648. case GGML_OP_RMS_NORM:
  11649. {
  11650. ggml_compute_forward_rms_norm(params, tensor->src[0], tensor);
  11651. } break;
  11652. case GGML_OP_RMS_NORM_BACK:
  11653. {
  11654. ggml_compute_forward_rms_norm_back(params, tensor->src[0], tensor->src[1], tensor);
  11655. } break;
  11656. case GGML_OP_GROUP_NORM:
  11657. {
  11658. ggml_compute_forward_group_norm(params, tensor->src[0], tensor);
  11659. } break;
  11660. case GGML_OP_MUL_MAT:
  11661. {
  11662. ggml_compute_forward_mul_mat(params, tensor->src[0], tensor->src[1], tensor, 0, tensor->ne[1]);
  11663. } break;
  11664. case GGML_OP_MUL_MAT_ID:
  11665. {
  11666. ggml_compute_forward_mul_mat_id(params, tensor->src[0], tensor->src[1], tensor);
  11667. } break;
  11668. case GGML_OP_OUT_PROD:
  11669. {
  11670. ggml_compute_forward_out_prod(params, tensor->src[0], tensor->src[1], tensor);
  11671. } break;
  11672. case GGML_OP_SCALE:
  11673. {
  11674. ggml_compute_forward_scale(params, tensor->src[0], tensor->src[1], tensor);
  11675. } break;
  11676. case GGML_OP_SET:
  11677. {
  11678. ggml_compute_forward_set(params, tensor->src[0], tensor->src[1], tensor);
  11679. } break;
  11680. case GGML_OP_CPY:
  11681. {
  11682. ggml_compute_forward_cpy(params, tensor->src[0], tensor);
  11683. } break;
  11684. case GGML_OP_CONT:
  11685. {
  11686. ggml_compute_forward_cont(params, tensor->src[0], tensor);
  11687. } break;
  11688. case GGML_OP_RESHAPE:
  11689. {
  11690. ggml_compute_forward_reshape(params, tensor->src[0], tensor);
  11691. } break;
  11692. case GGML_OP_VIEW:
  11693. {
  11694. ggml_compute_forward_view(params, tensor->src[0]);
  11695. } break;
  11696. case GGML_OP_PERMUTE:
  11697. {
  11698. ggml_compute_forward_permute(params, tensor->src[0]);
  11699. } break;
  11700. case GGML_OP_TRANSPOSE:
  11701. {
  11702. ggml_compute_forward_transpose(params, tensor->src[0]);
  11703. } break;
  11704. case GGML_OP_GET_ROWS:
  11705. {
  11706. ggml_compute_forward_get_rows(params, tensor->src[0], tensor->src[1], tensor);
  11707. } break;
  11708. case GGML_OP_GET_ROWS_BACK:
  11709. {
  11710. ggml_compute_forward_get_rows_back(params, tensor->src[0], tensor->src[1], tensor);
  11711. } break;
  11712. case GGML_OP_DIAG:
  11713. {
  11714. ggml_compute_forward_diag(params, tensor->src[0], tensor);
  11715. } break;
  11716. case GGML_OP_DIAG_MASK_INF:
  11717. {
  11718. ggml_compute_forward_diag_mask_inf(params, tensor->src[0], tensor);
  11719. } break;
  11720. case GGML_OP_DIAG_MASK_ZERO:
  11721. {
  11722. ggml_compute_forward_diag_mask_zero(params, tensor->src[0], tensor);
  11723. } break;
  11724. case GGML_OP_SOFT_MAX:
  11725. {
  11726. ggml_compute_forward_soft_max(params, tensor->src[0], tensor->src[1], tensor);
  11727. } break;
  11728. case GGML_OP_SOFT_MAX_BACK:
  11729. {
  11730. ggml_compute_forward_soft_max_back(params, tensor->src[0], tensor->src[1], tensor);
  11731. } break;
  11732. case GGML_OP_ROPE:
  11733. {
  11734. ggml_compute_forward_rope(params, tensor->src[0], tensor->src[1], tensor);
  11735. } break;
  11736. case GGML_OP_ROPE_BACK:
  11737. {
  11738. ggml_compute_forward_rope_back(params, tensor->src[0], tensor->src[1], tensor);
  11739. } break;
  11740. case GGML_OP_ALIBI:
  11741. {
  11742. ggml_compute_forward_alibi(params, tensor->src[0], tensor);
  11743. } break;
  11744. case GGML_OP_CLAMP:
  11745. {
  11746. ggml_compute_forward_clamp(params, tensor->src[0], tensor);
  11747. } break;
  11748. case GGML_OP_CONV_TRANSPOSE_1D:
  11749. {
  11750. ggml_compute_forward_conv_transpose_1d(params, tensor->src[0], tensor->src[1], tensor);
  11751. } break;
  11752. case GGML_OP_IM2COL:
  11753. {
  11754. ggml_compute_forward_im2col(params, tensor->src[0], tensor->src[1], tensor);
  11755. } break;
  11756. case GGML_OP_CONV_TRANSPOSE_2D:
  11757. {
  11758. ggml_compute_forward_conv_transpose_2d(params, tensor->src[0], tensor->src[1], tensor);
  11759. } break;
  11760. case GGML_OP_POOL_1D:
  11761. {
  11762. ggml_compute_forward_pool_1d(params, tensor->src[0], tensor);
  11763. } break;
  11764. case GGML_OP_POOL_2D:
  11765. {
  11766. ggml_compute_forward_pool_2d(params, tensor->src[0], tensor);
  11767. } break;
  11768. case GGML_OP_UPSCALE:
  11769. {
  11770. ggml_compute_forward_upscale(params, tensor->src[0], tensor);
  11771. } break;
  11772. case GGML_OP_PAD:
  11773. {
  11774. ggml_compute_forward_pad(params, tensor->src[0], tensor);
  11775. } break;
  11776. case GGML_OP_ARGSORT:
  11777. {
  11778. ggml_compute_forward_argsort(params, tensor->src[0], tensor);
  11779. } break;
  11780. case GGML_OP_LEAKY_RELU:
  11781. {
  11782. ggml_compute_forward_leaky_relu(params, tensor->src[0], tensor);
  11783. } break;
  11784. case GGML_OP_FLASH_ATTN:
  11785. {
  11786. const int32_t t = ggml_get_op_params_i32(tensor, 0);
  11787. GGML_ASSERT(t == 0 || t == 1);
  11788. const bool masked = t != 0;
  11789. ggml_compute_forward_flash_attn(params, tensor->src[0], tensor->src[1], tensor->src[2], masked, tensor);
  11790. } break;
  11791. case GGML_OP_FLASH_FF:
  11792. {
  11793. ggml_compute_forward_flash_ff(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor->src[3], tensor->src[4], tensor);
  11794. } break;
  11795. case GGML_OP_FLASH_ATTN_BACK:
  11796. {
  11797. int32_t t = ggml_get_op_params_i32(tensor, 0);
  11798. GGML_ASSERT(t == 0 || t == 1);
  11799. bool masked = t != 0;
  11800. ggml_compute_forward_flash_attn_back(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor->src[3], masked, tensor);
  11801. } break;
  11802. case GGML_OP_WIN_PART:
  11803. {
  11804. ggml_compute_forward_win_part(params, tensor->src[0], tensor);
  11805. } break;
  11806. case GGML_OP_WIN_UNPART:
  11807. {
  11808. ggml_compute_forward_win_unpart(params, tensor->src[0], tensor);
  11809. } break;
  11810. case GGML_OP_UNARY:
  11811. {
  11812. ggml_compute_forward_unary(params, tensor->src[0], tensor);
  11813. } break;
  11814. case GGML_OP_GET_REL_POS:
  11815. {
  11816. ggml_compute_forward_get_rel_pos(params, tensor->src[0], tensor);
  11817. } break;
  11818. case GGML_OP_ADD_REL_POS:
  11819. {
  11820. ggml_compute_forward_add_rel_pos(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor);
  11821. } break;
  11822. case GGML_OP_MAP_UNARY:
  11823. {
  11824. ggml_unary_op_f32_t fun;
  11825. memcpy(&fun, tensor->op_params, sizeof(fun));
  11826. ggml_compute_forward_map_unary(params, tensor->src[0], tensor, fun);
  11827. }
  11828. break;
  11829. case GGML_OP_MAP_BINARY:
  11830. {
  11831. ggml_binary_op_f32_t fun;
  11832. memcpy(&fun, tensor->op_params, sizeof(fun));
  11833. ggml_compute_forward_map_binary(params, tensor->src[0], tensor->src[1], tensor, fun);
  11834. }
  11835. break;
  11836. case GGML_OP_MAP_CUSTOM1_F32:
  11837. {
  11838. ggml_custom1_op_f32_t fun;
  11839. memcpy(&fun, tensor->op_params, sizeof(fun));
  11840. ggml_compute_forward_map_custom1_f32(params, tensor->src[0], tensor, fun);
  11841. }
  11842. break;
  11843. case GGML_OP_MAP_CUSTOM2_F32:
  11844. {
  11845. ggml_custom2_op_f32_t fun;
  11846. memcpy(&fun, tensor->op_params, sizeof(fun));
  11847. ggml_compute_forward_map_custom2_f32(params, tensor->src[0], tensor->src[1], tensor, fun);
  11848. }
  11849. break;
  11850. case GGML_OP_MAP_CUSTOM3_F32:
  11851. {
  11852. ggml_custom3_op_f32_t fun;
  11853. memcpy(&fun, tensor->op_params, sizeof(fun));
  11854. ggml_compute_forward_map_custom3_f32(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor, fun);
  11855. }
  11856. break;
  11857. case GGML_OP_MAP_CUSTOM1:
  11858. {
  11859. ggml_compute_forward_map_custom1(params, tensor->src[0], tensor);
  11860. }
  11861. break;
  11862. case GGML_OP_MAP_CUSTOM2:
  11863. {
  11864. ggml_compute_forward_map_custom2(params, tensor->src[0], tensor->src[1], tensor);
  11865. }
  11866. break;
  11867. case GGML_OP_MAP_CUSTOM3:
  11868. {
  11869. ggml_compute_forward_map_custom3(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor);
  11870. }
  11871. break;
  11872. case GGML_OP_CROSS_ENTROPY_LOSS:
  11873. {
  11874. ggml_compute_forward_cross_entropy_loss(params, tensor->src[0], tensor->src[1], tensor);
  11875. }
  11876. break;
  11877. case GGML_OP_CROSS_ENTROPY_LOSS_BACK:
  11878. {
  11879. ggml_compute_forward_cross_entropy_loss_back(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor);
  11880. }
  11881. break;
  11882. case GGML_OP_NONE:
  11883. {
  11884. // nop
  11885. } break;
  11886. case GGML_OP_COUNT:
  11887. {
  11888. GGML_ASSERT(false);
  11889. } break;
  11890. }
  11891. }
  11892. ////////////////////////////////////////////////////////////////////////////////
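// Small open-addressing hash set used for graph bookkeeping (visited nodes, zero gradients,
// node replacements). Keys are tensor pointers hashed by their address; collisions are
// resolved by linear probing, and a lookup returns GGML_HASHTABLE_FULL when the probe wraps
// all the way around without finding either a free slot or the key.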
  11893. static size_t ggml_hash_size(size_t min_sz) {
  11894. // next primes after powers of two
  11895. static const size_t primes[] = {
  11896. 2, 3, 5, 11, 17, 37, 67, 131, 257, 521, 1031,
  11897. 2053, 4099, 8209, 16411, 32771, 65537, 131101,
  11898. 262147, 524309, 1048583, 2097169, 4194319, 8388617,
  11899. 16777259, 33554467, 67108879, 134217757, 268435459,
  11900. 536870923, 1073741827, 2147483659
  11901. };
  11902. static const size_t n_primes = sizeof(primes)/sizeof(primes[0]);
  11903. // find the smallest prime that is larger or equal to min_sz
  11904. size_t l = 0;
  11905. size_t r = n_primes;
  11906. while (l < r) {
  11907. size_t m = (l + r)/2;
  11908. if (primes[m] < min_sz) {
  11909. l = m + 1;
  11910. } else {
  11911. r = m;
  11912. }
  11913. }
  11914. size_t sz = l < n_primes ? primes[l] : min_sz | 1;
  11915. return sz;
  11916. }
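// e.g. ggml_hash_size(1000) returns 1031, the smallest listed prime >= 1000; requests larger
// than the last table entry fall back to min_sz | 1 (forced odd) instead of a prime.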
  11917. static size_t ggml_hash(const void * p) {
  11918. return (size_t)p;
  11919. }
  11920. size_t ggml_hash_find(const struct ggml_hash_set hash_set, struct ggml_tensor * key) {
  11921. size_t h = ggml_hash(key) % hash_set.size;
  11922. // linear probing
  11923. size_t i = h;
  11924. while (hash_set.keys[i] != NULL && hash_set.keys[i] != key) {
  11925. i = (i + 1) % hash_set.size;
  11926. if (i == h) {
  11927. // visited all hash table entries -> not found
  11928. return GGML_HASHTABLE_FULL;
  11929. }
  11930. }
  11931. return i;
  11932. }
  11933. bool ggml_hash_contains(struct ggml_hash_set hash_set, struct ggml_tensor * key) {
  11934. size_t i = ggml_hash_find(hash_set, key);
  11935. return i != GGML_HASHTABLE_FULL && hash_set.keys[i] == key;
  11936. }
  11937. size_t ggml_hash_insert(struct ggml_hash_set hash_set, struct ggml_tensor * key) {
  11938. size_t i = ggml_hash_find(hash_set, key);
  11939. GGML_ASSERT(i != GGML_HASHTABLE_FULL);
  11940. if (hash_set.keys[i] == key) {
  11941. return GGML_HASHTABLE_ALREADY_EXISTS;
  11942. }
  11943. // insert
  11944. GGML_ASSERT(hash_set.keys[i] == NULL);
  11945. hash_set.keys[i] = key;
  11946. return i;
  11947. }
  11948. size_t ggml_hash_find_or_insert(struct ggml_hash_set hash_set, struct ggml_tensor * key) {
  11949. size_t i = ggml_hash_find(hash_set, key);
  11950. GGML_ASSERT(i != GGML_HASHTABLE_FULL);
  11951. hash_set.keys[i] = key;
  11952. return i;
  11953. }
  11954. static struct ggml_hash_set ggml_hash_set_new(size_t size) {
  11955. size = ggml_hash_size(size);
  11956. struct ggml_hash_set result;
  11957. result.size = size;
  11958. result.keys = malloc(sizeof(struct ggml_tensor *) * size);
  11959. memset(result.keys, 0, sizeof(struct ggml_tensor *) * size);
  11960. return result;
  11961. }
  11962. static void ggml_hash_set_free(struct ggml_hash_set hash_set) {
  11963. free(hash_set.keys);
  11964. }
  11965. struct hash_map {
  11966. struct ggml_hash_set set;
  11967. struct ggml_tensor ** vals;
  11968. };
  11969. static struct hash_map * ggml_new_hash_map(size_t size) {
  11970. struct hash_map * result = malloc(sizeof(struct hash_map));
  11971. result->set = ggml_hash_set_new(size);
  11972. result->vals = malloc(sizeof(struct ggml_tensor *) * result->set.size);
  11973. memset(result->vals, 0, sizeof(struct ggml_tensor *) * result->set.size);
  11974. return result;
  11975. }
  11976. static void ggml_hash_map_free(struct hash_map * map) {
  11977. ggml_hash_set_free(map->set);
  11978. free(map->vals);
  11979. free(map);
  11980. }
  11981. // gradient checkpointing
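// To save memory during training, the backward graph can recompute intermediate activations
// from a small set of user-chosen checkpoint tensors instead of keeping every forward result
// alive. ggml_recompute_graph_node() builds that recomputation: for every non-leaf,
// non-parameter node not already present in `replacements` it creates a clone with the same
// op and op_params whose sources are themselves (recursively) recomputed, and records the
// original -> clone mapping so shared subgraphs are cloned only once.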
  11982. static struct ggml_tensor * ggml_recompute_graph_node(
  11983. struct ggml_context * ctx,
  11984. struct ggml_cgraph * graph,
  11985. struct hash_map * replacements,
  11986. struct ggml_tensor * node) {
  11987. if (node == NULL) {
  11988. return NULL;
  11989. }
  11990. if (node->is_param) {
  11991. return node;
  11992. }
  11993. if (!ggml_hash_contains(graph->visited_hash_table, node)) {
  11994. return node;
  11995. }
  11996. int count_children = 0;
  11997. for (int k = 0; k < GGML_MAX_SRC; ++k) {
  11998. if (node->src[k]) {
  11999. ++count_children;
  12000. }
  12001. }
  12002. if (count_children == 0) {
  12003. return node;
  12004. }
  12005. size_t i = ggml_hash_find(replacements->set, node);
  12006. GGML_ASSERT(i != GGML_HASHTABLE_FULL); // assert that not full
  12007. if (replacements->set.keys[i] == node) {
  12008. return replacements->vals[i];
  12009. }
  12010. struct ggml_tensor * clone = ggml_new_tensor(ctx, node->type, GGML_MAX_DIMS, node->ne);
  12011. // insert clone into replacements
  12012. GGML_ASSERT(replacements->set.keys[i] == NULL); // assert that we don't overwrite
  12013. replacements->set.keys[i] = node;
  12014. replacements->vals[i] = clone;
  12015. clone->op = node->op;
  12016. clone->grad = node->grad;
  12017. clone->is_param = node->is_param;
  12018. clone->extra = node->extra;
  12019. for (int k = 0; k < GGML_MAX_DIMS; ++k) {
  12020. clone->nb[k] = node->nb[k];
  12021. }
  12022. for (int k = 0; k < GGML_MAX_SRC; ++k) {
  12023. clone->src[k] = ggml_recompute_graph_node(ctx, graph, replacements, node->src[k]);
  12024. }
  12025. if (node->view_src != NULL) {
  12026. clone->data = (node->view_src->data == NULL)
  12027. ? NULL // view_src not yet allocated
  12028. : (char *) node->view_src->data // view_src already allocated
  12029. + node->view_offs;
  12030. clone->view_src = node->view_src;
  12031. clone->view_offs = node->view_offs;
  12032. }
  12033. GGML_ASSERT(sizeof(node->op_params) == sizeof(int32_t) * (GGML_MAX_OP_PARAMS / sizeof(int32_t)));
  12034. GGML_ASSERT(sizeof(node->name) == GGML_MAX_NAME);
  12035. memcpy(clone->op_params, node->op_params, sizeof(node->op_params));
  12036. ggml_format_name(clone, "%s (clone)", ggml_get_name(node));
  12037. return clone;
  12038. }
  12039. void ggml_build_backward_gradient_checkpointing(
  12040. struct ggml_context * ctx,
  12041. struct ggml_cgraph * gf,
  12042. struct ggml_cgraph * gb,
  12043. struct ggml_cgraph * gb_tmp,
  12044. struct ggml_tensor * * checkpoints,
  12045. int n_checkpoints) {
  12046. ggml_graph_cpy(gf, gb_tmp);
  12047. ggml_build_backward_expand(ctx, gf, gb_tmp, true);
  12048. if (n_checkpoints <= 0) {
  12049. ggml_graph_cpy(gb_tmp, gb);
  12050. return;
  12051. }
  12052. struct hash_map * replacements = ggml_new_hash_map(gf->n_nodes + gf->n_leafs + n_checkpoints);
  12053. // insert checkpoints in replacements
  12054. for (int i = 0; i < n_checkpoints; ++i) {
  12055. size_t k = ggml_hash_find(replacements->set, checkpoints[i]);
  12056. GGML_ASSERT(k != GGML_HASHTABLE_FULL); // assert that not full
  12057. GGML_ASSERT(replacements->set.keys[k] == NULL); // assert that we don't overwrite
  12058. replacements->set.keys[k] = checkpoints[i];
  12059. replacements->vals[k] = checkpoints[i];
  12060. }
  12061. ggml_graph_cpy(gf, gb);
  12062. // rewrite gb_tmp->nodes[gf->n_nodes:gb_tmp->n_nodes],
  12063. // replacing references to gb_tmp->nodes[0:gf->n_nodes] ( == gf->nodes[0:gf->n_nodes]),
  12064. // by recomputing them from checkpoints
  12065. for (int i = gf->n_nodes; i<gb_tmp->n_nodes; ++i) {
  12066. struct ggml_tensor * node = gb_tmp->nodes[i];
  12067. for (int k = 0; k < GGML_MAX_SRC; ++k) {
12068. // replace each source with a tensor that recomputes it, reusing replacements that already exist;
12069. // new recomputation tensors are remembered in `replacements`, keyed by the corresponding gf node,
12070. // and the recursion descends into the input tensors,
12071. // terminating when an input already has a replacement (e.g. it is a checkpoint)
  12072. node->src[k] = ggml_recompute_graph_node(ctx, gf, replacements, node->src[k]);
  12073. }
  12074. // insert rewritten backward node with replacements made into resulting backward graph gb
  12075. ggml_build_forward_expand(gb, node);
  12076. }
  12077. ggml_hash_map_free(replacements);
  12078. }
12079. // helpers to accumulate into gradients, handling the case where input a is still the zero-valued initial gradient (tracked in zero_table)
  12080. static struct ggml_tensor * ggml_add_or_set(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, struct ggml_hash_set zero_table) {
  12081. if (ggml_hash_contains(zero_table, a)) {
  12082. return b;
  12083. } else {
  12084. return ggml_add_impl(ctx, a, b, false);
  12085. }
  12086. }
  12087. static struct ggml_tensor * ggml_acc_or_set(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, size_t nb1, size_t nb2, size_t nb3, size_t offset, struct ggml_hash_set zero_table) {
  12088. if (ggml_hash_contains(zero_table, a)) {
  12089. struct ggml_tensor * a_zero = ggml_scale(ctx, a, ggml_new_f32(ctx, 0));
  12090. return ggml_acc_impl(ctx, a_zero, b, nb1, nb2, nb3, offset, false);
  12091. } else {
  12092. return ggml_acc_impl(ctx, a, b, nb1, nb2, nb3, offset, false);
  12093. }
  12094. }
  12095. static struct ggml_tensor * ggml_add1_or_set(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, struct ggml_hash_set zero_table) {
  12096. if (ggml_hash_contains(zero_table, a)) {
  12097. return ggml_repeat(ctx, b, a);
  12098. } else {
  12099. return ggml_add1_impl(ctx, a, b, false);
  12100. }
  12101. }
  12102. static struct ggml_tensor * ggml_sub_or_set(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, struct ggml_hash_set zero_table) {
  12103. if (ggml_hash_contains(zero_table, a)) {
  12104. return ggml_neg(ctx, b);
  12105. } else {
  12106. return ggml_sub_impl(ctx, a, b, false);
  12107. }
  12108. }
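// Backward pass for a single node: given dL/dtensor in tensor->grad, accumulate the
// corresponding chain-rule contribution into src[i]->grad for every source that participates
// in gradient computation. The *_or_set helpers above skip the explicit addition while a
// gradient is still the zero-initialized placeholder listed in zero_table. Ops whose backward
// is not implemented assert.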
  12109. static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor * tensor, struct ggml_hash_set zero_table) {
  12110. struct ggml_tensor * src0 = tensor->src[0];
  12111. struct ggml_tensor * src1 = tensor->src[1];
  12112. switch (tensor->op) {
  12113. case GGML_OP_DUP:
  12114. {
  12115. if (src0->grad) {
  12116. src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
  12117. }
  12118. } break;
  12119. case GGML_OP_ADD:
  12120. {
  12121. if (src0->grad) {
  12122. src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
  12123. }
  12124. if (src1->grad) {
  12125. src1->grad = ggml_add_or_set(ctx, src1->grad, tensor->grad, zero_table);
  12126. }
  12127. } break;
  12128. case GGML_OP_ADD1:
  12129. {
  12130. if (src0->grad) {
  12131. src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
  12132. }
  12133. if (src1->grad) {
  12134. src1->grad = ggml_add_or_set(ctx,
  12135. src1->grad,
  12136. ggml_mean(ctx, tensor->grad), // TODO: should probably be sum instead of mean
  12137. zero_table);
  12138. }
  12139. } break;
  12140. case GGML_OP_ACC:
  12141. {
  12142. if (src0->grad) {
  12143. src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
  12144. }
  12145. if (src1->grad) {
  12146. const size_t nb1 = ((int32_t *) tensor->op_params)[0];
  12147. const size_t nb2 = ((int32_t *) tensor->op_params)[1];
  12148. const size_t nb3 = ((int32_t *) tensor->op_params)[2];
  12149. const size_t offset = ((int32_t *) tensor->op_params)[3];
  12150. struct ggml_tensor * tensor_grad_view = ggml_view_4d(ctx,
  12151. tensor->grad,
  12152. src1->grad->ne[0],
  12153. src1->grad->ne[1],
  12154. src1->grad->ne[2],
  12155. src1->grad->ne[3],
  12156. nb1, nb2, nb3, offset);
  12157. src1->grad =
  12158. ggml_add_or_set(ctx,
  12159. src1->grad,
  12160. ggml_reshape(ctx,
  12161. ggml_cont(ctx, tensor_grad_view),
  12162. src1->grad),
  12163. zero_table);
  12164. }
  12165. } break;
  12166. case GGML_OP_SUB:
  12167. {
  12168. if (src0->grad) {
  12169. src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
  12170. }
  12171. if (src1->grad) {
  12172. src1->grad = ggml_sub_or_set(ctx, src1->grad, tensor->grad, zero_table);
  12173. }
  12174. } break;
  12175. case GGML_OP_MUL:
  12176. {
  12177. if (src0->grad) {
  12178. src0->grad =
  12179. ggml_add_or_set(ctx,
  12180. src0->grad,
  12181. ggml_mul(ctx, src1, tensor->grad),
  12182. zero_table);
  12183. }
  12184. if (src1->grad) {
  12185. src1->grad =
  12186. ggml_add_or_set(ctx,
  12187. src1->grad,
  12188. ggml_mul(ctx, src0, tensor->grad),
  12189. zero_table);
  12190. }
  12191. } break;
  12192. case GGML_OP_DIV:
  12193. {
  12194. if (src0->grad) {
  12195. src0->grad =
  12196. ggml_add_or_set(ctx,
  12197. src0->grad,
  12198. ggml_div(ctx, tensor->grad, src1),
  12199. zero_table);
  12200. }
  12201. if (src1->grad) {
  12202. src1->grad =
  12203. ggml_sub_or_set(ctx,
  12204. src1->grad,
  12205. ggml_mul(ctx,
  12206. tensor->grad,
  12207. ggml_div(ctx, tensor, src1)),
  12208. zero_table);
  12209. }
  12210. } break;
  12211. case GGML_OP_SQR:
  12212. {
  12213. if (src0->grad) {
  12214. src0->grad =
  12215. ggml_add_or_set(ctx,
  12216. src0->grad,
  12217. ggml_scale(ctx,
  12218. ggml_mul(ctx, src0, tensor->grad),
  12219. ggml_new_f32(ctx, 2.0f)),
  12220. zero_table);
  12221. }
  12222. } break;
  12223. case GGML_OP_SQRT:
  12224. {
  12225. if (src0->grad) {
  12226. src0->grad =
  12227. ggml_add_or_set(ctx,
  12228. src0->grad,
  12229. ggml_scale(ctx,
  12230. ggml_div(ctx,
  12231. tensor->grad,
  12232. tensor),
  12233. ggml_new_f32(ctx, 0.5f)),
  12234. zero_table);
  12235. }
  12236. } break;
  12237. case GGML_OP_LOG:
  12238. {
  12239. if (src0->grad) {
  12240. src0->grad =
  12241. ggml_add_or_set(ctx,
  12242. src0->grad,
  12243. ggml_div(ctx,
  12244. tensor->grad,
  12245. src0),
  12246. zero_table);
  12247. }
  12248. } break;
  12249. case GGML_OP_SUM:
  12250. {
  12251. if (src0->grad) {
  12252. src0->grad =
  12253. ggml_add1_or_set(ctx,
  12254. src0->grad,
  12255. tensor->grad,
  12256. zero_table);
  12257. }
  12258. } break;
  12259. case GGML_OP_SUM_ROWS:
  12260. {
  12261. if (src0->grad) {
  12262. src0->grad =
  12263. ggml_add_or_set(ctx,
  12264. src0->grad,
  12265. ggml_repeat(ctx,
  12266. tensor->grad,
  12267. src0->grad),
  12268. zero_table);
  12269. }
  12270. } break;
  12271. case GGML_OP_MEAN:
  12272. case GGML_OP_ARGMAX:
  12273. {
  12274. GGML_ASSERT(false); // TODO: implement
  12275. } break;
  12276. case GGML_OP_REPEAT:
  12277. {
  12278. // necessary for llama
  12279. if (src0->grad) {
  12280. src0->grad = ggml_add_or_set(ctx,
  12281. src0->grad,
  12282. ggml_repeat_back(ctx, tensor->grad, src0->grad),
  12283. zero_table);
  12284. }
  12285. } break;
  12286. case GGML_OP_REPEAT_BACK:
  12287. {
  12288. if (src0->grad) {
  12289. // TODO: test this
  12290. src0->grad = ggml_add_or_set(ctx,
  12291. src0->grad,
  12292. ggml_repeat(ctx, tensor->grad, src0->grad),
  12293. zero_table);
  12294. }
  12295. } break;
  12296. case GGML_OP_CONCAT:
  12297. {
  12298. GGML_ASSERT(false); // TODO: implement
  12299. } break;
  12300. case GGML_OP_SILU_BACK:
  12301. {
  12302. GGML_ASSERT(false); // TODO: not implemented
  12303. } break;
  12304. case GGML_OP_NORM:
  12305. {
  12306. GGML_ASSERT(false); // TODO: not implemented
  12307. } break;
  12308. case GGML_OP_RMS_NORM:
  12309. {
  12310. // necessary for llama
  12311. if (src0->grad) {
  12312. float eps;
  12313. memcpy(&eps, tensor->op_params, sizeof(float));
  12314. src0->grad = ggml_add_or_set(ctx,
  12315. src0->grad,
  12316. ggml_rms_norm_back(ctx, src0, tensor->grad, eps),
  12317. zero_table);
  12318. }
  12319. } break;
  12320. case GGML_OP_RMS_NORM_BACK:
  12321. {
  12322. GGML_ASSERT(false); // TODO: not implemented
  12323. } break;
  12324. case GGML_OP_GROUP_NORM:
  12325. {
  12326. GGML_ASSERT(false); // TODO: not implemented
  12327. } break;
  12328. case GGML_OP_MUL_MAT:
  12329. {
  12330. // https://cs231n.github.io/optimization-2/#staged
  12331. // # forward pass
  12332. // s0 = np.random.randn(5, 10)
  12333. // s1 = np.random.randn(10, 3)
  12334. // t = s0.dot(s1)
  12335. // # now suppose we had the gradient on t from above in the circuit
  12336. // dt = np.random.randn(*t.shape) # same shape as t
  12337. // ds0 = dt.dot(s1.T) #.T gives the transpose of the matrix
  12338. // ds1 = t.T.dot(dt)
  12339. // tensor.shape [m,p,qq,rr]
  12340. // src0.shape [n,m,q1,r1]
  12341. // src1.shape [n,p,qq,rr]
  12342. // necessary for llama
  12343. if (src0->grad) {
  12344. struct ggml_tensor * s1_tg =
  12345. ggml_out_prod(ctx, // [n,m,qq,rr]
  12346. src1, // [n,p,qq,rr]
  12347. tensor->grad); // [m,p,qq,rr]
  12348. const int64_t qq = s1_tg->ne[2];
  12349. const int64_t rr = s1_tg->ne[3];
  12350. const int64_t q1 = src0->ne[2];
  12351. const int64_t r1 = src0->ne[3];
  12352. const bool ne2_broadcasted = qq > q1;
  12353. const bool ne3_broadcasted = rr > r1;
  12354. if (ne2_broadcasted || ne3_broadcasted) {
  12355. // sum broadcast repetitions of s1_tg into shape of src0
  12356. s1_tg = ggml_repeat_back(ctx, s1_tg, src0);
  12357. }
  12358. src0->grad =
  12359. ggml_add_or_set(ctx,
  12360. src0->grad, // [n,m,q1,r1]
  12361. s1_tg, // [n,m,q1,r1]
  12362. zero_table);
  12363. }
  12364. if (src1->grad) {
  12365. src1->grad =
  12366. ggml_add_or_set(ctx,
  12367. src1->grad, // [n,p,qq,rr]
  12368. // ggml_mul_mat(ctx, // [n,p,qq,rr]
  12369. // ggml_cont(ctx, // [m,n,q1,r1]
  12370. // ggml_transpose(ctx, src0)), // [m,n,q1,r1]
  12371. // tensor->grad), // [m,p,qq,rr]
  12372. // // when src0 is bigger than tensor->grad (this is mostly the case in llama),
  12373. // // avoid transpose of src0, rather transpose smaller tensor->grad
  12374. // // and then use ggml_out_prod
  12375. ggml_out_prod(ctx, // [n,p,qq,rr]
  12376. src0, // [n,m,q1,r1]
  12377. ggml_transpose(ctx, // [p,m,qq,rr]
  12378. tensor->grad)), // [m,p,qq,rr]
  12379. zero_table);
  12380. }
  12381. } break;
  12382. case GGML_OP_MUL_MAT_ID:
  12383. {
  12384. GGML_ASSERT(false); // TODO: not implemented
  12385. } break;
  12386. case GGML_OP_OUT_PROD:
  12387. {
  12388. GGML_ASSERT(false); // TODO: not implemented
  12389. } break;
  12390. case GGML_OP_SCALE:
  12391. {
  12392. // necessary for llama
  12393. if (src0->grad) {
  12394. src0->grad =
  12395. ggml_add_or_set(ctx,
  12396. src0->grad,
  12397. ggml_scale_impl(ctx, tensor->grad, src1, false),
  12398. zero_table);
  12399. }
  12400. if (src1->grad) {
  12401. src1->grad =
  12402. ggml_add_or_set(ctx,
  12403. src1->grad,
  12404. ggml_sum(ctx, ggml_mul_impl(ctx, tensor->grad, src0, false)),
  12405. zero_table);
  12406. }
  12407. } break;
  12408. case GGML_OP_SET:
  12409. {
  12410. const size_t nb1 = ((int32_t *) tensor->op_params)[0];
  12411. const size_t nb2 = ((int32_t *) tensor->op_params)[1];
  12412. const size_t nb3 = ((int32_t *) tensor->op_params)[2];
  12413. const size_t offset = ((int32_t *) tensor->op_params)[3];
  12414. struct ggml_tensor * tensor_grad_view = NULL;
  12415. if (src0->grad || src1->grad) {
  12416. GGML_ASSERT(src0->type == tensor->type);
  12417. GGML_ASSERT(tensor->grad->type == tensor->type);
  12418. GGML_ASSERT(tensor->grad->type == src1->grad->type);
  12419. tensor_grad_view = ggml_view_4d(ctx,
  12420. tensor->grad,
  12421. src1->grad->ne[0],
  12422. src1->grad->ne[1],
  12423. src1->grad->ne[2],
  12424. src1->grad->ne[3],
  12425. nb1, nb2, nb3, offset);
  12426. }
  12427. if (src0->grad) {
  12428. src0->grad = ggml_add_or_set(ctx,
  12429. src0->grad,
  12430. ggml_acc_impl(ctx,
  12431. tensor->grad,
  12432. ggml_neg(ctx, tensor_grad_view),
  12433. nb1, nb2, nb3, offset, false),
  12434. zero_table);
  12435. }
  12436. if (src1->grad) {
  12437. src1->grad =
  12438. ggml_add_or_set(ctx,
  12439. src1->grad,
  12440. ggml_reshape(ctx,
  12441. ggml_cont(ctx, tensor_grad_view),
  12442. src1->grad),
  12443. zero_table);
  12444. }
  12445. } break;
  12446. case GGML_OP_CPY:
  12447. {
  12448. // necessary for llama
  12449. // cpy overwrites value of src1 by src0 and returns view(src1)
  12450. // the overwriting is mathematically equivalent to:
  12451. // tensor = src0 * 1 + src1 * 0
  12452. if (src0->grad) {
  12453. // dsrc0 = dtensor * 1
  12454. src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
  12455. }
  12456. if (src1->grad) {
  12457. // dsrc1 = dtensor * 0 -> noop
  12458. }
  12459. } break;
  12460. case GGML_OP_CONT:
  12461. {
  12462. // same as cpy
  12463. if (src0->grad) {
  12464. GGML_ASSERT(ggml_is_contiguous(src0->grad));
  12465. GGML_ASSERT(ggml_is_contiguous(tensor->grad));
  12466. src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
  12467. }
  12468. } break;
  12469. case GGML_OP_RESHAPE:
  12470. {
  12471. // necessary for llama
  12472. if (src0->grad) {
  12473. src0->grad =
  12474. ggml_add_or_set(ctx, src0->grad,
  12475. ggml_reshape(ctx,
  12476. ggml_is_contiguous(tensor->grad)
  12477. ? tensor->grad
  12478. : ggml_cont(ctx, tensor->grad),
  12479. src0->grad),
  12480. zero_table);
  12481. }
  12482. } break;
  12483. case GGML_OP_VIEW:
  12484. {
  12485. // necessary for llama
  12486. if (src0->grad) {
  12487. size_t offset;
  12488. memcpy(&offset, tensor->op_params, sizeof(offset));
  12489. size_t nb1 = tensor->nb[1];
  12490. size_t nb2 = tensor->nb[2];
  12491. size_t nb3 = tensor->nb[3];
  12492. if (src0->type != src0->grad->type) {
  12493. // gradient is typically F32, but src0 could be other type
  12494. size_t ng = ggml_element_size(src0->grad);
  12495. size_t n0 = ggml_element_size(src0);
  12496. GGML_ASSERT(offset % n0 == 0);
  12497. GGML_ASSERT(nb1 % n0 == 0);
  12498. GGML_ASSERT(nb2 % n0 == 0);
  12499. GGML_ASSERT(nb3 % n0 == 0);
  12500. offset = (offset / n0) * ng;
  12501. nb1 = (nb1 / n0) * ng;
  12502. nb2 = (nb2 / n0) * ng;
  12503. nb3 = (nb3 / n0) * ng;
  12504. }
  12505. src0->grad = ggml_acc_or_set(ctx, src0->grad, tensor->grad, nb1, nb2, nb3, offset, zero_table);
  12506. }
  12507. } break;
  12508. case GGML_OP_PERMUTE:
  12509. {
  12510. // necessary for llama
  12511. if (src0->grad) {
  12512. int32_t * axes = (int32_t *) tensor->op_params;
  12513. int axis0 = axes[0] & 0x3;
  12514. int axis1 = axes[1] & 0x3;
  12515. int axis2 = axes[2] & 0x3;
  12516. int axis3 = axes[3] & 0x3;
  12517. int axes_backward[4] = {0,0,0,0};
  12518. axes_backward[axis0] = 0;
  12519. axes_backward[axis1] = 1;
  12520. axes_backward[axis2] = 2;
  12521. axes_backward[axis3] = 3;
  12522. src0->grad =
  12523. ggml_add_or_set(ctx, src0->grad,
  12524. ggml_permute(ctx,
  12525. tensor->grad,
  12526. axes_backward[0],
  12527. axes_backward[1],
  12528. axes_backward[2],
  12529. axes_backward[3]),
  12530. zero_table);
  12531. }
  12532. } break;
  12533. case GGML_OP_TRANSPOSE:
  12534. {
  12535. // necessary for llama
  12536. if (src0->grad) {
  12537. src0->grad =
  12538. ggml_add_or_set(ctx, src0->grad,
  12539. ggml_transpose(ctx, tensor->grad),
  12540. zero_table);
  12541. }
  12542. } break;
  12543. case GGML_OP_GET_ROWS:
  12544. {
  12545. // necessary for llama (only for tokenizer)
  12546. if (src0->grad) {
  12547. src0->grad =
  12548. ggml_add_or_set(ctx, src0->grad,
  12549. // last ggml_get_rows_back argument src0->grad is only
  12550. // necessary to setup correct output shape
  12551. ggml_get_rows_back(ctx, tensor->grad, src1, src0->grad),
  12552. zero_table);
  12553. }
  12554. if (src1->grad) {
  12555. // noop
  12556. }
  12557. } break;
  12558. case GGML_OP_GET_ROWS_BACK:
  12559. {
  12560. GGML_ASSERT(false); // TODO: not implemented
  12561. } break;
  12562. case GGML_OP_DIAG:
  12563. {
  12564. GGML_ASSERT(false); // TODO: not implemented
  12565. } break;
  12566. case GGML_OP_DIAG_MASK_INF:
  12567. {
  12568. // necessary for llama
  12569. if (src0->grad) {
  12570. const int n_past = ((int32_t *) tensor->op_params)[0];
  12571. src0->grad =
  12572. ggml_add_or_set(ctx, src0->grad,
  12573. ggml_diag_mask_zero_impl(ctx, tensor->grad, n_past, false),
  12574. zero_table);
  12575. }
  12576. } break;
  12577. case GGML_OP_DIAG_MASK_ZERO:
  12578. {
  12579. // necessary for llama
  12580. if (src0->grad) {
  12581. const int n_past = ((int32_t *) tensor->op_params)[0];
  12582. src0->grad =
  12583. ggml_add_or_set(ctx, src0->grad,
  12584. ggml_diag_mask_zero_impl(ctx, tensor->grad, n_past, false),
  12585. zero_table);
  12586. }
  12587. } break;
  12588. case GGML_OP_SOFT_MAX:
  12589. {
  12590. // necessary for llama
  12591. if (src0->grad) {
  12592. src0->grad =
  12593. ggml_add_or_set(ctx, src0->grad,
  12594. ggml_soft_max_back(ctx, tensor->grad, tensor),
  12595. zero_table);
  12596. }
  12597. } break;
  12598. case GGML_OP_SOFT_MAX_BACK:
  12599. {
  12600. GGML_ASSERT(false); // TODO: not implemented
  12601. } break;
  12602. case GGML_OP_ROPE:
  12603. {
  12604. // necessary for llama
  12605. if (src0->grad) {
  12606. //const int n_past = ((int32_t *) tensor->op_params)[0];
  12607. const int n_dims = ((int32_t *) tensor->op_params)[1];
  12608. const int mode = ((int32_t *) tensor->op_params)[2];
  12609. const int n_ctx = ((int32_t *) tensor->op_params)[3];
  12610. const int n_orig_ctx = ((int32_t *) tensor->op_params)[4];
  12611. float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow, xpos_base, xpos_down;
  12612. memcpy(&freq_base, (int32_t *) tensor->op_params + 5, sizeof(float));
  12613. memcpy(&freq_scale, (int32_t *) tensor->op_params + 6, sizeof(float));
  12614. memcpy(&ext_factor, (int32_t *) tensor->op_params + 7, sizeof(float));
  12615. memcpy(&attn_factor, (int32_t *) tensor->op_params + 8, sizeof(float));
  12616. memcpy(&beta_fast, (int32_t *) tensor->op_params + 9, sizeof(float));
  12617. memcpy(&beta_slow, (int32_t *) tensor->op_params + 10, sizeof(float));
  12618. memcpy(&xpos_base, (int32_t *) tensor->op_params + 11, sizeof(float));
  12619. memcpy(&xpos_down, (int32_t *) tensor->op_params + 12, sizeof(bool));
  12620. src0->grad = ggml_add_or_set(ctx,
  12621. src0->grad,
  12622. ggml_rope_back(ctx,
  12623. tensor->grad,
  12624. src1,
  12625. n_dims,
  12626. mode,
  12627. n_ctx,
  12628. n_orig_ctx,
  12629. freq_base,
  12630. freq_scale,
  12631. ext_factor,
  12632. attn_factor,
  12633. beta_fast,
  12634. beta_slow,
  12635. xpos_base,
  12636. xpos_down),
  12637. zero_table);
  12638. }
  12639. } break;
  12640. case GGML_OP_ROPE_BACK:
  12641. {
  12642. if (src0->grad) {
  12643. //const int n_past = ((int32_t *) tensor->op_params)[0];
  12644. const int n_dims = ((int32_t *) tensor->op_params)[1];
  12645. const int mode = ((int32_t *) tensor->op_params)[2];
  12646. const int n_ctx = ((int32_t *) tensor->op_params)[3];
  12647. const int n_orig_ctx = ((int32_t *) tensor->op_params)[4];
  12648. float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow, xpos_base, xpos_down;
  12649. memcpy(&freq_base, (int32_t *) tensor->op_params + 5, sizeof(float));
  12650. memcpy(&freq_scale, (int32_t *) tensor->op_params + 6, sizeof(float));
  12651. memcpy(&ext_factor, (int32_t *) tensor->op_params + 7, sizeof(float));
  12652. memcpy(&attn_factor, (int32_t *) tensor->op_params + 8, sizeof(float));
  12653. memcpy(&beta_fast, (int32_t *) tensor->op_params + 9, sizeof(float));
  12654. memcpy(&beta_slow, (int32_t *) tensor->op_params + 10, sizeof(float));
  12655. memcpy(&xpos_base, (int32_t *) tensor->op_params + 11, sizeof(float));
  12656. memcpy(&xpos_down, (int32_t *) tensor->op_params + 12, sizeof(bool));
  12657. src0->grad = ggml_add_or_set(ctx,
  12658. src0->grad,
  12659. ggml_rope_impl(ctx,
  12660. tensor->grad,
  12661. src1,
  12662. n_dims,
  12663. mode,
  12664. n_ctx,
  12665. n_orig_ctx,
  12666. freq_base,
  12667. freq_scale,
  12668. ext_factor,
  12669. attn_factor,
  12670. beta_fast,
  12671. beta_slow,
  12672. xpos_base,
  12673. xpos_down,
  12674. false),
  12675. zero_table);
  12676. }
  12677. } break;
  12678. case GGML_OP_ALIBI:
  12679. {
  12680. GGML_ASSERT(false); // TODO: not implemented
  12681. } break;
  12682. case GGML_OP_CLAMP:
  12683. {
  12684. GGML_ASSERT(false); // TODO: not implemented
  12685. } break;
  12686. case GGML_OP_CONV_TRANSPOSE_1D:
  12687. {
  12688. GGML_ASSERT(false); // TODO: not implemented
  12689. } break;
  12690. case GGML_OP_IM2COL:
  12691. {
  12692. GGML_ASSERT(false); // TODO: not implemented
  12693. } break;
  12694. case GGML_OP_CONV_TRANSPOSE_2D:
  12695. {
  12696. GGML_ASSERT(false); // TODO: not implemented
  12697. } break;
  12698. case GGML_OP_POOL_1D:
  12699. {
  12700. GGML_ASSERT(false); // TODO: not implemented
  12701. } break;
  12702. case GGML_OP_POOL_2D:
  12703. {
  12704. GGML_ASSERT(false); // TODO: not implemented
  12705. } break;
  12706. case GGML_OP_UPSCALE:
  12707. {
  12708. GGML_ASSERT(false); // TODO: not implemented
  12709. } break;
  12710. case GGML_OP_PAD:
  12711. {
  12712. GGML_ASSERT(false); // TODO: not implemented
  12713. } break;
  12714. case GGML_OP_ARGSORT:
  12715. {
  12716. GGML_ASSERT(false); // TODO: not implemented
  12717. } break;
  12718. case GGML_OP_LEAKY_RELU:
  12719. {
  12720. GGML_ASSERT(false); // TODO: not implemented
  12721. } break;
  12722. case GGML_OP_FLASH_ATTN:
  12723. {
  12724. struct ggml_tensor * flash_grad = NULL;
  12725. if (src0->grad || src1->grad || tensor->src[2]->grad) {
  12726. int32_t t = ggml_get_op_params_i32(tensor, 0);
  12727. GGML_ASSERT(t == 0 || t == 1);
  12728. bool masked = t != 0;
  12729. flash_grad =
  12730. ggml_flash_attn_back(ctx,
  12731. src0,
  12732. src1,
  12733. tensor->src[2],
  12734. tensor->grad,
  12735. masked);
  12736. }
  12737. struct ggml_tensor * src2 = tensor->src[2];
  12738. const int64_t elem_q = ggml_nelements(src0);
  12739. const int64_t elem_k = ggml_nelements(src1);
  12740. const int64_t elem_v = ggml_nelements(src2);
  12741. enum ggml_type result_type = flash_grad->type;
  12742. GGML_ASSERT(ggml_blck_size(result_type) == 1);
  12743. const size_t tsize = ggml_type_size(result_type);
  12744. const size_t offs_q = 0;
  12745. const size_t offs_k = offs_q + GGML_PAD(elem_q * tsize, GGML_MEM_ALIGN);
  12746. const size_t offs_v = offs_k + GGML_PAD(elem_k * tsize, GGML_MEM_ALIGN);
  12747. if (src0->grad) {
  12748. struct ggml_tensor * view_q = ggml_view_1d(ctx, flash_grad, elem_q, offs_q);
  12749. struct ggml_tensor * grad_q = ggml_reshape(ctx, view_q, src0);
  12750. src0->grad = ggml_add_or_set(ctx,
  12751. src0->grad,
  12752. grad_q,
  12753. zero_table);
  12754. }
  12755. if (src1->grad) {
  12756. struct ggml_tensor * view_k = ggml_view_1d(ctx, flash_grad, elem_k, offs_k);
  12757. struct ggml_tensor * grad_k = ggml_reshape(ctx, view_k, src1);
  12758. src1->grad = ggml_add_or_set(ctx,
  12759. src1->grad,
  12760. grad_k,
  12761. zero_table);
  12762. }
  12763. if (src2->grad) {
  12764. struct ggml_tensor * view_v = ggml_view_1d(ctx, flash_grad, elem_v, offs_v);
  12765. struct ggml_tensor * grad_v = ggml_reshape(ctx, view_v, src2);
  12766. src2->grad = ggml_add_or_set(ctx,
  12767. src2->grad,
  12768. grad_v,
  12769. zero_table);
  12770. }
  12771. } break;
  12772. case GGML_OP_FLASH_FF:
  12773. {
  12774. GGML_ASSERT(false); // not supported
  12775. } break;
  12776. case GGML_OP_FLASH_ATTN_BACK:
  12777. {
  12778. GGML_ASSERT(false); // not supported
  12779. } break;
  12780. case GGML_OP_WIN_PART:
  12781. case GGML_OP_WIN_UNPART:
  12782. case GGML_OP_UNARY:
  12783. {
  12784. switch (ggml_get_unary_op(tensor)) {
  12785. case GGML_UNARY_OP_ABS:
  12786. {
  12787. if (src0->grad) {
  12788. src0->grad =
  12789. ggml_add_or_set(ctx,
  12790. src0->grad,
  12791. ggml_mul(ctx,
  12792. ggml_sgn(ctx, src0),
  12793. tensor->grad),
  12794. zero_table);
  12795. }
  12796. } break;
  12797. case GGML_UNARY_OP_SGN:
  12798. {
  12799. if (src0->grad) {
  12800. // noop
  12801. }
  12802. } break;
  12803. case GGML_UNARY_OP_NEG:
  12804. {
  12805. if (src0->grad) {
  12806. src0->grad = ggml_sub_or_set(ctx, src0->grad, tensor->grad, zero_table);
  12807. }
  12808. } break;
  12809. case GGML_UNARY_OP_STEP:
  12810. {
  12811. if (src0->grad) {
  12812. // noop
  12813. }
  12814. } break;
  12815. case GGML_UNARY_OP_TANH:
  12816. {
  12817. GGML_ASSERT(false); // TODO: not implemented
  12818. } break;
  12819. case GGML_UNARY_OP_ELU:
  12820. {
  12821. GGML_ASSERT(false); // TODO: not implemented
  12822. } break;
  12823. case GGML_UNARY_OP_RELU:
  12824. {
  12825. if (src0->grad) {
  12826. src0->grad = ggml_add_or_set(ctx,
  12827. src0->grad,
  12828. ggml_mul(ctx,
  12829. ggml_step(ctx, src0),
  12830. tensor->grad),
  12831. zero_table);
  12832. }
  12833. } break;
  12834. case GGML_UNARY_OP_GELU:
  12835. {
  12836. GGML_ASSERT(false); // TODO: not implemented
  12837. } break;
  12838. case GGML_UNARY_OP_GELU_QUICK:
  12839. {
  12840. GGML_ASSERT(false); // TODO: not implemented
  12841. } break;
  12842. case GGML_UNARY_OP_SILU:
  12843. {
  12844. // necessary for llama
  12845. if (src0->grad) {
  12846. src0->grad = ggml_add_or_set(ctx,
  12847. src0->grad,
  12848. ggml_silu_back(ctx, src0, tensor->grad),
  12849. zero_table);
  12850. }
  12851. } break;
  12852. default:
  12853. GGML_ASSERT(false);
  12854. }
  12855. } break;
  12856. case GGML_OP_GET_REL_POS:
  12857. case GGML_OP_ADD_REL_POS:
  12858. case GGML_OP_MAP_UNARY:
  12859. case GGML_OP_MAP_BINARY:
  12860. case GGML_OP_MAP_CUSTOM1_F32:
  12861. case GGML_OP_MAP_CUSTOM2_F32:
  12862. case GGML_OP_MAP_CUSTOM3_F32:
  12863. case GGML_OP_MAP_CUSTOM1:
  12864. case GGML_OP_MAP_CUSTOM2:
  12865. case GGML_OP_MAP_CUSTOM3:
  12866. {
  12867. GGML_ASSERT(false); // not supported
  12868. } break;
  12869. case GGML_OP_CROSS_ENTROPY_LOSS:
  12870. {
  12871. if (src0->grad) {
  12872. src0->grad = ggml_add_or_set(ctx,
  12873. src0->grad,
  12874. ggml_cross_entropy_loss_back(ctx,
  12875. src0,
  12876. src1,
  12877. tensor->grad),
  12878. zero_table);
  12879. }
  12880. } break;
  12881. case GGML_OP_CROSS_ENTROPY_LOSS_BACK:
  12882. {
  12883. GGML_ASSERT(false); // not supported
  12884. } break;
  12885. case GGML_OP_NONE:
  12886. {
  12887. // nop
  12888. } break;
  12889. case GGML_OP_COUNT:
  12890. {
  12891. GGML_ASSERT(false);
  12892. } break;
  12893. }
  12894. for (int i = 0; i < GGML_MAX_SRC; ++i) {
  12895. if (tensor->src[i] && tensor->src[i]->grad) {
  12896. GGML_ASSERT(ggml_are_same_shape(tensor->src[i], tensor->src[i]->grad));
  12897. }
  12898. }
  12899. }
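// add a node and all of its parents to the graph in topological order
// tensors that are not the result of an op and have no gradient are collected as leafs (e.g. constants),
// everything else is appended to the list of nodes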
  12900. static void ggml_visit_parents(struct ggml_cgraph * cgraph, struct ggml_tensor * node) {
  12901. if (node->grad == NULL) {
  12902. // this usually happens when we generate intermediate nodes from constants in the backward pass
  12903. // it can also happen during forward pass, if the user performs computations with constants
  12904. if (node->op != GGML_OP_NONE) {
  12905. //GGML_PRINT_DEBUG("%s: warning: node %p has no grad, but op %d\n", __func__, (void *) node, node->op);
  12906. }
  12907. }
  12908. // check if already visited
  12909. if (ggml_hash_insert(cgraph->visited_hash_table, node) == GGML_HASHTABLE_ALREADY_EXISTS) {
  12910. return;
  12911. }
  12912. for (int i = 0; i < GGML_MAX_SRC; ++i) {
  12913. const int k =
  12914. (cgraph->order == GGML_CGRAPH_EVAL_ORDER_LEFT_TO_RIGHT) ? i :
  12915. (cgraph->order == GGML_CGRAPH_EVAL_ORDER_RIGHT_TO_LEFT) ? (GGML_MAX_SRC-1-i) :
  12916. /* unknown order, just fall back to using i*/ i;
  12917. if (node->src[k]) {
  12918. ggml_visit_parents(cgraph, node->src[k]);
  12919. }
  12920. }
  12921. if (node->op == GGML_OP_NONE && node->grad == NULL) {
  12922. // reached a leaf node, not part of the gradient graph (e.g. a constant)
  12923. GGML_ASSERT(cgraph->n_leafs < cgraph->size);
  12924. if (strlen(node->name) == 0) {
  12925. ggml_format_name(node, "leaf_%d", cgraph->n_leafs);
  12926. }
  12927. cgraph->leafs[cgraph->n_leafs] = node;
  12928. cgraph->n_leafs++;
  12929. } else {
  12930. GGML_ASSERT(cgraph->n_nodes < cgraph->size);
  12931. if (strlen(node->name) == 0) {
  12932. ggml_format_name(node, "node_%d", cgraph->n_nodes);
  12933. }
  12934. cgraph->nodes[cgraph->n_nodes] = node;
  12935. if (cgraph->grads) {
  12936. cgraph->grads[cgraph->n_nodes] = node->grad;
  12937. }
  12938. cgraph->n_nodes++;
  12939. }
  12940. }
  12941. static void ggml_build_forward_impl(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor, bool expand) {
  12942. if (!expand) {
  12943. // TODO: this branch isn't accessible anymore, maybe move this to ggml_build_forward_expand
  12944. ggml_graph_clear(cgraph);
  12945. }
  12946. const int n0 = cgraph->n_nodes;
  12947. UNUSED(n0);
  12948. ggml_visit_parents(cgraph, tensor);
  12949. const int n_new = cgraph->n_nodes - n0;
  12950. GGML_PRINT_DEBUG("%s: visited %d new nodes\n", __func__, n_new);
  12951. if (n_new > 0) {
// the last added node should always be the starting point
  12953. GGML_ASSERT(cgraph->nodes[cgraph->n_nodes - 1] == tensor);
  12954. }
  12955. }
  12956. void ggml_build_forward_expand(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor) {
  12957. ggml_build_forward_impl(cgraph, tensor, true);
  12958. }
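// build the backward graph gb from the forward graph gf:
// - with keep == true the gradient tensors are detached (duplicated) so that gb does not reuse the gradients of gf
// - gradients that still hold their initial zero values are remembered in zero_table, which lets
//   ggml_compute_backward replace the first accumulation into them instead of adding to zero
// - the nodes are processed in reverse order and the gradients of all parameter tensors are expanded into gb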
  12959. void ggml_build_backward_expand(struct ggml_context * ctx, struct ggml_cgraph * gf, struct ggml_cgraph * gb, bool keep) {
  12960. GGML_ASSERT(gf->n_nodes > 0);
  12961. // if we are keeping the gradient graph, we have to detach the gradient nodes from the original graph
  12962. if (keep) {
  12963. for (int i = 0; i < gf->n_nodes; i++) {
  12964. struct ggml_tensor * node = gf->nodes[i];
  12965. if (node->grad) {
  12966. node->grad = ggml_dup_tensor(ctx, node);
  12967. gf->grads[i] = node->grad;
  12968. }
  12969. }
  12970. }
  12971. // remember original gradients which start with zero values
  12972. struct ggml_hash_set zero_table = ggml_hash_set_new(gf->size);
  12973. for (int i = 0; i < gf->n_nodes; i++) {
  12974. if (gf->grads[i]) {
  12975. ggml_hash_insert(zero_table, gf->grads[i]);
  12976. }
  12977. }
  12978. for (int i = gf->n_nodes - 1; i >= 0; i--) {
  12979. struct ggml_tensor * node = gf->nodes[i];
  12980. // inplace operations to add gradients are not created by ggml_compute_backward
  12981. // use allocator to automatically make inplace operations
  12982. if (node->grad) {
  12983. ggml_compute_backward(ctx, node, zero_table);
  12984. }
  12985. }
  12986. for (int i = 0; i < gf->n_nodes; i++) {
  12987. struct ggml_tensor * node = gf->nodes[i];
  12988. if (node->is_param) {
  12989. GGML_PRINT_DEBUG("%s: found root node %p\n", __func__, (void *) node);
  12990. ggml_build_forward_expand(gb, node->grad);
  12991. }
  12992. }
  12993. ggml_hash_set_free(zero_table);
  12994. }
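// minimal usage sketch ('ctx' and 'loss' are hypothetical caller-side names, the ggml_* calls are the ones defined here):
//
//   struct ggml_cgraph * gf = ggml_new_graph_custom(ctx, GGML_DEFAULT_GRAPH_SIZE, /*grads =*/ true);
//   ggml_build_forward_expand(gf, loss);                      // forward graph ending in the loss tensor
//   struct ggml_cgraph * gb = ggml_graph_dup(ctx, gf);        // backward graph starts as a copy of the forward graph
//   ggml_build_backward_expand(ctx, gf, gb, /*keep =*/ false);
//   // after computing gb, the gradients of the parameter tensors (marked with ggml_set_param) are available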
  12995. static size_t ggml_graph_nbytes(size_t size, bool grads) {
  12996. size_t nbytes = sizeof(struct ggml_cgraph);
  12997. nbytes += size * sizeof(struct ggml_tensor *) * 2; // leafs + nodes
  12998. if (grads) {
  12999. nbytes += size * sizeof(struct ggml_tensor *); // grads
  13000. }
  13001. nbytes += ggml_hash_size(size * 2) * sizeof(struct ggml_tensor *); // hash set
  13002. return nbytes;
  13003. }
  13004. size_t ggml_graph_overhead_custom(size_t size, bool grads) {
  13005. return GGML_OBJECT_SIZE + GGML_PAD(ggml_graph_nbytes(size, grads), GGML_MEM_ALIGN);
  13006. }
  13007. size_t ggml_graph_overhead(void) {
  13008. return ggml_graph_overhead_custom(GGML_DEFAULT_GRAPH_SIZE, false);
  13009. }
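// graphs are allocated as a single object inside the context, laid out as:
//   [struct ggml_cgraph][nodes: size ptrs][leafs: size ptrs][hash keys: ggml_hash_size(2*size) ptrs][grads: size ptrs (optional)]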
  13010. struct ggml_cgraph * ggml_new_graph_custom(struct ggml_context * ctx, size_t size, bool grads) {
  13011. const size_t obj_size = ggml_graph_nbytes(size, grads);
  13012. struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_GRAPH, obj_size);
  13013. struct ggml_cgraph * cgraph = (struct ggml_cgraph *) ((char *) ctx->mem_buffer + obj->offs);
  13014. struct ggml_tensor ** data_start = (struct ggml_tensor **) (cgraph + 1);
  13015. size_t hash_size = ggml_hash_size(size * 2);
  13016. struct ggml_tensor ** nodes_ptr = data_start;
  13017. struct ggml_tensor ** leafs_ptr = nodes_ptr + size;
  13018. struct ggml_tensor ** hash_keys_ptr = leafs_ptr + size;
  13019. struct ggml_tensor ** grads_ptr = grads ? hash_keys_ptr + hash_size : NULL;
  13020. // check that we allocated the correct amount of memory
  13021. assert(obj_size == (size_t) (
  13022. (grads ? (char *)(grads_ptr + size) : (char *)(hash_keys_ptr + hash_size)) - (char *)cgraph));
  13023. memset(hash_keys_ptr, 0, hash_size * sizeof(struct ggml_tensor *));
  13024. *cgraph = (struct ggml_cgraph) {
  13025. /*.size =*/ size,
  13026. /*.n_nodes =*/ 0,
  13027. /*.n_leafs =*/ 0,
  13028. /*.nodes =*/ nodes_ptr,
  13029. /*.grads =*/ grads_ptr,
  13030. /*.leafs =*/ leafs_ptr,
  13031. /*.hash_table =*/ { hash_size, hash_keys_ptr },
  13032. /*.order =*/ GGML_CGRAPH_EVAL_ORDER_LEFT_TO_RIGHT,
  13033. /*.perf_runs =*/ 0,
  13034. /*.perf_cycles =*/ 0,
  13035. /*.perf_time_us =*/ 0,
  13036. };
  13037. return cgraph;
  13038. }
  13039. struct ggml_cgraph * ggml_new_graph(struct ggml_context * ctx) {
  13040. return ggml_new_graph_custom(ctx, GGML_DEFAULT_GRAPH_SIZE, false);
  13041. }
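// create a non-owning view of the node range [i0, i1) of an existing graph (no leafs, no hash table)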
  13042. struct ggml_cgraph ggml_graph_view(struct ggml_cgraph * cgraph0, int i0, int i1) {
  13043. struct ggml_cgraph cgraph = {
  13044. /*.size =*/ 0,
  13045. /*.n_nodes =*/ i1 - i0,
  13046. /*.n_leafs =*/ 0,
  13047. /*.nodes =*/ cgraph0->nodes + i0,
  13048. /*.grads =*/ cgraph0->grads ? cgraph0->grads + i0 : NULL,
  13049. /*.leafs =*/ NULL,
  13050. /*.hash_table =*/ { 0, NULL },
  13051. /*.order =*/ cgraph0->order,
  13052. /*.perf_runs =*/ 0,
  13053. /*.perf_cycles =*/ 0,
  13054. /*.perf_time_us =*/ 0,
  13055. };
  13056. return cgraph;
  13057. }
  13058. void ggml_graph_cpy(struct ggml_cgraph * src, struct ggml_cgraph * dst) {
  13059. GGML_ASSERT(dst->size >= src->n_leafs);
  13060. GGML_ASSERT(dst->size >= src->n_nodes);
  13061. GGML_ASSERT(dst->visited_hash_table.size >= src->visited_hash_table.size);
  13062. dst->n_leafs = src->n_leafs;
  13063. dst->n_nodes = src->n_nodes;
  13064. dst->order = src->order;
  13065. for (int i = 0; i < src->n_leafs; ++i) {
  13066. dst->leafs[i] = src->leafs[i];
  13067. }
  13068. for (int i = 0; i < src->n_nodes; ++i) {
  13069. dst->nodes[i] = src->nodes[i];
  13070. }
  13071. if (src->grads) {
  13072. GGML_ASSERT(dst->grads != NULL);
  13073. for (int i = 0; i < src->n_nodes; ++i) {
  13074. dst->grads[i] = src->grads[i];
  13075. }
  13076. }
  13077. for (size_t i = 0; i < src->visited_hash_table.size; ++i) {
  13078. if (src->visited_hash_table.keys[i]) {
  13079. ggml_hash_insert(dst->visited_hash_table, src->visited_hash_table.keys[i]);
  13080. }
  13081. }
  13082. }
  13083. struct ggml_cgraph * ggml_graph_dup(struct ggml_context * ctx, struct ggml_cgraph * cgraph) {
  13084. struct ggml_cgraph * result = ggml_new_graph_custom(ctx, cgraph->size, cgraph->grads != NULL);
  13085. ggml_graph_cpy(cgraph, result);
  13086. return result;
  13087. }
  13088. void ggml_graph_reset(struct ggml_cgraph * cgraph) {
  13089. GGML_ASSERT(cgraph->grads != NULL);
  13090. for (int i = 0; i < cgraph->n_nodes; i++) {
  13091. struct ggml_tensor * grad = cgraph->grads[i];
  13092. if (grad) {
  13093. ggml_set_zero(grad);
  13094. }
  13095. }
  13096. }
  13097. void ggml_graph_clear(struct ggml_cgraph * cgraph) {
  13098. cgraph->n_leafs = 0;
  13099. cgraph->n_nodes = 0;
  13100. memset(cgraph->visited_hash_table.keys, 0, cgraph->visited_hash_table.size * sizeof(struct ggml_tensor *));
  13101. }
  13102. //
  13103. // thread data
  13104. //
  13105. // synchronization is done via busy loops
// I tried using spin locks, but I am not sure how to use them correctly - the variants I tried were slower than busy loops
  13107. //
  13108. #ifdef __APPLE__
  13109. //#include <os/lock.h>
  13110. //
  13111. //typedef os_unfair_lock ggml_lock_t;
  13112. //
  13113. //#define ggml_lock_init(x) UNUSED(x)
  13114. //#define ggml_lock_destroy(x) UNUSED(x)
  13115. //#define ggml_lock_lock os_unfair_lock_lock
  13116. //#define ggml_lock_unlock os_unfair_lock_unlock
  13117. //
  13118. //#define GGML_LOCK_INITIALIZER OS_UNFAIR_LOCK_INIT
  13119. typedef int ggml_lock_t;
  13120. #define ggml_lock_init(x) UNUSED(x)
  13121. #define ggml_lock_destroy(x) UNUSED(x)
  13122. #define ggml_lock_lock(x) UNUSED(x)
  13123. #define ggml_lock_unlock(x) UNUSED(x)
  13124. #define GGML_LOCK_INITIALIZER 0
  13125. typedef pthread_t ggml_thread_t;
  13126. #define ggml_thread_create pthread_create
  13127. #define ggml_thread_join pthread_join
  13128. #else
  13129. //typedef pthread_spinlock_t ggml_lock_t;
  13130. //#define ggml_lock_init(x) pthread_spin_init(x, PTHREAD_PROCESS_PRIVATE)
  13131. //#define ggml_lock_destroy pthread_spin_destroy
  13132. //#define ggml_lock_lock pthread_spin_lock
  13133. //#define ggml_lock_unlock pthread_spin_unlock
  13134. typedef int ggml_lock_t;
  13135. #define ggml_lock_init(x) UNUSED(x)
  13136. #define ggml_lock_destroy(x) UNUSED(x)
  13137. #if defined(__x86_64__) || (defined(_MSC_VER) && defined(_M_AMD64))
  13138. #define ggml_lock_lock(x) _mm_pause()
  13139. #else
  13140. #define ggml_lock_lock(x) UNUSED(x)
  13141. #endif
  13142. #define ggml_lock_unlock(x) UNUSED(x)
  13143. #define GGML_LOCK_INITIALIZER 0
  13144. typedef pthread_t ggml_thread_t;
  13145. #define ggml_thread_create pthread_create
  13146. #define ggml_thread_join pthread_join
  13147. #endif
  13148. // Android's libc implementation "bionic" does not support setting affinity
  13149. #if defined(__linux__) && !defined(__BIONIC__)
  13150. static void set_numa_thread_affinity(int thread_n, int n_threads) {
  13151. if (!ggml_is_numa()) {
  13152. return;
  13153. }
// run this thread on NUMA node node_num = thread_n / (threads per node)
  13155. const int node_num = thread_n / ((n_threads + g_state.numa.n_nodes - 1) / g_state.numa.n_nodes);
  13156. struct ggml_numa_node * node = &g_state.numa.nodes[node_num];
  13157. size_t setsize = CPU_ALLOC_SIZE(g_state.numa.total_cpus);
  13158. cpu_set_t * cpus = CPU_ALLOC(g_state.numa.total_cpus);
  13159. CPU_ZERO_S(setsize, cpus);
  13160. for (size_t i = 0; i < node->n_cpus; ++i) {
  13161. CPU_SET_S(node->cpus[i], setsize, cpus);
  13162. }
  13163. int rv = pthread_setaffinity_np(pthread_self(), setsize, cpus);
  13164. if (rv) {
  13165. fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n",
  13166. strerror(rv));
  13167. }
  13168. CPU_FREE(cpus);
  13169. }
  13170. static void clear_numa_thread_affinity(void) {
  13171. if (!ggml_is_numa()) {
  13172. return;
  13173. }
  13174. size_t setsize = CPU_ALLOC_SIZE(g_state.numa.total_cpus);
  13175. cpu_set_t * cpus = CPU_ALLOC(g_state.numa.total_cpus);
  13176. CPU_ZERO_S(setsize, cpus);
  13177. for (unsigned i = 0; i < g_state.numa.total_cpus; ++i) {
  13178. CPU_SET_S(i, setsize, cpus);
  13179. }
  13180. int rv = pthread_setaffinity_np(pthread_self(), setsize, cpus);
  13181. if (rv) {
  13182. fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n",
  13183. strerror(rv));
  13184. }
  13185. CPU_FREE(cpus);
  13186. }
  13187. #else
  13188. // TODO: Windows etc.
  13189. // (the linux implementation may also work on BSD, someone should test)
  13190. static void set_numa_thread_affinity(int thread_n, int n_threads) { UNUSED(thread_n); UNUSED(n_threads); }
  13191. static void clear_numa_thread_affinity(void) {}
  13192. #endif
  13193. struct ggml_compute_state_shared {
  13194. const struct ggml_cgraph * cgraph;
  13195. const struct ggml_cplan * cplan;
  13196. int64_t perf_node_start_cycles;
  13197. int64_t perf_node_start_time_us;
  13198. const int n_threads;
  13199. // synchronization primitives
  13200. atomic_int n_active; // num active threads
  13201. atomic_int node_n; // active graph node
  13202. bool (*abort_callback)(void * data); // abort ggml_graph_compute when true
  13203. void * abort_callback_data;
  13204. };
  13205. struct ggml_compute_state {
  13206. ggml_thread_t thrd;
  13207. int ith;
  13208. struct ggml_compute_state_shared * shared;
  13209. };
  13210. static void ggml_graph_compute_perf_stats_node(struct ggml_tensor * node, const struct ggml_compute_state_shared * st) {
  13211. int64_t cycles_cur = ggml_perf_cycles() - st->perf_node_start_cycles;
  13212. int64_t time_us_cur = ggml_perf_time_us() - st->perf_node_start_time_us;
  13213. node->perf_runs++;
  13214. node->perf_cycles += cycles_cur;
  13215. node->perf_time_us += time_us_cur;
  13216. }
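// number of threads that should cooperate on the given node
// cheap / memory-bound ops run on a single thread, compute-heavy ops are split across all n_threads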
  13217. static int ggml_get_n_tasks(struct ggml_tensor * node, int n_threads) {
  13218. int n_tasks = 0;
  13219. switch (node->op) {
  13220. case GGML_OP_CPY:
  13221. case GGML_OP_DUP:
  13222. case GGML_OP_ADD:
  13223. case GGML_OP_ADD1:
  13224. case GGML_OP_ACC:
  13225. {
  13226. n_tasks = n_threads;
  13227. } break;
  13228. case GGML_OP_SUB:
  13229. case GGML_OP_SQR:
  13230. case GGML_OP_SQRT:
  13231. case GGML_OP_LOG:
  13232. case GGML_OP_SUM:
  13233. case GGML_OP_SUM_ROWS:
  13234. case GGML_OP_MEAN:
  13235. case GGML_OP_ARGMAX:
  13236. case GGML_OP_REPEAT:
  13237. case GGML_OP_REPEAT_BACK:
  13238. case GGML_OP_LEAKY_RELU:
  13239. {
  13240. n_tasks = 1;
  13241. } break;
  13242. case GGML_OP_UNARY:
  13243. switch (ggml_get_unary_op(node)) {
  13244. case GGML_UNARY_OP_ABS:
  13245. case GGML_UNARY_OP_SGN:
  13246. case GGML_UNARY_OP_NEG:
  13247. case GGML_UNARY_OP_STEP:
  13248. case GGML_UNARY_OP_TANH:
  13249. case GGML_UNARY_OP_ELU:
  13250. case GGML_UNARY_OP_RELU:
  13251. {
  13252. n_tasks = 1;
  13253. } break;
  13254. case GGML_UNARY_OP_GELU:
  13255. case GGML_UNARY_OP_GELU_QUICK:
  13256. case GGML_UNARY_OP_SILU:
  13257. {
  13258. n_tasks = n_threads;
  13259. } break;
  13260. default:
  13261. GGML_ASSERT(false);
  13262. }
  13263. break;
  13264. case GGML_OP_SILU_BACK:
  13265. case GGML_OP_MUL:
  13266. case GGML_OP_DIV:
  13267. case GGML_OP_NORM:
  13268. case GGML_OP_RMS_NORM:
  13269. case GGML_OP_RMS_NORM_BACK:
  13270. case GGML_OP_GROUP_NORM:
  13271. case GGML_OP_CONCAT:
  13272. {
  13273. n_tasks = n_threads;
  13274. } break;
  13275. case GGML_OP_MUL_MAT:
  13276. {
  13277. n_tasks = n_threads;
  13278. // TODO: use different scheduling for different matrix sizes
  13279. //const int nr0 = ggml_nrows(node->src[0]);
  13280. //const int nr1 = ggml_nrows(node->src[1]);
  13281. //n_tasks = MIN(n_threads, MAX(1, nr0/128));
  13282. //printf("nr0 = %8d, nr1 = %8d, nr0*nr1 = %8d, n_tasks%d\n", nr0, nr1, nr0*nr1, n_tasks);
  13283. #if defined(GGML_USE_CUBLAS)
  13284. if (ggml_cuda_can_mul_mat(node->src[0], node->src[1], node)) {
n_tasks = 1; // TODO: this actually does nothing
// the threads are still spinning
  13287. }
  13288. #elif defined(GGML_USE_CLBLAST)
  13289. if (ggml_cl_can_mul_mat(node->src[0], node->src[1], node)) {
n_tasks = 1; // TODO: this actually does nothing
// the threads are still spinning
  13292. }
  13293. #endif
  13294. #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
  13295. if (ggml_compute_forward_mul_mat_use_blas(node->src[0], node->src[1], node)) {
n_tasks = 1; // TODO: this actually does nothing
// the threads are still spinning
  13298. }
  13299. #endif
  13300. } break;
  13301. case GGML_OP_MUL_MAT_ID:
  13302. {
  13303. // FIXME: blas
  13304. n_tasks = n_threads;
  13305. } break;
  13306. case GGML_OP_OUT_PROD:
  13307. {
  13308. n_tasks = n_threads;
  13309. } break;
  13310. case GGML_OP_SCALE:
  13311. case GGML_OP_SET:
  13312. case GGML_OP_CONT:
  13313. case GGML_OP_RESHAPE:
  13314. case GGML_OP_VIEW:
  13315. case GGML_OP_PERMUTE:
  13316. case GGML_OP_TRANSPOSE:
  13317. case GGML_OP_GET_ROWS:
  13318. case GGML_OP_GET_ROWS_BACK:
  13319. case GGML_OP_DIAG:
  13320. {
  13321. n_tasks = 1;
  13322. } break;
  13323. case GGML_OP_DIAG_MASK_ZERO:
  13324. case GGML_OP_DIAG_MASK_INF:
  13325. case GGML_OP_SOFT_MAX_BACK:
  13326. case GGML_OP_ROPE:
  13327. case GGML_OP_ROPE_BACK:
  13328. case GGML_OP_ADD_REL_POS:
  13329. {
  13330. n_tasks = n_threads;
  13331. } break;
  13332. case GGML_OP_ALIBI:
  13333. {
  13334. n_tasks = 1; //TODO
  13335. } break;
  13336. case GGML_OP_CLAMP:
  13337. {
  13338. n_tasks = 1; //TODO
  13339. } break;
  13340. case GGML_OP_SOFT_MAX:
  13341. {
  13342. n_tasks = MIN(MIN(4, n_threads), ggml_nrows(node->src[0]));
  13343. } break;
  13344. case GGML_OP_CONV_TRANSPOSE_1D:
  13345. {
  13346. n_tasks = n_threads;
  13347. } break;
  13348. case GGML_OP_IM2COL:
  13349. {
  13350. n_tasks = n_threads;
  13351. } break;
  13352. case GGML_OP_CONV_TRANSPOSE_2D:
  13353. {
  13354. n_tasks = n_threads;
  13355. } break;
  13356. case GGML_OP_POOL_1D:
  13357. case GGML_OP_POOL_2D:
  13358. {
  13359. n_tasks = 1;
  13360. } break;
  13361. case GGML_OP_UPSCALE:
  13362. {
  13363. n_tasks = n_threads;
  13364. } break;
  13365. case GGML_OP_PAD:
  13366. {
  13367. n_tasks = n_threads;
  13368. } break;
  13369. case GGML_OP_ARGSORT:
  13370. {
  13371. n_tasks = n_threads;
  13372. } break;
  13373. case GGML_OP_FLASH_ATTN:
  13374. {
  13375. n_tasks = n_threads;
  13376. } break;
  13377. case GGML_OP_FLASH_FF:
  13378. {
  13379. n_tasks = n_threads;
  13380. } break;
  13381. case GGML_OP_FLASH_ATTN_BACK:
  13382. {
  13383. n_tasks = n_threads;
  13384. } break;
  13385. case GGML_OP_WIN_PART:
  13386. case GGML_OP_WIN_UNPART:
  13387. case GGML_OP_GET_REL_POS:
  13388. case GGML_OP_MAP_UNARY:
  13389. case GGML_OP_MAP_BINARY:
  13390. case GGML_OP_MAP_CUSTOM1_F32:
  13391. case GGML_OP_MAP_CUSTOM2_F32:
  13392. case GGML_OP_MAP_CUSTOM3_F32:
  13393. {
  13394. n_tasks = 1;
  13395. } break;
  13396. case GGML_OP_MAP_CUSTOM1:
  13397. {
  13398. struct ggml_map_custom1_op_params * p = (struct ggml_map_custom1_op_params *) node->op_params;
  13399. if (p->n_tasks == GGML_N_TASKS_MAX) {
  13400. n_tasks = n_threads;
  13401. } else {
  13402. n_tasks = MIN(p->n_tasks, n_threads);
  13403. }
  13404. } break;
  13405. case GGML_OP_MAP_CUSTOM2:
  13406. {
  13407. struct ggml_map_custom2_op_params * p = (struct ggml_map_custom2_op_params *) node->op_params;
  13408. if (p->n_tasks == GGML_N_TASKS_MAX) {
  13409. n_tasks = n_threads;
  13410. } else {
  13411. n_tasks = MIN(p->n_tasks, n_threads);
  13412. }
  13413. } break;
  13414. case GGML_OP_MAP_CUSTOM3:
  13415. {
  13416. struct ggml_map_custom3_op_params * p = (struct ggml_map_custom3_op_params *) node->op_params;
  13417. if (p->n_tasks == GGML_N_TASKS_MAX) {
  13418. n_tasks = n_threads;
  13419. } else {
  13420. n_tasks = MIN(p->n_tasks, n_threads);
  13421. }
  13422. } break;
  13423. case GGML_OP_CROSS_ENTROPY_LOSS:
  13424. {
  13425. n_tasks = n_threads;
  13426. } break;
  13427. case GGML_OP_CROSS_ENTROPY_LOSS_BACK:
  13428. {
  13429. n_tasks = n_threads;
  13430. } break;
  13431. case GGML_OP_NONE:
  13432. {
  13433. n_tasks = 1;
  13434. } break;
  13435. case GGML_OP_COUNT:
  13436. {
  13437. GGML_ASSERT(false);
  13438. } break;
  13439. default:
  13440. {
  13441. fprintf(stderr, "%s: op not implemented: ", __func__);
  13442. if (node->op < GGML_OP_COUNT) {
  13443. fprintf(stderr, "%s\n", ggml_op_name(node->op));
  13444. } else {
  13445. fprintf(stderr, "%d\n", node->op);
  13446. }
  13447. GGML_ASSERT(false);
  13448. } break;
  13449. }
  13450. assert(n_tasks > 0);
  13451. return n_tasks;
  13452. }
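// worker loop executed by every thread, including the calling thread:
// - the last thread to decrement n_active finalizes the previous node, initializes the next one,
//   runs single-task nodes directly and then publishes the new node_n for the others
// - the remaining threads busy-wait on node_n and join in for the COMPUTE phase of multi-task nodes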
  13453. static thread_ret_t ggml_graph_compute_thread(void * data) {
  13454. struct ggml_compute_state * state = (struct ggml_compute_state *) data;
  13455. const struct ggml_cgraph * cgraph = state->shared->cgraph;
  13456. const struct ggml_cplan * cplan = state->shared->cplan;
  13457. const int n_threads = state->shared->n_threads;
  13458. set_numa_thread_affinity(state->ith, n_threads);
  13459. int node_n = -1;
  13460. while (true) {
  13461. if (cplan->abort_callback && cplan->abort_callback(cplan->abort_callback_data)) {
  13462. state->shared->node_n += 1;
  13463. return (thread_ret_t) GGML_EXIT_ABORTED;
  13464. }
  13465. if (atomic_fetch_sub(&state->shared->n_active, 1) == 1) {
  13466. // all other threads are finished and spinning
// do finalize and init here so we don't have to synchronize again
  13468. struct ggml_compute_params params = {
  13469. /*.type =*/ GGML_TASK_FINALIZE,
  13470. /*.ith =*/ 0,
  13471. /*.nth =*/ 0,
  13472. /*.wsize =*/ cplan->work_size,
  13473. /*.wdata =*/ cplan->work_data,
  13474. };
  13475. if (node_n != -1) {
  13476. /* FINALIZE */
  13477. struct ggml_tensor * node = cgraph->nodes[node_n];
  13478. if (GGML_OP_HAS_FINALIZE[node->op]) {
  13479. params.nth = ggml_get_n_tasks(node, n_threads);
  13480. ggml_compute_forward(&params, node);
  13481. }
  13482. ggml_graph_compute_perf_stats_node(node, state->shared);
  13483. }
// distribute new work or execute it directly if it only uses one task
  13485. while (++node_n < cgraph->n_nodes) {
  13486. GGML_PRINT_DEBUG_5("%s: %d/%d\n", __func__, node_n, cgraph->n_nodes);
  13487. struct ggml_tensor * node = cgraph->nodes[node_n];
  13488. const int n_tasks = ggml_get_n_tasks(node, n_threads);
  13489. state->shared->perf_node_start_cycles = ggml_perf_cycles();
  13490. state->shared->perf_node_start_time_us = ggml_perf_time_us();
  13491. params.nth = n_tasks;
  13492. /* INIT */
  13493. if (GGML_OP_HAS_INIT[node->op]) {
  13494. params.type = GGML_TASK_INIT;
  13495. ggml_compute_forward(&params, node);
  13496. }
  13497. if (n_tasks == 1) {
// TODO: maybe publish node_n to the atomic here so that, when other threads see n_tasks == 1,
// they can do something more efficient than spinning (?)
  13500. params.type = GGML_TASK_COMPUTE;
  13501. ggml_compute_forward(&params, node);
  13502. if (GGML_OP_HAS_FINALIZE[node->op]) {
  13503. params.type = GGML_TASK_FINALIZE;
  13504. ggml_compute_forward(&params, node);
  13505. }
  13506. ggml_graph_compute_perf_stats_node(node, state->shared);
  13507. } else {
  13508. break;
  13509. }
  13510. if (cplan->abort_callback && cplan->abort_callback(cplan->abort_callback_data)) {
  13511. break;
  13512. }
  13513. }
  13514. atomic_store(&state->shared->n_active, n_threads);
  13515. atomic_store(&state->shared->node_n, node_n);
  13516. } else {
  13517. // wait for other threads to finish
  13518. const int last = node_n;
  13519. while (true) {
  13520. // TODO: this sched_yield can have significant impact on the performance - either positive or negative
  13521. // depending on the workload and the operating system.
  13522. // since it is not clear what is the best approach, it should potentially become user-configurable
  13523. // ref: https://github.com/ggerganov/ggml/issues/291
  13524. #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
  13525. sched_yield();
  13526. #endif
  13527. node_n = atomic_load(&state->shared->node_n);
  13528. if (node_n != last) break;
}
  13530. }
  13531. // check if we should stop
  13532. if (node_n >= cgraph->n_nodes) break;
  13533. /* COMPUTE */
  13534. struct ggml_tensor * node = cgraph->nodes[node_n];
  13535. const int n_tasks = ggml_get_n_tasks(node, n_threads);
  13536. struct ggml_compute_params params = {
  13537. /*.type =*/ GGML_TASK_COMPUTE,
  13538. /*.ith =*/ state->ith,
  13539. /*.nth =*/ n_tasks,
  13540. /*.wsize =*/ cplan->work_size,
  13541. /*.wdata =*/ cplan->work_data,
  13542. };
  13543. if (state->ith < n_tasks) {
  13544. ggml_compute_forward(&params, node);
  13545. }
  13546. }
  13547. return GGML_EXIT_SUCCESS;
  13548. }
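// determine the number of threads and the size of the work buffer required to evaluate the graph
// the returned cplan.work_data is NULL - the caller must point it at a buffer of at least cplan.work_size bytes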
  13549. struct ggml_cplan ggml_graph_plan(struct ggml_cgraph * cgraph, int n_threads) {
  13550. if (n_threads <= 0) {
  13551. n_threads = GGML_DEFAULT_N_THREADS;
  13552. }
  13553. size_t work_size = 0;
  13554. struct ggml_cplan cplan;
  13555. memset(&cplan, 0, sizeof(struct ggml_cplan));
  13556. // thread scheduling for the different operations + work buffer size estimation
  13557. for (int i = 0; i < cgraph->n_nodes; i++) {
  13558. struct ggml_tensor * node = cgraph->nodes[i];
  13559. const int n_tasks = ggml_get_n_tasks(node, n_threads);
  13560. size_t cur = 0;
  13561. switch (node->op) {
  13562. case GGML_OP_CPY:
  13563. case GGML_OP_DUP:
  13564. {
  13565. if (ggml_is_quantized(node->type)) {
  13566. cur = ggml_type_size(GGML_TYPE_F32) * node->ne[0] * n_tasks;
  13567. }
  13568. } break;
  13569. case GGML_OP_ADD:
  13570. case GGML_OP_ADD1:
  13571. {
  13572. if (ggml_is_quantized(node->src[0]->type)) {
  13573. cur = ggml_type_size(GGML_TYPE_F32) * node->src[0]->ne[0] * n_tasks;
  13574. }
  13575. } break;
  13576. case GGML_OP_ACC:
  13577. {
  13578. if (ggml_is_quantized(node->src[0]->type)) {
  13579. cur = ggml_type_size(GGML_TYPE_F32) * node->src[1]->ne[0] * n_tasks;
  13580. }
  13581. } break;
  13582. case GGML_OP_MUL_MAT:
  13583. {
  13584. const enum ggml_type vec_dot_type = type_traits[node->src[0]->type].vec_dot_type;
  13585. #if defined(GGML_USE_CLBLAST)
  13586. if (ggml_cl_can_mul_mat(node->src[0], node->src[1], node)) {
  13587. cur = ggml_cl_mul_mat_get_wsize(node->src[0], node->src[1], node);
  13588. } else
  13589. #endif
  13590. #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
  13591. if (ggml_compute_forward_mul_mat_use_blas(node->src[0], node->src[1], node)) {
  13592. if (node->src[0]->type != GGML_TYPE_F32) {
// here we need memory just for a single 2D matrix from src0
  13594. cur = ggml_type_size(GGML_TYPE_F32)*(node->src[0]->ne[0]*node->src[0]->ne[1]);
  13595. }
  13596. } else
  13597. #endif
  13598. if (node->src[1]->type != vec_dot_type) {
  13599. cur = ggml_row_size(vec_dot_type, ggml_nelements(node->src[1]));
  13600. }
  13601. } break;
  13602. case GGML_OP_MUL_MAT_ID:
  13603. {
  13604. const struct ggml_tensor * a = node->src[2];
  13605. const struct ggml_tensor * b = node->src[1];
  13606. const enum ggml_type vec_dot_type = type_traits[a->type].vec_dot_type;
  13607. #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
  13608. if (ggml_compute_forward_mul_mat_use_blas(a, b, node)) {
  13609. if (a->type != GGML_TYPE_F32) {
// here we need memory just for a single 2D matrix from src0
  13611. cur = ggml_type_size(GGML_TYPE_F32)*(a->ne[0]*a->ne[1]);
  13612. }
  13613. } else
  13614. #endif
  13615. if (b->type != vec_dot_type) {
  13616. cur = ggml_row_size(vec_dot_type, ggml_nelements(b));
  13617. }
  13618. } break;
  13619. case GGML_OP_OUT_PROD:
  13620. {
  13621. if (ggml_is_quantized(node->src[0]->type)) {
  13622. cur = ggml_type_size(GGML_TYPE_F32) * node->src[0]->ne[0] * n_tasks;
  13623. }
  13624. } break;
  13625. case GGML_OP_SOFT_MAX:
  13626. {
  13627. cur = ggml_type_size(GGML_TYPE_F32) * node->ne[0] * n_tasks;
  13628. } break;
  13629. case GGML_OP_CONV_TRANSPOSE_1D:
  13630. {
  13631. GGML_ASSERT(node->src[0]->ne[3] == 1);
  13632. GGML_ASSERT(node->src[1]->ne[2] == 1);
  13633. GGML_ASSERT(node->src[1]->ne[3] == 1);
  13634. const int64_t ne00 = node->src[0]->ne[0]; // K
  13635. const int64_t ne01 = node->src[0]->ne[1]; // Cout
  13636. const int64_t ne02 = node->src[0]->ne[2]; // Cin
  13637. const int64_t ne10 = node->src[1]->ne[0]; // L
  13638. const int64_t ne11 = node->src[1]->ne[1]; // Cin
  13639. if (node->src[0]->type == GGML_TYPE_F16 &&
  13640. node->src[1]->type == GGML_TYPE_F32) {
  13641. cur += sizeof(ggml_fp16_t)*ne00*ne01*ne02;
  13642. cur += sizeof(ggml_fp16_t)*ne10*ne11;
  13643. } else if (node->src[0]->type == GGML_TYPE_F32 &&
  13644. node->src[1]->type == GGML_TYPE_F32) {
  13645. cur += sizeof(float)*ne00*ne01*ne02;
  13646. cur += sizeof(float)*ne10*ne11;
  13647. } else {
  13648. GGML_ASSERT(false);
  13649. }
  13650. } break;
  13651. case GGML_OP_CONV_TRANSPOSE_2D:
  13652. {
  13653. const int64_t ne00 = node->src[0]->ne[0]; // W
  13654. const int64_t ne01 = node->src[0]->ne[1]; // H
  13655. const int64_t ne02 = node->src[0]->ne[2]; // Channels Out
  13656. const int64_t ne03 = node->src[0]->ne[3]; // Channels In
  13657. const int64_t ne10 = node->src[1]->ne[0]; // W
  13658. const int64_t ne11 = node->src[1]->ne[1]; // H
  13659. const int64_t ne12 = node->src[1]->ne[2]; // Channels In
  13660. cur += sizeof(ggml_fp16_t)*ne00*ne01*ne02*ne03;
  13661. cur += sizeof(ggml_fp16_t)*ne10*ne11*ne12;
  13662. } break;
  13663. case GGML_OP_FLASH_ATTN:
  13664. {
  13665. const int64_t ne11 = ggml_up(node->src[1]->ne[1], GGML_SOFT_MAX_UNROLL);
  13666. if (node->src[1]->type == GGML_TYPE_F32) {
  13667. cur = sizeof(float)*ne11*n_tasks; // TODO: this can become (n_tasks-1)
  13668. cur += sizeof(float)*ne11*n_tasks; // this is overestimated by x2
  13669. } else if (node->src[1]->type == GGML_TYPE_F16) {
  13670. cur = sizeof(float)*ne11*n_tasks; // TODO: this can become (n_tasks-1)
  13671. cur += sizeof(float)*ne11*n_tasks; // this is overestimated by x2
  13672. }
  13673. } break;
  13674. case GGML_OP_FLASH_FF:
  13675. {
  13676. if (node->src[1]->type == GGML_TYPE_F32) {
  13677. cur = sizeof(float)*node->src[1]->ne[1]*n_tasks; // TODO: this can become (n_tasks-1)
  13678. cur += sizeof(float)*node->src[1]->ne[1]*n_tasks; // this is overestimated by x2
  13679. } else if (node->src[1]->type == GGML_TYPE_F16) {
  13680. cur = sizeof(float)*node->src[1]->ne[1]*n_tasks; // TODO: this can become (n_tasks-1)
  13681. cur += sizeof(float)*node->src[1]->ne[1]*n_tasks; // this is overestimated by x2
  13682. }
  13683. } break;
  13684. case GGML_OP_FLASH_ATTN_BACK:
  13685. {
  13686. const int64_t D = node->src[0]->ne[0];
  13687. const int64_t ne11 = ggml_up(node->src[1]->ne[1], GGML_SOFT_MAX_UNROLL);
  13688. const int64_t mxDn = MAX(D, ne11) * 2; // *2 because of S and SM in ggml_compute_forward_flash_attn_back
  13689. if (node->src[1]->type == GGML_TYPE_F32) {
  13690. cur = sizeof(float)*mxDn*n_tasks; // TODO: this can become (n_tasks-1)
  13691. cur += sizeof(float)*mxDn*n_tasks; // this is overestimated by x2
  13692. } else if (node->src[1]->type == GGML_TYPE_F16) {
  13693. cur = sizeof(float)*mxDn*n_tasks; // TODO: this can become (n_tasks-1)
  13694. cur += sizeof(float)*mxDn*n_tasks; // this is overestimated by x2
  13695. }
  13696. } break;
  13697. case GGML_OP_CROSS_ENTROPY_LOSS:
  13698. {
  13699. cur = ggml_type_size(node->type)*(n_tasks + node->src[0]->ne[0]*n_tasks);
  13700. } break;
  13701. case GGML_OP_COUNT:
  13702. {
  13703. GGML_ASSERT(false);
  13704. } break;
  13705. default:
  13706. break;
  13707. }
  13708. work_size = MAX(work_size, cur);
  13709. }
  13710. if (work_size > 0) {
  13711. work_size += CACHE_LINE_SIZE*(n_threads - 1);
  13712. }
  13713. cplan.n_threads = n_threads;
  13714. cplan.work_size = work_size;
  13715. cplan.work_data = NULL;
  13716. return cplan;
  13717. }
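// evaluate the graph using a previously prepared plan; a minimal sketch of the typical call sequence
// (ggml_graph_compute_with_ctx below does the equivalent, taking the work buffer from the context):
//
//   struct ggml_cplan cplan = ggml_graph_plan(cgraph, n_threads);
//   cplan.work_data = malloc(cplan.work_size);  // hypothetical allocation - any sufficiently large buffer works
//   ggml_graph_compute(cgraph, &cplan);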
  13718. int ggml_graph_compute(struct ggml_cgraph * cgraph, struct ggml_cplan * cplan) {
  13719. {
  13720. GGML_ASSERT(cplan);
  13721. GGML_ASSERT(cplan->n_threads > 0);
  13722. if (cplan->work_size > 0) {
  13723. GGML_ASSERT(cplan->work_data);
  13724. }
  13725. }
  13726. const int n_threads = cplan->n_threads;
  13727. struct ggml_compute_state_shared state_shared = {
  13728. /*.cgraph =*/ cgraph,
/*.cplan =*/ cplan,
  13730. /*.perf_node_start_cycles =*/ 0,
  13731. /*.perf_node_start_time_us =*/ 0,
  13732. /*.n_threads =*/ n_threads,
  13733. /*.n_active =*/ n_threads,
  13734. /*.node_n =*/ -1,
  13735. /*.abort_callback =*/ NULL,
  13736. /*.abort_callback_data =*/ NULL,
  13737. };
  13738. struct ggml_compute_state * workers = alloca(sizeof(struct ggml_compute_state)*n_threads);
  13739. // create thread pool
  13740. if (n_threads > 1) {
  13741. for (int j = 1; j < n_threads; ++j) {
  13742. workers[j] = (struct ggml_compute_state) {
  13743. .thrd = 0,
  13744. .ith = j,
  13745. .shared = &state_shared,
  13746. };
  13747. const int rc = ggml_thread_create(&workers[j].thrd, NULL, ggml_graph_compute_thread, &workers[j]);
  13748. GGML_ASSERT(rc == 0);
  13749. UNUSED(rc);
  13750. }
  13751. }
  13752. workers[0].ith = 0;
  13753. workers[0].shared = &state_shared;
  13754. const int64_t perf_start_cycles = ggml_perf_cycles();
  13755. const int64_t perf_start_time_us = ggml_perf_time_us();
// the calling thread is also a worker thread
  13757. int compute_status = (size_t) ggml_graph_compute_thread(&workers[0]);
  13758. // don't leave affinity set on the main thread
  13759. clear_numa_thread_affinity();
  13760. // join or kill thread pool
  13761. if (n_threads > 1) {
  13762. for (int j = 1; j < n_threads; j++) {
  13763. const int rc = ggml_thread_join(workers[j].thrd, NULL);
  13764. GGML_ASSERT(rc == 0);
  13765. }
  13766. }
  13767. // performance stats (graph)
  13768. {
  13769. int64_t perf_cycles_cur = ggml_perf_cycles() - perf_start_cycles;
  13770. int64_t perf_time_us_cur = ggml_perf_time_us() - perf_start_time_us;
  13771. cgraph->perf_runs++;
  13772. cgraph->perf_cycles += perf_cycles_cur;
  13773. cgraph->perf_time_us += perf_time_us_cur;
  13774. GGML_PRINT_DEBUG("%s: perf (%d) - cpu = %.3f / %.3f ms, wall = %.3f / %.3f ms\n",
  13775. __func__, cgraph->perf_runs,
  13776. (double) perf_cycles_cur / (double) ggml_cycles_per_ms(),
  13777. (double) cgraph->perf_cycles / (double) ggml_cycles_per_ms() / (double) cgraph->perf_runs,
  13778. (double) perf_time_us_cur / 1000.0,
  13779. (double) cgraph->perf_time_us / 1000.0 / cgraph->perf_runs);
  13780. }
  13781. return compute_status;
  13782. }
  13783. void ggml_graph_compute_with_ctx(struct ggml_context * ctx, struct ggml_cgraph * cgraph, int n_threads) {
  13784. struct ggml_cplan cplan = ggml_graph_plan(cgraph, n_threads);
  13785. struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_WORK_BUFFER, cplan.work_size);
  13786. cplan.work_data = (uint8_t *)ctx->mem_buffer + obj->offs;
  13787. ggml_graph_compute(cgraph, &cplan);
  13788. }
  13789. struct ggml_tensor * ggml_graph_get_tensor(struct ggml_cgraph * cgraph, const char * name) {
  13790. for (int i = 0; i < cgraph->n_leafs; i++) {
  13791. struct ggml_tensor * leaf = cgraph->leafs[i];
  13792. if (strcmp(leaf->name, name) == 0) {
  13793. return leaf;
  13794. }
  13795. }
  13796. for (int i = 0; i < cgraph->n_nodes; i++) {
  13797. struct ggml_tensor * node = cgraph->nodes[i];
  13798. if (strcmp(node->name, name) == 0) {
  13799. return node;
  13800. }
  13801. }
  13802. return NULL;
  13803. }
  13804. static void ggml_graph_export_leaf(const struct ggml_tensor * tensor, FILE * fout) {
  13805. const int64_t * ne = tensor->ne;
  13806. const size_t * nb = tensor->nb;
  13807. fprintf(fout, "%-6s %-12s %8d %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %16zu %16zu %16zu %16zu %16p %32s\n",
  13808. ggml_type_name(tensor->type),
  13809. ggml_op_name (tensor->op),
  13810. ggml_n_dims(tensor),
  13811. ne[0], ne[1], ne[2], ne[3],
  13812. nb[0], nb[1], nb[2], nb[3],
  13813. tensor->data,
  13814. tensor->name);
  13815. }
  13816. static void ggml_graph_export_node(const struct ggml_tensor * tensor, const char * arg, FILE * fout) {
  13817. const int64_t * ne = tensor->ne;
  13818. const size_t * nb = tensor->nb;
  13819. fprintf(fout, "%-6s %-6s %-12s %8d %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %16zu %16zu %16zu %16zu %16p %32s\n",
  13820. arg,
  13821. ggml_type_name(tensor->type),
  13822. ggml_op_name (tensor->op),
  13823. ggml_n_dims(tensor),
  13824. ne[0], ne[1], ne[2], ne[3],
  13825. nb[0], nb[1], nb[2], nb[3],
  13826. tensor->data,
  13827. tensor->name);
  13828. }
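// export the graph in a simple binary format (a human-readable summary is also printed to stdout):
//   header : magic, version, n_leafs, n_nodes, size_eval
//   leafs  : type, op, ne/nb per dimension, name, op_params, raw tensor data
//   nodes  : type, op, ne/nb per dimension, name, op_params, GGML_MAX_SRC source indices
//            (-1 = unused, [0, n_leafs) = leaf index, >= n_leafs = node index offset by n_leafs)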
  13829. void ggml_graph_export(const struct ggml_cgraph * cgraph, const char * fname) {
  13830. uint64_t size_eval = 0;
  13831. // compute size of intermediate results
  13832. // TODO: does not take into account scratch buffers !!!!
  13833. for (int i = 0; i < cgraph->n_nodes; ++i) {
  13834. size_eval += ggml_nbytes_pad(cgraph->nodes[i]);
  13835. }
  13836. // print
  13837. {
  13838. FILE * fout = stdout;
  13839. fprintf(fout, "\n");
  13840. fprintf(fout, "%-16s %8x\n", "magic", GGML_FILE_MAGIC);
  13841. fprintf(fout, "%-16s %8d\n", "version", GGML_FILE_VERSION);
  13842. fprintf(fout, "%-16s %8d\n", "leafs", cgraph->n_leafs);
  13843. fprintf(fout, "%-16s %8d\n", "nodes", cgraph->n_nodes);
  13844. fprintf(fout, "%-16s %" PRIu64 "\n", "eval", size_eval);
  13845. // header
  13846. fprintf(fout, "\n");
  13847. fprintf(fout, "%-6s %-12s %8s %8s %8s %8s %8s %16s %16s %16s %16s %16s %16s\n",
  13848. "TYPE", "OP", "NDIMS", "NE0", "NE1", "NE2", "NE3", "NB0", "NB1", "NB2", "NB3", "DATA", "NAME");
  13849. for (int i = 0; i < cgraph->n_leafs; ++i) {
  13850. ggml_graph_export_leaf(cgraph->leafs[i], fout);
  13851. GGML_ASSERT(cgraph->leafs[i]->op == GGML_OP_NONE);
  13852. GGML_ASSERT(cgraph->leafs[i]->src[0] == NULL);
  13853. GGML_ASSERT(cgraph->leafs[i]->src[1] == NULL);
  13854. }
  13855. // header
  13856. fprintf(fout, "\n");
  13857. fprintf(fout, "%-6s %-6s %-12s %8s %8s %8s %8s %8s %16s %16s %16s %16s %8s %16s %16s\n",
  13858. "ARG", "TYPE", "OP", "NDIMS", "NE0", "NE1", "NE2", "NE3", "NB0", "NB1", "NB2", "NB3", "NTASKS", "DATA", "NAME");
  13859. for (int i = 0; i < cgraph->n_nodes; ++i) {
  13860. ggml_graph_export_node(cgraph->nodes[i], "DST", fout);
  13861. for (int j = 0; j < GGML_MAX_SRC; ++j) {
  13862. if (cgraph->nodes[i]->src[j]) {
  13863. ggml_graph_export_node(cgraph->nodes[i]->src[j], "SRC", fout);
  13864. }
  13865. }
  13866. fprintf(fout, "\n");
  13867. }
  13868. fprintf(fout, "\n");
  13869. }
  13870. // write binary data
  13871. {
  13872. FILE * fout = fopen(fname, "wb");
  13873. if (!fout) {
  13874. fprintf(stderr, "%s: failed to open %s\n", __func__, fname);
  13875. return;
  13876. }
  13877. // header
  13878. {
  13879. const uint32_t magic = GGML_FILE_MAGIC;
  13880. const uint32_t version = GGML_FILE_VERSION;
  13881. const uint32_t n_leafs = cgraph->n_leafs;
  13882. const uint32_t n_nodes = cgraph->n_nodes;
  13883. fwrite(&magic, sizeof(uint32_t), 1, fout);
  13884. fwrite(&version, sizeof(uint32_t), 1, fout);
  13885. fwrite(&n_leafs, sizeof(uint32_t), 1, fout);
  13886. fwrite(&n_nodes, sizeof(uint32_t), 1, fout);
  13887. fwrite(&size_eval, sizeof(uint64_t), 1, fout);
  13888. }
  13889. // leafs
  13890. {
  13891. for (int i = 0; i < cgraph->n_leafs; ++i) {
  13892. const struct ggml_tensor * tensor = cgraph->leafs[i];
  13893. const uint32_t type = tensor->type;
  13894. const uint32_t op = tensor->op;
  13895. fwrite(&type, sizeof(uint32_t), 1, fout);
  13896. fwrite(&op, sizeof(uint32_t), 1, fout);
  13897. for (int j = 0; j < GGML_MAX_DIMS; ++j) {
  13898. const uint64_t ne = tensor->ne[j];
  13899. const uint64_t nb = tensor->nb[j];
  13900. fwrite(&ne, sizeof(uint64_t), 1, fout);
  13901. fwrite(&nb, sizeof(uint64_t), 1, fout);
  13902. }
  13903. fwrite(tensor->name, sizeof(char), GGML_MAX_NAME, fout);
  13904. fwrite(tensor->op_params, sizeof(char), GGML_MAX_OP_PARAMS, fout);
  13905. // dump the data
  13906. // TODO: pad this to 32 byte boundary
  13907. {
  13908. const size_t size = ggml_nbytes(tensor);
  13909. fwrite(tensor->data, sizeof(char), size, fout);
  13910. }
  13911. }
  13912. }
  13913. // nodes
  13914. {
  13915. for (int i = 0; i < cgraph->n_nodes; ++i) {
  13916. const struct ggml_tensor * tensor = cgraph->nodes[i];
  13917. const uint32_t type = tensor->type;
  13918. const uint32_t op = tensor->op;
  13919. fwrite(&type, sizeof(uint32_t), 1, fout);
  13920. fwrite(&op, sizeof(uint32_t), 1, fout);
  13921. for (int j = 0; j < GGML_MAX_DIMS; ++j) {
  13922. const uint64_t ne = tensor->ne[j];
  13923. const uint64_t nb = tensor->nb[j];
  13924. fwrite(&ne, sizeof(uint64_t), 1, fout);
  13925. fwrite(&nb, sizeof(uint64_t), 1, fout);
  13926. }
  13927. fwrite(tensor->name, sizeof(char), GGML_MAX_NAME, fout);
  13928. fwrite(tensor->op_params, sizeof(char), GGML_MAX_OP_PARAMS, fout);
  13929. // output the op arguments
  13930. {
  13931. struct ggml_tensor * args[GGML_MAX_SRC] = { NULL };
  13932. for (int j = 0; j < GGML_MAX_SRC; ++j) {
  13933. args[j] = tensor->src[j];
  13934. }
  13935. for (int j = 0; j < GGML_MAX_SRC; ++j) {
  13936. if (args[j]) {
  13937. int32_t idx = -1;
  13938. // check if leaf
  13939. {
  13940. for (int k = 0; k < cgraph->n_leafs; ++k) {
  13941. if (args[j] == cgraph->leafs[k]) {
  13942. idx = k;
  13943. break;
  13944. }
  13945. }
  13946. }
  13947. // check if node
  13948. if (idx == -1) {
  13949. for (int k = 0; k < cgraph->n_nodes; ++k) {
  13950. if (args[j] == cgraph->nodes[k]) {
  13951. idx = cgraph->n_leafs + k;
  13952. break;
  13953. }
  13954. }
  13955. }
  13956. if (idx == -1) {
  13957. fprintf(stderr, "%s: failed to find tensor, arg = %d, node = %d\n", __func__, j, i);
  13958. fclose(fout);
  13959. return;
  13960. }
  13961. fwrite(&idx, sizeof(int32_t), 1, fout);
  13962. } else {
  13963. const int32_t nul = -1;
  13964. fwrite(&nul, sizeof(int32_t), 1, fout);
  13965. }
  13966. }
  13967. }
  13968. }
  13969. }
  13970. fclose(fout);
  13971. }
  13972. }
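// import a graph written by ggml_graph_export
// *ctx_data receives a single I8 tensor with the raw file contents and *ctx_eval receives the reconstructed
// graph; leaf data is not copied - it points directly into the buffer held by *ctx_data, while node tensors
// are allocated in *ctx_eval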
  13973. struct ggml_cgraph * ggml_graph_import(const char * fname, struct ggml_context ** ctx_data, struct ggml_context ** ctx_eval) {
  13974. assert(*ctx_data == NULL);
  13975. assert(*ctx_eval == NULL);
  13976. struct ggml_cgraph * result = NULL;
  13977. struct ggml_tensor * data = NULL;
  13978. // read file into data
  13979. {
  13980. FILE * fin = fopen(fname, "rb");
  13981. if (!fin) {
  13982. fprintf(stderr, "%s: failed to open %s\n", __func__, fname);
  13983. return result;
  13984. }
  13985. size_t fsize = 0;
  13986. fseek(fin, 0, SEEK_END);
  13987. fsize = ftell(fin);
  13988. fseek(fin, 0, SEEK_SET);
  13989. // create the data context
  13990. {
  13991. const size_t overhead = 1*ggml_tensor_overhead();
  13992. struct ggml_init_params params = {
  13993. .mem_size = fsize + overhead,
  13994. .mem_buffer = NULL,
  13995. .no_alloc = false,
  13996. };
  13997. *ctx_data = ggml_init(params);
  13998. if (!*ctx_data) {
  13999. fprintf(stderr, "%s: failed to create ggml context\n", __func__);
  14000. fclose(fin);
  14001. return result;
  14002. }
  14003. }
  14004. data = ggml_new_tensor_1d(*ctx_data, GGML_TYPE_I8, fsize);
  14005. {
  14006. const size_t ret = fread(data->data, sizeof(char), fsize, fin);
  14007. if (ret != fsize) {
  14008. fprintf(stderr, "%s: failed to read %s\n", __func__, fname);
  14009. fclose(fin);
  14010. return result;
  14011. }
  14012. }
  14013. fclose(fin);
  14014. }
  14015. // populate result
  14016. {
  14017. char * ptr = (char *) data->data;
  14018. const uint32_t magic = *(const uint32_t *) ptr; ptr += sizeof(magic);
  14019. if (magic != GGML_FILE_MAGIC) {
  14020. fprintf(stderr, "%s: invalid magic number, got %08x\n", __func__, magic);
  14021. return result;
  14022. }
  14023. const uint32_t version = *(const uint32_t *) ptr; ptr += sizeof(version);
  14024. if (version != GGML_FILE_VERSION) {
  14025. fprintf(stderr, "%s: invalid version number\n", __func__);
  14026. return result;
  14027. }
  14028. const uint32_t n_leafs = *(const uint32_t *) ptr; ptr += sizeof(n_leafs);
  14029. const uint32_t n_nodes = *(const uint32_t *) ptr; ptr += sizeof(n_nodes);
  14030. const uint64_t size_eval = *(const uint64_t *) ptr; ptr += sizeof(size_eval);
  14031. const int graph_size = MAX(n_leafs, n_nodes);
  14032. // create the data context
  14033. {
  14034. const size_t overhead = (n_leafs + n_nodes)*ggml_tensor_overhead() + ggml_graph_overhead_custom(graph_size, false);
  14035. struct ggml_init_params params = {
  14036. .mem_size = size_eval + overhead,
  14037. .mem_buffer = NULL,
  14038. .no_alloc = true,
  14039. };
  14040. *ctx_eval = ggml_init(params);
  14041. if (!*ctx_eval) {
  14042. fprintf(stderr, "%s: failed to create ggml context\n", __func__);
  14043. return result;
  14044. }
  14045. }
  14046. result = ggml_new_graph_custom(*ctx_eval, graph_size, false);
  14047. result->n_leafs = n_leafs;
  14048. result->n_nodes = n_nodes;
  14049. // leafs
  14050. {
  14051. uint32_t type;
  14052. uint32_t op;
  14053. for (uint32_t i = 0; i < n_leafs; ++i) {
  14054. type = *(const uint32_t *) ptr; ptr += sizeof(type);
  14055. op = *(const uint32_t *) ptr; ptr += sizeof(op);
  14056. int64_t ne[GGML_MAX_DIMS];
  14057. size_t nb[GGML_MAX_DIMS];
  14058. for (int j = 0; j < GGML_MAX_DIMS; ++j) {
  14059. uint64_t ne_cur;
  14060. uint64_t nb_cur;
  14061. ne_cur = *(const uint64_t *) ptr; ptr += sizeof(ne_cur);
  14062. nb_cur = *(const uint64_t *) ptr; ptr += sizeof(nb_cur);
  14063. ne[j] = ne_cur;
  14064. nb[j] = nb_cur;
  14065. }
  14066. struct ggml_tensor * tensor = ggml_new_tensor(*ctx_eval, (enum ggml_type) type, GGML_MAX_DIMS, ne);
  14067. tensor->op = (enum ggml_op) op;
  14068. memcpy(tensor->name, ptr, GGML_MAX_NAME); ptr += GGML_MAX_NAME;
  14069. memcpy(tensor->op_params, ptr, GGML_MAX_OP_PARAMS); ptr += GGML_MAX_OP_PARAMS;
  14070. tensor->data = (void *) ptr;
  14071. for (int j = 0; j < GGML_MAX_DIMS; ++j) {
  14072. tensor->nb[j] = nb[j];
  14073. }
  14074. result->leafs[i] = tensor;
  14075. ptr += ggml_nbytes(tensor);
  14076. fprintf(stderr, "%s: loaded leaf %d: '%16s', %9zu bytes\n", __func__, i, tensor->name, ggml_nbytes(tensor));
  14077. }
  14078. }
  14079. ggml_set_no_alloc(*ctx_eval, false);
  14080. // nodes
  14081. {
  14082. uint32_t type;
  14083. uint32_t op;
  14084. for (uint32_t i = 0; i < n_nodes; ++i) {
  14085. type = *(const uint32_t *) ptr; ptr += sizeof(type);
  14086. op = *(const uint32_t *) ptr; ptr += sizeof(op);
  14087. enum ggml_op eop = (enum ggml_op) op;
  14088. int64_t ne[GGML_MAX_DIMS];
  14089. size_t nb[GGML_MAX_DIMS];
  14090. for (int j = 0; j < GGML_MAX_DIMS; ++j) {
  14091. uint64_t ne_cur;
  14092. uint64_t nb_cur;
  14093. ne_cur = *(const uint64_t *) ptr; ptr += sizeof(ne_cur);
  14094. nb_cur = *(const uint64_t *) ptr; ptr += sizeof(nb_cur);
  14095. ne[j] = ne_cur;
  14096. nb[j] = nb_cur;
  14097. }
  14098. const char * ptr_name = ptr; ptr += GGML_MAX_NAME;
  14099. const char * ptr_op_params = ptr; ptr += GGML_MAX_OP_PARAMS;
  14100. const int32_t * ptr_arg_idx = (const int32_t *) ptr; ptr += GGML_MAX_SRC*sizeof(int32_t);
  14101. struct ggml_tensor * args[GGML_MAX_SRC] = { NULL };
  14102. // parse args
  14103. for (int j = 0; j < GGML_MAX_SRC; ++j) {
  14104. const int32_t arg_idx = ptr_arg_idx[j];
  14105. if (arg_idx == -1) {
  14106. continue;
  14107. }
  14108. if (arg_idx < result->n_leafs) {
  14109. args[j] = result->leafs[arg_idx];
  14110. } else {
  14111. args[j] = result->nodes[arg_idx - result->n_leafs];
  14112. }
  14113. }
  14114. // create the tensor
  14115. // "view" operations are handled differently
  14116. // TODO: handle inplace ops - currently a copy is always made
  14117. struct ggml_tensor * tensor = NULL;
  14118. switch (eop) {
  14119. // TODO: implement other view ops
  14120. case GGML_OP_RESHAPE:
  14121. {
  14122. tensor = ggml_reshape_4d(*ctx_eval, args[0], ne[0], ne[1], ne[2], ne[3]);
  14123. } break;
  14124. case GGML_OP_VIEW:
  14125. {
  14126. tensor = ggml_view_4d(*ctx_eval, args[0], ne[0], ne[1], ne[2], ne[3], 0, 0, 0, 0);
  14127. size_t offs;
  14128. memcpy(&offs, ptr_op_params, sizeof(offs));
  14129. tensor->data = ((char *) tensor->data) + offs;
  14130. } break;
  14131. case GGML_OP_TRANSPOSE:
  14132. {
  14133. tensor = ggml_transpose(*ctx_eval, args[0]);
  14134. } break;
  14135. case GGML_OP_PERMUTE:
  14136. {
  14137. tensor = ggml_view_4d(*ctx_eval, args[0], ne[0], ne[1], ne[2], ne[3], 0, 0, 0, 0);
  14138. } break;
  14139. default:
  14140. {
  14141. tensor = ggml_new_tensor(*ctx_eval, (enum ggml_type) type, GGML_MAX_DIMS, ne);
  14142. tensor->op = eop;
  14143. } break;
  14144. }
  14145. memcpy(tensor->name, ptr_name, GGML_MAX_NAME);
  14146. memcpy(tensor->op_params, ptr_op_params, GGML_MAX_OP_PARAMS);
  14147. for (int j = 0; j < GGML_MAX_DIMS; ++j) {
  14148. tensor->nb[j] = nb[j];
  14149. }
  14150. for (int j = 0; j < GGML_MAX_SRC; ++j) {
  14151. tensor->src[j] = args[j];
  14152. }
  14153. result->nodes[i] = tensor;
  14154. fprintf(stderr, "%s: loaded node %d: '%16s', %9zu bytes\n", __func__, i, tensor->name, ggml_nbytes(tensor));
  14155. }
  14156. }
  14157. }
  14158. return result;
  14159. }
  14160. void ggml_graph_print(const struct ggml_cgraph * cgraph) {
  14161. int64_t perf_total_per_op_us[GGML_OP_COUNT] = {0};
  14162. GGML_PRINT("=== GRAPH ===\n");
  14163. GGML_PRINT("n_nodes = %d\n", cgraph->n_nodes);
  14164. for (int i = 0; i < cgraph->n_nodes; i++) {
  14165. struct ggml_tensor * node = cgraph->nodes[i];
  14166. perf_total_per_op_us[node->op] += MAX(1, node->perf_time_us);
  14167. GGML_PRINT(" - %3d: [ %5" PRId64 ", %5" PRId64 ", %5" PRId64 "] %16s %s (%3d) cpu = %7.3f / %7.3f ms, wall = %7.3f / %7.3f ms\n",
  14168. i,
  14169. node->ne[0], node->ne[1], node->ne[2],
  14170. ggml_op_name(node->op), node->is_param ? "x" : node->grad ? "g" : " ", node->perf_runs,
  14171. (double) node->perf_cycles / (double) ggml_cycles_per_ms(),
  14172. (double) node->perf_cycles / (double) ggml_cycles_per_ms() / (double) node->perf_runs,
  14173. (double) node->perf_time_us / 1000.0,
  14174. (double) node->perf_time_us / 1000.0 / node->perf_runs);
  14175. }
  14176. GGML_PRINT("n_leafs = %d\n", cgraph->n_leafs);
  14177. for (int i = 0; i < cgraph->n_leafs; i++) {
  14178. struct ggml_tensor * node = cgraph->leafs[i];
  14179. GGML_PRINT(" - %3d: [ %5" PRId64 ", %5" PRId64 "] %8s %16s\n",
  14180. i,
  14181. node->ne[0], node->ne[1],
  14182. ggml_op_name(node->op),
  14183. ggml_get_name(node));
  14184. }
  14185. for (int i = 0; i < GGML_OP_COUNT; i++) {
  14186. if (perf_total_per_op_us[i] == 0) {
  14187. continue;
  14188. }
  14189. GGML_PRINT("perf_total_per_op_us[%16s] = %7.3f ms\n", ggml_op_name(i), (double) perf_total_per_op_us[i] / 1000.0);
  14190. }
  14191. GGML_PRINT("========================================\n");
  14192. }
  14193. // check if node is part of the graph
  14194. static bool ggml_graph_find(const struct ggml_cgraph * cgraph, const struct ggml_tensor * node) {
  14195. if (cgraph == NULL) {
  14196. return true;
  14197. }
  14198. for (int i = 0; i < cgraph->n_nodes; i++) {
  14199. if (cgraph->nodes[i] == node) {
  14200. return true;
  14201. }
  14202. }
  14203. return false;
  14204. }
  14205. static struct ggml_tensor * ggml_graph_get_parent(const struct ggml_cgraph * cgraph, const struct ggml_tensor * node) {
  14206. for (int i = 0; i < cgraph->n_nodes; i++) {
  14207. struct ggml_tensor * parent = cgraph->nodes[i];
  14208. if (parent->grad == node) {
  14209. return parent;
  14210. }
  14211. }
  14212. return NULL;
  14213. }
  14214. static void ggml_graph_dump_dot_node_edge(FILE * fp, const struct ggml_cgraph * gb, struct ggml_tensor * node, struct ggml_tensor * parent, const char * label) {
  14215. struct ggml_tensor * gparent = ggml_graph_get_parent(gb, node);
  14216. struct ggml_tensor * gparent0 = ggml_graph_get_parent(gb, parent);
  14217. fprintf(fp, " \"%p\":%s -> \"%p\":%s [ arrowhead = %s; style = %s; label = \"%s\"; ]\n",
  14218. gparent0 ? (void *) gparent0 : (void *) parent,
  14219. gparent0 ? "g" : "x",
  14220. gparent ? (void *) gparent : (void *) node,
  14221. gparent ? "g" : "x",
  14222. gparent ? "empty" : "vee",
  14223. gparent ? "dashed" : "solid",
  14224. label);
  14225. }
  14226. static void ggml_graph_dump_dot_leaf_edge(FILE * fp, struct ggml_tensor * node, struct ggml_tensor * parent, const char * label) {
  14227. fprintf(fp, " \"%p\":%s -> \"%p\":%s [ label = \"%s\"; ]\n",
  14228. (void *) parent, "x",
  14229. (void *) node, "x",
  14230. label);
  14231. }
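// write the graph in graphviz dot format, e.g.:
//   ggml_graph_dump_dot(gb, gf, "graph.dot");   // gf may be NULL; it is only used to color nodes that appear in the forward graph
// the resulting file can be rendered with: dot -Tpng graph.dot -o graph.png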
  14232. void ggml_graph_dump_dot(const struct ggml_cgraph * gb, const struct ggml_cgraph * gf, const char * filename) {
  14233. char color[16];
  14234. FILE * fp = fopen(filename, "w");
  14235. GGML_ASSERT(fp);
  14236. fprintf(fp, "digraph G {\n");
  14237. fprintf(fp, " newrank = true;\n");
  14238. fprintf(fp, " rankdir = LR;\n");
  14239. for (int i = 0; i < gb->n_nodes; i++) {
  14240. struct ggml_tensor * node = gb->nodes[i];
  14241. if (ggml_graph_get_parent(gb, node) != NULL) {
  14242. continue;
  14243. }
  14244. if (node->is_param) {
  14245. snprintf(color, sizeof(color), "yellow");
  14246. } else if (node->grad) {
  14247. if (ggml_graph_find(gf, node)) {
  14248. snprintf(color, sizeof(color), "green");
  14249. } else {
  14250. snprintf(color, sizeof(color), "lightblue");
  14251. }
  14252. } else {
  14253. snprintf(color, sizeof(color), "white");
  14254. }
  14255. fprintf(fp, " \"%p\" [ "
  14256. "style = filled; fillcolor = %s; shape = record; "
  14257. "label=\"",
  14258. (void *) node, color);
  14259. if (strlen(node->name) > 0) {
  14260. fprintf(fp, "%s (%s)|", node->name, ggml_type_name(node->type));
  14261. } else {
  14262. fprintf(fp, "(%s)|", ggml_type_name(node->type));
  14263. }
  14264. if (ggml_is_matrix(node)) {
  14265. fprintf(fp, "%d [%" PRId64 ", %" PRId64 "] | <x>%s", i, node->ne[0], node->ne[1], ggml_op_symbol(node->op));
  14266. } else {
  14267. fprintf(fp, "%d [%" PRId64 ", %" PRId64 ", %" PRId64 "] | <x>%s", i, node->ne[0], node->ne[1], node->ne[2], ggml_op_symbol(node->op));
  14268. }
  14269. if (node->grad) {
  14270. fprintf(fp, " | <g>%s\"; ]\n", ggml_op_symbol(node->grad->op));
  14271. } else {
  14272. fprintf(fp, "\"; ]\n");
  14273. }
  14274. }
  14275. for (int i = 0; i < gb->n_leafs; i++) {
  14276. struct ggml_tensor * node = gb->leafs[i];
  14277. snprintf(color, sizeof(color), "pink");
  14278. fprintf(fp, " \"%p\" [ "
  14279. "style = filled; fillcolor = %s; shape = record; "
  14280. "label=\"<x>",
  14281. (void *) node, color);
  14282. if (strlen(node->name) > 0) {
  14283. fprintf(fp, "%s (%s)|", node->name, ggml_type_name(node->type));
  14284. } else {
  14285. fprintf(fp, "(%s)|", ggml_type_name(node->type));
  14286. }
  14287. fprintf(fp, "CONST %d [%" PRId64 ", %" PRId64 "]", i, node->ne[0], node->ne[1]);
  14288. if (ggml_nelements(node) < 5) {
  14289. fprintf(fp, " | (");
  14290. for (int j = 0; j < ggml_nelements(node); j++) {
  14291. if (node->type == GGML_TYPE_I8 || node->type == GGML_TYPE_I16 || node->type == GGML_TYPE_I32) {
  14292. fprintf(fp, "%d", ggml_get_i32_1d(node, j));
  14293. }
  14294. else if (node->type == GGML_TYPE_F32 || node->type == GGML_TYPE_F16) {
  14295. fprintf(fp, "%.1e", (double)ggml_get_f32_1d(node, j));
  14296. }
  14297. else {
  14298. fprintf(fp, "#");
  14299. }
  14300. if (j < ggml_nelements(node) - 1) {
  14301. fprintf(fp, ", ");
  14302. }
  14303. }
  14304. fprintf(fp, ")");
  14305. }
  14306. fprintf(fp, "\"; ]\n");
  14307. }
  14308. for (int i = 0; i < gb->n_nodes; i++) {
  14309. struct ggml_tensor * node = gb->nodes[i];
  14310. for (int j = 0; j < GGML_MAX_SRC; j++) {
  14311. if (node->src[j]) {
  14312. char label[16];
  14313. snprintf(label, sizeof(label), "src %d", j);
  14314. ggml_graph_dump_dot_node_edge(fp, gb, node, node->src[j], label);
  14315. }
  14316. }
  14317. }
  14318. for (int i = 0; i < gb->n_leafs; i++) {
  14319. struct ggml_tensor * node = gb->leafs[i];
  14320. for (int j = 0; j < GGML_MAX_SRC; j++) {
  14321. if (node->src[j]) {
  14322. char label[16];
  14323. snprintf(label, sizeof(label), "src %d", j);
  14324. ggml_graph_dump_dot_leaf_edge(fp, node, node->src[j], label);
  14325. }
  14326. }
  14327. }
  14328. fprintf(fp, "}\n");
  14329. fclose(fp);
  14330. GGML_PRINT("%s: dot -Tpng %s -o %s.png && open %s.png\n", __func__, filename, filename, filename);
  14331. }
  14332. ////////////////////////////////////////////////////////////////////////////////
  14333. static void ggml_opt_set_params(int np, struct ggml_tensor * const ps[], const float * x) {
  14334. int i = 0;
  14335. for (int p = 0; p < np; ++p) {
14336. const int64_t ne = ggml_nelements(ps[p]);
  14337. // TODO: add function to set tensor from array
  14338. for (int64_t j = 0; j < ne; ++j) {
  14339. ggml_set_f32_1d(ps[p], j, x[i++]);
  14340. }
  14341. }
  14342. }
  14343. static void ggml_opt_get_params(int np, struct ggml_tensor * const ps[], float * x) {
  14344. int i = 0;
  14345. for (int p = 0; p < np; ++p) {
14346. const int64_t ne = ggml_nelements(ps[p]);
  14347. // TODO: add function to get all elements at once
  14348. for (int64_t j = 0; j < ne; ++j) {
  14349. x[i++] = ggml_get_f32_1d(ps[p], j);
  14350. }
  14351. }
  14352. }
  14353. static void ggml_opt_get_grad(int np, struct ggml_tensor * const ps[], float * g) {
  14354. int64_t i = 0;
  14355. for (int p = 0; p < np; ++p) {
14356. const int64_t ne = ggml_nelements(ps[p]);
  14357. // TODO: add function to get all elements at once
  14358. for (int64_t j = 0; j < ne; ++j) {
  14359. g[i++] = ggml_get_f32_1d(ps[p]->grad, j);
  14360. }
  14361. }
  14362. }
  14363. static void ggml_opt_acc_grad(int np, struct ggml_tensor * const ps[], float * g, float scale) {
  14364. int64_t i = 0;
  14365. for (int p = 0; p < np; ++p) {
14366. const int64_t ne = ggml_nelements(ps[p]);
  14367. // TODO: add function to get all elements at once
  14368. for (int64_t j = 0; j < ne; ++j) {
  14369. g[i++] += ggml_get_f32_1d(ps[p]->grad, j) * scale;
  14370. }
  14371. }
  14372. }
  14373. //
  14374. // ADAM
  14375. //
  14376. // ref: https://arxiv.org/pdf/1412.6980.pdf
  14377. //
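// Summary of the per-element update performed below, with the bias-correction
// factors folded into beta1h and beta2h and sched scaling both the learning
// rate and the weight decay:
//
//   m    = beta1*m + (1 - beta1)*g
//   v    = beta2*v + (1 - beta2)*g^2
//   mhat = m/(1 - beta1^t)
//   vhat = v/(1 - beta2^t)
//   x    = x*(1 - alpha*sched*decay) - alpha*sched*mhat/(sqrt(vhat) + eps)
//
// where g is the (optionally clipped) accumulated gradient, decay is
// params.adam.decay, and the decay term is only applied to tensors with at
// least decay_min_ndim dimensions.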
  14378. static enum ggml_opt_result ggml_opt_adam(
  14379. struct ggml_context * ctx,
  14380. struct ggml_opt_context * opt,
  14381. struct ggml_opt_params params,
  14382. struct ggml_tensor * f,
  14383. struct ggml_cgraph * gf,
  14384. struct ggml_cgraph * gb,
  14385. ggml_opt_callback callback,
  14386. void * callback_data) {
  14387. GGML_ASSERT(ggml_is_scalar(f));
  14388. // these will store the parameters we want to optimize
  14389. struct ggml_tensor * ps[GGML_MAX_PARAMS];
  14390. int np = 0;
  14391. int64_t nx = 0;
  14392. for (int i = 0; i < gf->n_nodes; ++i) {
  14393. if (gf->nodes[i]->is_param) {
  14394. GGML_PRINT_DEBUG("found param %d: grad->op = %d\n", np, gf->nodes[i]->grad->op);
  14395. GGML_ASSERT(np < GGML_MAX_PARAMS);
  14396. ps[np++] = gf->nodes[i];
  14397. nx += ggml_nelements(gf->nodes[i]);
  14398. }
  14399. }
  14400. if ((opt->params.type != params.type) || (opt->nx != nx) || (opt->params.past != params.past)) {
  14401. int iter = opt->iter;
  14402. ggml_opt_init(opt->ctx, opt, params, nx);
  14403. opt->iter = iter;
  14404. }
  14405. // constants
  14406. float sched = params.adam.sched;
  14407. const float alpha = params.adam.alpha;
  14408. const float decay = params.adam.decay * alpha;
  14409. const float beta1 = params.adam.beta1;
  14410. const float beta2 = params.adam.beta2;
  14411. const float eps = params.adam.eps;
  14412. const float gclip = params.adam.gclip;
  14413. const int decay_min_ndim = params.adam.decay_min_ndim;
  14414. const int n_accum = MAX(1, params.n_gradient_accumulation);
  14415. const float accum_norm = 1.0f / (float) n_accum;
  14416. float * g = opt->adam.g->data; // gradients
  14417. float * m = opt->adam.m->data; // first moment
  14418. float * v = opt->adam.v->data; // second moment
  14419. float * pf = params.past > 0 ? opt->adam.pf->data : NULL; // past function values
  14420. struct ggml_cplan cplan = ggml_graph_plan(gb, params.n_threads);
  14421. struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_WORK_BUFFER, cplan.work_size);
  14422. cplan.work_data = (uint8_t *)ctx->mem_buffer + obj->offs;
  14423. bool cancel = false;
  14424. // compute the function value
  14425. float fx = 0;
  14426. ggml_set_zero(opt->adam.g);
  14427. for (int accum_step = 0; accum_step < n_accum; ++accum_step) {
  14428. if (callback) {
  14429. callback(callback_data, accum_step, &sched, &cancel);
  14430. if (cancel) {
  14431. return GGML_OPT_CANCEL;
  14432. }
  14433. }
  14434. // ggml_graph_reset (gf);
  14435. ggml_set_f32 (f->grad, 1.0f);
  14436. ggml_graph_compute(gb, &cplan);
  14437. ggml_opt_acc_grad(np, ps, g, accum_norm);
  14438. fx += ggml_get_f32_1d(f, 0);
  14439. }
  14440. fx *= accum_norm;
  14441. opt->adam.fx_prev = fx;
  14442. opt->adam.fx_best = opt->adam.fx_prev;
  14443. if (pf) {
  14444. pf[opt->iter % params.past] = opt->adam.fx_prev;
  14445. }
  14446. opt->loss_before = opt->adam.fx_prev;
  14447. opt->loss_after = opt->adam.fx_prev;
  14448. // initialize
  14449. if (opt->just_initialized) {
  14450. opt->adam.n_no_improvement = 0;
  14451. opt->just_initialized = false;
  14452. }
  14453. float * fx_best = &opt->adam.fx_best;
  14454. float * fx_prev = &opt->adam.fx_prev;
  14455. int * n_no_improvement = &opt->adam.n_no_improvement;
  14456. int iter0 = opt->iter;
  14457. // run the optimizer
  14458. for (int t = 0; t < params.adam.n_iter; ++t) {
  14459. opt->iter = iter0 + t + 1;
  14460. GGML_PRINT_DEBUG ("=== iter %d ===\n", t);
  14461. GGML_PRINT_DEBUG ("f = %10.6f\n", ggml_get_f32_1d(f, 0));
  14462. GGML_PRINT_DEBUG_5("df/dx0 = %10.6f\n", ggml_get_f32_1d(ps[0]->grad, 0));
  14463. GGML_PRINT_DEBUG_5("df/dx1 = %10.6f\n", ggml_get_f32_1d(ps[1]->grad, 0));
  14464. for (int i = 0; i < np; ++i) {
  14465. GGML_PRINT_DEBUG("param %d: %10.6f, g = %10.6f\n", i,
  14466. ggml_get_f32_1d(ps[i], 0), ggml_get_f32_1d(ps[i]->grad, 0));
  14467. }
  14468. const int64_t t_start_wall = ggml_time_us();
  14469. const int64_t t_start_cpu = ggml_cycles();
  14470. UNUSED(t_start_wall);
  14471. UNUSED(t_start_cpu);
  14472. {
  14473. float gnorm = 1.0f;
  14474. if (gclip > 0.0f) {
  14475. // gradient clipping
  14476. ggml_float sum = 0.0;
  14477. for (int64_t i = 0; i < nx; ++i) {
  14478. sum += (ggml_float)(g[i]*g[i]);
  14479. }
  14480. ggml_float norm = sqrt(sum);
  14481. if (norm > (ggml_float) gclip) {
  14482. gnorm = (float) ((ggml_float) gclip / norm);
  14483. }
  14484. }
  14485. const float beta1h = alpha*sched/(1.0f - powf(beta1, opt->iter));
  14486. const float beta2h = 1.0f/(1.0f - powf(beta2, opt->iter));
  14487. int64_t i = 0;
  14488. for (int p = 0; p < np; ++p) {
  14489. const int64_t ne = ggml_nelements(ps[p]);
  14490. const float p_decay = ((ggml_n_dims(ps[p]) >= decay_min_ndim) ? decay : 0.0f) * sched;
  14491. for (int64_t j = 0; j < ne; ++j) {
  14492. float x = ggml_get_f32_1d(ps[p], j);
  14493. float g_ = g[i]*gnorm;
  14494. m[i] = m[i]*beta1 + g_*(1.0f - beta1);
  14495. v[i] = v[i]*beta2 + g_*g_*(1.0f - beta2);
  14496. float mh = m[i]*beta1h;
  14497. float vh = v[i]*beta2h;
  14498. vh = sqrtf(vh) + eps;
  14499. x = x*(1.0f - p_decay) - mh/vh;
  14500. ggml_set_f32_1d(ps[p], j, x);
  14501. ++i;
  14502. }
  14503. }
  14504. }
  14505. fx = 0;
  14506. ggml_set_zero(opt->adam.g);
  14507. for (int accum_step = 0; accum_step < n_accum; ++accum_step) {
  14508. if (callback) {
  14509. callback(callback_data, accum_step, &sched, &cancel);
  14510. if (cancel) {
14511. return GGML_OPT_CANCEL;
  14512. }
  14513. }
  14514. // ggml_graph_reset (gf);
  14515. ggml_set_f32 (f->grad, 1.0f);
  14516. ggml_graph_compute(gb, &cplan);
  14517. ggml_opt_acc_grad(np, ps, g, accum_norm);
  14518. fx += ggml_get_f32_1d(f, 0);
  14519. }
  14520. fx *= accum_norm;
  14521. opt->loss_after = fx;
  14522. // check convergence
  14523. if (fabsf(fx - fx_prev[0])/fx < params.adam.eps_f) {
  14524. GGML_PRINT_DEBUG("converged\n");
  14525. return GGML_OPT_OK;
  14526. }
  14527. // delta-based convergence test
  14528. if (pf != NULL) {
  14529. // need at least params.past iterations to start checking for convergence
  14530. if (params.past <= iter0 + t) {
  14531. const float rate = (pf[(iter0 + t)%params.past] - fx)/fx;
  14532. if (fabsf(rate) < params.delta) {
  14533. return GGML_OPT_OK;
  14534. }
  14535. }
  14536. pf[(iter0 + t)%params.past] = fx;
  14537. }
  14538. // check for improvement
  14539. if (params.max_no_improvement > 0) {
  14540. if (fx_best[0] > fx) {
  14541. fx_best[0] = fx;
  14542. n_no_improvement[0] = 0;
  14543. } else {
  14544. ++n_no_improvement[0];
  14545. if (n_no_improvement[0] >= params.max_no_improvement) {
  14546. return GGML_OPT_OK;
  14547. }
  14548. }
  14549. }
  14550. fx_prev[0] = fx;
  14551. {
  14552. const int64_t t_end_cpu = ggml_cycles();
  14553. GGML_PRINT_DEBUG("time iter: %5.3f s\n", ((float)(t_end_cpu - t_start_cpu))/CLOCKS_PER_SEC);
  14554. UNUSED(t_end_cpu);
  14555. const int64_t t_end_wall = ggml_time_us();
  14556. GGML_PRINT_DEBUG("wall time iter: %5.3f s\n", (t_end_wall - t_start_wall)/1e6);
  14557. UNUSED(t_end_wall);
  14558. }
  14559. }
  14560. return GGML_OPT_DID_NOT_CONVERGE;
  14561. }
  14562. //
  14563. // L-BFGS
  14564. //
  14565. // the L-BFGS implementation below is based on the following implementation:
  14566. //
  14567. // https://github.com/chokkan/liblbfgs
  14568. //
  14569. struct ggml_lbfgs_iteration_data {
  14570. float alpha;
  14571. float ys;
  14572. float * s;
  14573. float * y;
  14574. };
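// The backtracking line search below scales the step by dec (0.5) or inc (2.1)
// until one of the following holds, depending on params->lbfgs.linesearch:
//
//   Armijo:        f(x + step*d) <= f(x) + ftol*step*(g.d)
//   Wolfe:         Armijo  and  g_new.d >= wolfe*(g.d)
//   strong Wolfe:  Wolfe   and  g_new.d <= -wolfe*(g.d)
//
// where g.d (= dginit) is the initial directional derivative, which must be
// negative for d to be a descent direction.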
  14575. static enum ggml_opt_result linesearch_backtracking(
  14576. const struct ggml_opt_params * params,
  14577. int nx,
  14578. float * x,
  14579. float * fx,
  14580. float * g,
  14581. float * d,
  14582. float * step,
  14583. const float * xp,
  14584. struct ggml_tensor * f,
  14585. struct ggml_cgraph * gb,
  14586. struct ggml_cplan * cplan,
  14587. const int np,
  14588. struct ggml_tensor * ps[],
  14589. bool * cancel,
  14590. ggml_opt_callback callback,
  14591. void * callback_data) {
  14592. int count = 0;
  14593. float width = 0.0f;
  14594. float dg = 0.0f;
  14595. float finit = 0.0f;
  14596. float dginit = 0.0f;
  14597. float dgtest = 0.0f;
  14598. const float dec = 0.5f;
  14599. const float inc = 2.1f;
  14600. const int n_accum = MAX(1, params->n_gradient_accumulation);
  14601. const float accum_norm = 1.0f / (float) n_accum;
  14602. if (*step <= 0.f) {
  14603. return GGML_LINESEARCH_INVALID_PARAMETERS;
  14604. }
  14605. // compute the initial gradient in the search direction
  14606. ggml_vec_dot_f32(nx, &dginit, g, d);
  14607. // make sure that d points to a descent direction
  14608. if (0 < dginit) {
  14609. return GGML_LINESEARCH_FAIL;
  14610. }
  14611. // initialize local variables
  14612. finit = *fx;
  14613. dgtest = params->lbfgs.ftol*dginit;
  14614. while (true) {
  14615. ggml_vec_cpy_f32(nx, x, xp);
  14616. ggml_vec_mad_f32(nx, x, d, *step);
  14617. // evaluate the function and gradient values
  14618. {
  14619. ggml_opt_set_params(np, ps, x);
  14620. *fx = 0;
  14621. memset(g, 0, sizeof(float)*nx);
  14622. for (int accum_step = 0; accum_step < n_accum; ++accum_step) {
  14623. if (callback) {
14624. // L-BFGS does not support a learning rate -> ignore the learning schedule
  14625. float sched = 0;
  14626. callback(callback_data, accum_step, &sched, cancel);
  14627. if (*cancel) {
  14628. return GGML_OPT_CANCEL;
  14629. }
  14630. }
  14631. // ggml_graph_reset (gf);
  14632. ggml_set_f32 (f->grad, 1.0f);
  14633. ggml_graph_compute(gb, cplan);
  14634. ggml_opt_acc_grad(np, ps, g, accum_norm);
  14635. *fx += ggml_get_f32_1d(f, 0);
  14636. }
  14637. *fx *= accum_norm;
  14638. }
  14639. ++count;
  14640. if (*fx > finit + (*step)*dgtest) {
  14641. width = dec;
  14642. } else {
  14643. // Armijo condition is satisfied
  14644. if (params->lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_ARMIJO) {
  14645. return count;
  14646. }
  14647. ggml_vec_dot_f32(nx, &dg, g, d);
  14648. // check the Wolfe condition
  14649. if (dg < params->lbfgs.wolfe * dginit) {
  14650. width = inc;
  14651. } else {
14652. if (params->lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_WOLFE) {
  14653. // regular Wolfe conditions
  14654. return count;
  14655. }
14656. if (dg > -params->lbfgs.wolfe*dginit) {
  14657. width = dec;
  14658. } else {
  14659. // strong Wolfe condition (GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE)
  14660. return count;
  14661. }
  14662. }
  14663. }
  14664. if (*step < params->lbfgs.min_step) {
  14665. return GGML_LINESEARCH_MINIMUM_STEP;
  14666. }
  14667. if (*step > params->lbfgs.max_step) {
  14668. return GGML_LINESEARCH_MAXIMUM_STEP;
  14669. }
  14670. if (params->lbfgs.max_linesearch <= count) {
  14671. return GGML_LINESEARCH_MAXIMUM_ITERATIONS;
  14672. }
  14673. (*step) *= width;
  14674. }
  14675. GGML_UNREACHABLE();
  14676. }
  14677. static enum ggml_opt_result ggml_opt_lbfgs(
  14678. struct ggml_context * ctx,
  14679. struct ggml_opt_context * opt,
  14680. struct ggml_opt_params params,
  14681. struct ggml_tensor * f,
  14682. struct ggml_cgraph * gf,
  14683. struct ggml_cgraph * gb,
  14684. ggml_opt_callback callback,
  14685. void * callback_data) {
  14686. if (params.lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_WOLFE ||
  14687. params.lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE) {
  14688. if (params.lbfgs.wolfe <= params.lbfgs.ftol || 1.f <= params.lbfgs.wolfe) {
  14689. return GGML_OPT_INVALID_WOLFE;
  14690. }
  14691. }
  14692. const int m = params.lbfgs.m;
  14693. // these will store the parameters we want to optimize
  14694. struct ggml_tensor * ps[GGML_MAX_PARAMS];
  14695. int np = 0;
  14696. int nx = 0;
  14697. for (int i = 0; i < gf->n_nodes; ++i) {
  14698. if (gf->nodes[i]->is_param) {
  14699. GGML_PRINT_DEBUG("found param %d: grad->op = %d\n", np, gf->nodes[i]->grad->op);
  14700. GGML_ASSERT(np < GGML_MAX_PARAMS);
  14701. ps[np++] = gf->nodes[i];
  14702. nx += ggml_nelements(gf->nodes[i]);
  14703. }
  14704. }
  14705. if ((opt->params.type != params.type) || (opt->nx != nx) || (opt->params.past != params.past) || (opt->params.lbfgs.m != params.lbfgs.m)) {
  14706. int iter = opt->iter;
  14707. ggml_opt_init(ctx, opt, params, nx);
  14708. opt->iter = iter;
  14709. }
  14710. struct ggml_cplan cplan = ggml_graph_plan(gb, params.n_threads);
  14711. struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_WORK_BUFFER, cplan.work_size);
  14712. cplan.work_data = (uint8_t *)ctx->mem_buffer + obj->offs;
  14713. float * x = opt->lbfgs.x->data; // current parameters
  14714. float * xp = opt->lbfgs.xp->data; // previous parameters
  14715. float * g = opt->lbfgs.g->data; // current gradient
  14716. float * gp = opt->lbfgs.gp->data; // previous gradient
  14717. float * d = opt->lbfgs.d->data; // search direction
  14718. float * pf = params.past > 0 ? opt->lbfgs.pf->data : NULL; // past function values
  14719. const int n_accum = MAX(1, params.n_gradient_accumulation);
  14720. const float accum_norm = 1.0f / (float) n_accum;
  14721. float fx = 0.0f; // cost function value
  14722. float xnorm = 0.0f; // ||x||
  14723. float gnorm = 0.0f; // ||g||
  14724. // initialize x from the graph nodes
  14725. ggml_opt_get_params(np, ps, x);
  14726. // the L-BFGS memory
  14727. float * lm_alpha = opt->lbfgs.lmal->data;
  14728. float * lm_ys = opt->lbfgs.lmys->data;
  14729. float * lm_s = opt->lbfgs.lms->data;
  14730. float * lm_y = opt->lbfgs.lmy->data;
  14731. bool cancel = false;
  14732. // evaluate the function value and its gradient
  14733. {
  14734. ggml_opt_set_params(np, ps, x);
  14735. fx = 0;
  14736. memset(g, 0, sizeof(float)*nx);
  14737. for (int accum_step = 0; accum_step < n_accum; ++accum_step) {
  14738. if (callback) {
14739. // L-BFGS does not support a learning rate -> ignore the learning schedule
  14740. float sched = 0;
  14741. callback(callback_data, accum_step, &sched, &cancel);
  14742. if (cancel) {
  14743. return GGML_OPT_CANCEL;
  14744. }
  14745. }
  14746. // ggml_graph_reset (gf);
  14747. ggml_set_f32 (f->grad, 1.0f);
  14748. ggml_graph_compute(gb, &cplan);
  14749. ggml_opt_acc_grad(np, ps, g, accum_norm);
  14750. fx += ggml_get_f32_1d(f, 0);
  14751. }
  14752. fx *= accum_norm;
  14753. opt->loss_before = fx;
  14754. opt->loss_after = fx;
  14755. }
  14756. // search direction = -gradient
  14757. ggml_vec_neg_f32(nx, d, g);
  14758. // ||x||, ||g||
  14759. ggml_vec_norm_f32(nx, &xnorm, x);
  14760. ggml_vec_norm_f32(nx, &gnorm, g);
  14761. if (xnorm < 1.0f) {
  14762. xnorm = 1.0f;
  14763. }
  14764. // already optimized
  14765. if (gnorm/xnorm <= params.lbfgs.eps) {
  14766. return GGML_OPT_OK;
  14767. }
  14768. if (opt->just_initialized) {
  14769. if (pf) {
  14770. pf[0] = fx;
  14771. }
  14772. opt->lbfgs.fx_best = fx;
  14773. // initial step
  14774. ggml_vec_norm_inv_f32(nx, &opt->lbfgs.step, d);
  14775. opt->lbfgs.j = 0;
  14776. opt->lbfgs.k = 1;
  14777. opt->lbfgs.end = 0;
  14778. opt->lbfgs.n_no_improvement = 0;
  14779. opt->just_initialized = false;
  14780. }
  14781. float * fx_best = &opt->lbfgs.fx_best;
  14782. float * step = &opt->lbfgs.step;
  14783. int * j = &opt->lbfgs.j;
  14784. int * k = &opt->lbfgs.k;
  14785. int * end = &opt->lbfgs.end;
  14786. int * n_no_improvement = &opt->lbfgs.n_no_improvement;
  14787. int ls = 0;
  14788. int bound = 0;
  14789. float ys = 0.0f;
  14790. float yy = 0.0f;
  14791. float beta = 0.0f;
  14792. int it = 0;
  14793. while (true) {
  14794. // store the current position and gradient vectors
  14795. ggml_vec_cpy_f32(nx, xp, x);
  14796. ggml_vec_cpy_f32(nx, gp, g);
  14797. // TODO: instead of passing &cancel here, use the return code of the linesearch
  14798. // to determine if the optimization should be cancelled
  14799. // this is a simple change, but not doing this atm, since I don't have a nice
  14800. // way to test and don't want to break something with so many changes lined up
  14801. ls = linesearch_backtracking(&params, nx, x, &fx, g, d, step, xp, f, gb, &cplan, np, ps, &cancel, callback, callback_data);
  14802. if (cancel) {
  14803. return GGML_OPT_CANCEL;
  14804. }
  14805. if (ls < 0) {
  14806. // linesearch failed - go back to the previous point and return
  14807. ggml_vec_cpy_f32(nx, x, xp);
  14808. ggml_vec_cpy_f32(nx, g, gp);
  14809. return ls;
  14810. }
  14811. opt->loss_after = fx;
  14812. ggml_vec_norm_f32(nx, &xnorm, x);
  14813. ggml_vec_norm_f32(nx, &gnorm, g);
  14814. GGML_PRINT_DEBUG("f = %10.6f\n", ggml_get_f32_1d(f, 0));
  14815. if (xnorm < 1.0f) {
  14816. xnorm = 1.0f;
  14817. }
  14818. if (gnorm/xnorm <= params.lbfgs.eps) {
  14819. // converged
  14820. return GGML_OPT_OK;
  14821. }
  14822. // delta-based convergence test
  14823. if (pf != NULL) {
  14824. // need at least params.past iterations to start checking for convergence
  14825. if (params.past <= k[0]) {
  14826. const float rate = (pf[k[0]%params.past] - fx)/fx;
  14827. if (fabsf(rate) < params.delta) {
  14828. return GGML_OPT_OK;
  14829. }
  14830. }
  14831. pf[k[0]%params.past] = fx;
  14832. }
  14833. // check for improvement
  14834. if (params.max_no_improvement > 0) {
  14835. if (fx < fx_best[0]) {
  14836. fx_best[0] = fx;
  14837. n_no_improvement[0] = 0;
  14838. } else {
  14839. n_no_improvement[0]++;
  14840. if (n_no_improvement[0] >= params.max_no_improvement) {
  14841. return GGML_OPT_OK;
  14842. }
  14843. }
  14844. }
  14845. if (params.lbfgs.n_iter != 0 && params.lbfgs.n_iter < it + 1) {
  14846. // reached the maximum number of iterations
  14847. return GGML_OPT_DID_NOT_CONVERGE;
  14848. }
  14849. // update vectors s and y:
  14850. // s_{k+1} = x_{k+1} - x_{k} = \step * d_{k}.
  14851. // y_{k+1} = g_{k+1} - g_{k}.
  14852. //
  14853. ggml_vec_sub_f32(nx, &lm_s[end[0]*nx], x, xp);
  14854. ggml_vec_sub_f32(nx, &lm_y[end[0]*nx], g, gp);
  14855. // compute scalars ys and yy:
  14856. // ys = y^t \cdot s -> 1 / \rho.
  14857. // yy = y^t \cdot y.
  14858. //
  14859. ggml_vec_dot_f32(nx, &ys, &lm_y[end[0]*nx], &lm_s[end[0]*nx]);
  14860. ggml_vec_dot_f32(nx, &yy, &lm_y[end[0]*nx], &lm_y[end[0]*nx]);
  14861. lm_ys[end[0]] = ys;
  14862. // find new search direction
  14863. // ref: https://en.wikipedia.org/wiki/Limited-memory_BFGS
  14864. bound = (m <= k[0]) ? m : k[0];
  14865. k[0]++;
  14866. it++;
  14867. end[0] = (end[0] + 1)%m;
  14868. // initialize search direction with -g
  14869. ggml_vec_neg_f32(nx, d, g);
  14870. j[0] = end[0];
  14871. for (int i = 0; i < bound; ++i) {
  14872. j[0] = (j[0] + m - 1) % m;
  14873. // \alpha_{j} = \rho_{j} s^{t}_{j} \cdot q_{k+1}
  14874. ggml_vec_dot_f32(nx, &lm_alpha[j[0]], &lm_s[j[0]*nx], d);
  14875. lm_alpha[j[0]] /= lm_ys[j[0]];
  14876. // q_{i} = q_{i+1} - \alpha_{i} y_{i}
  14877. ggml_vec_mad_f32(nx, d, &lm_y[j[0]*nx], -lm_alpha[j[0]]);
  14878. }
  14879. ggml_vec_scale_f32(nx, d, ys/yy);
  14880. for (int i = 0; i < bound; ++i) {
  14881. // \beta_{j} = \rho_{j} y^t_{j} \cdot \gamma_{i}
  14882. ggml_vec_dot_f32(nx, &beta, &lm_y[j[0]*nx], d);
  14883. beta /= lm_ys[j[0]];
  14884. // \gamma_{i+1} = \gamma_{i} + (\alpha_{j} - \beta_{j}) s_{j}
  14885. ggml_vec_mad_f32(nx, d, &lm_s[j[0]*nx], lm_alpha[j[0]] - beta);
  14886. j[0] = (j[0] + 1)%m;
  14887. }
  14888. step[0] = 1.0;
  14889. }
  14890. GGML_UNREACHABLE();
  14891. }
  14892. struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type) {
  14893. struct ggml_opt_params result;
  14894. switch (type) {
  14895. case GGML_OPT_ADAM:
  14896. {
  14897. result = (struct ggml_opt_params) {
  14898. .type = GGML_OPT_ADAM,
  14899. .graph_size = GGML_DEFAULT_GRAPH_SIZE,
  14900. .n_threads = 1, // FIXME: GGML_DEFAULT_N_THREADS ?
  14901. .past = 0,
  14902. .delta = 1e-5f,
  14903. .max_no_improvement = 100,
  14904. .print_forward_graph = true,
  14905. .print_backward_graph = true,
  14906. .n_gradient_accumulation = 1,
  14907. .adam = {
  14908. .n_iter = 10000,
  14909. .sched = 1.000f,
  14910. .decay = 0.0f,
  14911. .decay_min_ndim = 2,
  14912. .alpha = 0.001f,
  14913. .beta1 = 0.9f,
  14914. .beta2 = 0.999f,
  14915. .eps = 1e-8f,
  14916. .eps_f = 1e-5f,
  14917. .eps_g = 1e-3f,
  14918. .gclip = 0.0f,
  14919. },
  14920. };
  14921. } break;
  14922. case GGML_OPT_LBFGS:
  14923. {
  14924. result = (struct ggml_opt_params) {
  14925. .type = GGML_OPT_LBFGS,
  14926. .graph_size = GGML_DEFAULT_GRAPH_SIZE,
  14927. .n_threads = 1,
  14928. .past = 0,
  14929. .delta = 1e-5f,
  14930. .max_no_improvement = 0,
  14931. .print_forward_graph = true,
  14932. .print_backward_graph = true,
  14933. .n_gradient_accumulation = 1,
  14934. .lbfgs = {
  14935. .m = 6,
  14936. .n_iter = 100,
  14937. .max_linesearch = 20,
  14938. .eps = 1e-5f,
  14939. .ftol = 1e-4f,
  14940. .wolfe = 0.9f,
  14941. .min_step = 1e-20f,
  14942. .max_step = 1e+20f,
  14943. .linesearch = GGML_LINESEARCH_DEFAULT,
  14944. },
  14945. };
  14946. } break;
  14947. }
  14948. return result;
  14949. }
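// Example (hypothetical usage sketch, not part of the original file): fit a
// per-element scale `a` so that a*x approximates y, using the default Adam
// parameters and the one-shot ggml_opt() entry point defined further below.
//
//   struct ggml_init_params ip = {
//       .mem_size   = 16*1024*1024,
//       .mem_buffer = NULL,
//       .no_alloc   = false,
//   };
//   struct ggml_context * ctx0 = ggml_init(ip);
//
//   struct ggml_tensor * x = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 8);  // inputs
//   struct ggml_tensor * y = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 8);  // targets
//   struct ggml_tensor * a = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 8);  // parameters
//   ggml_set_param(ctx0, a);  // mark `a` as an optimizable parameter
//   // ... fill x, y and the initial value of a ...
//
//   // scalar loss: f = sum((a*x - y)^2)
//   struct ggml_tensor * f = ggml_sum(ctx0, ggml_sqr(ctx0, ggml_sub(ctx0, ggml_mul(ctx0, a, x), y)));
//
//   struct ggml_opt_params opt_params = ggml_opt_default_params(GGML_OPT_ADAM);
//   enum ggml_opt_result res = ggml_opt(NULL, opt_params, f);  // NULL -> a temporary work context is created
//   // res is GGML_OPT_OK on convergence, GGML_OPT_DID_NOT_CONVERGE otherwise
//
//   ggml_free(ctx0);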
  14950. GGML_API void ggml_opt_init(
  14951. struct ggml_context * ctx,
  14952. struct ggml_opt_context * opt,
  14953. struct ggml_opt_params params,
  14954. int64_t nx) {
  14955. opt->ctx = ctx;
  14956. opt->params = params;
  14957. opt->iter = 0;
  14958. opt->nx = nx;
  14959. opt->just_initialized = true;
  14960. if (opt->ctx == NULL) {
  14961. struct ggml_init_params ctx_opt_params;
  14962. if (opt->params.type == GGML_OPT_ADAM) {
  14963. ctx_opt_params.mem_size = GGML_MEM_ALIGN*3 + ggml_tensor_overhead()*3 + ggml_type_size(GGML_TYPE_F32)*nx*3;
  14964. if (opt->params.past > 0) {
  14965. ctx_opt_params.mem_size += GGML_MEM_ALIGN + ggml_tensor_overhead() + ggml_type_size(GGML_TYPE_F32)*opt->params.past;
  14966. }
  14967. } else if (opt->params.type == GGML_OPT_LBFGS) {
  14968. ctx_opt_params.mem_size = GGML_MEM_ALIGN*9 + ggml_tensor_overhead()*9 + ggml_type_size(GGML_TYPE_F32)*(nx*5 + opt->params.lbfgs.m*2 + nx*opt->params.lbfgs.m*2);
  14969. if (opt->params.past > 0) {
  14970. ctx_opt_params.mem_size += GGML_MEM_ALIGN + ggml_tensor_overhead() + ggml_type_size(GGML_TYPE_F32)*opt->params.past;
  14971. }
  14972. }
  14973. ctx_opt_params.mem_buffer = NULL;
  14974. ctx_opt_params.no_alloc = false;
  14975. opt->ctx = ggml_init(ctx_opt_params);
  14976. }
  14977. switch (opt->params.type) {
  14978. case GGML_OPT_ADAM:
  14979. {
  14980. opt->adam.g = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
  14981. opt->adam.m = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
  14982. opt->adam.v = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
  14983. opt->adam.pf = params.past > 0
  14984. ? ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, params.past)
  14985. : NULL;
  14986. ggml_set_zero(opt->adam.m);
  14987. ggml_set_zero(opt->adam.v);
  14988. if (opt->adam.pf) {
  14989. ggml_set_zero(opt->adam.pf);
  14990. }
  14991. } break;
  14992. case GGML_OPT_LBFGS:
  14993. {
  14994. opt->lbfgs.x = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
  14995. opt->lbfgs.xp = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
  14996. opt->lbfgs.g = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
  14997. opt->lbfgs.gp = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
  14998. opt->lbfgs.d = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
  14999. opt->lbfgs.pf = params.past > 0
  15000. ? ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, params.past)
  15001. : NULL;
  15002. opt->lbfgs.lmal = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, params.lbfgs.m);
  15003. opt->lbfgs.lmys = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, params.lbfgs.m);
  15004. opt->lbfgs.lms = ggml_new_tensor_2d(opt->ctx, GGML_TYPE_F32, nx, params.lbfgs.m);
  15005. opt->lbfgs.lmy = ggml_new_tensor_2d(opt->ctx, GGML_TYPE_F32, nx, params.lbfgs.m);
  15006. ggml_set_zero(opt->lbfgs.x);
  15007. ggml_set_zero(opt->lbfgs.xp);
  15008. ggml_set_zero(opt->lbfgs.g);
  15009. ggml_set_zero(opt->lbfgs.gp);
  15010. ggml_set_zero(opt->lbfgs.d);
  15011. if (opt->lbfgs.pf) {
  15012. ggml_set_zero(opt->lbfgs.pf);
  15013. }
  15014. ggml_set_zero(opt->lbfgs.lmal);
  15015. ggml_set_zero(opt->lbfgs.lmys);
  15016. ggml_set_zero(opt->lbfgs.lms);
  15017. ggml_set_zero(opt->lbfgs.lmy);
  15018. } break;
  15019. }
  15020. }
  15021. enum ggml_opt_result ggml_opt(
  15022. struct ggml_context * ctx,
  15023. struct ggml_opt_params params,
  15024. struct ggml_tensor * f) {
  15025. bool free_ctx = false;
  15026. if (ctx == NULL) {
  15027. struct ggml_init_params params_ctx = {
  15028. .mem_size = 16*1024*1024,
  15029. .mem_buffer = NULL,
  15030. .no_alloc = false,
  15031. };
  15032. ctx = ggml_init(params_ctx);
  15033. if (ctx == NULL) {
  15034. return GGML_OPT_NO_CONTEXT;
  15035. }
  15036. free_ctx = true;
  15037. }
  15038. enum ggml_opt_result result = GGML_OPT_OK;
  15039. struct ggml_opt_context * opt = (struct ggml_opt_context *) alloca(sizeof(struct ggml_opt_context));
  15040. ggml_opt_init(ctx, opt, params, 0);
  15041. result = ggml_opt_resume(ctx, opt, f);
  15042. if (free_ctx) {
  15043. ggml_free(ctx);
  15044. }
  15045. return result;
  15046. }
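// For repeated/stateful optimization, a caller can keep its own
// ggml_opt_context: initialize it once with ggml_opt_init() and then call
// ggml_opt_resume() (or ggml_opt_resume_g() with pre-built graphs) multiple
// times; the iteration counter, Adam moments and past-loss history are
// preserved across calls. ggml_opt() above is the stateless one-shot variant.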
  15047. enum ggml_opt_result ggml_opt_resume(
  15048. struct ggml_context * ctx,
  15049. struct ggml_opt_context * opt,
  15050. struct ggml_tensor * f) {
  15051. // build forward + backward compute graphs
  15052. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx, opt->params.graph_size, true);
  15053. ggml_build_forward_expand(gf, f);
  15054. struct ggml_cgraph * gb = ggml_graph_dup(ctx, gf);
  15055. ggml_build_backward_expand(ctx, gf, gb, true);
  15056. return ggml_opt_resume_g(ctx, opt, f, gf, gb, NULL, NULL);
  15057. }
  15058. enum ggml_opt_result ggml_opt_resume_g(
  15059. struct ggml_context * ctx,
  15060. struct ggml_opt_context * opt,
  15061. struct ggml_tensor * f,
  15062. struct ggml_cgraph * gf,
  15063. struct ggml_cgraph * gb,
  15064. ggml_opt_callback callback,
  15065. void * callback_data) {
15066. // run the optimizer using the provided forward + backward compute graphs
  15067. enum ggml_opt_result result = GGML_OPT_OK;
  15068. switch (opt->params.type) {
  15069. case GGML_OPT_ADAM:
  15070. {
  15071. result = ggml_opt_adam(ctx, opt, opt->params, f, gf, gb, callback, callback_data);
  15072. } break;
  15073. case GGML_OPT_LBFGS:
  15074. {
  15075. result = ggml_opt_lbfgs(ctx, opt, opt->params, f, gf, gb, callback, callback_data);
  15076. } break;
  15077. }
  15078. if (opt->params.print_forward_graph) {
  15079. ggml_graph_print (gf);
  15080. ggml_graph_dump_dot(gf, NULL, "opt-forward.dot");
  15081. }
  15082. if (opt->params.print_backward_graph) {
  15083. ggml_graph_print (gb);
  15084. ggml_graph_dump_dot(gb, gf, "opt-backward.dot");
  15085. }
  15086. return result;
  15087. }
  15088. ////////////////////////////////////////////////////////////////////////////////
  15089. size_t ggml_quantize_q4_0(const float * src, void * dst, int n, int k, int64_t * hist) {
  15090. assert(k % QK4_0 == 0);
  15091. const int nb = k / QK4_0;
  15092. for (int b = 0; b < n; b += k) {
  15093. block_q4_0 * restrict y = (block_q4_0 *) dst + b/QK4_0;
  15094. quantize_row_q4_0_reference(src + b, y, k);
  15095. for (int i = 0; i < nb; i++) {
  15096. for (int j = 0; j < QK4_0; j += 2) {
  15097. const uint8_t vi0 = y[i].qs[j/2] & 0x0F;
  15098. const uint8_t vi1 = y[i].qs[j/2] >> 4;
  15099. hist[vi0]++;
  15100. hist[vi1]++;
  15101. }
  15102. }
  15103. }
  15104. return (n/QK4_0*sizeof(block_q4_0));
  15105. }
  15106. size_t ggml_quantize_q4_1(const float * src, void * dst, int n, int k, int64_t * hist) {
  15107. assert(k % QK4_1 == 0);
  15108. const int nb = k / QK4_1;
  15109. for (int b = 0; b < n; b += k) {
  15110. block_q4_1 * restrict y = (block_q4_1 *) dst + b/QK4_1;
  15111. quantize_row_q4_1_reference(src + b, y, k);
  15112. for (int i = 0; i < nb; i++) {
  15113. for (int j = 0; j < QK4_1; j += 2) {
  15114. const uint8_t vi0 = y[i].qs[j/2] & 0x0F;
  15115. const uint8_t vi1 = y[i].qs[j/2] >> 4;
  15116. hist[vi0]++;
  15117. hist[vi1]++;
  15118. }
  15119. }
  15120. }
  15121. return (n/QK4_1*sizeof(block_q4_1));
  15122. }
  15123. size_t ggml_quantize_q5_0(const float * src, void * dst, int n, int k, int64_t * hist) {
  15124. assert(k % QK5_0 == 0);
  15125. const int nb = k / QK5_0;
  15126. for (int b = 0; b < n; b += k) {
  15127. block_q5_0 * restrict y = (block_q5_0 *)dst + b/QK5_0;
  15128. quantize_row_q5_0_reference(src + b, y, k);
  15129. for (int i = 0; i < nb; i++) {
  15130. uint32_t qh;
  15131. memcpy(&qh, &y[i].qh, sizeof(qh));
  15132. for (int j = 0; j < QK5_0; j += 2) {
  15133. const uint8_t vh0 = ((qh & (1u << (j/2 + 0 ))) >> (j/2 + 0 )) << 4;
  15134. const uint8_t vh1 = ((qh & (1u << (j/2 + 16))) >> (j/2 + 12));
  15135. // cast to 16 bins
  15136. const uint8_t vi0 = ((y[i].qs[j/2] & 0x0F) | vh0) / 2;
  15137. const uint8_t vi1 = ((y[i].qs[j/2] >> 4) | vh1) / 2;
  15138. hist[vi0]++;
  15139. hist[vi1]++;
  15140. }
  15141. }
  15142. }
  15143. return (n/QK5_0*sizeof(block_q5_0));
  15144. }
  15145. size_t ggml_quantize_q5_1(const float * src, void * dst, int n, int k, int64_t * hist) {
  15146. assert(k % QK5_1 == 0);
  15147. const int nb = k / QK5_1;
  15148. for (int b = 0; b < n; b += k) {
  15149. block_q5_1 * restrict y = (block_q5_1 *)dst + b/QK5_1;
  15150. quantize_row_q5_1_reference(src + b, y, k);
  15151. for (int i = 0; i < nb; i++) {
  15152. uint32_t qh;
  15153. memcpy(&qh, &y[i].qh, sizeof(qh));
  15154. for (int j = 0; j < QK5_1; j += 2) {
  15155. const uint8_t vh0 = ((qh & (1u << (j/2 + 0 ))) >> (j/2 + 0 )) << 4;
  15156. const uint8_t vh1 = ((qh & (1u << (j/2 + 16))) >> (j/2 + 12));
  15157. // cast to 16 bins
  15158. const uint8_t vi0 = ((y[i].qs[j/2] & 0x0F) | vh0) / 2;
  15159. const uint8_t vi1 = ((y[i].qs[j/2] >> 4) | vh1) / 2;
  15160. hist[vi0]++;
  15161. hist[vi1]++;
  15162. }
  15163. }
  15164. }
  15165. return (n/QK5_1*sizeof(block_q5_1));
  15166. }
  15167. size_t ggml_quantize_q8_0(const float * src, void * dst, int n, int k, int64_t * hist) {
  15168. assert(k % QK8_0 == 0);
  15169. const int nb = k / QK8_0;
  15170. for (int b = 0; b < n; b += k) {
  15171. block_q8_0 * restrict y = (block_q8_0 *)dst + b/QK8_0;
  15172. quantize_row_q8_0_reference(src + b, y, k);
  15173. for (int i = 0; i < nb; i++) {
  15174. for (int j = 0; j < QK8_0; ++j) {
  15175. const int8_t vi = y[i].qs[j];
  15176. hist[vi/16 + 8]++;
  15177. }
  15178. }
  15179. }
  15180. return (n/QK8_0*sizeof(block_q8_0));
  15181. }
  15182. size_t ggml_quantize_chunk(enum ggml_type type, const float * src, void * dst, int start, int n, int64_t * hist) {
  15183. size_t result = 0;
  15184. switch (type) {
  15185. case GGML_TYPE_Q4_0:
  15186. {
  15187. GGML_ASSERT(start % QK4_0 == 0);
  15188. block_q4_0 * block = (block_q4_0*)dst + start / QK4_0;
  15189. result = ggml_quantize_q4_0(src + start, block, n, n, hist);
  15190. } break;
  15191. case GGML_TYPE_Q4_1:
  15192. {
  15193. GGML_ASSERT(start % QK4_1 == 0);
  15194. block_q4_1 * block = (block_q4_1*)dst + start / QK4_1;
  15195. result = ggml_quantize_q4_1(src + start, block, n, n, hist);
  15196. } break;
  15197. case GGML_TYPE_Q5_0:
  15198. {
  15199. GGML_ASSERT(start % QK5_0 == 0);
  15200. block_q5_0 * block = (block_q5_0*)dst + start / QK5_0;
  15201. result = ggml_quantize_q5_0(src + start, block, n, n, hist);
  15202. } break;
  15203. case GGML_TYPE_Q5_1:
  15204. {
  15205. GGML_ASSERT(start % QK5_1 == 0);
  15206. block_q5_1 * block = (block_q5_1*)dst + start / QK5_1;
  15207. result = ggml_quantize_q5_1(src + start, block, n, n, hist);
  15208. } break;
  15209. case GGML_TYPE_Q8_0:
  15210. {
  15211. GGML_ASSERT(start % QK8_0 == 0);
  15212. block_q8_0 * block = (block_q8_0*)dst + start / QK8_0;
  15213. result = ggml_quantize_q8_0(src + start, block, n, n, hist);
  15214. } break;
  15215. case GGML_TYPE_Q2_K:
  15216. {
  15217. GGML_ASSERT(start % QK_K == 0);
  15218. block_q2_K * block = (block_q2_K*)dst + start / QK_K;
  15219. result = ggml_quantize_q2_K(src + start, block, n, n, hist);
  15220. } break;
  15221. case GGML_TYPE_Q3_K:
  15222. {
  15223. GGML_ASSERT(start % QK_K == 0);
  15224. block_q3_K * block = (block_q3_K*)dst + start / QK_K;
  15225. result = ggml_quantize_q3_K(src + start, block, n, n, hist);
  15226. } break;
  15227. case GGML_TYPE_Q4_K:
  15228. {
  15229. GGML_ASSERT(start % QK_K == 0);
  15230. block_q4_K * block = (block_q4_K*)dst + start / QK_K;
  15231. result = ggml_quantize_q4_K(src + start, block, n, n, hist);
  15232. } break;
  15233. case GGML_TYPE_Q5_K:
  15234. {
  15235. GGML_ASSERT(start % QK_K == 0);
  15236. block_q5_K * block = (block_q5_K*)dst + start / QK_K;
  15237. result = ggml_quantize_q5_K(src + start, block, n, n, hist);
  15238. } break;
  15239. case GGML_TYPE_Q6_K:
  15240. {
  15241. GGML_ASSERT(start % QK_K == 0);
  15242. block_q6_K * block = (block_q6_K*)dst + start / QK_K;
  15243. result = ggml_quantize_q6_K(src + start, block, n, n, hist);
  15244. } break;
  15245. case GGML_TYPE_F16:
  15246. {
  15247. int elemsize = sizeof(ggml_fp16_t);
  15248. ggml_fp32_to_fp16_row(src + start, (ggml_fp16_t *)dst + start, n);
  15249. result = n * elemsize;
  15250. } break;
  15251. case GGML_TYPE_F32:
  15252. {
  15253. int elemsize = sizeof(float);
  15254. result = n * elemsize;
  15255. memcpy((uint8_t *)dst + start * elemsize, src + start, result);
  15256. } break;
  15257. default:
  15258. assert(false);
  15259. }
  15260. return result;
  15261. }
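// Example (hypothetical usage sketch, not part of the original file): quantize
// n float values starting at offset `start` of `src` into Q4_0 blocks.
//
//   int64_t hist[16] = {0};  // 16-bin histogram of the quantized values
//   size_t  bytes    = ggml_quantize_chunk(GGML_TYPE_Q4_0, src, dst, start, n, hist);
//
// `start` and `n` are assumed to be multiples of the block size QK4_0 (32) and
// `dst` must have room for (start + n)/QK4_0 blocks of block_q4_0; the return
// value is the number of bytes produced for this chunk.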
  15262. ////////////////////////////////////////////////////////////////////////////////
  15263. struct gguf_str {
  15264. uint64_t n; // GGUFv2
  15265. char * data;
  15266. };
  15267. static const size_t GGUF_TYPE_SIZE[GGUF_TYPE_COUNT] = {
  15268. [GGUF_TYPE_UINT8] = sizeof(uint8_t),
  15269. [GGUF_TYPE_INT8] = sizeof(int8_t),
  15270. [GGUF_TYPE_UINT16] = sizeof(uint16_t),
  15271. [GGUF_TYPE_INT16] = sizeof(int16_t),
  15272. [GGUF_TYPE_UINT32] = sizeof(uint32_t),
  15273. [GGUF_TYPE_INT32] = sizeof(int32_t),
  15274. [GGUF_TYPE_FLOAT32] = sizeof(float),
  15275. [GGUF_TYPE_BOOL] = sizeof(bool),
  15276. [GGUF_TYPE_STRING] = sizeof(struct gguf_str),
  15277. [GGUF_TYPE_UINT64] = sizeof(uint64_t),
  15278. [GGUF_TYPE_INT64] = sizeof(int64_t),
  15279. [GGUF_TYPE_FLOAT64] = sizeof(double),
  15280. [GGUF_TYPE_ARRAY] = 0, // undefined
  15281. };
  15282. static_assert(GGUF_TYPE_COUNT == 13, "GGUF_TYPE_COUNT != 13");
  15283. static const char * GGUF_TYPE_NAME[GGUF_TYPE_COUNT] = {
  15284. [GGUF_TYPE_UINT8] = "u8",
  15285. [GGUF_TYPE_INT8] = "i8",
  15286. [GGUF_TYPE_UINT16] = "u16",
  15287. [GGUF_TYPE_INT16] = "i16",
  15288. [GGUF_TYPE_UINT32] = "u32",
  15289. [GGUF_TYPE_INT32] = "i32",
  15290. [GGUF_TYPE_FLOAT32] = "f32",
  15291. [GGUF_TYPE_BOOL] = "bool",
  15292. [GGUF_TYPE_STRING] = "str",
  15293. [GGUF_TYPE_ARRAY] = "arr",
  15294. [GGUF_TYPE_UINT64] = "u64",
  15295. [GGUF_TYPE_INT64] = "i64",
  15296. [GGUF_TYPE_FLOAT64] = "f64",
  15297. };
  15298. static_assert(GGUF_TYPE_COUNT == 13, "GGUF_TYPE_COUNT != 13");
  15299. union gguf_value {
  15300. uint8_t uint8;
  15301. int8_t int8;
  15302. uint16_t uint16;
  15303. int16_t int16;
  15304. uint32_t uint32;
  15305. int32_t int32;
  15306. float float32;
  15307. uint64_t uint64;
  15308. int64_t int64;
  15309. double float64;
  15310. bool bool_;
  15311. struct gguf_str str;
  15312. struct {
  15313. enum gguf_type type;
  15314. uint64_t n; // GGUFv2
  15315. void * data;
  15316. } arr;
  15317. };
  15318. struct gguf_kv {
  15319. struct gguf_str key;
  15320. enum gguf_type type;
  15321. union gguf_value value;
  15322. };
  15323. struct gguf_header {
  15324. char magic[4];
  15325. uint32_t version;
  15326. uint64_t n_tensors; // GGUFv2
  15327. uint64_t n_kv; // GGUFv2
  15328. };
  15329. struct gguf_tensor_info {
  15330. struct gguf_str name;
  15331. uint32_t n_dims;
  15332. uint64_t ne[GGML_MAX_DIMS];
  15333. enum ggml_type type;
  15334. uint64_t offset; // offset from start of `data`, must be a multiple of `ALIGNMENT`
  15335. // for writing API
  15336. const void * data;
  15337. size_t size;
  15338. };
  15339. struct gguf_context {
  15340. struct gguf_header header;
  15341. struct gguf_kv * kv;
  15342. struct gguf_tensor_info * infos;
  15343. size_t alignment;
  15344. size_t offset; // offset of `data` from beginning of file
  15345. size_t size; // size of `data` in bytes
  15346. //uint8_t * padding;
  15347. void * data;
  15348. };
  15349. static bool gguf_fread_el(FILE * file, void * dst, size_t size, size_t * offset) {
  15350. const size_t n = fread(dst, 1, size, file);
  15351. *offset += n;
  15352. return n == size;
  15353. }
  15354. static bool gguf_fread_str(FILE * file, struct gguf_str * p, size_t * offset) {
  15355. p->n = 0;
  15356. p->data = NULL;
  15357. bool ok = true;
  15358. ok = ok && gguf_fread_el(file, &p->n, sizeof(p->n), offset); p->data = calloc(p->n + 1, 1);
  15359. ok = ok && gguf_fread_el(file, p->data, p->n, offset);
  15360. return ok;
  15361. }
  15362. struct gguf_context * gguf_init_empty(void) {
  15363. struct gguf_context * ctx = GGML_ALIGNED_MALLOC(sizeof(struct gguf_context));
  15364. memcpy(ctx->header.magic, GGUF_MAGIC, sizeof(ctx->header.magic));
  15365. ctx->header.version = GGUF_VERSION;
  15366. ctx->header.n_tensors = 0;
  15367. ctx->header.n_kv = 0;
  15368. ctx->kv = NULL;
  15369. ctx->infos = NULL;
  15370. ctx->alignment = GGUF_DEFAULT_ALIGNMENT;
  15371. ctx->offset = 0;
  15372. ctx->size = 0;
  15373. ctx->data = NULL;
  15374. return ctx;
  15375. }
  15376. struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_params params) {
  15377. FILE * file = fopen(fname, "rb");
  15378. if (!file) {
  15379. return NULL;
  15380. }
  15381. // offset from start of file
  15382. size_t offset = 0;
  15383. char magic[4];
  15384. // check the magic before making allocations
  15385. {
  15386. gguf_fread_el(file, &magic, sizeof(magic), &offset);
  15387. for (uint32_t i = 0; i < sizeof(magic); i++) {
  15388. if (magic[i] != GGUF_MAGIC[i]) {
  15389. fprintf(stderr, "%s: invalid magic characters '%c%c%c%c'\n", __func__, magic[0], magic[1], magic[2], magic[3]);
  15390. fclose(file);
  15391. return NULL;
  15392. }
  15393. }
  15394. }
  15395. bool ok = true;
  15396. struct gguf_context * ctx = GGML_ALIGNED_MALLOC(sizeof(struct gguf_context));
  15397. // read the header
  15398. {
  15399. strncpy(ctx->header.magic, magic, 4);
  15400. ctx->kv = NULL;
  15401. ctx->infos = NULL;
  15402. ctx->data = NULL;
  15403. ok = ok && gguf_fread_el(file, &ctx->header.version, sizeof(ctx->header.version), &offset);
  15404. ok = ok && gguf_fread_el(file, &ctx->header.n_tensors, sizeof(ctx->header.n_tensors), &offset);
  15405. ok = ok && gguf_fread_el(file, &ctx->header.n_kv, sizeof(ctx->header.n_kv), &offset);
  15406. if (ctx->header.version == 1) {
  15407. fprintf(stderr, "%s: GGUFv1 is no longer supported. please use a more up-to-date version\n", __func__);
  15408. fclose(file);
  15409. gguf_free(ctx);
  15410. return NULL;
  15411. }
  15412. if (!ok) {
  15413. fprintf(stderr, "%s: failed to read header\n", __func__);
  15414. fclose(file);
  15415. gguf_free(ctx);
  15416. return NULL;
  15417. }
  15418. }
  15419. // read the kv pairs
  15420. {
  15421. ctx->kv = malloc(ctx->header.n_kv * sizeof(struct gguf_kv));
  15422. for (uint64_t i = 0; i < ctx->header.n_kv; ++i) {
  15423. struct gguf_kv * kv = &ctx->kv[i];
  15424. //fprintf(stderr, "%s: reading kv %d\n", __func__, i);
  15425. ok = ok && gguf_fread_str(file, &kv->key, &offset);
  15426. ok = ok && gguf_fread_el (file, &kv->type, sizeof(kv->type), &offset);
  15427. //fprintf(stderr, "%s: reading kv with key %s\n", __func__, kv->key.data);
  15428. switch (kv->type) {
  15429. case GGUF_TYPE_UINT8: ok = ok && gguf_fread_el (file, &kv->value.uint8, sizeof(kv->value.uint8), &offset); break;
  15430. case GGUF_TYPE_INT8: ok = ok && gguf_fread_el (file, &kv->value.int8, sizeof(kv->value.int8), &offset); break;
  15431. case GGUF_TYPE_UINT16: ok = ok && gguf_fread_el (file, &kv->value.uint16, sizeof(kv->value.uint16), &offset); break;
  15432. case GGUF_TYPE_INT16: ok = ok && gguf_fread_el (file, &kv->value.int16, sizeof(kv->value.int16), &offset); break;
  15433. case GGUF_TYPE_UINT32: ok = ok && gguf_fread_el (file, &kv->value.uint32, sizeof(kv->value.uint32), &offset); break;
  15434. case GGUF_TYPE_INT32: ok = ok && gguf_fread_el (file, &kv->value.int32, sizeof(kv->value.int32), &offset); break;
  15435. case GGUF_TYPE_FLOAT32: ok = ok && gguf_fread_el (file, &kv->value.float32, sizeof(kv->value.float32), &offset); break;
  15436. case GGUF_TYPE_UINT64: ok = ok && gguf_fread_el (file, &kv->value.uint64, sizeof(kv->value.uint64), &offset); break;
  15437. case GGUF_TYPE_INT64: ok = ok && gguf_fread_el (file, &kv->value.int64, sizeof(kv->value.int64), &offset); break;
  15438. case GGUF_TYPE_FLOAT64: ok = ok && gguf_fread_el (file, &kv->value.float64, sizeof(kv->value.float64), &offset); break;
  15439. case GGUF_TYPE_BOOL: ok = ok && gguf_fread_el (file, &kv->value.bool_, sizeof(kv->value.bool_), &offset); break;
  15440. case GGUF_TYPE_STRING: ok = ok && gguf_fread_str(file, &kv->value.str, &offset); break;
  15441. case GGUF_TYPE_ARRAY:
  15442. {
  15443. ok = ok && gguf_fread_el(file, &kv->value.arr.type, sizeof(kv->value.arr.type), &offset);
  15444. ok = ok && gguf_fread_el(file, &kv->value.arr.n, sizeof(kv->value.arr.n), &offset);
  15445. switch (kv->value.arr.type) {
  15446. case GGUF_TYPE_UINT8:
  15447. case GGUF_TYPE_INT8:
  15448. case GGUF_TYPE_UINT16:
  15449. case GGUF_TYPE_INT16:
  15450. case GGUF_TYPE_UINT32:
  15451. case GGUF_TYPE_INT32:
  15452. case GGUF_TYPE_FLOAT32:
  15453. case GGUF_TYPE_UINT64:
  15454. case GGUF_TYPE_INT64:
  15455. case GGUF_TYPE_FLOAT64:
  15456. case GGUF_TYPE_BOOL:
  15457. {
  15458. kv->value.arr.data = malloc(kv->value.arr.n * GGUF_TYPE_SIZE[kv->value.arr.type]);
  15459. ok = ok && gguf_fread_el(file, kv->value.arr.data, kv->value.arr.n * GGUF_TYPE_SIZE[kv->value.arr.type], &offset);
  15460. } break;
  15461. case GGUF_TYPE_STRING:
  15462. {
  15463. kv->value.arr.data = malloc(kv->value.arr.n * sizeof(struct gguf_str));
  15464. for (uint64_t j = 0; j < kv->value.arr.n; ++j) {
  15465. ok = ok && gguf_fread_str(file, &((struct gguf_str *) kv->value.arr.data)[j], &offset);
  15466. }
  15467. } break;
  15468. case GGUF_TYPE_ARRAY:
  15469. case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type"); break;
  15470. }
  15471. } break;
  15472. case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type");
  15473. }
  15474. if (!ok) {
  15475. break;
  15476. }
  15477. }
  15478. if (!ok) {
  15479. fprintf(stderr, "%s: failed to read key-value pairs\n", __func__);
  15480. fclose(file);
  15481. gguf_free(ctx);
  15482. return NULL;
  15483. }
  15484. }
  15485. // read the tensor infos
  15486. {
  15487. ctx->infos = malloc(ctx->header.n_tensors * sizeof(struct gguf_tensor_info));
  15488. for (uint64_t i = 0; i < ctx->header.n_tensors; ++i) {
  15489. struct gguf_tensor_info * info = &ctx->infos[i];
  15490. for (int j = 0; j < GGML_MAX_DIMS; ++j) {
  15491. info->ne[j] = 1;
  15492. }
  15493. ok = ok && gguf_fread_str(file, &info->name, &offset);
  15494. ok = ok && gguf_fread_el (file, &info->n_dims, sizeof(info->n_dims), &offset);
  15495. for (uint32_t j = 0; j < info->n_dims; ++j) {
  15496. ok = ok && gguf_fread_el(file, &info->ne[j], sizeof(info->ne[j]), &offset);
  15497. }
  15498. ok = ok && gguf_fread_el (file, &info->type, sizeof(info->type), &offset);
  15499. ok = ok && gguf_fread_el (file, &info->offset, sizeof(info->offset), &offset);
  15500. if (!ok) {
  15501. fprintf(stderr, "%s: failed to read tensor info\n", __func__);
  15502. fclose(file);
  15503. gguf_free(ctx);
  15504. return NULL;
  15505. }
  15506. }
  15507. }
  15508. ctx->alignment = GGUF_DEFAULT_ALIGNMENT;
  15509. int alignment_idx = gguf_find_key(ctx, "general.alignment");
  15510. if (alignment_idx != -1) {
  15511. ctx->alignment = gguf_get_val_u32(ctx, alignment_idx);
  15512. }
  15513. // we require the data section to be aligned, so take into account any padding
  15514. {
  15515. const size_t offset_pad = offset % ctx->alignment;
  15516. if (offset_pad != 0) {
  15517. offset += ctx->alignment - offset_pad;
  15518. fseek(file, offset, SEEK_SET);
  15519. }
  15520. }
  15521. // store the current file offset - this is where the data section starts
  15522. ctx->offset = offset;
  15523. // compute the total size of the data section, taking into account the alignment
  15524. {
  15525. ctx->size = 0;
  15526. for (uint64_t i = 0; i < ctx->header.n_tensors; ++i) {
  15527. struct gguf_tensor_info * info = &ctx->infos[i];
  15528. const int64_t ne =
  15529. (int64_t) info->ne[0] *
  15530. (int64_t) info->ne[1] *
  15531. (int64_t) info->ne[2] *
  15532. (int64_t) info->ne[3];
  15533. if (ne % ggml_blck_size(info->type) != 0) {
  15534. fprintf(stderr, "%s: tensor '%s' number of elements (%" PRId64 ") is not a multiple of block size (%d)\n",
  15535. __func__, info->name.data, ne, ggml_blck_size(info->type));
  15536. fclose(file);
  15537. gguf_free(ctx);
  15538. return NULL;
  15539. }
  15540. const size_t size_cur = ggml_row_size(info->type, ne);
  15541. ctx->size += GGML_PAD(size_cur, ctx->alignment);
  15542. }
  15543. }
  15544. // load the tensor data only if requested
  15545. if (params.ctx != NULL) {
  15546. // if the provided gguf_context is no_alloc, then we create "empty" tensors and do not read the binary blob
  15547. // otherwise, we load the binary blob into the created ggml_context as well, and point the "data" members of
  15548. // the ggml_tensor structs to the appropriate locations in the binary blob
  15549. // compute the exact size needed for the new ggml_context
  15550. const size_t mem_size =
  15551. params.no_alloc ?
  15552. (ctx->header.n_tensors )*ggml_tensor_overhead() :
  15553. (ctx->header.n_tensors + 1)*ggml_tensor_overhead() + ctx->size;
  15554. struct ggml_init_params pdata = {
  15555. .mem_size = mem_size,
  15556. .mem_buffer = NULL,
  15557. .no_alloc = params.no_alloc,
  15558. };
  15559. *params.ctx = ggml_init(pdata);
  15560. struct ggml_context * ctx_data = *params.ctx;
  15561. struct ggml_tensor * data = NULL;
  15562. if (!params.no_alloc) {
  15563. data = ggml_new_tensor_1d(ctx_data, GGML_TYPE_I8, ctx->size);
  15564. ok = ok && data != NULL;
  15565. // read the binary blob with the tensor data
  15566. ok = ok && gguf_fread_el(file, data->data, ctx->size, &offset);
  15567. if (!ok) {
  15568. fprintf(stderr, "%s: failed to read tensor data\n", __func__);
  15569. fclose(file);
  15570. ggml_free(ctx_data);
  15571. gguf_free(ctx);
  15572. return NULL;
  15573. }
  15574. ctx->data = data->data;
  15575. }
  15576. ggml_set_no_alloc(ctx_data, true);
  15577. // create the tensors
  15578. for (uint64_t i = 0; i < ctx->header.n_tensors; ++i) {
  15579. const int64_t ne[GGML_MAX_DIMS] = {
  15580. ctx->infos[i].ne[0],
  15581. ctx->infos[i].ne[1],
  15582. ctx->infos[i].ne[2],
  15583. ctx->infos[i].ne[3],
  15584. };
  15585. struct ggml_tensor * cur = ggml_new_tensor(ctx_data, ctx->infos[i].type, ctx->infos[i].n_dims, ne);
  15586. ok = ok && cur != NULL;
  15587. ggml_set_name(cur, ctx->infos[i].name.data);
  15588. if (!ok) {
  15589. break;
  15590. }
  15591. // point the data member to the appropriate location in the binary blob using the tensor infos
  15592. if (!params.no_alloc) {
  15593. //cur->data = (char *) data->data + ctx->infos[i].offset - ctx->offset; // offset from start of file
  15594. cur->data = (char *) data->data + ctx->infos[i].offset; // offset from data
  15595. }
  15596. }
  15597. if (!ok) {
  15598. fprintf(stderr, "%s: failed to read the tensor data\n", __func__);
  15599. fclose(file);
  15600. ggml_free(ctx_data);
  15601. gguf_free(ctx);
  15602. return NULL;
  15603. }
  15604. ggml_set_no_alloc(ctx_data, params.no_alloc);
  15605. }
  15606. fclose(file);
  15607. return ctx;
  15608. }
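// Example (hypothetical usage sketch, not part of the original file; the file
// name is made up): load a GGUF file together with its tensor data.
//
//   struct ggml_context * ctx_data = NULL;
//   struct gguf_init_params gparams = {
//       .no_alloc = false,      // read the binary blob and point tensor data into it
//       .ctx      = &ctx_data,  // receives the ggml_context holding the tensors
//   };
//   struct gguf_context * gctx = gguf_init_from_file("model.gguf", gparams);
//   if (gctx) {
//       const int idx = gguf_find_key(gctx, "general.alignment");  // -1 if the key is absent
//       if (idx >= 0) {
//           const uint32_t alignment = gguf_get_val_u32(gctx, idx);
//           (void) alignment;
//       }
//       // tensors are created in ctx_data, named after their entries in the file
//       gguf_free(gctx);
//       ggml_free(ctx_data);
//   }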
  15609. void gguf_free(struct gguf_context * ctx) {
  15610. if (ctx == NULL) {
  15611. return;
  15612. }
  15613. if (ctx->kv) {
  15614. // free string memory - not great..
  15615. for (uint32_t i = 0; i < ctx->header.n_kv; ++i) {
  15616. struct gguf_kv * kv = &ctx->kv[i];
  15617. if (kv->key.data) {
  15618. free(kv->key.data);
  15619. }
  15620. if (kv->type == GGUF_TYPE_STRING) {
  15621. if (kv->value.str.data) {
  15622. free(kv->value.str.data);
  15623. }
  15624. }
  15625. if (kv->type == GGUF_TYPE_ARRAY) {
  15626. if (kv->value.arr.data) {
  15627. if (kv->value.arr.type == GGUF_TYPE_STRING) {
  15628. for (uint32_t j = 0; j < kv->value.arr.n; ++j) {
  15629. struct gguf_str * str = &((struct gguf_str *) kv->value.arr.data)[j];
  15630. if (str->data) {
  15631. free(str->data);
  15632. }
  15633. }
  15634. }
  15635. free(kv->value.arr.data);
  15636. }
  15637. }
  15638. }
  15639. free(ctx->kv);
  15640. }
  15641. if (ctx->infos) {
  15642. for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) {
  15643. struct gguf_tensor_info * info = &ctx->infos[i];
  15644. if (info->name.data) {
  15645. free(info->name.data);
  15646. }
  15647. }
  15648. free(ctx->infos);
  15649. }
  15650. GGML_ALIGNED_FREE(ctx);
  15651. }
const char * gguf_type_name(enum gguf_type type) {
    return GGUF_TYPE_NAME[type];
}

int gguf_get_version(const struct gguf_context * ctx) {
    return ctx->header.version;
}

size_t gguf_get_alignment(const struct gguf_context * ctx) {
    return ctx->alignment;
}

size_t gguf_get_data_offset(const struct gguf_context * ctx) {
    return ctx->offset;
}

void * gguf_get_data(const struct gguf_context * ctx) {
    return ctx->data;
}

int gguf_get_n_kv(const struct gguf_context * ctx) {
    return ctx->header.n_kv;
}

int gguf_find_key(const struct gguf_context * ctx, const char * key) {
    // return -1 if key not found
    int keyfound = -1;

    const int n_kv = gguf_get_n_kv(ctx);

    for (int i = 0; i < n_kv; ++i) {
        if (strcmp(key, gguf_get_key(ctx, i)) == 0) {
            keyfound = i;
            break;
        }
    }

    return keyfound;
}

const char * gguf_get_key(const struct gguf_context * ctx, int key_id) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    return ctx->kv[key_id].key.data;
}

enum gguf_type gguf_get_kv_type(const struct gguf_context * ctx, int key_id) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    return ctx->kv[key_id].type;
}

enum gguf_type gguf_get_arr_type(const struct gguf_context * ctx, int key_id) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_ARRAY);
    return ctx->kv[key_id].value.arr.type;
}

const void * gguf_get_arr_data(const struct gguf_context * ctx, int key_id) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_ARRAY);
    return ctx->kv[key_id].value.arr.data;
}

const char * gguf_get_arr_str(const struct gguf_context * ctx, int key_id, int i) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_ARRAY);
    struct gguf_kv * kv = &ctx->kv[key_id];
    struct gguf_str * str = &((struct gguf_str *) kv->value.arr.data)[i];
    return str->data;
}

int gguf_get_arr_n(const struct gguf_context * ctx, int key_id) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_ARRAY);
    return ctx->kv[key_id].value.arr.n;
}

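// Usage sketch (not part of the library): iterating a string array stored
// under a KV key. Assumes a loaded gguf_context * ctx; the key name below is
// illustrative - any GGUF_TYPE_ARRAY key whose element type is
// GGUF_TYPE_STRING works the same way.
//
//     const int kid = gguf_find_key(ctx, "tokenizer.ggml.tokens");
//     if (kid >= 0 &&
//         gguf_get_kv_type (ctx, kid) == GGUF_TYPE_ARRAY &&
//         gguf_get_arr_type(ctx, kid) == GGUF_TYPE_STRING) {
//         const int n = gguf_get_arr_n(ctx, kid);
//         for (int i = 0; i < n; ++i) {
//             printf("%d: %s\n", i, gguf_get_arr_str(ctx, kid, i));
//         }
//     }
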
uint8_t gguf_get_val_u8(const struct gguf_context * ctx, int key_id) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_UINT8);
    return ctx->kv[key_id].value.uint8;
}

int8_t gguf_get_val_i8(const struct gguf_context * ctx, int key_id) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_INT8);
    return ctx->kv[key_id].value.int8;
}

uint16_t gguf_get_val_u16(const struct gguf_context * ctx, int key_id) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_UINT16);
    return ctx->kv[key_id].value.uint16;
}

int16_t gguf_get_val_i16(const struct gguf_context * ctx, int key_id) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_INT16);
    return ctx->kv[key_id].value.int16;
}

uint32_t gguf_get_val_u32(const struct gguf_context * ctx, int key_id) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_UINT32);
    return ctx->kv[key_id].value.uint32;
}

int32_t gguf_get_val_i32(const struct gguf_context * ctx, int key_id) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_INT32);
    return ctx->kv[key_id].value.int32;
}

float gguf_get_val_f32(const struct gguf_context * ctx, int key_id) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_FLOAT32);
    return ctx->kv[key_id].value.float32;
}

uint64_t gguf_get_val_u64(const struct gguf_context * ctx, int key_id) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_UINT64);
    return ctx->kv[key_id].value.uint64;
}

int64_t gguf_get_val_i64(const struct gguf_context * ctx, int key_id) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_INT64);
    return ctx->kv[key_id].value.int64;
}

double gguf_get_val_f64(const struct gguf_context * ctx, int key_id) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_FLOAT64);
    return ctx->kv[key_id].value.float64;
}

bool gguf_get_val_bool(const struct gguf_context * ctx, int key_id) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_BOOL);
    return ctx->kv[key_id].value.bool_;
}

const char * gguf_get_val_str(const struct gguf_context * ctx, int key_id) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_STRING);
    return ctx->kv[key_id].value.str.data;
}

const void * gguf_get_val_data(const struct gguf_context * ctx, int key_id) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    GGML_ASSERT(ctx->kv[key_id].type != GGUF_TYPE_ARRAY);
    GGML_ASSERT(ctx->kv[key_id].type != GGUF_TYPE_STRING);
    return &ctx->kv[key_id].value;
}

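// Usage sketch (not part of the library): reading typed scalar values after a
// key lookup. Assumes ctx was obtained from gguf_init_from_file(); the key
// names are illustrative.
//
//     const int kid = gguf_find_key(ctx, "general.architecture");
//     if (kid >= 0 && gguf_get_kv_type(ctx, kid) == GGUF_TYPE_STRING) {
//         printf("architecture: %s\n", gguf_get_val_str(ctx, kid));
//     }
//
//     const int aid = gguf_find_key(ctx, "general.alignment");
//     if (aid >= 0 && gguf_get_kv_type(ctx, aid) == GGUF_TYPE_UINT32) {
//         printf("alignment: %u\n", gguf_get_val_u32(ctx, aid));
//     }
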
int gguf_get_n_tensors(const struct gguf_context * ctx) {
    return ctx->header.n_tensors;
}

int gguf_find_tensor(const struct gguf_context * ctx, const char * name) {
    // return -1 if tensor not found
    int tensorfound = -1;

    const int n_tensors = gguf_get_n_tensors(ctx);

    for (int i = 0; i < n_tensors; ++i) {
        if (strcmp(name, gguf_get_tensor_name(ctx, i)) == 0) {
            tensorfound = i;
            break;
        }
    }

    return tensorfound;
}

size_t gguf_get_tensor_offset(const struct gguf_context * ctx, int i) {
    return ctx->infos[i].offset;
}

char * gguf_get_tensor_name(const struct gguf_context * ctx, int i) {
    return ctx->infos[i].name.data;
}

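// Usage sketch (not part of the library): locating a tensor's raw bytes when
// the file was loaded together with its data section (gguf_get_data() may
// return NULL otherwise, e.g. when loading with no_alloc). Tensor offsets are
// relative to the start of the data section. The tensor name is illustrative.
//
//     const int ti = gguf_find_tensor(ctx, "output.weight");
//     if (ti >= 0 && gguf_get_data(ctx) != NULL) {
//         const size_t off  = gguf_get_tensor_offset(ctx, ti);
//         const void * data = (const char *) gguf_get_data(ctx) + off;
//         (void) data; // use the tensor bytes here
//     }
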
// returns the index
static int gguf_get_or_add_key(struct gguf_context * ctx, const char * key) {
    const int idx = gguf_find_key(ctx, key);
    if (idx >= 0) {
        return idx;
    }

    const int n_kv = gguf_get_n_kv(ctx);

    ctx->kv = realloc(ctx->kv, (n_kv + 1) * sizeof(struct gguf_kv));
    ctx->kv[n_kv].key.n    = strlen(key);
    ctx->kv[n_kv].key.data = strdup(key);
    ctx->header.n_kv++;

    return n_kv;
}

void gguf_set_val_u8(struct gguf_context * ctx, const char * key, uint8_t val) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type        = GGUF_TYPE_UINT8;
    ctx->kv[idx].value.uint8 = val;
}

void gguf_set_val_i8(struct gguf_context * ctx, const char * key, int8_t val) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type       = GGUF_TYPE_INT8;
    ctx->kv[idx].value.int8 = val;
}

void gguf_set_val_u16(struct gguf_context * ctx, const char * key, uint16_t val) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type         = GGUF_TYPE_UINT16;
    ctx->kv[idx].value.uint16 = val;
}

void gguf_set_val_i16(struct gguf_context * ctx, const char * key, int16_t val) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type        = GGUF_TYPE_INT16;
    ctx->kv[idx].value.int16 = val;
}

void gguf_set_val_u32(struct gguf_context * ctx, const char * key, uint32_t val) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type         = GGUF_TYPE_UINT32;
    ctx->kv[idx].value.uint32 = val;
}

void gguf_set_val_i32(struct gguf_context * ctx, const char * key, int32_t val) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type        = GGUF_TYPE_INT32;
    ctx->kv[idx].value.int32 = val;
}

void gguf_set_val_f32(struct gguf_context * ctx, const char * key, float val) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type          = GGUF_TYPE_FLOAT32;
    ctx->kv[idx].value.float32 = val;
}

void gguf_set_val_u64(struct gguf_context * ctx, const char * key, uint64_t val) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type         = GGUF_TYPE_UINT64;
    ctx->kv[idx].value.uint64 = val;
}

void gguf_set_val_i64(struct gguf_context * ctx, const char * key, int64_t val) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type        = GGUF_TYPE_INT64;
    ctx->kv[idx].value.int64 = val;
}

void gguf_set_val_f64(struct gguf_context * ctx, const char * key, double val) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type          = GGUF_TYPE_FLOAT64;
    ctx->kv[idx].value.float64 = val;
}

void gguf_set_val_bool(struct gguf_context * ctx, const char * key, bool val) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type        = GGUF_TYPE_BOOL;
    ctx->kv[idx].value.bool_ = val;
}

void gguf_set_val_str(struct gguf_context * ctx, const char * key, const char * val) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type           = GGUF_TYPE_STRING;
    ctx->kv[idx].value.str.n    = strlen(val);
    ctx->kv[idx].value.str.data = strdup(val);
}

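// Usage sketch (not part of the library): populating metadata on a writer
// context. gguf_init_empty() is declared in ggml.h; the key names are
// illustrative. Setting an existing key overwrites its type and value.
//
//     struct gguf_context * gctx = gguf_init_empty();
//     gguf_set_val_str (gctx, "general.name",   "my-model");
//     gguf_set_val_u32 (gctx, "my.block_count", 32);
//     gguf_set_val_bool(gctx, "my.some_flag",   true);
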
void gguf_set_arr_data(struct gguf_context * ctx, const char * key, enum gguf_type type, const void * data, int n) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type           = GGUF_TYPE_ARRAY;
    ctx->kv[idx].value.arr.type = type;
    ctx->kv[idx].value.arr.n    = n;
    ctx->kv[idx].value.arr.data = malloc(n*GGUF_TYPE_SIZE[type]);
    memcpy(ctx->kv[idx].value.arr.data, data, n*GGUF_TYPE_SIZE[type]);
}

void gguf_set_arr_str(struct gguf_context * ctx, const char * key, const char ** data, int n) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type           = GGUF_TYPE_ARRAY;
    ctx->kv[idx].value.arr.type = GGUF_TYPE_STRING;
    ctx->kv[idx].value.arr.n    = n;
    ctx->kv[idx].value.arr.data = malloc(n*sizeof(struct gguf_str));
    for (int i = 0; i < n; i++) {
        struct gguf_str * str = &((struct gguf_str *)ctx->kv[idx].value.arr.data)[i];
        str->n    = strlen(data[i]);
        str->data = strdup(data[i]);
    }
}

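// Usage sketch (not part of the library): storing arrays. The data is copied
// into the context, so the caller keeps ownership of its buffers. Key names
// are illustrative.
//
//     const float scales[4] = { 1.0f, 0.5f, 0.25f, 0.125f };
//     gguf_set_arr_data(gctx, "my.scales", GGUF_TYPE_FLOAT32, scales, 4);
//
//     const char * langs[2] = { "en", "de" };
//     gguf_set_arr_str(gctx, "my.languages", langs, 2);
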
// set or add KV pairs from another context
void gguf_set_kv(struct gguf_context * ctx, struct gguf_context * src) {
    for (uint32_t i = 0; i < src->header.n_kv; i++) {
        switch (src->kv[i].type) {
            case GGUF_TYPE_UINT8:   gguf_set_val_u8  (ctx, src->kv[i].key.data, src->kv[i].value.uint8);    break;
            case GGUF_TYPE_INT8:    gguf_set_val_i8  (ctx, src->kv[i].key.data, src->kv[i].value.int8);     break;
            case GGUF_TYPE_UINT16:  gguf_set_val_u16 (ctx, src->kv[i].key.data, src->kv[i].value.uint16);   break;
            case GGUF_TYPE_INT16:   gguf_set_val_i16 (ctx, src->kv[i].key.data, src->kv[i].value.int16);    break;
            case GGUF_TYPE_UINT32:  gguf_set_val_u32 (ctx, src->kv[i].key.data, src->kv[i].value.uint32);   break;
            case GGUF_TYPE_INT32:   gguf_set_val_i32 (ctx, src->kv[i].key.data, src->kv[i].value.int32);    break;
            case GGUF_TYPE_FLOAT32: gguf_set_val_f32 (ctx, src->kv[i].key.data, src->kv[i].value.float32);  break;
            case GGUF_TYPE_UINT64:  gguf_set_val_u64 (ctx, src->kv[i].key.data, src->kv[i].value.uint64);   break;
            case GGUF_TYPE_INT64:   gguf_set_val_i64 (ctx, src->kv[i].key.data, src->kv[i].value.int64);    break;
            case GGUF_TYPE_FLOAT64: gguf_set_val_f64 (ctx, src->kv[i].key.data, src->kv[i].value.float64);  break;
            case GGUF_TYPE_BOOL:    gguf_set_val_bool(ctx, src->kv[i].key.data, src->kv[i].value.bool_);    break;
            case GGUF_TYPE_STRING:  gguf_set_val_str (ctx, src->kv[i].key.data, src->kv[i].value.str.data); break;
            case GGUF_TYPE_ARRAY:
                {
                    if (src->kv[i].value.arr.type == GGUF_TYPE_STRING) {
                        const char ** data = malloc(src->kv[i].value.arr.n*sizeof(char *));
                        for (uint32_t j = 0; j < src->kv[i].value.arr.n; j++) {
                            data[j] = ((struct gguf_str *)src->kv[i].value.arr.data)[j].data;
                        }
                        gguf_set_arr_str(ctx, src->kv[i].key.data, data, src->kv[i].value.arr.n);
                        free(data);
                    } else if (src->kv[i].value.arr.type == GGUF_TYPE_ARRAY) {
                        GGML_ASSERT(false && "nested arrays not supported");
                    } else {
                        gguf_set_arr_data(ctx, src->kv[i].key.data, src->kv[i].value.arr.type, src->kv[i].value.arr.data, src->kv[i].value.arr.n);
                    }
                } break;
            case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type"); break;
        }
    }
}

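// Usage sketch (not part of the library): copying all KV pairs from a loaded
// file into a new writer context, e.g. when rewriting a model with modified
// tensors. Nested arrays are not supported and trigger the assert above.
//
//     struct gguf_context * dst = gguf_init_empty();
//     gguf_set_kv(dst, src_ctx);   // src_ctx from gguf_init_from_file()
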
void gguf_add_tensor(
             struct gguf_context * ctx,
        const struct ggml_tensor * tensor) {
    const int idx = ctx->header.n_tensors;
    ctx->infos = realloc(ctx->infos, (idx + 1)*sizeof(struct gguf_tensor_info));

    ctx->infos[idx].name.n    = strlen(tensor->name);
    ctx->infos[idx].name.data = strdup(tensor->name);

    for (int i = 0; i < GGML_MAX_DIMS; ++i) {
        ctx->infos[idx].ne[i] = 1;
    }

    ctx->infos[idx].n_dims = ggml_n_dims(tensor);
    for (uint32_t i = 0; i < ctx->infos[idx].n_dims; i++) {
        ctx->infos[idx].ne[i] = tensor->ne[i];
    }

    ctx->infos[idx].type   = tensor->type;
    ctx->infos[idx].offset = 0;
    ctx->infos[idx].data   = tensor->data;
    ctx->infos[idx].size   = ggml_nbytes(tensor);

    if (ctx->header.n_tensors > 0) {
        ctx->infos[idx].offset = ctx->infos[idx - 1].offset + GGML_PAD(ctx->infos[idx - 1].size, ctx->alignment);
    }

    ctx->header.n_tensors++;
}

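// Note: each added tensor is laid out directly after the previous one, padded
// to ctx->alignment, so the offsets computed here match what
// gguf_write_to_buf() later emits for the data section.
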
void gguf_set_tensor_type(struct gguf_context * ctx, const char * name, enum ggml_type type) {
    const int idx = gguf_find_tensor(ctx, name);
    if (idx < 0) {
        GGML_ASSERT(false && "tensor not found");
    }

    ctx->infos[idx].type = type;
}

void gguf_set_tensor_data(struct gguf_context * ctx, const char * name, const void * data, size_t size) {
    const int idx = gguf_find_tensor(ctx, name);
    if (idx < 0) {
        GGML_ASSERT(false && "tensor not found");
    }

    ctx->infos[idx].data = data;
    ctx->infos[idx].size = size;

    // update offsets
    for (uint32_t i = idx + 1; i < ctx->header.n_tensors; ++i) {
        ctx->infos[i].offset = ctx->infos[i - 1].offset + GGML_PAD(ctx->infos[i - 1].size, ctx->alignment);
    }
}

//static void gguf_fwrite_str(FILE * file, const struct gguf_str * val) {
//    fwrite(&val->n, sizeof(val->n), 1, file);
//    fwrite(val->data, sizeof(char), val->n, file);
//}
//
//static void gguf_fwrite_el(FILE * file, const void * val, size_t size) {
//    fwrite(val, sizeof(char), size, file);
//}

struct gguf_buf {
    void * data;
    size_t size;
    size_t offset;
};

static struct gguf_buf gguf_buf_init(size_t size) {
    struct gguf_buf buf = {
        /*buf.data   =*/ size == 0 ? NULL : malloc(size),
        /*buf.size   =*/ size,
        /*buf.offset =*/ 0,
    };

    return buf;
}

static void gguf_buf_free(struct gguf_buf buf) {
    if (buf.data) {
        free(buf.data);
    }
}

static void gguf_buf_grow(struct gguf_buf * buf, size_t size) {
    if (buf->offset + size > buf->size) {
        buf->size = 1.5*(buf->offset + size);
        if (buf->data) {
            buf->data = realloc(buf->data, buf->size);
        }
    }
}

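// Note: when buf->data is NULL (a zero-sized buffer from gguf_buf_init(0)),
// the gguf_bwrite_* helpers below only advance buf->offset without copying -
// this is how gguf_get_meta_size() computes the metadata size without
// allocating.
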
static void gguf_bwrite_str(struct gguf_buf * buf, const struct gguf_str * val) {
    gguf_buf_grow(buf, sizeof(val->n) + val->n);

    if (buf->data) {
        memcpy((char *) buf->data + buf->offset, &val->n, sizeof(val->n));
    }
    buf->offset += sizeof(val->n);

    if (buf->data) {
        memcpy((char *) buf->data + buf->offset, val->data, val->n);
    }
    buf->offset += val->n;
}

static void gguf_bwrite_el(struct gguf_buf * buf, const void * val, size_t el_size) {
    gguf_buf_grow(buf, el_size);

    if (buf->data) {
        memcpy((char *) buf->data + buf->offset, val, el_size);
    }
    buf->offset += el_size;
}

static void gguf_write_to_buf(const struct gguf_context * ctx, struct gguf_buf * buf, bool only_meta) {
    // write header
    gguf_bwrite_el(buf, &ctx->header.magic,     sizeof(ctx->header.magic));
    gguf_bwrite_el(buf, &ctx->header.version,   sizeof(ctx->header.version));
    gguf_bwrite_el(buf, &ctx->header.n_tensors, sizeof(ctx->header.n_tensors));
    gguf_bwrite_el(buf, &ctx->header.n_kv,      sizeof(ctx->header.n_kv));

    // write key-value pairs
    for (uint32_t i = 0; i < ctx->header.n_kv; ++i) {
        struct gguf_kv * kv = &ctx->kv[i];

        gguf_bwrite_str(buf, &kv->key);
        gguf_bwrite_el (buf, &kv->type, sizeof(kv->type));

        switch (kv->type) {
            case GGUF_TYPE_UINT8:   gguf_bwrite_el (buf, &kv->value.uint8,   sizeof(kv->value.uint8)  ); break;
            case GGUF_TYPE_INT8:    gguf_bwrite_el (buf, &kv->value.int8,    sizeof(kv->value.int8)   ); break;
            case GGUF_TYPE_UINT16:  gguf_bwrite_el (buf, &kv->value.uint16,  sizeof(kv->value.uint16) ); break;
            case GGUF_TYPE_INT16:   gguf_bwrite_el (buf, &kv->value.int16,   sizeof(kv->value.int16)  ); break;
            case GGUF_TYPE_UINT32:  gguf_bwrite_el (buf, &kv->value.uint32,  sizeof(kv->value.uint32) ); break;
            case GGUF_TYPE_INT32:   gguf_bwrite_el (buf, &kv->value.int32,   sizeof(kv->value.int32)  ); break;
            case GGUF_TYPE_FLOAT32: gguf_bwrite_el (buf, &kv->value.float32, sizeof(kv->value.float32)); break;
            case GGUF_TYPE_UINT64:  gguf_bwrite_el (buf, &kv->value.uint64,  sizeof(kv->value.uint64) ); break;
            case GGUF_TYPE_INT64:   gguf_bwrite_el (buf, &kv->value.int64,   sizeof(kv->value.int64)  ); break;
            case GGUF_TYPE_FLOAT64: gguf_bwrite_el (buf, &kv->value.float64, sizeof(kv->value.float64)); break;
            case GGUF_TYPE_BOOL:    gguf_bwrite_el (buf, &kv->value.bool_,   sizeof(kv->value.bool_)  ); break;
            case GGUF_TYPE_STRING:  gguf_bwrite_str(buf, &kv->value.str                               ); break;
            case GGUF_TYPE_ARRAY:
                {
                    gguf_bwrite_el(buf, &kv->value.arr.type, sizeof(kv->value.arr.type));
                    gguf_bwrite_el(buf, &kv->value.arr.n,    sizeof(kv->value.arr.n)   );

                    switch (kv->value.arr.type) {
                        case GGUF_TYPE_UINT8:
                        case GGUF_TYPE_INT8:
                        case GGUF_TYPE_UINT16:
                        case GGUF_TYPE_INT16:
                        case GGUF_TYPE_UINT32:
                        case GGUF_TYPE_INT32:
                        case GGUF_TYPE_FLOAT32:
                        case GGUF_TYPE_UINT64:
                        case GGUF_TYPE_INT64:
                        case GGUF_TYPE_FLOAT64:
                        case GGUF_TYPE_BOOL:
                            {
                                gguf_bwrite_el(buf, kv->value.arr.data, kv->value.arr.n * GGUF_TYPE_SIZE[kv->value.arr.type]);
                            } break;
                        case GGUF_TYPE_STRING:
                            {
                                for (uint32_t j = 0; j < kv->value.arr.n; ++j) {
                                    gguf_bwrite_str(buf, &((struct gguf_str *) kv->value.arr.data)[j]);
                                }
                            } break;
                        case GGUF_TYPE_ARRAY:
                        case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type"); break;
                    }
                } break;
            case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type");
        }
    }

    // write tensor infos
    for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) {
        struct gguf_tensor_info * info = &ctx->infos[i];

        gguf_bwrite_str(buf, &info->name);
        gguf_bwrite_el (buf, &info->n_dims, sizeof(info->n_dims));
        for (uint32_t j = 0; j < info->n_dims; ++j) {
            gguf_bwrite_el(buf, &info->ne[j], sizeof(info->ne[j]));
        }
        gguf_bwrite_el(buf, &info->type,   sizeof(info->type));
        gguf_bwrite_el(buf, &info->offset, sizeof(info->offset));
    }

    // we require the data section to be aligned, so take into account any padding
    {
        const size_t offset     = buf->offset;
        const size_t offset_pad = GGML_PAD(offset, ctx->alignment);

        if (offset_pad != offset) {
            uint8_t pad = 0;
            for (size_t i = 0; i < offset_pad - offset; ++i) {
                gguf_bwrite_el(buf, &pad, sizeof(pad));
            }
        }
    }

    if (only_meta) {
        return;
    }

    size_t offset = 0;

    // write tensor data
    for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) {
        struct gguf_tensor_info * info = &ctx->infos[i];

        const size_t size     = info->size;
        const size_t size_pad = GGML_PAD(size, ctx->alignment);

        gguf_bwrite_el(buf, info->data, size);

        if (size_pad != size) {
            uint8_t pad = 0;
            for (size_t j = 0; j < size_pad - size; ++j) {
                gguf_bwrite_el(buf, &pad, sizeof(pad));
            }
        }

        GGML_ASSERT(offset == info->offset);

        offset += size_pad;
    }
}

void gguf_write_to_file(const struct gguf_context * ctx, const char * fname, bool only_meta) {
    FILE * file = fopen(fname, "wb");
    if (!file) {
        GGML_ASSERT(false && "failed to open file for writing");
    }

    struct gguf_buf buf = gguf_buf_init(16*1024);

    gguf_write_to_buf(ctx, &buf, only_meta);

    fwrite(buf.data, 1, buf.offset, file);

    gguf_buf_free(buf);

    fclose(file);
}

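// Usage sketch (not part of the library): a minimal end-to-end write. Assumes
// some_tensor is a ggml_tensor with valid data; names and paths are
// illustrative.
//
//     struct gguf_context * gctx = gguf_init_empty();
//     gguf_set_val_str(gctx, "general.name", "my-model");
//     gguf_add_tensor (gctx, some_tensor);            // repeat per tensor
//     gguf_write_to_file(gctx, "model.gguf", false);  // false = also write tensor data
//     gguf_free(gctx);
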
size_t gguf_get_meta_size(const struct gguf_context * ctx) {
    // no allocs - only compute size
    struct gguf_buf buf = gguf_buf_init(0);

    gguf_write_to_buf(ctx, &buf, true);

    return buf.offset;
}

void gguf_get_meta_data(const struct gguf_context * ctx, void * data) {
    struct gguf_buf buf = gguf_buf_init(16*1024);

    gguf_write_to_buf(ctx, &buf, true);

    memcpy(data, buf.data, buf.offset);

    gguf_buf_free(buf);
}

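// Usage sketch (not part of the library): the two-step pattern for copying
// only the serialized metadata (header, KV pairs, tensor infos, padding) into
// a caller-owned buffer.
//
//     const size_t meta_size = gguf_get_meta_size(gctx);
//     void * meta = malloc(meta_size);
//     gguf_get_meta_data(gctx, meta);
//     // ... use meta ...
//     free(meta);
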
////////////////////////////////////////////////////////////////////////////////

int ggml_cpu_has_avx(void) {
#if defined(__AVX__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_avx2(void) {
#if defined(__AVX2__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_avx512(void) {
#if defined(__AVX512F__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_avx512_vbmi(void) {
#if defined(__AVX512VBMI__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_avx512_vnni(void) {
#if defined(__AVX512VNNI__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_fma(void) {
#if defined(__FMA__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_neon(void) {
#if defined(__ARM_NEON)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_arm_fma(void) {
#if defined(__ARM_FEATURE_FMA)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_metal(void) {
#if defined(GGML_USE_METAL)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_f16c(void) {
#if defined(__F16C__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_fp16_va(void) {
#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_wasm_simd(void) {
#if defined(__wasm_simd128__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_blas(void) {
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) || defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_cublas(void) {
#if defined(GGML_USE_CUBLAS)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_clblast(void) {
#if defined(GGML_USE_CLBLAST)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_gpublas(void) {
    return ggml_cpu_has_cublas() || ggml_cpu_has_clblast();
}

int ggml_cpu_has_sse3(void) {
#if defined(__SSE3__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_ssse3(void) {
#if defined(__SSSE3__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_vsx(void) {
#if defined(__POWER9_VECTOR__)
    return 1;
#else
    return 0;
#endif
}

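// Usage sketch (not part of the library): printing a quick summary of the
// compile-time CPU/backend features, e.g. for a diagnostics banner.
//
//     printf("AVX = %d | AVX2 = %d | NEON = %d | FMA = %d | F16C = %d | BLAS = %d\n",
//            ggml_cpu_has_avx(), ggml_cpu_has_avx2(), ggml_cpu_has_neon(),
//            ggml_cpu_has_fma(), ggml_cpu_has_f16c(), ggml_cpu_has_blas());
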
////////////////////////////////////////////////////////////////////////////////