ggml.c (629 KB, 14308 lines)

430914310143111431214313143141431514316143171431814319143201432114322143231432414325143261432714328143291433014331143321433314334143351433614337143381433914340143411434214343143441434514346143471434814349143501435114352143531435414355143561435714358143591436014361143621436314364143651436614367143681436914370143711437214373143741437514376143771437814379143801438114382143831438414385143861438714388143891439014391143921439314394143951439614397143981439914400144011440214403144041440514406144071440814409144101441114412144131441414415144161441714418144191442014421144221442314424144251442614427144281442914430144311443214433144341443514436144371443814439144401444114442144431444414445144461444714448144491445014451144521445314454144551445614457144581445914460144611446214463144641446514466144671446814469144701447114472144731447414475144761447714478144791448014481144821448314484144851448614487144881448914490144911449214493144941449514496144971449814499145001450114502145031450414505145061450714508145091451014511145121451314514145151451614517145181451914520145211452214523145241452514526145271452814529145301453114532145331453414535145361453714538145391454014541145421454314544145451454614547145481454914550145511455214553145541455514556145571455814559145601456114562145631456414565145661456714568145691457014571145721457314574145751457614577145781457914580145811458214583145841458514586145871458814589145901459114592145931459414595145961459714598145991460014601146021460314604146051460614607146081460914610146111461214613146141461514616146171461814619146201462114622146231462414625146261462714628146291463014631146321463314634146351463614637146381463914640146411464214643146441464514646146471464814649146501465114652146531465414655146561465714658146591466014661146621466314664146651466614667146681466914670146711467214673146741467514676146771467814679146801468114682146831468414685146861468714688146891469014691146921469314694146951469614697146981469914700147011470214703147041470514706147071470814709147101471114712147131471414715147161471714718147191472014721147221472314724147251472614727147281472914730147311473214733147341473514736147371473814739147401474114742147431474414745147461474714748147491475014751147521475314754147551475614757147581475914760147611476214763147641476514766147671476814769147701477114772147731477414775147761477714778147791478014781147821478314784147851478614787147881478914790147911479214793147941479514796147971479814799148001480114802148031480414805148061480714808148091481014811148121481314814148151481614817148181481914820148211482214823148241482514826148271482814829148301483114832148331483414835148361483714838148391484014841148421484314844148451484614847148481484914850148511485214853148541485514856148571485814859148601486114862148631486414865148661486714868148691487014871148721487314874148751487614877148781487914880148811488214883148841488514886148871488814889148901489114892148931489414895148961489714898148991490014901149021490314904149051490614907149081490914910149111491214913149141491514916149171491814919149201492114922149231492414925149261492714928149291493014931149321493314934149351493614937149381493914940149411494214943149441494514946149471494814949149501495114952149531495414955149561495714958149591496014961149621496314964149651496614967149681496914970149711497214973149741497514976149771497814979149801498114982149831498414985149861498714988149891499014991149921499314994149951499614997149981499915000150011500215003150041500515006150071500815009150101501115012150131501415015150161501715018150191
502015021150221502315024150251502615027150281502915030150311503215033150341503515036150371503815039150401504115042150431504415045150461504715048150491505015051150521505315054150551505615057150581505915060150611506215063150641506515066150671506815069150701507115072150731507415075150761507715078150791508015081150821508315084150851508615087150881508915090150911509215093150941509515096150971509815099151001510115102151031510415105151061510715108151091511015111151121511315114151151511615117151181511915120151211512215123151241512515126151271512815129151301513115132151331513415135151361513715138151391514015141151421514315144151451514615147151481514915150151511515215153151541515515156151571515815159151601516115162151631516415165151661516715168151691517015171151721517315174151751517615177151781517915180151811518215183151841518515186151871518815189151901519115192151931519415195151961519715198151991520015201152021520315204152051520615207152081520915210152111521215213152141521515216152171521815219152201522115222152231522415225152261522715228152291523015231152321523315234152351523615237152381523915240152411524215243152441524515246152471524815249152501525115252152531525415255152561525715258152591526015261152621526315264152651526615267152681526915270152711527215273152741527515276152771527815279152801528115282152831528415285152861528715288152891529015291152921529315294152951529615297152981529915300153011530215303153041530515306153071530815309153101531115312153131531415315153161531715318153191532015321153221532315324153251532615327153281532915330153311533215333153341533515336153371533815339153401534115342153431534415345153461534715348153491535015351153521535315354153551535615357153581535915360153611536215363153641536515366153671536815369153701537115372153731537415375153761537715378153791538015381153821538315384153851538615387153881538915390153911539215393153941539515396153971539815399154001540115402154031540415405154061540715408154091541015411154121541315414154151541615417154181541915420154211542215423154241542515426154271542815429154301543115432154331543415435154361543715438154391544015441154421544315444154451544615447154481544915450154511545215453154541545515456154571545815459154601546115462154631546415465154661546715468154691547015471154721547315474154751547615477154781547915480154811548215483154841548515486154871548815489154901549115492154931549415495154961549715498154991550015501155021550315504155051550615507155081550915510155111551215513155141551515516155171551815519155201552115522155231552415525155261552715528155291553015531155321553315534155351553615537155381553915540155411554215543155441554515546155471554815549155501555115552155531555415555155561555715558155591556015561155621556315564155651556615567155681556915570155711557215573155741557515576155771557815579155801558115582155831558415585155861558715588155891559015591155921559315594155951559615597155981559915600156011560215603156041560515606156071560815609156101561115612156131561415615156161561715618156191562015621156221562315624156251562615627156281562915630156311563215633156341563515636156371563815639156401564115642156431564415645156461564715648156491565015651156521565315654156551565615657156581565915660156611566215663156641566515666156671566815669156701567115672156731567415675156761567715678156791568015681156821568315684156851568615687156881568915690156911569215693156941569515696156971569815699157001570115702157031570415705157061570715708157091571015711157121571315714157151571615717157181571915720157211572215723157241572515726157271572815729157301
573115732157331573415735157361573715738157391574015741157421574315744157451574615747157481574915750157511575215753157541575515756157571575815759157601576115762157631576415765157661576715768157691577015771157721577315774157751577615777157781577915780157811578215783157841578515786157871578815789157901579115792157931579415795157961579715798157991580015801158021580315804158051580615807158081580915810158111581215813158141581515816158171581815819158201582115822158231582415825158261582715828158291583015831158321583315834158351583615837158381583915840158411584215843158441584515846158471584815849158501585115852158531585415855158561585715858158591586015861158621586315864158651586615867158681586915870158711587215873158741587515876158771587815879158801588115882158831588415885158861588715888158891589015891158921589315894158951589615897158981589915900159011590215903159041590515906159071590815909159101591115912159131591415915159161591715918159191592015921159221592315924159251592615927159281592915930159311593215933159341593515936159371593815939159401594115942159431594415945159461594715948159491595015951159521595315954159551595615957159581595915960159611596215963159641596515966159671596815969159701597115972159731597415975159761597715978159791598015981159821598315984159851598615987159881598915990159911599215993159941599515996159971599815999160001600116002160031600416005160061600716008160091601016011160121601316014160151601616017160181601916020160211602216023160241602516026160271602816029160301603116032160331603416035160361603716038160391604016041160421604316044160451604616047160481604916050160511605216053160541605516056160571605816059160601606116062160631606416065160661606716068160691607016071160721607316074160751607616077160781607916080160811608216083160841608516086160871608816089160901609116092160931609416095160961609716098160991610016101161021610316104161051610616107161081610916110161111611216113161141611516116161171611816119161201612116122161231612416125161261612716128161291613016131161321613316134161351613616137161381613916140161411614216143161441614516146161471614816149161501615116152161531615416155161561615716158161591616016161161621616316164161651616616167161681616916170161711617216173161741617516176161771617816179161801618116182161831618416185161861618716188161891619016191161921619316194161951619616197161981619916200162011620216203162041620516206162071620816209162101621116212162131621416215162161621716218162191622016221162221622316224162251622616227162281622916230162311623216233162341623516236162371623816239162401624116242162431624416245162461624716248162491625016251162521625316254162551625616257162581625916260162611626216263162641626516266162671626816269162701627116272162731627416275162761627716278162791628016281162821628316284162851628616287162881628916290162911629216293162941629516296162971629816299163001630116302163031630416305163061630716308163091631016311163121631316314163151631616317163181631916320163211632216323163241632516326163271632816329163301633116332163331633416335163361633716338163391634016341163421634316344163451634616347163481634916350163511635216353163541635516356163571635816359163601636116362163631636416365163661636716368163691637016371163721637316374163751637616377163781637916380163811638216383163841638516386163871638816389163901639116392163931639416395163961639716398163991640016401164021640316404164051640616407164081640916410164111641216413164141641516416164171641816419164201642116422164231642416425164261642716428164291643016431164321643316434164351643616437164381643916440164411
644216443164441644516446164471644816449164501645116452164531645416455164561645716458164591646016461164621646316464164651646616467164681646916470164711647216473164741647516476164771647816479164801648116482164831648416485164861648716488164891649016491164921649316494164951649616497164981649916500165011650216503165041650516506165071650816509165101651116512165131651416515165161651716518165191652016521165221652316524165251652616527165281652916530165311653216533165341653516536165371653816539165401654116542165431654416545165461654716548165491655016551165521655316554165551655616557165581655916560165611656216563165641656516566165671656816569165701657116572165731657416575165761657716578165791658016581165821658316584165851658616587165881658916590165911659216593165941659516596165971659816599166001660116602166031660416605166061660716608166091661016611166121661316614166151661616617166181661916620166211662216623166241662516626166271662816629166301663116632166331663416635166361663716638166391664016641166421664316644166451664616647166481664916650166511665216653166541665516656166571665816659166601666116662166631666416665166661666716668166691667016671166721667316674166751667616677166781667916680166811668216683166841668516686166871668816689166901669116692166931669416695166961669716698166991670016701167021670316704167051670616707167081670916710167111671216713167141671516716167171671816719167201672116722167231672416725167261672716728167291673016731167321673316734167351673616737167381673916740167411674216743167441674516746167471674816749167501675116752167531675416755167561675716758167591676016761167621676316764167651676616767167681676916770167711677216773167741677516776167771677816779167801678116782167831678416785167861678716788167891679016791167921679316794167951679616797167981679916800168011680216803168041680516806168071680816809168101681116812168131681416815168161681716818168191682016821168221682316824168251682616827168281682916830168311683216833168341683516836168371683816839168401684116842168431684416845168461684716848168491685016851168521685316854168551685616857168581685916860168611686216863168641686516866168671686816869168701687116872168731687416875168761687716878168791688016881168821688316884168851688616887168881688916890168911689216893168941689516896168971689816899169001690116902169031690416905169061690716908169091691016911169121691316914169151691616917169181691916920169211692216923169241692516926169271692816929169301693116932169331693416935169361693716938169391694016941169421694316944169451694616947169481694916950169511695216953169541695516956169571695816959169601696116962169631696416965169661696716968169691697016971169721697316974169751697616977169781697916980169811698216983169841698516986169871698816989169901699116992169931699416995169961699716998169991700017001170021700317004170051700617007170081700917010170111701217013170141701517016170171701817019170201702117022170231702417025170261702717028170291703017031170321703317034170351703617037170381703917040170411704217043170441704517046170471704817049170501705117052170531705417055170561705717058170591706017061170621706317064170651706617067170681706917070170711707217073170741707517076170771707817079170801708117082170831708417085170861708717088170891709017091170921709317094170951709617097170981709917100171011710217103171041710517106171071710817109171101711117112171131711417115171161711717118171191712017121171221712317124171251712617127171281712917130171311713217133171341713517136171371713817139171401714117142171431714417145171461714717148171491715017151171521
715317154171551715617157171581715917160171611716217163171641716517166171671716817169171701717117172171731717417175171761717717178171791718017181171821718317184171851718617187171881718917190171911719217193171941719517196171971719817199172001720117202172031720417205172061720717208172091721017211172121721317214172151721617217172181721917220172211722217223172241722517226172271722817229172301723117232172331723417235172361723717238172391724017241172421724317244172451724617247172481724917250172511725217253172541725517256172571725817259172601726117262172631726417265172661726717268172691727017271172721727317274172751727617277172781727917280172811728217283172841728517286172871728817289172901729117292172931729417295172961729717298172991730017301173021730317304173051730617307173081730917310173111731217313173141731517316173171731817319173201732117322173231732417325173261732717328173291733017331173321733317334173351733617337173381733917340173411734217343173441734517346173471734817349173501735117352173531735417355173561735717358173591736017361173621736317364173651736617367173681736917370173711737217373173741737517376173771737817379173801738117382173831738417385173861738717388173891739017391173921739317394173951739617397173981739917400174011740217403174041740517406174071740817409174101741117412174131741417415174161741717418174191742017421174221742317424174251742617427174281742917430174311743217433174341743517436174371743817439174401744117442174431744417445174461744717448174491745017451174521745317454174551745617457174581745917460174611746217463174641746517466174671746817469174701747117472174731747417475174761747717478174791748017481174821748317484174851748617487174881748917490174911749217493174941749517496174971749817499175001750117502175031750417505175061750717508175091751017511175121751317514175151751617517175181751917520175211752217523175241752517526175271752817529175301753117532175331753417535175361753717538175391754017541175421754317544175451754617547175481754917550175511755217553175541755517556175571755817559175601756117562175631756417565175661756717568175691757017571175721757317574175751757617577175781757917580175811758217583175841758517586175871758817589175901759117592175931759417595175961759717598175991760017601176021760317604176051760617607176081760917610176111761217613176141761517616176171761817619176201762117622176231762417625176261762717628176291763017631176321763317634176351763617637176381763917640176411764217643176441764517646176471764817649176501765117652176531765417655176561765717658176591766017661176621766317664176651766617667176681766917670176711767217673176741767517676176771767817679176801768117682176831768417685176861768717688176891769017691176921769317694176951769617697176981769917700177011770217703177041770517706177071770817709177101771117712177131771417715177161771717718177191772017721177221772317724177251772617727177281772917730177311773217733177341773517736177371773817739177401774117742177431774417745177461774717748177491775017751177521775317754177551775617757177581775917760177611776217763177641776517766177671776817769177701777117772177731777417775177761777717778177791778017781177821778317784177851778617787177881778917790177911779217793177941779517796177971779817799178001780117802178031780417805178061780717808178091781017811178121781317814178151781617817178181781917820178211782217823178241782517826178271782817829178301783117832178331783417835178361783717838178391784017841178421784317844178451784617847178481784917850178511785217853178541785517856178571785817859178601786117862178631
786417865178661786717868178691787017871178721787317874178751787617877178781787917880178811788217883178841788517886178871788817889178901789117892178931789417895178961789717898178991790017901179021790317904179051790617907179081790917910179111791217913179141791517916179171791817919179201792117922179231792417925179261792717928179291793017931179321793317934179351793617937179381793917940179411794217943179441794517946179471794817949179501795117952179531795417955179561795717958179591796017961179621796317964179651796617967179681796917970179711797217973179741797517976179771797817979179801798117982179831798417985179861798717988179891799017991179921799317994179951799617997179981799918000180011800218003180041800518006180071800818009180101801118012180131801418015180161801718018180191802018021180221802318024180251802618027180281802918030180311803218033180341803518036180371803818039180401804118042180431804418045180461804718048180491805018051180521805318054180551805618057180581805918060180611806218063180641806518066180671806818069180701807118072180731807418075180761807718078180791808018081180821808318084180851808618087180881808918090180911809218093180941809518096180971809818099181001810118102181031810418105181061810718108181091811018111181121811318114181151811618117181181811918120181211812218123181241812518126181271812818129181301813118132181331813418135181361813718138181391814018141181421814318144181451814618147181481814918150181511815218153181541815518156181571815818159181601816118162181631816418165181661816718168181691817018171181721817318174181751817618177181781817918180181811818218183181841818518186181871818818189181901819118192181931819418195181961819718198181991820018201182021820318204182051820618207182081820918210182111821218213182141821518216182171821818219182201822118222182231822418225182261822718228182291823018231182321823318234182351823618237182381823918240182411824218243182441824518246182471824818249182501825118252182531825418255182561825718258182591826018261182621826318264182651826618267182681826918270182711827218273182741827518276182771827818279182801828118282182831828418285182861828718288182891829018291182921829318294182951829618297182981829918300183011830218303183041830518306183071830818309183101831118312183131831418315183161831718318183191832018321183221832318324183251832618327183281832918330183311833218333183341833518336183371833818339183401834118342183431834418345183461834718348183491835018351183521835318354183551835618357183581835918360183611836218363183641836518366183671836818369183701837118372183731837418375183761837718378183791838018381183821838318384183851838618387183881838918390183911839218393183941839518396183971839818399184001840118402184031840418405184061840718408184091841018411184121841318414184151841618417184181841918420184211842218423184241842518426184271842818429184301843118432184331843418435184361843718438184391844018441184421844318444184451844618447184481844918450184511845218453184541845518456184571845818459184601846118462184631846418465184661846718468184691847018471184721847318474184751847618477184781847918480184811848218483184841848518486184871848818489184901849118492184931849418495184961849718498184991850018501185021850318504185051850618507185081850918510185111851218513185141851518516185171851818519185201852118522185231852418525185261852718528185291853018531185321853318534185351853618537185381853918540185411854218543185441854518546185471854818549185501855118552185531855418555185561855718558185591856018561185621856318564185651856618567185681856918570185711857218573185741
857518576185771857818579185801858118582185831858418585185861858718588185891859018591185921859318594185951859618597185981859918600186011860218603186041860518606186071860818609186101861118612186131861418615186161861718618186191862018621186221862318624186251862618627186281862918630186311863218633186341863518636186371863818639186401864118642186431864418645186461864718648186491865018651186521865318654186551865618657186581865918660186611866218663186641866518666186671866818669186701867118672186731867418675186761867718678186791868018681186821868318684186851868618687186881868918690186911869218693186941869518696186971869818699187001870118702187031870418705187061870718708187091871018711187121871318714187151871618717187181871918720187211872218723187241872518726187271872818729187301873118732187331873418735187361873718738187391874018741187421874318744187451874618747187481874918750187511875218753187541875518756187571875818759187601876118762187631876418765187661876718768187691877018771187721877318774187751877618777187781877918780187811878218783187841878518786187871878818789187901879118792187931879418795187961879718798187991880018801188021880318804188051880618807188081880918810188111881218813188141881518816188171881818819188201882118822188231882418825188261882718828188291883018831188321883318834188351883618837188381883918840188411884218843188441884518846188471884818849188501885118852188531885418855188561885718858188591886018861188621886318864188651886618867188681886918870188711887218873188741887518876188771887818879188801888118882188831888418885188861888718888188891889018891188921889318894188951889618897188981889918900189011890218903189041890518906189071890818909189101891118912189131891418915189161891718918189191892018921189221892318924189251892618927189281892918930189311893218933189341893518936189371893818939189401894118942189431894418945189461894718948189491895018951189521895318954189551895618957189581895918960189611896218963189641896518966189671896818969189701897118972189731897418975189761897718978189791898018981189821898318984189851898618987189881898918990189911899218993189941899518996189971899818999190001900119002190031900419005190061900719008190091901019011190121901319014190151901619017190181901919020190211902219023190241902519026190271902819029190301903119032190331903419035190361903719038190391904019041190421904319044190451904619047190481904919050190511905219053190541905519056190571905819059190601906119062190631906419065190661906719068190691907019071190721907319074190751907619077190781907919080190811908219083190841908519086190871908819089190901909119092190931909419095190961909719098190991910019101191021910319104191051910619107191081910919110191111911219113191141911519116191171911819119191201912119122191231912419125191261912719128191291913019131191321913319134191351913619137191381913919140191411914219143191441914519146191471914819149191501915119152191531915419155191561915719158191591916019161191621916319164191651916619167191681916919170191711917219173191741917519176191771917819179191801918119182191831918419185191861918719188191891919019191191921919319194191951919619197191981919919200192011920219203192041920519206192071920819209192101921119212192131921419215192161921719218192191922019221192221922319224192251922619227192281922919230192311923219233192341923519236192371923819239192401924119242192431924419245192461924719248192491925019251192521925319254192551925619257192581925919260192611926219263192641926519266192671926819269192701927119272192731927419275192761927719278192791928019281192821928319284192851
9286192871928819289192901929119292192931929419295192961929719298192991930019301193021930319304193051930619307193081930919310193111931219313193141931519316193171931819319193201932119322193231932419325193261932719328193291933019331193321933319334193351933619337193381933919340193411934219343193441934519346193471934819349193501935119352193531935419355193561935719358193591936019361193621936319364193651936619367193681936919370193711937219373193741937519376193771937819379193801938119382193831938419385193861938719388193891939019391193921939319394193951939619397193981939919400194011940219403194041940519406194071940819409194101941119412194131941419415194161941719418194191942019421194221942319424194251942619427194281942919430194311943219433194341943519436194371943819439194401944119442194431944419445194461944719448194491945019451194521945319454194551945619457194581945919460194611946219463194641946519466194671946819469194701947119472194731947419475194761947719478194791948019481194821948319484194851948619487194881948919490194911949219493194941949519496194971949819499195001950119502195031950419505195061950719508195091951019511195121951319514195151951619517195181951919520195211952219523195241952519526195271952819529195301953119532195331953419535195361953719538195391954019541195421954319544195451954619547195481954919550195511955219553195541955519556195571955819559195601956119562195631956419565195661956719568195691957019571195721957319574195751957619577195781957919580195811958219583195841958519586195871958819589195901959119592195931959419595195961959719598195991960019601196021960319604196051960619607196081960919610196111961219613
#define _CRT_SECURE_NO_DEPRECATE // Disables ridiculous "unsafe" warnings on Windows
#define _USE_MATH_DEFINES // For M_PI on MSVC

#include "ggml-impl.h"
#include "ggml-quants.h"

#if defined(_MSC_VER) || defined(__MINGW32__)
#include <malloc.h> // using malloc.h with MSC/MINGW
#elif !defined(__FreeBSD__) && !defined(__NetBSD__) && !defined(__OpenBSD__)
#include <alloca.h>
#endif

#include <assert.h>
#include <errno.h>
#include <time.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>
#include <float.h>
#include <limits.h>
#include <stdarg.h>
#include <signal.h>

#ifdef GGML_USE_METAL
#include <unistd.h>
#endif

#if defined(_MSC_VER)
// disable "possible loss of data" to avoid hundreds of casts
// we should just be careful :)
#pragma warning(disable: 4244 4267)

// disable POSIX deprecation warnings
// these functions are never going away, anyway
#pragma warning(disable: 4996)
#endif
#if defined(_WIN32)

#include <windows.h>

typedef volatile LONG atomic_int;
typedef atomic_int atomic_bool;

static void atomic_store(atomic_int * ptr, LONG val) {
    InterlockedExchange(ptr, val);
}
static LONG atomic_load(atomic_int * ptr) {
    return InterlockedCompareExchange(ptr, 0, 0);
}
static LONG atomic_fetch_add(atomic_int * ptr, LONG inc) {
    return InterlockedExchangeAdd(ptr, inc);
}
static LONG atomic_fetch_sub(atomic_int * ptr, LONG dec) {
    return atomic_fetch_add(ptr, -(dec));
}

typedef HANDLE pthread_t;

typedef DWORD thread_ret_t;
static int pthread_create(pthread_t * out, void * unused, thread_ret_t(*func)(void *), void * arg) {
    (void) unused;
    HANDLE handle = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE) func, arg, 0, NULL);
    if (handle == NULL)
    {
        return EAGAIN;
    }

    *out = handle;
    return 0;
}

static int pthread_join(pthread_t thread, void * unused) {
    (void) unused;
    int ret = (int) WaitForSingleObject(thread, INFINITE);
    CloseHandle(thread);
    return ret;
}

static int sched_yield (void) {
    Sleep (0);
    return 0;
}
#else
#include <pthread.h>
#include <stdatomic.h>

typedef void * thread_ret_t;

#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#endif
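
// Illustrative sketch (not part of the original file): thanks to the shim above,
// thread code can be written once against the pthread API and run on both the
// Windows and POSIX branches. The ggml_example_* names here are hypothetical.
static thread_ret_t ggml_example_worker(void * arg) {
    (void) arg;
    return 0; // DWORD 0 on Windows, null pointer on POSIX
}
static void ggml_example_spawn_and_join(void) {
    pthread_t th;
    if (pthread_create(&th, NULL, ggml_example_worker, NULL) == 0) {
        pthread_join(th, NULL); // WaitForSingleObject + CloseHandle on the Windows branch
    }
}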
#ifdef GGML_USE_CPU_HBM
#include <hbwmalloc.h>
#endif

#if defined(__APPLE__)
#include <TargetConditionals.h>
#endif

#if (defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)) && \
    (!defined(TARGET_OS_TV) && !defined(TARGET_OS_WATCH))

#include <sys/wait.h>
void ggml_print_backtrace(void) {
    /*
    #include <execinfo.h>
    #include <dlfcn.h>

    void * trace[100];

    int nptrs = backtrace(trace, sizeof(trace)/sizeof(trace[0]));

    backtrace_symbols_fd(trace, nptrs, STDERR_FILENO);
    */

    // backtrace_symbols does not show line numbers, use gdb instead
    char attach[32];
    snprintf(attach, sizeof(attach), "attach %d", getpid());

    int pid = fork();
    if (pid == 0) {
        execlp("gdb", "gdb", "--batch",
            "-ex", "set style enabled on",
            "-ex", attach,
            "-ex", "bt -frame-info source-and-location",
            "-ex", "detach",
            "-ex", "quit",
            NULL);
    } else {
        waitpid(pid, NULL, 0);
    }
}
#else
void ggml_print_backtrace(void) {
    // platform not supported
}
#endif
/*#define GGML_PERF*/
#define GGML_DEBUG 0
#define GGML_GELU_FP16
#define GGML_GELU_QUICK_FP16
#define GGML_SILU_FP16
// #define GGML_CROSS_ENTROPY_EXP_FP16
// #define GGML_FLASH_ATTN_EXP_FP16

#define GGML_SOFT_MAX_UNROLL 4
#define GGML_VEC_DOT_UNROLL  2
#define GGML_VEC_MAD_UNROLL  32

//
// logging
//

#if (GGML_DEBUG >= 1)
#define GGML_PRINT_DEBUG(...) printf(__VA_ARGS__)
#else
#define GGML_PRINT_DEBUG(...)
#endif

#if (GGML_DEBUG >= 5)
#define GGML_PRINT_DEBUG_5(...) printf(__VA_ARGS__)
#else
#define GGML_PRINT_DEBUG_5(...)
#endif

#if (GGML_DEBUG >= 10)
#define GGML_PRINT_DEBUG_10(...) printf(__VA_ARGS__)
#else
#define GGML_PRINT_DEBUG_10(...)
#endif

#define GGML_PRINT(...) printf(__VA_ARGS__)

//
// end of logging block
//

#ifdef GGML_USE_ACCELERATE
// uncomment to use vDSP for soft max computation
// note: not sure if it is actually faster
//#define GGML_SOFT_MAX_ACCELERATE
#endif
#if defined(_MSC_VER) || defined(__MINGW32__)
#define GGML_ALIGNED_MALLOC(size) _aligned_malloc(size, GGML_MEM_ALIGN)
#define GGML_ALIGNED_FREE(ptr)    _aligned_free(ptr)
#else
inline static void * ggml_aligned_malloc(size_t size) {
    if (size == 0) {
        GGML_PRINT("WARNING: Behavior may be unexpected when allocating 0 bytes for ggml_aligned_malloc!\n");
        return NULL;
    }
    void * aligned_memory = NULL;
#ifdef GGML_USE_CPU_HBM
    int result = hbw_posix_memalign(&aligned_memory, 16, size);
#elif defined(GGML_USE_METAL)
    int result = posix_memalign(&aligned_memory, sysconf(_SC_PAGESIZE), size);
#else
    int result = posix_memalign(&aligned_memory, GGML_MEM_ALIGN, size);
#endif
    if (result != 0) {
        // Handle allocation failure
        const char * error_desc = "unknown allocation error";
        switch (result) {
            case EINVAL:
                error_desc = "invalid alignment value";
                break;
            case ENOMEM:
                error_desc = "insufficient memory";
                break;
        }
        GGML_PRINT("%s: %s (attempted to allocate %6.2f MB)\n", __func__, error_desc, size/(1024.0*1024.0));
        return NULL;
    }
    return aligned_memory;
}
#define GGML_ALIGNED_MALLOC(size) ggml_aligned_malloc(size)
#ifdef GGML_USE_CPU_HBM
#define GGML_ALIGNED_FREE(ptr) if (NULL != ptr) hbw_free(ptr)
#else
#define GGML_ALIGNED_FREE(ptr) free(ptr)
#endif
#endif
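
// Illustrative usage sketch (not from the original file): allocating and
// releasing an aligned scratch buffer through the macros defined above.
// The function name and the 1 MB size are hypothetical example values.
static void ggml_example_scratch(void) {
    const size_t size = 1024*1024;
    void * buf = GGML_ALIGNED_MALLOC(size);
    if (buf == NULL) {
        return; // on the posix_memalign path, ggml_aligned_malloc already printed a diagnostic
    }
    memset(buf, 0, size); // use the buffer
    GGML_ALIGNED_FREE(buf);
}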
#define UNUSED GGML_UNUSED

#define SWAP(x, y, T) do { T SWAP = x; x = y; y = SWAP; } while (0)

#if defined(GGML_USE_ACCELERATE)
#include <Accelerate/Accelerate.h>
#if defined(GGML_USE_CLBLAST) // allow usage of CLBlast alongside Accelerate functions
#include "ggml-opencl.h"
#endif
#elif defined(GGML_USE_OPENBLAS)
#if defined(GGML_BLAS_USE_MKL)
#include <mkl.h>
#else
#include <cblas.h>
#endif
#elif defined(GGML_USE_CUBLAS)
#include "ggml-cuda.h"
#elif defined(GGML_USE_CLBLAST)
#include "ggml-opencl.h"
#endif

// floating point type used to accumulate sums
typedef double ggml_float;

#undef MIN
#undef MAX

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))
//
// global data
//

// precomputed gelu table for f16 (128 KB)
static ggml_fp16_t ggml_table_gelu_f16[1 << 16];

// precomputed quick gelu table for f16 (128 KB)
static ggml_fp16_t ggml_table_gelu_quick_f16[1 << 16];

// precomputed silu table for f16 (128 KB)
static ggml_fp16_t ggml_table_silu_f16[1 << 16];

// precomputed exp table for f16 (128 KB)
static ggml_fp16_t ggml_table_exp_f16[1 << 16];

// precomputed f32 table for f16 (256 KB) (ggml-impl.h)
float ggml_table_f32_f16[1 << 16];

// note: do not use these inside ggml.c
// these are meant to be used via the ggml.h API
float ggml_fp16_to_fp32(ggml_fp16_t x) {
    return (float) GGML_FP16_TO_FP32(x);
}

ggml_fp16_t ggml_fp32_to_fp16(float x) {
    return GGML_FP32_TO_FP16(x);
}

void ggml_fp16_to_fp32_row(const ggml_fp16_t * x, float * y, int n) {
    for (int i = 0; i < n; i++) {
        y[i] = GGML_FP16_TO_FP32(x[i]);
    }
}

void ggml_fp32_to_fp16_row(const float * x, ggml_fp16_t * y, int n) {
    int i = 0;
#if defined(__F16C__)
    for (; i + 7 < n; i += 8) {
        __m256 x_vec = _mm256_loadu_ps(x + i);
        __m128i y_vec = _mm256_cvtps_ph(x_vec, _MM_FROUND_TO_NEAREST_INT);
        _mm_storeu_si128((__m128i *)(y + i), y_vec);
    }
    for (; i + 3 < n; i += 4) {
        __m128 x_vec = _mm_loadu_ps(x + i);
        __m128i y_vec = _mm_cvtps_ph(x_vec, _MM_FROUND_TO_NEAREST_INT);
        _mm_storel_epi64((__m128i *)(y + i), y_vec);
    }
#endif
    for (; i < n; i++) {
        y[i] = GGML_FP32_TO_FP16(x[i]);
    }
}
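
// Illustrative sketch (not from the original file): round-tripping a small
// buffer through the row converters above. The values are arbitrary; note that
// the table-based GGML_FP16_TO_FP32 path relies on tables filled by ggml_init().
static void ggml_example_fp16_roundtrip(void) {
    const float src[4] = { 0.0f, 0.5f, -1.25f, 3.0f };
    ggml_fp16_t tmp[4];
    float dst[4];
    ggml_fp32_to_fp16_row(src, tmp, 4); // vectorized on F16C, scalar otherwise
    ggml_fp16_to_fp32_row(tmp, dst, 4); // dst now holds the rounded values of src
    (void) dst;
}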
//
// timing
//

#if defined(_MSC_VER) || defined(__MINGW32__)
static int64_t timer_freq, timer_start;
void ggml_time_init(void) {
    LARGE_INTEGER t;
    QueryPerformanceFrequency(&t);
    timer_freq = t.QuadPart;

    // The multiplication by 1000 or 1000000 below can cause an overflow if timer_freq
    // and the uptime are high enough.
    // We subtract the program start time to reduce the likelihood of that happening.
    QueryPerformanceCounter(&t);
    timer_start = t.QuadPart;
}
int64_t ggml_time_ms(void) {
    LARGE_INTEGER t;
    QueryPerformanceCounter(&t);
    return ((t.QuadPart-timer_start) * 1000) / timer_freq;
}
int64_t ggml_time_us(void) {
    LARGE_INTEGER t;
    QueryPerformanceCounter(&t);
    return ((t.QuadPart-timer_start) * 1000000) / timer_freq;
}
#else
void ggml_time_init(void) {}
int64_t ggml_time_ms(void) {
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (int64_t)ts.tv_sec*1000 + (int64_t)ts.tv_nsec/1000000;
}

int64_t ggml_time_us(void) {
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (int64_t)ts.tv_sec*1000000 + (int64_t)ts.tv_nsec/1000;
}
#endif

int64_t ggml_cycles(void) {
    return clock();
}

int64_t ggml_cycles_per_ms(void) {
    return CLOCKS_PER_SEC/1000;
}
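
// Illustrative sketch (not from the original file): timing a region with the
// helpers above. ggml_time_init() must run once first; on Windows it captures
// the QueryPerformanceCounter baseline that the other calls subtract.
static int64_t ggml_example_elapsed_us(void) {
    ggml_time_init();
    const int64_t t0 = ggml_time_us();
    // ... code being measured ...
    const int64_t t1 = ggml_time_us();
    return t1 - t0; // elapsed wall-clock microseconds
}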
#ifdef GGML_PERF
#define ggml_perf_time_ms()       ggml_time_ms()
#define ggml_perf_time_us()       ggml_time_us()
#define ggml_perf_cycles()        ggml_cycles()
#define ggml_perf_cycles_per_ms() ggml_cycles_per_ms()
#else
#define ggml_perf_time_ms()       0
#define ggml_perf_time_us()       0
#define ggml_perf_cycles()        0
#define ggml_perf_cycles_per_ms() 0
#endif

//
// cache line
//

#if defined(__cpp_lib_hardware_interference_size)
#define CACHE_LINE_SIZE hardware_destructive_interference_size
#else
#if defined(__POWER9_VECTOR__)
#define CACHE_LINE_SIZE 128
#else
#define CACHE_LINE_SIZE 64
#endif
#endif

static const size_t CACHE_LINE_SIZE_F32 = CACHE_LINE_SIZE/sizeof(float);
static void ggml_vec_dot_f32(const int n, float * restrict s, const float * restrict x, const float * restrict y);
static void ggml_vec_dot_f16(const int n, float * restrict s, ggml_fp16_t * restrict x, ggml_fp16_t * restrict y);

static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = {
    [GGML_TYPE_I8] = {
        .type_name                = "i8",
        .blck_size                = 1,
        .type_size                = sizeof(int8_t),
        .is_quantized             = false,
    },
    [GGML_TYPE_I16] = {
        .type_name                = "i16",
        .blck_size                = 1,
        .type_size                = sizeof(int16_t),
        .is_quantized             = false,
    },
    [GGML_TYPE_I32] = {
        .type_name                = "i32",
        .blck_size                = 1,
        .type_size                = sizeof(int32_t),
        .is_quantized             = false,
    },
    [GGML_TYPE_F32] = {
        .type_name                = "f32",
        .blck_size                = 1,
        .type_size                = sizeof(float),
        .is_quantized             = false,
        .vec_dot                  = (ggml_vec_dot_t) ggml_vec_dot_f32,
        .vec_dot_type             = GGML_TYPE_F32,
    },
    [GGML_TYPE_F16] = {
        .type_name                = "f16",
        .blck_size                = 1,
        .type_size                = sizeof(ggml_fp16_t),
        .is_quantized             = false,
        .to_float                 = (ggml_to_float_t) ggml_fp16_to_fp32_row,
        .from_float               = (ggml_from_float_t) ggml_fp32_to_fp16_row,
        .from_float_reference     = (ggml_from_float_t) ggml_fp32_to_fp16_row,
        .vec_dot                  = (ggml_vec_dot_t) ggml_vec_dot_f16,
        .vec_dot_type             = GGML_TYPE_F16,
    },
    [GGML_TYPE_Q4_0] = {
        .type_name                = "q4_0",
        .blck_size                = QK4_0,
        .type_size                = sizeof(block_q4_0),
        .is_quantized             = true,
        .to_float                 = (ggml_to_float_t) dequantize_row_q4_0,
        .from_float               = quantize_row_q4_0,
        .from_float_reference     = (ggml_from_float_t) quantize_row_q4_0_reference,
        .vec_dot                  = ggml_vec_dot_q4_0_q8_0,
        .vec_dot_type             = GGML_TYPE_Q8_0,
    },
    [GGML_TYPE_Q4_1] = {
        .type_name                = "q4_1",
        .blck_size                = QK4_1,
        .type_size                = sizeof(block_q4_1),
        .is_quantized             = true,
        .to_float                 = (ggml_to_float_t) dequantize_row_q4_1,
        .from_float               = quantize_row_q4_1,
        .from_float_reference     = (ggml_from_float_t) quantize_row_q4_1_reference,
        .vec_dot                  = ggml_vec_dot_q4_1_q8_1,
        .vec_dot_type             = GGML_TYPE_Q8_1,
    },
    [4] = { // GGML_TYPE_Q4_2
        .type_name                = "DEPRECATED",
        .blck_size                = 0,
        .type_size                = 0,
        .is_quantized             = false,
        .to_float                 = NULL,
        .from_float               = NULL,
        .from_float_reference     = NULL,
        .vec_dot                  = NULL,
        .vec_dot_type             = GGML_TYPE_COUNT,
    },
    [5] = { // GGML_TYPE_Q4_3
        .type_name                = "DEPRECATED",
        .blck_size                = 0,
        .type_size                = 0,
        .is_quantized             = false,
        .to_float                 = NULL,
        .from_float               = NULL,
        .from_float_reference     = NULL,
        .vec_dot                  = NULL,
        .vec_dot_type             = GGML_TYPE_COUNT,
    },
    [GGML_TYPE_Q5_0] = {
        .type_name                = "q5_0",
        .blck_size                = QK5_0,
        .type_size                = sizeof(block_q5_0),
        .is_quantized             = true,
        .to_float                 = (ggml_to_float_t) dequantize_row_q5_0,
        .from_float               = quantize_row_q5_0,
        .from_float_reference     = (ggml_from_float_t) quantize_row_q5_0_reference,
        .vec_dot                  = ggml_vec_dot_q5_0_q8_0,
        .vec_dot_type             = GGML_TYPE_Q8_0,
    },
    [GGML_TYPE_Q5_1] = {
        .type_name                = "q5_1",
        .blck_size                = QK5_1,
        .type_size                = sizeof(block_q5_1),
        .is_quantized             = true,
        .to_float                 = (ggml_to_float_t) dequantize_row_q5_1,
        .from_float               = quantize_row_q5_1,
        .from_float_reference     = (ggml_from_float_t) quantize_row_q5_1_reference,
        .vec_dot                  = ggml_vec_dot_q5_1_q8_1,
        .vec_dot_type             = GGML_TYPE_Q8_1,
    },
    [GGML_TYPE_Q8_0] = {
        .type_name                = "q8_0",
        .blck_size                = QK8_0,
        .type_size                = sizeof(block_q8_0),
        .is_quantized             = true,
        .to_float                 = (ggml_to_float_t) dequantize_row_q8_0,
        .from_float               = quantize_row_q8_0,
        .from_float_reference     = (ggml_from_float_t) quantize_row_q8_0_reference,
        .vec_dot                  = ggml_vec_dot_q8_0_q8_0,
        .vec_dot_type             = GGML_TYPE_Q8_0,
    },
    [GGML_TYPE_Q8_1] = {
        .type_name                = "q8_1",
        .blck_size                = QK8_1,
        .type_size                = sizeof(block_q8_1),
        .is_quantized             = true,
        .from_float               = quantize_row_q8_1,
        .from_float_reference     = (ggml_from_float_t) quantize_row_q8_1_reference,
        .vec_dot_type             = GGML_TYPE_Q8_1,
    },
    [GGML_TYPE_Q2_K] = {
        .type_name                = "q2_K",
        .blck_size                = QK_K,
        .type_size                = sizeof(block_q2_K),
        .is_quantized             = true,
        .to_float                 = (ggml_to_float_t) dequantize_row_q2_K,
        .from_float               = quantize_row_q2_K,
        .from_float_reference     = (ggml_from_float_t) quantize_row_q2_K_reference,
        .vec_dot                  = ggml_vec_dot_q2_K_q8_K,
        .vec_dot_type             = GGML_TYPE_Q8_K,
    },
    [GGML_TYPE_Q3_K] = {
        .type_name                = "q3_K",
        .blck_size                = QK_K,
        .type_size                = sizeof(block_q3_K),
        .is_quantized             = true,
        .to_float                 = (ggml_to_float_t) dequantize_row_q3_K,
        .from_float               = quantize_row_q3_K,
        .from_float_reference     = (ggml_from_float_t) quantize_row_q3_K_reference,
        .vec_dot                  = ggml_vec_dot_q3_K_q8_K,
        .vec_dot_type             = GGML_TYPE_Q8_K,
    },
    [GGML_TYPE_Q4_K] = {
        .type_name                = "q4_K",
        .blck_size                = QK_K,
        .type_size                = sizeof(block_q4_K),
        .is_quantized             = true,
        .to_float                 = (ggml_to_float_t) dequantize_row_q4_K,
        .from_float               = quantize_row_q4_K,
        .from_float_reference     = (ggml_from_float_t) quantize_row_q4_K_reference,
        .vec_dot                  = ggml_vec_dot_q4_K_q8_K,
        .vec_dot_type             = GGML_TYPE_Q8_K,
    },
    [GGML_TYPE_Q5_K] = {
        .type_name                = "q5_K",
        .blck_size                = QK_K,
        .type_size                = sizeof(block_q5_K),
        .is_quantized             = true,
        .to_float                 = (ggml_to_float_t) dequantize_row_q5_K,
        .from_float               = quantize_row_q5_K,
        .from_float_reference     = (ggml_from_float_t) quantize_row_q5_K_reference,
        .vec_dot                  = ggml_vec_dot_q5_K_q8_K,
        .vec_dot_type             = GGML_TYPE_Q8_K,
    },
    [GGML_TYPE_Q6_K] = {
        .type_name                = "q6_K",
        .blck_size                = QK_K,
        .type_size                = sizeof(block_q6_K),
        .is_quantized             = true,
        .to_float                 = (ggml_to_float_t) dequantize_row_q6_K,
        .from_float               = quantize_row_q6_K,
        .from_float_reference     = (ggml_from_float_t) quantize_row_q6_K_reference,
        .vec_dot                  = ggml_vec_dot_q6_K_q8_K,
        .vec_dot_type             = GGML_TYPE_Q8_K,
    },
    [GGML_TYPE_Q8_K] = {
        .type_name                = "q8_K",
        .blck_size                = QK_K,
        .type_size                = sizeof(block_q8_K),
        .is_quantized             = true,
        .from_float               = quantize_row_q8_K,
    }
};
// For internal test use
ggml_type_traits_t ggml_internal_get_type_traits(enum ggml_type type) {
    GGML_ASSERT(type < GGML_TYPE_COUNT);
    return type_traits[type];
}
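
// Illustrative sketch (not from the original file): reading the dispatch table
// above through the test accessor. The function name is hypothetical.
static void ggml_example_print_traits(void) {
    const ggml_type_traits_t tt = ggml_internal_get_type_traits(GGML_TYPE_Q4_0);
    GGML_PRINT("%s: block size = %d, quantized = %d\n",
            tt.type_name, tt.blck_size, tt.is_quantized ? 1 : 0);
    // tt.vec_dot expects its second operand quantized to tt.vec_dot_type
    // (GGML_TYPE_Q8_0 for q4_0); this is how the matmul paths pair types.
}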
//
// simd mappings
//

#if defined(__ARM_NEON)
#if !defined(__aarch64__)

// 64-bit compatibility: vaddvq_f32 is an AArch64-only intrinsic, so provide a
// scalar fallback for 32-bit ARM
inline static float vaddvq_f32(float32x4_t v) {
    return vgetq_lane_f32(v, 0) + vgetq_lane_f32(v, 1) + vgetq_lane_f32(v, 2) + vgetq_lane_f32(v, 3);
}

#endif
#endif

// we define a common set of C macros which map to specific intrinsics based on the current architecture
// we then implement the fundamental computation operations below using only these macros
// adding support for new architectures requires defining the corresponding SIMD macros
//
// GGML_F32_STEP / GGML_F16_STEP
//   number of elements to process in a single step
//
// GGML_F32_EPR / GGML_F16_EPR
//   number of elements to fit in a single register
//
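
// Illustrative scalar sketch (not from the original file) of the pattern the
// macros below encode: keep GGML_F32_ARR = GGML_F32_STEP/GGML_F32_EPR partial
// sums in separate "registers", FMA into them, then reduce at the end. The
// constants are the example NEON values; the function name is hypothetical.
static float ggml_example_unrolled_dot(const int n, const float * x, const float * y) {
    enum { STEP = 16, EPR = 4, ARR = STEP/EPR };
    float sum[ARR] = { 0.0f };
    int i = 0;
    for (; i + STEP <= n; i += STEP) {       // one GGML_F32_STEP chunk per iteration
        for (int j = 0; j < ARR; ++j) {      // ARR independent accumulators
            for (int k = 0; k < EPR; ++k) {  // one "register" of EPR lanes (GGML_F32_VEC_FMA)
                sum[j] += x[i + j*EPR + k]*y[i + j*EPR + k];
            }
        }
    }
    float res = 0.0f;
    for (int j = 0; j < ARR; ++j) {          // GGML_F32_VEC_REDUCE
        res += sum[j];
    }
    for (; i < n; ++i) {                     // scalar leftovers
        res += x[i]*y[i];
    }
    return res;
}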
#if defined(__ARM_NEON) && defined(__ARM_FEATURE_FMA)

#define GGML_SIMD

// F32 NEON

#define GGML_F32_STEP 16
#define GGML_F32_EPR  4

#define GGML_F32x4               float32x4_t
#define GGML_F32x4_ZERO          vdupq_n_f32(0.0f)
#define GGML_F32x4_SET1(x)       vdupq_n_f32(x)
#define GGML_F32x4_LOAD          vld1q_f32
#define GGML_F32x4_STORE         vst1q_f32
#define GGML_F32x4_FMA(a, b, c)  vfmaq_f32(a, b, c)
#define GGML_F32x4_ADD           vaddq_f32
#define GGML_F32x4_MUL           vmulq_f32
#define GGML_F32x4_REDUCE_ONE(x) vaddvq_f32(x)
#define GGML_F32x4_REDUCE(res, x)            \
{                                            \
    int offset = GGML_F32_ARR >> 1;          \
    for (int i = 0; i < offset; ++i) {       \
        x[i] = vaddq_f32(x[i], x[offset+i]); \
    }                                        \
    offset >>= 1;                            \
    for (int i = 0; i < offset; ++i) {       \
        x[i] = vaddq_f32(x[i], x[offset+i]); \
    }                                        \
    offset >>= 1;                            \
    for (int i = 0; i < offset; ++i) {       \
        x[i] = vaddq_f32(x[i], x[offset+i]); \
    }                                        \
    res = GGML_F32x4_REDUCE_ONE(x[0]);       \
}

#define GGML_F32_VEC        GGML_F32x4
#define GGML_F32_VEC_ZERO   GGML_F32x4_ZERO
#define GGML_F32_VEC_SET1   GGML_F32x4_SET1
#define GGML_F32_VEC_LOAD   GGML_F32x4_LOAD
#define GGML_F32_VEC_STORE  GGML_F32x4_STORE
#define GGML_F32_VEC_FMA    GGML_F32x4_FMA
#define GGML_F32_VEC_ADD    GGML_F32x4_ADD
#define GGML_F32_VEC_MUL    GGML_F32x4_MUL
#define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE

// F16 NEON

#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
    #define GGML_F16_STEP 32
    #define GGML_F16_EPR  8

    #define GGML_F16x8              float16x8_t
    #define GGML_F16x8_ZERO         vdupq_n_f16(0.0f)
    #define GGML_F16x8_SET1(x)      vdupq_n_f16(x)
    #define GGML_F16x8_LOAD         vld1q_f16
    #define GGML_F16x8_STORE        vst1q_f16
    #define GGML_F16x8_FMA(a, b, c) vfmaq_f16(a, b, c)
    #define GGML_F16x8_ADD          vaddq_f16
    #define GGML_F16x8_MUL          vmulq_f16
    #define GGML_F16x8_REDUCE(res, x)                             \
    do {                                                          \
        int offset = GGML_F16_ARR >> 1;                           \
        for (int i = 0; i < offset; ++i) {                        \
            x[i] = vaddq_f16(x[i], x[offset+i]);                  \
        }                                                         \
        offset >>= 1;                                             \
        for (int i = 0; i < offset; ++i) {                        \
            x[i] = vaddq_f16(x[i], x[offset+i]);                  \
        }                                                         \
        offset >>= 1;                                             \
        for (int i = 0; i < offset; ++i) {                        \
            x[i] = vaddq_f16(x[i], x[offset+i]);                  \
        }                                                         \
        const float32x4_t t0 = vcvt_f32_f16(vget_low_f16 (x[0])); \
        const float32x4_t t1 = vcvt_f32_f16(vget_high_f16(x[0])); \
        res = (ggml_float) vaddvq_f32(vaddq_f32(t0, t1));         \
    } while (0)

    #define GGML_F16_VEC                GGML_F16x8
    #define GGML_F16_VEC_ZERO           GGML_F16x8_ZERO
    #define GGML_F16_VEC_SET1           GGML_F16x8_SET1
    #define GGML_F16_VEC_LOAD(p, i)     GGML_F16x8_LOAD(p)
    #define GGML_F16_VEC_STORE(p, r, i) GGML_F16x8_STORE(p, r[i])
    #define GGML_F16_VEC_FMA            GGML_F16x8_FMA
    #define GGML_F16_VEC_ADD            GGML_F16x8_ADD
    #define GGML_F16_VEC_MUL            GGML_F16x8_MUL
    #define GGML_F16_VEC_REDUCE         GGML_F16x8_REDUCE
#else
    // if FP16 vector arithmetic is not supported, we use FP32 instead
    // and take advantage of the vcvt_ functions to convert to/from FP16

    #define GGML_F16_STEP 16
    #define GGML_F16_EPR  4

    #define GGML_F32Cx4              float32x4_t
    #define GGML_F32Cx4_ZERO         vdupq_n_f32(0.0f)
    #define GGML_F32Cx4_SET1(x)      vdupq_n_f32(x)
    #define GGML_F32Cx4_LOAD(x)      vcvt_f32_f16(vld1_f16(x))
    #define GGML_F32Cx4_STORE(x, y)  vst1_f16(x, vcvt_f16_f32(y))
    #define GGML_F32Cx4_FMA(a, b, c) vfmaq_f32(a, b, c)
    #define GGML_F32Cx4_ADD          vaddq_f32
    #define GGML_F32Cx4_MUL          vmulq_f32
    #define GGML_F32Cx4_REDUCE       GGML_F32x4_REDUCE

    #define GGML_F16_VEC                GGML_F32Cx4
    #define GGML_F16_VEC_ZERO           GGML_F32Cx4_ZERO
    #define GGML_F16_VEC_SET1           GGML_F32Cx4_SET1
    #define GGML_F16_VEC_LOAD(p, i)     GGML_F32Cx4_LOAD(p)
    #define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx4_STORE(p, r[i])
    #define GGML_F16_VEC_FMA            GGML_F32Cx4_FMA
    #define GGML_F16_VEC_ADD            GGML_F32Cx4_ADD
    #define GGML_F16_VEC_MUL            GGML_F32Cx4_MUL
    #define GGML_F16_VEC_REDUCE         GGML_F32Cx4_REDUCE
#endif
#elif defined(__AVX__)

#define GGML_SIMD

// F32 AVX

#define GGML_F32_STEP 32
#define GGML_F32_EPR  8

#define GGML_F32x8         __m256
#define GGML_F32x8_ZERO    _mm256_setzero_ps()
#define GGML_F32x8_SET1(x) _mm256_set1_ps(x)
#define GGML_F32x8_LOAD    _mm256_loadu_ps
#define GGML_F32x8_STORE   _mm256_storeu_ps
#if defined(__FMA__)
    #define GGML_F32x8_FMA(a, b, c) _mm256_fmadd_ps(b, c, a)
#else
    #define GGML_F32x8_FMA(a, b, c) _mm256_add_ps(_mm256_mul_ps(b, c), a)
#endif
#define GGML_F32x8_ADD     _mm256_add_ps
#define GGML_F32x8_MUL     _mm256_mul_ps
#define GGML_F32x8_REDUCE(res, x)                                 \
do {                                                              \
    int offset = GGML_F32_ARR >> 1;                               \
    for (int i = 0; i < offset; ++i) {                            \
        x[i] = _mm256_add_ps(x[i], x[offset+i]);                  \
    }                                                             \
    offset >>= 1;                                                 \
    for (int i = 0; i < offset; ++i) {                            \
        x[i] = _mm256_add_ps(x[i], x[offset+i]);                  \
    }                                                             \
    offset >>= 1;                                                 \
    for (int i = 0; i < offset; ++i) {                            \
        x[i] = _mm256_add_ps(x[i], x[offset+i]);                  \
    }                                                             \
    const __m128 t0 = _mm_add_ps(_mm256_castps256_ps128(x[0]),    \
                                 _mm256_extractf128_ps(x[0], 1)); \
    const __m128 t1 = _mm_hadd_ps(t0, t0);                        \
    res = _mm_cvtss_f32(_mm_hadd_ps(t1, t1));                     \
} while (0)
// TODO: is this optimal ?

#define GGML_F32_VEC        GGML_F32x8
#define GGML_F32_VEC_ZERO   GGML_F32x8_ZERO
#define GGML_F32_VEC_SET1   GGML_F32x8_SET1
#define GGML_F32_VEC_LOAD   GGML_F32x8_LOAD
#define GGML_F32_VEC_STORE  GGML_F32x8_STORE
#define GGML_F32_VEC_FMA    GGML_F32x8_FMA
#define GGML_F32_VEC_ADD    GGML_F32x8_ADD
#define GGML_F32_VEC_MUL    GGML_F32x8_MUL
#define GGML_F32_VEC_REDUCE GGML_F32x8_REDUCE

// F16 AVX

#define GGML_F16_STEP 32
#define GGML_F16_EPR  8

// F16 arithmetic is not supported by AVX, so we use F32 instead

#define GGML_F32Cx8         __m256
#define GGML_F32Cx8_ZERO    _mm256_setzero_ps()
#define GGML_F32Cx8_SET1(x) _mm256_set1_ps(x)

#if defined(__F16C__)
// the _mm256_cvt intrinsics require F16C
#define GGML_F32Cx8_LOAD(x)     _mm256_cvtph_ps(_mm_loadu_si128((__m128i *)(x)))
#define GGML_F32Cx8_STORE(x, y) _mm_storeu_si128((__m128i *)(x), _mm256_cvtps_ph(y, 0))
#else
static inline __m256 __avx_f32cx8_load(ggml_fp16_t *x) {
    float tmp[8];

    for (int i = 0; i < 8; i++) {
        tmp[i] = GGML_FP16_TO_FP32(x[i]);
    }

    return _mm256_loadu_ps(tmp);
}
static inline void __avx_f32cx8_store(ggml_fp16_t *x, __m256 y) {
    float arr[8];

    _mm256_storeu_ps(arr, y);

    for (int i = 0; i < 8; i++)
        x[i] = GGML_FP32_TO_FP16(arr[i]);
}
#define GGML_F32Cx8_LOAD(x)     __avx_f32cx8_load(x)
#define GGML_F32Cx8_STORE(x, y) __avx_f32cx8_store(x, y)
#endif

#define GGML_F32Cx8_FMA    GGML_F32x8_FMA
#define GGML_F32Cx8_ADD    _mm256_add_ps
#define GGML_F32Cx8_MUL    _mm256_mul_ps
#define GGML_F32Cx8_REDUCE GGML_F32x8_REDUCE

#define GGML_F16_VEC                GGML_F32Cx8
#define GGML_F16_VEC_ZERO           GGML_F32Cx8_ZERO
#define GGML_F16_VEC_SET1           GGML_F32Cx8_SET1
#define GGML_F16_VEC_LOAD(p, i)     GGML_F32Cx8_LOAD(p)
#define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx8_STORE(p, r[i])
#define GGML_F16_VEC_FMA            GGML_F32Cx8_FMA
#define GGML_F16_VEC_ADD            GGML_F32Cx8_ADD
#define GGML_F16_VEC_MUL            GGML_F32Cx8_MUL
#define GGML_F16_VEC_REDUCE         GGML_F32Cx8_REDUCE
#elif defined(__POWER9_VECTOR__)

#define GGML_SIMD

// F32 POWER9

#define GGML_F32_STEP 32
#define GGML_F32_EPR  4

#define GGML_F32x4              vector float
#define GGML_F32x4_ZERO         0.0f
#define GGML_F32x4_SET1         vec_splats
#define GGML_F32x4_LOAD(p)      vec_xl(0, p)
#define GGML_F32x4_STORE(p, r)  vec_xst(r, 0, p)
#define GGML_F32x4_FMA(a, b, c) vec_madd(b, c, a)
#define GGML_F32x4_ADD          vec_add
#define GGML_F32x4_MUL          vec_mul
#define GGML_F32x4_REDUCE(res, x)          \
{                                          \
    int offset = GGML_F32_ARR >> 1;        \
    for (int i = 0; i < offset; ++i) {     \
        x[i] = vec_add(x[i], x[offset+i]); \
    }                                      \
    offset >>= 1;                          \
    for (int i = 0; i < offset; ++i) {     \
        x[i] = vec_add(x[i], x[offset+i]); \
    }                                      \
    offset >>= 1;                          \
    for (int i = 0; i < offset; ++i) {     \
        x[i] = vec_add(x[i], x[offset+i]); \
    }                                      \
    res = vec_extract(x[0], 0) +           \
          vec_extract(x[0], 1) +           \
          vec_extract(x[0], 2) +           \
          vec_extract(x[0], 3);            \
}

#define GGML_F32_VEC        GGML_F32x4
#define GGML_F32_VEC_ZERO   GGML_F32x4_ZERO
#define GGML_F32_VEC_SET1   GGML_F32x4_SET1
#define GGML_F32_VEC_LOAD   GGML_F32x4_LOAD
#define GGML_F32_VEC_STORE  GGML_F32x4_STORE
#define GGML_F32_VEC_FMA    GGML_F32x4_FMA
#define GGML_F32_VEC_ADD    GGML_F32x4_ADD
#define GGML_F32_VEC_MUL    GGML_F32x4_MUL
#define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE

// F16 POWER9

#define GGML_F16_STEP       GGML_F32_STEP
#define GGML_F16_EPR        GGML_F32_EPR
#define GGML_F16_VEC        GGML_F32x4
#define GGML_F16_VEC_ZERO   GGML_F32x4_ZERO
#define GGML_F16_VEC_SET1   GGML_F32x4_SET1
#define GGML_F16_VEC_FMA    GGML_F32x4_FMA
#define GGML_F16_VEC_REDUCE GGML_F32x4_REDUCE
// Use vec_xl, not vec_ld, in case the load address is not aligned.
#define GGML_F16_VEC_LOAD(p, i) (i & 0x1) ?                     \
    vec_extract_fp32_from_shorth(vec_xl(0, p - GGML_F16_EPR)) : \
    vec_extract_fp32_from_shortl(vec_xl(0, p))
#define GGML_ENDIAN_BYTE(i) ((unsigned char *)&(uint16_t){1})[i]
#define GGML_F16_VEC_STORE(p, r, i)                             \
    if (i & 0x1)                                                \
        vec_xst(vec_pack_to_short_fp32(r[i - GGML_ENDIAN_BYTE(1)], \
                                       r[i - GGML_ENDIAN_BYTE(0)]), \
                0, p - GGML_F16_EPR)
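// One reading of the two macros above (illustrative, not normative): each
// 16-byte vec_xl covers two GGML_F16_EPR-sized groups of halves, so an even
// register index i converts the low group while an odd i re-loads from
// p - GGML_F16_EPR and converts the high group; stores are deferred until
// odd i, when two F32 vectors are packed back into one vector of 8 halves.
// GGML_ENDIAN_BYTE selects the pair order at run time from host endianness.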
#elif defined(__wasm_simd128__)

#define GGML_SIMD

// F32 WASM

#define GGML_F32_STEP 16
#define GGML_F32_EPR  4

#define GGML_F32x4              v128_t
#define GGML_F32x4_ZERO         wasm_f32x4_splat(0.0f)
#define GGML_F32x4_SET1(x)      wasm_f32x4_splat(x)
#define GGML_F32x4_LOAD         wasm_v128_load
#define GGML_F32x4_STORE        wasm_v128_store
#define GGML_F32x4_FMA(a, b, c) wasm_f32x4_add(wasm_f32x4_mul(b, c), a)
#define GGML_F32x4_ADD          wasm_f32x4_add
#define GGML_F32x4_MUL          wasm_f32x4_mul
#define GGML_F32x4_REDUCE(res, x)                 \
{                                                 \
    int offset = GGML_F32_ARR >> 1;               \
    for (int i = 0; i < offset; ++i) {            \
        x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
    }                                             \
    offset >>= 1;                                 \
    for (int i = 0; i < offset; ++i) {            \
        x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
    }                                             \
    offset >>= 1;                                 \
    for (int i = 0; i < offset; ++i) {            \
        x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
    }                                             \
    res = wasm_f32x4_extract_lane(x[0], 0) +      \
          wasm_f32x4_extract_lane(x[0], 1) +      \
          wasm_f32x4_extract_lane(x[0], 2) +      \
          wasm_f32x4_extract_lane(x[0], 3);       \
}

#define GGML_F32_VEC        GGML_F32x4
#define GGML_F32_VEC_ZERO   GGML_F32x4_ZERO
#define GGML_F32_VEC_SET1   GGML_F32x4_SET1
#define GGML_F32_VEC_LOAD   GGML_F32x4_LOAD
#define GGML_F32_VEC_STORE  GGML_F32x4_STORE
#define GGML_F32_VEC_FMA    GGML_F32x4_FMA
#define GGML_F32_VEC_ADD    GGML_F32x4_ADD
#define GGML_F32_VEC_MUL    GGML_F32x4_MUL
#define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE

// F16 WASM

#define GGML_F16_STEP 16
#define GGML_F16_EPR  4

inline static v128_t __wasm_f16x4_load(const ggml_fp16_t * p) {
    float tmp[4];

    tmp[0] = GGML_FP16_TO_FP32(p[0]);
    tmp[1] = GGML_FP16_TO_FP32(p[1]);
    tmp[2] = GGML_FP16_TO_FP32(p[2]);
    tmp[3] = GGML_FP16_TO_FP32(p[3]);

    return wasm_v128_load(tmp);
}

inline static void __wasm_f16x4_store(ggml_fp16_t * p, v128_t x) {
    float tmp[4];

    wasm_v128_store(tmp, x);

    p[0] = GGML_FP32_TO_FP16(tmp[0]);
    p[1] = GGML_FP32_TO_FP16(tmp[1]);
    p[2] = GGML_FP32_TO_FP16(tmp[2]);
    p[3] = GGML_FP32_TO_FP16(tmp[3]);
}

#define GGML_F16x4             v128_t
#define GGML_F16x4_ZERO        wasm_f32x4_splat(0.0f)
#define GGML_F16x4_SET1(x)     wasm_f32x4_splat(x)
#define GGML_F16x4_LOAD(x)     __wasm_f16x4_load(x)
#define GGML_F16x4_STORE(x, y) __wasm_f16x4_store(x, y)
#define GGML_F16x4_FMA         GGML_F32x4_FMA
#define GGML_F16x4_ADD         wasm_f32x4_add
#define GGML_F16x4_MUL         wasm_f32x4_mul
#define GGML_F16x4_REDUCE(res, x)                 \
{                                                 \
    int offset = GGML_F16_ARR >> 1;               \
    for (int i = 0; i < offset; ++i) {            \
        x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
    }                                             \
    offset >>= 1;                                 \
    for (int i = 0; i < offset; ++i) {            \
        x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
    }                                             \
    offset >>= 1;                                 \
    for (int i = 0; i < offset; ++i) {            \
        x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
    }                                             \
    res = wasm_f32x4_extract_lane(x[0], 0) +      \
          wasm_f32x4_extract_lane(x[0], 1) +      \
          wasm_f32x4_extract_lane(x[0], 2) +      \
          wasm_f32x4_extract_lane(x[0], 3);       \
}

#define GGML_F16_VEC                GGML_F16x4
#define GGML_F16_VEC_ZERO           GGML_F16x4_ZERO
#define GGML_F16_VEC_SET1           GGML_F16x4_SET1
#define GGML_F16_VEC_LOAD(p, i)     GGML_F16x4_LOAD(p)
#define GGML_F16_VEC_STORE(p, r, i) GGML_F16x4_STORE(p, r[i])
#define GGML_F16_VEC_FMA            GGML_F16x4_FMA
#define GGML_F16_VEC_ADD            GGML_F16x4_ADD
#define GGML_F16_VEC_MUL            GGML_F16x4_MUL
#define GGML_F16_VEC_REDUCE         GGML_F16x4_REDUCE
#elif defined(__SSE3__)

#define GGML_SIMD

// F32 SSE

#define GGML_F32_STEP 32
#define GGML_F32_EPR  4

#define GGML_F32x4         __m128
#define GGML_F32x4_ZERO    _mm_setzero_ps()
#define GGML_F32x4_SET1(x) _mm_set1_ps(x)
#define GGML_F32x4_LOAD    _mm_loadu_ps
#define GGML_F32x4_STORE   _mm_storeu_ps
#if defined(__FMA__)
// TODO: Does this work?
#define GGML_F32x4_FMA(a, b, c) _mm_fmadd_ps(b, c, a)
#else
#define GGML_F32x4_FMA(a, b, c) _mm_add_ps(_mm_mul_ps(b, c), a)
#endif
#define GGML_F32x4_ADD     _mm_add_ps
#define GGML_F32x4_MUL     _mm_mul_ps
#define GGML_F32x4_REDUCE(res, x)              \
{                                              \
    int offset = GGML_F32_ARR >> 1;            \
    for (int i = 0; i < offset; ++i) {         \
        x[i] = _mm_add_ps(x[i], x[offset+i]);  \
    }                                          \
    offset >>= 1;                              \
    for (int i = 0; i < offset; ++i) {         \
        x[i] = _mm_add_ps(x[i], x[offset+i]);  \
    }                                          \
    offset >>= 1;                              \
    for (int i = 0; i < offset; ++i) {         \
        x[i] = _mm_add_ps(x[i], x[offset+i]);  \
    }                                          \
    const __m128 t0 = _mm_hadd_ps(x[0], x[0]); \
    res = _mm_cvtss_f32(_mm_hadd_ps(t0, t0));  \
}
// TODO: is this optimal ?

#define GGML_F32_VEC        GGML_F32x4
#define GGML_F32_VEC_ZERO   GGML_F32x4_ZERO
#define GGML_F32_VEC_SET1   GGML_F32x4_SET1
#define GGML_F32_VEC_LOAD   GGML_F32x4_LOAD
#define GGML_F32_VEC_STORE  GGML_F32x4_STORE
#define GGML_F32_VEC_FMA    GGML_F32x4_FMA
#define GGML_F32_VEC_ADD    GGML_F32x4_ADD
#define GGML_F32_VEC_MUL    GGML_F32x4_MUL
#define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE

// F16 SSE

#define GGML_F16_STEP 32
#define GGML_F16_EPR  4

static inline __m128 __sse_f16x4_load(ggml_fp16_t *x) {
    float tmp[4];

    tmp[0] = GGML_FP16_TO_FP32(x[0]);
    tmp[1] = GGML_FP16_TO_FP32(x[1]);
    tmp[2] = GGML_FP16_TO_FP32(x[2]);
    tmp[3] = GGML_FP16_TO_FP32(x[3]);

    return _mm_loadu_ps(tmp);
}

static inline void __sse_f16x4_store(ggml_fp16_t *x, __m128 y) {
    float arr[4];

    _mm_storeu_ps(arr, y);

    x[0] = GGML_FP32_TO_FP16(arr[0]);
    x[1] = GGML_FP32_TO_FP16(arr[1]);
    x[2] = GGML_FP32_TO_FP16(arr[2]);
    x[3] = GGML_FP32_TO_FP16(arr[3]);
}

#define GGML_F32Cx4             __m128
#define GGML_F32Cx4_ZERO        _mm_setzero_ps()
#define GGML_F32Cx4_SET1(x)     _mm_set1_ps(x)
#define GGML_F32Cx4_LOAD(x)     __sse_f16x4_load(x)
#define GGML_F32Cx4_STORE(x, y) __sse_f16x4_store(x, y)
#define GGML_F32Cx4_FMA         GGML_F32x4_FMA
#define GGML_F32Cx4_ADD         _mm_add_ps
#define GGML_F32Cx4_MUL         _mm_mul_ps
#define GGML_F32Cx4_REDUCE      GGML_F32x4_REDUCE

#define GGML_F16_VEC                GGML_F32Cx4
#define GGML_F16_VEC_ZERO           GGML_F32Cx4_ZERO
#define GGML_F16_VEC_SET1           GGML_F32Cx4_SET1
#define GGML_F16_VEC_LOAD(p, i)     GGML_F32Cx4_LOAD(p)
#define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx4_STORE(p, r[i])
#define GGML_F16_VEC_FMA            GGML_F32Cx4_FMA
#define GGML_F16_VEC_ADD            GGML_F32Cx4_ADD
#define GGML_F16_VEC_MUL            GGML_F32Cx4_MUL
#define GGML_F16_VEC_REDUCE         GGML_F32Cx4_REDUCE

#endif
// GGML_F32_ARR / GGML_F16_ARR
//   number of registers to use per step
#ifdef GGML_SIMD
#define GGML_F32_ARR (GGML_F32_STEP/GGML_F32_EPR)
#define GGML_F16_ARR (GGML_F16_STEP/GGML_F16_EPR)
#endif
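// Worked example (illustrative): on AVX, GGML_F32_STEP = 32 and
// GGML_F32_EPR = 8, so GGML_F32_ARR = 32/8 = 4 -- each step of the inner
// loops below processes 32 floats spread across 4 vector accumulators.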
//
// fundamental operations
//

inline static void ggml_vec_set_i8 (const int n, int8_t  * x, const int8_t  v) { for (int i = 0; i < n; ++i) x[i] = v; }
inline static void ggml_vec_set_i16(const int n, int16_t * x, const int16_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
inline static void ggml_vec_set_i32(const int n, int32_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
inline static void ggml_vec_set_f16(const int n, ggml_fp16_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; }

inline static void ggml_vec_add_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i]  = x[i] + y[i]; }
inline static void ggml_vec_add1_f32(const int n, float * z, const float * x, const float   v) { for (int i = 0; i < n; ++i) z[i]  = x[i] + v;    }
inline static void ggml_vec_acc_f32 (const int n, float * y, const float * x)                  { for (int i = 0; i < n; ++i) y[i] += x[i];        }
inline static void ggml_vec_acc1_f32(const int n, float * y, const float   v)                  { for (int i = 0; i < n; ++i) y[i] += v;           }
inline static void ggml_vec_sub_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i]  = x[i] - y[i]; }
inline static void ggml_vec_set_f32 (const int n, float * x, const float   v)                  { for (int i = 0; i < n; ++i) x[i]  = v;           }
inline static void ggml_vec_cpy_f32 (const int n, float * y, const float * x)                  { for (int i = 0; i < n; ++i) y[i]  = x[i];        }
inline static void ggml_vec_neg_f32 (const int n, float * y, const float * x)                  { for (int i = 0; i < n; ++i) y[i]  = -x[i];       }
inline static void ggml_vec_mul_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i]  = x[i]*y[i];   }
inline static void ggml_vec_div_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i]  = x[i]/y[i];   }
static void ggml_vec_dot_f32(const int n, float * restrict s, const float * restrict x, const float * restrict y) {
#ifdef GGML_SIMD
    float sumf = 0.0f;
    const int np = (n & ~(GGML_F32_STEP - 1));

    GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO };

    GGML_F32_VEC ax[GGML_F32_ARR];
    GGML_F32_VEC ay[GGML_F32_ARR];

    for (int i = 0; i < np; i += GGML_F32_STEP) {
        for (int j = 0; j < GGML_F32_ARR; j++) {
            ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR);
            ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);

            sum[j] = GGML_F32_VEC_FMA(sum[j], ax[j], ay[j]);
        }
    }

    // reduce sum0..sum3 to sum0
    GGML_F32_VEC_REDUCE(sumf, sum);

    // leftovers
    for (int i = np; i < n; ++i) {
        sumf += x[i]*y[i];
    }
#else
    // scalar
    ggml_float sumf = 0.0;
    for (int i = 0; i < n; ++i) {
        sumf += (ggml_float)(x[i]*y[i]);
    }
#endif

    *s = sumf;
}
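// Example usage (hypothetical snippet, not part of ggml's public API):
//
//   float s;
//   float a[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
//   ggml_vec_dot_f32(8, &s, a, a);  // s == 204.0f, the squared L2 norm of a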
static void ggml_vec_dot_f16(const int n, float * restrict s, ggml_fp16_t * restrict x, ggml_fp16_t * restrict y) {
    ggml_float sumf = 0.0;

#if defined(GGML_SIMD)
    const int np = (n & ~(GGML_F16_STEP - 1));

    GGML_F16_VEC sum[GGML_F16_ARR] = { GGML_F16_VEC_ZERO };

    GGML_F16_VEC ax[GGML_F16_ARR];
    GGML_F16_VEC ay[GGML_F16_ARR];

    for (int i = 0; i < np; i += GGML_F16_STEP) {
        for (int j = 0; j < GGML_F16_ARR; j++) {
            ax[j] = GGML_F16_VEC_LOAD(x + i + j*GGML_F16_EPR, j);
            ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j);

            sum[j] = GGML_F16_VEC_FMA(sum[j], ax[j], ay[j]);
        }
    }

    // reduce sum0..sum3 to sum0
    GGML_F16_VEC_REDUCE(sumf, sum);

    // leftovers
    for (int i = np; i < n; ++i) {
        sumf += (ggml_float)(GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i]));
    }
#else
    for (int i = 0; i < n; ++i) {
        sumf += (ggml_float)(GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i]));
    }
#endif

    *s = sumf;
}
// compute GGML_VEC_DOT_UNROLL dot products at once
// xs - x row stride in bytes
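// e.g. (illustrative): for a contiguous row-major f16 matrix with ne0
// columns, xs would be ne0*sizeof(ggml_fp16_t), so x[0..GGML_VEC_DOT_UNROLL-1]
// below end up pointing at GGML_VEC_DOT_UNROLL consecutive rows of xv, each
// dotted against the same vector y.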
inline static void ggml_vec_dot_f16_unroll(const int n, const int xs, float * restrict s, void * restrict xv, ggml_fp16_t * restrict y) {
    ggml_float sumf[GGML_VEC_DOT_UNROLL] = { 0.0 };

    ggml_fp16_t * restrict x[GGML_VEC_DOT_UNROLL];

    for (int i = 0; i < GGML_VEC_DOT_UNROLL; ++i) {
        x[i] = (ggml_fp16_t *) ((char *) xv + i*xs);
    }

#if defined(GGML_SIMD)
    const int np = (n & ~(GGML_F16_STEP - 1));

    GGML_F16_VEC sum[GGML_VEC_DOT_UNROLL][GGML_F16_ARR] = { { GGML_F16_VEC_ZERO } };

    GGML_F16_VEC ax[GGML_F16_ARR];
    GGML_F16_VEC ay[GGML_F16_ARR];

    for (int i = 0; i < np; i += GGML_F16_STEP) {
        for (int j = 0; j < GGML_F16_ARR; j++) {
            ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j);

            for (int k = 0; k < GGML_VEC_DOT_UNROLL; ++k) {
                ax[j] = GGML_F16_VEC_LOAD(x[k] + i + j*GGML_F16_EPR, j);

                sum[k][j] = GGML_F16_VEC_FMA(sum[k][j], ax[j], ay[j]);
            }
        }
    }

    // reduce sum0..sum3 to sum0
    for (int k = 0; k < GGML_VEC_DOT_UNROLL; ++k) {
        GGML_F16_VEC_REDUCE(sumf[k], sum[k]);
    }

    // leftovers
    for (int i = np; i < n; ++i) {
        for (int j = 0; j < GGML_VEC_DOT_UNROLL; ++j) {
            sumf[j] += (ggml_float)(GGML_FP16_TO_FP32(x[j][i])*GGML_FP16_TO_FP32(y[i]));
        }
    }
#else
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < GGML_VEC_DOT_UNROLL; ++j) {
            sumf[j] += (ggml_float)(GGML_FP16_TO_FP32(x[j][i])*GGML_FP16_TO_FP32(y[i]));
        }
    }
#endif

    for (int i = 0; i < GGML_VEC_DOT_UNROLL; ++i) {
        s[i] = sumf[i];
    }
}
inline static void ggml_vec_mad_f32(const int n, float * restrict y, const float * restrict x, const float v) {
#if defined(GGML_SIMD)
    const int np = (n & ~(GGML_F32_STEP - 1));

    GGML_F32_VEC vx = GGML_F32_VEC_SET1(v);

    GGML_F32_VEC ax[GGML_F32_ARR];
    GGML_F32_VEC ay[GGML_F32_ARR];

    for (int i = 0; i < np; i += GGML_F32_STEP) {
        for (int j = 0; j < GGML_F32_ARR; j++) {
            ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR);
            ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
            ay[j] = GGML_F32_VEC_FMA(ay[j], ax[j], vx);

            GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]);
        }
    }

    // leftovers
    for (int i = np; i < n; ++i) {
        y[i] += x[i]*v;
    }
#else
    // scalar
    for (int i = 0; i < n; ++i) {
        y[i] += x[i]*v;
    }
#endif
}
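// ggml_vec_mad_f32 is the classic axpy kernel: y[i] += x[i]*v.
// Example (hypothetical):
//
//   float y[4] = { 1, 1, 1, 1 };
//   const float x[4] = { 1, 2, 3, 4 };
//   ggml_vec_mad_f32(4, y, x, 2.0f);  // y == { 3, 5, 7, 9 }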
// xs and vs are byte strides of x and v
inline static void ggml_vec_mad_f32_unroll(const int n, const int xs, const int vs, float * restrict y, const float * restrict xv, const float * restrict vv) {
    const float * restrict x[GGML_VEC_MAD_UNROLL];
    const float * restrict v[GGML_VEC_MAD_UNROLL];

    for (int i = 0; i < GGML_VEC_MAD_UNROLL; ++i) {
        x[i] = (const float *) ((const char *) xv + i*xs);
        v[i] = (const float *) ((const char *) vv + i*vs);
    }

#if defined(GGML_SIMD)
    const int np = (n & ~(GGML_F32_STEP - 1));

    GGML_F32_VEC vx[GGML_VEC_MAD_UNROLL];

    for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) {
        vx[k] = GGML_F32_VEC_SET1(v[k][0]);
    }

    GGML_F32_VEC ax[GGML_VEC_MAD_UNROLL][GGML_F32_ARR];
    GGML_F32_VEC ay[GGML_F32_ARR];

    for (int i = 0; i < np; i += GGML_F32_STEP) {
        for (int j = 0; j < GGML_F32_ARR; j++) {
            ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);

            for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) {
                ax[k][j] = GGML_F32_VEC_LOAD(x[k] + i + j*GGML_F32_EPR);
                ay[j] = GGML_F32_VEC_FMA(ay[j], ax[k][j], vx[k]);
            }

            GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]);
        }
    }

    // leftovers
    for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) {
        for (int i = np; i < n; ++i) {
            y[i] += x[k][i]*v[k][0];
        }
    }
#else
    // scalar
    for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) {
        for (int i = 0; i < n; ++i) {
            y[i] += x[k][i]*v[k][0];
        }
    }
#endif
}
//inline static void ggml_vec_scale_f32(const int n, float * y, const float v) { for (int i = 0; i < n; ++i) y[i] *= v; }
inline static void ggml_vec_scale_f32(const int n, float * y, const float v) {
#if defined(GGML_USE_ACCELERATE)
    vDSP_vsmul(y, 1, &v, y, 1, n);
#elif defined(GGML_SIMD)
    const int np = (n & ~(GGML_F32_STEP - 1));

    GGML_F32_VEC vx = GGML_F32_VEC_SET1(v);

    GGML_F32_VEC ay[GGML_F32_ARR];

    for (int i = 0; i < np; i += GGML_F32_STEP) {
        for (int j = 0; j < GGML_F32_ARR; j++) {
            ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
            ay[j] = GGML_F32_VEC_MUL(ay[j], vx);

            GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]);
        }
    }

    // leftovers
    for (int i = np; i < n; ++i) {
        y[i] *= v;
    }
#else
    // scalar
    for (int i = 0; i < n; ++i) {
        y[i] *= v;
    }
#endif
}
inline static void ggml_vec_norm_f32 (const int n, float * s, const float * x) { ggml_vec_dot_f32(n, s, x, x); *s = sqrtf(*s); }
inline static void ggml_vec_sqr_f32  (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i]*x[i];   }
inline static void ggml_vec_sqrt_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = sqrtf(x[i]); }
inline static void ggml_vec_log_f32  (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = logf(x[i]);  }
inline static void ggml_vec_abs_f32  (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = fabsf(x[i]); }
inline static void ggml_vec_sgn_f32  (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 1.f : ((x[i] < 0.f) ? -1.f : 0.f); }
inline static void ggml_vec_step_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 1.f : 0.f; }
inline static void ggml_vec_tanh_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = tanhf(x[i]); }
inline static void ggml_vec_elu_f32  (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? x[i] : expf(x[i])-1; }
inline static void ggml_vec_relu_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? x[i] : 0.f; }
inline static void ggml_vec_leaky_relu_f32 (const int n, float * y, const float * x, const float ns) { for (int i = 0; i < n; ++i) y[i] = ((x[i] > 0.f) ? x[i] : 0.f) + ns * ((x[i] < 0.0f) ? x[i] : 0.f); }

static const float GELU_COEF_A     = 0.044715f;
static const float GELU_QUICK_COEF = -1.702f;
static const float SQRT_2_OVER_PI  = 0.79788456080286535587989211986876f;

inline static float ggml_gelu_f32(float x) {
    return 0.5f*x*(1.0f + tanhf(SQRT_2_OVER_PI*x*(1.0f + GELU_COEF_A*x*x)));
}
inline static void ggml_vec_gelu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
    const uint16_t * i16 = (const uint16_t *) x;
    for (int i = 0; i < n; ++i) {
        y[i] = ggml_table_gelu_f16[i16[i]];
    }
}

#ifdef GGML_GELU_FP16
inline static void ggml_vec_gelu_f32(const int n, float * y, const float * x) {
    uint16_t t;
    for (int i = 0; i < n; ++i) {
        ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
        memcpy(&t, &fp16, sizeof(uint16_t));
        y[i] = GGML_FP16_TO_FP32(ggml_table_gelu_f16[t]);
    }
}
#else
inline static void ggml_vec_gelu_f32(const int n, float * y, const float * x) {
    for (int i = 0; i < n; ++i) {
        y[i] = ggml_gelu_f32(x[i]);
    }
}
#endif
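// Note (illustrative): the GGML_GELU_FP16 path above exploits the fact that an
// f16 has only 2^16 bit patterns, so ggml_table_gelu_f16 can precompute GELU
// for every possible input; the memcpy reinterprets the f16 bits as a uint16_t
// table index without violating strict aliasing. The same trick is used for
// the quick-GELU and SiLU tables below.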
inline static float ggml_gelu_quick_f32(float x) {
    return x*(1.0f/(1.0f+expf(GELU_QUICK_COEF*x)));
}

//inline static void ggml_vec_gelu_quick_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
//    const uint16_t * i16 = (const uint16_t *) x;
//    for (int i = 0; i < n; ++i) {
//        y[i] = ggml_table_gelu_quick_f16[i16[i]];
//    }
//}

#ifdef GGML_GELU_QUICK_FP16
inline static void ggml_vec_gelu_quick_f32(const int n, float * y, const float * x) {
    uint16_t t;
    for (int i = 0; i < n; ++i) {
        ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
        memcpy(&t, &fp16, sizeof(uint16_t));
        y[i] = GGML_FP16_TO_FP32(ggml_table_gelu_quick_f16[t]);
    }
}
#else
inline static void ggml_vec_gelu_quick_f32(const int n, float * y, const float * x) {
    for (int i = 0; i < n; ++i) {
        y[i] = ggml_gelu_quick_f32(x[i]);
    }
}
#endif
// Sigmoid Linear Unit (SiLU) function
inline static float ggml_silu_f32(float x) {
    return x/(1.0f + expf(-x));
}

//inline static void ggml_vec_silu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
//    const uint16_t * i16 = (const uint16_t *) x;
//    for (int i = 0; i < n; ++i) {
//        y[i] = ggml_table_silu_f16[i16[i]];
//    }
//}

#ifdef GGML_SILU_FP16
inline static void ggml_vec_silu_f32(const int n, float * y, const float * x) {
    uint16_t t;
    for (int i = 0; i < n; ++i) {
        ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
        memcpy(&t, &fp16, sizeof(uint16_t));
        y[i] = GGML_FP16_TO_FP32(ggml_table_silu_f16[t]);
    }
}
#else
inline static void ggml_vec_silu_f32(const int n, float * y, const float * x) {
    for (int i = 0; i < n; ++i) {
        y[i] = ggml_silu_f32(x[i]);
    }
}
#endif

inline static float ggml_silu_backward_f32(float x, float dy) {
    const float s = 1.0f/(1.0f + expf(-x));
    return dy*s*(1.0f + x*(1.0f - s));
}
#ifdef GGML_SILU_FP16
inline static void ggml_vec_silu_backward_f32(const int n, float * dx, const float * x, const float * dy) {
    for (int i = 0; i < n; ++i) {
        // the forward silu was computed from the f16 equivalent of x[i], not
        // from x[i] itself, so take the derivative at the f16 value of x[i]:
        ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
        float usedx = GGML_FP16_TO_FP32(fp16);
        dx[i] = ggml_silu_backward_f32(usedx, dy[i]);
    }
}
#else
inline static void ggml_vec_silu_backward_f32(const int n, float * dx, const float * x, const float * dy) {
    for (int i = 0; i < n; ++i) {
        dx[i] = ggml_silu_backward_f32(x[i], dy[i]);
    }
}
#endif
inline static void ggml_vec_sum_f32(const int n, float * s, const float * x) {
#ifndef GGML_USE_ACCELERATE
    ggml_float sum = 0.0;
    for (int i = 0; i < n; ++i) {
        sum += (ggml_float)x[i];
    }
    *s = sum;
#else
    vDSP_sve(x, 1, s, n);
#endif
}

inline static void ggml_vec_sum_f32_ggf(const int n, ggml_float * s, const float * x) {
    ggml_float sum = 0.0;
    for (int i = 0; i < n; ++i) {
        sum += (ggml_float)x[i];
    }
    *s = sum;
}

inline static void ggml_vec_sum_f16_ggf(const int n, float * s, const ggml_fp16_t * x) {
    float sum = 0.0f;
    for (int i = 0; i < n; ++i) {
        sum += GGML_FP16_TO_FP32(x[i]);
    }
    *s = sum;
}

inline static void ggml_vec_max_f32(const int n, float * s, const float * x) {
#ifndef GGML_USE_ACCELERATE
    float max = -INFINITY;
    for (int i = 0; i < n; ++i) {
        max = MAX(max, x[i]);
    }
    *s = max;
#else
    vDSP_maxv(x, 1, s, n);
#endif
}

inline static void ggml_vec_norm_inv_f32(const int n, float * s, const float * x) {
    ggml_vec_norm_f32(n, s, x);
    *s = 1.f/(*s);
}

inline static void ggml_vec_argmax_f32(const int n, int * s, const float * x) {
    float max = -INFINITY;
    int idx = 0;
    for (int i = 0; i < n; ++i) {
        max = MAX(max, x[i]);
        if (max == x[i]) { idx = i; }
    }
    *s = idx;
}
//
// data types
//

static const char * GGML_OP_NAME[GGML_OP_COUNT] = {
    "NONE",
    "DUP",
    "ADD",
    "ADD1",
    "ACC",
    "SUB",
    "MUL",
    "DIV",
    "SQR",
    "SQRT",
    "LOG",
    "SUM",
    "SUM_ROWS",
    "MEAN",
    "ARGMAX",
    "REPEAT",
    "REPEAT_BACK",
    "CONCAT",
    "SILU_BACK",
    "NORM",
    "RMS_NORM",
    "RMS_NORM_BACK",
    "GROUP_NORM",
    "MUL_MAT",
    "MUL_MAT_ID",
    "OUT_PROD",
    "SCALE",
    "SET",
    "CPY",
    "CONT",
    "RESHAPE",
    "VIEW",
    "PERMUTE",
    "TRANSPOSE",
    "GET_ROWS",
    "GET_ROWS_BACK",
    "DIAG",
    "DIAG_MASK_INF",
    "DIAG_MASK_ZERO",
    "SOFT_MAX",
    "SOFT_MAX_BACK",
    "ROPE",
    "ROPE_BACK",
    "ALIBI",
    "CLAMP",
    "CONV_TRANSPOSE_1D",
    "IM2COL",
    "CONV_TRANSPOSE_2D",
    "POOL_1D",
    "POOL_2D",
    "UPSCALE",
    "PAD",
    "ARGSORT",
    "LEAKY_RELU",
    "FLASH_ATTN",
    "FLASH_FF",
    "FLASH_ATTN_BACK",
    "WIN_PART",
    "WIN_UNPART",
    "GET_REL_POS",
    "ADD_REL_POS",
    "UNARY",
    "MAP_UNARY",
    "MAP_BINARY",
    "MAP_CUSTOM1_F32",
    "MAP_CUSTOM2_F32",
    "MAP_CUSTOM3_F32",
    "MAP_CUSTOM1",
    "MAP_CUSTOM2",
    "MAP_CUSTOM3",
    "CROSS_ENTROPY_LOSS",
    "CROSS_ENTROPY_LOSS_BACK",
};

static_assert(GGML_OP_COUNT == 72, "GGML_OP_COUNT != 72");
static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = {
    "none",
    "x",
    "x+y",
    "x+y",
    "view(x,nb,offset)+=y->x",
    "x-y",
    "x*y",
    "x/y",
    "x^2",
    "√x",
    "log(x)",
    "Σx",
    "Σx_k",
    "Σx/n",
    "argmax(x)",
    "repeat(x)",
    "repeat_back(x)",
    "concat(x, y)",
    "silu_back(x)",
    "norm(x)",
    "rms_norm(x)",
    "rms_norm_back(x)",
    "group_norm(x)",
    "X*Y",
    "X[i]*Y",
    "X*Y",
    "x*v",
    "y-\\>view(x)",
    "x-\\>y",
    "cont(x)",
    "reshape(x)",
    "view(x)",
    "permute(x)",
    "transpose(x)",
    "get_rows(x)",
    "get_rows_back(x)",
    "diag(x)",
    "diag_mask_inf(x)",
    "diag_mask_zero(x)",
    "soft_max(x)",
    "soft_max_back(x)",
    "rope(x)",
    "rope_back(x)",
    "alibi(x)",
    "clamp(x)",
    "conv_transpose_1d(x)",
    "im2col(x)",
    "conv_transpose_2d(x)",
    "pool_1d(x)",
    "pool_2d(x)",
    "upscale(x)",
    "pad(x)",
    "argsort(x)",
    "leaky_relu(x)",
    "flash_attn(x)",
    "flash_ff(x)",
    "flash_attn_back(x)",
    "win_part(x)",
    "win_unpart(x)",
    "get_rel_pos(x)",
    "add_rel_pos(x)",
    "unary(x)",
    "f(x)",
    "f(x,y)",
    "custom_f32(x)",
    "custom_f32(x,y)",
    "custom_f32(x,y,z)",
    "custom(x)",
    "custom(x,y)",
    "custom(x,y,z)",
    "cross_entropy_loss(x,y)",
    "cross_entropy_loss_back(x,y)",
};

static_assert(GGML_OP_COUNT == 72, "GGML_OP_COUNT != 72");

static_assert(GGML_OP_POOL_COUNT == 2, "GGML_OP_POOL_COUNT != 2");
static const char * GGML_UNARY_OP_NAME[GGML_UNARY_OP_COUNT] = {
    "ABS",
    "SGN",
    "NEG",
    "STEP",
    "TANH",
    "ELU",
    "RELU",
    "GELU",
    "GELU_QUICK",
    "SILU",
};

static_assert(GGML_UNARY_OP_COUNT == 10, "GGML_UNARY_OP_COUNT != 10");

static_assert(sizeof(struct ggml_object)%GGML_MEM_ALIGN == 0, "ggml_object size must be a multiple of GGML_MEM_ALIGN");
static_assert(sizeof(struct ggml_tensor)%GGML_MEM_ALIGN == 0, "ggml_tensor size must be a multiple of GGML_MEM_ALIGN");
// WARN:
// Misconfiguration can lead to problems that are hard to reason about:
// * At best it crashes or talks nonsense.
// * At worst the output is subtly wrong in ways that are hard to perceive.
//
// An op has to enable INIT or FINALIZE when any of its branches needs that pass.
// Take care about compile options (e.g., GGML_USE_xxx).
static bool GGML_OP_HAS_INIT    [GGML_OP_COUNT] = { 0 };
static bool GGML_OP_HAS_FINALIZE[GGML_OP_COUNT] = { 0 };

static void ggml_setup_op_has_task_pass(void) {
    {   // INIT
        bool * p = GGML_OP_HAS_INIT;

        p[GGML_OP_ACC                ] = true;
        p[GGML_OP_MUL_MAT            ] = true;
        p[GGML_OP_MUL_MAT_ID         ] = true;
        p[GGML_OP_OUT_PROD           ] = true;
        p[GGML_OP_SET                ] = true;
        p[GGML_OP_GET_ROWS_BACK      ] = true;
        p[GGML_OP_DIAG_MASK_INF      ] = true;
        p[GGML_OP_DIAG_MASK_ZERO     ] = true;
        p[GGML_OP_CONV_TRANSPOSE_1D  ] = true;
        p[GGML_OP_CONV_TRANSPOSE_2D  ] = true;
        p[GGML_OP_FLASH_ATTN_BACK    ] = true;
        p[GGML_OP_CROSS_ENTROPY_LOSS ] = true;
        p[GGML_OP_ADD_REL_POS        ] = true;
    }

    {   // FINALIZE
        bool * p = GGML_OP_HAS_FINALIZE;

        p[GGML_OP_CROSS_ENTROPY_LOSS ] = true;
    }
}
//
// ggml context
//

struct ggml_context {
    size_t mem_size;
    void * mem_buffer;
    bool   mem_buffer_owned;
    bool   no_alloc;
    bool   no_alloc_save; // this is used to save the no_alloc state when using scratch buffers

    int    n_objects;

    struct ggml_object * objects_begin;
    struct ggml_object * objects_end;

    struct ggml_scratch scratch;
    struct ggml_scratch scratch_save;
};

struct ggml_context_container {
    bool used;

    struct ggml_context context;
};
//
// NUMA support
//

#define GGML_NUMA_MAX_NODES 8
#define GGML_NUMA_MAX_CPUS 512

struct ggml_numa_node {
    uint32_t cpus[GGML_NUMA_MAX_CPUS]; // hardware threads on this node
    uint32_t n_cpus;
};

struct ggml_numa_nodes {
    struct ggml_numa_node nodes[GGML_NUMA_MAX_NODES];
    uint32_t n_nodes;
    uint32_t total_cpus; // hardware threads on system
};
//
// ggml state
//

struct ggml_state {
    struct ggml_context_container contexts[GGML_MAX_CONTEXTS];
    struct ggml_numa_nodes numa;
};

// global state
static struct ggml_state g_state;
static atomic_int g_state_barrier = 0;

// barrier via spin lock
inline static void ggml_critical_section_start(void) {
    int processing = atomic_fetch_add(&g_state_barrier, 1);

    while (processing > 0) {
        // wait for other threads to finish
        atomic_fetch_sub(&g_state_barrier, 1);
        sched_yield(); // TODO: reconsider this
        processing = atomic_fetch_add(&g_state_barrier, 1);
    }
}

// TODO: make this somehow automatically executed
//       some sort of "sentry" mechanism
inline static void ggml_critical_section_end(void) {
    atomic_fetch_sub(&g_state_barrier, 1);
}
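// Usage sketch (hypothetical): any code that mutates g_state brackets the
// mutation with the pair above,
//
//   ggml_critical_section_start();
//   // ... read/modify g_state ...
//   ggml_critical_section_end();
//
// the spin lock admits one thread at a time; all others back out and retry.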
void ggml_numa_init(void) {
    if (g_state.numa.n_nodes > 0) {
        fprintf(stderr, "ggml_numa_init: NUMA already initialized\n");

        return;
    }

#ifdef __linux__
    struct stat st;
    char path[256];
    int rv;

    // enumerate nodes
    while (g_state.numa.n_nodes < GGML_NUMA_MAX_NODES) {
        rv = snprintf(path, sizeof(path), "/sys/devices/system/node/node%u", g_state.numa.n_nodes);
        GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path));
        if (stat(path, &st) != 0) { break; }
        ++g_state.numa.n_nodes;
    }

    // enumerate CPUs
    while (g_state.numa.total_cpus < GGML_NUMA_MAX_CPUS) {
        rv = snprintf(path, sizeof(path), "/sys/devices/system/cpu/cpu%u", g_state.numa.total_cpus);
        GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path));
        if (stat(path, &st) != 0) { break; }
        ++g_state.numa.total_cpus;
    }

    GGML_PRINT_DEBUG("found %u numa nodes, %u CPUs\n", g_state.numa.n_nodes, g_state.numa.total_cpus);

    if (g_state.numa.n_nodes < 1 || g_state.numa.total_cpus < 1) {
        g_state.numa.n_nodes = 0;
        return;
    }

    for (uint32_t n = 0; n < g_state.numa.n_nodes; ++n) {
        struct ggml_numa_node * node = &g_state.numa.nodes[n];
        GGML_PRINT_DEBUG("CPUs on node %u:", n);
        node->n_cpus = 0;
        for (uint32_t c = 0; c < g_state.numa.total_cpus; ++c) {
            rv = snprintf(path, sizeof(path), "/sys/devices/system/node/node%u/cpu%u", n, c);
            GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path));
            if (stat(path, &st) == 0) {
                node->cpus[node->n_cpus++] = c;
                GGML_PRINT_DEBUG(" %u", c);
            }
        }
        GGML_PRINT_DEBUG("\n");
    }

    if (ggml_is_numa()) {
        FILE *fptr = fopen("/proc/sys/kernel/numa_balancing", "r");
        if (fptr != NULL) {
            char buf[42];
            if (fgets(buf, sizeof(buf), fptr) && strncmp(buf, "0\n", sizeof(buf)) != 0) {
                GGML_PRINT("WARNING: /proc/sys/kernel/numa_balancing is enabled, this has been observed to impair performance\n");
            }
            fclose(fptr);
        }
    }
#else
    // TODO
#endif
}

bool ggml_is_numa(void) {
    return g_state.numa.n_nodes > 1;
}
////////////////////////////////////////////////////////////////////////////////

void ggml_print_object(const struct ggml_object * obj) {
    GGML_PRINT(" - ggml_object: type = %d, offset = %zu, size = %zu, next = %p\n",
            obj->type, obj->offs, obj->size, (const void *) obj->next);
}

void ggml_print_objects(const struct ggml_context * ctx) {
    struct ggml_object * obj = ctx->objects_begin;

    GGML_PRINT("%s: objects in context %p:\n", __func__, (const void *) ctx);

    while (obj != NULL) {
        ggml_print_object(obj);
        obj = obj->next;
    }

    GGML_PRINT("%s: --- end ---\n", __func__);
}
int64_t ggml_nelements(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return tensor->ne[0]*tensor->ne[1]*tensor->ne[2]*tensor->ne[3];
}

int64_t ggml_nrows(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return tensor->ne[1]*tensor->ne[2]*tensor->ne[3];
}

size_t ggml_nbytes(const struct ggml_tensor * tensor) {
    size_t nbytes;
    size_t blck_size = ggml_blck_size(tensor->type);
    if (blck_size == 1) {
        nbytes = ggml_type_size(tensor->type);
        for (int i = 0; i < GGML_MAX_DIMS; ++i) {
            nbytes += (tensor->ne[i] - 1)*tensor->nb[i];
        }
    }
    else {
        nbytes = tensor->ne[0]*tensor->nb[0]/blck_size;
        for (int i = 1; i < GGML_MAX_DIMS; ++i) {
            nbytes += (tensor->ne[i] - 1)*tensor->nb[i];
        }
    }

    return nbytes;
}
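// Worked example (illustrative): a contiguous 2x3 F32 tensor has
// ne = { 2, 3, 1, 1 } and nb = { 4, 8, 24, 24 }, so
// ggml_nbytes = 4 + 1*4 + 2*8 = 24 bytes, i.e. exactly 6 floats; the same
// formula also covers non-contiguous views, where the strides nb[i] no
// longer match the packed sizes.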
size_t ggml_nbytes_pad(const struct ggml_tensor * tensor) {
    return GGML_PAD(ggml_nbytes(tensor), GGML_MEM_ALIGN);
}

size_t ggml_nbytes_split(const struct ggml_tensor * tensor, int nrows_split) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return (nrows_split*tensor->ne[0]*ggml_type_size(tensor->type))/ggml_blck_size(tensor->type);
}

int ggml_blck_size(enum ggml_type type) {
    return type_traits[type].blck_size;
}

size_t ggml_type_size(enum ggml_type type) {
    return type_traits[type].type_size;
}

size_t ggml_row_size(enum ggml_type type, int64_t ne) {
    assert(ne % ggml_blck_size(type) == 0);
    return ggml_type_size(type)*ne/ggml_blck_size(type);
}

double ggml_type_sizef(enum ggml_type type) {
    return ((double)(type_traits[type].type_size))/type_traits[type].blck_size;
}

const char * ggml_type_name(enum ggml_type type) {
    return type_traits[type].type_name;
}

bool ggml_is_quantized(enum ggml_type type) {
    return type_traits[type].is_quantized;
}

const char * ggml_op_name(enum ggml_op op) {
    return GGML_OP_NAME[op];
}

const char * ggml_op_symbol(enum ggml_op op) {
    return GGML_OP_SYMBOL[op];
}
const char * ggml_unary_op_name(enum ggml_unary_op op) {
    return GGML_UNARY_OP_NAME[op];
}

const char * ggml_op_desc(const struct ggml_tensor * t) {
    if (t->op == GGML_OP_UNARY) {
        enum ggml_unary_op uop = ggml_get_unary_op(t);
        return ggml_unary_op_name(uop);
    }
    else {
        return ggml_op_name(t->op);
    }
}

size_t ggml_element_size(const struct ggml_tensor * tensor) {
    return ggml_type_size(tensor->type);
}

static inline bool ggml_is_scalar(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return tensor->ne[0] == 1 && tensor->ne[1] == 1 && tensor->ne[2] == 1 && tensor->ne[3] == 1;
}

static inline bool ggml_is_vector(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return tensor->ne[1] == 1 && tensor->ne[2] == 1 && tensor->ne[3] == 1;
}

static inline bool ggml_is_matrix(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return tensor->ne[2] == 1 && tensor->ne[3] == 1;
}

static inline bool ggml_can_mul_mat(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return (t0->ne[0]           == t1->ne[0]) &&
           (t1->ne[2]%t0->ne[2] == 0)         && // verify t0 is broadcastable
           (t1->ne[3]%t0->ne[3] == 0);
}

static inline bool ggml_can_out_prod(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return (t0->ne[1]           == t1->ne[1]) &&
           (t1->ne[2]%t0->ne[2] == 0)         && // verify t0 is broadcastable
           (t1->ne[3]%t0->ne[3] == 0);
}

enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype) {
    enum ggml_type wtype = GGML_TYPE_COUNT;

    switch (ftype) {
        case GGML_FTYPE_ALL_F32:              wtype = GGML_TYPE_F32;   break;
        case GGML_FTYPE_MOSTLY_F16:           wtype = GGML_TYPE_F16;   break;
        case GGML_FTYPE_MOSTLY_Q4_0:          wtype = GGML_TYPE_Q4_0;  break;
        case GGML_FTYPE_MOSTLY_Q4_1:          wtype = GGML_TYPE_Q4_1;  break;
        case GGML_FTYPE_MOSTLY_Q5_0:          wtype = GGML_TYPE_Q5_0;  break;
        case GGML_FTYPE_MOSTLY_Q5_1:          wtype = GGML_TYPE_Q5_1;  break;
        case GGML_FTYPE_MOSTLY_Q8_0:          wtype = GGML_TYPE_Q8_0;  break;
        case GGML_FTYPE_MOSTLY_Q2_K:          wtype = GGML_TYPE_Q2_K;  break;
        case GGML_FTYPE_MOSTLY_Q3_K:          wtype = GGML_TYPE_Q3_K;  break;
        case GGML_FTYPE_MOSTLY_Q4_K:          wtype = GGML_TYPE_Q4_K;  break;
        case GGML_FTYPE_MOSTLY_Q5_K:          wtype = GGML_TYPE_Q5_K;  break;
        case GGML_FTYPE_MOSTLY_Q6_K:          wtype = GGML_TYPE_Q6_K;  break;
        case GGML_FTYPE_UNKNOWN:              wtype = GGML_TYPE_COUNT; break;
        case GGML_FTYPE_MOSTLY_Q4_1_SOME_F16: wtype = GGML_TYPE_COUNT; break;
    }

    GGML_ASSERT(wtype != GGML_TYPE_COUNT);

    return wtype;
}

size_t ggml_tensor_overhead(void) {
    return GGML_OBJECT_SIZE + GGML_TENSOR_SIZE;
}
bool ggml_is_transposed(const struct ggml_tensor * tensor) {
    return tensor->nb[0] > tensor->nb[1];
}

bool ggml_is_contiguous(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return
        tensor->nb[0] == ggml_type_size(tensor->type) &&
        tensor->nb[1] == (tensor->nb[0]*tensor->ne[0])/ggml_blck_size(tensor->type) &&
        tensor->nb[2] == tensor->nb[1]*tensor->ne[1] &&
        tensor->nb[3] == tensor->nb[2]*tensor->ne[2];
}

static inline bool ggml_is_contiguous_except_dim_1(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return
        tensor->nb[0] == ggml_type_size(tensor->type) &&
        tensor->nb[2] == tensor->nb[1]*tensor->ne[1] &&
        tensor->nb[3] == tensor->nb[2]*tensor->ne[2];
}

bool ggml_is_permuted(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return tensor->nb[0] > tensor->nb[1] || tensor->nb[1] > tensor->nb[2] || tensor->nb[2] > tensor->nb[3];
}

static inline bool ggml_is_padded_1d(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return
        tensor->nb[0] == ggml_type_size(tensor->type) &&
        tensor->nb[2] == tensor->nb[1]*tensor->ne[1] &&
        tensor->nb[3] == tensor->nb[2]*tensor->ne[2];
}

bool ggml_are_same_shape(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return
        (t0->ne[0] == t1->ne[0] ) &&
        (t0->ne[1] == t1->ne[1] ) &&
        (t0->ne[2] == t1->ne[2] ) &&
        (t0->ne[3] == t1->ne[3] );
}
// check if t1 can be represented as a repetition of t0
static inline bool ggml_can_repeat(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return
        (t1->ne[0]%t0->ne[0] == 0) &&
        (t1->ne[1]%t0->ne[1] == 0) &&
        (t1->ne[2]%t0->ne[2] == 0) &&
        (t1->ne[3]%t0->ne[3] == 0);
}

static inline bool ggml_can_repeat_rows(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return (t0->ne[0] == t1->ne[0]) && ggml_can_repeat(t0, t1);
}
static inline int ggml_up32(int n) {
    return (n + 31) & ~31;
}

//static inline int ggml_up64(int n) {
//    return (n + 63) & ~63;
//}

static inline int ggml_up(int n, int m) {
    // assert m is a power of 2
    GGML_ASSERT((m & (m - 1)) == 0);
    return (n + m - 1) & ~(m - 1);
}
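// Worked examples (illustrative): ggml_up32(33) == 64, ggml_up(10, 8) == 16,
// and ggml_up(n, m) == n whenever n is already a multiple of the
// power-of-two m.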
// assert that pointer is aligned to GGML_MEM_ALIGN
#define ggml_assert_aligned(ptr) \
    GGML_ASSERT(((uintptr_t) (ptr))%GGML_MEM_ALIGN == 0)
////////////////////////////////////////////////////////////////////////////////

struct ggml_context * ggml_init(struct ggml_init_params params) {
    // make this function thread safe
    ggml_critical_section_start();

    static bool is_first_call = true;

    if (is_first_call) {
        // initialize time system (required on Windows)
        ggml_time_init();

        // initialize GELU, Quick GELU, SILU and EXP F32 tables
        {
            const uint64_t t_start = ggml_time_us(); UNUSED(t_start);

            ggml_fp16_t ii;
            for (int i = 0; i < (1 << 16); ++i) {
                uint16_t ui = i;
                memcpy(&ii, &ui, sizeof(ii));
                const float f = ggml_table_f32_f16[i] = GGML_COMPUTE_FP16_TO_FP32(ii);
                ggml_table_gelu_f16[i]       = GGML_FP32_TO_FP16(ggml_gelu_f32(f));
                ggml_table_gelu_quick_f16[i] = GGML_FP32_TO_FP16(ggml_gelu_quick_f32(f));
                ggml_table_silu_f16[i]       = GGML_FP32_TO_FP16(ggml_silu_f32(f));
                ggml_table_exp_f16[i]        = GGML_FP32_TO_FP16(expf(f));
            }

            const uint64_t t_end = ggml_time_us(); UNUSED(t_end);

            GGML_PRINT_DEBUG("%s: GELU, Quick GELU, SILU and EXP tables initialized in %f ms\n", __func__, (t_end - t_start)/1000.0f);
        }

        // initialize g_state
        {
            const uint64_t t_start = ggml_time_us(); UNUSED(t_start);

            g_state = (struct ggml_state) {
                /*.contexts =*/ { { 0 } },
                /*.numa =*/ {
                    .n_nodes = 0,
                    .total_cpus = 0,
                },
            };

            for (int i = 0; i < GGML_MAX_CONTEXTS; ++i) {
                g_state.contexts[i].used = false;
            }

            const uint64_t t_end = ggml_time_us(); UNUSED(t_end);

            GGML_PRINT_DEBUG("%s: g_state initialized in %f ms\n", __func__, (t_end - t_start)/1000.0f);
        }

#if defined(GGML_USE_CUBLAS)
        ggml_init_cublas();
#elif defined(GGML_USE_CLBLAST)
        ggml_cl_init();
#endif

        ggml_setup_op_has_task_pass();

        is_first_call = false;
    }

    // find non-used context in g_state
    struct ggml_context * ctx = NULL;

    for (int i = 0; i < GGML_MAX_CONTEXTS; i++) {
        if (!g_state.contexts[i].used) {
            g_state.contexts[i].used = true;
            ctx = &g_state.contexts[i].context;

            GGML_PRINT_DEBUG("%s: found unused context %d\n", __func__, i);
            break;
        }
    }

    if (ctx == NULL) {
        GGML_PRINT_DEBUG("%s: no unused context found\n", __func__);

        ggml_critical_section_end();

        return NULL;
    }
    // allow ggml_init to be called with params.mem_size == 0
    if (params.mem_size == 0) {
        params.mem_size = GGML_MEM_ALIGN;
    }

    const size_t mem_size = params.mem_buffer ? params.mem_size : GGML_PAD(params.mem_size, GGML_MEM_ALIGN);

    *ctx = (struct ggml_context) {
        /*.mem_size         =*/ mem_size,
        /*.mem_buffer       =*/ params.mem_buffer ? params.mem_buffer : GGML_ALIGNED_MALLOC(mem_size),
        /*.mem_buffer_owned =*/ params.mem_buffer ? false : true,
        /*.no_alloc         =*/ params.no_alloc,
        /*.no_alloc_save    =*/ params.no_alloc,
        /*.n_objects        =*/ 0,
        /*.objects_begin    =*/ NULL,
        /*.objects_end      =*/ NULL,
        /*.scratch          =*/ { 0, 0, NULL, },
        /*.scratch_save     =*/ { 0, 0, NULL, },
    };

    GGML_ASSERT(ctx->mem_buffer != NULL);

    ggml_assert_aligned(ctx->mem_buffer);

    GGML_PRINT_DEBUG("%s: context initialized\n", __func__);

    ggml_critical_section_end();

    return ctx;
}
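// Example usage (hypothetical caller code):
//
//   struct ggml_init_params params = {
//       /*.mem_size   =*/ 16*1024*1024,  // 16 MB arena
//       /*.mem_buffer =*/ NULL,          // let ggml allocate it
//       /*.no_alloc   =*/ false,
//   };
//   struct ggml_context * ctx = ggml_init(params);
//   // ... create tensors and build graphs in ctx ...
//   ggml_free(ctx);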
void ggml_free(struct ggml_context * ctx) {
    // make this function thread safe
    ggml_critical_section_start();

    bool found = false;

    for (int i = 0; i < GGML_MAX_CONTEXTS; i++) {
        if (&g_state.contexts[i].context == ctx) {
            g_state.contexts[i].used = false;

            GGML_PRINT_DEBUG("%s: context %d has been freed. memory used = %zu\n",
                    __func__, i, ggml_used_mem(ctx));

            if (ctx->mem_buffer_owned) {
                GGML_ALIGNED_FREE(ctx->mem_buffer);
            }

            found = true;
            break;
        }
    }

    if (!found) {
        GGML_PRINT_DEBUG("%s: context not found\n", __func__);
    }

    ggml_critical_section_end();
}

size_t ggml_used_mem(const struct ggml_context * ctx) {
    return ctx->objects_end == NULL ? 0 : ctx->objects_end->offs + ctx->objects_end->size;
}

size_t ggml_set_scratch(struct ggml_context * ctx, struct ggml_scratch scratch) {
    const size_t result = ctx->scratch.data ? ctx->scratch.offs : 0;

    ctx->scratch = scratch;

    return result;
}

bool ggml_get_no_alloc(struct ggml_context * ctx) {
    return ctx->no_alloc;
}

void ggml_set_no_alloc(struct ggml_context * ctx, bool no_alloc) {
    ctx->no_alloc = no_alloc;
}

void * ggml_get_mem_buffer(const struct ggml_context * ctx) {
    return ctx->mem_buffer;
}

size_t ggml_get_mem_size(const struct ggml_context * ctx) {
    return ctx->mem_size;
}

size_t ggml_get_max_tensor_size(const struct ggml_context * ctx) {
    size_t max_size = 0;

    struct ggml_object * obj = ctx->objects_begin;

    while (obj != NULL) {
        if (obj->type == GGML_OBJECT_TENSOR) {
            struct ggml_tensor * tensor = (struct ggml_tensor *) ((char *) ctx->mem_buffer + obj->offs);

            const size_t size = ggml_nbytes(tensor);

            if (max_size < size) {
                max_size = size;
            }
        }

        obj = obj->next;
    }

    return max_size;
}
// IMPORTANT:
// when creating "opt" tensors, always save and load the scratch buffer
// this is an error-prone process, but it is necessary to support inplace
// operators when using scratch buffers
// TODO: implement a better way
static void ggml_scratch_save(struct ggml_context * ctx) {
    // this is needed to allow opt tensors to store their data
    // TODO: again, need to find a better way
    ctx->no_alloc_save = ctx->no_alloc;
    ctx->no_alloc      = false;

    ctx->scratch_save = ctx->scratch;
    ctx->scratch.data = NULL;
}

static void ggml_scratch_load(struct ggml_context * ctx) {
    ctx->no_alloc = ctx->no_alloc_save;

    ctx->scratch = ctx->scratch_save;
}
////////////////////////////////////////////////////////////////////////////////

static struct ggml_object * ggml_new_object(struct ggml_context * ctx, enum ggml_object_type type, size_t size) {
    // always insert objects at the end of the context's memory pool
    struct ggml_object * obj_cur = ctx->objects_end;

    const size_t cur_offs = obj_cur == NULL ? 0 : obj_cur->offs;
    const size_t cur_size = obj_cur == NULL ? 0 : obj_cur->size;
    const size_t cur_end  = cur_offs + cur_size;

    // align to GGML_MEM_ALIGN
    size_t size_needed = GGML_PAD(size, GGML_MEM_ALIGN);

    char * const mem_buffer = ctx->mem_buffer;
    struct ggml_object * const obj_new = (struct ggml_object *)(mem_buffer + cur_end);

    if (cur_end + size_needed + GGML_OBJECT_SIZE > ctx->mem_size) {
        GGML_PRINT("%s: not enough space in the context's memory pool (needed %zu, available %zu)\n",
                __func__, cur_end + size_needed, ctx->mem_size);
        assert(false);
        return NULL;
    }

    *obj_new = (struct ggml_object) {
        .offs = cur_end + GGML_OBJECT_SIZE,
        .size = size_needed,
        .next = NULL,
        .type = type,
    };

    ggml_assert_aligned(mem_buffer + obj_new->offs);

    if (obj_cur != NULL) {
        obj_cur->next = obj_new;
    } else {
        // this is the first object in this context
        ctx->objects_begin = obj_new;
    }

    ctx->objects_end = obj_new;

    //printf("%s: inserted new object at %zu, size = %zu\n", __func__, cur_end, obj_new->size);

    return obj_new;
}
  2055. static struct ggml_tensor * ggml_new_tensor_impl(
  2056. struct ggml_context * ctx,
  2057. enum ggml_type type,
  2058. int n_dims,
  2059. const int64_t * ne,
  2060. struct ggml_tensor * view_src,
  2061. size_t view_offs) {
  2062. assert(n_dims >= 1 && n_dims <= GGML_MAX_DIMS);
  2063. // find the base tensor and absolute offset
  2064. if (view_src != NULL && view_src->view_src != NULL) {
  2065. view_offs += view_src->view_offs;
  2066. view_src = view_src->view_src;
  2067. }
  2068. size_t data_size = ggml_type_size(type)*(ne[0]/ggml_blck_size(type));
  2069. for (int i = 1; i < n_dims; i++) {
  2070. data_size *= ne[i];
  2071. }
  2072. GGML_ASSERT(view_src == NULL || data_size + view_offs <= ggml_nbytes(view_src));
  2073. void * data = view_src != NULL ? view_src->data : NULL;
  2074. if (data != NULL) {
  2075. data = (char *) data + view_offs;
  2076. }
  2077. size_t obj_alloc_size = 0;
  2078. if (view_src == NULL && !ctx->no_alloc) {
  2079. if (ctx->scratch.data != NULL) {
  2080. // allocate tensor data in the scratch buffer
  2081. if (ctx->scratch.offs + data_size > ctx->scratch.size) {
  2082. GGML_PRINT("%s: not enough space in the scratch memory pool (needed %zu, available %zu)\n",
  2083. __func__, ctx->scratch.offs + data_size, ctx->scratch.size);
  2084. assert(false);
  2085. return NULL;
  2086. }
  2087. data = (char * const) ctx->scratch.data + ctx->scratch.offs;
  2088. ctx->scratch.offs += data_size;
  2089. } else {
  2090. // allocate tensor data in the context's memory pool
  2091. obj_alloc_size = data_size;
  2092. }
  2093. }
  2094. struct ggml_object * const obj_new = ggml_new_object(ctx, GGML_OBJECT_TENSOR, GGML_TENSOR_SIZE + obj_alloc_size);
  2095. // TODO: for recoverable errors, we would need to free the data allocated from the scratch buffer here
  2096. struct ggml_tensor * const result = (struct ggml_tensor *)((char *)ctx->mem_buffer + obj_new->offs);
  2097. *result = (struct ggml_tensor) {
  2098. /*.type =*/ type,
  2099. /*.backend =*/ GGML_BACKEND_CPU,
  2100. /*.buffer =*/ NULL,
  2101. /*.n_dims =*/ n_dims,
  2102. /*.ne =*/ { 1, 1, 1, 1 },
  2103. /*.nb =*/ { 0, 0, 0, 0 },
  2104. /*.op =*/ GGML_OP_NONE,
  2105. /*.op_params =*/ { 0 },
  2106. /*.is_param =*/ false,
  2107. /*.grad =*/ NULL,
  2108. /*.src =*/ { NULL },
  2109. /*.perf_runs =*/ 0,
  2110. /*.perf_cycles =*/ 0,
  2111. /*.perf_time_us =*/ 0,
  2112. /*.view_src =*/ view_src,
  2113. /*.view_offs =*/ view_offs,
  2114. /*.data =*/ obj_alloc_size > 0 ? (void *)(result + 1) : data,
  2115. /*.name =*/ { 0 },
  2116. /*.extra =*/ NULL,
  2117. /*.padding =*/ { 0 },
  2118. };
  2119. // TODO: this should not be needed as long as we don't rely on aligned SIMD loads
  2120. //ggml_assert_aligned(result->data);
  2121. for (int i = 0; i < n_dims; i++) {
  2122. result->ne[i] = ne[i];
  2123. }
  2124. result->nb[0] = ggml_type_size(type);
  2125. result->nb[1] = result->nb[0]*(result->ne[0]/ggml_blck_size(type));
  2126. for (int i = 2; i < GGML_MAX_DIMS; i++) {
  2127. result->nb[i] = result->nb[i - 1]*result->ne[i - 1];
  2128. }
  2129. ctx->n_objects++;
  2130. return result;
  2131. }
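
// Worked example (illustrative comment, not part of the original source): for
// a 2x3 F32 tensor (ne = {2, 3, 1, 1}, block size 1, type size 4) the stride
// setup above yields
//
//     nb[0] = 4                  (one float)
//     nb[1] = nb[0]*ne[0] =  8   (one row)
//     nb[2] = nb[1]*ne[1] = 24   (one matrix)
//     nb[3] = nb[2]*ne[2] = 24
//
// so element (i0, i1) lives at (char *) data + i0*nb[0] + i1*nb[1].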

struct ggml_tensor * ggml_new_tensor(
        struct ggml_context * ctx,
        enum   ggml_type      type,
        int                   n_dims,
        const int64_t       * ne) {
    return ggml_new_tensor_impl(ctx, type, n_dims, ne, NULL, 0);
}

struct ggml_tensor * ggml_new_tensor_1d(
        struct ggml_context * ctx,
        enum   ggml_type      type,
        int64_t               ne0) {
    return ggml_new_tensor(ctx, type, 1, &ne0);
}

struct ggml_tensor * ggml_new_tensor_2d(
        struct ggml_context * ctx,
        enum   ggml_type      type,
        int64_t               ne0,
        int64_t               ne1) {
    const int64_t ne[2] = { ne0, ne1 };
    return ggml_new_tensor(ctx, type, 2, ne);
}

struct ggml_tensor * ggml_new_tensor_3d(
        struct ggml_context * ctx,
        enum   ggml_type      type,
        int64_t               ne0,
        int64_t               ne1,
        int64_t               ne2) {
    const int64_t ne[3] = { ne0, ne1, ne2 };
    return ggml_new_tensor(ctx, type, 3, ne);
}

struct ggml_tensor * ggml_new_tensor_4d(
        struct ggml_context * ctx,
        enum   ggml_type      type,
        int64_t               ne0,
        int64_t               ne1,
        int64_t               ne2,
        int64_t               ne3) {
    const int64_t ne[4] = { ne0, ne1, ne2, ne3 };
    return ggml_new_tensor(ctx, type, 4, ne);
}
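
// Usage sketch (illustrative comment, not part of the original source),
// assuming a context `ctx` created earlier with ggml_init():
//
//     struct ggml_tensor * w = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 64, 32);
//     struct ggml_tensor * x = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 64);
//
// Both tensors (metadata plus data, unless no_alloc or a scratch buffer is in
// effect) live inside the context's memory pool and are released together by
// ggml_free(ctx).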

struct ggml_tensor * ggml_new_i32(struct ggml_context * ctx, int32_t value) {
    ggml_scratch_save(ctx);

    struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 1);

    ggml_scratch_load(ctx);

    ggml_set_i32(result, value);

    return result;
}

struct ggml_tensor * ggml_new_f32(struct ggml_context * ctx, float value) {
    ggml_scratch_save(ctx);

    struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);

    ggml_scratch_load(ctx);

    ggml_set_f32(result, value);

    return result;
}

struct ggml_tensor * ggml_dup_tensor(struct ggml_context * ctx, const struct ggml_tensor * src) {
    return ggml_new_tensor(ctx, src->type, src->n_dims, src->ne);
}
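
// Note (illustrative comment, not part of the original source): the scratch
// save/load pair above ensures the 1-element scalar is allocated from the
// context's main pool even while a scratch buffer is active, e.g.
//
//     struct ggml_tensor * s = ggml_new_f32(ctx, 1.0f/sqrtf((float) n));
//     // typically consumed by an op that takes a scalar operand, such as ggml_scale
//
// ggml_dup_tensor() copies only the type and shape, never the data.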

static void ggml_set_op_params(struct ggml_tensor * tensor, const void * params, size_t params_size) {
    GGML_ASSERT(tensor != NULL); // silence -Warray-bounds warnings
    assert(params_size <= GGML_MAX_OP_PARAMS);
    memcpy(tensor->op_params, params, params_size);
}

static int32_t ggml_get_op_params_i32(const struct ggml_tensor * tensor, uint32_t i) {
    assert(i < GGML_MAX_OP_PARAMS / sizeof(int32_t));
    return ((const int32_t *)(tensor->op_params))[i];
}

static void ggml_set_op_params_i32(struct ggml_tensor * tensor, uint32_t i, int32_t value) {
    assert(i < GGML_MAX_OP_PARAMS / sizeof(int32_t));
    ((int32_t *)(tensor->op_params))[i] = value;
}

struct ggml_tensor * ggml_set_zero(struct ggml_tensor * tensor) {
    memset(tensor->data, 0, ggml_nbytes(tensor));
    return tensor;
}

struct ggml_tensor * ggml_set_i32 (struct ggml_tensor * tensor, int32_t value) {
    const int n     = ggml_nrows(tensor);
    const int nc    = tensor->ne[0];
    const size_t n1 = tensor->nb[1];

    char * const data = tensor->data;

    switch (tensor->type) {
        case GGML_TYPE_I8:
            {
                assert(tensor->nb[0] == sizeof(int8_t));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_i8(nc, (int8_t *)(data + i*n1), value);
                }
            } break;
        case GGML_TYPE_I16:
            {
                assert(tensor->nb[0] == sizeof(int16_t));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_i16(nc, (int16_t *)(data + i*n1), value);
                }
            } break;
        case GGML_TYPE_I32:
            {
                assert(tensor->nb[0] == sizeof(int32_t));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_i32(nc, (int32_t *)(data + i*n1), value);
                }
            } break;
        case GGML_TYPE_F16:
            {
                assert(tensor->nb[0] == sizeof(ggml_fp16_t));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), GGML_FP32_TO_FP16(value));
                }
            } break;
        case GGML_TYPE_F32:
            {
                assert(tensor->nb[0] == sizeof(float));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_f32(nc, (float *)(data + i*n1), value);
                }
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }

    return tensor;
}

struct ggml_tensor * ggml_set_f32(struct ggml_tensor * tensor, float value) {
    const int n     = ggml_nrows(tensor);
    const int nc    = tensor->ne[0];
    const size_t n1 = tensor->nb[1];

    char * const data = tensor->data;

    switch (tensor->type) {
        case GGML_TYPE_I8:
            {
                assert(tensor->nb[0] == sizeof(int8_t));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_i8(nc, (int8_t *)(data + i*n1), value);
                }
            } break;
        case GGML_TYPE_I16:
            {
                assert(tensor->nb[0] == sizeof(int16_t));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_i16(nc, (int16_t *)(data + i*n1), value);
                }
            } break;
        case GGML_TYPE_I32:
            {
                assert(tensor->nb[0] == sizeof(int32_t));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_i32(nc, (int32_t *)(data + i*n1), value);
                }
            } break;
        case GGML_TYPE_F16:
            {
                assert(tensor->nb[0] == sizeof(ggml_fp16_t));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), GGML_FP32_TO_FP16(value));
                }
            } break;
        case GGML_TYPE_F32:
            {
                assert(tensor->nb[0] == sizeof(float));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_f32(nc, (float *)(data + i*n1), value);
                }
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }

    return tensor;
}
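
// Usage sketch (illustrative comment, not part of the original source): both
// setters fill the whole tensor row by row through the ggml_vec_set_* helpers,
// converting the value to the element type as needed:
//
//     struct ggml_tensor * b = ggml_new_tensor_2d(ctx, GGML_TYPE_F16, 8, 4);
//     ggml_set_f32(b, 0.5f);   // stored as fp16 via GGML_FP32_TO_FP16
//
// Quantized element types fall into the default branch and abort.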

void ggml_unravel_index(const struct ggml_tensor * tensor, int64_t i, int64_t * i0, int64_t * i1, int64_t * i2, int64_t * i3) {
    const int64_t ne2 = tensor->ne[2];
    const int64_t ne1 = tensor->ne[1];
    const int64_t ne0 = tensor->ne[0];

    const int64_t i3_ = (i/(ne2*ne1*ne0));
    const int64_t i2_ = (i - i3_*ne2*ne1*ne0)/(ne1*ne0);
    const int64_t i1_ = (i - i3_*ne2*ne1*ne0 - i2_*ne1*ne0)/ne0;
    const int64_t i0_ = (i - i3_*ne2*ne1*ne0 - i2_*ne1*ne0 - i1_*ne0);

    if (i0) {
        * i0 = i0_;
    }
    if (i1) {
        * i1 = i1_;
    }
    if (i2) {
        * i2 = i2_;
    }
    if (i3) {
        * i3 = i3_;
    }
}
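
// Worked example (illustrative comment, not part of the original source):
// with ne = {4, 3, 2, 1} the flat index i = 17 unravels to
//
//     i3 = 17/24 = 0,  i2 = 17/12 = 1,  i1 = (17 - 12)/4 = 1,  i0 = 17 - 16 = 1
//
// and indeed 1 + 1*4 + 1*12 = 17.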

int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i) {
    if (!ggml_is_contiguous(tensor)) {
        int64_t id[4] = { 0, 0, 0, 0 };
        ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]);
        return ggml_get_i32_nd(tensor, id[0], id[1], id[2], id[3]);
    }
    switch (tensor->type) {
        case GGML_TYPE_I8:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
                return ((int8_t *)(tensor->data))[i];
            }
        case GGML_TYPE_I16:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
                return ((int16_t *)(tensor->data))[i];
            }
        case GGML_TYPE_I32:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
                return ((int32_t *)(tensor->data))[i];
            }
        case GGML_TYPE_F16:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
                return GGML_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]);
            }
        case GGML_TYPE_F32:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(float));
                return ((float *)(tensor->data))[i];
            }
        default:
            {
                GGML_ASSERT(false);
            }
    }

    return 0.0f;
}

void ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value) {
    if (!ggml_is_contiguous(tensor)) {
        int64_t id[4] = { 0, 0, 0, 0 };
        ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]);
        ggml_set_i32_nd(tensor, id[0], id[1], id[2], id[3], value);
        return;
    }
    switch (tensor->type) {
        case GGML_TYPE_I8:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
                ((int8_t *)(tensor->data))[i] = value;
            } break;
        case GGML_TYPE_I16:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
                ((int16_t *)(tensor->data))[i] = value;
            } break;
        case GGML_TYPE_I32:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
                ((int32_t *)(tensor->data))[i] = value;
            } break;
        case GGML_TYPE_F16:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
                ((ggml_fp16_t *)(tensor->data))[i] = GGML_FP32_TO_FP16(value);
            } break;
        case GGML_TYPE_F32:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(float));
                ((float *)(tensor->data))[i] = value;
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

int32_t ggml_get_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3) {
    void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3];
    switch (tensor->type) {
        case GGML_TYPE_I8:
            return ((int8_t *) data)[0];
        case GGML_TYPE_I16:
            return ((int16_t *) data)[0];
        case GGML_TYPE_I32:
            return ((int32_t *) data)[0];
        case GGML_TYPE_F16:
            return GGML_FP16_TO_FP32(((ggml_fp16_t *) data)[0]);
        case GGML_TYPE_F32:
            return ((float *) data)[0];
        default:
            GGML_ASSERT(false);
    }

    return 0.0f;
}

void ggml_set_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, int32_t value) {
    void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3];
    switch (tensor->type) {
        case GGML_TYPE_I8:
            {
                ((int8_t *)(data))[0] = value;
            } break;
        case GGML_TYPE_I16:
            {
                ((int16_t *)(data))[0] = value;
            } break;
        case GGML_TYPE_I32:
            {
                ((int32_t *)(data))[0] = value;
            } break;
        case GGML_TYPE_F16:
            {
                ((ggml_fp16_t *)(data))[0] = GGML_FP32_TO_FP16(value);
            } break;
        case GGML_TYPE_F32:
            {
                ((float *)(data))[0] = value;
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

float ggml_get_f32_1d(const struct ggml_tensor * tensor, int i) {
    if (!ggml_is_contiguous(tensor)) {
        int64_t id[4] = { 0, 0, 0, 0 };
        ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]);
        return ggml_get_f32_nd(tensor, id[0], id[1], id[2], id[3]);
    }
    switch (tensor->type) {
        case GGML_TYPE_I8:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
                return ((int8_t *)(tensor->data))[i];
            }
        case GGML_TYPE_I16:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
                return ((int16_t *)(tensor->data))[i];
            }
        case GGML_TYPE_I32:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
                return ((int32_t *)(tensor->data))[i];
            }
        case GGML_TYPE_F16:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
                return GGML_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]);
            }
        case GGML_TYPE_F32:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(float));
                return ((float *)(tensor->data))[i];
            }
        default:
            {
                GGML_ASSERT(false);
            }
    }

    return 0.0f;
}

void ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value) {
    if (!ggml_is_contiguous(tensor)) {
        int64_t id[4] = { 0, 0, 0, 0 };
        ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]);
        ggml_set_f32_nd(tensor, id[0], id[1], id[2], id[3], value);
        return;
    }
    switch (tensor->type) {
        case GGML_TYPE_I8:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
                ((int8_t *)(tensor->data))[i] = value;
            } break;
        case GGML_TYPE_I16:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
                ((int16_t *)(tensor->data))[i] = value;
            } break;
        case GGML_TYPE_I32:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
                ((int32_t *)(tensor->data))[i] = value;
            } break;
        case GGML_TYPE_F16:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
                ((ggml_fp16_t *)(tensor->data))[i] = GGML_FP32_TO_FP16(value);
            } break;
        case GGML_TYPE_F32:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(float));
                ((float *)(tensor->data))[i] = value;
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

float ggml_get_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3) {
    void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3];
    switch (tensor->type) {
        case GGML_TYPE_I8:
            return ((int8_t *) data)[0];
        case GGML_TYPE_I16:
            return ((int16_t *) data)[0];
        case GGML_TYPE_I32:
            return ((int32_t *) data)[0];
        case GGML_TYPE_F16:
            return GGML_FP16_TO_FP32(((ggml_fp16_t *) data)[0]);
        case GGML_TYPE_F32:
            return ((float *) data)[0];
        default:
            GGML_ASSERT(false);
    }

    return 0.0f;
}

void ggml_set_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, float value) {
    void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3];
    switch (tensor->type) {
        case GGML_TYPE_I8:
            {
                ((int8_t *)(data))[0] = value;
            } break;
        case GGML_TYPE_I16:
            {
                ((int16_t *)(data))[0] = value;
            } break;
        case GGML_TYPE_I32:
            {
                ((int32_t *)(data))[0] = value;
            } break;
        case GGML_TYPE_F16:
            {
                ((ggml_fp16_t *)(data))[0] = GGML_FP32_TO_FP16(value);
            } break;
        case GGML_TYPE_F32:
            {
                ((float *)(data))[0] = value;
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
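
// Usage sketch (illustrative comment, not part of the original source): the
// _1d accessors index the tensor as if it were flat, falling back to
// ggml_unravel_index() + the _nd variant for non-contiguous layouts, while
// the _nd accessors apply the byte strides directly:
//
//     ggml_set_f32_nd(t, 1, 2, 0, 0, 3.14f);
//     float v = ggml_get_f32_nd(t, 1, 2, 0, 0);   // 3.14f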

void * ggml_get_data(const struct ggml_tensor * tensor) {
    return tensor->data;
}

float * ggml_get_data_f32(const struct ggml_tensor * tensor) {
    assert(tensor->type == GGML_TYPE_F32);
    return (float *)(tensor->data);
}

enum ggml_unary_op ggml_get_unary_op(const struct ggml_tensor * tensor) {
    GGML_ASSERT(tensor->op == GGML_OP_UNARY);
    return (enum ggml_unary_op) ggml_get_op_params_i32(tensor, 0);
}

const char * ggml_get_name(const struct ggml_tensor * tensor) {
    return tensor->name;
}

struct ggml_tensor * ggml_set_name(struct ggml_tensor * tensor, const char * name) {
    strncpy(tensor->name, name, sizeof(tensor->name));
    tensor->name[sizeof(tensor->name) - 1] = '\0';
    return tensor;
}

struct ggml_tensor * ggml_format_name(struct ggml_tensor * tensor, const char * fmt, ...) {
    va_list args;
    va_start(args, fmt);
    vsnprintf(tensor->name, sizeof(tensor->name), fmt, args);
    va_end(args);
    return tensor;
}
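
// Usage sketch (illustrative comment, not part of the original source): names
// are truncated to sizeof(tensor->name) - 1 characters and are mainly useful
// for debugging and for ggml_get_tensor() lookups:
//
//     ggml_set_name(w, "attn/w");
//     ggml_format_name(x, "layer_%d/x", il);   // il is a hypothetical layer index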

struct ggml_tensor * ggml_view_tensor(
        struct ggml_context * ctx,
        struct ggml_tensor  * src) {
    struct ggml_tensor * result = ggml_new_tensor_impl(ctx, src->type, src->n_dims, src->ne, src, 0);
    ggml_format_name(result, "%s (view)", src->name);

    for (int i = 0; i < GGML_MAX_DIMS; i++) {
        result->nb[i] = src->nb[i];
    }

    return result;
}
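
// Note (illustrative comment, not part of the original source): a view shares
// the source's data pointer, so writes through the view are visible in the
// source:
//
//     struct ggml_tensor * v = ggml_view_tensor(ctx, src);
//     // v->data == src->data, and v->nb[] matches src->nb[]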

struct ggml_tensor * ggml_get_first_tensor(struct ggml_context * ctx) {
    struct ggml_object * obj = ctx->objects_begin;

    char * const mem_buffer = ctx->mem_buffer;

    while (obj != NULL) {
        if (obj->type == GGML_OBJECT_TENSOR) {
            return (struct ggml_tensor *)(mem_buffer + obj->offs);
        }

        obj = obj->next;
    }

    return NULL;
}

struct ggml_tensor * ggml_get_next_tensor(struct ggml_context * ctx, struct ggml_tensor * tensor) {
    struct ggml_object * obj = (struct ggml_object *) ((char *)tensor - GGML_OBJECT_SIZE);
    obj = obj->next;

    char * const mem_buffer = ctx->mem_buffer;

    while (obj != NULL) {
        if (obj->type == GGML_OBJECT_TENSOR) {
            return (struct ggml_tensor *)(mem_buffer + obj->offs);
        }

        obj = obj->next;
    }

    return NULL;
}

struct ggml_tensor * ggml_get_tensor(struct ggml_context * ctx, const char * name) {
    struct ggml_object * obj = ctx->objects_begin;

    char * const mem_buffer = ctx->mem_buffer;

    while (obj != NULL) {
        if (obj->type == GGML_OBJECT_TENSOR) {
            struct ggml_tensor * cur = (struct ggml_tensor *)(mem_buffer + obj->offs);
            if (strcmp(cur->name, name) == 0) {
                return cur;
            }
        }

        obj = obj->next;
    }

    return NULL;
}
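
// Usage sketch (illustrative comment, not part of the original source):
// walking every tensor in a context through the object linked list:
//
//     for (struct ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL;
//          t = ggml_get_next_tensor(ctx, t)) {
//         printf("%-16s %s\n", ggml_get_name(t), ggml_type_name(t->type));
//     }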

////////////////////////////////////////////////////////////////////////////////

// ggml_dup

static struct ggml_tensor * ggml_dup_impl(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        bool                  inplace) {
    bool is_node = false;

    if (!inplace && (a->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_DUP;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_dup(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_dup_impl(ctx, a, false);
}

struct ggml_tensor * ggml_dup_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_dup_impl(ctx, a, true);
}

// ggml_add

static struct ggml_tensor * ggml_add_impl(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        bool                  inplace) {
    GGML_ASSERT(ggml_can_repeat(b, a));

    bool is_node = false;

    if (!inplace && (a->grad || b->grad)) {
        // TODO: support backward pass for broadcasting
        GGML_ASSERT(ggml_are_same_shape(a, b));
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_ADD;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

struct ggml_tensor * ggml_add(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_add_impl(ctx, a, b, false);
}

struct ggml_tensor * ggml_add_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_add_impl(ctx, a, b, true);
}
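
// Usage sketch (illustrative comment, not part of the original source): these
// constructors only record the operation in the graph; nothing is computed
// until the graph is evaluated (e.g. through ggml_build_forward_expand() and
// a ggml_graph_compute() driver elsewhere):
//
//     struct ggml_tensor * y = ggml_add(ctx, ggml_mul(ctx, x, x), x);   // y = x*x + x
//
// The _inplace variants reuse a's buffer through a view instead of allocating
// a new result tensor.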

// ggml_add_cast

static struct ggml_tensor * ggml_add_cast_impl(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        enum   ggml_type      type) {
    // TODO: support less-strict constraint
    //       GGML_ASSERT(ggml_can_repeat(b, a));
    GGML_ASSERT(ggml_can_repeat_rows(b, a));
    GGML_ASSERT(ggml_is_quantized(a->type) || a->type == GGML_TYPE_F16); // currently only supported for quantized input and f16

    bool is_node = false;

    if (a->grad || b->grad) {
        // TODO: support backward pass for broadcasting
        GGML_ASSERT(ggml_are_same_shape(a, b));
        is_node = true;
    }

    struct ggml_tensor * result = ggml_new_tensor(ctx, type, a->n_dims, a->ne);

    result->op   = GGML_OP_ADD;
    result->grad = is_node ? ggml_new_tensor(ctx, GGML_TYPE_F32, a->n_dims, a->ne) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

struct ggml_tensor * ggml_add_cast(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        enum   ggml_type     type) {
    return ggml_add_cast_impl(ctx, a, b, type);
}

// ggml_add1

static struct ggml_tensor * ggml_add1_impl(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        bool                  inplace) {
    GGML_ASSERT(ggml_is_scalar(b));
    GGML_ASSERT(ggml_is_padded_1d(a));

    bool is_node = false;

    if (a->grad || b->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_ADD1;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

struct ggml_tensor * ggml_add1(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_add1_impl(ctx, a, b, false);
}

struct ggml_tensor * ggml_add1_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_add1_impl(ctx, a, b, true);
}

// ggml_acc

static struct ggml_tensor * ggml_acc_impl(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        size_t                nb1,
        size_t                nb2,
        size_t                nb3,
        size_t                offset,
        bool                  inplace) {
    GGML_ASSERT(ggml_nelements(b) <= ggml_nelements(a));
    GGML_ASSERT(ggml_is_contiguous(a));
    GGML_ASSERT(a->type == GGML_TYPE_F32);
    GGML_ASSERT(b->type == GGML_TYPE_F32);

    bool is_node = false;

    if (!inplace && (a->grad || b->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    int32_t params[] = { nb1, nb2, nb3, offset, inplace ? 1 : 0 };
    ggml_set_op_params(result, params, sizeof(params));

    result->op   = GGML_OP_ACC;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

struct ggml_tensor * ggml_acc(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        size_t nb1,
        size_t nb2,
        size_t nb3,
        size_t offset) {
    return ggml_acc_impl(ctx, a, b, nb1, nb2, nb3, offset, false);
}

struct ggml_tensor * ggml_acc_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        size_t nb1,
        size_t nb2,
        size_t nb3,
        size_t offset) {
    return ggml_acc_impl(ctx, a, b, nb1, nb2, nb3, offset, true);
}

// ggml_sub

static struct ggml_tensor * ggml_sub_impl(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        bool                  inplace) {
    GGML_ASSERT(ggml_are_same_shape(a, b));

    bool is_node = false;

    if (!inplace && (a->grad || b->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_SUB;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

struct ggml_tensor * ggml_sub(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_sub_impl(ctx, a, b, false);
}

struct ggml_tensor * ggml_sub_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_sub_impl(ctx, a, b, true);
}

// ggml_mul

static struct ggml_tensor * ggml_mul_impl(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        bool                  inplace) {
    GGML_ASSERT(ggml_can_repeat(b, a));

    bool is_node = false;

    if (!inplace && (a->grad || b->grad)) {
        // TODO: support backward pass for broadcasting
        GGML_ASSERT(ggml_are_same_shape(a, b));
        is_node = true;
    }

    if (inplace) {
        GGML_ASSERT(!is_node);
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_MUL;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

struct ggml_tensor * ggml_mul(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_mul_impl(ctx, a, b, false);
}

struct ggml_tensor * ggml_mul_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_mul_impl(ctx, a, b, true);
}

// ggml_div

static struct ggml_tensor * ggml_div_impl(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        bool                  inplace) {
    GGML_ASSERT(ggml_can_repeat(b, a));

    bool is_node = false;

    if (!inplace && (a->grad || b->grad)) {
        is_node = true;
    }

    if (inplace) {
        GGML_ASSERT(!is_node);
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_DIV;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

struct ggml_tensor * ggml_div(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_div_impl(ctx, a, b, false);
}

struct ggml_tensor * ggml_div_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_div_impl(ctx, a, b, true);
}

// ggml_sqr

static struct ggml_tensor * ggml_sqr_impl(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        bool                  inplace) {
    bool is_node = false;

    if (!inplace && (a->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_SQR;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_sqr(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_sqr_impl(ctx, a, false);
}

struct ggml_tensor * ggml_sqr_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_sqr_impl(ctx, a, true);
}

// ggml_sqrt

static struct ggml_tensor * ggml_sqrt_impl(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        bool                  inplace) {
    bool is_node = false;

    if (!inplace && (a->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_SQRT;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_sqrt(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_sqrt_impl(ctx, a, false);
}

struct ggml_tensor * ggml_sqrt_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_sqrt_impl(ctx, a, true);
}

// ggml_log

static struct ggml_tensor * ggml_log_impl(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        bool                  inplace) {
    bool is_node = false;

    if (!inplace && (a->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_LOG;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_log(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_log_impl(ctx, a, false);
}

struct ggml_tensor * ggml_log_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_log_impl(ctx, a, true);
}

// ggml_sum

struct ggml_tensor * ggml_sum(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = ggml_new_tensor_1d(ctx, a->type, 1);

    result->op   = GGML_OP_SUM;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_sum_rows

struct ggml_tensor * ggml_sum_rows(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    int64_t ne[4] = { 1, 1, 1, 1 };
    for (int i = 1; i < a->n_dims; ++i) {
        ne[i] = a->ne[i];
    }

    struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, a->n_dims, ne);

    result->op   = GGML_OP_SUM_ROWS;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_mean

struct ggml_tensor * ggml_mean(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement
        is_node = true;
    }

    int64_t ne[GGML_MAX_DIMS] = { 1, a->ne[1], a->ne[2], a->ne[3] };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, a->n_dims, ne);

    result->op   = GGML_OP_MEAN;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_argmax

struct ggml_tensor * ggml_argmax(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    GGML_ASSERT(ggml_is_matrix(a));

    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false);
        is_node = true;
    }

    int64_t ne[GGML_MAX_DIMS] = { a->ne[1], 1, 1, 1 };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_I32, a->n_dims, ne);

    result->op   = GGML_OP_ARGMAX;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_repeat

struct ggml_tensor * ggml_repeat(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    GGML_ASSERT(ggml_can_repeat(a, b));

    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, b->n_dims, b->ne);

    result->op   = GGML_OP_REPEAT;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_repeat_back

struct ggml_tensor * ggml_repeat_back(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    GGML_ASSERT(ggml_can_repeat(b, a));

    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    if (ggml_are_same_shape(a, b) && !is_node) {
        return a;
    }

    struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, b->n_dims, b->ne);

    result->op   = GGML_OP_REPEAT_BACK;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_concat

struct ggml_tensor * ggml_concat(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    GGML_ASSERT(a->ne[0] == b->ne[0] && a->ne[1] == b->ne[1] && a->ne[3] == b->ne[3]);

    bool is_node = false;

    if (a->grad || b->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = ggml_new_tensor_4d(ctx, a->type, a->ne[0], a->ne[1], a->ne[2] + b->ne[2], a->ne[3]);

    result->op   = GGML_OP_CONCAT;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

// ggml_abs

struct ggml_tensor * ggml_abs(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary(ctx, a, GGML_UNARY_OP_ABS);
}

struct ggml_tensor * ggml_abs_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_ABS);
}

// ggml_sgn

struct ggml_tensor * ggml_sgn(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary(ctx, a, GGML_UNARY_OP_SGN);
}

struct ggml_tensor * ggml_sgn_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_SGN);
}

// ggml_neg

struct ggml_tensor * ggml_neg(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary(ctx, a, GGML_UNARY_OP_NEG);
}

struct ggml_tensor * ggml_neg_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_NEG);
}

// ggml_step

struct ggml_tensor * ggml_step(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary(ctx, a, GGML_UNARY_OP_STEP);
}

struct ggml_tensor * ggml_step_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_STEP);
}

// ggml_tanh

struct ggml_tensor * ggml_tanh(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary(ctx, a, GGML_UNARY_OP_TANH);
}

struct ggml_tensor * ggml_tanh_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_TANH);
}

// ggml_elu

struct ggml_tensor * ggml_elu(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary(ctx, a, GGML_UNARY_OP_ELU);
}

struct ggml_tensor * ggml_elu_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_ELU);
}

// ggml_relu

struct ggml_tensor * ggml_relu(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary(ctx, a, GGML_UNARY_OP_RELU);
}

struct ggml_tensor * ggml_relu_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_RELU);
}

// ggml_leaky_relu

struct ggml_tensor * ggml_leaky_relu(
        struct ggml_context * ctx,
        struct ggml_tensor * a, float negative_slope, bool inplace) {
    bool is_node = false;

    if (!inplace && (a->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    ggml_set_op_params(result, &negative_slope, sizeof(negative_slope));

    result->op   = GGML_OP_LEAKY_RELU;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_gelu

struct ggml_tensor * ggml_gelu(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary(ctx, a, GGML_UNARY_OP_GELU);
}

struct ggml_tensor * ggml_gelu_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_GELU);
}

// ggml_gelu_quick

struct ggml_tensor * ggml_gelu_quick(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary(ctx, a, GGML_UNARY_OP_GELU_QUICK);
}

struct ggml_tensor * ggml_gelu_quick_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_GELU_QUICK);
}

// ggml_silu

struct ggml_tensor * ggml_silu(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary(ctx, a, GGML_UNARY_OP_SILU);
}

struct ggml_tensor * ggml_silu_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_SILU);
}

// ggml_silu_back

struct ggml_tensor * ggml_silu_back(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    bool is_node = false;

    if (a->grad || b->grad) {
        // TODO: implement backward
        is_node = true;
    }

    struct ggml_tensor * result = ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_SILU_BACK;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

// ggml_norm

static struct ggml_tensor * ggml_norm_impl(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        float                 eps,
        bool                  inplace) {
    bool is_node = false;

    if (!inplace && (a->grad)) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    ggml_set_op_params(result, &eps, sizeof(eps));

    result->op   = GGML_OP_NORM;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_norm(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        float eps) {
    return ggml_norm_impl(ctx, a, eps, false);
}

struct ggml_tensor * ggml_norm_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        float eps) {
    return ggml_norm_impl(ctx, a, eps, true);
}

// ggml_rms_norm

static struct ggml_tensor * ggml_rms_norm_impl(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        float                 eps,
        bool                  inplace) {
    bool is_node = false;

    if (!inplace && (a->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    ggml_set_op_params(result, &eps, sizeof(eps));

    result->op   = GGML_OP_RMS_NORM;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_rms_norm(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        float eps) {
    return ggml_rms_norm_impl(ctx, a, eps, false);
}

struct ggml_tensor * ggml_rms_norm_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        float eps) {
    return ggml_rms_norm_impl(ctx, a, eps, true);
}

// ggml_rms_norm_back

struct ggml_tensor * ggml_rms_norm_back(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        float eps) {
    bool is_node = false;

    if (a->grad) {
        // TODO: implement backward
        is_node = true;
    }

    struct ggml_tensor * result = ggml_dup_tensor(ctx, a);

    ggml_set_op_params(result, &eps, sizeof(eps));

    result->op   = GGML_OP_RMS_NORM_BACK;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

// ggml_group_norm

static struct ggml_tensor * ggml_group_norm_impl(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        int                   n_groups,
        bool                  inplace) {
    bool is_node = false;

    if (!inplace && (a->grad)) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op_params[0] = n_groups;

    result->op   = GGML_OP_GROUP_NORM;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = NULL; // TODO: maybe store epsilon here?

    return result;
}

struct ggml_tensor * ggml_group_norm(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_groups) {
    return ggml_group_norm_impl(ctx, a, n_groups, false);
}

struct ggml_tensor * ggml_group_norm_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_groups) {
    return ggml_group_norm_impl(ctx, a, n_groups, true);
}

// ggml_mul_mat

struct ggml_tensor * ggml_mul_mat(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    GGML_ASSERT(ggml_can_mul_mat(a, b));
    GGML_ASSERT(!ggml_is_transposed(a));

    bool is_node = false;

    if (a->grad || b->grad) {
        is_node = true;
    }

    const int64_t ne[4] = { a->ne[1], b->ne[1], b->ne[2], b->ne[3] };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, MAX(a->n_dims, b->n_dims), ne);

    result->op   = GGML_OP_MUL_MAT;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}
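
// Shape sketch (illustrative comment, not part of the original source): ggml
// treats ne[0] as the innermost (row-length) dimension, so for
//
//     a: ne = {K, M, ...}   (must not be transposed)
//     b: ne = {K, N, ...}
//
// ggml_mul_mat(ctx, a, b) produces ne = {M, N, b->ne[2], b->ne[3]}; each
// result element is a dot product of one row of a with one row of b.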

// ggml_mul_mat_id

struct ggml_tensor * ggml_mul_mat_id(
        struct ggml_context * ctx,
        struct ggml_tensor * const as[],
        int n_as,
        struct ggml_tensor * ids,
        int id,
        struct ggml_tensor * b) {
    GGML_ASSERT(ids->type == GGML_TYPE_I32);
    GGML_ASSERT(ids->ne[2] == 1 && ids->ne[3] == 1);
    GGML_ASSERT(ids->ne[1] == b->ne[1]);
    GGML_ASSERT(ids->ne[2] == b->ne[2] && ids->ne[3] == b->ne[3]);
    GGML_ASSERT(n_as > 0 && n_as <= GGML_MAX_SRC - 2);
    GGML_ASSERT(id >= 0 && id < ids->ne[0]);

    bool is_node = false;

    if (as[0]->grad || b->grad) {
        is_node = true;
    }

    const int64_t ne[4] = { as[0]->ne[1], b->ne[1], b->ne[2], b->ne[3] };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, MAX(as[0]->n_dims, b->n_dims), ne);

    ggml_set_op_params_i32(result, 0, id);
    ggml_set_op_params_i32(result, 1, n_as);

    result->op   = GGML_OP_MUL_MAT_ID;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = ids;
    result->src[1] = b;

    for (int i = 0; i < n_as; i++) {
        struct ggml_tensor * a = as[i];
        GGML_ASSERT(ggml_are_same_shape(as[0], a));
        GGML_ASSERT(ggml_can_mul_mat(a, b));
        GGML_ASSERT(!ggml_is_transposed(a));
        result->src[i + 2] = a;
    }

    return result;
}

// ggml_out_prod

struct ggml_tensor * ggml_out_prod(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    GGML_ASSERT(ggml_can_out_prod(a, b));
    GGML_ASSERT(!ggml_is_transposed(a));

    bool is_node = false;

    if (a->grad || b->grad) {
        is_node = true;
    }

    // a is broadcastable to b for ne[2] and ne[3] -> use b->ne[2] and b->ne[3]
    const int64_t ne[4] = { a->ne[0], b->ne[0], b->ne[2], b->ne[3] };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, MAX(a->n_dims, b->n_dims), ne);

    result->op   = GGML_OP_OUT_PROD;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

// ggml_scale

static struct ggml_tensor * ggml_scale_impl(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        bool                  inplace) {
    GGML_ASSERT(ggml_is_scalar(b));
    GGML_ASSERT(ggml_is_padded_1d(a));

    bool is_node = false;

    if (a->grad || b->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_SCALE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

struct ggml_tensor * ggml_scale(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_scale_impl(ctx, a, b, false);
}

struct ggml_tensor * ggml_scale_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_scale_impl(ctx, a, b, true);
}

// ggml_set

static struct ggml_tensor * ggml_set_impl(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        size_t                nb1,
        size_t                nb2,
        size_t                nb3,
        size_t                offset,
        bool                  inplace) {
    GGML_ASSERT(ggml_nelements(a) >= ggml_nelements(b));

    bool is_node = false;

    if (a->grad || b->grad) {
        is_node = true;
    }

    // make a view of the destination
    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    int32_t params[] = { nb1, nb2, nb3, offset, inplace ? 1 : 0 };
    ggml_set_op_params(result, params, sizeof(params));

    result->op   = GGML_OP_SET;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

struct ggml_tensor * ggml_set(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        size_t nb1,
        size_t nb2,
        size_t nb3,
        size_t offset) {
    return ggml_set_impl(ctx, a, b, nb1, nb2, nb3, offset, false);
}

struct ggml_tensor * ggml_set_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        size_t nb1,
        size_t nb2,
        size_t nb3,
        size_t offset) {
    return ggml_set_impl(ctx, a, b, nb1, nb2, nb3, offset, true);
}

struct ggml_tensor * ggml_set_1d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        size_t offset) {
    return ggml_set_impl(ctx, a, b, a->nb[1], a->nb[2], a->nb[3], offset, false);
}

struct ggml_tensor * ggml_set_1d_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        size_t offset) {
    return ggml_set_impl(ctx, a, b, a->nb[1], a->nb[2], a->nb[3], offset, true);
}

struct ggml_tensor * ggml_set_2d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        size_t nb1,
        size_t offset) {
    return ggml_set_impl(ctx, a, b, nb1, a->nb[2], a->nb[3], offset, false);
}

struct ggml_tensor * ggml_set_2d_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        size_t nb1,
        size_t offset) {
    return ggml_set_impl(ctx, a, b, nb1, a->nb[2], a->nb[3], offset, true);
}

// ggml_cpy

static struct ggml_tensor * ggml_cpy_impl(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        bool                  inplace) {
    GGML_ASSERT(ggml_nelements(a) == ggml_nelements(b));

    bool is_node = false;

    if (!inplace && (a->grad || b->grad)) {
        is_node = true;
    }

    // make a view of the destination
    struct ggml_tensor * result = ggml_view_tensor(ctx, b);
    if (strlen(b->name) > 0) {
        ggml_format_name(result, "%s (copy of %s)", b->name, a->name);
    } else {
        ggml_format_name(result, "%s (copy)", a->name);
    }

    result->op   = GGML_OP_CPY;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

struct ggml_tensor * ggml_cpy(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_cpy_impl(ctx, a, b, false);
}

struct ggml_tensor * ggml_cpy_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_cpy_impl(ctx, a, b, true);
}
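
// Usage sketch (illustrative comment, not part of the original source):
// ggml_cpy doubles as the type/layout conversion primitive; the result is a
// view of b that, once the graph runs, holds a's elements converted to b's
// type. The tensor names here are hypothetical:
//
//     struct ggml_tensor * dst = ggml_new_tensor_2d(ctx, GGML_TYPE_F16, n, m);
//     struct ggml_tensor * cur = ggml_cpy(ctx, src_f32, dst);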

// ggml_cont

static struct ggml_tensor * ggml_cont_impl(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        bool                  inplace) {
    bool is_node = false;

    if (!inplace && a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
    ggml_format_name(result, "%s (cont)", a->name);

    result->op   = GGML_OP_CONT;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_cont(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_cont_impl(ctx, a, false);
}

struct ggml_tensor * ggml_cont_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_cont_impl(ctx, a, true);
}

// make contiguous, with new shape
GGML_API struct ggml_tensor * ggml_cont_1d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0) {
    return ggml_cont_4d(ctx, a, ne0, 1, 1, 1);
}

GGML_API struct ggml_tensor * ggml_cont_2d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        int64_t ne1) {
    return ggml_cont_4d(ctx, a, ne0, ne1, 1, 1);
}

GGML_API struct ggml_tensor * ggml_cont_3d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        int64_t ne1,
        int64_t ne2) {
    return ggml_cont_4d(ctx, a, ne0, ne1, ne2, 1);
}

struct ggml_tensor * ggml_cont_4d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        int64_t ne1,
        int64_t ne2,
        int64_t ne3) {
    GGML_ASSERT(ggml_nelements(a) == (ne0*ne1*ne2*ne3));

    bool is_node = false;

    struct ggml_tensor * result = ggml_new_tensor_4d(ctx, a->type, ne0, ne1, ne2, ne3);
    ggml_format_name(result, "%s (cont)", a->name);

    result->op   = GGML_OP_CONT;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_reshape

struct ggml_tensor * ggml_reshape(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    GGML_ASSERT(ggml_is_contiguous(a));
    // as only the shape of b is relevant, and not its memory layout, b is allowed to be non contiguous.
    GGML_ASSERT(ggml_nelements(a) == ggml_nelements(b));

    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    if (b->grad) {
        // gradient propagation is not supported
        //GGML_ASSERT(false);
    }

    struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, b->n_dims, b->ne, a, 0);
    ggml_format_name(result, "%s (reshaped)", a->name);

    result->op   = GGML_OP_RESHAPE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_reshape_1d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0) {
    GGML_ASSERT(ggml_is_contiguous(a));
    GGML_ASSERT(ggml_nelements(a) == ne0);

    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    const int64_t ne[1] = { ne0 };
    struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 1, ne, a, 0);
    ggml_format_name(result, "%s (reshaped)", a->name);

    result->op   = GGML_OP_RESHAPE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_reshape_2d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        int64_t ne1) {
    GGML_ASSERT(ggml_is_contiguous(a));
    GGML_ASSERT(ggml_nelements(a) == ne0*ne1);

    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    const int64_t ne[2] = { ne0, ne1 };
    struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 2, ne, a, 0);
    ggml_format_name(result, "%s (reshaped)", a->name);

    result->op   = GGML_OP_RESHAPE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_reshape_3d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        int64_t ne1,
        int64_t ne2) {
    GGML_ASSERT(ggml_is_contiguous(a));
    GGML_ASSERT(ggml_nelements(a) == ne0*ne1*ne2);

    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    const int64_t ne[3] = { ne0, ne1, ne2 };
    struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 3, ne, a, 0);
    ggml_format_name(result, "%s (reshaped)", a->name);

    result->op   = GGML_OP_RESHAPE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_reshape_4d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        int64_t ne1,
        int64_t ne2,
        int64_t ne3) {
    GGML_ASSERT(ggml_is_contiguous(a));
    GGML_ASSERT(ggml_nelements(a) == ne0*ne1*ne2*ne3);

    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    const int64_t ne[4] = { ne0, ne1, ne2, ne3 };
    struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 4, ne, a, 0);
    ggml_format_name(result, "%s (reshaped)", a->name);

    result->op   = GGML_OP_RESHAPE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}
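
// Usage sketch (illustrative comment, not part of the original source): a
// reshape is a zero-copy view over the same contiguous data, so the element
// count must match exactly:
//
//     struct ggml_tensor * m = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 4, 6);
//     struct ggml_tensor * r = ggml_reshape_3d(ctx, m, 4, 2, 3);   // 24 elements either way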
static struct ggml_tensor * ggml_view_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_dims,
        const int64_t * ne,
        size_t offset) {
    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, n_dims, ne, a, offset);
    ggml_format_name(result, "%s (view)", a->name);

    ggml_set_op_params(result, &offset, sizeof(offset));

    result->op   = GGML_OP_VIEW;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_view_1d

struct ggml_tensor * ggml_view_1d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        size_t offset) {
    struct ggml_tensor * result = ggml_view_impl(ctx, a, 1, &ne0, offset);

    return result;
}

// ggml_view_2d

struct ggml_tensor * ggml_view_2d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        int64_t ne1,
        size_t nb1,
        size_t offset) {
    const int64_t ne[2] = { ne0, ne1 };

    struct ggml_tensor * result = ggml_view_impl(ctx, a, 2, ne, offset);

    result->nb[1] = nb1;
    result->nb[2] = result->nb[1]*ne1;
    result->nb[3] = result->nb[2];

    return result;
}

// ggml_view_3d

struct ggml_tensor * ggml_view_3d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        int64_t ne1,
        int64_t ne2,
        size_t nb1,
        size_t nb2,
        size_t offset) {
    const int64_t ne[3] = { ne0, ne1, ne2 };

    struct ggml_tensor * result = ggml_view_impl(ctx, a, 3, ne, offset);

    result->nb[1] = nb1;
    result->nb[2] = nb2;
    result->nb[3] = result->nb[2]*ne2;

    return result;
}

// ggml_view_4d

struct ggml_tensor * ggml_view_4d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        int64_t ne1,
        int64_t ne2,
        int64_t ne3,
        size_t nb1,
        size_t nb2,
        size_t nb3,
        size_t offset) {
    const int64_t ne[4] = { ne0, ne1, ne2, ne3 };

    struct ggml_tensor * result = ggml_view_impl(ctx, a, 4, ne, offset);

    result->nb[1] = nb1;
    result->nb[2] = nb2;
    result->nb[3] = nb3;

    return result;
}
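// Illustrative sketch (assumed helper, not upstream API): the nb arguments
// above let a view reinterpret strides, so a zero-copy slice of rows
// [r0, r0 + nrows) of a matrix simply reuses the source row stride a->nb[1]
// and starts r0 rows into the data:
static struct ggml_tensor * ggml_example_row_slice(
        struct ggml_context * ctx,
        struct ggml_tensor * a, // 2-D, row stride a->nb[1]
        int64_t r0,
        int64_t nrows) {
    return ggml_view_2d(ctx, a, a->ne[0], nrows, a->nb[1], r0*a->nb[1]);
}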
// ggml_permute

struct ggml_tensor * ggml_permute(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int axis0,
        int axis1,
        int axis2,
        int axis3) {
    GGML_ASSERT(axis0 >= 0 && axis0 < GGML_MAX_DIMS);
    GGML_ASSERT(axis1 >= 0 && axis1 < GGML_MAX_DIMS);
    GGML_ASSERT(axis2 >= 0 && axis2 < GGML_MAX_DIMS);
    GGML_ASSERT(axis3 >= 0 && axis3 < GGML_MAX_DIMS);

    GGML_ASSERT(axis0 != axis1);
    GGML_ASSERT(axis0 != axis2);
    GGML_ASSERT(axis0 != axis3);
    GGML_ASSERT(axis1 != axis2);
    GGML_ASSERT(axis1 != axis3);
    GGML_ASSERT(axis2 != axis3);

    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = ggml_view_tensor(ctx, a);
    ggml_format_name(result, "%s (permuted)", a->name);

    int ne[GGML_MAX_DIMS];
    int nb[GGML_MAX_DIMS];

    ne[axis0] = a->ne[0];
    ne[axis1] = a->ne[1];
    ne[axis2] = a->ne[2];
    ne[axis3] = a->ne[3];

    nb[axis0] = a->nb[0];
    nb[axis1] = a->nb[1];
    nb[axis2] = a->nb[2];
    nb[axis3] = a->nb[3];

    result->ne[0] = ne[0];
    result->ne[1] = ne[1];
    result->ne[2] = ne[2];
    result->ne[3] = ne[3];

    result->nb[0] = nb[0];
    result->nb[1] = nb[1];
    result->nb[2] = nb[2];
    result->nb[3] = nb[3];

    result->op   = GGML_OP_PERMUTE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    int32_t params[] = { axis0, axis1, axis2, axis3 };
    ggml_set_op_params(result, params, sizeof(params));

    return result;
}
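// Illustrative note: ggml_permute only rewrites the ne/nb bookkeeping of a
// view -- no data moves. A sketch (assumed shapes) of the common attention
// permutation that swaps source dims 1 and 2, taking
// [head_dim, n_head, n_tokens, batch] to [head_dim, n_tokens, n_head, batch]:
static struct ggml_tensor * ggml_example_swap_dims_1_2(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    // each axis argument says where that source dim lands in the result
    return ggml_permute(ctx, a, 0, 2, 1, 3);
}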
// ggml_transpose

struct ggml_tensor * ggml_transpose(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = ggml_view_tensor(ctx, a);
    ggml_format_name(result, "%s (transposed)", a->name);

    result->ne[0] = a->ne[1];
    result->ne[1] = a->ne[0];

    result->nb[0] = a->nb[1];
    result->nb[1] = a->nb[0];

    result->op   = GGML_OP_TRANSPOSE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_get_rows

struct ggml_tensor * ggml_get_rows(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    GGML_ASSERT(a->ne[2] == b->ne[1]);
    GGML_ASSERT(b->ne[3] == 1);
    GGML_ASSERT(b->type == GGML_TYPE_I32);

    bool is_node = false;

    if (a->grad || b->grad) {
        is_node = true;
    }

    // TODO: implement non F32 return
    //struct ggml_tensor * result = ggml_new_tensor_2d(ctx, a->type, a->ne[0], b->ne[0]);
    struct ggml_tensor * result = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, a->ne[0], b->ne[0], b->ne[1], b->ne[2]);

    result->op   = GGML_OP_GET_ROWS;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}
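// Illustrative sketch (assumed names): the canonical use of ggml_get_rows is
// the token-embedding lookup at the start of an LLM graph.
static struct ggml_tensor * ggml_example_embed_tokens(
        struct ggml_context * ctx,
        struct ggml_tensor * tok_embd,     // [n_embd, n_vocab]
        struct ggml_tensor * inp_tokens) { // I32 vector of n_tokens ids
    return ggml_get_rows(ctx, tok_embd, inp_tokens); // F32 [n_embd, n_tokens]
}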
// ggml_get_rows_back

struct ggml_tensor * ggml_get_rows_back(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        struct ggml_tensor * c) {
    GGML_ASSERT(ggml_is_matrix(a) && ggml_is_vector(b) && b->type == GGML_TYPE_I32);
    GGML_ASSERT(ggml_is_matrix(c) && (a->ne[0] == c->ne[0]));

    bool is_node = false;

    if (a->grad || b->grad) {
        is_node = true;
    }

    // TODO: implement non F32 return
    //struct ggml_tensor * result = ggml_new_tensor_2d(ctx, a->type, a->ne[0], b->ne[0]);
    struct ggml_tensor * result = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, c->ne[0], c->ne[1]);

    result->op   = GGML_OP_GET_ROWS_BACK;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

// ggml_diag

struct ggml_tensor * ggml_diag(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    GGML_ASSERT(a->ne[1] == 1);

    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    const int64_t ne[4] = { a->ne[0], a->ne[0], a->ne[2], a->ne[3] };
    struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, MAX(a->n_dims, 2), ne);

    result->op   = GGML_OP_DIAG;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_diag_mask_inf

static struct ggml_tensor * ggml_diag_mask_inf_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_past,
        bool inplace) {
    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    int32_t params[] = { n_past };
    ggml_set_op_params(result, params, sizeof(params));

    result->op   = GGML_OP_DIAG_MASK_INF;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_diag_mask_inf(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_past) {
    return ggml_diag_mask_inf_impl(ctx, a, n_past, false);
}

struct ggml_tensor * ggml_diag_mask_inf_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_past) {
    return ggml_diag_mask_inf_impl(ctx, a, n_past, true);
}
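// Illustrative note (assumed variable names): in a causal decoder the
// attention scores are typically masked in place right before the softmax:
//
//   kq = ggml_diag_mask_inf_inplace(ctx, kq, n_past);
//   kq = ggml_soft_max_inplace(ctx, kq);
//
// masked entries become -INFINITY and therefore contribute zero probability
// after the softmax.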
// ggml_diag_mask_zero

static struct ggml_tensor * ggml_diag_mask_zero_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_past,
        bool inplace) {
    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    int32_t params[] = { n_past };
    ggml_set_op_params(result, params, sizeof(params));

    result->op   = GGML_OP_DIAG_MASK_ZERO;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_diag_mask_zero(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_past) {
    return ggml_diag_mask_zero_impl(ctx, a, n_past, false);
}

struct ggml_tensor * ggml_diag_mask_zero_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_past) {
    return ggml_diag_mask_zero_impl(ctx, a, n_past, true);
}

// ggml_soft_max

static struct ggml_tensor * ggml_soft_max_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * mask,
        float scale,
        bool inplace) {
    GGML_ASSERT(ggml_is_contiguous(a));
    if (mask) {
        GGML_ASSERT(ggml_is_contiguous(mask));
        GGML_ASSERT(mask->ne[2] == 1);
        GGML_ASSERT(mask->ne[3] == 1);
        GGML_ASSERT(ggml_can_repeat_rows(mask, a));
    }

    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    float params[] = { scale };
    ggml_set_op_params(result, params, sizeof(params));

    result->op   = GGML_OP_SOFT_MAX;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = mask;

    return result;
}

struct ggml_tensor * ggml_soft_max(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_soft_max_impl(ctx, a, NULL, 1.0f, false);
}

struct ggml_tensor * ggml_soft_max_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_soft_max_impl(ctx, a, NULL, 1.0f, true);
}

struct ggml_tensor * ggml_soft_max_ext(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * mask,
        float scale) {
    return ggml_soft_max_impl(ctx, a, mask, scale, false);
}
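// Illustrative sketch (assumed shapes/names, not upstream code): the fused
// scaled+masked softmax over attention scores, with the usual
// 1/sqrt(head_dim) scale. The mask is broadcast across rows per the
// ggml_can_repeat_rows() assert above.
static struct ggml_tensor * ggml_example_attn_softmax(
        struct ggml_context * ctx,
        struct ggml_tensor * kq,      // F32, contiguous attention scores
        struct ggml_tensor * kq_mask, // F32, ne[2] == ne[3] == 1
        int64_t head_dim) {
    return ggml_soft_max_ext(ctx, kq, kq_mask, 1.0f/sqrtf((float) head_dim));
}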
// ggml_soft_max_back

static struct ggml_tensor * ggml_soft_max_back_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        bool inplace) {
    bool is_node = false;

    if (a->grad || b->grad) {
        is_node = true; // TODO: implement backward pass
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_SOFT_MAX_BACK;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

struct ggml_tensor * ggml_soft_max_back(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_soft_max_back_impl(ctx, a, b, false);
}

struct ggml_tensor * ggml_soft_max_back_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_soft_max_back_impl(ctx, a, b, true);
}

// ggml_rope

static struct ggml_tensor * ggml_rope_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        int n_dims,
        int mode,
        int n_ctx,
        int n_orig_ctx,
        float freq_base,
        float freq_scale,
        float ext_factor,
        float attn_factor,
        float beta_fast,
        float beta_slow,
        float xpos_base,
        bool xpos_down,
        bool inplace) {
    GGML_ASSERT(ggml_is_vector(b));
    GGML_ASSERT(b->type == GGML_TYPE_I32);
    GGML_ASSERT(a->ne[2] == b->ne[0]);

    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    int32_t params[13] = { /*n_past*/ 0, n_dims, mode, n_ctx, n_orig_ctx };
    memcpy(params +  5, &freq_base,   sizeof(float));
    memcpy(params +  6, &freq_scale,  sizeof(float));
    memcpy(params +  7, &ext_factor,  sizeof(float));
    memcpy(params +  8, &attn_factor, sizeof(float));
    memcpy(params +  9, &beta_fast,   sizeof(float));
    memcpy(params + 10, &beta_slow,   sizeof(float));
    memcpy(params + 11, &xpos_base,   sizeof(float));
    memcpy(params + 12, &xpos_down,   sizeof(bool));
    ggml_set_op_params(result, params, sizeof(params));

    result->op   = GGML_OP_ROPE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

struct ggml_tensor * ggml_rope(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        int n_dims,
        int mode,
        int n_ctx) {
    return ggml_rope_impl(
        ctx, a, b, n_dims, mode, n_ctx, 0, 10000.0f, 1.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, false, false
    );
}
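// Illustrative sketch (assumed names): applying RoPE to a query tensor,
// where `inp_pos` holds one I32 position per token so that
// inp_pos->ne[0] == q->ne[2], as asserted in ggml_rope_impl above.
static struct ggml_tensor * ggml_example_rope_q(
        struct ggml_context * ctx,
        struct ggml_tensor * q,       // [head_dim, n_head, n_tokens, 1]
        struct ggml_tensor * inp_pos, // I32 [n_tokens]
        int n_rot,
        int n_ctx) {
    return ggml_rope(ctx, q, inp_pos, n_rot, 0 /*mode*/, n_ctx);
}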
struct ggml_tensor * ggml_rope_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        int n_dims,
        int mode,
        int n_ctx) {
    return ggml_rope_impl(
        ctx, a, b, n_dims, mode, n_ctx, 0, 10000.0f, 1.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, false, true
    );
}

struct ggml_tensor * ggml_rope_custom(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        int n_dims,
        int mode,
        int n_ctx,
        int n_orig_ctx,
        float freq_base,
        float freq_scale,
        float ext_factor,
        float attn_factor,
        float beta_fast,
        float beta_slow) {
    return ggml_rope_impl(
        ctx, a, b, n_dims, mode, n_ctx, n_orig_ctx, freq_base, freq_scale,
        ext_factor, attn_factor, beta_fast, beta_slow, 0.0f, false, false
    );
}

struct ggml_tensor * ggml_rope_custom_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        int n_dims,
        int mode,
        int n_ctx,
        int n_orig_ctx,
        float freq_base,
        float freq_scale,
        float ext_factor,
        float attn_factor,
        float beta_fast,
        float beta_slow) {
    return ggml_rope_impl(
        ctx, a, b, n_dims, mode, n_ctx, n_orig_ctx, freq_base, freq_scale,
        ext_factor, attn_factor, beta_fast, beta_slow, 0.0f, false, true
    );
}

struct ggml_tensor * ggml_rope_xpos_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        int n_dims,
        float base,
        bool down) {
    return ggml_rope_impl(ctx, a, b, n_dims, 0, 0, 0, 10000.0f, 1.0f, 0.0f, 1.0f, 0.0f, 0.0f, base, down, true);
}

// ggml_rope_back

struct ggml_tensor * ggml_rope_back(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        int n_dims,
        int mode,
        int n_ctx,
        int n_orig_ctx,
        float freq_base,
        float freq_scale,
        float ext_factor,
        float attn_factor,
        float beta_fast,
        float beta_slow,
        float xpos_base,
        bool xpos_down) {
    GGML_ASSERT(ggml_is_vector(b));
    GGML_ASSERT(b->type == GGML_TYPE_I32);
    GGML_ASSERT(a->ne[2] == b->ne[0]);

    GGML_ASSERT((mode & 4) == 0 && "ggml_rope_back() for ChatGLM not implemented yet");

    bool is_node = false;

    if (a->grad) {
        is_node = false; // TODO: implement backward
    }

    struct ggml_tensor * result = ggml_dup_tensor(ctx, a);

    int32_t params[13] = { /*n_past*/ 0, n_dims, mode, n_ctx, n_orig_ctx };
    memcpy(params +  5, &freq_base,   sizeof(float));
    memcpy(params +  6, &freq_scale,  sizeof(float));
    memcpy(params +  7, &ext_factor,  sizeof(float));
    memcpy(params +  8, &attn_factor, sizeof(float));
    memcpy(params +  9, &beta_fast,   sizeof(float));
    memcpy(params + 10, &beta_slow,   sizeof(float));
    memcpy(params + 11, &xpos_base,   sizeof(float));
    memcpy(params + 12, &xpos_down,   sizeof(bool));
    ggml_set_op_params(result, params, sizeof(params));

    result->op   = GGML_OP_ROPE_BACK;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

// ggml_alibi

struct ggml_tensor * ggml_alibi(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_past,
        int n_head,
        float bias_max) {
    GGML_ASSERT(n_past >= 0);

    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    // TODO: when implement backward, fix this:
    //struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
    struct ggml_tensor * result = ggml_view_tensor(ctx, a);

    int32_t op_params[3] = { n_past, n_head };
    memcpy(op_params + 2, &bias_max, sizeof(float));
    ggml_set_op_params(result, op_params, sizeof(op_params));

    result->op   = GGML_OP_ALIBI;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_clamp

struct ggml_tensor * ggml_clamp(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        float min,
        float max) {
    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    // TODO: when implement backward, fix this:
    struct ggml_tensor * result = ggml_view_tensor(ctx, a);

    float params[] = { min, max };
    ggml_set_op_params(result, params, sizeof(params));

    result->op   = GGML_OP_CLAMP;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_conv_1d

static int64_t ggml_calc_conv_output_size(int64_t ins, int64_t ks, int s, int p, int d) {
    return (ins + 2 * p - d * (ks - 1) - 1) / s + 1;
}

GGML_API struct ggml_tensor * ggml_conv_1d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        int s0,
        int p0,
        int d0) {
    struct ggml_tensor * im2col = ggml_im2col(ctx, a, b, s0, 0, p0, 0, d0, 0, false); // [N, OL, IC * K]

    struct ggml_tensor * result =
        ggml_mul_mat(ctx,
                ggml_reshape_2d(ctx, im2col, im2col->ne[0], (im2col->ne[2] * im2col->ne[1])), // [N, OL, IC * K] => [N*OL, IC * K]
                ggml_reshape_2d(ctx, a, (a->ne[0] * a->ne[1]), a->ne[2]));                    // [OC,IC, K] => [OC, IC * K]

    result = ggml_reshape_3d(ctx, result, im2col->ne[1], a->ne[2], im2col->ne[2]); // [N, OC, OL]

    return result;
}
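// Worked example (illustrative): for input length ins = 16, kernel ks = 3,
// stride s = 1, padding p = 1, dilation d = 1 the formula above gives
//
//   (16 + 2*1 - 1*(3 - 1) - 1)/1 + 1 = 16
//
// i.e. p = ks/2 preserves the length for odd kernels, which is exactly what
// ggml_conv_1d_ph below relies on.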
// ggml_conv_1d_ph

struct ggml_tensor * ggml_conv_1d_ph(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        int s,
        int d) {
    return ggml_conv_1d(ctx, a, b, s, a->ne[0] / 2, d);
}

// ggml_conv_transpose_1d

static int64_t ggml_calc_conv_transpose_1d_output_size(int64_t ins, int64_t ks, int s, int p, int d) {
    return (ins - 1) * s - 2 * p + d * (ks - 1) + 1;
}

GGML_API struct ggml_tensor * ggml_conv_transpose_1d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        int s0,
        int p0,
        int d0) {
    GGML_ASSERT(ggml_is_matrix(b));
    GGML_ASSERT(a->ne[2] == b->ne[1]);
    GGML_ASSERT(a->ne[3] == 1);

    GGML_ASSERT(p0 == 0);
    GGML_ASSERT(d0 == 1);

    bool is_node = false;

    if (a->grad || b->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    const int64_t ne[4] = {
        ggml_calc_conv_transpose_1d_output_size(b->ne[0], a->ne[0], s0, 0 /*p0*/, 1 /*d0*/),
        a->ne[1], b->ne[2], 1,
    };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);

    int32_t params[] = { s0, p0, d0 };
    ggml_set_op_params(result, params, sizeof(params));

    result->op   = GGML_OP_CONV_TRANSPOSE_1D;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

// ggml_conv_2d

// im2col: [N, IC, IH, IW] => [N, OH, OW, IC*KH*KW]
// a: [OC,IC, KH, KW]
// b: [N, IC, IH, IW]
// result: [N, OH, OW, IC*KH*KW]
struct ggml_tensor * ggml_im2col(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        int s0,
        int s1,
        int p0,
        int p1,
        int d0,
        int d1,
        bool is_2D) {
    if (is_2D) {
        GGML_ASSERT(a->ne[2] == b->ne[2]);
    } else {
        GGML_ASSERT(a->ne[1] == b->ne[1]);
    }

    bool is_node = false;

    if (a->grad || b->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    const int64_t OH = is_2D ? ggml_calc_conv_output_size(b->ne[1], a->ne[1], s1, p1, d1) : 0;
    const int64_t OW =         ggml_calc_conv_output_size(b->ne[0], a->ne[0], s0, p0, d0);

    const int64_t ne[4] = {
        is_2D ? (a->ne[2] * a->ne[1] * a->ne[0]) : a->ne[1] * a->ne[0],
        OW,
        is_2D ? OH : b->ne[2],
        is_2D ? b->ne[3] : 1,
    };

    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F16, 4, ne);
    int32_t params[] = { s0, s1, p0, p1, d0, d1, (is_2D ? 1 : 0) };
    ggml_set_op_params(result, params, sizeof(params));

    result->op   = GGML_OP_IM2COL;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

// a: [OC,IC, KH, KW]
// b: [N, IC, IH, IW]
// result: [N, OC, OH, OW]
struct ggml_tensor * ggml_conv_2d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        int s0,
        int s1,
        int p0,
        int p1,
        int d0,
        int d1) {
    struct ggml_tensor * im2col = ggml_im2col(ctx, a, b, s0, s1, p0, p1, d0, d1, true); // [N, OH, OW, IC * KH * KW]

    struct ggml_tensor * result =
        ggml_mul_mat(ctx,
                ggml_reshape_2d(ctx, im2col, im2col->ne[0], im2col->ne[3] * im2col->ne[2] * im2col->ne[1]), // [N, OH, OW, IC * KH * KW] => [N*OH*OW, IC * KH * KW]
                ggml_reshape_2d(ctx, a, (a->ne[0] * a->ne[1] * a->ne[2]), a->ne[3]));                       // [OC,IC, KH, KW] => [OC, IC * KH * KW]

    result = ggml_reshape_4d(ctx, result, im2col->ne[1], im2col->ne[2], a->ne[3], im2col->ne[3]); // [N, OC, OH, OW]

    return result;
}
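// Illustrative sketch (assumed names, not upstream code): a 3x3 "same"
// convolution expressed through the im2col + mul_mat path above, with
// stride 1, padding 1 and dilation 1 in both spatial dimensions.
static struct ggml_tensor * ggml_example_conv_3x3_same(
        struct ggml_context * ctx,
        struct ggml_tensor * kernel,  // ne = { KW=3, KH=3, IC, OC }
        struct ggml_tensor * input) { // ne = { IW, IH, IC, N }
    return ggml_conv_2d(ctx, kernel, input, 1, 1, 1, 1, 1, 1);
}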
// ggml_conv_2d_sk_p0

struct ggml_tensor * ggml_conv_2d_sk_p0(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_conv_2d(ctx, a, b, a->ne[0], a->ne[1], 0, 0, 1, 1);
}

// ggml_conv_2d_s1_ph

struct ggml_tensor * ggml_conv_2d_s1_ph(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_conv_2d(ctx, a, b, 1, 1, a->ne[0] / 2, a->ne[1] / 2, 1, 1);
}

// ggml_conv_transpose_2d_p0

static int64_t ggml_calc_conv_transpose_output_size(int64_t ins, int64_t ks, int s, int p) {
    return (ins - 1) * s - 2 * p + ks;
}

struct ggml_tensor * ggml_conv_transpose_2d_p0(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        int stride) {
    GGML_ASSERT(a->ne[3] == b->ne[2]);

    bool is_node = false;

    if (a->grad || b->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    const int64_t ne[4] = {
        ggml_calc_conv_transpose_output_size(b->ne[0], a->ne[0], stride, 0 /*p0*/),
        ggml_calc_conv_transpose_output_size(b->ne[1], a->ne[1], stride, 0 /*p1*/),
        a->ne[2], b->ne[3],
    };

    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);

    ggml_set_op_params_i32(result, 0, stride);

    result->op   = GGML_OP_CONV_TRANSPOSE_2D;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

// ggml_pool_*

static int64_t ggml_calc_pool_output_size(int64_t ins, int ks, int s, float p) {
    return (ins + 2 * p - ks) / s + 1;
}

// ggml_pool_1d

struct ggml_tensor * ggml_pool_1d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        enum ggml_op_pool op,
        int k0,
        int s0,
        int p0) {
    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    const int64_t ne[3] = {
        ggml_calc_pool_output_size(a->ne[0], k0, s0, p0),
        a->ne[1],
    };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 2, ne);

    int32_t params[] = { op, k0, s0, p0 };
    ggml_set_op_params(result, params, sizeof(params));

    result->op   = GGML_OP_POOL_1D;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_pool_2d

struct ggml_tensor * ggml_pool_2d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        enum ggml_op_pool op,
        int k0,
        int k1,
        int s0,
        int s1,
        float p0,
        float p1) {
    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    const int64_t ne[3] = {
        ggml_calc_pool_output_size(a->ne[0], k0, s0, p0),
        ggml_calc_pool_output_size(a->ne[1], k1, s1, p1),
        a->ne[2],
    };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 3, ne);

    int32_t params[] = { op, k0, k1, s0, s1, p0, p1 };
    ggml_set_op_params(result, params, sizeof(params));

    result->op   = GGML_OP_POOL_2D;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}
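// Worked example (illustrative): 2x2 pooling with stride 2 and no padding on
// a 224x224 feature map gives
//
//   (224 + 2*0 - 2)/2 + 1 = 112
//
// per dimension, so ggml_pool_2d(ctx, a, GGML_OP_POOL_MAX, 2, 2, 2, 2, 0, 0)
// halves both spatial dimensions.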
// ggml_upscale

static struct ggml_tensor * ggml_upscale_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int scale_factor) {
    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    struct ggml_tensor * result = ggml_new_tensor_4d(ctx, a->type,
            a->ne[0] * scale_factor,
            a->ne[1] * scale_factor,
            a->ne[2], a->ne[3]);

    result->op = GGML_OP_UPSCALE;
    result->op_params[0] = scale_factor;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = NULL;

    return result;
}

struct ggml_tensor * ggml_pad(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int p0, int p1, int p2, int p3) {
    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    struct ggml_tensor * result = ggml_new_tensor_4d(ctx, a->type,
            a->ne[0] + p0,
            a->ne[1] + p1,
            a->ne[2] + p2,
            a->ne[3] + p3);

    result->op = GGML_OP_PAD;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_upscale(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int scale_factor) {
    return ggml_upscale_impl(ctx, a, scale_factor);
}

// ggml_argsort

struct ggml_tensor * ggml_argsort(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        enum ggml_sort_order order) {
    bool is_node = false;

    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_I32, a->n_dims, a->ne);

    ggml_set_op_params_i32(result, 0, (int32_t) order);

    result->op   = GGML_OP_ARGSORT;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_top_k

struct ggml_tensor * ggml_top_k(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int k) {
    GGML_ASSERT(a->ne[0] >= k);

    struct ggml_tensor * result = ggml_argsort(ctx, a, GGML_SORT_DESC);

    result = ggml_view_4d(ctx, result,
                k, result->ne[1], result->ne[2], result->ne[3],
                   result->nb[1], result->nb[2], result->nb[3],
                0);

    return result;
}
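// Note (illustrative): ggml_top_k returns I32 *indices*, not values -- it is
// a full descending argsort followed by a zero-copy view of the first k
// entries of each row. The values themselves must be gathered separately
// using these indices (e.g. on the host after the graph is computed).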
// ggml_flash_attn

struct ggml_tensor * ggml_flash_attn(
        struct ggml_context * ctx,
        struct ggml_tensor * q,
        struct ggml_tensor * k,
        struct ggml_tensor * v,
        bool masked) {
    GGML_ASSERT(ggml_can_mul_mat(k, q));
    // TODO: check if vT can be multiplied by (k*qT)

    bool is_node = false;

    if (q->grad || k->grad || v->grad) {
        is_node = true;
    }

    //struct ggml_tensor * result = ggml_dup_tensor(ctx, q);
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, q->n_dims, q->ne);

    int32_t t = masked ? 1 : 0;
    ggml_set_op_params(result, &t, sizeof(t));

    result->op   = GGML_OP_FLASH_ATTN;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = q;
    result->src[1] = k;
    result->src[2] = v;

    return result;
}

// ggml_flash_ff

struct ggml_tensor * ggml_flash_ff(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b0,
        struct ggml_tensor * b1,
        struct ggml_tensor * c0,
        struct ggml_tensor * c1) {
    GGML_ASSERT(ggml_can_mul_mat(b0, a));
    // TODO: more checks

    bool is_node = false;

    if (a->grad || b0->grad || b1->grad || c0->grad || c1->grad) {
        is_node = true;
    }

    //struct ggml_tensor * result = ggml_dup_tensor(ctx, a);
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, a->n_dims, a->ne);

    result->op   = GGML_OP_FLASH_FF;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b0;
    result->src[2] = b1;
    result->src[3] = c0;
    result->src[4] = c1;

    return result;
}

// ggml_flash_attn_back

struct ggml_tensor * ggml_flash_attn_back(
        struct ggml_context * ctx,
        struct ggml_tensor * q,
        struct ggml_tensor * k,
        struct ggml_tensor * v,
        struct ggml_tensor * d,
        bool masked) {
    GGML_ASSERT(ggml_can_mul_mat(k, q));
    // TODO: check if vT can be multiplied by (k*qT)

    // d shape [D,N,ne2,ne3]
    // q shape [D,N,ne2,ne3]
    // k shape [D,M,kvne2,ne3]
    // v shape [M,D,kvne2,ne3]

    const int64_t     D = q->ne[0];
    const int64_t     N = q->ne[1];
    const int64_t     M = k->ne[1];
    const int64_t   ne2 = q->ne[2];
    const int64_t   ne3 = q->ne[3];
    const int64_t kvne2 = k->ne[2];

    GGML_ASSERT(k->ne[0] == D);
    GGML_ASSERT(v->ne[0] == M);
    GGML_ASSERT(v->ne[1] == D);
    GGML_ASSERT(d->ne[0] == D);
    GGML_ASSERT(d->ne[1] == N);
    GGML_ASSERT(k->ne[2] == kvne2);
    GGML_ASSERT(k->ne[3] == ne3);
    GGML_ASSERT(v->ne[2] == kvne2);
    GGML_ASSERT(v->ne[3] == ne3);
    GGML_ASSERT(d->ne[2] == ne2);
    GGML_ASSERT(d->ne[3] == ne3);

    GGML_ASSERT(ne2 % kvne2 == 0);

    bool is_node = false;

    if (q->grad || k->grad || v->grad) {
        // when using this operation (in backwards pass) these grads are set.
        // we don't want to create (big) grad of our result, so is_node is false.
        is_node = false;
    }

    // store gradients of q, k and v as continuous tensors concatenated in result.
    // note: v and gradv are actually transposed, i.e. v->ne[0] != D.
    const int64_t elem_q = ggml_nelements(q);
    const int64_t elem_k = ggml_nelements(k);
    const int64_t elem_v = ggml_nelements(v);

    enum ggml_type result_type = GGML_TYPE_F32;
    GGML_ASSERT(ggml_blck_size(result_type) == 1);
    const size_t tsize = ggml_type_size(result_type);

    const size_t offs_q = 0;
    const size_t offs_k = offs_q + GGML_PAD(elem_q * tsize, GGML_MEM_ALIGN);
    const size_t offs_v = offs_k + GGML_PAD(elem_k * tsize, GGML_MEM_ALIGN);
    const size_t end    = offs_v + GGML_PAD(elem_v * tsize, GGML_MEM_ALIGN);

    const size_t nelements = (end + tsize - 1)/tsize;

    struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nelements);

    int32_t masked_i = masked ? 1 : 0;
    ggml_set_op_params(result, &masked_i, sizeof(masked_i));

    result->op   = GGML_OP_FLASH_ATTN_BACK;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = q;
    result->src[1] = k;
    result->src[2] = v;
    result->src[3] = d;

    return result;
}

// ggml_win_part

struct ggml_tensor * ggml_win_part(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int w) {
    GGML_ASSERT(a->ne[3] == 1);
    GGML_ASSERT(a->type == GGML_TYPE_F32);

    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    // padding
    const int px = (w - a->ne[1]%w)%w;
    const int py = (w - a->ne[2]%w)%w;

    const int npx = (px + a->ne[1])/w;
    const int npy = (py + a->ne[2])/w;
    const int np  = npx*npy;

    const int64_t ne[4] = { a->ne[0], w, w, np, };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);

    int32_t params[] = { npx, npy, w };
    ggml_set_op_params(result, params, sizeof(params));

    result->op   = GGML_OP_WIN_PART;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_win_unpart

struct ggml_tensor * ggml_win_unpart(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int w0,
        int h0,
        int w) {
    GGML_ASSERT(a->type == GGML_TYPE_F32);

    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    const int64_t ne[4] = { a->ne[0], w0, h0, 1, };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 3, ne);

    int32_t params[] = { w };
    ggml_set_op_params(result, params, sizeof(params));

    result->op   = GGML_OP_WIN_UNPART;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_get_rel_pos

struct ggml_tensor * ggml_get_rel_pos(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int qh,
        int kh) {
    GGML_ASSERT(qh == kh);
    GGML_ASSERT(2*MAX(qh, kh) - 1 == a->ne[1]);

    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    const int64_t ne[4] = { a->ne[0], kh, qh, 1, };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F16, 3, ne);

    result->op   = GGML_OP_GET_REL_POS;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = NULL;

    return result;
}

// ggml_add_rel_pos

static struct ggml_tensor * ggml_add_rel_pos_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * pw,
        struct ggml_tensor * ph,
        bool inplace) {
    GGML_ASSERT(ggml_are_same_shape(pw, ph));
    GGML_ASSERT(ggml_is_contiguous(a));
    GGML_ASSERT(ggml_is_contiguous(pw));
    GGML_ASSERT(ggml_is_contiguous(ph));
    GGML_ASSERT(ph->type == GGML_TYPE_F32);
    GGML_ASSERT(pw->type == GGML_TYPE_F32);
    GGML_ASSERT(pw->ne[3] == a->ne[2]);
    GGML_ASSERT(pw->ne[0]*pw->ne[0] == a->ne[0]);
    GGML_ASSERT(pw->ne[1]*pw->ne[2] == a->ne[1]);

    bool is_node = false;

    if (!inplace && (a->grad || pw->grad || ph->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
    ggml_set_op_params_i32(result, 0, inplace ? 1 : 0);

    result->op   = GGML_OP_ADD_REL_POS;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = pw;
    result->src[2] = ph;

    return result;
}

struct ggml_tensor * ggml_add_rel_pos(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * pw,
        struct ggml_tensor * ph) {
    return ggml_add_rel_pos_impl(ctx, a, pw, ph, false);
}

struct ggml_tensor * ggml_add_rel_pos_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * pw,
        struct ggml_tensor * ph) {
    return ggml_add_rel_pos_impl(ctx, a, pw, ph, true);
}
// ggml_unary

static struct ggml_tensor * ggml_unary_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        enum ggml_unary_op op,
        bool inplace) {
    bool is_node = false;

    if (!inplace && (a->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    ggml_set_op_params_i32(result, 0, (int32_t) op);

    result->op   = GGML_OP_UNARY;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_unary(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        enum ggml_unary_op op) {
    return ggml_unary_impl(ctx, a, op, false);
}

struct ggml_tensor * ggml_unary_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        enum ggml_unary_op op) {
    return ggml_unary_impl(ctx, a, op, true);
}

// ggml_map_unary

static struct ggml_tensor * ggml_map_unary_impl_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        const ggml_unary_op_f32_t fun,
        bool inplace) {
    bool is_node = false;

    if (!inplace && a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    ggml_set_op_params(result, (const void *) &fun, sizeof(fun));

    result->op   = GGML_OP_MAP_UNARY;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_map_unary_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        const ggml_unary_op_f32_t fun) {
    return ggml_map_unary_impl_f32(ctx, a, fun, false);
}

struct ggml_tensor * ggml_map_unary_inplace_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        const ggml_unary_op_f32_t fun) {
    return ggml_map_unary_impl_f32(ctx, a, fun, true);
}

// ggml_map_binary

static struct ggml_tensor * ggml_map_binary_impl_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        const ggml_binary_op_f32_t fun,
        bool inplace) {
    GGML_ASSERT(ggml_are_same_shape(a, b));

    bool is_node = false;

    if (!inplace && (a->grad || b->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    ggml_set_op_params(result, (const void *) &fun, sizeof(fun));

    result->op   = GGML_OP_MAP_BINARY;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

struct ggml_tensor * ggml_map_binary_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        const ggml_binary_op_f32_t fun) {
    return ggml_map_binary_impl_f32(ctx, a, b, fun, false);
}

struct ggml_tensor * ggml_map_binary_inplace_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        const ggml_binary_op_f32_t fun) {
    return ggml_map_binary_impl_f32(ctx, a, b, fun, true);
}

// ggml_map_custom1_f32

static struct ggml_tensor * ggml_map_custom1_impl_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        const ggml_custom1_op_f32_t fun,
        bool inplace) {
    bool is_node = false;

    if (!inplace && a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    ggml_set_op_params(result, (const void *) &fun, sizeof(fun));

    result->op   = GGML_OP_MAP_CUSTOM1_F32;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_map_custom1_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        const ggml_custom1_op_f32_t fun) {
    return ggml_map_custom1_impl_f32(ctx, a, fun, false);
}

struct ggml_tensor * ggml_map_custom1_inplace_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        const ggml_custom1_op_f32_t fun) {
    return ggml_map_custom1_impl_f32(ctx, a, fun, true);
}

// ggml_map_custom2_f32

static struct ggml_tensor * ggml_map_custom2_impl_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        const ggml_custom2_op_f32_t fun,
        bool inplace) {
    bool is_node = false;

    if (!inplace && (a->grad || b->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    ggml_set_op_params(result, (const void *) &fun, sizeof(fun));

    result->op   = GGML_OP_MAP_CUSTOM2_F32;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

struct ggml_tensor * ggml_map_custom2_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        const ggml_custom2_op_f32_t fun) {
    return ggml_map_custom2_impl_f32(ctx, a, b, fun, false);
}

struct ggml_tensor * ggml_map_custom2_inplace_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        const ggml_custom2_op_f32_t fun) {
    return ggml_map_custom2_impl_f32(ctx, a, b, fun, true);
}

// ggml_map_custom3_f32

static struct ggml_tensor * ggml_map_custom3_impl_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        struct ggml_tensor * c,
        const ggml_custom3_op_f32_t fun,
        bool inplace) {
    bool is_node = false;

    if (!inplace && (a->grad || b->grad || c->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    ggml_set_op_params(result, (const void *) &fun, sizeof(fun));

    result->op   = GGML_OP_MAP_CUSTOM3_F32;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;
    result->src[2] = c;

    return result;
}

struct ggml_tensor * ggml_map_custom3_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        struct ggml_tensor * c,
        const ggml_custom3_op_f32_t fun) {
    return ggml_map_custom3_impl_f32(ctx, a, b, c, fun, false);
}

struct ggml_tensor * ggml_map_custom3_inplace_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        struct ggml_tensor * c,
        const ggml_custom3_op_f32_t fun) {
    return ggml_map_custom3_impl_f32(ctx, a, b, c, fun, true);
}

// ggml_map_custom1

struct ggml_map_custom1_op_params {
    ggml_custom1_op_t fun;
    int n_tasks;
    void * userdata;
};

static struct ggml_tensor * ggml_map_custom1_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        const ggml_custom1_op_t fun,
        int n_tasks,
        void * userdata,
        bool inplace) {
    GGML_ASSERT(n_tasks == GGML_N_TASKS_MAX || n_tasks > 0);

    bool is_node = false;

    if (!inplace && a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    struct ggml_map_custom1_op_params params = {
        /*.fun      =*/ fun,
        /*.n_tasks  =*/ n_tasks,
        /*.userdata =*/ userdata
    };
    ggml_set_op_params(result, (const void *) &params, sizeof(params));

    result->op   = GGML_OP_MAP_CUSTOM1;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_map_custom1(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        const ggml_custom1_op_t fun,
        int n_tasks,
        void * userdata) {
    return ggml_map_custom1_impl(ctx, a, fun, n_tasks, userdata, false);
}

struct ggml_tensor * ggml_map_custom1_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        const ggml_custom1_op_t fun,
        int n_tasks,
        void * userdata) {
    return ggml_map_custom1_impl(ctx, a, fun, n_tasks, userdata, true);
}
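// Illustrative sketch (assumed helper, not upstream code): a user callback
// for ggml_map_custom1. The runtime calls it from every worker thread; each
// call receives its thread index ith and thread count nth and must process a
// disjoint slice, here a range of rows of a 2-D tensor.
static void ggml_example_custom_relu(struct ggml_tensor * dst, const struct ggml_tensor * a,
        int ith, int nth, void * userdata) {
    (void) userdata; // unused in this sketch

    GGML_ASSERT(a->type == GGML_TYPE_F32 && a->n_dims <= 2);

    const int64_t nr  = ggml_nrows(a);
    const int64_t dr  = (nr + nth - 1)/nth; // rows per thread
    const int64_t ir0 = dr*ith;
    const int64_t ir1 = MIN(ir0 + dr, nr);

    for (int64_t ir = ir0; ir < ir1; ++ir) {
        const float * src = (const float *) ((const char *) a->data + ir*a->nb[1]);
        float * out = (float *) ((char *) dst->data + ir*dst->nb[1]);
        for (int64_t i = 0; i < a->ne[0]; ++i) {
            out[i] = src[i] > 0.0f ? src[i] : 0.0f;
        }
    }
}
// Usage (illustrative):
//   ggml_map_custom1(ctx, a, ggml_example_custom_relu, GGML_N_TASKS_MAX, NULL);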
// ggml_map_custom2

struct ggml_map_custom2_op_params {
    ggml_custom2_op_t fun;
    int n_tasks;
    void * userdata;
};

static struct ggml_tensor * ggml_map_custom2_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        const ggml_custom2_op_t fun,
        int n_tasks,
        void * userdata,
        bool inplace) {
    GGML_ASSERT(n_tasks == GGML_N_TASKS_MAX || n_tasks > 0);

    bool is_node = false;

    if (!inplace && (a->grad || b->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    struct ggml_map_custom2_op_params params = {
        /*.fun      =*/ fun,
        /*.n_tasks  =*/ n_tasks,
        /*.userdata =*/ userdata
    };
    ggml_set_op_params(result, (const void *) &params, sizeof(params));

    result->op   = GGML_OP_MAP_CUSTOM2;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

struct ggml_tensor * ggml_map_custom2(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        const ggml_custom2_op_t fun,
        int n_tasks,
        void * userdata) {
    return ggml_map_custom2_impl(ctx, a, b, fun, n_tasks, userdata, false);
}

struct ggml_tensor * ggml_map_custom2_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        const ggml_custom2_op_t fun,
        int n_tasks,
        void * userdata) {
    return ggml_map_custom2_impl(ctx, a, b, fun, n_tasks, userdata, true);
}

// ggml_map_custom3

struct ggml_map_custom3_op_params {
    ggml_custom3_op_t fun;
    int n_tasks;
    void * userdata;
};

static struct ggml_tensor * ggml_map_custom3_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        struct ggml_tensor * c,
        const ggml_custom3_op_t fun,
        int n_tasks,
        void * userdata,
        bool inplace) {
    GGML_ASSERT(n_tasks == GGML_N_TASKS_MAX || n_tasks > 0);

    bool is_node = false;

    if (!inplace && (a->grad || b->grad || c->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    struct ggml_map_custom3_op_params params = {
        /*.fun      =*/ fun,
        /*.n_tasks  =*/ n_tasks,
        /*.userdata =*/ userdata
    };
    ggml_set_op_params(result, (const void *) &params, sizeof(params));

    result->op   = GGML_OP_MAP_CUSTOM3;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;
    result->src[2] = c;

    return result;
}

struct ggml_tensor * ggml_map_custom3(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        struct ggml_tensor * c,
        const ggml_custom3_op_t fun,
        int n_tasks,
        void * userdata) {
    return ggml_map_custom3_impl(ctx, a, b, c, fun, n_tasks, userdata, false);
}

struct ggml_tensor * ggml_map_custom3_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        struct ggml_tensor * c,
        const ggml_custom3_op_t fun,
        int n_tasks,
        void * userdata) {
    return ggml_map_custom3_impl(ctx, a, b, c, fun, n_tasks, userdata, true);
}

// ggml_cross_entropy_loss

struct ggml_tensor * ggml_cross_entropy_loss(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    GGML_ASSERT(ggml_are_same_shape(a, b));

    bool is_node = false;

    if (a->grad || b->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = ggml_new_tensor_1d(ctx, a->type, 1);

    result->op   = GGML_OP_CROSS_ENTROPY_LOSS;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

// ggml_cross_entropy_loss_back

struct ggml_tensor * ggml_cross_entropy_loss_back(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        struct ggml_tensor * c) {
    GGML_ASSERT(ggml_are_same_shape(a, b));
    GGML_ASSERT(ggml_is_scalar(c));

    struct ggml_tensor * result = ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_CROSS_ENTROPY_LOSS_BACK;
    result->grad = NULL;
    result->src[0] = a;
    result->src[1] = b;
    result->src[2] = c;

    return result;
}

////////////////////////////////////////////////////////////////////////////////

void ggml_set_param(
        struct ggml_context * ctx,
        struct ggml_tensor * tensor) {
    tensor->is_param = true;

    GGML_ASSERT(tensor->grad == NULL);
    tensor->grad = ggml_dup_tensor(ctx, tensor);
    ggml_format_name(tensor->grad, "%s (grad)", tensor->name);
}
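// Illustrative note (assumed names): training code marks weights as
// parameters before building the graph, which allocates their ->grad tensors
// so the backward pass and optimizer can use them:
//
//   ggml_set_param(ctx, model->w); // w->grad is allocated here
//   ggml_set_param(ctx, model->b);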
// ggml_compute_forward_dup

static void ggml_compute_forward_dup_same_cont(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));
    GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));
    GGML_ASSERT(src0->type == dst->type);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const size_t nb00 = src0->nb[0];
    const size_t nb0 = dst->nb[0];

    const int ith = params->ith; // thread index
    const int nth = params->nth; // number of threads

    // parallelize by elements
    const int ne = ggml_nelements(dst);
    const int dr = (ne + nth - 1) / nth;
    const int ie0 = dr * ith;
    const int ie1 = MIN(ie0 + dr, ne);

    if (ie0 < ie1) {
        memcpy(
            ((char *)  dst->data + ie0*nb0),
            ((char *) src0->data + ie0*nb00),
            (ie1 - ie0) * ggml_type_size(src0->type));
    }
}
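// Worked example (illustrative): with ne = 1000 elements and nth = 8
// threads, dr = (1000 + 7)/8 = 125, so thread ith copies the element range
// [125*ith, MIN(125*ith + 125, 1000)); thread 7 gets [875, 1000). A single
// memcpy per thread is valid here because both tensors are contiguous and
// share one type.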
static void ggml_compute_forward_dup_f16(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    GGML_TENSOR_UNARY_OP_LOCALS

    const int ith = params->ith; // thread index
    const int nth = params->nth; // number of threads

    if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst) && src0->type == dst->type) {
        ggml_compute_forward_dup_same_cont(params, src0, dst);
        return;
    }

    // parallelize by rows
    const int nr = ne01;
    // number of rows per thread
    const int dr = (nr + nth - 1) / nth;
    // row range for this thread
    const int ir0 = dr * ith;
    const int ir1 = MIN(ir0 + dr, nr);

    if (src0->type == dst->type &&
        ne00 == ne0 &&
        nb00 == ggml_type_size(src0->type) && nb0 == ggml_type_size(dst->type)) {
        // copy by rows
        const size_t rs = ne00*nb00;
        for (int64_t i03 = 0; i03 < ne03; i03++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
                for (int64_t i01 = ir0; i01 < ir1; i01++) {
                    memcpy(
                        ((char *)  dst->data + i01*nb1  + i02*nb2  + i03*nb3),
                        ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03),
                        rs);
                }
            }
        }
        return;
    }

    // TODO: add more special-case implementations for tensor shapes/strides that can benefit from memcpy

    if (ggml_is_contiguous(dst)) {
        if (nb00 == sizeof(ggml_fp16_t)) {
            if (dst->type == GGML_TYPE_F16) {
                size_t id = 0;
                const size_t rs = ne00 * nb00;
                char * dst_ptr = (char *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += rs * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            const char * src0_ptr = (char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03;
                            memcpy(dst_ptr + id, src0_ptr, rs);
                            id += rs;
                        }
                        id += rs * (ne01 - ir1);
                    }
                }
            } else if (dst->type == GGML_TYPE_F32) {
                size_t id = 0;
                float * dst_ptr = (float *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += ne00 * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
                            for (int i00 = 0; i00 < ne00; i00++) {
                                dst_ptr[id] = GGML_FP16_TO_FP32(src0_ptr[i00]);
                                id++;
                            }
                        }
                        id += ne00 * (ne01 - ir1);
                    }
                }
            } else if (type_traits[dst->type].from_float) {
                ggml_from_float_t const quantize_row_q = type_traits[dst->type].from_float;
                float * src0_f32 = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith;

                size_t id = 0;
                size_t rs = nb0 * (ne00 / ggml_blck_size(dst->type));
                char * dst_ptr = (char *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += rs * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);

                            for (int i00 = 0; i00 < ne00; i00++) {
                                src0_f32[i00] = GGML_FP16_TO_FP32(src0_ptr[i00]);
                            }

                            quantize_row_q(src0_f32, dst_ptr + id, ne00);
                            id += rs;
                        }
                        id += rs * (ne01 - ir1);
                    }
                }
            } else {
                GGML_ASSERT(false); // TODO: implement
            }
        } else {
            //printf("%s: this is not optimal - fix me\n", __func__);

            if (dst->type == GGML_TYPE_F32) {
                size_t id = 0;
                float * dst_ptr = (float *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += ne00 * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            for (int i00 = 0; i00 < ne00; i00++) {
                                const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);

                                dst_ptr[id] = GGML_FP16_TO_FP32(*src0_ptr);
                                id++;
                            }
                        }
                        id += ne00 * (ne01 - ir1);
                    }
                }
            } else if (dst->type == GGML_TYPE_F16) {
                size_t id = 0;
                ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += ne00 * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            for (int i00 = 0; i00 < ne00; i00++) {
                                const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);

                                dst_ptr[id] = *src0_ptr;
                                id++;
                            }
                        }
                        id += ne00 * (ne01 - ir1);
                    }
                }
            } else {
                GGML_ASSERT(false); // TODO: implement
            }
        }
        return;
    }
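
    // Fully general case: walk src0 element by element and maintain the
    // destination coordinates (i10, i11, i12, i13) by hand. Each increment
    // of i10 that reaches ne0 carries into i11, and so on up the
    // dimensions, like a multi-digit counter.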
    // dst counters
    int64_t i10 = 0;
    int64_t i11 = 0;
    int64_t i12 = 0;
    int64_t i13 = 0;

    if (dst->type == GGML_TYPE_F16) {
        for (int64_t i03 = 0; i03 < ne03; i03++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
                i10 += ne00 * ir0;
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
                for (int64_t i01 = ir0; i01 < ir1; i01++) {
                    for (int64_t i00 = 0; i00 < ne00; i00++) {
                        const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
                        char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3);

                        memcpy(dst_ptr, src0_ptr, sizeof(ggml_fp16_t));
                        // the counters index dst, so they wrap on the dst
                        // extents (ne0..ne3), matching the F32 path below
                        if (++i10 == ne0) {
                            i10 = 0;
                            if (++i11 == ne1) {
                                i11 = 0;
                                if (++i12 == ne2) {
                                    i12 = 0;
                                    if (++i13 == ne3) {
                                        i13 = 0;
                                    }
                                }
                            }
                        }
                    }
                }

                i10 += ne00 * (ne01 - ir1);
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
            }
        }
    } else if (dst->type == GGML_TYPE_F32) {
        for (int64_t i03 = 0; i03 < ne03; i03++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
                i10 += ne00 * ir0;
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
                for (int64_t i01 = ir0; i01 < ir1; i01++) {
                    for (int64_t i00 = 0; i00 < ne00; i00++) {
                        const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
                        char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3);

                        *(float *) dst_ptr = GGML_FP16_TO_FP32(*(const ggml_fp16_t *) src0_ptr);

                        if (++i10 == ne0) {
                            i10 = 0;
                            if (++i11 == ne1) {
                                i11 = 0;
                                if (++i12 == ne2) {
                                    i12 = 0;
                                    if (++i13 == ne3) {
                                        i13 = 0;
                                    }
                                }
                            }
                        }
                    }
                }

                i10 += ne00 * (ne01 - ir1);
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
            }
        }
    } else {
        GGML_ASSERT(false); // TODO: implement
    }
}
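
// F32 counterpart of the function above: same row-parallel structure and
// the same three fast/slow paths, with F32 as the source element type.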
static void ggml_compute_forward_dup_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    GGML_TENSOR_UNARY_OP_LOCALS

    const int ith = params->ith; // thread index
    const int nth = params->nth; // number of threads

    if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst) && src0->type == dst->type) {
        ggml_compute_forward_dup_same_cont(params, src0, dst);
        return;
    }

    // parallelize by rows
    const int nr = ne01;
    // number of rows per thread
    const int dr = (nr + nth - 1) / nth;
    // row range for this thread
    const int ir0 = dr * ith;
    const int ir1 = MIN(ir0 + dr, nr);

    if (src0->type == dst->type &&
        ne00 == ne0 &&
        nb00 == ggml_type_size(src0->type) && nb0 == ggml_type_size(dst->type)) {
        // copy by rows
        const size_t rs = ne00*nb00;
        for (int64_t i03 = 0; i03 < ne03; i03++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
                for (int64_t i01 = ir0; i01 < ir1; i01++) {
                    memcpy(
                        ((char *)  dst->data + i01*nb1  + i02*nb2  + i03*nb3),
                        ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03),
                        rs);
                }
            }
        }
        return;
    }

    if (ggml_is_contiguous(dst)) {
        // TODO: simplify
        if (nb00 == sizeof(float)) {
            if (dst->type == GGML_TYPE_F32) {
                size_t id = 0;
                const size_t rs = ne00 * nb00;
                char * dst_ptr = (char *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += rs * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            const char * src0_ptr = (char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03;
                            memcpy(dst_ptr + id, src0_ptr, rs);
                            id += rs;
                        }
                        id += rs * (ne01 - ir1);
                    }
                }
            } else if (type_traits[dst->type].from_float) {
                ggml_from_float_t const quantize_row_q = type_traits[dst->type].from_float;

                size_t id = 0;
                size_t rs = nb0 * (ne00 / ggml_blck_size(dst->type));
                char * dst_ptr = (char *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += rs * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            const float * src0_ptr = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
                            quantize_row_q(src0_ptr, dst_ptr + id, ne00);
                            id += rs;
                        }
                        id += rs * (ne01 - ir1);
                    }
                }
            } else {
                GGML_ASSERT(false); // TODO: implement
            }
        } else {
            //printf("%s: this is not optimal - fix me\n", __func__);

            if (dst->type == GGML_TYPE_F32) {
                size_t id = 0;
                float * dst_ptr = (float *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += ne00 * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            for (int i00 = 0; i00 < ne00; i00++) {
                                const float * src0_ptr = (float *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);

                                dst_ptr[id] = *src0_ptr;
                                id++;
                            }
                        }
                        id += ne00 * (ne01 - ir1);
                    }
                }
            } else if (dst->type == GGML_TYPE_F16) {
                size_t id = 0;
                ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += ne00 * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            for (int i00 = 0; i00 < ne00; i00++) {
                                const float * src0_ptr = (float *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);

                                dst_ptr[id] = GGML_FP32_TO_FP16(*src0_ptr);
                                id++;
                            }
                        }
                        id += ne00 * (ne01 - ir1);
                    }
                }
            } else {
                GGML_ASSERT(false); // TODO: implement
            }
        }

        return;
    }

    // dst counters
    int64_t i10 = 0;
    int64_t i11 = 0;
    int64_t i12 = 0;
    int64_t i13 = 0;

    if (dst->type == GGML_TYPE_F32) {
        for (int64_t i03 = 0; i03 < ne03; i03++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
                i10 += ne00 * ir0;
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
                for (int64_t i01 = ir0; i01 < ir1; i01++) {
                    for (int64_t i00 = 0; i00 < ne00; i00++) {
                        const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
                        char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3);

                        memcpy(dst_ptr, src0_ptr, sizeof(float));

                        if (++i10 == ne0) {
                            i10 = 0;
                            if (++i11 == ne1) {
                                i11 = 0;
                                if (++i12 == ne2) {
                                    i12 = 0;
                                    if (++i13 == ne3) {
                                        i13 = 0;
                                    }
                                }
                            }
                        }
                    }
                }

                i10 += ne00 * (ne01 - ir1);
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
            }
        }
    } else if (dst->type == GGML_TYPE_F16) {
        for (int64_t i03 = 0; i03 < ne03; i03++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
                i10 += ne00 * ir0;
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
                for (int64_t i01 = ir0; i01 < ir1; i01++) {
                    for (int64_t i00 = 0; i00 < ne00; i00++) {
                        const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
                        char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3);

                        *(ggml_fp16_t *) dst_ptr = GGML_FP32_TO_FP16(*(const float *) src0_ptr);

                        if (++i10 == ne0) {
                            i10 = 0;
                            if (++i11 == ne1) {
                                i11 = 0;
                                if (++i12 == ne2) {
                                    i12 = 0;
                                    if (++i13 == ne3) {
                                        i13 = 0;
                                    }
                                }
                            }
                        }
                    }
                }

                i10 += ne00 * (ne01 - ir1);
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
            }
        }
    } else {
        GGML_ASSERT(false); // TODO: implement
    }
}

static void ggml_compute_forward_dup(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst) && src0->type == dst->type) {
        ggml_compute_forward_dup_same_cont(params, src0, dst);
        return;
    }
    switch (src0->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_dup_f16(params, src0, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_dup_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_add
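
// src1 may be smaller than src0 along the last three dimensions; its
// indices are taken modulo ne11/ne12/ne13, so it is effectively repeated
// (broadcast) across src0. Along dim 0, ne00 must be a whole multiple of
// ne10 and each src1 row is re-added nr0 = ne00/ne10 times.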
static void ggml_compute_forward_add_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_can_repeat(src1, src0) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(src0);

    GGML_TENSOR_BINARY_OP_LOCALS

    GGML_ASSERT( nb0 == sizeof(float));
    GGML_ASSERT(nb00 == sizeof(float));

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    if (nb10 == sizeof(float)) {
        for (int ir = ir0; ir < ir1; ++ir) {
            // src1 is broadcastable across src0 and dst in i1, i2, i3
            const int64_t i03 = ir/(ne02*ne01);
            const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
            const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);

            const int64_t i13 = i03 % ne13;
            const int64_t i12 = i02 % ne12;
            const int64_t i11 = i01 % ne11;
            const int64_t nr0 = ne00 / ne10;

            float * dst_ptr  = (float *) ((char *)  dst->data + i03*nb3  + i02*nb2  + i01*nb1 );
            float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);
            float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11);

            for (int64_t r = 0; r < nr0; ++r) {
#ifdef GGML_USE_ACCELERATE
                vDSP_vadd(src0_ptr + r*ne10, 1, src1_ptr, 1, dst_ptr + r*ne10, 1, ne10);
#else
                ggml_vec_add_f32(ne10, dst_ptr + r*ne10, src0_ptr + r*ne10, src1_ptr);
#endif
            }
        }
    } else {
        // src1 is not contiguous
        for (int ir = ir0; ir < ir1; ++ir) {
            // src1 is broadcastable across src0 and dst in i1, i2, i3
            const int64_t i03 = ir/(ne02*ne01);
            const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
            const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);

            const int64_t i13 = i03 % ne13;
            const int64_t i12 = i02 % ne12;
            const int64_t i11 = i01 % ne11;

            float * dst_ptr  = (float *) ((char *)  dst->data + i03*nb3  + i02*nb2  + i01*nb1 );
            float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);

            for (int64_t i0 = 0; i0 < ne0; ++i0) {
                const int64_t i10 = i0 % ne10;
                float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + i10*nb10);

                dst_ptr[i0] = src0_ptr[i0] + *src1_ptr;
            }
        }
    }
}
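
// Mixed-precision add: src0 is F16, src1 is F32, and dst may be either
// F16 or F32. Each element is widened to F32 for the addition and, for
// an F16 dst, rounded back afterwards.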
static void ggml_compute_forward_add_f16_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(src0);

    GGML_TENSOR_BINARY_OP_LOCALS

    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);

    if (dst->type == GGML_TYPE_F32) {
        GGML_ASSERT( nb0 == sizeof(float));
    }
    else {
        GGML_ASSERT(dst->type == GGML_TYPE_F16);
        GGML_ASSERT( nb0 == sizeof(ggml_fp16_t));
    }

    GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    if (nb10 == sizeof(float)) {
        if (dst->type == GGML_TYPE_F16) {
            for (int ir = ir0; ir < ir1; ++ir) {
                // src0, src1 and dst are same shape => same indices
                const int i3 = ir/(ne2*ne1);
                const int i2 = (ir - i3*ne2*ne1)/ne1;
                const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

                ggml_fp16_t * dst_ptr  = (ggml_fp16_t *) ((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1);
                ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
                float       * src1_ptr = (float *)       ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11);

                for (int i = 0; i < ne0; i++) {
                    dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + src1_ptr[i]);
                }
            }
        } else {
            for (int ir = ir0; ir < ir1; ++ir) {
                // src0, src1 and dst are same shape => same indices
                const int i3 = ir/(ne2*ne1);
                const int i2 = (ir - i3*ne2*ne1)/ne1;
                const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

                float       * dst_ptr  = (float *)       ((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1);
                ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
                float       * src1_ptr = (float *)       ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11);

                for (int i = 0; i < ne0; i++) {
                    dst_ptr[i] = GGML_FP16_TO_FP32(src0_ptr[i]) + src1_ptr[i];
                }
            }
        }
    }
    else {
        // src1 is not contiguous
        GGML_ASSERT(false);
    }
}
static void ggml_compute_forward_add_f16_f16(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(src0);

    GGML_TENSOR_BINARY_OP_LOCALS

    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F16);
    GGML_ASSERT(dst->type  == GGML_TYPE_F16);

    GGML_ASSERT( nb0 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    if (nb10 == sizeof(ggml_fp16_t)) {
        for (int ir = ir0; ir < ir1; ++ir) {
            // src0, src1 and dst are same shape => same indices
            const int i3 = ir/(ne2*ne1);
            const int i2 = (ir - i3*ne2*ne1)/ne1;
            const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

            ggml_fp16_t * dst_ptr  = (ggml_fp16_t *) ((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1);
            ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
            ggml_fp16_t * src1_ptr = (ggml_fp16_t *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11);

            for (int i = 0; i < ne0; i++) {
                dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + GGML_FP16_TO_FP32(src1_ptr[i]));
            }
        }
    }
    else {
        // src1 is not contiguous
        GGML_ASSERT(false);
    }
}
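
// Quantized add: each src0 row is dequantized into a per-thread F32
// scratch slice of params->wdata (padded by CACHE_LINE_SIZE_F32 floats
// per thread so threads stay on separate cache lines), src1 is
// accumulated into it, and the result is re-quantized into dst.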
static void ggml_compute_forward_add_q_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int nr = ggml_nrows(src0);

    GGML_TENSOR_BINARY_OP_LOCALS

    const int ith = params->ith;
    const int nth = params->nth;

    const enum ggml_type type  = src0->type;
    const enum ggml_type dtype = dst->type;
    ggml_to_float_t   const dequantize_row_q = type_traits[type].to_float;
    ggml_from_float_t const quantize_row_q   = type_traits[dtype].from_float;

    // we don't support permuted src0 or src1
    GGML_ASSERT(nb00 == ggml_type_size(type));
    GGML_ASSERT(nb10 == sizeof(float));

    // dst cannot be transposed or permuted
    GGML_ASSERT(nb0 <= nb1);
    GGML_ASSERT(nb1 <= nb2);
    GGML_ASSERT(nb2 <= nb3);

    GGML_ASSERT(ggml_is_quantized(src0->type));
    GGML_ASSERT(src1->type == GGML_TYPE_F32);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    float * wdata = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith;

    for (int ir = ir0; ir < ir1; ++ir) {
        // src0 indices
        const int i03 = ir/(ne02*ne01);
        const int i02 = (ir - i03*ne02*ne01)/ne01;
        const int i01 = (ir - i03*ne02*ne01 - i02*ne01);

        // src1 and dst are same shape as src0 => same indices
        const int i13 = i03;
        const int i12 = i02;
        const int i11 = i01;

        const int i3 = i03;
        const int i2 = i02;
        const int i1 = i01;

        void  * src0_row = (void  *) ((char *) src0->data + (i01*nb01 + i02*nb02 + i03*nb03));
        float * src1_row = (float *) ((char *) src1->data + (i11*nb11 + i12*nb12 + i13*nb13));
        void  * dst_row  = (void  *) ((char *)  dst->data + ( i1*nb1  +  i2*nb2  +  i3*nb3));

        assert(ne00 % 32 == 0);

        // unquantize row from src0 to temp buffer
        dequantize_row_q(src0_row, wdata, ne00);
        // add src1
        ggml_vec_acc_f32(ne00, wdata, src1_row);
        // quantize row to dst
        if (quantize_row_q != NULL) {
            quantize_row_q(wdata, dst_row, ne00);
        } else {
            memcpy(dst_row, wdata, ne0*nb0);
        }
    }
}
static void ggml_compute_forward_add(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_add_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F16:
            {
                if (src1->type == GGML_TYPE_F16) {
                    ggml_compute_forward_add_f16_f16(params, src0, src1, dst);
                }
                else if (src1->type == GGML_TYPE_F32) {
                    ggml_compute_forward_add_f16_f32(params, src0, src1, dst);
                }
                else {
                    GGML_ASSERT(false);
                }
            } break;
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
            {
                ggml_compute_forward_add_q_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_add1

static void ggml_compute_forward_add1_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, dst));
    GGML_ASSERT(ggml_is_scalar(src1));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(src0);

    GGML_TENSOR_UNARY_OP_LOCALS

    GGML_ASSERT( nb0 == sizeof(float));
    GGML_ASSERT(nb00 == sizeof(float));

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int ir = ir0; ir < ir1; ++ir) {
        // src0 and dst are same shape => same indices
        const int i3 = ir/(ne2*ne1);
        const int i2 = (ir - i3*ne2*ne1)/ne1;
        const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

#ifdef GGML_USE_ACCELERATE
        UNUSED(ggml_vec_add1_f32);

        vDSP_vadd(
                (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01), 1,
                (float *) ((char *) src1->data), 0,
                (float *) ((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1 ), 1,
                ne0);
#else
        ggml_vec_add1_f32(ne0,
                (float *) ((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1 ),
                (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01),
               *(float *) src1->data);
#endif
    }
}
static void ggml_compute_forward_add1_f16_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, dst));
    GGML_ASSERT(ggml_is_scalar(src1));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // scalar to add
    const float v = *(float *) src1->data;

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(src0);

    GGML_TENSOR_UNARY_OP_LOCALS

    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT(dst->type  == GGML_TYPE_F16);

    GGML_ASSERT( nb0 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int ir = ir0; ir < ir1; ++ir) {
        // src0 and dst are same shape => same indices
        const int i3 = ir/(ne2*ne1);
        const int i2 = (ir - i3*ne2*ne1)/ne1;
        const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

        ggml_fp16_t * dst_ptr  = (ggml_fp16_t *) ((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1 );
        ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
        for (int i = 0; i < ne0; i++) {
            dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + v);
        }
    }
}

static void ggml_compute_forward_add1_f16_f16(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, dst));
    GGML_ASSERT(ggml_is_scalar(src1));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // scalar to add
    const float v = GGML_FP16_TO_FP32(*(ggml_fp16_t *) src1->data);

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(src0);

    GGML_TENSOR_UNARY_OP_LOCALS

    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F16);
    GGML_ASSERT(dst->type  == GGML_TYPE_F16);

    GGML_ASSERT( nb0 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int ir = ir0; ir < ir1; ++ir) {
        // src0 and dst are same shape => same indices
        const int i3 = ir/(ne2*ne1);
        const int i2 = (ir - i3*ne2*ne1)/ne1;
        const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

        ggml_fp16_t * dst_ptr  = (ggml_fp16_t *) ((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1 );
        ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
        for (int i = 0; i < ne0; i++) {
            dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + v);
        }
    }
}
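
// Quantized add1: same dequantize / accumulate / requantize scheme as
// ggml_compute_forward_add_q_f32, but with a single F32 scalar added to
// every element of the row.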
static void ggml_compute_forward_add1_q_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, dst));
    GGML_ASSERT(ggml_is_scalar(src1));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // scalar to add
    const float v = *(float *) src1->data;

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(src0);

    GGML_TENSOR_UNARY_OP_LOCALS

    const enum ggml_type type = src0->type;
    ggml_to_float_t   const dequantize_row_q = type_traits[type].to_float;
    ggml_from_float_t const quantize_row_q   = type_traits[type].from_float;

    // we don't support permuted src0
    GGML_ASSERT(nb00 == ggml_type_size(type));

    // dst cannot be transposed or permuted
    GGML_ASSERT(nb0 <= nb1);
    GGML_ASSERT(nb1 <= nb2);
    GGML_ASSERT(nb2 <= nb3);

    GGML_ASSERT(ggml_is_quantized(src0->type));
    GGML_ASSERT(dst->type == src0->type);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    float * wdata = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32) * ith;

    for (int ir = ir0; ir < ir1; ++ir) {
        // src0 and dst are same shape => same indices
        const int i3 = ir/(ne2*ne1);
        const int i2 = (ir - i3*ne2*ne1)/ne1;
        const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

        void * src0_row = (void *) ((char *) src0->data + (i1*nb01 + i2*nb02 + i3*nb03));
        void * dst_row  = (void *) ((char *)  dst->data + (i1*nb1  + i2*nb2  + i3*nb3 ));

        assert(ne0 % 32 == 0);

        // unquantize row from src0 to temp buffer
        dequantize_row_q(src0_row, wdata, ne0);
        // add src1
        ggml_vec_acc1_f32(ne0, wdata, v);
        // quantize row to dst
        quantize_row_q(wdata, dst_row, ne0);
    }
}
static void ggml_compute_forward_add1(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_add1_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F16:
            {
                if (src1->type == GGML_TYPE_F16) {
                    ggml_compute_forward_add1_f16_f16(params, src0, src1, dst);
                }
                else if (src1->type == GGML_TYPE_F32) {
                    ggml_compute_forward_add1_f16_f32(params, src0, src1, dst);
                }
                else {
                    GGML_ASSERT(false);
                }
            } break;
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q8_1:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
            {
                ggml_compute_forward_add1_q_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_acc
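
// acc adds src1 into a view of src0/dst described by five values packed
// into dst->op_params: nb1, nb2, nb3 (view strides), the byte offset of
// the view, and an inplace flag. When not inplace, src0 is first copied
// into dst during the INIT phase so the copy happens exactly once.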
static void ggml_compute_forward_acc_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, dst));
    GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));

    // view src0 and dst with these strides and data offset in bytes during acc
    // nb0 is implicitly element_size because src0 and dst are contiguous
    size_t nb1     = ((int32_t *) dst->op_params)[0];
    size_t nb2     = ((int32_t *) dst->op_params)[1];
    size_t nb3     = ((int32_t *) dst->op_params)[2];
    size_t offset  = ((int32_t *) dst->op_params)[3];
    bool   inplace = (bool) ((int32_t *) dst->op_params)[4];

    if (!inplace && (params->type == GGML_TASK_INIT)) {
        // memcpy needs to be synchronized across threads to avoid race conditions.
        // => do it in INIT phase
        memcpy(
            ((char *)  dst->data),
            ((char *) src0->data),
            ggml_nbytes(dst));
    }

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(src1);
    const int nc = src1->ne[0];

    GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne)
    GGML_TENSOR_LOCALS(size_t,  nb1, src1, nb)

    // src0 and dst as viewed during acc
    const size_t nb0 = ggml_element_size(src0);

    const size_t nb00 = nb0;
    const size_t nb01 = nb1;
    const size_t nb02 = nb2;
    const size_t nb03 = nb3;

    GGML_ASSERT(offset + (ne10 == 0 ? 0 : ne10-1)*nb0  + (ne11 == 0 ? 0 : ne11-1)*nb1  + (ne12 == 0 ? 0 : ne12-1)*nb2  + (ne13 == 0 ? 0 : ne13-1)*nb3  < ggml_nbytes(dst));
    GGML_ASSERT(offset + (ne10 == 0 ? 0 : ne10-1)*nb00 + (ne11 == 0 ? 0 : ne11-1)*nb01 + (ne12 == 0 ? 0 : ne12-1)*nb02 + (ne13 == 0 ? 0 : ne13-1)*nb03 < ggml_nbytes(src0));

    GGML_ASSERT(nb10 == sizeof(float));

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int ir = ir0; ir < ir1; ++ir) {
        // src0 and dst are viewed with shape of src1 and offset
        // => same indices
        const int i3 = ir/(ne12*ne11);
        const int i2 = (ir - i3*ne12*ne11)/ne11;
        const int i1 = (ir - i3*ne12*ne11 - i2*ne11);

#ifdef GGML_USE_ACCELERATE
        vDSP_vadd(
                (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + offset), 1,
                (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11), 1,
                (float *) ((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + offset), 1, nc);
#else
        ggml_vec_add_f32(nc,
                (float *) ((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + offset),
                (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + offset),
                (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11));
#endif
    }
}
static void ggml_compute_forward_acc(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_acc_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F16:
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q8_1:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_sub

static void ggml_compute_forward_sub_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int nr = ggml_nrows(src0);

    GGML_TENSOR_BINARY_OP_LOCALS

    GGML_ASSERT( nb0 == sizeof(float));
    GGML_ASSERT(nb00 == sizeof(float));

    if (nb10 == sizeof(float)) {
        for (int ir = 0; ir < nr; ++ir) {
            // src0, src1 and dst are same shape => same indices
            const int i3 = ir/(ne2*ne1);
            const int i2 = (ir - i3*ne2*ne1)/ne1;
            const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

#ifdef GGML_USE_ACCELERATE
            vDSP_vsub(
                    (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11), 1,
                    (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01), 1,
                    (float *) ((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1 ), 1,
                    ne0);
#else
            ggml_vec_sub_f32(ne0,
                    (float *) ((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1 ),
                    (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01),
                    (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11));
#endif
        }
    } else {
        // src1 is not contiguous
        for (int ir = 0; ir < nr; ++ir) {
            // src0, src1 and dst are same shape => same indices
            const int i3 = ir/(ne2*ne1);
            const int i2 = (ir - i3*ne2*ne1)/ne1;
            const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

            float * dst_ptr  = (float *) ((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1 );
            float * src0_ptr = (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
            for (int i0 = 0; i0 < ne0; i0++) {
                float * src1_ptr = (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11 + i0*nb10);

                dst_ptr[i0] = src0_ptr[i0] - *src1_ptr;
            }
        }
    }
}
static void ggml_compute_forward_sub(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_sub_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_mul
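
// Same broadcasting rules as ggml_compute_forward_add_f32, but rows are
// distributed round-robin across threads (ir += nth) instead of in
// contiguous chunks.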
static void ggml_compute_forward_mul_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_can_repeat(src1, src0) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

#ifdef GGML_USE_CLBLAST
    if (src1->backend == GGML_BACKEND_GPU) {
        // TODO: OpenCL kernel support full broadcast
        GGML_ASSERT(ggml_can_repeat_rows(src1, src0));
        if (ith == 0) {
            ggml_cl_mul(src0, src1, dst);
        }
        return;
    }
#endif

    const int64_t nr = ggml_nrows(src0);

    GGML_TENSOR_BINARY_OP_LOCALS

    GGML_ASSERT( nb0 == sizeof(float));
    GGML_ASSERT(nb00 == sizeof(float));

    if (nb10 == sizeof(float)) {
        for (int64_t ir = ith; ir < nr; ir += nth) {
            // src0 and dst are same shape => same indices
            const int64_t i03 = ir/(ne02*ne01);
            const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
            const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);

            const int64_t i13 = i03 % ne13;
            const int64_t i12 = i02 % ne12;
            const int64_t i11 = i01 % ne11;
            const int64_t nr0 = ne00 / ne10;

            float * dst_ptr  = (float *) ((char *)  dst->data + i03*nb3  + i02*nb2  + i01*nb1 );
            float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);
            float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11);

            for (int64_t r = 0; r < nr0; ++r) {
#ifdef GGML_USE_ACCELERATE
                UNUSED(ggml_vec_mul_f32);

                vDSP_vmul(src0_ptr + r*ne10, 1, src1_ptr, 1, dst_ptr + r*ne10, 1, ne10);
#else
                ggml_vec_mul_f32(ne10, dst_ptr + r*ne10, src0_ptr + r*ne10, src1_ptr);
#endif
            }
        }
    } else {
        // src1 is not contiguous
        for (int64_t ir = ith; ir < nr; ir += nth) {
            // src0 and dst are same shape => same indices
            // src1 is broadcastable across src0 and dst in i1, i2, i3
            const int64_t i03 = ir/(ne02*ne01);
            const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
            const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);

            const int64_t i13 = i03 % ne13;
            const int64_t i12 = i02 % ne12;
            const int64_t i11 = i01 % ne11;

            float * dst_ptr  = (float *) ((char *)  dst->data + i03*nb3  + i02*nb2  + i01*nb1 );
            float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);

            for (int64_t i0 = 0; i0 < ne00; ++i0) {
                const int64_t i10 = i0 % ne10;
                float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + i10*nb10);

                dst_ptr[i0] = src0_ptr[i0] * (*src1_ptr);
            }
        }
    }
}
static void ggml_compute_forward_mul(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(src1->type == GGML_TYPE_F32 && "only f32 src1 supported for now");

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_mul_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_div

static void ggml_compute_forward_div_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_can_repeat(src1, src0) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int64_t nr = ggml_nrows(src0);

    GGML_TENSOR_BINARY_OP_LOCALS

    GGML_ASSERT( nb0 == sizeof(float));
    GGML_ASSERT(nb00 == sizeof(float));

    if (nb10 == sizeof(float)) {
        for (int64_t ir = ith; ir < nr; ir += nth) {
            // src0 and dst are same shape => same indices
            const int64_t i03 = ir/(ne02*ne01);
            const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
            const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);

            const int64_t i13 = i03 % ne13;
            const int64_t i12 = i02 % ne12;
            const int64_t i11 = i01 % ne11;
            const int64_t nr0 = ne00 / ne10;

            float * dst_ptr  = (float *) ((char *)  dst->data + i03*nb3  + i02*nb2  + i01*nb1 );
            float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);
            float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11);

            for (int64_t r = 0; r < nr0; ++r) {
#ifdef GGML_USE_ACCELERATE
                UNUSED(ggml_vec_div_f32);

                vDSP_vdiv(src1_ptr, 1, src0_ptr + r*ne10, 1, dst_ptr + r*ne10, 1, ne10);
#else
                ggml_vec_div_f32(ne10, dst_ptr + r*ne10, src0_ptr + r*ne10, src1_ptr);
#endif
            }
        }
    } else {
        // src1 is not contiguous
        for (int64_t ir = ith; ir < nr; ir += nth) {
            // src0 and dst are same shape => same indices
            // src1 is broadcastable across src0 and dst in i1, i2, i3
            const int64_t i03 = ir/(ne02*ne01);
            const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
            const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);

            const int64_t i13 = i03 % ne13;
            const int64_t i12 = i02 % ne12;
            const int64_t i11 = i01 % ne11;

            float * dst_ptr  = (float *) ((char *)  dst->data + i03*nb3  + i02*nb2  + i01*nb1 );
            float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);

            for (int64_t i0 = 0; i0 < ne00; ++i0) {
                const int64_t i10 = i0 % ne10;
                float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + i10*nb10);

                dst_ptr[i0] = src0_ptr[i0] / (*src1_ptr);
            }
        }
    }
}

static void ggml_compute_forward_div(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_div_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_sqr

static void ggml_compute_forward_sqr_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert( dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_sqr_f32(nc,
                (float *) ((char *)  dst->data + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

static void ggml_compute_forward_sqr(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_sqr_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_sqrt

static void ggml_compute_forward_sqrt_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert( dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_sqrt_f32(nc,
                (float *) ((char *)  dst->data + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

static void ggml_compute_forward_sqrt(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_sqrt_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_log

static void ggml_compute_forward_log_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(params->ith == 0);
    GGML_ASSERT(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    GGML_ASSERT( dst->nb[0] == sizeof(float));
    GGML_ASSERT(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_log_f32(nc,
                (float *) ((char *)  dst->data + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

static void ggml_compute_forward_log(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_log_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_sum
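
// The F32 reduction accumulates in ggml_float (double) to limit rounding
// error over long rows; the F16 variant widens each element and
// accumulates in float.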
static void ggml_compute_forward_sum_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_is_scalar(dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    assert(src0->nb[0] == sizeof(float));

    GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne)
    GGML_TENSOR_LOCALS(size_t,  nb0, src0, nb)

    ggml_float sum     = 0;
    ggml_float row_sum = 0;

    for (int64_t i03 = 0; i03 < ne03; i03++) {
        for (int64_t i02 = 0; i02 < ne02; i02++) {
            for (int64_t i01 = 0; i01 < ne01; i01++) {
                ggml_vec_sum_f32_ggf(ne00,
                        &row_sum,
                        (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03));
                sum += row_sum;
            }
        }
    }
    ((float *) dst->data)[0] = sum;
}
static void ggml_compute_forward_sum_f16(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_is_scalar(dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    assert(src0->nb[0] == sizeof(ggml_fp16_t));

    GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne)
    GGML_TENSOR_LOCALS(size_t,  nb0, src0, nb)

    float sum     = 0;
    float row_sum = 0;

    for (int64_t i03 = 0; i03 < ne03; i03++) {
        for (int64_t i02 = 0; i02 < ne02; i02++) {
            for (int64_t i01 = 0; i01 < ne01; i01++) {
                ggml_vec_sum_f16_ggf(ne00,
                        &row_sum,
                        (ggml_fp16_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03));
                sum += row_sum;
            }
        }
    }
    ((ggml_fp16_t *) dst->data)[0] = GGML_FP32_TO_FP16(sum);
}

static void ggml_compute_forward_sum(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_sum_f32(params, src0, dst);
            } break;
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_sum_f16(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_sum_rows

static void ggml_compute_forward_sum_rows_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    GGML_ASSERT(src0->nb[0] == sizeof(float));
    GGML_ASSERT( dst->nb[0] == sizeof(float));

    GGML_TENSOR_UNARY_OP_LOCALS

    GGML_ASSERT(ne0 == 1);
    GGML_ASSERT(ne1 == ne01);
    GGML_ASSERT(ne2 == ne02);
    GGML_ASSERT(ne3 == ne03);

    for (int64_t i3 = 0; i3 < ne03; i3++) {
        for (int64_t i2 = 0; i2 < ne02; i2++) {
            for (int64_t i1 = 0; i1 < ne01; i1++) {
                float * src_row = (float *) ((char *) src0->data + i1*nb01 + i2*nb02 + i3*nb03);
                float * dst_row = (float *) ((char *)  dst->data + i1*nb1  + i2*nb2  + i3*nb3);
                float row_sum = 0;
                ggml_vec_sum_f32(ne00, &row_sum, src_row);
                dst_row[0] = row_sum;
            }
        }
    }
}

static void ggml_compute_forward_sum_rows(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_sum_rows_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_mean

static void ggml_compute_forward_mean_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    assert(src0->nb[0] == sizeof(float));

    GGML_TENSOR_UNARY_OP_LOCALS

    assert(ne0 == 1);
    assert(ne1 == ne01);
    assert(ne2 == ne02);
    assert(ne3 == ne03);

    UNUSED(ne0);
    UNUSED(ne1);
    UNUSED(ne2);
    UNUSED(ne3);

    for (int64_t i03 = 0; i03 < ne03; i03++) {
        for (int64_t i02 = 0; i02 < ne02; i02++) {
            for (int64_t i01 = 0; i01 < ne01; i01++) {
                ggml_vec_sum_f32(ne00,
                        (float *) ((char *)  dst->data + i01*nb1  + i02*nb2  + i03*nb3),
                        (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03));

                *(float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3) /= (float) ne00;
            }
        }
    }
}

static void ggml_compute_forward_mean(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_mean_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_argmax

static void ggml_compute_forward_argmax_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    assert(src0->nb[0] == sizeof(float));
    assert( dst->nb[0] == sizeof(float));

    const int64_t ne00 = src0->ne[0];
    const int64_t ne01 = src0->ne[1];

    const size_t nb01 = src0->nb[1];
    const size_t nb0  = dst->nb[0];

    for (int64_t i1 = 0; i1 < ne01; i1++) {
        float   * src  = (float *)   ((char *) src0->data + i1*nb01);
        int32_t * dst_ = (int32_t *) ((char *)  dst->data + i1*nb0);
        int v = 0;
        ggml_vec_argmax_f32(ne00, &v, src);
        dst_[0] = v;
    }
}

static void ggml_compute_forward_argmax(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_argmax_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_repeat
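
// repeat tiles src0 across dst. ggml_can_repeat guarantees every dst
// dimension is a whole multiple of the matching src0 dimension, so the
// repeat counts nr0..nr3 below are exact integers.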
  6797. static void ggml_compute_forward_repeat_f32(
  6798. const struct ggml_compute_params * params,
  6799. const struct ggml_tensor * src0,
  6800. struct ggml_tensor * dst) {
  6801. GGML_ASSERT(params->ith == 0);
  6802. GGML_ASSERT(ggml_can_repeat(src0, dst));
  6803. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  6804. return;
  6805. }
  6806. GGML_TENSOR_UNARY_OP_LOCALS
  6807. // guaranteed to be an integer due to the check in ggml_can_repeat
  6808. const int nr0 = (int)(ne0/ne00);
  6809. const int nr1 = (int)(ne1/ne01);
  6810. const int nr2 = (int)(ne2/ne02);
  6811. const int nr3 = (int)(ne3/ne03);
  6812. // TODO: support for transposed / permuted tensors
  6813. GGML_ASSERT(nb0 == sizeof(float));
  6814. GGML_ASSERT(nb00 == sizeof(float));
  6815. // TODO: maybe this is not optimal?
  6816. for (int i3 = 0; i3 < nr3; i3++) {
  6817. for (int k3 = 0; k3 < ne03; k3++) {
  6818. for (int i2 = 0; i2 < nr2; i2++) {
  6819. for (int k2 = 0; k2 < ne02; k2++) {
  6820. for (int i1 = 0; i1 < nr1; i1++) {
  6821. for (int k1 = 0; k1 < ne01; k1++) {
  6822. for (int i0 = 0; i0 < nr0; i0++) {
  6823. ggml_vec_cpy_f32(ne00,
  6824. (float *) ((char *) dst->data + (i3*ne03 + k3)*nb3 + (i2*ne02 + k2)*nb2 + (i1*ne01 + k1)*nb1 + (i0*ne00)*nb0),
  6825. (float *) ((char *) src0->data + ( k3)*nb03 + ( k2)*nb02 + ( k1)*nb01));
  6826. }
  6827. }
  6828. }
  6829. }
  6830. }
  6831. }
  6832. }
  6833. }
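
// Note on the repeat loops above: nrX is how many times src0 tiles along
// dimension X; e.g. repeating a 2x3 src0 (ne00 = 2, ne01 = 3) into a 4x3 dst
// gives nr0 = 2, nr1 = 1, so each source row is copied twice along dim 0.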

static void ggml_compute_forward_repeat_f16(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(params->ith == 0);
    GGML_ASSERT(ggml_can_repeat(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    GGML_TENSOR_UNARY_OP_LOCALS

    // guaranteed to be an integer due to the check in ggml_can_repeat
    const int nr0 = (int)(ne0/ne00);
    const int nr1 = (int)(ne1/ne01);
    const int nr2 = (int)(ne2/ne02);
    const int nr3 = (int)(ne3/ne03);

    // TODO: support for transposed / permuted tensors
    GGML_ASSERT(nb0  == sizeof(ggml_fp16_t));
    GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));

    // TODO: maybe this is not optimal?
    for (int i3 = 0; i3 < nr3; i3++) {
        for (int k3 = 0; k3 < ne03; k3++) {
            for (int i2 = 0; i2 < nr2; i2++) {
                for (int k2 = 0; k2 < ne02; k2++) {
                    for (int i1 = 0; i1 < nr1; i1++) {
                        for (int k1 = 0; k1 < ne01; k1++) {
                            for (int i0 = 0; i0 < nr0; i0++) {
                                ggml_fp16_t * y = (ggml_fp16_t *) ((char *)  dst->data + (i3*ne03 + k3)*nb3 + (i2*ne02 + k2)*nb2 + (i1*ne01 + k1)*nb1 + (i0*ne00)*nb0);
                                ggml_fp16_t * x = (ggml_fp16_t *) ((char *) src0->data + (k3)*nb03 + (k2)*nb02 + (k1)*nb01);
                                // ggml_vec_cpy_f16(ne00, y, x)
                                for (int i = 0; i < ne00; ++i) {
                                    y[i] = x[i];
                                }
                            }
                        }
                    }
                }
            }
        }
    }
}

static void ggml_compute_forward_repeat(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_repeat_f16(params, src0, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_repeat_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_repeat_back

static void ggml_compute_forward_repeat_back_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(params->ith == 0);
    GGML_ASSERT(ggml_can_repeat(dst, src0));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    GGML_TENSOR_UNARY_OP_LOCALS

    // guaranteed to be an integer due to the check in ggml_can_repeat
    const int nr0 = (int)(ne00/ne0);
    const int nr1 = (int)(ne01/ne1);
    const int nr2 = (int)(ne02/ne2);
    const int nr3 = (int)(ne03/ne3);

    // TODO: support for transposed / permuted tensors
    GGML_ASSERT(nb0  == sizeof(float));
    GGML_ASSERT(nb00 == sizeof(float));

    if (ggml_is_contiguous(dst)) {
        ggml_vec_set_f32(ne0*ne1*ne2*ne3, dst->data, 0);
    } else {
        for (int k3 = 0; k3 < ne3; k3++) {
            for (int k2 = 0; k2 < ne2; k2++) {
                for (int k1 = 0; k1 < ne1; k1++) {
                    ggml_vec_set_f32(ne0,
                        (float *) ((char *) dst->data + k1*nb1 + k2*nb2 + k3*nb3),
                        0);
                }
            }
        }
    }

    // TODO: maybe this is not optimal?
    for (int i3 = 0; i3 < nr3; i3++) {
        for (int k3 = 0; k3 < ne3; k3++) {
            for (int i2 = 0; i2 < nr2; i2++) {
                for (int k2 = 0; k2 < ne2; k2++) {
                    for (int i1 = 0; i1 < nr1; i1++) {
                        for (int k1 = 0; k1 < ne1; k1++) {
                            for (int i0 = 0; i0 < nr0; i0++) {
                                ggml_vec_acc_f32(ne0,
                                        (float *) ((char *)  dst->data + (k3)*nb3 + (k2)*nb2 + (k1)*nb1),
                                        (float *) ((char *) src0->data + (i3*ne3 + k3)*nb03 + (i2*ne2 + k2)*nb02 + (i1*ne1 + k1)*nb01 + (i0*ne0)*nb00));
                            }
                        }
                    }
                }
            }
        }
    }
}
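
// Note on repeat_back above: it is the adjoint of repeat - every tile written
// in the forward pass contributes gradient, so after dst is zeroed the
// nr0*nr1*nr2*nr3 tiles of src0 are accumulated into the single dst block
// via ggml_vec_acc_f32.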

static void ggml_compute_forward_repeat_back(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_repeat_back_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_concat

static void ggml_compute_forward_concat_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    GGML_ASSERT(src0->nb[0] == sizeof(float));

    const int ith = params->ith;
    const int nth = params->nth;

    GGML_TENSOR_BINARY_OP_LOCALS

    // TODO: support for transposed / permuted tensors
    GGML_ASSERT(nb0  == sizeof(float));
    GGML_ASSERT(nb00 == sizeof(float));
    GGML_ASSERT(nb10 == sizeof(float));

    for (int i3 = 0; i3 < ne3; i3++) {
        for (int i2 = ith; i2 < ne2; i2 += nth) {
            if (i2 < ne02) { // src0
                for (int i1 = 0; i1 < ne1; i1++) {
                    for (int i0 = 0; i0 < ne0; i0++) {
                        const float * x = (float *)((char *) src0->data + i0 * nb00 + i1 * nb01 + i2 * nb02 + i3 * nb03);

                        float * y = (float *)((char *) dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3);
                        *y = *x;
                    }
                }
            } // src1
            else {
                for (int i1 = 0; i1 < ne1; i1++) {
                    for (int i0 = 0; i0 < ne0; i0++) {
                        const float * x = (float *)((char *) src1->data + i0 * nb10 + i1 * nb11 + (i2 - ne02) * nb12 + i3 * nb13);

                        float * y = (float *)((char *) dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3);
                        *y = *x;
                    }
                }
            }
        }
    }
}
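
// Note on concat above: concatenation is along dim 2 - planes with i2 < ne02
// are copied from src0, the rest from src1 with the index shifted by ne02;
// threads split the work over the i2 planes.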

static void ggml_compute_forward_concat(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_concat_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_abs

static void ggml_compute_forward_abs_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert(dst->nb[0]  == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_abs_f32(nc,
                (float *) ((char *)  dst->data + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

static void ggml_compute_forward_abs(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_abs_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
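
// Note: the unary ops that follow (sgn, neg, step, tanh, elu, relu) all use
// the same single-threaded pattern as abs above - assert matching shapes and
// contiguous rows, then apply the corresponding ggml_vec_*_f32 kernel row by
// row.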

// ggml_compute_forward_sgn

static void ggml_compute_forward_sgn_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert(dst->nb[0]  == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_sgn_f32(nc,
                (float *) ((char *)  dst->data + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

static void ggml_compute_forward_sgn(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_sgn_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_neg

static void ggml_compute_forward_neg_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert(dst->nb[0]  == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_neg_f32(nc,
                (float *) ((char *)  dst->data + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

static void ggml_compute_forward_neg(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_neg_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_step

static void ggml_compute_forward_step_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert(dst->nb[0]  == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_step_f32(nc,
                (float *) ((char *)  dst->data + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

static void ggml_compute_forward_step(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_step_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_tanh

static void ggml_compute_forward_tanh_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert(dst->nb[0]  == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_tanh_f32(nc,
                (float *) ((char *)  dst->data + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

static void ggml_compute_forward_tanh(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_tanh_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_elu

static void ggml_compute_forward_elu_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert(dst->nb[0]  == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_elu_f32(nc,
                (float *) ((char *)  dst->data + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

static void ggml_compute_forward_elu(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_elu_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_relu

static void ggml_compute_forward_relu_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert(dst->nb[0]  == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_relu_f32(nc,
                (float *) ((char *)  dst->data + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

static void ggml_compute_forward_relu(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_relu_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_gelu

static void ggml_compute_forward_gelu_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_is_contiguous_except_dim_1(src0));
    GGML_ASSERT(ggml_is_contiguous_except_dim_1(dst));
    GGML_ASSERT(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int nc = src0->ne[0];
    const int nr = ggml_nrows(src0);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        ggml_vec_gelu_f32(nc,
                (float *) ((char *)  dst->data + i1*( dst->nb[1])),
                (float *) ((char *) src0->data + i1*(src0->nb[1])));

#ifndef NDEBUG
        for (int k = 0; k < nc; k++) {
            const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k];
            UNUSED(x);
            assert(!isnan(x));
            assert(!isinf(x));
        }
#endif
    }
}
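
// Note on the threading above: rows are split evenly, dr = ceil(nr/nth);
// e.g. with nr = 10 rows and nth = 4 threads, dr = 3 and the threads cover
// rows [0,3), [3,6), [6,9) and [9,10).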

static void ggml_compute_forward_gelu(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_gelu_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_gelu_quick

static void ggml_compute_forward_gelu_quick_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_is_contiguous_except_dim_1(src0));
    GGML_ASSERT(ggml_is_contiguous_except_dim_1(dst));
    GGML_ASSERT(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int nc = src0->ne[0];
    const int nr = ggml_nrows(src0);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        ggml_vec_gelu_quick_f32(nc,
                (float *) ((char *)  dst->data + i1*( dst->nb[1])),
                (float *) ((char *) src0->data + i1*(src0->nb[1])));

#ifndef NDEBUG
        for (int k = 0; k < nc; k++) {
            const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k];
            UNUSED(x);
            assert(!isnan(x));
            assert(!isinf(x));
        }
#endif
    }
}

static void ggml_compute_forward_gelu_quick(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_gelu_quick_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_silu

static void ggml_compute_forward_silu_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_is_contiguous_except_dim_1(src0));
    GGML_ASSERT(ggml_is_contiguous_except_dim_1(dst));
    GGML_ASSERT(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int nc = src0->ne[0];
    const int nr = ggml_nrows(src0);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        ggml_vec_silu_f32(nc,
                (float *) ((char *)  dst->data + i1*( dst->nb[1])),
                (float *) ((char *) src0->data + i1*(src0->nb[1])));

#ifndef NDEBUG
        for (int k = 0; k < nc; k++) {
            const float x = ((float *) ((char *) dst->data + i1*(dst->nb[1])))[k];
            UNUSED(x);
            assert(!isnan(x));
            assert(!isinf(x));
        }
#endif
    }
}

static void ggml_compute_forward_silu(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_silu_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_leaky_relu

static void ggml_compute_forward_leaky_relu_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    float negative_slope;
    memcpy(&negative_slope, dst->op_params, sizeof(float));

    assert(dst->nb[0]  == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_leaky_relu_f32(nc,
                (float *) ((char *)  dst->data + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])), negative_slope);
    }
}

static void ggml_compute_forward_leaky_relu(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_leaky_relu_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_silu_back

static void ggml_compute_forward_silu_back_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * grad,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_is_contiguous_except_dim_1(grad));
    GGML_ASSERT(ggml_is_contiguous_except_dim_1(src0));
    GGML_ASSERT(ggml_is_contiguous_except_dim_1(dst));
    GGML_ASSERT(ggml_are_same_shape(src0, dst));
    GGML_ASSERT(ggml_are_same_shape(src0, grad));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int nc = src0->ne[0];
    const int nr = ggml_nrows(src0);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        ggml_vec_silu_backward_f32(nc,
                (float *) ((char *)  dst->data + i1*( dst->nb[1])),
                (float *) ((char *) src0->data + i1*(src0->nb[1])),
                (float *) ((char *) grad->data + i1*(grad->nb[1])));

#ifndef NDEBUG
        for (int k = 0; k < nc; k++) {
            const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k];
            UNUSED(x);
            assert(!isnan(x));
            assert(!isinf(x));
        }
#endif
    }
}
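
// Note on silu_back above: src0 holds the forward input x and grad holds dy.
// For y = x*sigmoid(x) the derivative is dy/dx = s*(1 + x*(1 - s)) with
// s = sigmoid(x), which is what ggml_vec_silu_backward_f32 is expected to
// apply element-wise.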

static void ggml_compute_forward_silu_back(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * grad,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_silu_back_f32(params, src0, grad, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_norm

static void ggml_compute_forward_norm_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    GGML_ASSERT(src0->nb[0] == sizeof(float));

    const int ith = params->ith;
    const int nth = params->nth;

    GGML_TENSOR_UNARY_OP_LOCALS

    float eps;
    memcpy(&eps, dst->op_params, sizeof(float));

    // TODO: optimize
    for (int64_t i03 = 0; i03 < ne03; i03++) {
        for (int64_t i02 = 0; i02 < ne02; i02++) {
            for (int64_t i01 = ith; i01 < ne01; i01 += nth) {
                const float * x = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);

                ggml_float sum = 0.0;
                for (int64_t i00 = 0; i00 < ne00; i00++) {
                    sum += (ggml_float)x[i00];
                }

                float mean = sum/ne00;

                float * y = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3);

                ggml_float sum2 = 0.0;
                for (int64_t i00 = 0; i00 < ne00; i00++) {
                    float v = x[i00] - mean;
                    y[i00] = v;
                    sum2 += (ggml_float)(v*v);
                }

                float variance = sum2/ne00;
                const float scale = 1.0f/sqrtf(variance + eps);

                ggml_vec_scale_f32(ne00, y, scale);
            }
        }
    }
}
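
// Note on norm above: this is a two-pass layer normalization per row - the
// first pass computes the mean, the second subtracts it while accumulating
// the variance, and the row is finally scaled by 1/sqrt(variance + eps).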

static void ggml_compute_forward_norm(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_norm_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_rms_norm

static void ggml_compute_forward_rms_norm_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    GGML_ASSERT(src0->nb[0] == sizeof(float));

    const int ith = params->ith;
    const int nth = params->nth;

    GGML_TENSOR_UNARY_OP_LOCALS

    float eps;
    memcpy(&eps, dst->op_params, sizeof(float));

    // TODO: optimize
    for (int64_t i03 = 0; i03 < ne03; i03++) {
        for (int64_t i02 = 0; i02 < ne02; i02++) {
            for (int64_t i01 = ith; i01 < ne01; i01 += nth) {
                const float * x = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);

                ggml_float sum = 0.0;
                for (int64_t i00 = 0; i00 < ne00; i00++) {
                    sum += (ggml_float)(x[i00] * x[i00]);
                }

                const float mean = sum/ne00;

                float * y = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3);

                memcpy(y, x, ne00 * sizeof(float));
                // for (int i00 = 0; i00 < ne00; i00++) {
                //     y[i00] = x[i00];
                // }

                const float scale = 1.0f/sqrtf(mean + eps);

                ggml_vec_scale_f32(ne00, y, scale);
            }
        }
    }
}
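
// Note on rms_norm above: each row is scaled by 1/sqrt(mean(x^2) + eps)
// without subtracting the mean, so unlike layer norm a single pass over x
// suffices before the scale.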

static void ggml_compute_forward_rms_norm(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_rms_norm_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

static void ggml_compute_forward_rms_norm_back_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, dst) && ggml_are_same_shape(src0, src1));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    GGML_ASSERT(src0->nb[0] == sizeof(float));

    const int ith = params->ith;
    const int nth = params->nth;

    GGML_TENSOR_BINARY_OP_LOCALS

    float eps;
    memcpy(&eps, dst->op_params, sizeof(float));

    // TODO: optimize
    for (int64_t i03 = 0; i03 < ne03; i03++) {
        for (int64_t i02 = 0; i02 < ne02; i02++) {
            for (int64_t i01 = ith; i01 < ne01; i01 += nth) {
                // src1 is same shape as src0 => same indices
                const int64_t i11 = i01;
                const int64_t i12 = i02;
                const int64_t i13 = i03;

                const float * x  = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
                const float * dz = (float *) ((char *) src1->data + i11*nb11 + i12*nb12 + i13*nb13);

                ggml_float sum_xx  = 0.0;
                ggml_float sum_xdz = 0.0;

                for (int64_t i00 = 0; i00 < ne00; i00++) {
                    sum_xx  += (ggml_float)(x[i00] * x[i00]);
                    sum_xdz += (ggml_float)(x[i00] * dz[i00]);
                }

                //const float mean     = (float)(sum_xx)/ne00;
                const float mean_eps = (float)(sum_xx)/ne00 + eps;
                const float sum_eps  = (float)(sum_xx) + eps*ne00;
                //const float mean_xdz = (float)(sum_xdz)/ne00;
                // we could cache rms from forward pass to improve performance.
                // to do this implement ggml_rms and compose ggml_rms_norm using ggml_rms.
                //const float rms  = sqrtf(mean_eps);
                const float rrms = 1.0f / sqrtf(mean_eps);
                //const float scale = -rrms/(ne00 * mean_eps); // -1/(n*rms**3)

                {
                    // z = rms_norm(x)
                    //
                    // rms_norm(src0) =
                    //     scale(
                    //         src0,
                    //         div(
                    //             1,
                    //             sqrt(
                    //                 add(
                    //                     scale(
                    //                         sum(
                    //                             sqr(
                    //                                 src0)),
                    //                         (1.0/N)),
                    //                     eps))));

                    // postorder:
                    // ## op    args         grad
                    // 00 param src0         grad[#00]
                    // 01 const 1
                    // 02 sqr   (#00)        grad[#02]
                    // 03 sum   (#02)        grad[#03]
                    // 04 const 1/N
                    // 05 scale (#03, #04)   grad[#05]
                    // 06 const eps
                    // 07 add   (#05, #06)   grad[#07]
                    // 08 sqrt  (#07)        grad[#08]
                    // 09 div   (#01,#08)    grad[#09]
                    // 10 scale (#00,#09)    grad[#10]
                    //
                    // backward pass, given grad[#10]
                    // #10: scale
                    // grad[#00] += scale(grad[#10],#09)
                    // grad[#09] += sum(mul(grad[#10],#00))
                    // #09: div
                    // grad[#08] += neg(mul(grad[#09], div(#09,#08)))
                    // #08: sqrt
                    // grad[#07] += mul(grad[#08], div(0.5, #08))
                    // #07: add
                    // grad[#05] += grad[#07]
                    // #05: scale
                    // grad[#03] += scale(grad[#05],#04)
                    // #03: sum
                    // grad[#02] += repeat(grad[#03], #02)
                    // #02:
                    // grad[#00] += scale(mul(#00, grad[#02]), 2.0)
                    //
                    // substitute and simplify:
                    // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, grad[#02]), 2.0)
                    // grad[#02] = repeat(grad[#03], #02)
                    // grad[#02] = repeat(scale(grad[#05],#04), #02)
                    // grad[#02] = repeat(scale(grad[#07],#04), #02)
                    // grad[#02] = repeat(scale(mul(grad[#08], div(0.5, #08)),#04), #02)
                    // grad[#02] = repeat(scale(mul(neg(mul(grad[#09], div(#09,#08))), div(0.5, #08)),#04), #02)
                    // grad[#02] = repeat(scale(mul(neg(mul(sum(mul(grad[#10],#00)), div(#09,#08))), div(0.5, #08)),#04), #02)
                    // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(#09,#08) * div(0.5, #08) * (1/N)), #02)
                    // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(div(#01,#08),#08) * div(0.5, #08) * (1/N)), #02)
                    // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(1,#08*#08) * div(0.5, #08) * (1/N)), #02)
                    // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N)), #02)
                    // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, grad[#02]), 2.0)
                    // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, repeat(-(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N)), #02)), 2.0)
                    // grad[#00] = scale(grad(#10), #09) + scale(scale(#00, -(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N))), 2.0)
                    // grad[#00] = scale(grad(#10), #09) + scale(#00, -(sum(mul(grad[#10],#00)) * div(1,#07) * div(1,#08) * (1/N)))
                    // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(1,#07*#08) * (-1/N))
                    // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(1,#07*#08) * (-1/N))
                    // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(1,mean_eps*rms) * (-1/N))
                    // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*mean_eps))
                    // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*(sum_xx/N+eps)))
                    // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*sum_xx+rms*N*eps))
                    // grad[#00] = scale(dz, rrms) + scale(x, sum(mul(dz,x)) * div(-1,rms*N*mean_eps))
                    // grad[#00] = scale(dz, rrms) + scale(x, sum_xdz * div(-1,rms*N*mean_eps))
                    // a = b*c + d*e
                    // a = b*c*f/f + d*e*f/f
                    // a = (b*c*f + d*e*f)*(1/f)
                    // a = (b*c*(1/c) + d*e*(1/c))*(1/(1/c))
                    // a = (b + d*e/c)*c
                    // b = dz, c = rrms, d = x, e = sum_xdz * div(-1,rms*N*mean_eps)
                    // a = (dz + x*sum_xdz * div(-1,rms*N*mean_eps)/rrms)*rrms
                    // a = (dz + x*sum_xdz * div(-1,rms*N*mean_eps)*rms)*rrms
                    // a = (dz + x*sum_xdz * div(-rms,rms*N*mean_eps))*rrms
                    // a = (dz + x*sum_xdz * div(-1,N*mean_eps))*rrms
                    // a = (dz + x*div(-sum_xdz,N*mean_eps))*rrms
                    // a = (dz + x*div(-mean_xdz,mean_eps))*rrms
                    // grad[#00] = scale(dz + scale(x, div(-mean_xdz,mean_eps)),rrms)
                    // grad[#00] = scale(dz + scale(x, -mean_xdz/mean_eps),rrms)
                    // dx = scale(dz + scale(x, -mean_xdz/mean_eps),rrms)
                }
                // dx = scale(dz + scale(x, -mean_xdz/mean_eps),rrms)
                // post-order:
                // dx := x
                // dx := scale(dx,-mean_xdz/mean_eps)
                // dx := add(dx, dz)
                // dx := scale(dx, rrms)
                float * dx = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3);

                ggml_vec_cpy_f32  (ne00, dx, x);
                // ggml_vec_scale_f32(ne00, dx, -mean_xdz/mean_eps);
                ggml_vec_scale_f32(ne00, dx, (float)(-sum_xdz)/sum_eps);
                ggml_vec_acc_f32  (ne00, dx, dz);
                ggml_vec_scale_f32(ne00, dx, rrms);
            }
        }
    }
}
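
// Note: the long derivation above boils down to four vector ops per row:
// dx = (dz + x*(-sum_xdz/sum_eps)) * rrms, implemented as the
// cpy/scale/acc/scale sequence at the end of the loop body.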

static void ggml_compute_forward_rms_norm_back(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_rms_norm_back_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_group_norm

static void ggml_compute_forward_group_norm_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    GGML_ASSERT(src0->nb[0] == sizeof(float));

    const int ith = params->ith;
    const int nth = params->nth;

    GGML_TENSOR_UNARY_OP_LOCALS

    const float eps = 1e-6f; // TODO: make this a parameter

    // TODO: optimize
    int n_channels = src0->ne[2];
    int n_groups = dst->op_params[0];
    int n_channels_per_group = (n_channels + n_groups - 1) / n_groups;
    for (int i = ith; i < n_groups; i += nth) {
        int start = i * n_channels_per_group;
        int end = start + n_channels_per_group;
        if (end > n_channels) {
            end = n_channels;
        }
        int step = end - start;

        for (int64_t i03 = 0; i03 < ne03; i03++) {
            ggml_float sum = 0.0;
            for (int64_t i02 = start; i02 < end; i02++) {
                for (int64_t i01 = 0; i01 < ne01; i01++) {
                    const float * x = (float *)((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03);

                    for (int64_t i00 = 0; i00 < ne00; i00++) {
                        sum += (ggml_float)x[i00];
                    }
                }
            }
            float mean = sum / (ne00 * ne01 * step);
            ggml_float sum2 = 0.0;

            for (int64_t i02 = start; i02 < end; i02++) {
                for (int64_t i01 = 0; i01 < ne01; i01++) {
                    const float * x = (float *)((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03);

                    float * y = (float *)((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3);

                    for (int64_t i00 = 0; i00 < ne00; i00++) {
                        float v = x[i00] - mean;
                        y[i00] = v;
                        sum2 += (ggml_float)(v * v);
                    }
                }
            }
            float variance = sum2 / (ne00 * ne01 * step);
            const float scale = 1.0f / sqrtf(variance + eps);

            for (int64_t i02 = start; i02 < end; i02++) {
                for (int64_t i01 = 0; i01 < ne01; i01++) {
                    float * y = (float *)((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3);
                    ggml_vec_scale_f32(ne00, y, scale);
                }
            }
        }
    }
}
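
// Note on group_norm above: the channels (dim 2) are split into n_groups
// contiguous groups, each normalized over all of its channels and rows; e.g.
// n_channels = 32 with n_groups = 8 gives 4 channels per group.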

static void ggml_compute_forward_group_norm(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_group_norm_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_mul_mat

#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
// helper function to determine if it is better to use BLAS or not
// for large matrices, BLAS is faster
static bool ggml_compute_forward_mul_mat_use_blas(
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    //const int64_t ne00 = src0->ne[0];
    //const int64_t ne01 = src0->ne[1];

    const int64_t ne10 = src1->ne[0];

    const int64_t ne0 = dst->ne[0];
    const int64_t ne1 = dst->ne[1];

    // NOTE: with GGML_OP_MUL_MAT_ID we don't want to go through the BLAS branch because it will dequantize (to_float)
    //       all the experts for each batch element and the processing would become incredibly slow
    // TODO: find the optimal values for these
    if (dst->op != GGML_OP_MUL_MAT_ID &&
        ggml_is_contiguous(src0) &&
        ggml_is_contiguous(src1) &&
        //src0->type == GGML_TYPE_F32 &&
        src1->type == GGML_TYPE_F32 &&
        (ne0 >= 32 && ne1 >= 32 && ne10 >= 32)) {

        /*printf("BLAS: %d %d %d %d %d\n", ne0, ne1, ne10, ne00, ne01);*/
        return true;
    }

    return false;
}
#endif
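
// Note: the heuristic above only routes to BLAS when both operands are
// contiguous, src1 is F32 and all relevant dimensions are at least 32;
// presumably below that size the call overhead (including dequantizing src0
// to float) outweighs the sgemm speedup.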

// off1 = offset in i11 and i1
// cne1 = ne11 and ne1
// in a normal matrix multiplication, off1 = 0 and cne1 = ne1
// during GGML_TASK_INIT, the full src1 is converted regardless of off1 and cne1
static void ggml_compute_forward_mul_mat(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst,
        int64_t off1, int64_t cne1) {
    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    GGML_TENSOR_BINARY_OP_LOCALS

    const int ith = params->ith;
    const int nth = params->nth;

    const enum ggml_type type = src0->type;

    const bool src1_cont = ggml_is_contiguous(src1);

    ggml_vec_dot_t    const vec_dot               = type_traits[type].vec_dot;
    enum ggml_type    const vec_dot_type          = type_traits[type].vec_dot_type;
    ggml_from_float_t const from_float_to_vec_dot = type_traits[vec_dot_type].from_float;

    GGML_ASSERT(ne0 == ne01);
    GGML_ASSERT(ne1 == ne11);
    GGML_ASSERT(ne2 == ne12);
    GGML_ASSERT(ne3 == ne13);

    // we don't support permuted src0 or src1
    GGML_ASSERT(nb00 == ggml_type_size(type));
    GGML_ASSERT(nb10 == ggml_type_size(src1->type));

    // dst cannot be transposed or permuted
    GGML_ASSERT(nb0 == sizeof(float));
    GGML_ASSERT(nb0 <= nb1);
    GGML_ASSERT(nb1 <= nb2);
    GGML_ASSERT(nb2 <= nb3);

    // broadcast factors
    const int64_t r2 = ne12/ne02;
    const int64_t r3 = ne13/ne03;

    // nb01 >= nb00 - src0 is not transposed
    //   compute by src0 rows

#if defined(GGML_USE_CLBLAST)
    if (ggml_cl_can_mul_mat(src0, src1, dst)) {
        if (params->ith == 0 && params->type == GGML_TASK_COMPUTE) {
            ggml_cl_mul_mat(src0, src1, dst, params->wdata, params->wsize);
        }
        return;
    }
#endif

#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
    if (ggml_compute_forward_mul_mat_use_blas(src0, src1, dst)) {
        if (params->ith != 0) {
            return;
        }

        if (params->type == GGML_TASK_INIT) {
            return;
        }

        if (params->type == GGML_TASK_FINALIZE) {
            return;
        }

        for (int64_t i13 = 0; i13 < ne13; i13++) {
            for (int64_t i12 = 0; i12 < ne12; i12++) {
                // broadcast src0 into src1 across 2nd,3rd dimension
                const int64_t i03 = i13/r3;
                const int64_t i02 = i12/r2;

                const void  * x = (char *)            src0->data + i02*nb02 + i03*nb03;
                const float * y = (float *) ((char *) src1->data + off1*nb11 + i12*nb12 + i13*nb13);
                      float * d = (float *) ((char *)  dst->data + off1*nb1  + i12*nb2  + i13*nb3);

                if (type != GGML_TYPE_F32) {
                    float * const wdata = params->wdata;
                    ggml_to_float_t const to_float = type_traits[type].to_float;

                    size_t id = 0;
                    for (int64_t i01 = 0; i01 < ne01; ++i01) {
                        to_float((const char *) x + i01*nb01, wdata + id, ne00);
                        id += ne00;
                    }

                    assert(id*sizeof(float) <= params->wsize);
                    x = wdata;
                }

                cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans,
                        cne1, ne01, ne10,
                        1.0f,   y, ne10,
                                x, ne00,
                        0.0f,   d, ne01);
            }
        }

        //printf("CBLAS = %f ms, %d x %d x %d x %d\n", (ggml_perf_time_us() - t0)/1000.0, ne0, ne1, ne2, ne3);

        return;
    }
#endif

    if (params->type == GGML_TASK_INIT) {
        if (src1->type != vec_dot_type) {
            char * wdata = params->wdata;
            const size_t row_size = ne10*ggml_type_size(vec_dot_type)/ggml_blck_size(vec_dot_type);

            assert(params->wsize >= ne11*ne12*ne13*row_size);
            assert(src1->type == GGML_TYPE_F32);

            for (int64_t i13 = 0; i13 < ne13; ++i13) {
                for (int64_t i12 = 0; i12 < ne12; ++i12) {
                    for (int64_t i11 = 0; i11 < ne11; ++i11) {
                        from_float_to_vec_dot((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11), (void *) wdata, ne10);
                        wdata += row_size;
                    }
                }
            }
        }

        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const void * wdata    = (src1->type == vec_dot_type) ? src1->data : params->wdata;
    const size_t row_size = ne10*ggml_type_size(vec_dot_type)/ggml_blck_size(vec_dot_type);

    const int64_t nr0 = ne01;           // src0 rows
    const int64_t nr1 = cne1*ne12*ne13; // src1 rows

    //printf("nr0 = %lld, nr1 = %lld\n", nr0, nr1);

    // distribute the thread work across the inner or outer loop based on which one is larger

    const int64_t nth0 = nr0 > nr1 ? nth : 1; // parallelize by src0 rows
    const int64_t nth1 = nr0 > nr1 ? 1 : nth; // parallelize by src1 rows

    const int64_t ith0 = ith % nth0;
    const int64_t ith1 = ith / nth0;

    const int64_t dr0 = (nr0 + nth0 - 1)/nth0;
    const int64_t dr1 = (nr1 + nth1 - 1)/nth1;

    const int64_t ir010 = dr0*ith0;
    const int64_t ir011 = MIN(ir010 + dr0, nr0);

    const int64_t ir110 = dr1*ith1;
    const int64_t ir111 = MIN(ir110 + dr1, nr1);

    //printf("ir010 = %6lld, ir011 = %6lld, ir110 = %6lld, ir111 = %6lld\n", ir010, ir011, ir110, ir111);

    // threads with no work simply yield (not sure if it helps)
    if (ir010 >= ir011 || ir110 >= ir111) {
        sched_yield();
        return;
    }

    assert(ne12 % ne02 == 0);
    assert(ne13 % ne03 == 0);

    // block-tiling attempt
    const int64_t blck_0 = 16;
    const int64_t blck_1 = 16;

    // attempt to reduce false-sharing (does not seem to make a difference)
    float tmp[16];

    for (int64_t iir1 = ir110; iir1 < ir111; iir1 += blck_1) {
        for (int64_t iir0 = ir010; iir0 < ir011; iir0 += blck_0) {
            for (int64_t ir1 = iir1; ir1 < iir1 + blck_1 && ir1 < ir111; ++ir1) {
                const int64_t i13 = (ir1/(ne12*cne1));
                const int64_t i12 = (ir1 - i13*ne12*cne1)/cne1;
                const int64_t i11 = (ir1 - i13*ne12*cne1 - i12*cne1) + off1;

                // broadcast src0 into src1
                const int64_t i03 = i13/r3;
                const int64_t i02 = i12/r2;

                const int64_t i1 = i11;
                const int64_t i2 = i12;
                const int64_t i3 = i13;

                const char * src0_row = (const char *) src0->data + (0 + i02*nb02 + i03*nb03);

                // desc: when src1 is not a contiguous memory block we have to calculate the offset using the strides
                //       if it is, then we have either copied the data to params->wdata and made it contiguous or we are using
                //       the original src1 data pointer, so we should index using the indices directly
                // TODO: this is a bit of a hack, we should probably have a better way to handle this
                const char * src1_col = (const char *) wdata +
                    (src1_cont || src1->type != vec_dot_type
                     ? (i11      + i12*ne11 + i13*ne12*ne11)*row_size
                     : (i11*nb11 + i12*nb12 + i13*nb13));

                float * dst_col = (float *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3));

                //for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir011; ++ir0) {
                //    vec_dot(ne00, &dst_col[ir0], src0_row + ir0*nb01, src1_col);
                //}

                for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir011; ++ir0) {
                    vec_dot(ne00, &tmp[ir0 - iir0], src0_row + ir0*nb01, src1_col);
                }
                memcpy(&dst_col[iir0], tmp, (MIN(iir0 + blck_0, ir011) - iir0)*sizeof(float));
            }
        }
    }
}
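
// Note on the threading above: the threads form an nth0 x nth1 grid over
// (src0 rows, src1 rows) - whichever of nr0/nr1 is larger receives all nth
// threads and the other axis gets 1. Rows are then processed in 16x16 blocks,
// with dot products staged in the local tmp buffer to reduce false sharing
// before a single memcpy into dst.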

// ggml_compute_forward_mul_mat_id

static void ggml_compute_forward_mul_mat_id(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        // during GGML_TASK_INIT the entire src1 is converted to vec_dot_type
        ggml_compute_forward_mul_mat(params, dst->src[2], src1, dst, 0, dst->ne[1]);
        return;
    }

    const struct ggml_tensor * ids = src0;
    const int id   = ggml_get_op_params_i32(dst, 0);
    const int n_as = ggml_get_op_params_i32(dst, 1);

    for (int64_t i01 = 0; i01 < ids->ne[1]; i01++) {
        const int32_t row_id = *(const int32_t *) ((const char *) ids->data + i01*ids->nb[1] + id*ids->nb[0]);

        GGML_ASSERT(row_id >= 0 && row_id < n_as);

        const struct ggml_tensor * src0_row = dst->src[row_id + 2];
        ggml_compute_forward_mul_mat(params, src0_row, src1, dst, i01, 1);
    }
}
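
// Note on mul_mat_id above: ids selects, per src1 row, which expert matrix
// (stored in dst->src[row_id + 2]) to use; off1 = i01 and cne1 = 1 restrict
// the regular mul_mat to that single row.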

// ggml_compute_forward_out_prod

static void ggml_compute_forward_out_prod_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    // int64_t t0 = ggml_perf_time_us();
    // UNUSED(t0);

    GGML_TENSOR_BINARY_OP_LOCALS

    const int ith = params->ith;
    const int nth = params->nth;

    GGML_ASSERT(ne0  == ne00);
    GGML_ASSERT(ne1  == ne10);
    GGML_ASSERT(ne2  == ne02);
    GGML_ASSERT(ne02 == ne12);
    GGML_ASSERT(ne3  == ne13);
    GGML_ASSERT(ne03 == ne13);

    // we don't support permuted src0 or src1
    GGML_ASSERT(nb00 == sizeof(float));

    // dst cannot be transposed or permuted
    GGML_ASSERT(nb0 == sizeof(float));
    // GGML_ASSERT(nb0 <= nb1);
    // GGML_ASSERT(nb1 <= nb2);
    // GGML_ASSERT(nb2 <= nb3);

    // nb01 >= nb00 - src0 is not transposed
    //   compute by src0 rows

    // TODO: #if defined(GGML_USE_CUBLAS) ggml_cuda_out_prod
    // TODO: #if defined(GGML_USE_CLBLAST)

#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
    bool use_blas = ggml_is_matrix(src0) &&
        ggml_is_matrix(src1) &&
        ggml_is_contiguous(src0) &&
        (ggml_is_contiguous(src1) || ggml_is_transposed(src1));
#endif

    if (params->type == GGML_TASK_INIT) {
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) // gemm beta will zero dst
        if (use_blas) {
            return;
        }
#endif
        ggml_vec_set_f32(ne0*ne1*ne2*ne3, dst->data, 0);
        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
    if (use_blas) {
        if (params->ith != 0) { // All threads other than the first do no work.
            return;
        }
        // Arguments to ggml_compute_forward_out_prod (expressed as major,minor)
        // src0: (k,n)
        // src1: (k,m)
        // dst:  (m,n)
        //
        // Arguments to sgemm (see https://github.com/Reference-LAPACK/lapack/blob/master/BLAS/SRC/sgemm.f)
        // Also expressed as (major,minor)
        // a: (m,k): so src1 transposed
        // b: (k,n): so src0
        // c: (m,n)
        //
        // However, if ggml_is_transposed(src1) is true, then
        // src1->data already contains a transposed version, so sgemm mustn't
        // transpose it further.

        int n = src0->ne[0];
        int k = src0->ne[1];
        int m = src1->ne[0];

        int transposeA, lda;

        if (!ggml_is_transposed(src1)) {
            transposeA = CblasTrans;
            lda = m;
        } else {
            transposeA = CblasNoTrans;
            lda = k;
        }

        float * a = (float *) ((char *) src1->data);
        float * b = (float *) ((char *) src0->data);
        float * c = (float *) ((char *) dst->data);

        cblas_sgemm(CblasRowMajor, transposeA, CblasNoTrans, m, n, k, 1.0, a, lda, b, n, 0.0, c, n);

        return;
    }
#endif

    // dst[:,:,:,:] = 0
    // for i2,i3:
    //   for i1:
    //     for i01:
    //       for i0:
    //         dst[i0,i1,i2,i3] += src0[i0,i01,i2,i3] * src1[i1,i01,i2,i3]

    // parallelize by last three dimensions

    // total rows in dst
    const int64_t nr = ne1*ne2*ne3;

    // rows per thread
    const int64_t dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int64_t ir0 = dr*ith;
    const int64_t ir1 = MIN(ir0 + dr, nr);

    // block-tiling attempt
    const int64_t blck_0 = MAX(GGML_VEC_MAD_UNROLL, 32);
    const int64_t blck_1 = 16;

    for (int64_t bir = ir0; bir < ir1; bir += blck_1) {
        const int64_t bir1 = MIN(bir + blck_1, ir1);
        for (int64_t bi01 = 0; bi01 < ne01; bi01 += blck_0) {
            const int64_t bne01 = MIN(bi01 + blck_0, ne01);
            for (int64_t ir = bir; ir < bir1; ++ir) {
                // dst indices
                const int64_t i3 = ir/(ne2*ne1);
                const int64_t i2 = (ir - i3*ne2*ne1)/ne1;
                const int64_t i1 = (ir - i3*ne2*ne1 - i2*ne1);

                const int64_t i02 = i2;
                const int64_t i03 = i3;

                //const int64_t i10 = i1;
                const int64_t i12 = i2;
                const int64_t i13 = i3;

#if GGML_VEC_MAD_UNROLL > 2
                const int64_t bne01_unroll = bne01 - (bne01 % GGML_VEC_MAD_UNROLL);
                for (int64_t i01 = bi01; i01 < bne01_unroll; i01 += GGML_VEC_MAD_UNROLL) {
                    const int64_t i11 = i01;

                    float * s0 = (float *) ((char *) src0->data + (          i01*nb01 + i02*nb02 + i03*nb03));
                    float * s1 = (float *) ((char *) src1->data + (i1*nb10 + i11*nb11 + i12*nb12 + i13*nb13));
                    float * d  = (float *) ((char *)  dst->data + (          i1*nb1  + i2*nb2   + i3*nb3));

                    ggml_vec_mad_f32_unroll(ne0, nb01, nb11, d, s0, s1);
                }
                for (int64_t i01 = bne01_unroll; i01 < bne01; ++i01) {
                    const int64_t i11 = i01;

                    float * s0 = (float *) ((char *) src0->data + (          i01*nb01 + i02*nb02 + i03*nb03));
                    float * s1 = (float *) ((char *) src1->data + (i1*nb10 + i11*nb11 + i12*nb12 + i13*nb13));
                    float * d  = (float *) ((char *)  dst->data + (          i1*nb1  + i2*nb2   + i3*nb3));

                    ggml_vec_mad_f32(ne0, d, s0, *s1);
                }
#else
                for (int64_t i01 = bi01; i01 < bne01; ++i01) {
                    const int64_t i11 = i01;

                    float * s0 = (float *) ((char *) src0->data + (          i01*nb01 + i02*nb02 + i03*nb03));
                    float * s1 = (float *) ((char *) src1->data + (i1*nb10 + i11*nb11 + i12*nb12 + i13*nb13));
                    float * d  = (float *) ((char *)  dst->data + (          i1*nb1  + i2*nb2   + i3*nb3));

                    ggml_vec_mad_f32(ne0, d, s0, *s1);
                }
#endif
            }
        }
    }

    //int64_t t1 = ggml_perf_time_us();
    //static int64_t acc = 0;
    //acc += t1 - t0;
    //if (t1 - t0 > 10) {
    //    printf("\n");
    //    printf("ne00 = %5d, ne01 = %5d, ne02 = %5d, ne03 = %5d\n", ne00, ne01, ne02, ne03);
    //    printf("nb00 = %5d, nb01 = %5d, nb02 = %5d, nb03 = %5d\n", nb00, nb01, nb02, nb03);
    //    printf("ne10 = %5d, ne11 = %5d, ne12 = %5d, ne13 = %5d\n", ne10, ne11, ne12, ne13);
    //    printf("nb10 = %5d, nb11 = %5d, nb12 = %5d, nb13 = %5d\n", nb10, nb11, nb12, nb13);
    //    printf("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX task %d/%d: %d us, acc = %d\n", ith, nth, (int) (t1 - t0), (int) acc);
    //}
}
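
// Note on the non-BLAS path above: the outer product is accumulated as
// rank-1 updates, dst[i0,i1] += src0[i0,i01] * src1[i1,i01], using
// ggml_vec_mad_f32 (unrolled when GGML_VEC_MAD_UNROLL > 2).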

static void ggml_compute_forward_out_prod_q_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    // int64_t t0 = ggml_perf_time_us();
    // UNUSED(t0);

    GGML_TENSOR_BINARY_OP_LOCALS;

    const int ith = params->ith;
    const int nth = params->nth;

    const enum ggml_type type = src0->type;
    ggml_to_float_t const dequantize_row_q = type_traits[type].to_float;

    GGML_ASSERT(ne02 == ne12);
    GGML_ASSERT(ne03 == ne13);
    GGML_ASSERT(ne2  == ne12);
    GGML_ASSERT(ne3  == ne13);

    // we don't support permuted src0 dim0
    GGML_ASSERT(nb00 == ggml_type_size(type));

    // dst dim0 cannot be transposed or permuted
    GGML_ASSERT(nb0 == sizeof(float));
    // GGML_ASSERT(nb0 <= nb1);
    // GGML_ASSERT(nb1 <= nb2);
    // GGML_ASSERT(nb2 <= nb3);

    GGML_ASSERT(ne0 == ne00);
    GGML_ASSERT(ne1 == ne10);
    GGML_ASSERT(ne2 == ne02);
    GGML_ASSERT(ne3 == ne03);

    // nb01 >= nb00 - src0 is not transposed
    //   compute by src0 rows

    // TODO: #if defined(GGML_USE_CUBLAS) ggml_cuda_out_prod
    // TODO: #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) || defined(GGML_USE_CLBLAST)

    if (params->type == GGML_TASK_INIT) {
        ggml_vec_set_f32(ne0*ne1*ne2*ne3, dst->data, 0);
        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // parallelize by last three dimensions

    // total rows in dst
    const int64_t nr = ne1*ne2*ne3;

    // rows per thread
    const int64_t dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int64_t ir0 = dr*ith;
    const int64_t ir1 = MIN(ir0 + dr, nr);

    // dst[:,:,:,:] = 0
    // for i2,i3:
    //   for i1:
    //     for i01:
    //       for i0:
    //         dst[i0,i1,i2,i3] += src0[i0,i01,i2,i3] * src1[i1,i01,i2,i3]

    float * wdata = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32) * ith;

    for (int64_t ir = ir0; ir < ir1; ++ir) {
        // dst indices
        const int64_t i3 = ir/(ne2*ne1);
        const int64_t i2 = (ir - i3*ne2*ne1)/ne1;
        const int64_t i1 = (ir - i3*ne2*ne1 - i2*ne1);

        const int64_t i02 = i2;
        const int64_t i03 = i3;

        //const int64_t i10 = i1;
        const int64_t i12 = i2;
        const int64_t i13 = i3;

        for (int64_t i01 = 0; i01 < ne01; ++i01) {
            const int64_t i11 = i01;

            float * s0 = (float *) ((char *) src0->data + (          i01*nb01 + i02*nb02 + i03*nb03));
            float * s1 = (float *) ((char *) src1->data + (i1*nb10 + i11*nb11 + i12*nb12 + i13*nb13));
            float * d  = (float *) ((char *)  dst->data + (          i1*nb1  + i2*nb2   + i3*nb3));

            dequantize_row_q(s0, wdata, ne0);
            ggml_vec_mad_f32(ne0, d, wdata, *s1);
        }
    }

    //int64_t t1 = ggml_perf_time_us();
    //static int64_t acc = 0;
    //acc += t1 - t0;
    //if (t1 - t0 > 10) {
    //    printf("\n");
    //    printf("ne00 = %5d, ne01 = %5d, ne02 = %5d, ne03 = %5d\n", ne00, ne01, ne02, ne03);
    //    printf("nb00 = %5d, nb01 = %5d, nb02 = %5d, nb03 = %5d\n", nb00, nb01, nb02, nb03);
    //    printf("ne10 = %5d, ne11 = %5d, ne12 = %5d, ne13 = %5d\n", ne10, ne11, ne12, ne13);
    //    printf("nb10 = %5d, nb11 = %5d, nb12 = %5d, nb13 = %5d\n", nb10, nb11, nb12, nb13);
    //    printf("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX task %d/%d: %d us, acc = %d\n", ith, nth, (int) (t1 - t0), (int) acc);
    //}
}
static void ggml_compute_forward_out_prod(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
            {
                ggml_compute_forward_out_prod_q_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F16:
            {
                GGML_ASSERT(false); // todo
                // ggml_compute_forward_out_prod_f16_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_out_prod_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
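
// Illustrative sketch, not part of ggml's API: a naive out_prod reference for
// contiguous f32 buffers using the same row-major layout as the kernels above
// (element (i0, i1) stored at i1*ne0 + i0). Handy as a test oracle against the
// blocked/unrolled f32 path.
static void ggml_out_prod_ref_f32(
        const int64_t ne0,   // dst row length    (= src0 row length)
        const int64_t ne1,   // number of dst rows (= src1 row length)
        const int64_t k,     // shared dimension   (ne01 == ne11)
        const float * src0,  // k rows of length ne0
        const float * src1,  // k rows of length ne1
              float * dst) { // ne1 rows of length ne0
    for (int64_t i = 0; i < ne0*ne1; ++i) {
        dst[i] = 0.0f;
    }
    for (int64_t i1 = 0; i1 < ne1; ++i1) {
        for (int64_t i01 = 0; i01 < k; ++i01) {
            // dst[i0,i1] += src0[i0,i01] * src1[i1,i01], i.e. dst = src0 * src1^T
            const float s1 = src1[i01*ne1 + i1];
            for (int64_t i0 = 0; i0 < ne0; ++i0) {
                dst[i1*ne0 + i0] += src0[i01*ne0 + i0] * s1;
            }
        }
    }
}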
// ggml_compute_forward_scale

static void ggml_compute_forward_scale_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_is_contiguous(src0));
    GGML_ASSERT(ggml_is_contiguous(dst));
    GGML_ASSERT(ggml_are_same_shape(src0, dst));
    GGML_ASSERT(ggml_is_scalar(src1));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // scale factor
    const float v = *(float *) src1->data;

    const int ith = params->ith;
    const int nth = params->nth;

    const int nc = src0->ne[0];
    const int nr = ggml_nrows(src0);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);
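    // e.g. nr = 10 rows on nth = 4 threads: dr = ceil(10/4) = 3, so the threads
    // cover the half-open ranges [0,3), [3,6), [6,9), [9,10) - the MIN clamps
    // the last thread's range to nr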
    const size_t nb01 = src0->nb[1];
    const size_t nb1  =  dst->nb[1];

    for (int i1 = ir0; i1 < ir1; i1++) {
        if (dst->data != src0->data) {
            // src0 is same shape as dst => same indices
            memcpy((char *)dst->data + i1*nb1, (char *)src0->data + i1*nb01, nc * sizeof(float));
        }
        ggml_vec_scale_f32(nc, (float *) ((char *) dst->data + i1*nb1), v);
    }
}

static void ggml_compute_forward_scale(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_scale_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_set

static void ggml_compute_forward_set_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, dst));
    GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));
    // view src0 and dst with these strides and data offset in bytes during set
    // nb0 is implicitly element_size because src0 and dst are contiguous
    size_t nb1     = ((int32_t *) dst->op_params)[0];
    size_t nb2     = ((int32_t *) dst->op_params)[1];
    size_t nb3     = ((int32_t *) dst->op_params)[2];
    size_t offset  = ((int32_t *) dst->op_params)[3];
    bool   inplace = (bool) ((int32_t *) dst->op_params)[4];

    if (!inplace && (params->type == GGML_TASK_INIT)) {
        // memcpy needs to be synchronized across threads to avoid race conditions.
        // => do it in INIT phase
        memcpy(
            ((char *)  dst->data),
            ((char *) src0->data),
            ggml_nbytes(dst));
    }

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(src1);
    const int nc = src1->ne[0];

    GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne)
    GGML_TENSOR_LOCALS(size_t,  nb1, src1, nb)

    // src0 and dst as viewed during set
    const size_t nb0 = ggml_element_size(src0);

    const int im0 = (ne10 == 0 ? 0 : ne10-1);
    const int im1 = (ne11 == 0 ? 0 : ne11-1);
    const int im2 = (ne12 == 0 ? 0 : ne12-1);
    const int im3 = (ne13 == 0 ? 0 : ne13-1);

    GGML_ASSERT(offset + im0*nb0 + im1*nb1 + im2*nb2 + im3*nb3 <= ggml_nbytes(dst));

    GGML_ASSERT(nb10 == sizeof(float));

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int ir = ir0; ir < ir1; ++ir) {
        // src0 and dst are viewed with shape of src1 and offset
        // => same indices
        const int i3 = ir/(ne12*ne11);
        const int i2 = (ir - i3*ne12*ne11)/ne11;
        const int i1 = (ir - i3*ne12*ne11 - i2*ne11);

        ggml_vec_cpy_f32(nc,
                (float *) ((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + offset),
                (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11));
    }
}
static void ggml_compute_forward_set(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_set_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F16:
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q8_1:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_cpy

static void ggml_compute_forward_cpy(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    ggml_compute_forward_dup(params, src0, dst);
}

// ggml_compute_forward_cont

static void ggml_compute_forward_cont(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    ggml_compute_forward_dup(params, src0, dst);
}

// ggml_compute_forward_reshape

static void ggml_compute_forward_reshape(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    // NOP
    UNUSED(params);
    UNUSED(src0);
    UNUSED(dst);
}

// ggml_compute_forward_view

static void ggml_compute_forward_view(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0) {
    // NOP
    UNUSED(params);
    UNUSED(src0);
}

// ggml_compute_forward_permute

static void ggml_compute_forward_permute(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0) {
    // NOP
    UNUSED(params);
    UNUSED(src0);
}

// ggml_compute_forward_transpose

static void ggml_compute_forward_transpose(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0) {
    // NOP
    UNUSED(params);
    UNUSED(src0);
}
// ggml_compute_forward_get_rows

static void ggml_compute_forward_get_rows_q(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    GGML_TENSOR_BINARY_OP_LOCALS

    const int64_t nc = ne00;
    const int64_t nr = ggml_nelements(src1); GGML_UNUSED(nr);

    const enum ggml_type type = src0->type;
    ggml_to_float_t const dequantize_row_q = type_traits[type].to_float;

    assert(ne0  == nc);
    assert(ne02 == ne11);
    assert(nb00 == ggml_type_size(type));
    assert(ggml_nrows(dst) == nr);

    // TODO: multi-thread
    for (int64_t i12 = 0; i12 < ne12; ++i12) {
        for (int64_t i11 = 0; i11 < ne11; ++i11) {
            for (int64_t i10 = 0; i10 < ne10; ++i10) {
                const int64_t i01 = *(int32_t *) ((char *) src1->data + i10*nb10 + i11*nb11 + i12*nb12);

                dequantize_row_q(
                        (const void *) ((char *) src0->data + i01*nb01 + i11*nb02 + i12*nb03),
                             (float *) ((char *)  dst->data + i10*nb1  + i11*nb2  + i12*nb3), nc);
            }
        }
    }
}

static void ggml_compute_forward_get_rows_f16(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    GGML_TENSOR_BINARY_OP_LOCALS

    const int64_t nc = ne00;
    const int64_t nr = ggml_nelements(src1); GGML_UNUSED(nr);

    assert(ne0  == nc);
    assert(ne02 == ne11);
    assert(nb00 == sizeof(ggml_fp16_t));
    assert(ggml_nrows(dst) == nr);

    // TODO: multi-thread
    for (int64_t i12 = 0; i12 < ne12; ++i12) {
        for (int64_t i11 = 0; i11 < ne11; ++i11) {
            for (int64_t i10 = 0; i10 < ne10; ++i10) {
                const int64_t i01 = *(int32_t *) ((char *) src1->data + i10*nb10 + i11*nb11 + i12*nb12);

                ggml_fp16_to_fp32_row(
                        (const void *) ((char *) src0->data + i01*nb01 + i11*nb02 + i12*nb03),
                             (float *) ((char *)  dst->data + i10*nb1  + i11*nb2  + i12*nb3), nc);
            }
        }
    }
}

static void ggml_compute_forward_get_rows_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    GGML_TENSOR_BINARY_OP_LOCALS

    const int64_t nc = ne00;
    const int64_t nr = ggml_nelements(src1); GGML_UNUSED(nr);

    assert(ne0  == nc);
    assert(ne02 == ne11);
    assert(nb00 == sizeof(float));
    assert(ggml_nrows(dst) == nr);

    // TODO: multi-thread
    for (int64_t i12 = 0; i12 < ne12; ++i12) {
        for (int64_t i11 = 0; i11 < ne11; ++i11) {
            for (int64_t i10 = 0; i10 < ne10; ++i10) {
                const int64_t i01 = *(int32_t *) ((char *) src1->data + i10*nb10 + i11*nb11 + i12*nb12);

                ggml_vec_cpy_f32(nc,
                        (float *) ((char *)  dst->data + i10*nb1  + i11*nb2  + i12*nb3),
                        (float *) ((char *) src0->data + i01*nb01 + i11*nb02 + i12*nb03));
            }
        }
    }
}
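
// get_rows: src1 holds int32 row indices into src0, e.g. src1 = [5, 2, 2]
// copies rows 5, 2 and 2 of src0 into rows 0, 1 and 2 of dst (dequantized
// to f32 where needed)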
static void ggml_compute_forward_get_rows(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q8_1:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
            {
                ggml_compute_forward_get_rows_q(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_get_rows_f16(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_get_rows_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }

    //static bool first = true;
    //printf("ne0 = %d, ne1 = %d, ne2 = %d\n", dst->ne[0], dst->ne[1], dst->ne[2]);
    //if (first) {
    //    first = false;
    //} else {
    //    for (int k = 0; k < dst->ne[1]; ++k) {
    //        for (int j = 0; j < dst->ne[0]/16; ++j) {
    //            for (int i = 0; i < 16; ++i) {
    //                printf("%8.4f ", ((float *) dst->data)[k*dst->ne[0] + j*16 + i]);
    //            }
    //            printf("\n");
    //        }
    //        printf("\n");
    //    }
    //    printf("\n");
    //    exit(0);
    //}
}
// ggml_compute_forward_get_rows_back

static void ggml_compute_forward_get_rows_back_f32_f16(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(params->ith == 0);
    GGML_ASSERT(ggml_is_contiguous(dst));

    // ggml_compute_forward_dup_same_cont(params, opt0, dst);

    if (params->type == GGML_TASK_INIT) {
        memset(dst->data, 0, ggml_nbytes(dst));
    }

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int nc = src0->ne[0];
    const int nr = ggml_nelements(src1);

    GGML_ASSERT( dst->ne[0] == nc);
    GGML_ASSERT(src0->nb[0] == sizeof(ggml_fp16_t));

    for (int i = 0; i < nr; ++i) {
        const int r = ((int32_t *) src1->data)[i];

        for (int j = 0; j < nc; ++j) {
            ggml_fp16_t v = ((ggml_fp16_t *) ((char *) src0->data + i*src0->nb[1]))[j];
            ((float *) ((char *) dst->data + r*dst->nb[1]))[j] += GGML_FP16_TO_FP32(v);
        }
    }
}

static void ggml_compute_forward_get_rows_back_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(params->ith == 0);
    GGML_ASSERT(ggml_is_contiguous(dst));

    // ggml_compute_forward_dup_same_cont(params, opt0, dst);

    if (params->type == GGML_TASK_INIT) {
        memset(dst->data, 0, ggml_nbytes(dst));
    }

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int nc = src0->ne[0];
    const int nr = ggml_nelements(src1);

    GGML_ASSERT( dst->ne[0] == nc);
    GGML_ASSERT(src0->nb[0] == sizeof(float));

    for (int i = 0; i < nr; ++i) {
        const int r = ((int32_t *) src1->data)[i];

        ggml_vec_add_f32(nc,
                (float *) ((char *)  dst->data + r*dst->nb[1]),
                (float *) ((char *)  dst->data + r*dst->nb[1]),
                (float *) ((char *) src0->data + i*src0->nb[1]));
    }
}
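
// get_rows_back is the adjoint of get_rows: row i of the incoming gradient
// (src0) is scatter-added into dst row r = src1[i], and repeated indices
// accumulate - e.g. src1 = [2, 0, 2] adds src0 rows 0 and 2 into dst row 2
// and src0 row 1 into dst row 0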
static void ggml_compute_forward_get_rows_back(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_get_rows_back_f32_f16(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_get_rows_back_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }

    //static bool first = true;
    //printf("ne0 = %d, ne1 = %d, ne2 = %d\n", dst->ne[0], dst->ne[1], dst->ne[2]);
    //if (first) {
    //    first = false;
    //} else {
    //    for (int k = 0; k < dst->ne[1]; ++k) {
    //        for (int j = 0; j < dst->ne[0]/16; ++j) {
    //            for (int i = 0; i < 16; ++i) {
    //                printf("%8.4f ", ((float *) dst->data)[k*dst->ne[0] + j*16 + i]);
    //            }
    //            printf("\n");
    //        }
    //        printf("\n");
    //    }
    //    printf("\n");
    //    exit(0);
    //}
}

// ggml_compute_forward_diag

static void ggml_compute_forward_diag_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // TODO: handle transposed/permuted matrices

    GGML_TENSOR_UNARY_OP_LOCALS

    GGML_ASSERT(ne00 == ne0);
    GGML_ASSERT(ne00 == ne1);
    GGML_ASSERT(ne01 == 1);
    GGML_ASSERT(ne02 == ne2);
    GGML_ASSERT(ne03 == ne3);

    GGML_ASSERT(nb00 == sizeof(float));
    GGML_ASSERT(nb0  == sizeof(float));

    for (int i3 = 0; i3 < ne3; i3++) {
        for (int i2 = 0; i2 < ne2; i2++) {
            for (int i1 = 0; i1 < ne1; i1++) {
                float * d = (float *)((char *)  dst->data + i3*nb3  + i2*nb2 + i1*nb1);
                float * s = (float *)((char *) src0->data + i3*nb03 + i2*nb02);
                for (int i0 = 0; i0 < i1; i0++) {
                    d[i0] = 0;
                }
                d[i1] = s[i1];
                for (int i0 = i1+1; i0 < ne0; i0++) {
                    d[i0] = 0;
                }
            }
        }
    }
}
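
// diag places each length-ne0 row of src0 (ne01 == 1) on the diagonal of an
// ne0 x ne0 matrix, e.g. the row [a, b, c] becomes
//   [a 0 0]
//   [0 b 0]
//   [0 0 c]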
static void ggml_compute_forward_diag(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_diag_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_diag_mask_inf

static void ggml_compute_forward_diag_mask_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst,
        const float value) {
    const int ith = params->ith;
    const int nth = params->nth;

    const int  n_past  = ((int32_t *) dst->op_params)[0];
    const bool inplace = src0->data == dst->data;

    GGML_ASSERT(n_past >= 0);

    if (!inplace && (params->type == GGML_TASK_INIT)) {
        // memcpy needs to be synchronized across threads to avoid race conditions.
        // => do it in INIT phase
        GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));
        GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));
        memcpy(
            ((char *)  dst->data),
            ((char *) src0->data),
            ggml_nbytes(dst));
    }

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // TODO: handle transposed/permuted matrices

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];
    const int nr = src0->ne[1];
    const int nz = n/nr;

    GGML_ASSERT( dst->nb[0] == sizeof(float));
    GGML_ASSERT(src0->nb[0] == sizeof(float));

    for (int k = 0; k < nz; k++) {
        for (int j = ith; j < nr; j += nth) {
            for (int i = n_past; i < nc; i++) {
                if (i > n_past + j) {
                    *(float *)((char *) dst->data + k*dst->nb[2] + j*dst->nb[1] + i*dst->nb[0]) = value;
                }
            }
        }
    }
}
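
// with n_past = 0 and value = -INF this produces the causal attention mask:
// row j keeps columns 0..j and masks everything to the right, e.g. for a
// 4 x 4 tile:
//   [x -INF -INF -INF]
//   [x   x  -INF -INF]
//   [x   x    x  -INF]
//   [x   x    x    x ]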
static void ggml_compute_forward_diag_mask_inf(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_diag_mask_f32(params, src0, dst, -INFINITY);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

static void ggml_compute_forward_diag_mask_zero(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_diag_mask_f32(params, src0, dst, 0);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_soft_max

static void ggml_compute_forward_soft_max_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    assert(ggml_is_contiguous(dst));
    assert(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    float scale = 1.0f;
    memcpy(&scale, (float *) dst->op_params + 0, sizeof(float));

    // TODO: handle transposed/permuted matrices

    const int ith = params->ith;
    const int nth = params->nth;

    const int64_t ne11 = src1 ? src1->ne[1] : 1;

    const int nc = src0->ne[0];
    const int nr = ggml_nrows(src0);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    float * wp = (float *) params->wdata + (nc + CACHE_LINE_SIZE_F32) * ith;

    for (int i1 = ir0; i1 < ir1; i1++) {
        float * sp = (float *)((char *) src0->data + i1*src0->nb[1]);
        float * dp = (float *)((char *)  dst->data + i1*dst->nb[1]);

        // broadcast the mask across rows
        float * mp = src1 ? (float *)((char *) src1->data + (i1%ne11)*src1->nb[1]) : NULL;

        ggml_vec_cpy_f32  (nc, wp, sp);
        ggml_vec_scale_f32(nc, wp, scale);
        if (mp) {
            ggml_vec_acc_f32(nc, wp, mp);
        }

#ifndef NDEBUG
        for (int i = 0; i < nc; ++i) {
            //printf("p[%d] = %f\n", i, p[i]);
            assert(!isnan(wp[i]));
        }
#endif

        float max = -INFINITY;
        ggml_vec_max_f32(nc, &max, wp);

        ggml_float sum = 0.0;

        uint16_t scvt;
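        // exp() via lookup: the bit pattern of the f16-converted argument below
        // indexes ggml_table_exp_f16, which caches exp(x) for every one of the
        // 65536 possible f16 values - cheaper than calling expf() per element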
        for (int i = 0; i < nc; i++) {
            if (wp[i] == -INFINITY) {
                dp[i] = 0.0f;
            } else {
                // const float val = (wp[i] == -INFINITY) ? 0.0 : exp(wp[i] - max);
                ggml_fp16_t s = GGML_FP32_TO_FP16(wp[i] - max);
                memcpy(&scvt, &s, sizeof(scvt));
                const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt]);
                sum += (ggml_float)val;
                dp[i] = val;
            }
        }

        assert(sum > 0.0);

        sum = 1.0/sum;
        ggml_vec_scale_f32(nc, dp, sum);

#ifndef NDEBUG
        for (int i = 0; i < nc; ++i) {
            assert(!isnan(dp[i]));
            assert(!isinf(dp[i]));
        }
#endif
    }
}
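
// Illustrative sketch, not part of ggml: the same numerically stable softmax
// in plain C, without the scale/mask preamble, threading, or the fp16 exp
// table - e.g. as a test oracle for ggml_compute_forward_soft_max_f32.
static void ggml_soft_max_ref_f32(const int n, float * dst, const float * src) {
    float max = -INFINITY;
    for (int i = 0; i < n; ++i) {
        max = MAX(max, src[i]);
    }

    // exp(x - max) keeps the largest exponent at 0 and avoids overflow;
    // expf(-INFINITY) == 0, which matches the masked branch above
    double sum = 0.0;
    for (int i = 0; i < n; ++i) {
        dst[i] = expf(src[i] - max);
        sum += dst[i];
    }

    for (int i = 0; i < n; ++i) {
        dst[i] /= (float) sum;
    }
}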
static void ggml_compute_forward_soft_max(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_soft_max_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_soft_max_back

static void ggml_compute_forward_soft_max_back_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_is_contiguous(src0));
    GGML_ASSERT(ggml_is_contiguous(src1));
    GGML_ASSERT(ggml_is_contiguous(dst));
    GGML_ASSERT(ggml_are_same_shape(src0, dst));
    GGML_ASSERT(ggml_are_same_shape(src1, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // TODO: handle transposed/permuted matrices

    const int ith = params->ith;
    const int nth = params->nth;

    const int nc = src0->ne[0];
    const int nr = ggml_nrows(src0);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        float * dy = (float *)((char *) src0->data + i1*src0->nb[1]);
        float * y  = (float *)((char *) src1->data + i1*src1->nb[1]);
        float * dx = (float *)((char *)  dst->data + i1*dst->nb[1]);

#ifndef NDEBUG
        for (int i = 0; i < nc; ++i) {
            //printf("p[%d] = %f\n", i, p[i]);
            assert(!isnan(dy[i]));
            assert(!isnan(y[i]));
        }
#endif

        // Jii = yi - yi*yi
        // Jij = -yi*yj
        // J = diag(y)-y.T*y
        // dx = J * dy
        // dxk = sum_i(Jki * dyi)
        // dxk = sum_i(-yk*yi * dyi) - (-yk*yk)*dyk + (yk - yk*yk)*dyk
        // dxk = sum_i(-yk*yi * dyi) + yk*yk*dyk + yk*dyk - yk*yk*dyk
        // dxk = sum_i(-yk*yi * dyi) + yk*dyk
        // dxk = -yk * sum_i(yi * dyi) + yk*dyk
        // dxk = -yk * dot(y, dy) + yk*dyk
        // dxk = yk * (- dot(y, dy) + dyk)
        // dxk = yk * (dyk - dot(y, dy))
        //
        // post-order:
        // dot_y_dy := dot(y, dy)
        // dx := dy
        // dx := dx - dot_y_dy
        // dx := dx * y

        // linear runtime, no additional memory
        float dot_y_dy = 0;
        ggml_vec_dot_f32 (nc, &dot_y_dy, y, dy);
        ggml_vec_cpy_f32 (nc, dx, dy);
        ggml_vec_acc1_f32(nc, dx, -dot_y_dy);
        ggml_vec_mul_f32 (nc, dx, dx, y);

#ifndef NDEBUG
        for (int i = 0; i < nc; ++i) {
            assert(!isnan(dx[i]));
            assert(!isinf(dx[i]));
        }
#endif
    }
}

static void ggml_compute_forward_soft_max_back(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_soft_max_back_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_alibi

static void ggml_compute_forward_alibi_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    //const int n_past = ((int32_t *) dst->op_params)[0];
    const int n_head = ((int32_t *) dst->op_params)[1];
    float max_bias;
    memcpy(&max_bias, (int32_t *) dst->op_params + 2, sizeof(float));

    const int64_t ne0 = src0->ne[0]; // all_seq_len = n_past + ne1
    const int64_t ne1 = src0->ne[1]; // seq_len_without_past
    const int64_t ne2 = src0->ne[2]; // n_head -> this is k
    //const int64_t ne3 = src0->ne[3]; // 1 -> bsz

    const int64_t n       = ggml_nrows(src0);
    const int64_t ne2_ne3 = n/ne1; // ne2*ne3

    const size_t nb0 = src0->nb[0];
    const size_t nb1 = src0->nb[1];
    const size_t nb2 = src0->nb[2];
    //const int nb3 = src0->nb[3];

    GGML_ASSERT(nb0 == sizeof(float));
    GGML_ASSERT(n_head == ne2);

    // add alibi to src0 (KQ_scaled)
    const int n_heads_log2_floor = 1 << (int) floor(log2(n_head));

    const float m0 = powf(2.0f, -(max_bias) / n_heads_log2_floor);
    const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_heads_log2_floor);
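    // e.g. n_head = 8, max_bias = 8: n_heads_log2_floor = 8, m0 = 2^(-1), so
    // head k gets slope m_k = 2^-(k+1): 1/2, 1/4, ..., 1/256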
    for (int64_t i = 0; i < ne0; i++) {
        for (int64_t j = 0; j < ne1; j++) {
            for (int64_t k = 0; k < ne2_ne3; k++) {
                float * const src = (float *)((char *) src0->data + i*nb0 + j*nb1 + k*nb2);
                float *      pdst = (float *)((char *)  dst->data + i*nb0 + j*nb1 + k*nb2);

                // TODO: k*nb2 or k*nb3

                float m_k;

                if (k < n_heads_log2_floor) {
                    m_k = powf(m0, k + 1);
                } else {
                    m_k = powf(m1, 2 * (k - n_heads_log2_floor) + 1);
                }

                pdst[0] = i * m_k + src[0];
            }
        }
    }
}

static void ggml_compute_forward_alibi_f16(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    //const int n_past = ((int32_t *) dst->op_params)[0];
    const int n_head = ((int32_t *) dst->op_params)[1];
    float max_bias;
    memcpy(&max_bias, (int32_t *) dst->op_params + 2, sizeof(float));

    const int ne0 = src0->ne[0]; // all_seq_len = n_past + ne1
    const int ne1 = src0->ne[1]; // seq_len_without_past
    const int ne2 = src0->ne[2]; // n_head -> this is k
    //const int ne3 = src0->ne[3]; // 1 -> bsz

    const int n       = ggml_nrows(src0);
    const int ne2_ne3 = n/ne1; // ne2*ne3

    const int nb0 = src0->nb[0];
    const int nb1 = src0->nb[1];
    const int nb2 = src0->nb[2];
    //const int nb3 = src0->nb[3];

    GGML_ASSERT(nb0 == sizeof(ggml_fp16_t));
    //GGML_ASSERT(ne1 + n_past == ne0); (void) n_past;
    GGML_ASSERT(n_head == ne2);

    // add alibi to src0 (KQ_scaled)
    const int n_heads_log2_floor = 1 << (int) floor(log2(n_head));

    const float m0 = powf(2.0f, -(max_bias) / n_heads_log2_floor);
    const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_heads_log2_floor);

    for (int i = 0; i < ne0; i++) {
        for (int j = 0; j < ne1; j++) {
            for (int k = 0; k < ne2_ne3; k++) {
                ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i*nb0 + j*nb1 + k*nb2);
                float *            pdst =       (float *)((char *)  dst->data + i*nb0 + j*nb1 + k*nb2);

                // TODO: k*nb2 or k*nb3

                float m_k;

                if (k < n_heads_log2_floor) {
                    m_k = powf(m0, k + 1);
                } else {
                    m_k = powf(m1, 2 * (k - n_heads_log2_floor) + 1);
                }

                // we return F32
                pdst[0] = i * m_k + GGML_FP16_TO_FP32(src[0]);
            }
        }
    }
}

static void ggml_compute_forward_alibi(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_alibi_f16(params, src0, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_alibi_f32(params, src0, dst);
            } break;
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q8_1:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
        case GGML_TYPE_Q8_K:
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_clamp

static void ggml_compute_forward_clamp_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    float min;
    float max;
    memcpy(&min, (float *) dst->op_params + 0, sizeof(float));
    memcpy(&max, (float *) dst->op_params + 1, sizeof(float));

    const int ith = params->ith;
    const int nth = params->nth;

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    const size_t nb00 = src0->nb[0];
    const size_t nb01 = src0->nb[1];

    const size_t nb0 = dst->nb[0];
    const size_t nb1 = dst->nb[1];

    GGML_ASSERT( nb0 == sizeof(float));
    GGML_ASSERT(nb00 == sizeof(float));

    for (int j = ith; j < n; j += nth) {
        float * dst_ptr  = (float *) ((char *)  dst->data + j*nb1);
        float * src0_ptr = (float *) ((char *) src0->data + j*nb01);

        for (int i = 0; i < nc; i++) {
            dst_ptr[i] = MAX(MIN(src0_ptr[i], max), min);
        }
    }
}

static void ggml_compute_forward_clamp(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_clamp_f32(params, src0, dst);
            } break;
        case GGML_TYPE_F16:
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q8_1:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
        case GGML_TYPE_Q8_K:
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_rope

static float rope_yarn_ramp(const float low, const float high, const int i0) {
    const float y = (i0 / 2 - low) / MAX(0.001f, high - low);
    return 1 - MIN(1, MAX(0, y));
}

// YaRN algorithm based on LlamaYaRNScaledRotaryEmbedding.py from https://github.com/jquesnelle/yarn
// MIT licensed. Copyright (c) 2023 Jeffrey Quesnelle and Bowen Peng.
static void rope_yarn(
    float theta_extrap, float freq_scale, float corr_dims[2], int64_t i0, float ext_factor, float mscale,
    float * cos_theta, float * sin_theta
) {
    // Get n-d rotational scaling corrected for extrapolation
    float theta_interp = freq_scale * theta_extrap;
    float theta = theta_interp;
    if (ext_factor != 0.0f) {
        float ramp_mix = rope_yarn_ramp(corr_dims[0], corr_dims[1], i0) * ext_factor;
        theta = theta_interp * (1 - ramp_mix) + theta_extrap * ramp_mix;

        // Get n-d magnitude scaling corrected for interpolation
        mscale *= 1.0f + 0.1f * logf(1.0f / freq_scale);
    }
    *cos_theta = cosf(theta) * mscale;
    *sin_theta = sinf(theta) * mscale;
}

// Apparently solving `n_rot = 2pi * x * base^((2 * max_pos_emb) / n_dims)` for x, we get
// `corr_dim(n_rot) = n_dims * log(max_pos_emb / (n_rot * 2pi)) / (2 * log(base))`
static float ggml_rope_yarn_corr_dim(int n_dims, int n_orig_ctx, float n_rot, float base) {
    return n_dims * logf(n_orig_ctx / (n_rot * 2 * (float)M_PI)) / (2 * logf(base));
}

void ggml_rope_yarn_corr_dims(
    int n_dims, int n_orig_ctx, float freq_base, float beta_fast, float beta_slow, float dims[2]
) {
    // start and end correction dims
    dims[0] = MAX(0,          floorf(ggml_rope_yarn_corr_dim(n_dims, n_orig_ctx, beta_fast, freq_base)));
    dims[1] = MIN(n_dims - 1,  ceilf(ggml_rope_yarn_corr_dim(n_dims, n_orig_ctx, beta_slow, freq_base)));
}
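
// e.g. for n_dims = 128, n_orig_ctx = 2048, freq_base = 10000.0f,
// beta_fast = 32.0f, beta_slow = 1.0f (hypothetical but typical values) this
// comes out to roughly dims = [16, 41]: pairs below index 16 keep the
// extrapolated theta, pairs above 41 use the interpolated one, and
// rope_yarn_ramp blends linearly in between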
static void ggml_compute_forward_rope_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst,
        const bool forward) {
    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow;

    // these two only relevant for xPos RoPE:
    float xpos_base;
    bool  xpos_down;

    //const int n_past     = ((int32_t *) dst->op_params)[0];
    const int n_dims     = ((int32_t *) dst->op_params)[1];
    const int mode       = ((int32_t *) dst->op_params)[2];
    const int n_ctx      = ((int32_t *) dst->op_params)[3];
    const int n_orig_ctx = ((int32_t *) dst->op_params)[4];

    memcpy(&freq_base,   (int32_t *) dst->op_params +  5, sizeof(float));
    memcpy(&freq_scale,  (int32_t *) dst->op_params +  6, sizeof(float));
    memcpy(&ext_factor,  (int32_t *) dst->op_params +  7, sizeof(float));
    memcpy(&attn_factor, (int32_t *) dst->op_params +  8, sizeof(float));
    memcpy(&beta_fast,   (int32_t *) dst->op_params +  9, sizeof(float));
    memcpy(&beta_slow,   (int32_t *) dst->op_params + 10, sizeof(float));
    memcpy(&xpos_base,   (int32_t *) dst->op_params + 11, sizeof(float));
    memcpy(&xpos_down,   (int32_t *) dst->op_params + 12, sizeof(bool));

    GGML_TENSOR_UNARY_OP_LOCALS

    //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
    //printf("n_past = %d, ne2 = %d\n", n_past, ne2);

    GGML_ASSERT(nb00 == sizeof(float));

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(dst);

    GGML_ASSERT(n_dims <= ne0);
    GGML_ASSERT(n_dims % 2 == 0);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    // row index used to determine which thread to use
    int ir = 0;

    const float theta_scale = powf(freq_base, -2.0f/n_dims);
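    // dimension pair i (elements i0 = 2i and 2i+1) is rotated by
    // theta_i = p * freq_base^(-2i/n_dims): theta_base starts at the position p
    // and is multiplied by theta_scale once per pair, giving the standard RoPE
    // frequency schedule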
    const float inv_ndims = -1.f/n_dims;
    float corr_dims[2];
    ggml_rope_yarn_corr_dims(n_dims, n_orig_ctx, freq_base, beta_fast, beta_slow, corr_dims);

    const bool is_neox = mode & 2;
    const bool is_glm  = mode & 4;

    // backward process uses inverse rotation by cos and sin.
    // cos and sin build a rotation matrix, where the inverse is the transpose.
    // this essentially just switches the sign of sin.
    const float sin_sign = forward ? 1.0f : -1.0f;

    const int32_t * pos = (const int32_t *) src1->data;

    for (int64_t i3 = 0; i3 < ne3; i3++) {
        for (int64_t i2 = 0; i2 < ne2; i2++) {
            const int64_t p = pos[i2];
            for (int64_t i1 = 0; i1 < ne1; i1++) {
                if (ir++ < ir0) continue;
                if (ir   > ir1) break;

                float theta_base = (float)p;

                if (is_glm) {
                    theta_base = MIN(p, n_ctx - 2);
                    float block_theta = MAX(p - (n_ctx - 2), 0);
                    for (int64_t i0 = 0; i0 < ne0 / 4; i0++) {
                        const float cos_theta = cosf(theta_base);
                        const float sin_theta = sinf(theta_base) * sin_sign;
                        const float cos_block_theta = cosf(block_theta);
                        const float sin_block_theta = sinf(block_theta) * sin_sign;

                        theta_base  *= theta_scale;
                        block_theta *= theta_scale;

                        const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
                              float * dst_data  = (float *)((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + i0*nb0);

                        const float x0 = src[0];
                        const float x1 = src[n_dims/2];
                        const float x2 = src[n_dims];
                        const float x3 = src[n_dims/2*3];

                        dst_data[0]          = x0*cos_theta - x1*sin_theta;
                        dst_data[n_dims/2]   = x0*sin_theta + x1*cos_theta;
                        dst_data[n_dims]     = x2*cos_block_theta - x3*sin_block_theta;
                        dst_data[n_dims/2*3] = x2*sin_block_theta + x3*cos_block_theta;
                    }
                } else if (!is_neox) {
                    for (int64_t i0 = 0; i0 < ne0; i0 += 2) {
                        float cos_theta, sin_theta;
                        rope_yarn(
                            theta_base, freq_scale, corr_dims, i0, ext_factor, attn_factor, &cos_theta, &sin_theta
                        );
                        sin_theta *= sin_sign;

                        // zeta scaling for xPos only:
                        float zeta = xpos_base != 0.0f ? powf((i0 + 0.4f * ne0) / (1.4f * ne0), p / xpos_base) : 1.0f;
                        if (xpos_down) zeta = 1.0f / zeta;

                        theta_base *= theta_scale;

                        const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
                              float * dst_data  = (float *)((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + i0*nb0);

                        const float x0 = src[0];
                        const float x1 = src[1];

                        dst_data[0] = x0*cos_theta*zeta - x1*sin_theta*zeta;
                        dst_data[1] = x0*sin_theta*zeta + x1*cos_theta*zeta;
                    }
                } else {
                    // TODO: this might be wrong for ne0 != n_dims - need double check
                    // ref:  https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt_neox/modeling_gpt_neox.py#LL251C1-L294C28
                    theta_base *= freq_scale;
                    for (int64_t ib = 0; ib < ne0/n_dims; ++ib) {
                        for (int64_t ic = 0; ic < n_dims; ic += 2) {
                            // simplified from `(ib * n_dims + ic) * inv_ndims`
                            float cur_rot = inv_ndims * ic - ib;

                            float cos_theta, sin_theta;
                            rope_yarn(
                                theta_base, freq_scale, corr_dims, cur_rot, ext_factor, attn_factor,
                                &cos_theta, &sin_theta
                            );
                            sin_theta *= sin_sign;

                            theta_base *= theta_scale;

                            const int64_t i0 = ib*n_dims + ic/2;

                            const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
                                  float * dst_data  = (float *)((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + i0*nb0);

                            const float x0 = src[0];
                            const float x1 = src[n_dims/2];

                            dst_data[0]        = x0*cos_theta - x1*sin_theta;
                            dst_data[n_dims/2] = x0*sin_theta + x1*cos_theta;
                        }
                    }
                }
            }
        }
    }
}

static void ggml_compute_forward_rope_f16(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst,
        const bool forward) {
    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow;

    //const int n_past     = ((int32_t *) dst->op_params)[0];
    const int n_dims     = ((int32_t *) dst->op_params)[1];
    const int mode       = ((int32_t *) dst->op_params)[2];
    const int n_ctx      = ((int32_t *) dst->op_params)[3];
    const int n_orig_ctx = ((int32_t *) dst->op_params)[4];

    memcpy(&freq_base,   (int32_t *) dst->op_params +  5, sizeof(float));
    memcpy(&freq_scale,  (int32_t *) dst->op_params +  6, sizeof(float));
    memcpy(&ext_factor,  (int32_t *) dst->op_params +  7, sizeof(float));
    memcpy(&attn_factor, (int32_t *) dst->op_params +  8, sizeof(float));
    memcpy(&beta_fast,   (int32_t *) dst->op_params +  9, sizeof(float));
    memcpy(&beta_slow,   (int32_t *) dst->op_params + 10, sizeof(float));

    GGML_TENSOR_UNARY_OP_LOCALS

    //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
    //printf("n_past = %d, ne2 = %d\n", n_past, ne2);

    GGML_ASSERT(nb0 == sizeof(ggml_fp16_t));

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(dst);

    GGML_ASSERT(n_dims <= ne0);
    GGML_ASSERT(n_dims % 2 == 0);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    // row index used to determine which thread to use
    int ir = 0;

    const float theta_scale = powf(freq_base, -2.0f/n_dims);
    const float inv_ndims = -1.f/n_dims;
    float corr_dims[2];
    ggml_rope_yarn_corr_dims(n_dims, n_orig_ctx, freq_base, beta_fast, beta_slow, corr_dims);

    const bool is_neox = mode & 2;
    const bool is_glm  = mode & 4;

    // backward process uses inverse rotation by cos and sin.
    // cos and sin build a rotation matrix, where the inverse is the transpose.
    // this essentially just switches the sign of sin.
    const float sin_sign = forward ? 1.0f : -1.0f;

    const int32_t * pos = (const int32_t *) src1->data;

    for (int64_t i3 = 0; i3 < ne3; i3++) {
        for (int64_t i2 = 0; i2 < ne2; i2++) {
            const int64_t p = pos[i2];
            for (int64_t i1 = 0; i1 < ne1; i1++) {
                if (ir++ < ir0) continue;
                if (ir   > ir1) break;

                float theta_base = (float)p;

                if (is_glm) {
                    theta_base = MIN(p, n_ctx - 2);
                    float block_theta = MAX(p - (n_ctx - 2), 0);
                    for (int64_t i0 = 0; i0 < ne0 / 4; i0++) {
                        const float cos_theta = cosf(theta_base);
                        const float sin_theta = sinf(theta_base) * sin_sign;
                        const float cos_block_theta = cosf(block_theta);
                        const float sin_block_theta = sinf(block_theta) * sin_sign;

                        theta_base  *= theta_scale;
                        block_theta *= theta_scale;

                        const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
                              ggml_fp16_t * dst_data  = (ggml_fp16_t *)((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + i0*nb0);

                        const float x0 = GGML_FP16_TO_FP32(src[0]);
                        const float x1 = GGML_FP16_TO_FP32(src[n_dims/2]);
                        const float x2 = GGML_FP16_TO_FP32(src[n_dims]);
                        const float x3 = GGML_FP16_TO_FP32(src[n_dims/2*3]);

                        dst_data[0]          = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta);
                        dst_data[n_dims/2]   = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta);
                        dst_data[n_dims]     = GGML_FP32_TO_FP16(x2*cos_block_theta - x3*sin_block_theta);
                        dst_data[n_dims/2*3] = GGML_FP32_TO_FP16(x2*sin_block_theta + x3*cos_block_theta);
                    }
                } else if (!is_neox) {
                    for (int64_t i0 = 0; i0 < ne0; i0 += 2) {
                        float cos_theta, sin_theta;
                        rope_yarn(
                            theta_base, freq_scale, corr_dims, i0, ext_factor, attn_factor, &cos_theta, &sin_theta
                        );
                        sin_theta *= sin_sign;

                        theta_base *= theta_scale;

                        const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
                              ggml_fp16_t * dst_data  = (ggml_fp16_t *)((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + i0*nb0);

                        const float x0 = GGML_FP16_TO_FP32(src[0]);
                        const float x1 = GGML_FP16_TO_FP32(src[1]);

                        dst_data[0] = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta);
                        dst_data[1] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta);
                    }
                } else {
                    // TODO: this might be wrong for ne0 != n_dims - need double check
                    // ref:  https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt_neox/modeling_gpt_neox.py#LL251C1-L294C28
                    theta_base *= freq_scale;
                    for (int64_t ib = 0; ib < ne0/n_dims; ++ib) {
                        for (int64_t ic = 0; ic < n_dims; ic += 2) {
                            // simplified from `(ib * n_dims + ic) * inv_ndims`
                            float cur_rot = inv_ndims * ic - ib;

                            float cos_theta, sin_theta;
                            rope_yarn(
                                theta_base, freq_scale, corr_dims, cur_rot, ext_factor, attn_factor,
                                &cos_theta, &sin_theta
                            );
                            sin_theta *= sin_sign;

                            theta_base *= theta_scale;

                            const int64_t i0 = ib*n_dims + ic/2;

                            const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
                                  ggml_fp16_t * dst_data  = (ggml_fp16_t *)((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + i0*nb0);

                            const float x0 = GGML_FP16_TO_FP32(src[0]);
                            const float x1 = GGML_FP16_TO_FP32(src[n_dims/2]);

                            dst_data[0]        = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta);
                            dst_data[n_dims/2] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta);
                        }
                    }
                }
            }
        }
    }
}

static void ggml_compute_forward_rope(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_rope_f16(params, src0, src1, dst, true);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_rope_f32(params, src0, src1, dst, true);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_rope_back

static void ggml_compute_forward_rope_back(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_rope_f16(params, src0, src1, dst, false);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_rope_f32(params, src0, src1, dst, false);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_conv_transpose_1d

static void ggml_compute_forward_conv_transpose_1d_f16_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    GGML_TENSOR_BINARY_OP_LOCALS

    const int ith = params->ith;
    const int nth = params->nth;

    const int nk = ne00*ne01*ne02;

    GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nb10 == sizeof(float));

    if (params->type == GGML_TASK_INIT) {
        memset(params->wdata, 0, params->wsize);

        // permute kernel data (src0) from (K x Cout x Cin) to (Cin x K x Cout)
        {
            ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;

            for (int64_t i02 = 0; i02 < ne02; i02++) {
                for (int64_t i01 = 0; i01 < ne01; i01++) {
                    const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i02*nb02 + i01*nb01);
                    ggml_fp16_t * dst_data = wdata + i01*ne00*ne02;
                    for (int64_t i00 = 0; i00 < ne00; i00++) {
                        dst_data[i00*ne02 + i02] = src[i00];
                    }
                }
            }
        }

        // permute source data (src1) from (L x Cin) to (Cin x L)
        {
            ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + nk;
            ggml_fp16_t * dst_data = wdata;

            for (int64_t i11 = 0; i11 < ne11; i11++) {
                const float * const src = (float *)((char *) src1->data + i11*nb11);
                for (int64_t i10 = 0; i10 < ne10; i10++) {
                    dst_data[i10*ne11 + i11] = GGML_FP32_TO_FP16(src[i10]);
                }
            }
        }

        // need to zero dst since we are accumulating into it
        memset(dst->data, 0, ggml_nbytes(dst));

        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int32_t s0 = ((const int32_t*)(dst->op_params))[0];

    // total rows in dst
    const int nr = ne1;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    ggml_fp16_t * const wdata     = (ggml_fp16_t *) params->wdata + 0;
    ggml_fp16_t * const wdata_src = wdata + nk;

    for (int i1 = ir0; i1 < ir1; i1++) {
        float * dst_data = (float *)((char *) dst->data + i1*nb1);
        ggml_fp16_t * wdata_kernel = wdata + i1*ne02*ne00;
        for (int i10 = 0; i10 < ne10; i10++) {
            const int i1n = i10*ne11;
            for (int i00 = 0; i00 < ne00; i00++) {
                float v = 0;
                ggml_vec_dot_f16(ne02, &v,
                        (ggml_fp16_t *)    wdata_src + i1n,
                        (ggml_fp16_t *) wdata_kernel + i00*ne02);
                dst_data[i10*s0 + i00] += v;
            }
        }
    }
}

static void ggml_compute_forward_conv_transpose_1d_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    GGML_TENSOR_BINARY_OP_LOCALS

    const int ith = params->ith;
    const int nth = params->nth;

    const int nk = ne00*ne01*ne02;

    GGML_ASSERT(nb00 == sizeof(float));
    GGML_ASSERT(nb10 == sizeof(float));

    if (params->type == GGML_TASK_INIT) {
        memset(params->wdata, 0, params->wsize);

        // prepare kernel data (src0) from (K x Cout x Cin) to (Cin x K x Cout)
        {
            float * const wdata = (float *) params->wdata + 0;

            for (int64_t i02 = 0; i02 < ne02; i02++) {
                for (int64_t i01 = 0; i01 < ne01; i01++) {
                    const float * const src = (float *)((char *) src0->data + i02*nb02 + i01*nb01);
                    float * dst_data = wdata + i01*ne00*ne02;
                    for (int64_t i00 = 0; i00 < ne00; i00++) {
                        dst_data[i00*ne02 + i02] = src[i00];
                    }
                }
            }
        }

        // prepare source data (src1)
        {
            float * const wdata = (float *) params->wdata + nk;
            float * dst_data = wdata;

            for (int64_t i11 = 0; i11 < ne11; i11++) {
                const float * const src = (float *)((char *) src1->data + i11*nb11);
                for (int64_t i10 = 0; i10 < ne10; i10++) {
                    dst_data[i10*ne11 + i11] = src[i10];
                }
            }
        }

        // need to zero dst since we are accumulating into it
        memset(dst->data, 0, ggml_nbytes(dst));

        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int32_t s0 = ((const int32_t*)(dst->op_params))[0];

    // total rows in dst
    const int nr = ne1;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    float * const wdata     = (float *) params->wdata + 0;
    float * const wdata_src = wdata + nk;

    for (int i1 = ir0; i1 < ir1; i1++) {
        float * dst_data = (float *)((char *) dst->data + i1*nb1);
        float * wdata_kernel = wdata + i1*ne02*ne00;
        for (int i10 = 0; i10 < ne10; i10++) {
            const int i1n = i10*ne11;
            for (int i00 = 0; i00 < ne00; i00++) {
                float v = 0;
                ggml_vec_dot_f32(ne02, &v,
                        wdata_src + i1n,
                        wdata_kernel + i00*ne02);
                dst_data[i10*s0 + i00] += v;
            }
        }
    }
}
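
// note: the largest index written above is (ne10 - 1)*s0 + ne00 - 1, so each
// dst row has length (ne10 - 1)*s0 + ne00 - the usual transposed-convolution
// output size for input length ne10, kernel size ne00 and stride s0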
  9684. static void ggml_compute_forward_conv_transpose_1d(
  9685. const struct ggml_compute_params * params,
  9686. const struct ggml_tensor * src0,
  9687. const struct ggml_tensor * src1,
  9688. struct ggml_tensor * dst) {
  9689. switch (src0->type) {
  9690. case GGML_TYPE_F16:
  9691. {
  9692. ggml_compute_forward_conv_transpose_1d_f16_f32(params, src0, src1, dst);
  9693. } break;
  9694. case GGML_TYPE_F32:
  9695. {
  9696. ggml_compute_forward_conv_transpose_1d_f32(params, src0, src1, dst);
  9697. } break;
  9698. default:
  9699. {
  9700. GGML_ASSERT(false);
  9701. } break;
  9702. }
  9703. }
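// A minimal usage sketch for the graph-level entry point that dispatches here.
// Hedged: this assumes the ggml_conv_transpose_1d(ctx, a, b, s0, p0, d0)
// declaration from ggml.h of this version; K/Cout/Cin/L are placeholder sizes,
// not values from this file.
//
//   struct ggml_tensor * a = ggml_new_tensor_3d(ctx, GGML_TYPE_F16, K, Cout, Cin); // kernel
//   struct ggml_tensor * b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, L, Cin);       // input
//   struct ggml_tensor * y = ggml_conv_transpose_1d(ctx, a, b, /*s0=*/2, /*p0=*/0, /*d0=*/1);
//   // with d0 == 1 the output length is (L - 1)*s0 + K, with Cout channels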
  9704. // src0: kernel [OC, IC, KH, KW]
  9705. // src1: image [N, IC, IH, IW]
  9706. // dst: result [N, OH, OW, IC*KH*KW]
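// Hedged worked example of the layout transform (sizes invented for
// illustration): with KH = KW = 2, s0 = s1 = 1 and no padding/dilation, the
// output row for pixel (ioh, iow) contains, for each input channel ic, the
// flattened 2x2 patch
//   { src[ic, ioh+0, iow+0], src[ic, ioh+0, iow+1],
//     src[ic, ioh+1, iow+0], src[ic, ioh+1, iow+1] }
// stored at dst_data[ic*(KH*KW) + ikh*KW + ikw]; a subsequent matrix multiply
// against the kernel reshaped to [OC, IC*KH*KW] then realizes the convolution.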
  9707. static void ggml_compute_forward_im2col_f16(
  9708. const struct ggml_compute_params * params,
  9709. const struct ggml_tensor * src0,
  9710. const struct ggml_tensor * src1,
  9711. struct ggml_tensor * dst) {
  9712. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  9713. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  9714. GGML_ASSERT( dst->type == GGML_TYPE_F16);
  9715. int64_t t0 = ggml_perf_time_us();
  9716. UNUSED(t0);
GGML_TENSOR_BINARY_OP_LOCALS
  9718. const int32_t s0 = ((const int32_t *)(dst->op_params))[0];
  9719. const int32_t s1 = ((const int32_t *)(dst->op_params))[1];
  9720. const int32_t p0 = ((const int32_t *)(dst->op_params))[2];
  9721. const int32_t p1 = ((const int32_t *)(dst->op_params))[3];
  9722. const int32_t d0 = ((const int32_t *)(dst->op_params))[4];
  9723. const int32_t d1 = ((const int32_t *)(dst->op_params))[5];
  9724. const bool is_2D = ((const int32_t *)(dst->op_params))[6] == 1;
  9725. const int ith = params->ith;
  9726. const int nth = params->nth;
  9727. const int64_t N = is_2D ? ne13 : ne12;
  9728. const int64_t IC = is_2D ? ne12 : ne11;
  9729. const int64_t IH = is_2D ? ne11 : 1;
  9730. const int64_t IW = ne10;
  9731. const int64_t KH = is_2D ? ne01 : 1;
  9732. const int64_t KW = ne00;
  9733. const int64_t OH = is_2D ? ne2 : 1;
  9734. const int64_t OW = ne1;
const int64_t ofs0 = is_2D ? nb13 : nb12;
const int64_t ofs1 = is_2D ? nb12 : nb11;
  9737. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  9738. GGML_ASSERT(nb10 == sizeof(float));
  9739. if (params->type == GGML_TASK_INIT) {
  9740. return;
  9741. }
  9742. if (params->type == GGML_TASK_FINALIZE) {
  9743. return;
  9744. }
  9745. // im2col: [N, IC, IH, IW] => [N, OH, OW, IC*KH*KW]
  9746. {
  9747. ggml_fp16_t * const wdata = (ggml_fp16_t *) dst->data;
  9748. for (int64_t in = 0; in < N; in++) {
for (int64_t ioh = 0; ioh < OH; ioh++) { // 1 iteration when !is_2D
  9750. for (int64_t iow = 0; iow < OW; iow++) {
  9751. for (int64_t iic = ith; iic < IC; iic += nth) {
  9752. // micro kernel
  9753. ggml_fp16_t * dst_data = wdata + (in*OH*OW + ioh*OW + iow)*(IC*KH*KW); // [IC, KH, KW]
  9754. const float * const src_data = (float *)((char *) src1->data + in*ofs0 + iic*ofs1); // [IH, IW]
for (int64_t ikh = 0; ikh < KH; ikh++) { // 1 iteration when !is_2D
  9756. for (int64_t ikw = 0; ikw < KW; ikw++) {
  9757. const int64_t iiw = iow*s0 + ikw*d0 - p0;
  9758. const int64_t iih = ioh*s1 + ikh*d1 - p1;
  9759. if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) {
  9760. dst_data[iic*(KH*KW) + ikh*KW + ikw] = 0;
  9761. } else {
  9762. dst_data[iic*(KH*KW) + ikh*KW + ikw] = GGML_FP32_TO_FP16(src_data[iih*IW + iiw]);
  9763. }
  9764. }
  9765. }
  9766. }
  9767. }
  9768. }
  9769. }
  9770. }
  9771. }
  9772. static void ggml_compute_forward_im2col(
  9773. const struct ggml_compute_params * params,
  9774. const struct ggml_tensor * src0,
  9775. const struct ggml_tensor * src1,
  9776. struct ggml_tensor * dst) {
  9777. switch (src0->type) {
  9778. case GGML_TYPE_F16:
  9779. {
  9780. ggml_compute_forward_im2col_f16(params, src0, src1, dst);
  9781. } break;
  9782. case GGML_TYPE_F32:
  9783. {
  9784. GGML_ASSERT(false);
  9785. } break;
  9786. default:
  9787. {
  9788. GGML_ASSERT(false);
  9789. } break;
  9790. }
  9791. }
  9792. // ggml_compute_forward_conv_transpose_2d
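// Same two-phase scheme as the 1d variant above: INIT permutes the f16 kernel
// to (Cin x Kw x Kh x Cout) and converts the f32 source to f16 (Cin x Sw x Sh)
// in wdata, so the hot loop can take contiguous dot products over Cin; the
// compute phase then assigns each thread a slice of output channels and
// scatter-accumulates every kernel tap into dst at stride-spaced positions.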
  9793. static void ggml_compute_forward_conv_transpose_2d(
  9794. const struct ggml_compute_params * params,
  9795. const struct ggml_tensor * src0,
  9796. const struct ggml_tensor * src1,
  9797. struct ggml_tensor * dst) {
  9798. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  9799. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  9800. GGML_ASSERT( dst->type == GGML_TYPE_F32);
  9801. int64_t t0 = ggml_perf_time_us();
  9802. UNUSED(t0);
  9803. GGML_TENSOR_BINARY_OP_LOCALS
  9804. const int ith = params->ith;
  9805. const int nth = params->nth;
  9806. const int nk = ne00*ne01*ne02*ne03;
  9807. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  9808. GGML_ASSERT(nb10 == sizeof(float));
  9809. if (params->type == GGML_TASK_INIT) {
  9810. memset(params->wdata, 0, params->wsize);
  9811. // permute kernel data (src0) from (Kw x Kh x Cout x Cin) to (Cin x Kw x Kh x Cout)
  9812. {
  9813. ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;
  9814. for (int64_t i03 = 0; i03 < ne03; i03++) {
  9815. for (int64_t i02 = 0; i02 < ne02; i02++) {
  9816. const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i03*nb03 + i02*nb02);
  9817. ggml_fp16_t * dst_data = wdata + i02*ne01*ne00*ne03;
  9818. for (int64_t i01 = 0; i01 < ne01; i01++) {
  9819. for (int64_t i00 = 0; i00 < ne00; i00++) {
  9820. dst_data[i01*ne00*ne03 + i00*ne03 + i03] = src[i01 * ne00 + i00];
  9821. }
  9822. }
  9823. }
  9824. }
  9825. }
  9826. // permute source data (src1) from (Sw x Sh x Cin) to (Cin x Sw x Sh)
  9827. {
  9828. ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + nk;
  9829. for (int i12 = 0; i12 < ne12; i12++) {
  9830. for (int i11 = 0; i11 < ne11; i11++) {
  9831. const float * const src = (float *)((char *) src1->data + i12*nb12 + i11*nb11);
  9832. ggml_fp16_t * dst_data = wdata + i11*ne10*ne12;
  9833. for (int i10 = 0; i10 < ne10; i10++) {
  9834. dst_data[i10*ne12 + i12] = GGML_FP32_TO_FP16(src[i10]);
  9835. }
  9836. }
  9837. }
  9838. }
  9839. memset(dst->data, 0, ggml_nbytes(dst));
  9840. return;
  9841. }
  9842. if (params->type == GGML_TASK_FINALIZE) {
  9843. return;
  9844. }
  9845. const int32_t stride = ggml_get_op_params_i32(dst, 0);
  9846. // total patches in dst
  9847. const int np = ne2;
  9848. // patches per thread
  9849. const int dp = (np + nth - 1)/nth;
  9850. // patch range for this thread
  9851. const int ip0 = dp*ith;
  9852. const int ip1 = MIN(ip0 + dp, np);
  9853. ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;
  9854. ggml_fp16_t * const wdata_src = wdata + nk;
  9855. for (int i2 = ip0; i2 < ip1; i2++) { // Cout
  9856. float * dst_data = (float *)((char *) dst->data + i2*nb2);
  9857. ggml_fp16_t * wdata_kernel = wdata + i2*ne01*ne00*ne03;
  9858. for (int i11 = 0; i11 < ne11; i11++) {
  9859. for (int i10 = 0; i10 < ne10; i10++) {
  9860. const int i1n = i11*ne10*ne12 + i10*ne12;
  9861. for (int i01 = 0; i01 < ne01; i01++) {
  9862. for (int i00 = 0; i00 < ne00; i00++) {
  9863. float v = 0;
  9864. ggml_vec_dot_f16(ne03, &v,
  9865. wdata_src + i1n,
  9866. wdata_kernel + i01*ne00*ne03 + i00*ne03);
  9867. dst_data[(i11*stride + i01)*ne0 + i10*stride + i00] += v;
  9868. }
  9869. }
  9870. }
  9871. }
  9872. }
  9873. }
  9874. // ggml_compute_forward_pool_1d_sk_p0
  9875. static void ggml_compute_forward_pool_1d_sk_p0(
  9876. const struct ggml_compute_params * params,
  9877. const enum ggml_op_pool op,
  9878. const struct ggml_tensor * src,
  9879. const int k,
  9880. struct ggml_tensor * dst) {
  9881. assert(src->type == GGML_TYPE_F32);
  9882. assert(params->ith == 0);
  9883. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9884. return;
  9885. }
  9886. const char * cdata = (const char *)src->data;
  9887. const char * const data_end = cdata + ggml_nbytes(src);
  9888. float * drow = (float *)dst->data;
  9889. const int64_t rs = dst->ne[0];
  9890. while (cdata < data_end) {
  9891. const float * const srow = (const float *)cdata;
  9892. int j = 0;
  9893. for (int64_t i = 0; i < rs; ++i) {
  9894. switch (op) {
  9895. case GGML_OP_POOL_AVG: drow[i] = 0; break;
  9896. case GGML_OP_POOL_MAX: drow[i] = -FLT_MAX; break;
  9897. case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
  9898. }
  9899. for (int ki = 0; ki < k; ++ki) {
  9900. switch (op) {
  9901. case GGML_OP_POOL_AVG: drow[i] += srow[j]; break;
  9902. case GGML_OP_POOL_MAX: if (srow[j] > drow[i]) drow[i] = srow[j]; break;
  9903. case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
  9904. }
  9905. ++j;
  9906. }
  9907. switch (op) {
  9908. case GGML_OP_POOL_AVG: drow[i] /= k; break;
  9909. case GGML_OP_POOL_MAX: break;
  9910. case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
  9911. }
  9912. }
  9913. cdata += src->nb[1];
  9914. drow += rs;
  9915. }
  9916. }
  9917. // ggml_compute_forward_pool_1d
  9918. static void ggml_compute_forward_pool_1d(
  9919. const struct ggml_compute_params * params,
  9920. const struct ggml_tensor * src0,
  9921. struct ggml_tensor * dst) {
  9922. const int32_t * opts = (const int32_t *)dst->op_params;
  9923. enum ggml_op_pool op = opts[0];
  9924. const int k0 = opts[1];
  9925. const int s0 = opts[2];
  9926. const int p0 = opts[3];
  9927. GGML_ASSERT(p0 == 0); // padding not supported
  9928. GGML_ASSERT(k0 == s0); // only s = k supported
  9929. ggml_compute_forward_pool_1d_sk_p0(params, op, src0, k0, dst);
  9930. }
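// Hedged worked example (values invented for illustration): with op == AVG and
// k0 == s0 == 2, a source row { 1, 2, 3, 4 } pools to { 1.5f, 3.5f }; with
// op == MAX the same row pools to { 2, 4 }. Since k == s and p == 0 are
// asserted above, the windows tile each row exactly and dst->ne[0] is
// src->ne[0]/k.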
  9931. // ggml_compute_forward_pool_2d
  9932. static void ggml_compute_forward_pool_2d(
  9933. const struct ggml_compute_params * params,
  9934. const struct ggml_tensor * src,
  9935. struct ggml_tensor * dst) {
  9936. assert(src->type == GGML_TYPE_F32);
  9937. assert(params->ith == 0);
  9938. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9939. return;
  9940. }
  9941. const int32_t * opts = (const int32_t *)dst->op_params;
  9942. enum ggml_op_pool op = opts[0];
  9943. const int k0 = opts[1];
  9944. const int k1 = opts[2];
  9945. const int s0 = opts[3];
  9946. const int s1 = opts[4];
  9947. const int p0 = opts[5];
  9948. const int p1 = opts[6];
  9949. const char * cdata = (const char*)src->data;
  9950. const char * const data_end = cdata + ggml_nbytes(src);
  9951. const int64_t px = dst->ne[0];
  9952. const int64_t py = dst->ne[1];
  9953. const int64_t pa = px * py;
  9954. float * dplane = (float *)dst->data;
  9955. const int ka = k0 * k1;
  9956. const int offset0 = -p0;
  9957. const int offset1 = -p1;
  9958. while (cdata < data_end) {
  9959. for (int oy = 0; oy < py; ++oy) {
  9960. float * const drow = dplane + oy * px;
  9961. for (int ox = 0; ox < px; ++ox) {
  9962. float * const out = drow + ox;
  9963. switch (op) {
  9964. case GGML_OP_POOL_AVG: *out = 0; break;
  9965. case GGML_OP_POOL_MAX: *out = -FLT_MAX; break;
  9966. case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
  9967. }
  9968. const int ix = offset0 + ox * s0;
  9969. const int iy = offset1 + oy * s1;
  9970. for (int ky = 0; ky < k1; ++ky) {
  9971. if (iy + ky < 0 || iy + ky >= src->ne[1]) continue;
  9972. const float * const srow = (const float *)(cdata + src->nb[1] * (iy + ky));
  9973. for (int kx = 0; kx < k0; ++kx) {
  9974. int j = ix + kx;
  9975. if (j < 0 || j >= src->ne[0]) continue;
  9976. switch (op) {
  9977. case GGML_OP_POOL_AVG: *out += srow[j]; break;
  9978. case GGML_OP_POOL_MAX: if (srow[j] > *out) *out = srow[j]; break;
  9979. case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
  9980. }
  9981. }
  9982. }
  9983. switch (op) {
  9984. case GGML_OP_POOL_AVG: *out /= ka; break;
  9985. case GGML_OP_POOL_MAX: break;
  9986. case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
  9987. }
  9988. }
  9989. }
  9990. cdata += src->nb[2];
  9991. dplane += pa;
  9992. }
  9993. }
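// Hedged worked example: op == MAX with k0 == k1 == 2, s0 == s1 == 2 and
// p0 == p1 == 0 maps a 4x4 plane to 2x2, each output being the max of a
// disjoint 2x2 tile. Note that AVG divides by ka == k0*k1 unconditionally, so
// windows clipped by padding effectively treat the out-of-range cells as zero.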
  9994. // ggml_compute_forward_upscale
  9995. static void ggml_compute_forward_upscale_f32(
  9996. const struct ggml_compute_params * params,
  9997. const struct ggml_tensor * src0,
  9998. struct ggml_tensor * dst) {
  9999. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  10000. return;
  10001. }
  10002. GGML_ASSERT(src0->nb[0] == sizeof(float));
  10003. const int ith = params->ith;
  10004. const int nth = params->nth;
  10005. GGML_TENSOR_UNARY_OP_LOCALS
  10006. const int scale_factor = dst->op_params[0];
  10007. // TODO: optimize
  10008. for (int64_t i3 = 0; i3 < ne3; i3++) {
  10009. const int64_t i03 = i3;
  10010. for (int64_t i2 = ith; i2 < ne2; i2 += nth) {
  10011. const int64_t i02 = i2;
  10012. for (int64_t i1 = 0; i1 < ne1; i1++) {
  10013. const int64_t i01 = i1 / scale_factor;
  10014. for (int64_t i0 = 0; i0 < ne0; i0++) {
  10015. const int64_t i00 = i0 / scale_factor;
  10016. const float * x = (float *)((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  10017. float * y = (float *)((char *) dst->data + i0*nb0 + i1*nb1 + i2*nb2 + i3*nb3);
  10018. *y = *x;
  10019. }
  10020. }
  10021. }
  10022. }
  10023. }
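// Hedged example: scale_factor == 2 performs nearest-neighbor upscaling; since
// i00 = i0/2 and i01 = i1/2, the four dst positions (2x, 2y)..(2x+1, 2y+1) all
// read the single src element (x, y).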
  10024. static void ggml_compute_forward_upscale(
  10025. const struct ggml_compute_params * params,
  10026. const struct ggml_tensor * src0,
  10027. struct ggml_tensor * dst) {
  10028. switch (src0->type) {
  10029. case GGML_TYPE_F32:
  10030. {
  10031. ggml_compute_forward_upscale_f32(params, src0, dst);
  10032. } break;
  10033. default:
  10034. {
  10035. GGML_ASSERT(false);
  10036. } break;
  10037. }
  10038. }
  10039. // ggml_compute_forward_pad
  10040. static void ggml_compute_forward_pad_f32(
  10041. const struct ggml_compute_params * params,
  10042. const struct ggml_tensor * src0,
  10043. struct ggml_tensor * dst) {
  10044. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  10045. return;
  10046. }
  10047. GGML_ASSERT(src0->nb[0] == sizeof(float));
  10048. GGML_ASSERT( dst->nb[0] == sizeof(float));
  10049. const int ith = params->ith;
  10050. const int nth = params->nth;
  10051. GGML_TENSOR_UNARY_OP_LOCALS
  10052. float * dst_ptr = (float *) dst->data;
  10053. // TODO: optimize
  10054. for (int64_t i2 = 0; i2 < ne2; ++i2) {
  10055. for (int64_t i1 = ith; i1 < ne1; i1 += nth) {
  10056. for (int64_t i0 = 0; i0 < ne0; ++i0) {
  10057. for (int64_t i3 = 0; i3 < ne3; ++i3) {
  10058. const int64_t dst_idx = i3*(ne0*ne1*ne2) + i2*(ne0*ne1) + i1*ne0 + i0;
  10059. const float * src_ptr = (const float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  10060. if (i0 < ne00 && i1 < ne01 && i2 < ne02 && i3 < ne03) {
  10061. dst_ptr[dst_idx] = *src_ptr;
  10062. } else {
  10063. dst_ptr[dst_idx] = 0;
  10064. }
  10065. }
  10066. }
  10067. }
  10068. }
  10069. }
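// Hedged example: padding a 2x2 src into a 3x3 dst copies src into the
// top-left 2x2 corner and writes 0 into the remaining row and column, because
// any index with i0 >= ne00 or i1 >= ne01 takes the else branch above.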
  10070. static void ggml_compute_forward_pad(
  10071. const struct ggml_compute_params * params,
  10072. const struct ggml_tensor * src0,
  10073. struct ggml_tensor * dst) {
  10074. switch (src0->type) {
  10075. case GGML_TYPE_F32:
  10076. {
  10077. ggml_compute_forward_pad_f32(params, src0, dst);
  10078. } break;
  10079. default:
  10080. {
  10081. GGML_ASSERT(false);
  10082. } break;
  10083. }
  10084. }
  10085. // ggml_compute_forward_argsort
  10086. static void ggml_compute_forward_argsort_f32(
  10087. const struct ggml_compute_params * params,
  10088. const struct ggml_tensor * src0,
  10089. struct ggml_tensor * dst) {
  10090. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  10091. return;
  10092. }
  10093. GGML_TENSOR_UNARY_OP_LOCALS
  10094. GGML_ASSERT(nb0 == sizeof(float));
  10095. const int ith = params->ith;
  10096. const int nth = params->nth;
  10097. const int64_t nr = ggml_nrows(src0);
  10098. enum ggml_sort_order order = (enum ggml_sort_order) ggml_get_op_params_i32(dst, 0);
  10099. for (int64_t i = ith; i < nr; i += nth) {
  10100. int32_t * dst_data = (int32_t *)((char *) dst->data + i*nb1);
  10101. const float * src_data = (float *)((char *) src0->data + i*nb01);
  10102. for (int64_t j = 0; j < ne0; j++) {
  10103. dst_data[j] = j;
  10104. }
// C's qsort cannot capture src_data without globals (qsort_r is not portable), so use a simple O(ne0^2) exchange sort instead
  10106. for (int64_t j = 0; j < ne0; j++) {
  10107. for (int64_t k = j + 1; k < ne0; k++) {
  10108. if ((order == GGML_SORT_ASC && src_data[dst_data[j]] > src_data[dst_data[k]]) ||
  10109. (order == GGML_SORT_DESC && src_data[dst_data[j]] < src_data[dst_data[k]])) {
  10110. int32_t tmp = dst_data[j];
  10111. dst_data[j] = dst_data[k];
  10112. dst_data[k] = tmp;
  10113. }
  10114. }
  10115. }
  10116. }
  10117. }
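// Hedged worked example: a src row { 0.3f, -1.0f, 2.5f } with GGML_SORT_ASC
// produces indices { 1, 0, 2 } (values -1.0 < 0.3 < 2.5); GGML_SORT_DESC
// produces { 2, 0, 1 }. The exchange sort above is O(ne0^2) per row, so it is
// only suitable for short rows.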
  10118. static void ggml_compute_forward_argsort(
  10119. const struct ggml_compute_params * params,
  10120. const struct ggml_tensor * src0,
  10121. struct ggml_tensor * dst) {
  10122. switch (src0->type) {
  10123. case GGML_TYPE_F32:
  10124. {
  10125. ggml_compute_forward_argsort_f32(params, src0, dst);
  10126. } break;
  10127. default:
  10128. {
  10129. GGML_ASSERT(false);
  10130. } break;
  10131. }
  10132. }
  10133. // ggml_compute_forward_flash_attn
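// Computes dst = softmax(mask(Q K^T / sqrt(D))) V per attention head, with
// D = head size, N = query rows and M = P + N = key/value rows; when `masked`
// is set, entries whose key index exceeds P + (query index) are forced to
// -INF before the softmax (causal mask). The f32/f16 variants below differ
// mainly in the storage type of q/k/v and the dot-product kernels used.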
  10134. static void ggml_compute_forward_flash_attn_f32(
  10135. const struct ggml_compute_params * params,
  10136. const struct ggml_tensor * q,
  10137. const struct ggml_tensor * k,
  10138. const struct ggml_tensor * v,
  10139. const bool masked,
  10140. struct ggml_tensor * dst) {
  10141. int64_t t0 = ggml_perf_time_us();
  10142. UNUSED(t0);
  10143. GGML_TENSOR_LOCALS(int64_t, neq, q, ne)
  10144. GGML_TENSOR_LOCALS(size_t, nbq, q, nb)
  10145. GGML_TENSOR_LOCALS(int64_t, nek, k, ne)
  10146. GGML_TENSOR_LOCALS(size_t, nbk, k, nb)
  10147. GGML_TENSOR_LOCALS(int64_t, nev, v, ne)
  10148. GGML_TENSOR_LOCALS(size_t, nbv, v, nb)
  10149. GGML_TENSOR_LOCALS(int64_t, ne, dst, ne)
  10150. GGML_TENSOR_LOCALS(size_t, nb, dst, nb)
  10151. const int ith = params->ith;
  10152. const int nth = params->nth;
  10153. const int64_t D = neq0;
  10154. const int64_t N = neq1;
  10155. const int64_t P = nek1 - N;
  10156. const int64_t M = P + N;
  10157. const int Mup = ggml_up(M, GGML_SOFT_MAX_UNROLL);
  10158. GGML_ASSERT(ne0 == D);
  10159. GGML_ASSERT(ne1 == N);
  10160. GGML_ASSERT(P >= 0);
  10161. GGML_ASSERT(nbq0 == sizeof(float));
  10162. GGML_ASSERT(nbk0 == sizeof(float));
  10163. GGML_ASSERT(nbv0 == sizeof(float));
  10164. GGML_ASSERT(neq0 == D);
  10165. GGML_ASSERT(nek0 == D);
  10166. GGML_ASSERT(nev1 == D);
  10167. GGML_ASSERT(neq1 == N);
  10168. GGML_ASSERT(nek1 == N + P);
  10169. GGML_ASSERT(nev1 == D);
  10170. // dst cannot be transposed or permuted
  10171. GGML_ASSERT(nb0 == sizeof(float));
  10172. GGML_ASSERT(nb0 <= nb1);
  10173. GGML_ASSERT(nb1 <= nb2);
  10174. GGML_ASSERT(nb2 <= nb3);
  10175. if (params->type == GGML_TASK_INIT) {
  10176. return;
  10177. }
  10178. if (params->type == GGML_TASK_FINALIZE) {
  10179. return;
  10180. }
  10181. // parallelize by q rows using ggml_vec_dot_f32
  10182. // total rows in q
  10183. const int nr = neq1*neq2*neq3;
  10184. // rows per thread
  10185. const int dr = (nr + nth - 1)/nth;
  10186. // row range for this thread
  10187. const int ir0 = dr*ith;
  10188. const int ir1 = MIN(ir0 + dr, nr);
  10189. const float scale = 1.0f/sqrtf(D);
  10190. //printf("P=%d N=%d D=%d ir0=%d ir1=%d scale = %f\n", P, N, D, ir0, ir1, scale);
  10191. for (int ir = ir0; ir < ir1; ++ir) {
  10192. // q indices
  10193. const int iq3 = ir/(neq2*neq1);
  10194. const int iq2 = (ir - iq3*neq2*neq1)/neq1;
  10195. const int iq1 = (ir - iq3*neq2*neq1 - iq2*neq1);
  10196. float * S = (float *) params->wdata + ith*(Mup + CACHE_LINE_SIZE_F32);
  10197. for (int i = M; i < Mup; ++i) {
  10198. S[i] = -INFINITY;
  10199. }
  10200. const int64_t masked_begin = masked ? (P + iq1 + 1) : M;
  10201. for (int64_t ic = 0; ic < masked_begin; ++ic) {
  10202. // k indices
  10203. const int ik3 = iq3;
  10204. const int ik2 = iq2 % nek2;
  10205. const int ik1 = ic;
  10206. // S indices
  10207. const int i1 = ik1;
  10208. ggml_vec_dot_f32(neq0,
  10209. S + i1,
  10210. (float *) ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)),
  10211. (float *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)));
  10212. }
  10213. // scale
  10214. ggml_vec_scale_f32(masked_begin, S, scale);
  10215. for (int64_t i = masked_begin; i < M; i++) {
  10216. S[i] = -INFINITY;
  10217. }
  10218. // softmax
// exclude known -INF S[..] values from max and loop
// don't forget to set their S values to zero
  10221. {
  10222. float max = -INFINITY;
  10223. ggml_vec_max_f32(masked_begin, &max, S);
  10224. ggml_float sum = 0.0;
  10225. {
  10226. #ifdef GGML_SOFT_MAX_ACCELERATE
  10227. max = -max;
  10228. vDSP_vsadd(S, 1, &max, S, 1, Mup);
  10229. vvexpf(S, S, &Mup);
  10230. ggml_vec_sum_f32(Mup, &sum, S);
  10231. #else
  10232. uint16_t scvt[GGML_SOFT_MAX_UNROLL]; UNUSED(scvt);
  10233. ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 };
  10234. for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) {
  10235. if (i >= masked_begin) {
  10236. break;
  10237. }
  10238. float * SS = S + i;
  10239. for (int j = 0; j < GGML_SOFT_MAX_UNROLL; ++j) {
  10240. if (i + j >= masked_begin) {
  10241. break;
  10242. } else if (SS[j] == -INFINITY) {
  10243. SS[j] = 0.0f;
  10244. } else {
  10245. #ifndef GGML_FLASH_ATTN_EXP_FP16
  10246. const float val = expf(SS[j] - max);
  10247. #else
  10248. ggml_fp16_t s = GGML_FP32_TO_FP16(SS[j] - max);
  10249. memcpy(&scvt[j], &s, sizeof(uint16_t));
  10250. const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt[j]]);
  10251. #endif
  10252. sump[j] += (ggml_float)val;
  10253. SS[j] = val;
  10254. }
  10255. }
  10256. }
  10257. for (int i = 0; i < GGML_SOFT_MAX_UNROLL; i++) {
  10258. sum += sump[i];
  10259. }
  10260. #endif
  10261. }
  10262. assert(sum > 0.0);
  10263. sum = 1.0/sum;
  10264. ggml_vec_scale_f32(masked_begin, S, sum);
  10265. #ifndef NDEBUG
  10266. for (int i = 0; i < masked_begin; ++i) {
  10267. assert(!isnan(S[i]));
  10268. assert(!isinf(S[i]));
  10269. }
  10270. #endif
  10271. }
  10272. for (int64_t ic = 0; ic < nev1; ++ic) {
  10273. // dst indices
  10274. const int i1 = iq1;
  10275. const int i2 = iq2;
  10276. const int i3 = iq3;
  10277. // v indices
  10278. const int iv2 = iq2 % nev2;
  10279. const int iv3 = iq3;
  10280. ggml_vec_dot_f32(masked_begin,
  10281. (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)),
  10282. (float *) ((char *) v->data + ( ic*nbv1 + iv2*nbv2 + iv3*nbv3)),
  10283. S);
  10284. }
  10285. }
  10286. }
  10287. static void ggml_compute_forward_flash_attn_f16(
  10288. const struct ggml_compute_params * params,
  10289. const struct ggml_tensor * q,
  10290. const struct ggml_tensor * k,
  10291. const struct ggml_tensor * v,
  10292. const bool masked,
  10293. struct ggml_tensor * dst) {
  10294. int64_t t0 = ggml_perf_time_us();
  10295. UNUSED(t0);
  10296. GGML_TENSOR_LOCALS(int64_t, neq, q, ne)
  10297. GGML_TENSOR_LOCALS(size_t, nbq, q, nb)
  10298. GGML_TENSOR_LOCALS(int64_t, nek, k, ne)
  10299. GGML_TENSOR_LOCALS(size_t, nbk, k, nb)
  10300. GGML_TENSOR_LOCALS(int64_t, nev, v, ne)
  10301. GGML_TENSOR_LOCALS(size_t, nbv, v, nb)
  10302. GGML_TENSOR_LOCALS(int64_t, ne, dst, ne)
  10303. GGML_TENSOR_LOCALS(size_t, nb, dst, nb)
  10304. const int ith = params->ith;
  10305. const int nth = params->nth;
  10306. const int64_t D = neq0;
  10307. const int64_t N = neq1;
  10308. const int64_t P = nek1 - N;
  10309. const int64_t M = P + N;
  10310. const int Mup = ggml_up(M, GGML_SOFT_MAX_UNROLL);
  10311. GGML_ASSERT(ne0 == D);
  10312. GGML_ASSERT(ne1 == N);
  10313. GGML_ASSERT(P >= 0);
  10314. GGML_ASSERT(nbq0 == sizeof(ggml_fp16_t));
  10315. GGML_ASSERT(nbk0 == sizeof(ggml_fp16_t));
  10316. GGML_ASSERT(nbv0 == sizeof(ggml_fp16_t));
  10317. GGML_ASSERT(neq0 == D);
  10318. GGML_ASSERT(nek0 == D);
  10319. GGML_ASSERT(nev1 == D);
  10320. GGML_ASSERT(neq1 == N);
  10321. GGML_ASSERT(nek1 == N + P);
  10322. GGML_ASSERT(nev1 == D);
  10323. // dst cannot be transposed or permuted
  10324. GGML_ASSERT(nb0 == sizeof(float));
  10325. GGML_ASSERT(nb0 <= nb1);
  10326. GGML_ASSERT(nb1 <= nb2);
  10327. GGML_ASSERT(nb2 <= nb3);
  10328. if (params->type == GGML_TASK_INIT) {
  10329. return;
  10330. }
  10331. if (params->type == GGML_TASK_FINALIZE) {
  10332. return;
  10333. }
// parallelize by q rows using ggml_vec_dot_f16
  10335. // total rows in q
  10336. const int nr = neq1*neq2*neq3;
  10337. // rows per thread
  10338. const int dr = (nr + nth - 1)/nth;
  10339. // row range for this thread
  10340. const int ir0 = dr*ith;
  10341. const int ir1 = MIN(ir0 + dr, nr);
  10342. const float scale = 1.0f/sqrtf(D);
  10343. //printf("P=%d N=%d D=%d ir0=%d ir1=%d scale = %f\n", P, N, D, ir0, ir1, scale);
  10344. for (int ir = ir0; ir < ir1; ++ir) {
  10345. // q indices
  10346. const int iq3 = ir/(neq2*neq1);
  10347. const int iq2 = (ir - iq3*neq2*neq1)/neq1;
  10348. const int iq1 = (ir - iq3*neq2*neq1 - iq2*neq1);
  10349. float * S = (float *) params->wdata + ith*(2*Mup + CACHE_LINE_SIZE_F32);
  10350. for (int i = M; i < Mup; ++i) {
  10351. S[i] = -INFINITY;
  10352. }
  10353. if (GGML_VEC_DOT_UNROLL > 2 || nek1 % GGML_VEC_DOT_UNROLL != 0) {
  10354. for (int64_t ic = 0; ic < nek1; ++ic) {
  10355. // k indices
  10356. const int ik3 = iq3;
  10357. const int ik2 = iq2 % nek2;
  10358. const int ik1 = ic;
  10359. // S indices
  10360. const int i1 = ik1;
  10361. ggml_vec_dot_f16(neq0,
  10362. S + i1,
  10363. (ggml_fp16_t *) ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)),
  10364. (ggml_fp16_t *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)));
  10365. }
  10366. } else {
  10367. for (int64_t ic = 0; ic < nek1; ic += GGML_VEC_DOT_UNROLL) {
  10368. // k indices
  10369. const int ik3 = iq3;
  10370. const int ik2 = iq2 % nek2;
  10371. const int ik1 = ic;
  10372. // S indices
  10373. const int i1 = ik1;
  10374. ggml_vec_dot_f16_unroll(neq0, nbk1,
  10375. S + i1,
  10376. ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)),
  10377. (ggml_fp16_t *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)));
  10378. }
  10379. }
  10380. // scale
  10381. ggml_vec_scale_f32(nek1, S, scale);
  10382. if (masked) {
  10383. for (int64_t i = P; i < M; i++) {
  10384. if (i > P + iq1) {
  10385. S[i] = -INFINITY;
  10386. }
  10387. }
  10388. }
  10389. // softmax
  10390. // todo: exclude known -INF S[..] values from max and loop, assuming their results to be zero.
// don't forget to set their S values to zero
  10392. {
  10393. float max = -INFINITY;
  10394. ggml_vec_max_f32(M, &max, S);
  10395. ggml_float sum = 0.0;
  10396. {
  10397. #ifdef GGML_SOFT_MAX_ACCELERATE
  10398. max = -max;
  10399. vDSP_vsadd(S, 1, &max, S, 1, Mup);
  10400. vvexpf(S, S, &Mup);
  10401. ggml_vec_sum_f32(Mup, &sum, S);
  10402. #else
  10403. uint16_t scvt[GGML_SOFT_MAX_UNROLL];
  10404. ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 };
  10405. for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) {
  10406. float * SS = S + i;
  10407. for (int j = 0; j < GGML_SOFT_MAX_UNROLL; ++j) {
  10408. if (SS[j] == -INFINITY) {
  10409. SS[j] = 0.0f;
  10410. } else {
  10411. ggml_fp16_t s = GGML_FP32_TO_FP16(SS[j] - max);
  10412. memcpy(&scvt[j], &s, sizeof(uint16_t));
  10413. const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt[j]]);
  10414. sump[j] += (ggml_float)val;
  10415. SS[j] = val;
  10416. }
  10417. }
  10418. }
  10419. for (int i = 0; i < GGML_SOFT_MAX_UNROLL; i++) {
  10420. sum += sump[i];
  10421. }
  10422. #endif
  10423. }
  10424. assert(sum > 0.0);
  10425. sum = 1.0/sum;
  10426. ggml_vec_scale_f32(M, S, sum);
  10427. #ifndef NDEBUG
  10428. for (int i = 0; i < M; ++i) {
  10429. assert(!isnan(S[i]));
  10430. assert(!isinf(S[i]));
  10431. }
  10432. #endif
  10433. }
  10434. ggml_fp16_t * S16 = (ggml_fp16_t *) ((float *) params->wdata + ith*(2*Mup + CACHE_LINE_SIZE_F32) + Mup);
  10435. for (int64_t i = 0; i < M; i++) {
  10436. S16[i] = GGML_FP32_TO_FP16(S[i]);
  10437. }
// todo: exclude known zero S[..] values from dot (reducing nev0 and advancing the start of v and S16).
  10439. if (GGML_VEC_DOT_UNROLL == 1 || (nev1 % GGML_VEC_DOT_UNROLL != 0)) {
  10440. for (int64_t ic = 0; ic < nev1; ++ic) {
  10441. // dst indices
  10442. const int i1 = iq1;
  10443. const int i2 = iq2;
  10444. const int i3 = iq3;
  10445. // v indices
  10446. const int iv2 = iq2 % nev2;
  10447. const int iv3 = iq3;
  10448. ggml_vec_dot_f16(nev0,
  10449. (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)),
  10450. (ggml_fp16_t *) ((char *) v->data + ( ic*nbv1 + iv2*nbv2 + iv3*nbv3)),
  10451. S16);
  10452. }
  10453. } else {
  10454. for (int64_t ic = 0; ic < nev1; ic += GGML_VEC_DOT_UNROLL) {
  10455. // dst indices
  10456. const int i1 = iq1;
  10457. const int i2 = iq2;
  10458. const int i3 = iq3;
  10459. // v indices
  10460. const int iv2 = iq2 % nev2;
  10461. const int iv3 = iq3;
  10462. ggml_vec_dot_f16_unroll(nev0, nbv1,
  10463. (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)),
  10464. ((char *) v->data + ( ic*nbv1 + iv2*nbv2 + iv3*nbv3)),
  10465. S16);
  10466. }
  10467. }
  10468. }
  10469. }
  10470. static void ggml_compute_forward_flash_attn(
  10471. const struct ggml_compute_params * params,
  10472. const struct ggml_tensor * q,
  10473. const struct ggml_tensor * k,
  10474. const struct ggml_tensor * v,
  10475. const bool masked,
  10476. struct ggml_tensor * dst) {
  10477. switch (q->type) {
  10478. case GGML_TYPE_F16:
  10479. {
  10480. ggml_compute_forward_flash_attn_f16(params, q, k, v, masked, dst);
  10481. } break;
  10482. case GGML_TYPE_F32:
  10483. {
  10484. ggml_compute_forward_flash_attn_f32(params, q, k, v, masked, dst);
  10485. } break;
  10486. default:
  10487. {
  10488. GGML_ASSERT(false);
  10489. } break;
  10490. }
  10491. }
  10492. // ggml_compute_forward_flash_ff
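// One fused feed-forward block per row of a: S = gelu(b0^T a + b1), then
// dst = c0^T S + c1, i.e. a GELU MLP with fc weights/bias b0/b1 and projection
// weights/bias c0/c1; the intermediate [M] activation lives only in
// params->wdata instead of a separate tensor.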
  10493. static void ggml_compute_forward_flash_ff_f16(
  10494. const struct ggml_compute_params * params,
  10495. const struct ggml_tensor * a, // F16
  10496. const struct ggml_tensor * b0, // F16 fc_w
  10497. const struct ggml_tensor * b1, // F32 fc_b
  10498. const struct ggml_tensor * c0, // F16 proj_w
  10499. const struct ggml_tensor * c1, // F32 proj_b
  10500. struct ggml_tensor * dst) {
  10501. int64_t t0 = ggml_perf_time_us();
  10502. UNUSED(t0);
  10503. GGML_TENSOR_LOCALS(int64_t, nea, a, ne)
  10504. GGML_TENSOR_LOCALS(size_t, nba, a, nb)
  10505. GGML_TENSOR_LOCALS(int64_t, neb0, b0, ne)
  10506. GGML_TENSOR_LOCALS(size_t, nbb0, b0, nb)
  10507. GGML_TENSOR_LOCALS(int64_t, neb1, b1, ne)
  10508. GGML_TENSOR_LOCALS(size_t, nbb1, b1, nb)
  10509. GGML_TENSOR_LOCALS(int64_t, nec0, c0, ne)
  10510. GGML_TENSOR_LOCALS(size_t, nbc0, c0, nb)
  10511. GGML_TENSOR_LOCALS(int64_t, nec1, c1, ne)
  10512. GGML_TENSOR_LOCALS(size_t, nbc1, c1, nb)
  10513. GGML_TENSOR_LOCALS(int64_t, ne, dst, ne)
  10514. GGML_TENSOR_LOCALS(size_t, nb, dst, nb)
  10515. const int ith = params->ith;
  10516. const int nth = params->nth;
  10517. const int64_t D = nea0;
  10518. //const int64_t N = nea1;
  10519. const int64_t M = neb01;
  10520. GGML_ASSERT(ne0 == nea0);
  10521. GGML_ASSERT(ne1 == nea1);
  10522. GGML_ASSERT(ne2 == nea2);
  10523. GGML_ASSERT(nba0 == sizeof(ggml_fp16_t));
  10524. GGML_ASSERT(nbb00 == sizeof(ggml_fp16_t));
  10525. GGML_ASSERT(nbb10 == sizeof(float));
  10526. GGML_ASSERT(nbc00 == sizeof(ggml_fp16_t));
  10527. GGML_ASSERT(nbc10 == sizeof(float));
  10528. GGML_ASSERT(neb00 == D);
  10529. GGML_ASSERT(neb01 == M);
  10530. GGML_ASSERT(neb10 == M);
  10531. GGML_ASSERT(neb11 == 1);
  10532. GGML_ASSERT(nec00 == M);
  10533. GGML_ASSERT(nec01 == D);
  10534. GGML_ASSERT(nec10 == D);
  10535. GGML_ASSERT(nec11 == 1);
  10536. // dst cannot be transposed or permuted
  10537. GGML_ASSERT(nb0 == sizeof(float));
  10538. GGML_ASSERT(nb0 <= nb1);
  10539. GGML_ASSERT(nb1 <= nb2);
  10540. GGML_ASSERT(nb2 <= nb3);
  10541. if (params->type == GGML_TASK_INIT) {
  10542. return;
  10543. }
  10544. if (params->type == GGML_TASK_FINALIZE) {
  10545. return;
  10546. }
// parallelize by a rows using ggml_vec_dot_f16
  10548. // total rows in a
  10549. const int nr = nea1*nea2*nea3;
  10550. // rows per thread
  10551. const int dr = (nr + nth - 1)/nth;
  10552. // row range for this thread
  10553. const int ir0 = dr*ith;
  10554. const int ir1 = MIN(ir0 + dr, nr);
  10555. for (int ir = ir0; ir < ir1; ++ir) {
  10556. // a indices
  10557. const int ia3 = ir/(nea2*nea1);
  10558. const int ia2 = (ir - ia3*nea2*nea1)/nea1;
  10559. const int ia1 = (ir - ia3*nea2*nea1 - ia2*nea1);
  10560. float * S = (float *) params->wdata + ith*(2*M + CACHE_LINE_SIZE_F32);
  10561. for (int64_t ic = 0; ic < neb01; ++ic) {
  10562. // b0 indices
  10563. const int ib03 = ia3;
  10564. const int ib02 = ia2;
  10565. const int ib01 = ic;
  10566. // S indices
  10567. const int i1 = ib01;
  10568. ggml_vec_dot_f16(nea0,
  10569. S + i1,
  10570. (ggml_fp16_t *) ((char *) b0->data + (ib01*nbb01 + ib02*nbb02 + ib03*nbb03)),
  10571. (ggml_fp16_t *) ((char *) a->data + ( ia1*nba1 + ia2*nba2 + ia3*nba3)));
  10572. }
  10573. ggml_vec_add_f32(neb01, S, S, (float *) b1->data);
  10574. //ggml_vec_gelu_f32(neb01, S, S);
  10575. ggml_fp16_t * S16 = (ggml_fp16_t *) ((float *) params->wdata + ith*(2*M + CACHE_LINE_SIZE_F32) + M);
  10576. for (int64_t i = 0; i < M; i++) {
  10577. S16[i] = GGML_FP32_TO_FP16(S[i]);
  10578. }
  10579. ggml_vec_gelu_f16(neb01, S16, S16);
  10580. {
  10581. // dst indices
  10582. const int i1 = ia1;
  10583. const int i2 = ia2;
  10584. const int i3 = ia3;
  10585. for (int64_t ic = 0; ic < nec01; ++ic) {
  10586. ggml_vec_dot_f16(neb01,
  10587. (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)),
  10588. (ggml_fp16_t *) ((char *) c0->data + ( ic*nbc01 + i2*nbc02 + i3*nbc03)),
  10589. S16);
  10590. }
  10591. ggml_vec_add_f32(nec01,
  10592. (float *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3)),
  10593. (float *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3)),
  10594. (float *) c1->data);
  10595. }
  10596. }
  10597. }
  10598. static void ggml_compute_forward_flash_ff(
  10599. const struct ggml_compute_params * params,
  10600. const struct ggml_tensor * a,
  10601. const struct ggml_tensor * b0,
  10602. const struct ggml_tensor * b1,
  10603. const struct ggml_tensor * c0,
  10604. const struct ggml_tensor * c1,
  10605. struct ggml_tensor * dst) {
  10606. switch (b0->type) {
  10607. case GGML_TYPE_F16:
  10608. {
  10609. ggml_compute_forward_flash_ff_f16(params, a, b0, b1, c0, c1, dst);
  10610. } break;
  10611. case GGML_TYPE_F32:
  10612. {
  10613. GGML_ASSERT(false); // TODO
  10614. } break;
  10615. default:
  10616. {
  10617. GGML_ASSERT(false);
  10618. } break;
  10619. }
  10620. }
  10621. // ggml_compute_forward_flash_attn_back
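// The q, k and v gradients are packed back-to-back into a single dst tensor at
// the offsets offs_q/offs_k/offs_v computed below (GGML_PAD-aligned); INIT
// zeroes dst because all three gradients are accumulated with ggml_vec_mad_f32
// across the irep/iq1 loops.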
  10622. static void ggml_compute_forward_flash_attn_back_f32(
  10623. const struct ggml_compute_params * params,
  10624. const struct ggml_tensor * q,
  10625. const struct ggml_tensor * k,
  10626. const struct ggml_tensor * v,
  10627. const struct ggml_tensor * d,
  10628. const bool masked,
  10629. struct ggml_tensor * dst) {
  10630. int64_t t0 = ggml_perf_time_us();
  10631. UNUSED(t0);
  10632. GGML_TENSOR_LOCALS(int64_t, neq, q, ne)
  10633. GGML_TENSOR_LOCALS(size_t, nbq, q, nb)
  10634. GGML_TENSOR_LOCALS(int64_t, nek, k, ne)
  10635. GGML_TENSOR_LOCALS(size_t, nbk, k, nb)
  10636. GGML_TENSOR_LOCALS(int64_t, nev, v, ne)
  10637. GGML_TENSOR_LOCALS(size_t, nbv, v, nb)
  10638. GGML_TENSOR_LOCALS(int64_t, ned, d, ne)
  10639. GGML_TENSOR_LOCALS(size_t, nbd, d, nb)
  10640. GGML_TENSOR_LOCALS(int64_t, ne, dst, ne)
  10641. GGML_TENSOR_LOCALS(size_t, nb, dst, nb)
  10642. const int ith = params->ith;
  10643. const int nth = params->nth;
  10644. const int64_t D = neq0;
  10645. const int64_t N = neq1;
  10646. const int64_t P = nek1 - N;
  10647. const int64_t M = P + N;
  10648. const int Mup = ggml_up(M, GGML_SOFT_MAX_UNROLL);
  10649. const int mxDM = MAX(D, Mup);
  10650. // GGML_ASSERT(ne0 == D);
  10651. // GGML_ASSERT(ne1 == N);
  10652. GGML_ASSERT(P >= 0);
  10653. GGML_ASSERT(nbq0 == sizeof(float));
  10654. GGML_ASSERT(nbk0 == sizeof(float));
  10655. GGML_ASSERT(nbv0 == sizeof(float));
  10656. GGML_ASSERT(neq0 == D);
  10657. GGML_ASSERT(nek0 == D);
  10658. GGML_ASSERT(nev1 == D);
  10659. GGML_ASSERT(ned0 == D);
  10660. GGML_ASSERT(neq1 == N);
  10661. GGML_ASSERT(nek1 == N + P);
  10662. GGML_ASSERT(nev1 == D);
  10663. GGML_ASSERT(ned1 == N);
  10664. // dst cannot be transposed or permuted
  10665. GGML_ASSERT(nb0 == sizeof(float));
  10666. GGML_ASSERT(nb0 <= nb1);
  10667. GGML_ASSERT(nb1 <= nb2);
  10668. GGML_ASSERT(nb2 <= nb3);
  10669. if (params->type == GGML_TASK_INIT) {
  10670. if (ith == 0) {
  10671. memset(dst->data, 0, nb0*ne0*ne1*ne2*ne3);
  10672. }
  10673. return;
  10674. }
  10675. if (params->type == GGML_TASK_FINALIZE) {
  10676. return;
  10677. }
  10678. const int64_t elem_q = ggml_nelements(q);
  10679. const int64_t elem_k = ggml_nelements(k);
  10680. enum ggml_type result_type = dst->type;
  10681. GGML_ASSERT(ggml_blck_size(result_type) == 1);
  10682. const size_t tsize = ggml_type_size(result_type);
  10683. const size_t offs_q = 0;
  10684. const size_t offs_k = offs_q + GGML_PAD(elem_q * tsize, GGML_MEM_ALIGN);
  10685. const size_t offs_v = offs_k + GGML_PAD(elem_k * tsize, GGML_MEM_ALIGN);
  10686. void * grad_q = (char *) dst->data;
  10687. void * grad_k = (char *) dst->data + offs_k;
  10688. void * grad_v = (char *) dst->data + offs_v;
  10689. const size_t nbgq1 = nb0*neq0;
  10690. const size_t nbgq2 = nb0*neq0*neq1;
  10691. const size_t nbgq3 = nb0*neq0*neq1*neq2;
  10692. const size_t nbgk1 = nb0*nek0;
  10693. const size_t nbgk2 = nb0*nek0*nek1;
  10694. const size_t nbgk3 = nb0*nek0*nek1*neq2;
  10695. const size_t nbgv1 = nb0*nev0;
  10696. const size_t nbgv2 = nb0*nev0*nev1;
  10697. const size_t nbgv3 = nb0*nev0*nev1*neq2;
  10698. // parallelize by k rows using ggml_vec_dot_f32
  10699. // total rows in k
  10700. const int nr = nek2*nek3;
  10701. // rows per thread
  10702. const int dr = (nr + nth - 1)/nth;
  10703. // row range for this thread
  10704. const int ir0 = dr*ith;
  10705. const int ir1 = MIN(ir0 + dr, nr);
  10706. const float scale = 1.0f/sqrtf(D);
  10707. //printf("P=%d N=%d D=%d ir0=%d ir1=%d scale = %f\n", P, N, D, ir0, ir1, scale);
  10708. // how often k2 (and v2) is repeated in q2
  10709. int nrep = neq2/nek2;
  10710. for (int ir = ir0; ir < ir1; ++ir) {
// k indices
  10712. const int ik3 = ir/(nek2);
  10713. const int ik2 = ir - ik3*nek2;
  10714. const int iq3 = ik3;
  10715. const int id3 = ik3;
  10716. const int iv3 = ik3;
  10717. const int iv2 = ik2;
  10718. for (int irep = 0; irep < nrep; ++irep) {
  10719. const int iq2 = ik2 + irep*nek2;
  10720. const int id2 = iq2;
  10721. // (ik2 + irep*nek2) % nek2 == ik2
  10722. for (int iq1 = 0; iq1 < neq1; ++iq1) {
  10723. const int id1 = iq1;
// not sure about CACHE_LINE_SIZE_F32..
// - maybe it should not be multiplied by 2 and not included in the SM 1*(..) offset?
  10726. float * S = (float *) params->wdata + ith*2*(mxDM + CACHE_LINE_SIZE_F32) + 0*(mxDM+CACHE_LINE_SIZE_F32);
  10727. float * SM = (float *) params->wdata + ith*2*(mxDM + CACHE_LINE_SIZE_F32) + 1*(mxDM+CACHE_LINE_SIZE_F32);
  10728. for (int i = M; i < Mup; ++i) {
  10729. S[i] = -INFINITY;
  10730. }
  10731. const int64_t masked_begin = masked ? (P + iq1 + 1) : M;
  10732. for (int64_t ic = 0; ic < masked_begin; ++ic) {
  10733. // k indices
  10734. const int ik1 = ic;
  10735. // S indices
  10736. const int i1 = ik1;
  10737. ggml_vec_dot_f32(neq0,
  10738. S + i1,
  10739. (float *) ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)),
  10740. (float *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)));
  10741. }
  10742. // scale
  10743. ggml_vec_scale_f32(masked_begin, S, scale);
  10744. for (int64_t i = masked_begin; i < M; i++) {
  10745. S[i] = -INFINITY;
  10746. }
  10747. // softmax
  10748. // exclude known -INF S[..] values from max and loop
// don't forget to set their SM values to zero
  10750. {
  10751. float max = -INFINITY;
  10752. ggml_vec_max_f32(masked_begin, &max, S);
  10753. ggml_float sum = 0.0;
  10754. {
  10755. #ifdef GGML_SOFT_MAX_ACCELERATE
  10756. max = -max;
  10757. vDSP_vsadd(SM, 1, &max, SM, 1, Mup);
  10758. vvexpf(SM, SM, &Mup);
  10759. ggml_vec_sum_f32(Mup, &sum, SM);
  10760. #else
  10761. uint16_t scvt[GGML_SOFT_MAX_UNROLL]; UNUSED(scvt);
  10762. ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 };
  10763. for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) {
  10764. if (i >= masked_begin) {
  10765. break;
  10766. }
  10767. float * SR = S + i;
  10768. float * SW = SM + i;
  10769. for (int j = 0; j < GGML_SOFT_MAX_UNROLL; ++j) {
  10770. if (i + j >= masked_begin) {
  10771. break;
  10772. } else if (SR[j] == -INFINITY) {
  10773. SW[j] = 0.0f;
  10774. } else {
  10775. #ifndef GGML_FLASH_ATTN_EXP_FP16
  10776. const float val = expf(SR[j] - max);
  10777. #else
  10778. ggml_fp16_t s = GGML_FP32_TO_FP16(SR[j] - max);
  10779. memcpy(&scvt[j], &s, sizeof(uint16_t));
  10780. const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt[j]]);
  10781. #endif
  10782. sump[j] += (ggml_float)val;
  10783. SW[j] = val;
  10784. }
  10785. }
  10786. }
  10787. for (int i = 0; i < GGML_SOFT_MAX_UNROLL; i++) {
  10788. sum += sump[i];
  10789. }
  10790. #endif
  10791. }
  10792. assert(sum > 0.0);
  10793. sum = 1.0/sum;
  10794. ggml_vec_scale_f32(masked_begin, SM, sum);
  10795. }
  10796. // step-by-step explanation
  10797. {
  10798. // forward-process shape grads from backward process
  10799. // parallel_for ik2,ik3:
  10800. // for irep:
  10801. // iq2 = ik2 + irep*nek2
  10802. // k[:D,:M,:,:] [D,M,:,:] grad[k][:D,:M,ik2,ik3] += grad[kcur]
  10803. // q[:D,:N,:,:] [D,N,:,:] grad[q][:D,iq1,iq2,iq3] += grad[qcur]
  10804. // v[:M,:D,:,:] [M,D,:,:] grad[v][:M,:D,iv2,iv3] += grad[vcur]
  10805. // for iq1:
  10806. // kcur = k[:D,:M,ik2,ik3] [D,M,1,1] grad[kcur] = grad[S1].T @ qcur
  10807. // qcur = q[:D,iq1,iq2,iq3] [D,1,1,1] grad[qcur] = grad[S1] @ kcur
  10808. // vcur = v[:M,:D,iv2,iv3] [M,D,1,1] grad[vcur] = grad[S5].T @ S4
  10809. // S0 = -Inf [D,1,1,1]
  10810. // ~S1[i] = dot(kcur[:D,i], qcur)
  10811. // S1 = qcur @ kcur.T [M,1,1,1] grad[S1] = grad[S2] * scale
  10812. // S2 = S1 * scale [M,1,1,1] grad[S2] = diag_mask_zero(grad[S3], P)
  10813. // S3 = diag_mask_inf(S2, P) [M,1,1,1] grad[S3] = S4 * (grad[S4] - dot(S4, grad[S4]))
  10814. // S4 = softmax(S3) [M,1,1,1] grad[S4] = grad[S5] @ vcur
  10815. // ~S5[i] = dot(vcur[:,i], S4)
  10816. // S5 = S4 @ vcur.T [D,1,1,1] grad[S5] = d[:D,id1,id2,id3]
  10817. // ~dst[i,iq1,iq2,iq3] = S5[i] ^
  10818. // dst[:D,iq1,iq2,iq3] = S5 | grad[dst[:D,iq1,iq2,iq3]] = d[:D,id1,id2,id3]
  10819. // dst backward-/ grad[dst] = d
  10820. //
  10821. // output gradients with their dependencies:
  10822. //
  10823. // grad[kcur] = grad[S1].T @ qcur
  10824. // grad[S1] = diag_mask_zero(grad[S3], P) * scale
  10825. // grad[S3] = S4 * (grad[S4] - dot(S4, grad[S4]))
  10826. // grad[S4] = grad[S5] @ vcur
  10827. // grad[S4] = d[:D,id1,id2,id3] @ vcur
  10828. // grad[qcur] = grad[S1] @ kcur
  10829. // grad[vcur] = grad[S5].T @ S4
  10830. // grad[vcur] = d[:D,id1,id2,id3].T @ S4
  10831. //
  10832. // in post-order:
  10833. //
  10834. // S1 = qcur @ kcur.T
  10835. // S2 = S1 * scale
  10836. // S3 = diag_mask_inf(S2, P)
  10837. // S4 = softmax(S3)
  10838. // grad[S4] = d[:D,id1,id2,id3] @ vcur
  10839. // grad[S3] = S4 * (grad[S4] - dot(S4, grad[S4]))
  10840. // grad[S1] = diag_mask_zero(grad[S3], P) * scale
  10841. // grad[qcur] = grad[S1] @ kcur
  10842. // grad[kcur] = grad[S1].T @ qcur
  10843. // grad[vcur] = d[:D,id1,id2,id3].T @ S4
  10844. //
  10845. // using less variables (SM=S4):
  10846. //
  10847. // S = diag_mask_inf(qcur @ kcur.T * scale, P)
  10848. // SM = softmax(S)
  10849. // S = d[:D,iq1,iq2,iq3] @ vcur
  10850. // dot_SM_gradSM = dot(SM, S)
  10851. // S = SM * (S - dot(SM, S))
  10852. // S = diag_mask_zero(S, P) * scale
  10853. //
  10854. // grad[q][:D,iq1,iq2,iq3] += S @ kcur
  10855. // grad[k][:D,:M,ik2,ik3] += S.T @ qcur
  10856. // grad[v][:M,:D,iv2,iv3] += d[:D,id1,id2,id3].T @ SM
  10857. }
  10858. // S = gradSM = d[:D,id1,id2,id3] @ vcur[:,:,iv2,iv3]
  10859. // S = d[:D,id1,id2,id3] @ vcur[:,:,iv2,iv3]
  10860. // for ic:
  10861. // S[:M] += vcur[:M,ic,iv2,iv3] * d[ic,id1,id2,id3]
  10862. // exclude known future zero S[..] values from operation
  10863. ggml_vec_set_f32(masked_begin, S, 0);
  10864. for (int64_t ic = 0; ic < D; ++ic) {
  10865. ggml_vec_mad_f32(masked_begin,
  10866. S,
  10867. (float *) ((char *) v->data + ( ic*nbv1 + iv2*nbv2 + iv3*nbv3)),
  10868. *(float *) ((char *) d->data + (ic*nbd0 + id1*nbd1 + id2*nbd2 + id3*nbd3)));
  10869. }
  10870. // S = SM * (S - dot(SM, S))
  10871. float dot_SM_gradSM = 0;
  10872. ggml_vec_dot_f32 (masked_begin, &dot_SM_gradSM, SM, S);
  10873. ggml_vec_acc1_f32(M, S, -dot_SM_gradSM);
  10874. ggml_vec_mul_f32 (masked_begin, S, S, SM);
  10875. // S = diag_mask_zero(S, P) * scale
  10876. // already done by above ggml_vec_set_f32
  10877. // exclude known zero S[..] values from operation
  10878. ggml_vec_scale_f32(masked_begin, S, scale);
  10879. // S shape [M,1]
  10880. // SM shape [M,1]
  10881. // kcur shape [D,M]
  10882. // qcur shape [D,1]
  10883. // vcur shape [M,D]
  10884. // grad[q][:D,iq1,iq2,iq3] += S @ kcur
  10885. // grad[q][:D,iq1,iq2,iq3] += shape[M,1] @ shape[D,M]
  10886. // for ic:
  10887. // grad[q][:D,iq1,iq2,iq3] += S[ic] * kcur[:D,ic,ik2,ik3]
  10888. // exclude known zero S[..] values from loop
  10889. for (int64_t ic = 0; ic < masked_begin; ++ic) {
  10890. ggml_vec_mad_f32(D,
  10891. (float *) ((char *) grad_q + (iq1*nbgq1 + iq2*nbgq2 + iq3*nbgq3)),
  10892. (float *) ((char *) k->data + (ic*nbk1 + ik2*nbk2 + ik3*nbk3)),
  10893. S[ic]);
  10894. }
  10895. // grad[k][:D,:M,iq2,iq3] += S.T @ qcur
  10896. // for ic:
  10897. // grad[k][:D,ic,iq2,iq3] += S.T[0,ic] * qcur[:D,0]
  10898. // grad[k][:D,ic,iq2,iq3] += S[ic] * qcur[:D,0]
  10899. // exclude known zero S[..] values from loop
  10900. for (int64_t ic = 0; ic < masked_begin; ++ic) {
  10901. ggml_vec_mad_f32(D,
  10902. (float *) ((char *) grad_k + (ic*nbgk1 + ik2*nbgk2 + ik3*nbgk3)),
  10903. (float *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)),
  10904. S[ic]);
  10905. }
  10906. // grad[v][:M,:D,iv2,iv3] += d[:D,id1,id2,id3].T @ SM
  10907. // for ic:
  10908. // grad[v][:M,ic,iv2,iv3] += d[:D,id1,id2,id3].T[0,ic] * SM[:M]
  10909. // grad[v][:M,ic,iv2,iv3] += d[ic,id1,id2,id3] * SM[:M]
  10910. // exclude known zero SM[..] values from mad
  10911. for (int64_t ic = 0; ic < D; ++ic) {
  10912. ggml_vec_mad_f32(masked_begin,
  10913. (float *) ((char *) grad_v + ( ic*nbgv1 + iv2*nbgv2 + iv3*nbgv3)),
  10914. SM,
  10915. *(float *) ((char *) d->data + (ic*nbd0 + id1*nbd1 + id2*nbd2 + id3*nbd3)));
  10916. }
  10917. }
  10918. }
  10919. }
  10920. }
  10921. static void ggml_compute_forward_flash_attn_back(
  10922. const struct ggml_compute_params * params,
  10923. const struct ggml_tensor * q,
  10924. const struct ggml_tensor * k,
  10925. const struct ggml_tensor * v,
  10926. const struct ggml_tensor * d,
  10927. const bool masked,
  10928. struct ggml_tensor * dst) {
  10929. switch (q->type) {
  10930. case GGML_TYPE_F32:
  10931. {
  10932. ggml_compute_forward_flash_attn_back_f32(params, q, k, v, d, masked, dst);
  10933. } break;
  10934. default:
  10935. {
  10936. GGML_ASSERT(false);
  10937. } break;
  10938. }
  10939. }
  10940. // ggml_compute_forward_win_part
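// Splits src0 (ne0 = channels, ne1/ne2 = spatial dims) into nep0*nep1
// non-overlapping w x w windows, zero-filling where w does not divide the
// spatial dims; dst is [ne0, w, w, nep0*nep1]. Hedged example: a 14x14 map
// with w == 7 gives nep0 == nep1 == 2, i.e. four 7x7 windows.
// ggml_compute_forward_win_unpart below inverts the operation.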
  10941. static void ggml_compute_forward_win_part_f32(
  10942. const struct ggml_compute_params * params,
  10943. const struct ggml_tensor * src0,
  10944. struct ggml_tensor * dst) {
  10945. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  10946. return;
  10947. }
  10948. GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne)
  10949. GGML_TENSOR_LOCALS(int64_t, ne, dst, ne)
  10950. const int32_t nep0 = ((const int32_t *)(dst->op_params))[0];
  10951. const int32_t nep1 = ((const int32_t *)(dst->op_params))[1];
  10952. const int32_t w = ((const int32_t *)(dst->op_params))[2];
  10953. assert(ne00 == ne0);
  10954. assert(ne3 == nep0*nep1);
  10955. // TODO: optimize / multi-thread
  10956. for (int py = 0; py < nep1; ++py) {
  10957. for (int px = 0; px < nep0; ++px) {
  10958. const int64_t i3 = py*nep0 + px;
  10959. for (int64_t i2 = 0; i2 < ne2; ++i2) {
  10960. for (int64_t i1 = 0; i1 < ne1; ++i1) {
  10961. for (int64_t i0 = 0; i0 < ne0; ++i0) {
  10962. const int64_t i02 = py*w + i2;
  10963. const int64_t i01 = px*w + i1;
  10964. const int64_t i00 = i0;
  10965. const int64_t i = i3*ne2*ne1*ne0 + i2*ne1*ne0 + i1*ne0 + i0;
  10966. const int64_t j = i02*ne01*ne00 + i01*ne00 + i00;
  10967. if (py*w + i2 >= ne02 || px*w + i1 >= ne01) {
  10968. ((float *) dst->data)[i] = 0.0f;
  10969. } else {
  10970. ((float *) dst->data)[i] = ((float *) src0->data)[j];
  10971. }
  10972. }
  10973. }
  10974. }
  10975. }
  10976. }
  10977. }
  10978. static void ggml_compute_forward_win_part(
  10979. const struct ggml_compute_params * params,
  10980. const struct ggml_tensor * src0,
  10981. struct ggml_tensor * dst) {
  10982. switch (src0->type) {
  10983. case GGML_TYPE_F32:
  10984. {
  10985. ggml_compute_forward_win_part_f32(params, src0, dst);
  10986. } break;
  10987. default:
  10988. {
  10989. GGML_ASSERT(false);
  10990. } break;
  10991. }
  10992. }
  10993. // ggml_compute_forward_win_unpart
  10994. static void ggml_compute_forward_win_unpart_f32(
  10995. const struct ggml_compute_params * params,
  10996. const struct ggml_tensor * src0,
  10997. struct ggml_tensor * dst) {
  10998. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  10999. return;
  11000. }
  11001. GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne)
  11002. GGML_TENSOR_LOCALS(int64_t, ne, dst, ne)
  11003. const int32_t w = ((const int32_t *)(dst->op_params))[0];
  11004. // padding
  11005. const int px = (w - ne1%w)%w;
  11006. //const int py = (w - ne2%w)%w;
  11007. const int npx = (px + ne1)/w;
  11008. //const int npy = (py + ne2)/w;
  11009. assert(ne0 == ne00);
  11010. // TODO: optimize / multi-thread
  11011. for (int64_t i2 = 0; i2 < ne2; ++i2) {
  11012. for (int64_t i1 = 0; i1 < ne1; ++i1) {
  11013. for (int64_t i0 = 0; i0 < ne0; ++i0) {
  11014. const int ip2 = i2/w;
  11015. const int ip1 = i1/w;
  11016. const int64_t i02 = i2%w;
  11017. const int64_t i01 = i1%w;
  11018. const int64_t i00 = i0;
  11019. const int64_t i = (ip2*npx + ip1)*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00 + i00;
  11020. const int64_t j = i2*ne1*ne0 + i1*ne0 + i0;
  11021. ((float *) dst->data)[j] = ((float *) src0->data)[i];
  11022. }
  11023. }
  11024. }
  11025. }
  11026. static void ggml_compute_forward_win_unpart(
  11027. const struct ggml_compute_params * params,
  11028. const struct ggml_tensor * src0,
  11029. struct ggml_tensor * dst) {
  11030. switch (src0->type) {
  11031. case GGML_TYPE_F32:
  11032. {
  11033. ggml_compute_forward_win_unpart_f32(params, src0, dst);
  11034. } break;
  11035. default:
  11036. {
  11037. GGML_ASSERT(false);
  11038. } break;
  11039. }
  11040. }
// ggml_compute_forward_unary
  11042. static void ggml_compute_forward_unary(
  11043. const struct ggml_compute_params * params,
  11044. const struct ggml_tensor * src0,
  11045. struct ggml_tensor * dst) {
  11046. const enum ggml_unary_op op = ggml_get_unary_op(dst);
  11047. switch (op) {
  11048. case GGML_UNARY_OP_ABS:
  11049. {
  11050. ggml_compute_forward_abs(params, src0, dst);
  11051. } break;
  11052. case GGML_UNARY_OP_SGN:
  11053. {
  11054. ggml_compute_forward_sgn(params, src0, dst);
  11055. } break;
  11056. case GGML_UNARY_OP_NEG:
  11057. {
  11058. ggml_compute_forward_neg(params, src0, dst);
  11059. } break;
  11060. case GGML_UNARY_OP_STEP:
  11061. {
  11062. ggml_compute_forward_step(params, src0, dst);
  11063. } break;
  11064. case GGML_UNARY_OP_TANH:
  11065. {
  11066. ggml_compute_forward_tanh(params, src0, dst);
  11067. } break;
  11068. case GGML_UNARY_OP_ELU:
  11069. {
  11070. ggml_compute_forward_elu(params, src0, dst);
  11071. } break;
  11072. case GGML_UNARY_OP_RELU:
  11073. {
  11074. ggml_compute_forward_relu(params, src0, dst);
  11075. } break;
  11076. case GGML_UNARY_OP_GELU:
  11077. {
  11078. ggml_compute_forward_gelu(params, src0, dst);
  11079. } break;
  11080. case GGML_UNARY_OP_GELU_QUICK:
  11081. {
  11082. ggml_compute_forward_gelu_quick(params, src0, dst);
  11083. } break;
  11084. case GGML_UNARY_OP_SILU:
  11085. {
  11086. ggml_compute_forward_silu(params, src0, dst);
  11087. } break;
  11088. default:
  11089. {
  11090. GGML_ASSERT(false);
  11091. } break;
  11092. }
  11093. }
  11094. // ggml_compute_forward_get_rel_pos
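// Gathers rows of a relative-position embedding table: for indices i1 and i2
// it reads row (w - i1 - 1) + i2, i.e. the embedding of the relative offset
// i2 - i1 shifted by w - 1 to be non-negative, matching the SAM reference
// linked below.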
  11095. static void ggml_compute_forward_get_rel_pos_f16(
  11096. const struct ggml_compute_params * params,
  11097. const struct ggml_tensor * src0,
  11098. struct ggml_tensor * dst) {
  11099. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  11100. return;
  11101. }
  11102. // ref: https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/modeling/image_encoder.py#L292-L322
  11103. GGML_TENSOR_UNARY_OP_LOCALS
  11104. const int64_t w = ne1;
  11105. ggml_fp16_t * src0_data = (ggml_fp16_t *) src0->data;
  11106. ggml_fp16_t * dst_data = (ggml_fp16_t *) dst->data;
  11107. for (int64_t i2 = 0; i2 < ne2; ++i2) {
  11108. for (int64_t i1 = 0; i1 < ne1; ++i1) {
  11109. const int64_t pos = (w - i1 - 1) + i2;
  11110. for (int64_t i0 = 0; i0 < ne0; ++i0) {
  11111. dst_data[i2*ne1*ne0 + i1*ne0 + i0] = src0_data[pos*ne00 + i0];
  11112. }
  11113. }
  11114. }
  11115. }
  11116. static void ggml_compute_forward_get_rel_pos(
  11117. const struct ggml_compute_params * params,
  11118. const struct ggml_tensor * src0,
  11119. struct ggml_tensor * dst) {
  11120. switch (src0->type) {
  11121. case GGML_TYPE_F16:
  11122. {
  11123. ggml_compute_forward_get_rel_pos_f16(params, src0, dst);
  11124. } break;
  11125. default:
  11126. {
  11127. GGML_ASSERT(false);
  11128. } break;
  11129. }
  11130. }
  11131. // ggml_compute_forward_add_rel_pos
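// Adds decomposed relative-position biases to an attention map, following the
// SAM reference linked below: for each position, the "width" term src1_e is
// accumulated with stride ne10 and the "height" term src2_e contiguously, so
// every logit receives rel_w + rel_h without materializing the two broadcast
// tensors.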
  11132. static void ggml_compute_forward_add_rel_pos_f32(
  11133. const struct ggml_compute_params * params,
  11134. const struct ggml_tensor * src0,
  11135. const struct ggml_tensor * src1,
  11136. const struct ggml_tensor * src2,
  11137. struct ggml_tensor * dst) {
  11138. const bool inplace = (bool) ((int32_t *) dst->op_params)[0];
  11139. if (!inplace && params->type == GGML_TASK_INIT) {
  11140. memcpy((char *) dst->data, (char *) src0->data, ggml_nbytes(dst));
  11141. return;
  11142. }
  11143. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  11144. return;
  11145. }
  11146. int64_t t0 = ggml_perf_time_us();
  11147. UNUSED(t0);
  11148. // ref: https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/modeling/image_encoder.py#L357-L359
  11149. float * src1_data = (float *) src1->data;
  11150. float * src2_data = (float *) src2->data;
  11151. float * dst_data = (float *) dst->data;
  11152. const int64_t ne10 = src1->ne[0];
  11153. const int64_t ne11 = src1->ne[1];
  11154. const int64_t ne12 = src1->ne[2];
  11155. const int64_t ne13 = src1->ne[3];
  11156. const int ith = params->ith;
  11157. const int nth = params->nth;
  11158. // total patches in dst
  11159. const int np = ne13;
  11160. // patches per thread
  11161. const int dp = (np + nth - 1)/nth;
  11162. // patch range for this thread
  11163. const int ip0 = dp*ith;
  11164. const int ip1 = MIN(ip0 + dp, np);
  11165. for (int64_t i13 = ip0; i13 < ip1; ++i13) {
  11166. for (int64_t i12 = 0; i12 < ne12; ++i12) {
  11167. for (int64_t i11 = 0; i11 < ne11; ++i11) {
  11168. const int64_t jp1 = i13*ne12*ne11*ne10 + i12*ne11*ne10 + i11*ne10;
  11169. for (int64_t i10 = 0; i10 < ne10; ++i10) {
  11170. const int64_t jp0 = jp1 + i10;
  11171. const float src1_e = src1_data[jp0];
  11172. const float src2_e = src2_data[jp0];
  11173. const int64_t jdh = jp0 * ne10;
  11174. const int64_t jdw = jdh - (ne10 - 1) * i10;
  11175. for (int64_t j = 0; j < ne10; ++j) {
  11176. dst_data[jdh + j ] += src2_e;
  11177. dst_data[jdw + j*ne10] += src1_e;
  11178. }
  11179. }
  11180. }
  11181. }
  11182. }
  11183. }
static void ggml_compute_forward_add_rel_pos(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        const struct ggml_tensor * src2,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_add_rel_pos_f32(params, src0, src1, src2, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_map_unary

static void ggml_compute_forward_map_unary_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst,
        const ggml_unary_op_f32_t fun) {
    GGML_ASSERT(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert( dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        fun(nc,
                (float *) ((char *) dst->data  + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

static void ggml_compute_forward_map_unary(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst,
        const ggml_unary_op_f32_t fun) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_map_unary_f32(params, src0, dst, fun);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_map_binary

static void ggml_compute_forward_map_binary_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst,
        const ggml_binary_op_f32_t fun) {
    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert( dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));
    assert(src1->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        fun(nc,
                (float *) ((char *) dst->data  + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])),
                (float *) ((char *) src1->data + i*(src1->nb[1])));
    }
}

static void ggml_compute_forward_map_binary(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst,
        const ggml_binary_op_f32_t fun) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_map_binary_f32(params, src0, src1, dst, fun);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_map_custom1

static void ggml_compute_forward_map_custom1_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * a,
        struct ggml_tensor * dst,
        const ggml_custom1_op_f32_t fun) {
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    fun(dst, a);
}

// ggml_compute_forward_map_custom2

static void ggml_compute_forward_map_custom2_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * a,
        const struct ggml_tensor * b,
        struct ggml_tensor * dst,
        const ggml_custom2_op_f32_t fun) {
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    fun(dst, a, b);
}

// ggml_compute_forward_map_custom3

static void ggml_compute_forward_map_custom3_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * a,
        const struct ggml_tensor * b,
        const struct ggml_tensor * c,
        struct ggml_tensor * dst,
        const ggml_custom3_op_f32_t fun) {
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    fun(dst, a, b, c);
}

// ggml_compute_forward_map_custom1

static void ggml_compute_forward_map_custom1(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * a,
        struct ggml_tensor * dst) {
    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    struct ggml_map_custom1_op_params * p = (struct ggml_map_custom1_op_params *) dst->op_params;

    p->fun(dst, a, params->ith, params->nth, p->userdata);
}

// ggml_compute_forward_map_custom2

static void ggml_compute_forward_map_custom2(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * a,
        const struct ggml_tensor * b,
        struct ggml_tensor * dst) {
    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    struct ggml_map_custom2_op_params * p = (struct ggml_map_custom2_op_params *) dst->op_params;

    p->fun(dst, a, b, params->ith, params->nth, p->userdata);
}

// ggml_compute_forward_map_custom3

static void ggml_compute_forward_map_custom3(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * a,
        const struct ggml_tensor * b,
        const struct ggml_tensor * c,
        struct ggml_tensor * dst) {
    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    struct ggml_map_custom3_op_params * p = (struct ggml_map_custom3_op_params *) dst->op_params;

    p->fun(dst, a, b, c, params->ith, params->nth, p->userdata);
}
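
// note: unlike the _f32 variants above (which run only on thread 0), the
// ggml_compute_forward_map_custom{1,2,3} wrappers pass params->ith/params->nth through,
// so the user callback is expected to split the work itself. A minimal sketch
// (illustrative only -- "my_custom_op" is a hypothetical user function):
//
//   static void my_custom_op(struct ggml_tensor * dst, const struct ggml_tensor * a,
//                            int ith, int nth, void * userdata) {
//       const int64_t nr = ggml_nrows(dst);
//       for (int64_t ir = ith; ir < nr; ir += nth) {
//           // process row ir of a into row ir of dst
//       }
//   }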
// ggml_compute_forward_cross_entropy_loss

static void ggml_compute_forward_cross_entropy_loss_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_is_contiguous(src0));
    GGML_ASSERT(ggml_is_contiguous(src1));
    GGML_ASSERT(ggml_is_scalar(dst));
    GGML_ASSERT(ggml_are_same_shape(src0, src1));

    const int ith = params->ith;
    const int nth = params->nth;

    float * sums = (float *) params->wdata;

    // TODO: handle transposed/permuted matrices
    const int nc = src0->ne[0];
    const int nr = ggml_nrows(src0);

    GGML_ASSERT(params->wsize >= sizeof(float) * (nth + nth * nc));

    if (params->type == GGML_TASK_INIT) {
        if (ith == 0) {
            memset(sums, 0, sizeof(float) * (nth + nth * nc));
        }
        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        if (ith == 0) {
            float * dp = (float *) dst->data;
            ggml_vec_sum_f32(nth, dp, sums);
            dp[0] *= -1.0f / (float) nr;
        }
        return;
    }

    const double eps = 1e-9;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        float * s0 = (float *)((char *) src0->data + i1*src0->nb[1]);
        float * s1 = (float *)((char *) src1->data + i1*src1->nb[1]);
        float * st = ((float *) params->wdata) + nth + ith*nc;

#ifndef NDEBUG
        for (int i = 0; i < nc; ++i) {
            //printf("p[%d] = %f\n", i, p[i]);
            assert(!isnan(s0[i]));
            assert(!isnan(s1[i]));
        }
#endif
        // soft_max
        ggml_float sum = 0.0;
        {
            float max = -INFINITY;
            ggml_vec_max_f32(nc, &max, s0);

            uint16_t scvt; UNUSED(scvt);
            for (int i = 0; i < nc; i++) {
                if (s0[i] == -INFINITY) {
                    st[i] = 0.0f;
                } else {
#ifndef GGML_CROSS_ENTROPY_EXP_FP16
                    const float s = s0[i] - max;
                    const float val = expf(s);
#else
                    ggml_fp16_t s = GGML_FP32_TO_FP16(s0[i] - max);
                    memcpy(&scvt, &s, sizeof(scvt));
                    const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt]);
#endif
                    sum += (ggml_float)val;
                    st[i] = val;
                }
            }

            assert(sum > 0.0);
            // sum = 1.0/sum;
        }
        // avoid log(0) by rescaling from [0..1] to [eps..1]
        sum = (1.0 - eps) / sum;
        ggml_vec_scale_f32(nc, st, sum);
        ggml_vec_add1_f32(nc, st, st, eps);
        ggml_vec_log_f32(nc, st, st);
        ggml_vec_mul_f32(nc, st, st, s1);

        float st_sum = 0;
        ggml_vec_sum_f32(nc, &st_sum, st);
        sums[ith] += st_sum;

#ifndef NDEBUG
        for (int i = 0; i < nc; ++i) {
            assert(!isnan(st[i]));
            assert(!isinf(st[i]));
        }
#endif
    }
}
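
// note: per row, the loop above accumulates sum_i s1[i] * log(softmax(s0)[i]) with the
// softmax rescaled from [0,1] to [eps,1] to avoid log(0); the per-thread partial sums
// land in sums[ith], and the FINALIZE step reduces them and applies the -1/nr factor.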
static void ggml_compute_forward_cross_entropy_loss(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_cross_entropy_loss_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_cross_entropy_loss_back

static void ggml_compute_forward_cross_entropy_loss_back_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        const struct ggml_tensor * opt0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_is_contiguous(dst));
    GGML_ASSERT(ggml_is_contiguous(src0));
    GGML_ASSERT(ggml_is_contiguous(src1));
    GGML_ASSERT(ggml_is_contiguous(opt0));
    GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));

    const int64_t ith = params->ith;
    const int64_t nth = params->nth;

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const double eps = 1e-9;

    // TODO: handle transposed/permuted matrices
    const int64_t nc = src0->ne[0];
    const int64_t nr = ggml_nrows(src0);

    // rows per thread
    const int64_t dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int64_t ir0 = dr*ith;
    const int64_t ir1 = MIN(ir0 + dr, nr);

    float * d = (float *) opt0->data;

    for (int64_t i1 = ir0; i1 < ir1; i1++) {
        float * ds0 = (float *)((char *) dst->data  + i1*dst->nb[1]);
        float * s0  = (float *)((char *) src0->data + i1*src0->nb[1]);
        float * s1  = (float *)((char *) src1->data + i1*src1->nb[1]);

#ifndef NDEBUG
        for (int i = 0; i < nc; ++i) {
            //printf("p[%d] = %f\n", i, p[i]);
            assert(!isnan(s0[i]));
            assert(!isnan(s1[i]));
        }
#endif

        // soft_max
        ggml_float sum = 0.0;
        {
            float max = -INFINITY;
            ggml_vec_max_f32(nc, &max, s0);

            uint16_t scvt; UNUSED(scvt);
            for (int i = 0; i < nc; i++) {
                if (s0[i] == -INFINITY) {
                    ds0[i] = 0.0f;
                } else {
#ifndef GGML_CROSS_ENTROPY_EXP_FP16
                    const float s = s0[i] - max;
                    const float val = expf(s);
#else
                    ggml_fp16_t s = GGML_FP32_TO_FP16(s0[i] - max);
                    memcpy(&scvt, &s, sizeof(scvt));
                    const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt]);
#endif
                    sum += (ggml_float)val;
                    ds0[i] = val;
                }
            }

            assert(sum > 0.0);
            sum = (1.0 - eps)/sum;
        }

        // grad(src0) = (softmax(src0) - src1) * grad(cross_entropy_loss(src0, src1)) / nr
        ggml_vec_scale_f32(nc, ds0, sum);
        ggml_vec_add1_f32(nc, ds0, ds0, eps);
        ggml_vec_sub_f32(nc, ds0, ds0, s1);
        ggml_vec_scale_f32(nc, ds0, d[0] / (float) nr);

#ifndef NDEBUG
        for (int i = 0; i < nc; ++i) {
            assert(!isnan(ds0[i]));
            assert(!isinf(ds0[i]));
        }
#endif
    }
}

static void ggml_compute_forward_cross_entropy_loss_back(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        const struct ggml_tensor * opt0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_cross_entropy_loss_back_f32(params, src0, src1, opt0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
/////////////////////////////////

static void ggml_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor) {
    GGML_ASSERT(params);

    if (tensor->op == GGML_OP_NONE) {
        return;
    }

#ifdef GGML_USE_CUBLAS
    bool skip_cpu = ggml_cuda_compute_forward(params, tensor);
    if (skip_cpu) {
        return;
    }
    GGML_ASSERT(tensor->src[0] == NULL || tensor->src[0]->backend == GGML_BACKEND_CPU);
    GGML_ASSERT(tensor->src[1] == NULL || tensor->src[1]->backend == GGML_BACKEND_CPU);
#endif // GGML_USE_CUBLAS

    switch (tensor->op) {
        case GGML_OP_DUP:
            {
                ggml_compute_forward_dup(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_ADD:
            {
                ggml_compute_forward_add(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_ADD1:
            {
                ggml_compute_forward_add1(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_ACC:
            {
                ggml_compute_forward_acc(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_SUB:
            {
                ggml_compute_forward_sub(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_MUL:
            {
                ggml_compute_forward_mul(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_DIV:
            {
                ggml_compute_forward_div(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_SQR:
            {
                ggml_compute_forward_sqr(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_SQRT:
            {
                ggml_compute_forward_sqrt(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_LOG:
            {
                ggml_compute_forward_log(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_SUM:
            {
                ggml_compute_forward_sum(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_SUM_ROWS:
            {
                ggml_compute_forward_sum_rows(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_MEAN:
            {
                ggml_compute_forward_mean(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_ARGMAX:
            {
                ggml_compute_forward_argmax(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_REPEAT:
            {
                ggml_compute_forward_repeat(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_REPEAT_BACK:
            {
                ggml_compute_forward_repeat_back(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_CONCAT:
            {
                ggml_compute_forward_concat(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_SILU_BACK:
            {
                ggml_compute_forward_silu_back(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_NORM:
            {
                ggml_compute_forward_norm(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_RMS_NORM:
            {
                ggml_compute_forward_rms_norm(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_RMS_NORM_BACK:
            {
                ggml_compute_forward_rms_norm_back(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_GROUP_NORM:
            {
                ggml_compute_forward_group_norm(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_MUL_MAT:
            {
                ggml_compute_forward_mul_mat(params, tensor->src[0], tensor->src[1], tensor, 0, tensor->ne[1]);
            } break;
        case GGML_OP_MUL_MAT_ID:
            {
                ggml_compute_forward_mul_mat_id(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_OUT_PROD:
            {
                ggml_compute_forward_out_prod(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_SCALE:
            {
                ggml_compute_forward_scale(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_SET:
            {
                ggml_compute_forward_set(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_CPY:
            {
                ggml_compute_forward_cpy(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_CONT:
            {
                ggml_compute_forward_cont(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_RESHAPE:
            {
                ggml_compute_forward_reshape(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_VIEW:
            {
                ggml_compute_forward_view(params, tensor->src[0]);
            } break;
        case GGML_OP_PERMUTE:
            {
                ggml_compute_forward_permute(params, tensor->src[0]);
            } break;
        case GGML_OP_TRANSPOSE:
            {
                ggml_compute_forward_transpose(params, tensor->src[0]);
            } break;
        case GGML_OP_GET_ROWS:
            {
                ggml_compute_forward_get_rows(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_GET_ROWS_BACK:
            {
                ggml_compute_forward_get_rows_back(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_DIAG:
            {
                ggml_compute_forward_diag(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_DIAG_MASK_INF:
            {
                ggml_compute_forward_diag_mask_inf(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_DIAG_MASK_ZERO:
            {
                ggml_compute_forward_diag_mask_zero(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_SOFT_MAX:
            {
                ggml_compute_forward_soft_max(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_SOFT_MAX_BACK:
            {
                ggml_compute_forward_soft_max_back(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_ROPE:
            {
                ggml_compute_forward_rope(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_ROPE_BACK:
            {
                ggml_compute_forward_rope_back(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_ALIBI:
            {
                ggml_compute_forward_alibi(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_CLAMP:
            {
                ggml_compute_forward_clamp(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_CONV_TRANSPOSE_1D:
            {
                ggml_compute_forward_conv_transpose_1d(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_IM2COL:
            {
                ggml_compute_forward_im2col(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_CONV_TRANSPOSE_2D:
            {
                ggml_compute_forward_conv_transpose_2d(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_POOL_1D:
            {
                ggml_compute_forward_pool_1d(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_POOL_2D:
            {
                ggml_compute_forward_pool_2d(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_UPSCALE:
            {
                ggml_compute_forward_upscale(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_PAD:
            {
                ggml_compute_forward_pad(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_ARGSORT:
            {
                ggml_compute_forward_argsort(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_LEAKY_RELU:
            {
                ggml_compute_forward_leaky_relu(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_FLASH_ATTN:
            {
                const int32_t t = ggml_get_op_params_i32(tensor, 0);
                GGML_ASSERT(t == 0 || t == 1);
                const bool masked = t != 0;
                ggml_compute_forward_flash_attn(params, tensor->src[0], tensor->src[1], tensor->src[2], masked, tensor);
            } break;
        case GGML_OP_FLASH_FF:
            {
                ggml_compute_forward_flash_ff(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor->src[3], tensor->src[4], tensor);
            } break;
        case GGML_OP_FLASH_ATTN_BACK:
            {
                int32_t t = ggml_get_op_params_i32(tensor, 0);
                GGML_ASSERT(t == 0 || t == 1);
                bool masked = t != 0;
                ggml_compute_forward_flash_attn_back(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor->src[3], masked, tensor);
            } break;
        case GGML_OP_WIN_PART:
            {
                ggml_compute_forward_win_part(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_WIN_UNPART:
            {
                ggml_compute_forward_win_unpart(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_UNARY:
            {
                ggml_compute_forward_unary(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_GET_REL_POS:
            {
                ggml_compute_forward_get_rel_pos(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_ADD_REL_POS:
            {
                ggml_compute_forward_add_rel_pos(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor);
            } break;
        case GGML_OP_MAP_UNARY:
            {
                ggml_unary_op_f32_t fun;
                memcpy(&fun, tensor->op_params, sizeof(fun));
                ggml_compute_forward_map_unary(params, tensor->src[0], tensor, fun);
            }
            break;
        case GGML_OP_MAP_BINARY:
            {
                ggml_binary_op_f32_t fun;
                memcpy(&fun, tensor->op_params, sizeof(fun));
                ggml_compute_forward_map_binary(params, tensor->src[0], tensor->src[1], tensor, fun);
            }
            break;
        case GGML_OP_MAP_CUSTOM1_F32:
            {
                ggml_custom1_op_f32_t fun;
                memcpy(&fun, tensor->op_params, sizeof(fun));
                ggml_compute_forward_map_custom1_f32(params, tensor->src[0], tensor, fun);
            }
            break;
        case GGML_OP_MAP_CUSTOM2_F32:
            {
                ggml_custom2_op_f32_t fun;
                memcpy(&fun, tensor->op_params, sizeof(fun));
                ggml_compute_forward_map_custom2_f32(params, tensor->src[0], tensor->src[1], tensor, fun);
            }
            break;
        case GGML_OP_MAP_CUSTOM3_F32:
            {
                ggml_custom3_op_f32_t fun;
                memcpy(&fun, tensor->op_params, sizeof(fun));
                ggml_compute_forward_map_custom3_f32(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor, fun);
            }
            break;
        case GGML_OP_MAP_CUSTOM1:
            {
                ggml_compute_forward_map_custom1(params, tensor->src[0], tensor);
            }
            break;
        case GGML_OP_MAP_CUSTOM2:
            {
                ggml_compute_forward_map_custom2(params, tensor->src[0], tensor->src[1], tensor);
            }
            break;
        case GGML_OP_MAP_CUSTOM3:
            {
                ggml_compute_forward_map_custom3(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor);
            }
            break;
        case GGML_OP_CROSS_ENTROPY_LOSS:
            {
                ggml_compute_forward_cross_entropy_loss(params, tensor->src[0], tensor->src[1], tensor);
            }
            break;
        case GGML_OP_CROSS_ENTROPY_LOSS_BACK:
            {
                ggml_compute_forward_cross_entropy_loss_back(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor);
            }
            break;
        case GGML_OP_NONE:
            {
                // nop
            } break;
        case GGML_OP_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }
}
////////////////////////////////////////////////////////////////////////////////

static size_t ggml_hash_size(size_t min_sz) {
    // next primes after powers of two
    static const size_t primes[] = {
        2, 3, 5, 11, 17, 37, 67, 131, 257, 521, 1031,
        2053, 4099, 8209, 16411, 32771, 65537, 131101,
        262147, 524309, 1048583, 2097169, 4194319, 8388617,
        16777259, 33554467, 67108879, 134217757, 268435459,
        536870923, 1073741827, 2147483659
    };
    static const size_t n_primes = sizeof(primes)/sizeof(primes[0]);

    // find the smallest prime that is larger or equal to min_sz
    size_t l = 0;
    size_t r = n_primes;
    while (l < r) {
        size_t m = (l + r)/2;
        if (primes[m] < min_sz) {
            l = m + 1;
        } else {
            r = m;
        }
    }
    size_t sz = l < n_primes ? primes[l] : min_sz | 1;
    return sz;
}
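
// e.g. ggml_hash_size(1000) returns 1031, the smallest table entry >= 1000;
// requests beyond the largest listed prime fall back to min_sz | 1 (forced odd,
// not necessarily prime).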
static size_t ggml_hash(const void * p) {
    return (size_t)p;
}

size_t ggml_hash_find(const struct ggml_hash_set hash_set, struct ggml_tensor * key) {
    size_t h = ggml_hash(key) % hash_set.size;

    // linear probing
    size_t i = h;
    while (hash_set.keys[i] != NULL && hash_set.keys[i] != key) {
        i = (i + 1) % hash_set.size;
        if (i == h) {
            // visited all hash table entries -> not found
            return GGML_HASHTABLE_FULL;
        }
    }
    return i;
}
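
// note: open addressing with linear probing; the hash is just the pointer value,
// and a probe that wraps all the way around to its start slot reports
// GGML_HASHTABLE_FULL instead of looping forever.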
bool ggml_hash_contains(struct ggml_hash_set hash_set, struct ggml_tensor * key) {
    size_t i = ggml_hash_find(hash_set, key);
    return i != GGML_HASHTABLE_FULL && hash_set.keys[i] == key;
}

size_t ggml_hash_insert(struct ggml_hash_set hash_set, struct ggml_tensor * key) {
    size_t i = ggml_hash_find(hash_set, key);

    GGML_ASSERT(i != GGML_HASHTABLE_FULL);

    if (hash_set.keys[i] == key) {
        return GGML_HASHTABLE_ALREADY_EXISTS;
    }

    // insert
    GGML_ASSERT(hash_set.keys[i] == NULL);
    hash_set.keys[i] = key;
    return i;
}

size_t ggml_hash_find_or_insert(struct ggml_hash_set hash_set, struct ggml_tensor * key) {
    size_t i = ggml_hash_find(hash_set, key);

    GGML_ASSERT(i != GGML_HASHTABLE_FULL);

    hash_set.keys[i] = key;
    return i;
}

static struct ggml_hash_set ggml_hash_set_new(size_t size) {
    size = ggml_hash_size(size);
    struct ggml_hash_set result;
    result.size = size;
    result.keys = malloc(sizeof(struct ggml_tensor *) * size);
    memset(result.keys, 0, sizeof(struct ggml_tensor *) * size);
    return result;
}

static void ggml_hash_set_free(struct ggml_hash_set hash_set) {
    free(hash_set.keys);
}

struct hash_map {
    struct ggml_hash_set set;
    struct ggml_tensor ** vals;
};

static struct hash_map * ggml_new_hash_map(size_t size) {
    struct hash_map * result = malloc(sizeof(struct hash_map));
    result->set = ggml_hash_set_new(size);
    result->vals = malloc(sizeof(struct ggml_tensor *) * result->set.size);
    memset(result->vals, 0, sizeof(struct ggml_tensor *) * result->set.size);
    return result;
}

static void ggml_hash_map_free(struct hash_map * map) {
    ggml_hash_set_free(map->set);
    free(map->vals);
    free(map);
}
// gradient checkpointing

static struct ggml_tensor * ggml_recompute_graph_node(
        struct ggml_context * ctx,
        struct ggml_cgraph  * graph,
        struct hash_map     * replacements,
        struct ggml_tensor  * node) {

    if (node == NULL) {
        return NULL;
    }

    if (node->is_param) {
        return node;
    }

    if (!ggml_hash_contains(graph->visited_hash_table, node)) {
        return node;
    }

    int count_children = 0;
    for (int k = 0; k < GGML_MAX_SRC; ++k) {
        if (node->src[k]) {
            ++count_children;
        }
    }

    if (count_children == 0) {
        return node;
    }

    size_t i = ggml_hash_find(replacements->set, node);
    GGML_ASSERT(i != GGML_HASHTABLE_FULL); // assert that not full
    if (replacements->set.keys[i] == node) {
        return replacements->vals[i];
    }

    struct ggml_tensor * clone = ggml_new_tensor(ctx, node->type, node->n_dims, node->ne);

    // insert clone into replacements
    GGML_ASSERT(replacements->set.keys[i] == NULL); // assert that we don't overwrite
    replacements->set.keys[i] = node;
    replacements->vals[i] = clone;

    clone->op       = node->op;
    clone->grad     = node->grad;
    clone->is_param = node->is_param;
    clone->extra    = node->extra;
    for (int k = 0; k < GGML_MAX_DIMS; ++k) {
        clone->nb[k] = node->nb[k];
    }
    for (int k = 0; k < GGML_MAX_SRC; ++k) {
        clone->src[k] = ggml_recompute_graph_node(ctx, graph, replacements, node->src[k]);
    }
    if (node->view_src != NULL) {
        clone->data = (node->view_src->data == NULL)
                        ? NULL // view_src not yet allocated
                        : (char *) node->view_src->data // view_src already allocated
                                 + node->view_offs;
        clone->view_src  = node->view_src;
        clone->view_offs = node->view_offs;
    }

    GGML_ASSERT(sizeof(node->op_params) == sizeof(int32_t) * (GGML_MAX_OP_PARAMS / sizeof(int32_t)));
    GGML_ASSERT(sizeof(node->name)      == GGML_MAX_NAME);
    memcpy(clone->op_params, node->op_params, sizeof(node->op_params));
    ggml_format_name(clone, "%s (clone)", ggml_get_name(node));

    return clone;
}
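
// note: replacements memoizes node -> clone, so shared subgraphs are cloned exactly
// once and the recursion terminates at params, at tensors outside the forward graph,
// at leafs (no srcs), and at keys pre-seeded into replacements (the checkpoints).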
void ggml_build_backward_gradient_checkpointing(
        struct ggml_context   * ctx,
        struct ggml_cgraph    * gf,
        struct ggml_cgraph    * gb,
        struct ggml_cgraph    * gb_tmp,
        struct ggml_tensor  * * checkpoints,
        int                     n_checkpoints) {
    ggml_graph_cpy(gf, gb_tmp);
    ggml_build_backward_expand(ctx, gf, gb_tmp, true);

    if (n_checkpoints <= 0) {
        ggml_graph_cpy(gb_tmp, gb);
        return;
    }

    struct hash_map * replacements = ggml_new_hash_map(gf->n_nodes + gf->n_leafs + n_checkpoints);

    // insert checkpoints in replacements
    for (int i = 0; i < n_checkpoints; ++i) {
        size_t k = ggml_hash_find(replacements->set, checkpoints[i]);
        GGML_ASSERT(k != GGML_HASHTABLE_FULL); // assert that not full
        GGML_ASSERT(replacements->set.keys[k] == NULL); // assert that we don't overwrite
        replacements->set.keys[k] = checkpoints[i];
        replacements->vals[k]     = checkpoints[i];
    }

    ggml_graph_cpy(gf, gb);
    // rewrite gb_tmp->nodes[gf->n_nodes:gb_tmp->n_nodes],
    // replacing references to gb_tmp->nodes[0:gf->n_nodes] ( == gf->nodes[0:gf->n_nodes]),
    // by recomputing them from checkpoints
    for (int i = gf->n_nodes; i < gb_tmp->n_nodes; ++i) {
        struct ggml_tensor * node = gb_tmp->nodes[i];
        for (int k = 0; k < GGML_MAX_SRC; ++k) {
            // insert new tensors recomputing src, reusing already made replacements,
            // remember replacements: remember new tensors with mapping from corresponding gf nodes
            // recurse for input tensors,
            // unless (i.e. terminating when) input tensors are replacements (like checkpoints)
            node->src[k] = ggml_recompute_graph_node(ctx, gf, replacements, node->src[k]);
        }
        // insert rewritten backward node with replacements made into resulting backward graph gb
        ggml_build_forward_expand(gb, node);
    }

    ggml_hash_map_free(replacements);
}
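
// summary of the graphs involved (restating the parameters above): gf is the forward
// graph, gb_tmp holds forward + conventional backward, and gb receives forward +
// a backward pass whose activation inputs are recomputed from the given checkpoints,
// so memory held by non-checkpoint intermediate activations can be reused.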
// functions to change gradients considering the case that input a might be initial gradient with zero value

static struct ggml_tensor * ggml_add_or_set(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, struct ggml_hash_set zero_table) {
    if (ggml_hash_contains(zero_table, a)) {
        return b;
    } else {
        return ggml_add_impl(ctx, a, b, false);
    }
}

static struct ggml_tensor * ggml_acc_or_set(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, size_t nb1, size_t nb2, size_t nb3, size_t offset, struct ggml_hash_set zero_table) {
    if (ggml_hash_contains(zero_table, a)) {
        struct ggml_tensor * a_zero = ggml_scale(ctx, a, ggml_new_f32(ctx, 0));
        return ggml_acc_impl(ctx, a_zero, b, nb1, nb2, nb3, offset, false);
    } else {
        return ggml_acc_impl(ctx, a, b, nb1, nb2, nb3, offset, false);
    }
}

static struct ggml_tensor * ggml_add1_or_set(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, struct ggml_hash_set zero_table) {
    if (ggml_hash_contains(zero_table, a)) {
        return ggml_repeat(ctx, b, a);
    } else {
        return ggml_add1_impl(ctx, a, b, false);
    }
}

static struct ggml_tensor * ggml_sub_or_set(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, struct ggml_hash_set zero_table) {
    if (ggml_hash_contains(zero_table, a)) {
        return ggml_neg(ctx, b);
    } else {
        return ggml_sub_impl(ctx, a, b, false);
    }
}
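
// note: zero_table marks gradients that are still conceptually zero, letting the first
// accumulation skip the no-op arithmetic: ggml_add_or_set returns b directly instead of
// building ggml_add(a, b), ggml_sub_or_set returns ggml_neg(ctx, b), and so on.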
static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor * tensor, struct ggml_hash_set zero_table) {
    struct ggml_tensor * src0 = tensor->src[0];
    struct ggml_tensor * src1 = tensor->src[1];

    switch (tensor->op) {
        case GGML_OP_DUP:
            {
                if (src0->grad) {
                    src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
                }
            } break;
        case GGML_OP_ADD:
            {
                if (src0->grad) {
                    src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
                }
                if (src1->grad) {
                    src1->grad = ggml_add_or_set(ctx, src1->grad, tensor->grad, zero_table);
                }
            } break;
        case GGML_OP_ADD1:
            {
                if (src0->grad) {
                    src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
                }
                if (src1->grad) {
                    src1->grad = ggml_add_or_set(ctx,
                        src1->grad,
                        ggml_mean(ctx, tensor->grad), // TODO: should probably be sum instead of mean
                        zero_table);
                }
            } break;
        case GGML_OP_ACC:
            {
                if (src0->grad) {
                    src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
                }
                if (src1->grad) {
                    const size_t nb1    = ((int32_t *) tensor->op_params)[0];
                    const size_t nb2    = ((int32_t *) tensor->op_params)[1];
                    const size_t nb3    = ((int32_t *) tensor->op_params)[2];
                    const size_t offset = ((int32_t *) tensor->op_params)[3];

                    struct ggml_tensor * tensor_grad_view = ggml_view_4d(ctx,
                        tensor->grad,
                        src1->grad->ne[0],
                        src1->grad->ne[1],
                        src1->grad->ne[2],
                        src1->grad->ne[3],
                        nb1, nb2, nb3, offset);

                    src1->grad =
                        ggml_add_or_set(ctx,
                            src1->grad,
                            ggml_reshape(ctx,
                                ggml_cont(ctx, tensor_grad_view),
                                src1->grad),
                            zero_table);
                }
            } break;
        case GGML_OP_SUB:
            {
                if (src0->grad) {
                    src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
                }
                if (src1->grad) {
                    src1->grad = ggml_sub_or_set(ctx, src1->grad, tensor->grad, zero_table);
                }
            } break;
        case GGML_OP_MUL:
            {
                if (src0->grad) {
                    src0->grad =
                        ggml_add_or_set(ctx,
                            src0->grad,
                            ggml_mul(ctx, src1, tensor->grad),
                            zero_table);
                }
                if (src1->grad) {
                    src1->grad =
                        ggml_add_or_set(ctx,
                            src1->grad,
                            ggml_mul(ctx, src0, tensor->grad),
                            zero_table);
                }
            } break;
        case GGML_OP_DIV:
            {
                if (src0->grad) {
                    src0->grad =
                        ggml_add_or_set(ctx,
                            src0->grad,
                            ggml_div(ctx, tensor->grad, src1),
                            zero_table);
                }
                if (src1->grad) {
                    src1->grad =
                        ggml_sub_or_set(ctx,
                            src1->grad,
                            ggml_mul(ctx,
                                tensor->grad,
                                ggml_div(ctx, tensor, src1)),
                            zero_table);
                }
            } break;
        case GGML_OP_SQR:
            {
                if (src0->grad) {
                    src0->grad =
                        ggml_add_or_set(ctx,
                            src0->grad,
                            ggml_scale(ctx,
                                ggml_mul(ctx, src0, tensor->grad),
                                ggml_new_f32(ctx, 2.0f)),
                            zero_table);
                }
            } break;
        case GGML_OP_SQRT:
            {
                if (src0->grad) {
                    src0->grad =
                        ggml_add_or_set(ctx,
                            src0->grad,
                            ggml_scale(ctx,
                                ggml_div(ctx,
                                    tensor->grad,
                                    tensor),
                                ggml_new_f32(ctx, 0.5f)),
                            zero_table);
                }
            } break;
        case GGML_OP_LOG:
            {
                if (src0->grad) {
                    src0->grad =
                        ggml_add_or_set(ctx,
                            src0->grad,
                            ggml_div(ctx,
                                tensor->grad,
                                src0),
                            zero_table);
                }
            } break;
        case GGML_OP_SUM:
            {
                if (src0->grad) {
                    src0->grad =
                        ggml_add1_or_set(ctx,
                            src0->grad,
                            tensor->grad,
                            zero_table);
                }
            } break;
        case GGML_OP_SUM_ROWS:
            {
                if (src0->grad) {
                    src0->grad =
                        ggml_add_or_set(ctx,
                            src0->grad,
                            ggml_repeat(ctx,
                                tensor->grad,
                                src0->grad),
                            zero_table);
                }
            } break;
        case GGML_OP_MEAN:
        case GGML_OP_ARGMAX:
            {
                GGML_ASSERT(false); // TODO: implement
            } break;
        case GGML_OP_REPEAT:
            {
                // necessary for llama
                if (src0->grad) {
                    src0->grad = ggml_add_or_set(ctx,
                        src0->grad,
                        ggml_repeat_back(ctx, tensor->grad, src0->grad),
                        zero_table);
                }
            } break;
        case GGML_OP_REPEAT_BACK:
            {
                if (src0->grad) {
                    // TODO: test this
                    src0->grad = ggml_add_or_set(ctx,
                        src0->grad,
                        ggml_repeat(ctx, tensor->grad, src0->grad),
                        zero_table);
                }
            } break;
        case GGML_OP_CONCAT:
            {
                GGML_ASSERT(false); // TODO: implement
            } break;
        case GGML_OP_SILU_BACK:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_NORM:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_RMS_NORM:
            {
                // necessary for llama
                if (src0->grad) {
                    float eps;
                    memcpy(&eps, tensor->op_params, sizeof(float));

                    src0->grad = ggml_add_or_set(ctx,
                        src0->grad,
                        ggml_rms_norm_back(ctx, src0, tensor->grad, eps),
                        zero_table);
                }
            } break;
        case GGML_OP_RMS_NORM_BACK:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_GROUP_NORM:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_MUL_MAT:
            {
                // https://cs231n.github.io/optimization-2/#staged
                // # forward pass
                // s0 = np.random.randn(5, 10)
                // s1 = np.random.randn(10, 3)
                // t = s0.dot(s1)

                // # now suppose we had the gradient on t from above in the circuit
                // dt = np.random.randn(*t.shape) # same shape as t
                // ds0 = dt.dot(s1.T) #.T gives the transpose of the matrix
                // ds1 = s0.T.dot(dt)

                // tensor.shape [m,p,qq,rr]
                // src0.shape   [n,m,q1,r1]
                // src1.shape   [n,p,qq,rr]

                // necessary for llama
                if (src0->grad) {
                    struct ggml_tensor * s1_tg =
                        ggml_out_prod(ctx, // [n,m,qq,rr]
                            src1,          // [n,p,qq,rr]
                            tensor->grad); // [m,p,qq,rr]
                    const int64_t qq = s1_tg->ne[2];
                    const int64_t rr = s1_tg->ne[3];
                    const int64_t q1 = src0->ne[2];
                    const int64_t r1 = src0->ne[3];
                    const bool ne2_broadcasted = qq > q1;
                    const bool ne3_broadcasted = rr > r1;
                    if (ne2_broadcasted || ne3_broadcasted) {
                        // sum broadcast repetitions of s1_tg into shape of src0
                        s1_tg = ggml_repeat_back(ctx, s1_tg, src0);
                    }
                    src0->grad =
                        ggml_add_or_set(ctx,
                            src0->grad, // [n,m,q1,r1]
                            s1_tg,      // [n,m,q1,r1]
                            zero_table);
                }
                if (src1->grad) {
                    src1->grad =
                        ggml_add_or_set(ctx,
                            src1->grad,                            // [n,p,qq,rr]
                            // ggml_mul_mat(ctx,                   // [n,p,qq,rr]
                            //     ggml_cont(ctx,                  // [m,n,q1,r1]
                            //         ggml_transpose(ctx, src0)), // [m,n,q1,r1]
                            //     tensor->grad),                  // [m,p,qq,rr]

                            // // when src0 is bigger than tensor->grad (this is mostly the case in llama),
                            // // avoid transpose of src0, rather transpose smaller tensor->grad
                            // // and then use ggml_out_prod
                            ggml_out_prod(ctx,      // [n,p,qq,rr]
                                src0,               // [n,m,q1,r1]
                                ggml_transpose(ctx, // [p,m,qq,rr]
                                    tensor->grad)), // [m,p,qq,rr]
                            zero_table);
                }
            } break;
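        // worked shapes for the plain 2-D case (illustrative): with src0 [n,m], src1 [n,p]
        // and tensor [m,p], grad(src0) = ggml_out_prod(src1, tensor->grad) -> [n,m] and
        // grad(src1) = ggml_out_prod(src0, transpose(tensor->grad)) -> [n,p], matching
        // ds0 = dt.dot(s1.T) and ds1 = s0.T.dot(dt) in the numpy reference above.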
        case GGML_OP_MUL_MAT_ID:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_OUT_PROD:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_SCALE:
            {
                // necessary for llama
                if (src0->grad) {
                    src0->grad =
                        ggml_add_or_set(ctx,
                            src0->grad,
                            ggml_scale_impl(ctx, tensor->grad, src1, false),
                            zero_table);
                }
                if (src1->grad) {
                    src1->grad =
                        ggml_add_or_set(ctx,
                            src1->grad,
                            ggml_sum(ctx, ggml_mul_impl(ctx, tensor->grad, src0, false)),
                            zero_table);
                }
            } break;
        case GGML_OP_SET:
            {
                const size_t nb1    = ((int32_t *) tensor->op_params)[0];
                const size_t nb2    = ((int32_t *) tensor->op_params)[1];
                const size_t nb3    = ((int32_t *) tensor->op_params)[2];
                const size_t offset = ((int32_t *) tensor->op_params)[3];

                struct ggml_tensor * tensor_grad_view = NULL;

                if (src0->grad || src1->grad) {
                    GGML_ASSERT(src0->type == tensor->type);
                    GGML_ASSERT(tensor->grad->type == tensor->type);
                    GGML_ASSERT(tensor->grad->type == src1->grad->type);

                    tensor_grad_view = ggml_view_4d(ctx,
                        tensor->grad,
                        src1->grad->ne[0],
                        src1->grad->ne[1],
                        src1->grad->ne[2],
                        src1->grad->ne[3],
                        nb1, nb2, nb3, offset);
                }

                if (src0->grad) {
                    src0->grad = ggml_add_or_set(ctx,
                        src0->grad,
                        ggml_acc_impl(ctx,
                            tensor->grad,
                            ggml_neg(ctx, tensor_grad_view),
                            nb1, nb2, nb3, offset, false),
                        zero_table);
                }

                if (src1->grad) {
                    src1->grad =
                        ggml_add_or_set(ctx,
                            src1->grad,
                            ggml_reshape(ctx,
                                ggml_cont(ctx, tensor_grad_view),
                                src1->grad),
                            zero_table);
                }
            } break;
        case GGML_OP_CPY:
            {
                // necessary for llama
                // cpy overwrites value of src1 by src0 and returns view(src1)
                // the overwriting is mathematically equivalent to:
                // tensor = src0 * 1 + src1 * 0
                if (src0->grad) {
                    // dsrc0 = dtensor * 1
                    src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
                }
                if (src1->grad) {
                    // dsrc1 = dtensor * 0 -> noop
                }
            } break;
        case GGML_OP_CONT:
            {
                // same as cpy
                if (src0->grad) {
                    GGML_ASSERT(ggml_is_contiguous(src0->grad));
                    GGML_ASSERT(ggml_is_contiguous(tensor->grad));
                    src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
                }
            } break;
        case GGML_OP_RESHAPE:
            {
                // necessary for llama
                if (src0->grad) {
                    src0->grad =
                        ggml_add_or_set(ctx, src0->grad,
                            ggml_reshape(ctx,
                                ggml_is_contiguous(tensor->grad)
                                    ? tensor->grad
                                    : ggml_cont(ctx, tensor->grad),
                                src0->grad),
                            zero_table);
                }
            } break;
        case GGML_OP_VIEW:
            {
                // necessary for llama
                if (src0->grad) {
                    size_t offset;

                    memcpy(&offset, tensor->op_params, sizeof(offset));

                    size_t nb1 = tensor->nb[1];
                    size_t nb2 = tensor->nb[2];
                    size_t nb3 = tensor->nb[3];

                    if (src0->type != src0->grad->type) {
                        // gradient is typically F32, but src0 could be other type
                        size_t ng = ggml_element_size(src0->grad);
                        size_t n0 = ggml_element_size(src0);
                        GGML_ASSERT(offset % n0 == 0);
                        GGML_ASSERT(nb1 % n0 == 0);
                        GGML_ASSERT(nb2 % n0 == 0);
                        GGML_ASSERT(nb3 % n0 == 0);
                        offset = (offset / n0) * ng;
                        nb1 = (nb1 / n0) * ng;
                        nb2 = (nb2 / n0) * ng;
                        nb3 = (nb3 / n0) * ng;
                    }

                    src0->grad = ggml_acc_or_set(ctx, src0->grad, tensor->grad, nb1, nb2, nb3, offset, zero_table);
                }
            } break;
        case GGML_OP_PERMUTE:
            {
                // necessary for llama
                if (src0->grad) {
                    int32_t * axes = (int32_t *) tensor->op_params;
                    int axis0 = axes[0] & 0x3;
                    int axis1 = axes[1] & 0x3;
                    int axis2 = axes[2] & 0x3;
                    int axis3 = axes[3] & 0x3;
                    int axes_backward[4] = {0,0,0,0};
                    axes_backward[axis0] = 0;
                    axes_backward[axis1] = 1;
                    axes_backward[axis2] = 2;
                    axes_backward[axis3] = 3;
                    src0->grad =
                        ggml_add_or_set(ctx, src0->grad,
                            ggml_permute(ctx,
                                tensor->grad,
                                axes_backward[0],
                                axes_backward[1],
                                axes_backward[2],
                                axes_backward[3]),
                            zero_table);
                }
            } break;
        case GGML_OP_TRANSPOSE:
            {
                // necessary for llama
                if (src0->grad) {
                    src0->grad =
                        ggml_add_or_set(ctx, src0->grad,
                            ggml_transpose(ctx, tensor->grad),
                            zero_table);
                }
            } break;
        case GGML_OP_GET_ROWS:
            {
                // necessary for llama (only for tokenizer)
                if (src0->grad) {
                    src0->grad =
                        ggml_add_or_set(ctx, src0->grad,
                            // last ggml_get_rows_back argument src0->grad is only
                            // necessary to setup correct output shape
                            ggml_get_rows_back(ctx, tensor->grad, src1, src0->grad),
                            zero_table);
                }
                if (src1->grad) {
                    // noop
                }
            } break;
        case GGML_OP_GET_ROWS_BACK:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_DIAG:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_DIAG_MASK_INF:
            {
                // necessary for llama
                if (src0->grad) {
                    const int n_past = ((int32_t *) tensor->op_params)[0];
                    src0->grad =
                        ggml_add_or_set(ctx, src0->grad,
                            ggml_diag_mask_zero_impl(ctx, tensor->grad, n_past, false),
                            zero_table);
                }
            } break;
        case GGML_OP_DIAG_MASK_ZERO:
            {
                // necessary for llama
                if (src0->grad) {
                    const int n_past = ((int32_t *) tensor->op_params)[0];
                    src0->grad =
                        ggml_add_or_set(ctx, src0->grad,
                            ggml_diag_mask_zero_impl(ctx, tensor->grad, n_past, false),
                            zero_table);
                }
            } break;
        case GGML_OP_SOFT_MAX:
            {
                // necessary for llama
                if (src0->grad) {
                    src0->grad =
                        ggml_add_or_set(ctx, src0->grad,
                            ggml_soft_max_back(ctx, tensor->grad, tensor),
                            zero_table);
                }
            } break;
        case GGML_OP_SOFT_MAX_BACK:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_ROPE:
            {
                // necessary for llama
                if (src0->grad) {
                    //const int n_past = ((int32_t *) tensor->op_params)[0];
                    const int n_dims     = ((int32_t *) tensor->op_params)[1];
                    const int mode       = ((int32_t *) tensor->op_params)[2];
                    const int n_ctx      = ((int32_t *) tensor->op_params)[3];
                    const int n_orig_ctx = ((int32_t *) tensor->op_params)[4];
                    float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow, xpos_base, xpos_down;

                    memcpy(&freq_base,   (int32_t *) tensor->op_params +  5, sizeof(float));
                    memcpy(&freq_scale,  (int32_t *) tensor->op_params +  6, sizeof(float));
                    memcpy(&ext_factor,  (int32_t *) tensor->op_params +  7, sizeof(float));
                    memcpy(&attn_factor, (int32_t *) tensor->op_params +  8, sizeof(float));
                    memcpy(&beta_fast,   (int32_t *) tensor->op_params +  9, sizeof(float));
                    memcpy(&beta_slow,   (int32_t *) tensor->op_params + 10, sizeof(float));
                    memcpy(&xpos_base,   (int32_t *) tensor->op_params + 11, sizeof(float));
                    memcpy(&xpos_down,   (int32_t *) tensor->op_params + 12, sizeof(bool));

                    src0->grad = ggml_add_or_set(ctx,
                            src0->grad,
                            ggml_rope_back(ctx,
                                tensor->grad,
                                src1,
                                n_dims,
                                mode,
                                n_ctx,
                                n_orig_ctx,
                                freq_base,
                                freq_scale,
                                ext_factor,
                                attn_factor,
                                beta_fast,
                                beta_slow,
                                xpos_base,
                                xpos_down),
                            zero_table);
                }
            } break;
        case GGML_OP_ROPE_BACK:
            {
                if (src0->grad) {
                    //const int n_past = ((int32_t *) tensor->op_params)[0];
                    const int n_dims     = ((int32_t *) tensor->op_params)[1];
                    const int mode       = ((int32_t *) tensor->op_params)[2];
                    const int n_ctx      = ((int32_t *) tensor->op_params)[3];
                    const int n_orig_ctx = ((int32_t *) tensor->op_params)[4];
                    float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow, xpos_base, xpos_down;

                    memcpy(&freq_base,   (int32_t *) tensor->op_params +  5, sizeof(float));
                    memcpy(&freq_scale,  (int32_t *) tensor->op_params +  6, sizeof(float));
                    memcpy(&ext_factor,  (int32_t *) tensor->op_params +  7, sizeof(float));
                    memcpy(&attn_factor, (int32_t *) tensor->op_params +  8, sizeof(float));
                    memcpy(&beta_fast,   (int32_t *) tensor->op_params +  9, sizeof(float));
                    memcpy(&beta_slow,   (int32_t *) tensor->op_params + 10, sizeof(float));
                    memcpy(&xpos_base,   (int32_t *) tensor->op_params + 11, sizeof(float));
                    memcpy(&xpos_down,   (int32_t *) tensor->op_params + 12, sizeof(bool));

                    src0->grad = ggml_add_or_set(ctx,
                            src0->grad,
                            ggml_rope_impl(ctx,
                                tensor->grad,
                                src1,
                                n_dims,
                                mode,
                                n_ctx,
                                n_orig_ctx,
                                freq_base,
                                freq_scale,
                                ext_factor,
                                attn_factor,
                                beta_fast,
                                beta_slow,
                                xpos_base,
                                xpos_down,
                                false),
                            zero_table);
                }
            } break;
        case GGML_OP_ALIBI:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_CLAMP:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_CONV_TRANSPOSE_1D:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_IM2COL:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_CONV_TRANSPOSE_2D:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_POOL_1D:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_POOL_2D:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_UPSCALE:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_PAD:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_ARGSORT:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_LEAKY_RELU:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_FLASH_ATTN:
            {
                struct ggml_tensor * flash_grad = NULL;
                if (src0->grad || src1->grad || tensor->src[2]->grad) {
                    int32_t t = ggml_get_op_params_i32(tensor, 0);
                    GGML_ASSERT(t == 0 || t == 1);
                    bool masked = t != 0;
                    flash_grad =
                        ggml_flash_attn_back(ctx,
                            src0,
                            src1,
                            tensor->src[2],
                            tensor->grad,
                            masked);
                }

                struct ggml_tensor * src2 = tensor->src[2];
                const int64_t elem_q = ggml_nelements(src0);
                const int64_t elem_k = ggml_nelements(src1);
                const int64_t elem_v = ggml_nelements(src2);

                enum ggml_type result_type = flash_grad->type;
                GGML_ASSERT(ggml_blck_size(result_type) == 1);
                const size_t tsize = ggml_type_size(result_type);

                const size_t offs_q = 0;
                const size_t offs_k = offs_q + GGML_PAD(elem_q * tsize, GGML_MEM_ALIGN);
                const size_t offs_v = offs_k + GGML_PAD(elem_k * tsize, GGML_MEM_ALIGN);

                if (src0->grad) {
                    struct ggml_tensor * view_q = ggml_view_1d(ctx, flash_grad, elem_q, offs_q);
                    struct ggml_tensor * grad_q = ggml_reshape(ctx, view_q, src0);
                    src0->grad = ggml_add_or_set(ctx,
                            src0->grad,
                            grad_q,
                            zero_table);
                }
                if (src1->grad) {
                    struct ggml_tensor * view_k = ggml_view_1d(ctx, flash_grad, elem_k, offs_k);
                    struct ggml_tensor * grad_k = ggml_reshape(ctx, view_k, src1);
                    src1->grad = ggml_add_or_set(ctx,
                            src1->grad,
                            grad_k,
                            zero_table);
                }
                if (src2->grad) {
                    struct ggml_tensor * view_v = ggml_view_1d(ctx, flash_grad, elem_v, offs_v);
                    struct ggml_tensor * grad_v = ggml_reshape(ctx, view_v, src2);
                    src2->grad = ggml_add_or_set(ctx,
                            src2->grad,
                            grad_v,
                            zero_table);
                }
            } break;
        case GGML_OP_FLASH_FF:
            {
                GGML_ASSERT(false); // not supported
            } break;
        case GGML_OP_FLASH_ATTN_BACK:
            {
                GGML_ASSERT(false); // not supported
            } break;
        case GGML_OP_WIN_PART:
        case GGML_OP_WIN_UNPART:
        case GGML_OP_UNARY:
            {
                switch (ggml_get_unary_op(tensor)) {
                    case GGML_UNARY_OP_ABS:
                        {
                            if (src0->grad) {
                                src0->grad =
                                    ggml_add_or_set(ctx,
                                        src0->grad,
                                        ggml_mul(ctx,
                                            ggml_sgn(ctx, src0),
                                            tensor->grad),
                                        zero_table);
                            }
                        } break;
                    case GGML_UNARY_OP_SGN:
                        {
                            if (src0->grad) {
                                // noop
                            }
                        } break;
                    case GGML_UNARY_OP_NEG:
                        {
                            if (src0->grad) {
                                src0->grad = ggml_sub_or_set(ctx, src0->grad, tensor->grad, zero_table);
                            }
                        } break;
                    case GGML_UNARY_OP_STEP:
                        {
                            if (src0->grad) {
                                // noop
                            }
                        } break;
                    case GGML_UNARY_OP_TANH:
                        {
                            GGML_ASSERT(false); // TODO: not implemented
                        } break;
                    case GGML_UNARY_OP_ELU:
                        {
                            GGML_ASSERT(false); // TODO: not implemented
                        } break;
                    case GGML_UNARY_OP_RELU:
                        {
                            if (src0->grad) {
                                src0->grad = ggml_add_or_set(ctx,
                                    src0->grad,
                                    ggml_mul(ctx,
                                        ggml_step(ctx, src0),
                                        tensor->grad),
                                    zero_table);
                            }
                        } break;
                    case GGML_UNARY_OP_GELU:
                        {
                            GGML_ASSERT(false); // TODO: not implemented
                        } break;
                    case GGML_UNARY_OP_GELU_QUICK:
                        {
                            GGML_ASSERT(false); // TODO: not implemented
                        } break;
                    case GGML_UNARY_OP_SILU:
                        {
                            // necessary for llama
                            if (src0->grad) {
                                src0->grad = ggml_add_or_set(ctx,
                                    src0->grad,
                                    ggml_silu_back(ctx, src0, tensor->grad),
                                    zero_table);
                            }
                        } break;
                    default:
                        GGML_ASSERT(false);
                }
            } break;
        case GGML_OP_GET_REL_POS:
        case GGML_OP_ADD_REL_POS:
        case GGML_OP_MAP_UNARY:
        case GGML_OP_MAP_BINARY:
        case GGML_OP_MAP_CUSTOM1_F32:
        case GGML_OP_MAP_CUSTOM2_F32:
        case GGML_OP_MAP_CUSTOM3_F32:
        case GGML_OP_MAP_CUSTOM1:
        case GGML_OP_MAP_CUSTOM2:
        case GGML_OP_MAP_CUSTOM3:
            {
                GGML_ASSERT(false); // not supported
            } break;
        case GGML_OP_CROSS_ENTROPY_LOSS:
            {
                if (src0->grad) {
                    src0->grad = ggml_add_or_set(ctx,
                        src0->grad,
                        ggml_cross_entropy_loss_back(ctx,
                            src0,
                            src1,
                            tensor->grad),
                        zero_table);
                }
            } break;
        case GGML_OP_CROSS_ENTROPY_LOSS_BACK:
            {
                GGML_ASSERT(false); // not supported
            } break;
        case GGML_OP_NONE:
            {
                // nop
            } break;
        case GGML_OP_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }

    for (int i = 0; i < GGML_MAX_SRC; ++i) {
        if (tensor->src[i] && tensor->src[i]->grad) {
            GGML_ASSERT(ggml_are_same_shape(tensor->src[i], tensor->src[i]->grad));
        }
    }
}
static void ggml_visit_parents(struct ggml_cgraph * cgraph, struct ggml_tensor * node) {
    if (node->grad == NULL) {
        // this usually happens when we generate intermediate nodes from constants in the backward pass
        // it can also happen during forward pass, if the user performs computations with constants
        if (node->op != GGML_OP_NONE) {
            //GGML_PRINT_DEBUG("%s: warning: node %p has no grad, but op %d\n", __func__, (void *) node, node->op);
        }
    }

    // check if already visited
    if (ggml_hash_insert(cgraph->visited_hash_table, node) == GGML_HASHTABLE_ALREADY_EXISTS) {
        return;
    }

    for (int i = 0; i < GGML_MAX_SRC; ++i) {
        const int k =
            (cgraph->order == GGML_CGRAPH_EVAL_ORDER_LEFT_TO_RIGHT) ? i :
            (cgraph->order == GGML_CGRAPH_EVAL_ORDER_RIGHT_TO_LEFT) ? (GGML_MAX_SRC-1-i) :
            /* unknown order, just fall back to using i */ i;
        if (node->src[k]) {
            ggml_visit_parents(cgraph, node->src[k]);
        }
    }

    if (node->op == GGML_OP_NONE && node->grad == NULL) {
        // reached a leaf node, not part of the gradient graph (e.g. a constant)
        GGML_ASSERT(cgraph->n_leafs < cgraph->size);

        if (strlen(node->name) == 0) {
            ggml_format_name(node, "leaf_%d", cgraph->n_leafs);
        }

        cgraph->leafs[cgraph->n_leafs] = node;
        cgraph->n_leafs++;
    } else {
        GGML_ASSERT(cgraph->n_nodes < cgraph->size);

        if (strlen(node->name) == 0) {
            ggml_format_name(node, "node_%d", cgraph->n_nodes);
        }

        cgraph->nodes[cgraph->n_nodes] = node;
        if (cgraph->grads) {
            cgraph->grads[cgraph->n_nodes] = node->grad;
        }
        cgraph->n_nodes++;
    }
}
static void ggml_build_forward_impl(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor, bool expand) {
    if (!expand) {
        // TODO: this branch isn't accessible anymore, maybe move this to ggml_build_forward_expand
        ggml_graph_clear(cgraph);
    }

    const int n0 = cgraph->n_nodes;
    UNUSED(n0);

    ggml_visit_parents(cgraph, tensor);

    const int n_new = cgraph->n_nodes - n0;
    GGML_PRINT_DEBUG("%s: visited %d new nodes\n", __func__, n_new);

    if (n_new > 0) {
        // the last added node should always be the starting point
        GGML_ASSERT(cgraph->nodes[cgraph->n_nodes - 1] == tensor);
    }
}

void ggml_build_forward_expand(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor) {
    ggml_build_forward_impl(cgraph, tensor, true);
}
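
// Illustrative usage sketch (not part of the library): building a forward graph
// for a small expression. Assumes a context `ctx` created with ggml_init and with
// enough memory for the tensors plus ggml_graph_overhead(). Plain forward
// evaluation does not need gradients.
//
//     struct ggml_tensor * a = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 8);
//     struct ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 8);
//     struct ggml_tensor * c = ggml_mul(ctx, a, b);  // c = a*b (element-wise)
//
//     struct ggml_cgraph * gf = ggml_new_graph(ctx);  // default size, no grads
//     ggml_build_forward_expand(gf, c);               // topologically sorts a, b, c into gf
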
void ggml_build_backward_expand(struct ggml_context * ctx, struct ggml_cgraph * gf, struct ggml_cgraph * gb, bool keep) {
    GGML_ASSERT(gf->n_nodes > 0);

    // if we are keeping the gradient graph, we have to detach the gradient nodes from the original graph
    if (keep) {
        for (int i = 0; i < gf->n_nodes; i++) {
            struct ggml_tensor * node = gf->nodes[i];

            if (node->grad) {
                node->grad = ggml_dup_tensor(ctx, node);
                gf->grads[i] = node->grad;
            }
        }
    }

    // remember original gradients which start with zero values
    struct ggml_hash_set zero_table = ggml_hash_set_new(gf->size);
    for (int i = 0; i < gf->n_nodes; i++) {
        if (gf->grads[i]) {
            ggml_hash_insert(zero_table, gf->grads[i]);
        }
    }

    for (int i = gf->n_nodes - 1; i >= 0; i--) {
        struct ggml_tensor * node = gf->nodes[i];

        // inplace operations to add gradients are not created by ggml_compute_backward
        // use allocator to automatically make inplace operations
        if (node->grad) {
            ggml_compute_backward(ctx, node, zero_table);
        }
    }

    for (int i = 0; i < gf->n_nodes; i++) {
        struct ggml_tensor * node = gf->nodes[i];

        if (node->is_param) {
            GGML_PRINT_DEBUG("%s: found root node %p\n", __func__, (void *) node);
            ggml_build_forward_expand(gb, node->grad);
        }
    }

    ggml_hash_set_free(zero_table);
}
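
// Illustrative sketch (assumptions noted inline): computing gradients for the
// expression above. Parameters must be marked with ggml_set_param *before* the
// forward graph is built, and the graphs must be created with grads enabled.
//
//     ggml_set_param(ctx, a);  // treat a as a trainable parameter
//
//     struct ggml_cgraph * gf = ggml_new_graph_custom(ctx, GGML_DEFAULT_GRAPH_SIZE, /*grads=*/true);
//     ggml_build_forward_expand(gf, c);
//
//     struct ggml_cgraph * gb = ggml_graph_dup(ctx, gf);  // backward graph extends the forward one
//     ggml_build_backward_expand(ctx, gf, gb, /*keep=*/false);
//     // after computing gb (see ggml_graph_compute below), a->grad holds dc/da
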
static size_t ggml_graph_nbytes(size_t size, bool grads) {
    size_t nbytes = sizeof(struct ggml_cgraph);
    nbytes += size * sizeof(struct ggml_tensor *) * 2; // leafs + nodes
    if (grads) {
        nbytes += size * sizeof(struct ggml_tensor *); // grads
    }
    nbytes += ggml_hash_size(size * 2) * sizeof(struct ggml_tensor *); // hash set
    return nbytes;
}

size_t ggml_graph_overhead_custom(size_t size, bool grads) {
    return GGML_OBJECT_SIZE + GGML_PAD(ggml_graph_nbytes(size, grads), GGML_MEM_ALIGN);
}

size_t ggml_graph_overhead(void) {
    return ggml_graph_overhead_custom(GGML_DEFAULT_GRAPH_SIZE, false);
}
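
// Illustrative sketch: sizing a context that only holds graph metadata (no tensor
// data). The tensor count (64) is a hypothetical example; the overhead helpers are
// the real API.
//
//     struct ggml_init_params params = {
//         /*.mem_size   =*/ ggml_graph_overhead() + 64*ggml_tensor_overhead(),
//         /*.mem_buffer =*/ NULL,
//         /*.no_alloc   =*/ true, // metadata only - tensor data lives elsewhere
//     };
//     struct ggml_context * ctx_meta = ggml_init(params);
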
struct ggml_cgraph * ggml_new_graph_custom(struct ggml_context * ctx, size_t size, bool grads) {
    const size_t obj_size = ggml_graph_nbytes(size, grads);
    struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_GRAPH, obj_size);
    struct ggml_cgraph * cgraph = (struct ggml_cgraph *) ((char *) ctx->mem_buffer + obj->offs);

    struct ggml_tensor ** data_start = (struct ggml_tensor **) (cgraph + 1);

    size_t hash_size = ggml_hash_size(size * 2);
    struct ggml_tensor ** nodes_ptr     = data_start;
    struct ggml_tensor ** leafs_ptr     = nodes_ptr + size;
    struct ggml_tensor ** hash_keys_ptr = leafs_ptr + size;
    struct ggml_tensor ** grads_ptr     = grads ? hash_keys_ptr + hash_size : NULL;

    // check that we allocated the correct amount of memory
    assert(obj_size == (size_t) (
        (grads ? (char *)(grads_ptr + size) : (char *)(hash_keys_ptr + hash_size)) - (char *)cgraph));

    memset(hash_keys_ptr, 0, hash_size * sizeof(struct ggml_tensor *));

    *cgraph = (struct ggml_cgraph) {
        /*.size               =*/ size,
        /*.n_nodes            =*/ 0,
        /*.n_leafs            =*/ 0,
        /*.nodes              =*/ nodes_ptr,
        /*.grads              =*/ grads_ptr,
        /*.leafs              =*/ leafs_ptr,
        /*.visited_hash_table =*/ { hash_size, hash_keys_ptr },
        /*.order              =*/ GGML_CGRAPH_EVAL_ORDER_LEFT_TO_RIGHT,
        /*.perf_runs          =*/ 0,
        /*.perf_cycles        =*/ 0,
        /*.perf_time_us       =*/ 0,
    };

    return cgraph;
}

struct ggml_cgraph * ggml_new_graph(struct ggml_context * ctx) {
    return ggml_new_graph_custom(ctx, GGML_DEFAULT_GRAPH_SIZE, false);
}
struct ggml_cgraph ggml_graph_view(struct ggml_cgraph * cgraph0, int i0, int i1) {
    struct ggml_cgraph cgraph = {
        /*.size               =*/ 0,
        /*.n_nodes            =*/ i1 - i0,
        /*.n_leafs            =*/ 0,
        /*.nodes              =*/ cgraph0->nodes + i0,
        /*.grads              =*/ cgraph0->grads ? cgraph0->grads + i0 : NULL,
        /*.leafs              =*/ NULL,
        /*.visited_hash_table =*/ { 0, NULL },
        /*.order              =*/ cgraph0->order,
        /*.perf_runs          =*/ 0,
        /*.perf_cycles        =*/ 0,
        /*.perf_time_us       =*/ 0,
    };

    return cgraph;
}
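
// Illustrative sketch: evaluating a graph in two halves, e.g. to interleave other
// work between them. A view shares the node array of the parent graph, so it is
// only valid while the parent graph is.
//
//     const int mid = gf->n_nodes / 2;
//     struct ggml_cgraph g0 = ggml_graph_view(gf, 0,   mid);
//     struct ggml_cgraph g1 = ggml_graph_view(gf, mid, gf->n_nodes);
//     ggml_graph_compute_with_ctx(ctx, &g0, /*n_threads=*/4);
//     // ... other work ...
//     ggml_graph_compute_with_ctx(ctx, &g1, /*n_threads=*/4);
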
void ggml_graph_cpy(struct ggml_cgraph * src, struct ggml_cgraph * dst) {
    GGML_ASSERT(dst->size >= src->n_leafs);
    GGML_ASSERT(dst->size >= src->n_nodes);
    GGML_ASSERT(dst->visited_hash_table.size >= src->visited_hash_table.size);

    dst->n_leafs = src->n_leafs;
    dst->n_nodes = src->n_nodes;
    dst->order   = src->order;

    for (int i = 0; i < src->n_leafs; ++i) {
        dst->leafs[i] = src->leafs[i];
    }

    for (int i = 0; i < src->n_nodes; ++i) {
        dst->nodes[i] = src->nodes[i];
    }

    if (src->grads) {
        GGML_ASSERT(dst->grads != NULL);
        for (int i = 0; i < src->n_nodes; ++i) {
            dst->grads[i] = src->grads[i];
        }
    }

    for (size_t i = 0; i < src->visited_hash_table.size; ++i) {
        if (src->visited_hash_table.keys[i]) {
            ggml_hash_insert(dst->visited_hash_table, src->visited_hash_table.keys[i]);
        }
    }
}

struct ggml_cgraph * ggml_graph_dup(struct ggml_context * ctx, struct ggml_cgraph * cgraph) {
    struct ggml_cgraph * result = ggml_new_graph_custom(ctx, cgraph->size, cgraph->grads != NULL);
    ggml_graph_cpy(cgraph, result);
    return result;
}

void ggml_graph_reset(struct ggml_cgraph * cgraph) {
    GGML_ASSERT(cgraph->grads != NULL);

    for (int i = 0; i < cgraph->n_nodes; i++) {
        struct ggml_tensor * grad = cgraph->grads[i];

        if (grad) {
            ggml_set_zero(grad);
        }
    }
}

void ggml_graph_clear(struct ggml_cgraph * cgraph) {
    cgraph->n_leafs = 0;
    cgraph->n_nodes = 0;
    memset(cgraph->visited_hash_table.keys, 0, cgraph->visited_hash_table.size * sizeof(struct ggml_tensor *));
}
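
// Illustrative sketch of a minimal training-style loop, continuing the gf/gb
// example above. Hypothetical; a real optimizer would update the parameters
// between steps.
//
//     for (int step = 0; step < n_steps; ++step) {
//         ggml_graph_reset(gf);                            // zero all gradients
//         ggml_set_f32(c->grad, 1.0f);                     // seed d(output)/d(output)
//         ggml_graph_compute_with_ctx(ctx, gb, n_threads); // forward + backward
//         // ... use a->grad to update a ...
//     }
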
//
// thread data
//
// synchronization is done via busy loops
// I tried using spin locks, but I'm not sure how to use them correctly - the things I tried were slower than busy loops
//

#ifdef __APPLE__

//#include <os/lock.h>
//
//typedef os_unfair_lock ggml_lock_t;
//
//#define ggml_lock_init(x)    UNUSED(x)
//#define ggml_lock_destroy(x) UNUSED(x)
//#define ggml_lock_lock       os_unfair_lock_lock
//#define ggml_lock_unlock     os_unfair_lock_unlock
//
//#define GGML_LOCK_INITIALIZER OS_UNFAIR_LOCK_INIT

typedef int ggml_lock_t;

#define ggml_lock_init(x)    UNUSED(x)
#define ggml_lock_destroy(x) UNUSED(x)
#define ggml_lock_lock(x)    UNUSED(x)
#define ggml_lock_unlock(x)  UNUSED(x)

#define GGML_LOCK_INITIALIZER 0

typedef pthread_t ggml_thread_t;

#define ggml_thread_create pthread_create
#define ggml_thread_join   pthread_join

#else

//typedef pthread_spinlock_t ggml_lock_t;

//#define ggml_lock_init(x) pthread_spin_init(x, PTHREAD_PROCESS_PRIVATE)
//#define ggml_lock_destroy pthread_spin_destroy
//#define ggml_lock_lock    pthread_spin_lock
//#define ggml_lock_unlock  pthread_spin_unlock

typedef int ggml_lock_t;

#define ggml_lock_init(x)    UNUSED(x)
#define ggml_lock_destroy(x) UNUSED(x)
#if defined(__x86_64__) || (defined(_MSC_VER) && defined(_M_AMD64))
#define ggml_lock_lock(x)    _mm_pause()
#else
#define ggml_lock_lock(x)    UNUSED(x)
#endif
#define ggml_lock_unlock(x)  UNUSED(x)

#define GGML_LOCK_INITIALIZER 0

typedef pthread_t ggml_thread_t;

#define ggml_thread_create pthread_create
#define ggml_thread_join   pthread_join

#endif
// Android's libc implementation "bionic" does not support setting affinity
#if defined(__linux__) && !defined(__BIONIC__)
static void set_numa_thread_affinity(int thread_n, int n_threads) {
    if (!ggml_is_numa()) {
        return;
    }

    // run this thread on NUMA node: thread_n / (threads per node)
    const int node_num = thread_n / ((n_threads + g_state.numa.n_nodes - 1) / g_state.numa.n_nodes);
    struct ggml_numa_node * node = &g_state.numa.nodes[node_num];
    size_t setsize = CPU_ALLOC_SIZE(g_state.numa.total_cpus);

    cpu_set_t * cpus = CPU_ALLOC(g_state.numa.total_cpus);
    CPU_ZERO_S(setsize, cpus);
    for (size_t i = 0; i < node->n_cpus; ++i) {
        CPU_SET_S(node->cpus[i], setsize, cpus);
    }

    int rv = pthread_setaffinity_np(pthread_self(), setsize, cpus);
    if (rv) {
        fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n", strerror(rv));
    }

    CPU_FREE(cpus);
}

static void clear_numa_thread_affinity(void) {
    if (!ggml_is_numa()) {
        return;
    }

    size_t setsize = CPU_ALLOC_SIZE(g_state.numa.total_cpus);

    cpu_set_t * cpus = CPU_ALLOC(g_state.numa.total_cpus);
    CPU_ZERO_S(setsize, cpus);
    for (unsigned i = 0; i < g_state.numa.total_cpus; ++i) {
        CPU_SET_S(i, setsize, cpus);
    }

    int rv = pthread_setaffinity_np(pthread_self(), setsize, cpus);
    if (rv) {
        fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n", strerror(rv));
    }

    CPU_FREE(cpus);
}
#else
// TODO: Windows etc.
// (the linux implementation may also work on BSD, someone should test)
static void set_numa_thread_affinity(int thread_n, int n_threads) { UNUSED(thread_n); UNUSED(n_threads); }
static void clear_numa_thread_affinity(void) {}
#endif
struct ggml_compute_state_shared {
    const struct ggml_cgraph * cgraph;
    const struct ggml_cplan  * cplan;

    int64_t perf_node_start_cycles;
    int64_t perf_node_start_time_us;

    const int n_threads;

    // synchronization primitives
    atomic_int n_active; // num active threads
    atomic_int node_n;   // active graph node

    bool (*abort_callback)(void * data); // abort ggml_graph_compute when true
    void * abort_callback_data;
};

struct ggml_compute_state {
    ggml_thread_t thrd;
    int ith;
    struct ggml_compute_state_shared * shared;
};

static void ggml_graph_compute_perf_stats_node(struct ggml_tensor * node, const struct ggml_compute_state_shared * st) {
    int64_t cycles_cur  = ggml_perf_cycles()  - st->perf_node_start_cycles;
    int64_t time_us_cur = ggml_perf_time_us() - st->perf_node_start_time_us;

    node->perf_runs++;
    node->perf_cycles  += cycles_cur;
    node->perf_time_us += time_us_cur;
}
static int ggml_get_n_tasks(struct ggml_tensor * node, int n_threads) {
    int n_tasks = 0;

    switch (node->op) {
        case GGML_OP_CPY:
        case GGML_OP_DUP:
        case GGML_OP_ADD:
        case GGML_OP_ADD1:
        case GGML_OP_ACC:
            {
                n_tasks = n_threads;
            } break;
        case GGML_OP_SUB:
        case GGML_OP_SQR:
        case GGML_OP_SQRT:
        case GGML_OP_LOG:
        case GGML_OP_SUM:
        case GGML_OP_SUM_ROWS:
        case GGML_OP_MEAN:
        case GGML_OP_ARGMAX:
        case GGML_OP_REPEAT:
        case GGML_OP_REPEAT_BACK:
        case GGML_OP_LEAKY_RELU:
            {
                n_tasks = 1;
            } break;
        case GGML_OP_UNARY:
            switch (ggml_get_unary_op(node)) {
                case GGML_UNARY_OP_ABS:
                case GGML_UNARY_OP_SGN:
                case GGML_UNARY_OP_NEG:
                case GGML_UNARY_OP_STEP:
                case GGML_UNARY_OP_TANH:
                case GGML_UNARY_OP_ELU:
                case GGML_UNARY_OP_RELU:
                    {
                        n_tasks = 1;
                    } break;
                case GGML_UNARY_OP_GELU:
                case GGML_UNARY_OP_GELU_QUICK:
                case GGML_UNARY_OP_SILU:
                    {
                        n_tasks = n_threads;
                    } break;
                default:
                    GGML_ASSERT(false);
            }
            break;
        case GGML_OP_SILU_BACK:
        case GGML_OP_MUL:
        case GGML_OP_DIV:
        case GGML_OP_NORM:
        case GGML_OP_RMS_NORM:
        case GGML_OP_RMS_NORM_BACK:
        case GGML_OP_GROUP_NORM:
        case GGML_OP_CONCAT:
            {
                n_tasks = n_threads;
            } break;
        case GGML_OP_MUL_MAT:
            {
                n_tasks = n_threads;

                // TODO: use different scheduling for different matrix sizes
                //const int nr0 = ggml_nrows(node->src[0]);
                //const int nr1 = ggml_nrows(node->src[1]);

                //n_tasks = MIN(n_threads, MAX(1, nr0/128));
                //printf("nr0 = %8d, nr1 = %8d, nr0*nr1 = %8d, n_tasks%d\n", nr0, nr1, nr0*nr1, n_tasks);

#if defined(GGML_USE_CUBLAS)
                if (ggml_cuda_can_mul_mat(node->src[0], node->src[1], node)) {
                    n_tasks = 1; // TODO: this actually is doing nothing
                                 //       the threads are still spinning
                }
#elif defined(GGML_USE_CLBLAST)
                if (ggml_cl_can_mul_mat(node->src[0], node->src[1], node)) {
                    n_tasks = 1; // TODO: this actually is doing nothing
                                 //       the threads are still spinning
                }
#endif
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
                if (ggml_compute_forward_mul_mat_use_blas(node->src[0], node->src[1], node)) {
                    n_tasks = 1; // TODO: this actually is doing nothing
                                 //       the threads are still spinning
                }
#endif
            } break;
        case GGML_OP_MUL_MAT_ID:
            {
                // FIXME: blas
                n_tasks = n_threads;
            } break;
        case GGML_OP_OUT_PROD:
            {
                n_tasks = n_threads;
            } break;
        case GGML_OP_SCALE:
        case GGML_OP_SET:
        case GGML_OP_CONT:
        case GGML_OP_RESHAPE:
        case GGML_OP_VIEW:
        case GGML_OP_PERMUTE:
        case GGML_OP_TRANSPOSE:
        case GGML_OP_GET_ROWS:
        case GGML_OP_GET_ROWS_BACK:
        case GGML_OP_DIAG:
            {
                n_tasks = 1;
            } break;
        case GGML_OP_DIAG_MASK_ZERO:
        case GGML_OP_DIAG_MASK_INF:
        case GGML_OP_SOFT_MAX_BACK:
        case GGML_OP_ROPE:
        case GGML_OP_ROPE_BACK:
        case GGML_OP_ADD_REL_POS:
            {
                n_tasks = n_threads;
            } break;
        case GGML_OP_ALIBI:
            {
                n_tasks = 1; //TODO
            } break;
        case GGML_OP_CLAMP:
            {
                n_tasks = 1; //TODO
            } break;
        case GGML_OP_SOFT_MAX:
            {
                n_tasks = MIN(MIN(4, n_threads), ggml_nrows(node->src[0]));
            } break;
        case GGML_OP_CONV_TRANSPOSE_1D:
            {
                n_tasks = n_threads;
            } break;
        case GGML_OP_IM2COL:
            {
                n_tasks = n_threads;
            } break;
        case GGML_OP_CONV_TRANSPOSE_2D:
            {
                n_tasks = n_threads;
            } break;
        case GGML_OP_POOL_1D:
        case GGML_OP_POOL_2D:
            {
                n_tasks = 1;
            } break;
        case GGML_OP_UPSCALE:
            {
                n_tasks = n_threads;
            } break;
        case GGML_OP_PAD:
            {
                n_tasks = n_threads;
            } break;
        case GGML_OP_ARGSORT:
            {
                n_tasks = n_threads;
            } break;
        case GGML_OP_FLASH_ATTN:
            {
                n_tasks = n_threads;
            } break;
        case GGML_OP_FLASH_FF:
            {
                n_tasks = n_threads;
            } break;
        case GGML_OP_FLASH_ATTN_BACK:
            {
                n_tasks = n_threads;
            } break;
        case GGML_OP_WIN_PART:
        case GGML_OP_WIN_UNPART:
        case GGML_OP_GET_REL_POS:
        case GGML_OP_MAP_UNARY:
        case GGML_OP_MAP_BINARY:
        case GGML_OP_MAP_CUSTOM1_F32:
        case GGML_OP_MAP_CUSTOM2_F32:
        case GGML_OP_MAP_CUSTOM3_F32:
            {
                n_tasks = 1;
            } break;
        case GGML_OP_MAP_CUSTOM1:
            {
                struct ggml_map_custom1_op_params * p = (struct ggml_map_custom1_op_params *) node->op_params;
                if (p->n_tasks == GGML_N_TASKS_MAX) {
                    n_tasks = n_threads;
                } else {
                    n_tasks = MIN(p->n_tasks, n_threads);
                }
            } break;
        case GGML_OP_MAP_CUSTOM2:
            {
                struct ggml_map_custom2_op_params * p = (struct ggml_map_custom2_op_params *) node->op_params;
                if (p->n_tasks == GGML_N_TASKS_MAX) {
                    n_tasks = n_threads;
                } else {
                    n_tasks = MIN(p->n_tasks, n_threads);
                }
            } break;
        case GGML_OP_MAP_CUSTOM3:
            {
                struct ggml_map_custom3_op_params * p = (struct ggml_map_custom3_op_params *) node->op_params;
                if (p->n_tasks == GGML_N_TASKS_MAX) {
                    n_tasks = n_threads;
                } else {
                    n_tasks = MIN(p->n_tasks, n_threads);
                }
            } break;
        case GGML_OP_CROSS_ENTROPY_LOSS:
            {
                n_tasks = n_threads;
            } break;
        case GGML_OP_CROSS_ENTROPY_LOSS_BACK:
            {
                n_tasks = n_threads;
            } break;
        case GGML_OP_NONE:
            {
                n_tasks = 1;
            } break;
        case GGML_OP_COUNT:
            {
                GGML_ASSERT(false);
            } break;
        default:
            {
                fprintf(stderr, "%s: op not implemented: ", __func__);
                if (node->op < GGML_OP_COUNT) {
                    fprintf(stderr, "%s\n", ggml_op_name(node->op));
                } else {
                    fprintf(stderr, "%d\n", node->op);
                }
                GGML_ASSERT(false);
            } break;
    }

    assert(n_tasks > 0);

    return n_tasks;
}
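
// Illustrative sketch: custom ops can request their own degree of parallelism via
// the public ggml_map_custom1 API. The callback below is hypothetical;
// GGML_N_TASKS_MAX means "use all available threads".
//
//     static void my_op(struct ggml_tensor * dst, const struct ggml_tensor * a,
//                       int ith, int nth, void * userdata) {
//         // thread ith of nth processes its share of the rows of a into dst ...
//     }
//     ...
//     struct ggml_tensor * out = ggml_map_custom1(ctx, a, my_op, GGML_N_TASKS_MAX, NULL);
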
static thread_ret_t ggml_graph_compute_thread(void * data) {
    struct ggml_compute_state * state = (struct ggml_compute_state *) data;

    const struct ggml_cgraph * cgraph = state->shared->cgraph;
    const struct ggml_cplan  * cplan  = state->shared->cplan;

    const int n_threads = state->shared->n_threads;

    set_numa_thread_affinity(state->ith, n_threads);

    int node_n = -1;

    while (true) {
        if (cplan->abort_callback && cplan->abort_callback(cplan->abort_callback_data)) {
            state->shared->node_n += 1;
            return (thread_ret_t) GGML_EXIT_ABORTED;
        }

        if (atomic_fetch_sub(&state->shared->n_active, 1) == 1) {
            // all other threads are finished and spinning
            // do finalize and init here so we don't have to synchronize again
            struct ggml_compute_params params = {
                /*.type  =*/ GGML_TASK_FINALIZE,
                /*.ith   =*/ 0,
                /*.nth   =*/ 0,
                /*.wsize =*/ cplan->work_size,
                /*.wdata =*/ cplan->work_data,
            };

            if (node_n != -1) {
                /* FINALIZE */
                struct ggml_tensor * node = cgraph->nodes[node_n];
                if (GGML_OP_HAS_FINALIZE[node->op]) {
                    params.nth = ggml_get_n_tasks(node, n_threads);
                    ggml_compute_forward(&params, node);
                }
                ggml_graph_compute_perf_stats_node(node, state->shared);
            }

            // distribute new work or execute it directly if 1T
            while (++node_n < cgraph->n_nodes) {
                GGML_PRINT_DEBUG_5("%s: %d/%d\n", __func__, node_n, cgraph->n_nodes);

                struct ggml_tensor * node = cgraph->nodes[node_n];
                const int n_tasks = ggml_get_n_tasks(node, n_threads);

                state->shared->perf_node_start_cycles  = ggml_perf_cycles();
                state->shared->perf_node_start_time_us = ggml_perf_time_us();

                params.nth = n_tasks;

                /* INIT */
                if (GGML_OP_HAS_INIT[node->op]) {
                    params.type = GGML_TASK_INIT;
                    ggml_compute_forward(&params, node);
                }

                if (n_tasks == 1) {
                    // TODO: maybe push node_n to the atomic but if other threads see n_tasks is 1,
                    //       they do something more efficient than spinning (?)
                    params.type = GGML_TASK_COMPUTE;
                    ggml_compute_forward(&params, node);

                    if (GGML_OP_HAS_FINALIZE[node->op]) {
                        params.type = GGML_TASK_FINALIZE;
                        ggml_compute_forward(&params, node);
                    }

                    ggml_graph_compute_perf_stats_node(node, state->shared);
                } else {
                    break;
                }

                if (cplan->abort_callback && cplan->abort_callback(cplan->abort_callback_data)) {
                    break;
                }
            }

            atomic_store(&state->shared->n_active, n_threads);
            atomic_store(&state->shared->node_n,   node_n);
        } else {
            // wait for other threads to finish
            const int last = node_n;
            while (true) {
                // TODO: this sched_yield can have significant impact on the performance - either positive or negative
                //       depending on the workload and the operating system.
                //       since it is not clear what is the best approach, it should potentially become user-configurable
                //       ref: https://github.com/ggerganov/ggml/issues/291
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
                sched_yield();
#endif

                node_n = atomic_load(&state->shared->node_n);
                if (node_n != last) break;
            }
        }

        // check if we should stop
        if (node_n >= cgraph->n_nodes) break;

        /* COMPUTE */
        struct ggml_tensor * node = cgraph->nodes[node_n];
        const int n_tasks = ggml_get_n_tasks(node, n_threads);

        struct ggml_compute_params params = {
            /*.type  =*/ GGML_TASK_COMPUTE,
            /*.ith   =*/ state->ith,
            /*.nth   =*/ n_tasks,
            /*.wsize =*/ cplan->work_size,
            /*.wdata =*/ cplan->work_data,
        };

        if (state->ith < n_tasks) {
            ggml_compute_forward(&params, node);
        }
    }

    return GGML_EXIT_SUCCESS;
}
struct ggml_cplan ggml_graph_plan(struct ggml_cgraph * cgraph, int n_threads) {
    if (n_threads <= 0) {
        n_threads = GGML_DEFAULT_N_THREADS;
    }

    size_t work_size = 0;

    struct ggml_cplan cplan;
    memset(&cplan, 0, sizeof(struct ggml_cplan));

    // thread scheduling for the different operations + work buffer size estimation
    for (int i = 0; i < cgraph->n_nodes; i++) {
        struct ggml_tensor * node = cgraph->nodes[i];

        const int n_tasks = ggml_get_n_tasks(node, n_threads);

        size_t cur = 0;

        switch (node->op) {
            case GGML_OP_CPY:
            case GGML_OP_DUP:
                {
                    if (ggml_is_quantized(node->type)) {
                        cur = ggml_type_size(GGML_TYPE_F32) * node->ne[0] * n_tasks;
                    }
                } break;
            case GGML_OP_ADD:
            case GGML_OP_ADD1:
                {
                    if (ggml_is_quantized(node->src[0]->type)) {
                        cur = ggml_type_size(GGML_TYPE_F32) * node->src[0]->ne[0] * n_tasks;
                    }
                } break;
            case GGML_OP_ACC:
                {
                    if (ggml_is_quantized(node->src[0]->type)) {
                        cur = ggml_type_size(GGML_TYPE_F32) * node->src[1]->ne[0] * n_tasks;
                    }
                } break;
            case GGML_OP_MUL_MAT:
                {
                    const enum ggml_type vec_dot_type = type_traits[node->src[0]->type].vec_dot_type;

#if defined(GGML_USE_CLBLAST)
                    if (ggml_cl_can_mul_mat(node->src[0], node->src[1], node)) {
                        cur = ggml_cl_mul_mat_get_wsize(node->src[0], node->src[1], node);
                    } else
#endif
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
                    if (ggml_compute_forward_mul_mat_use_blas(node->src[0], node->src[1], node)) {
                        if (node->src[0]->type != GGML_TYPE_F32) {
                            // here we need memory just for a single 2D matrix from src0
                            cur = ggml_type_size(GGML_TYPE_F32)*(node->src[0]->ne[0]*node->src[0]->ne[1]);
                        }
                    } else
#endif
                    if (node->src[1]->type != vec_dot_type) {
                        cur = ggml_type_size(vec_dot_type)*ggml_nelements(node->src[1])/ggml_blck_size(vec_dot_type);
                    }
                } break;
            case GGML_OP_MUL_MAT_ID:
                {
                    const struct ggml_tensor * a = node->src[2];
                    const struct ggml_tensor * b = node->src[1];
                    const enum ggml_type vec_dot_type = type_traits[a->type].vec_dot_type;
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
                    if (ggml_compute_forward_mul_mat_use_blas(a, b, node)) {
                        if (a->type != GGML_TYPE_F32) {
                            // here we need memory just for a single 2D matrix from src0
                            cur = ggml_type_size(GGML_TYPE_F32)*(a->ne[0]*a->ne[1]);
                        }
                    } else
#endif
                    if (b->type != vec_dot_type) {
                        cur = ggml_type_size(vec_dot_type)*ggml_nelements(b)/ggml_blck_size(vec_dot_type);
                    }
                } break;
            case GGML_OP_OUT_PROD:
                {
                    if (ggml_is_quantized(node->src[0]->type)) {
                        cur = ggml_type_size(GGML_TYPE_F32) * node->src[0]->ne[0] * n_tasks;
                    }
                } break;
            case GGML_OP_SOFT_MAX:
                {
                    cur = ggml_type_size(GGML_TYPE_F32) * node->ne[0] * n_tasks;
                } break;
            case GGML_OP_CONV_TRANSPOSE_1D:
                {
                    GGML_ASSERT(node->src[0]->ne[3] == 1);
                    GGML_ASSERT(node->src[1]->ne[2] == 1);
                    GGML_ASSERT(node->src[1]->ne[3] == 1);

                    const int64_t ne00 = node->src[0]->ne[0];  // K
                    const int64_t ne01 = node->src[0]->ne[1];  // Cout
                    const int64_t ne02 = node->src[0]->ne[2];  // Cin

                    const int64_t ne10 = node->src[1]->ne[0];  // L
                    const int64_t ne11 = node->src[1]->ne[1];  // Cin

                    if (node->src[0]->type == GGML_TYPE_F16 &&
                        node->src[1]->type == GGML_TYPE_F32) {
                        cur += sizeof(ggml_fp16_t)*ne00*ne01*ne02;
                        cur += sizeof(ggml_fp16_t)*ne10*ne11;
                    } else if (node->src[0]->type == GGML_TYPE_F32 &&
                               node->src[1]->type == GGML_TYPE_F32) {
                        cur += sizeof(float)*ne00*ne01*ne02;
                        cur += sizeof(float)*ne10*ne11;
                    } else {
                        GGML_ASSERT(false);
                    }
                } break;
            case GGML_OP_CONV_TRANSPOSE_2D:
                {
                    const int64_t ne00 = node->src[0]->ne[0]; // W
                    const int64_t ne01 = node->src[0]->ne[1]; // H
                    const int64_t ne02 = node->src[0]->ne[2]; // Channels Out
                    const int64_t ne03 = node->src[0]->ne[3]; // Channels In

                    const int64_t ne10 = node->src[1]->ne[0]; // W
                    const int64_t ne11 = node->src[1]->ne[1]; // H
                    const int64_t ne12 = node->src[1]->ne[2]; // Channels In

                    cur += sizeof(ggml_fp16_t)*ne00*ne01*ne02*ne03;
                    cur += sizeof(ggml_fp16_t)*ne10*ne11*ne12;
                } break;
            case GGML_OP_FLASH_ATTN:
                {
                    const int64_t ne11 = ggml_up(node->src[1]->ne[1], GGML_SOFT_MAX_UNROLL);

                    if (node->src[1]->type == GGML_TYPE_F32) {
                        cur  = sizeof(float)*ne11*n_tasks; // TODO: this can become (n_tasks-1)
                        cur += sizeof(float)*ne11*n_tasks; // this is overestimated by x2
                    } else if (node->src[1]->type == GGML_TYPE_F16) {
                        cur  = sizeof(float)*ne11*n_tasks; // TODO: this can become (n_tasks-1)
                        cur += sizeof(float)*ne11*n_tasks; // this is overestimated by x2
                    }
                } break;
            case GGML_OP_FLASH_FF:
                {
                    if (node->src[1]->type == GGML_TYPE_F32) {
                        cur  = sizeof(float)*node->src[1]->ne[1]*n_tasks; // TODO: this can become (n_tasks-1)
                        cur += sizeof(float)*node->src[1]->ne[1]*n_tasks; // this is overestimated by x2
                    } else if (node->src[1]->type == GGML_TYPE_F16) {
                        cur  = sizeof(float)*node->src[1]->ne[1]*n_tasks; // TODO: this can become (n_tasks-1)
                        cur += sizeof(float)*node->src[1]->ne[1]*n_tasks; // this is overestimated by x2
                    }
                } break;
            case GGML_OP_FLASH_ATTN_BACK:
                {
                    const int64_t    D = node->src[0]->ne[0];
                    const int64_t ne11 = ggml_up(node->src[1]->ne[1], GGML_SOFT_MAX_UNROLL);
                    const int64_t mxDn = MAX(D, ne11) * 2; // *2 because of S and SM in ggml_compute_forward_flash_attn_back
                    if (node->src[1]->type == GGML_TYPE_F32) {
                        cur  = sizeof(float)*mxDn*n_tasks; // TODO: this can become (n_tasks-1)
                        cur += sizeof(float)*mxDn*n_tasks; // this is overestimated by x2
                    } else if (node->src[1]->type == GGML_TYPE_F16) {
                        cur  = sizeof(float)*mxDn*n_tasks; // TODO: this can become (n_tasks-1)
                        cur += sizeof(float)*mxDn*n_tasks; // this is overestimated by x2
                    }
                } break;
            case GGML_OP_CROSS_ENTROPY_LOSS:
                {
                    cur = ggml_type_size(node->type)*(n_tasks + node->src[0]->ne[0]*n_tasks);
                } break;
            case GGML_OP_COUNT:
                {
                    GGML_ASSERT(false);
                } break;
            default:
                break;
        }

        work_size = MAX(work_size, cur);
    }

    if (work_size > 0) {
        work_size += CACHE_LINE_SIZE*(n_threads - 1);
    }

    cplan.n_threads = n_threads;
    cplan.work_size = work_size;
    cplan.work_data = NULL;

    return cplan;
}
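
// Illustrative sketch: driving the plan/compute split by hand, e.g. to reuse a
// work buffer across calls or to wire up an abort callback. `my_abort` and
// `my_buf` are hypothetical names.
//
//     struct ggml_cplan cplan = ggml_graph_plan(gf, n_threads);
//     cplan.work_data           = my_buf;   // caller-owned, at least cplan.work_size bytes
//     cplan.abort_callback      = my_abort; // return true to stop mid-graph
//     cplan.abort_callback_data = NULL;
//     const int status = ggml_graph_compute(gf, &cplan); // GGML_EXIT_SUCCESS or GGML_EXIT_ABORTED
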
int ggml_graph_compute(struct ggml_cgraph * cgraph, struct ggml_cplan * cplan) {
    {
        GGML_ASSERT(cplan);
        GGML_ASSERT(cplan->n_threads > 0);

        if (cplan->work_size > 0) {
            GGML_ASSERT(cplan->work_data);
        }
    }

    const int n_threads = cplan->n_threads;

    struct ggml_compute_state_shared state_shared = {
        /*.cgraph                  =*/ cgraph,
        /*.cplan                   =*/ cplan,
        /*.perf_node_start_cycles  =*/ 0,
        /*.perf_node_start_time_us =*/ 0,
        /*.n_threads               =*/ n_threads,
        /*.n_active                =*/ n_threads,
        /*.node_n                  =*/ -1,
        /*.abort_callback          =*/ NULL,
        /*.abort_callback_data     =*/ NULL,
    };
    struct ggml_compute_state * workers = alloca(sizeof(struct ggml_compute_state)*n_threads);

    // create thread pool
    if (n_threads > 1) {
        for (int j = 1; j < n_threads; ++j) {
            workers[j] = (struct ggml_compute_state) {
                .thrd   = 0,
                .ith    = j,
                .shared = &state_shared,
            };

            const int rc = ggml_thread_create(&workers[j].thrd, NULL, ggml_graph_compute_thread, &workers[j]);
            GGML_ASSERT(rc == 0);
            UNUSED(rc);
        }
    }

    workers[0].ith    = 0;
    workers[0].shared = &state_shared;

    const int64_t perf_start_cycles  = ggml_perf_cycles();
    const int64_t perf_start_time_us = ggml_perf_time_us();

    // this is a work thread too
    int compute_status = (size_t) ggml_graph_compute_thread(&workers[0]);

    // don't leave affinity set on the main thread
    clear_numa_thread_affinity();

    // join or kill thread pool
    if (n_threads > 1) {
        for (int j = 1; j < n_threads; j++) {
            const int rc = ggml_thread_join(workers[j].thrd, NULL);
            GGML_ASSERT(rc == 0);
        }
    }

    // performance stats (graph)
    {
        int64_t perf_cycles_cur  = ggml_perf_cycles()  - perf_start_cycles;
        int64_t perf_time_us_cur = ggml_perf_time_us() - perf_start_time_us;

        cgraph->perf_runs++;
        cgraph->perf_cycles  += perf_cycles_cur;
        cgraph->perf_time_us += perf_time_us_cur;

        GGML_PRINT_DEBUG("%s: perf (%d) - cpu = %.3f / %.3f ms, wall = %.3f / %.3f ms\n",
                __func__, cgraph->perf_runs,
                (double) perf_cycles_cur      / (double) ggml_cycles_per_ms(),
                (double) cgraph->perf_cycles  / (double) ggml_cycles_per_ms() / (double) cgraph->perf_runs,
                (double) perf_time_us_cur     / 1000.0,
                (double) cgraph->perf_time_us / 1000.0 / cgraph->perf_runs);
    }

    return compute_status;
}
void ggml_graph_compute_with_ctx(struct ggml_context * ctx, struct ggml_cgraph * cgraph, int n_threads) {
    struct ggml_cplan cplan = ggml_graph_plan(cgraph, n_threads);

    struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_WORK_BUFFER, cplan.work_size);

    cplan.work_data = (uint8_t *)ctx->mem_buffer + obj->offs;

    ggml_graph_compute(cgraph, &cplan);
}
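
// Illustrative end-to-end sketch continuing the a/b/c forward example above. The
// convenience wrapper allocates the work buffer inside ctx, so ctx must have
// spare memory beyond the tensors and graph.
//
//     ggml_set_f32(a, 2.0f);
//     ggml_set_f32(b, 3.0f);
//     ggml_graph_compute_with_ctx(ctx, gf, /*n_threads=*/4);
//     const float y0 = ggml_get_f32_1d(c, 0); // 6.0f
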
struct ggml_tensor * ggml_graph_get_tensor(struct ggml_cgraph * cgraph, const char * name) {
    for (int i = 0; i < cgraph->n_leafs; i++) {
        struct ggml_tensor * leaf = cgraph->leafs[i];

        if (strcmp(leaf->name, name) == 0) {
            return leaf;
        }
    }

    for (int i = 0; i < cgraph->n_nodes; i++) {
        struct ggml_tensor * node = cgraph->nodes[i];

        if (strcmp(node->name, name) == 0) {
            return node;
        }
    }

    return NULL;
}
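
// Illustrative sketch: lookup is by the name stored in the tensor, so name tensors
// explicitly if you plan to retrieve them later (auto-generated names such as
// "node_3" depend on graph construction order).
//
//     ggml_set_name(c, "output");
//     ...
//     struct ggml_tensor * out = ggml_graph_get_tensor(gf, "output");
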
static void ggml_graph_export_leaf(const struct ggml_tensor * tensor, FILE * fout) {
    const int64_t * ne = tensor->ne;
    const size_t  * nb = tensor->nb;

    fprintf(fout, "%-6s %-12s %8d %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %16zu %16zu %16zu %16zu %16p %32s\n",
            ggml_type_name(tensor->type),
            ggml_op_name  (tensor->op),
            tensor->n_dims,
            ne[0], ne[1], ne[2], ne[3],
            nb[0], nb[1], nb[2], nb[3],
            tensor->data,
            tensor->name);
}

static void ggml_graph_export_node(const struct ggml_tensor * tensor, const char * arg, FILE * fout) {
    const int64_t * ne = tensor->ne;
    const size_t  * nb = tensor->nb;

    fprintf(fout, "%-6s %-6s %-12s %8d %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %16zu %16zu %16zu %16zu %16p %32s\n",
            arg,
            ggml_type_name(tensor->type),
            ggml_op_name  (tensor->op),
            tensor->n_dims,
            ne[0], ne[1], ne[2], ne[3],
            nb[0], nb[1], nb[2], nb[3],
            tensor->data,
            tensor->name);
}
void ggml_graph_export(const struct ggml_cgraph * cgraph, const char * fname) {
    uint64_t size_eval = 0;

    // compute size of intermediate results
    // TODO: does not take into account scratch buffers !!!!
    for (int i = 0; i < cgraph->n_nodes; ++i) {
        size_eval += ggml_nbytes_pad(cgraph->nodes[i]);
    }

    // print
    {
        FILE * fout = stdout;

        fprintf(fout, "\n");
        fprintf(fout, "%-16s %8x\n", "magic",   GGML_FILE_MAGIC);
        fprintf(fout, "%-16s %8d\n", "version", GGML_FILE_VERSION);
        fprintf(fout, "%-16s %8d\n", "leafs",   cgraph->n_leafs);
        fprintf(fout, "%-16s %8d\n", "nodes",   cgraph->n_nodes);
        fprintf(fout, "%-16s %" PRIu64 "\n", "eval", size_eval);

        // header
        fprintf(fout, "\n");
        fprintf(fout, "%-6s %-12s %8s %8s %8s %8s %8s %16s %16s %16s %16s %16s %16s\n",
                "TYPE", "OP", "NDIMS", "NE0", "NE1", "NE2", "NE3", "NB0", "NB1", "NB2", "NB3", "DATA", "NAME");

        for (int i = 0; i < cgraph->n_leafs; ++i) {
            ggml_graph_export_leaf(cgraph->leafs[i], fout);

            GGML_ASSERT(cgraph->leafs[i]->op     == GGML_OP_NONE);
            GGML_ASSERT(cgraph->leafs[i]->src[0] == NULL);
            GGML_ASSERT(cgraph->leafs[i]->src[1] == NULL);
        }

        // header
        fprintf(fout, "\n");
        fprintf(fout, "%-6s %-6s %-12s %8s %8s %8s %8s %8s %16s %16s %16s %16s %8s %16s %16s\n",
                "ARG", "TYPE", "OP", "NDIMS", "NE0", "NE1", "NE2", "NE3", "NB0", "NB1", "NB2", "NB3", "NTASKS", "DATA", "NAME");

        for (int i = 0; i < cgraph->n_nodes; ++i) {
            ggml_graph_export_node(cgraph->nodes[i], "DST", fout);

            for (int j = 0; j < GGML_MAX_SRC; ++j) {
                if (cgraph->nodes[i]->src[j]) {
                    ggml_graph_export_node(cgraph->nodes[i]->src[j], "SRC", fout);
                }
            }

            fprintf(fout, "\n");
        }

        fprintf(fout, "\n");
    }

    // write binary data
    {
        FILE * fout = fopen(fname, "wb");

        if (!fout) {
            fprintf(stderr, "%s: failed to open %s\n", __func__, fname);
            return;
        }

        // header
        {
            const uint32_t magic   = GGML_FILE_MAGIC;
            const uint32_t version = GGML_FILE_VERSION;
            const uint32_t n_leafs = cgraph->n_leafs;
            const uint32_t n_nodes = cgraph->n_nodes;

            fwrite(&magic,     sizeof(uint32_t), 1, fout);
            fwrite(&version,   sizeof(uint32_t), 1, fout);
            fwrite(&n_leafs,   sizeof(uint32_t), 1, fout);
            fwrite(&n_nodes,   sizeof(uint32_t), 1, fout);
            fwrite(&size_eval, sizeof(uint64_t), 1, fout);
        }

        // leafs
        {
            for (int i = 0; i < cgraph->n_leafs; ++i) {
                const struct ggml_tensor * tensor = cgraph->leafs[i];

                const uint32_t type   = tensor->type;
                const uint32_t op     = tensor->op;
                const uint32_t n_dims = tensor->n_dims;

                fwrite(&type,   sizeof(uint32_t), 1, fout);
                fwrite(&op,     sizeof(uint32_t), 1, fout);
                fwrite(&n_dims, sizeof(uint32_t), 1, fout);

                for (int j = 0; j < GGML_MAX_DIMS; ++j) {
                    const uint64_t ne = tensor->ne[j];
                    const uint64_t nb = tensor->nb[j];

                    fwrite(&ne, sizeof(uint64_t), 1, fout);
                    fwrite(&nb, sizeof(uint64_t), 1, fout);
                }

                fwrite(tensor->name,      sizeof(char), GGML_MAX_NAME,      fout);
                fwrite(tensor->op_params, sizeof(char), GGML_MAX_OP_PARAMS, fout);

                // dump the data
                // TODO: pad this to 32 byte boundary
                {
                    const size_t size = ggml_nbytes(tensor);

                    fwrite(tensor->data, sizeof(char), size, fout);
                }
            }
        }

        // nodes
        {
            for (int i = 0; i < cgraph->n_nodes; ++i) {
                const struct ggml_tensor * tensor = cgraph->nodes[i];

                const uint32_t type   = tensor->type;
                const uint32_t op     = tensor->op;
                const uint32_t n_dims = tensor->n_dims;

                fwrite(&type,   sizeof(uint32_t), 1, fout);
                fwrite(&op,     sizeof(uint32_t), 1, fout);
                fwrite(&n_dims, sizeof(uint32_t), 1, fout);

                for (int j = 0; j < GGML_MAX_DIMS; ++j) {
                    const uint64_t ne = tensor->ne[j];
                    const uint64_t nb = tensor->nb[j];

                    fwrite(&ne, sizeof(uint64_t), 1, fout);
                    fwrite(&nb, sizeof(uint64_t), 1, fout);
                }

                fwrite(tensor->name,      sizeof(char), GGML_MAX_NAME,      fout);
                fwrite(tensor->op_params, sizeof(char), GGML_MAX_OP_PARAMS, fout);

                // output the op arguments
                {
                    struct ggml_tensor * args[GGML_MAX_SRC] = { NULL };

                    for (int j = 0; j < GGML_MAX_SRC; ++j) {
                        args[j] = tensor->src[j];
                    }

                    for (int j = 0; j < GGML_MAX_SRC; ++j) {
                        if (args[j]) {
                            int32_t idx = -1;

                            // check if leaf
                            {
                                for (int k = 0; k < cgraph->n_leafs; ++k) {
                                    if (args[j] == cgraph->leafs[k]) {
                                        idx = k;
                                        break;
                                    }
                                }
                            }

                            // check if node
                            if (idx == -1) {
                                for (int k = 0; k < cgraph->n_nodes; ++k) {
                                    if (args[j] == cgraph->nodes[k]) {
                                        idx = cgraph->n_leafs + k;
                                        break;
                                    }
                                }
                            }

                            if (idx == -1) {
                                fprintf(stderr, "%s: failed to find tensor, arg = %d, node = %d\n", __func__, j, i);
                                fclose(fout);
                                return;
                            }

                            fwrite(&idx, sizeof(int32_t), 1, fout);
                        } else {
                            const int32_t nul = -1;

                            fwrite(&nul, sizeof(int32_t), 1, fout);
                        }
                    }
                }
            }
        }

        fclose(fout);
    }
}
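
// Illustrative sketch: exporting a built graph to a file. Tensor data is written
// inline, so all leafs must have valid data pointers. The filename is a
// hypothetical example.
//
//     ggml_graph_export(gf, "expr.ggml");
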
struct ggml_cgraph * ggml_graph_import(const char * fname, struct ggml_context ** ctx_data, struct ggml_context ** ctx_eval) {
    assert(*ctx_data == NULL);
    assert(*ctx_eval == NULL);

    struct ggml_cgraph * result = NULL;

    struct ggml_tensor * data = NULL;

    // read file into data
    {
        FILE * fin = fopen(fname, "rb");
        if (!fin) {
            fprintf(stderr, "%s: failed to open %s\n", __func__, fname);
            return result;
        }

        size_t fsize = 0;

        fseek(fin, 0, SEEK_END);
        fsize = ftell(fin);
        fseek(fin, 0, SEEK_SET);

        // create the data context
        {
            const size_t overhead = 1*ggml_tensor_overhead();

            struct ggml_init_params params = {
                .mem_size   = fsize + overhead,
                .mem_buffer = NULL,
                .no_alloc   = false,
            };

            *ctx_data = ggml_init(params);

            if (!*ctx_data) {
                fprintf(stderr, "%s: failed to create ggml context\n", __func__);
                fclose(fin);
                return result;
            }
        }

        data = ggml_new_tensor_1d(*ctx_data, GGML_TYPE_I8, fsize);

        {
            const size_t ret = fread(data->data, sizeof(char), fsize, fin);
            if (ret != fsize) {
                fprintf(stderr, "%s: failed to read %s\n", __func__, fname);
                fclose(fin);
                return result;
            }
        }

        fclose(fin);
    }

    // populate result
    {
        char * ptr = (char *) data->data;

        const uint32_t magic = *(const uint32_t *) ptr; ptr += sizeof(magic);

        if (magic != GGML_FILE_MAGIC) {
            fprintf(stderr, "%s: invalid magic number, got %08x\n", __func__, magic);
            return result;
        }

        const uint32_t version = *(const uint32_t *) ptr; ptr += sizeof(version);

        if (version != GGML_FILE_VERSION) {
            fprintf(stderr, "%s: invalid version number\n", __func__);
            return result;
        }

        const uint32_t n_leafs   = *(const uint32_t *) ptr; ptr += sizeof(n_leafs);
        const uint32_t n_nodes   = *(const uint32_t *) ptr; ptr += sizeof(n_nodes);
        const uint64_t size_eval = *(const uint64_t *) ptr; ptr += sizeof(size_eval);
        const int graph_size     = MAX(n_leafs, n_nodes);

        // create the data context
        {
            const size_t overhead = (n_leafs + n_nodes)*ggml_tensor_overhead() + ggml_graph_overhead_custom(graph_size, false);

            struct ggml_init_params params = {
                .mem_size   = size_eval + overhead,
                .mem_buffer = NULL,
                .no_alloc   = true,
            };

            *ctx_eval = ggml_init(params);

            if (!*ctx_eval) {
                fprintf(stderr, "%s: failed to create ggml context\n", __func__);
                return result;
            }
        }

        result = ggml_new_graph_custom(*ctx_eval, graph_size, false);

        result->n_leafs = n_leafs;
        result->n_nodes = n_nodes;

        // leafs
        {
            uint32_t type;
            uint32_t op;
            uint32_t n_dims;

            for (uint32_t i = 0; i < n_leafs; ++i) {
                type   = *(const uint32_t *) ptr; ptr += sizeof(type);
                op     = *(const uint32_t *) ptr; ptr += sizeof(op);
                n_dims = *(const uint32_t *) ptr; ptr += sizeof(n_dims);

                int64_t ne[GGML_MAX_DIMS];
                size_t  nb[GGML_MAX_DIMS];

                for (int j = 0; j < GGML_MAX_DIMS; ++j) {
                    uint64_t ne_cur;
                    uint64_t nb_cur;

                    ne_cur = *(const uint64_t *) ptr; ptr += sizeof(ne_cur);
                    nb_cur = *(const uint64_t *) ptr; ptr += sizeof(nb_cur);

                    ne[j] = ne_cur;
                    nb[j] = nb_cur;
                }

                struct ggml_tensor * tensor = ggml_new_tensor(*ctx_eval, (enum ggml_type) type, n_dims, ne);

                tensor->op = (enum ggml_op) op;

                memcpy(tensor->name,      ptr, GGML_MAX_NAME);      ptr += GGML_MAX_NAME;
                memcpy(tensor->op_params, ptr, GGML_MAX_OP_PARAMS); ptr += GGML_MAX_OP_PARAMS;

                tensor->data = (void *) ptr;

                for (int j = 0; j < GGML_MAX_DIMS; ++j) {
                    tensor->nb[j] = nb[j];
                }

                result->leafs[i] = tensor;

                ptr += ggml_nbytes(tensor);

                fprintf(stderr, "%s: loaded leaf %d: '%16s', %3d dims, %9zu bytes\n", __func__, i, tensor->name, n_dims, ggml_nbytes(tensor));
            }
        }

        ggml_set_no_alloc(*ctx_eval, false);

        // nodes
        {
            uint32_t type;
            uint32_t op;
            uint32_t n_dims;

            for (uint32_t i = 0; i < n_nodes; ++i) {
                type   = *(const uint32_t *) ptr; ptr += sizeof(type);
                op     = *(const uint32_t *) ptr; ptr += sizeof(op);
                n_dims = *(const uint32_t *) ptr; ptr += sizeof(n_dims);

                enum ggml_op eop = (enum ggml_op) op;

                int64_t ne[GGML_MAX_DIMS];
                size_t  nb[GGML_MAX_DIMS];

                for (int j = 0; j < GGML_MAX_DIMS; ++j) {
                    uint64_t ne_cur;
                    uint64_t nb_cur;

                    ne_cur = *(const uint64_t *) ptr; ptr += sizeof(ne_cur);
                    nb_cur = *(const uint64_t *) ptr; ptr += sizeof(nb_cur);

                    ne[j] = ne_cur;
                    nb[j] = nb_cur;
                }

                const char * ptr_name      = ptr; ptr += GGML_MAX_NAME;
                const char * ptr_op_params = ptr; ptr += GGML_MAX_OP_PARAMS;

                const int32_t * ptr_arg_idx = (const int32_t *) ptr; ptr += GGML_MAX_SRC*sizeof(int32_t);

                struct ggml_tensor * args[GGML_MAX_SRC] = { NULL };

                // parse args
                for (int j = 0; j < GGML_MAX_SRC; ++j) {
                    const int32_t arg_idx = ptr_arg_idx[j];

                    if (arg_idx == -1) {
                        continue;
                    }

                    if (arg_idx < result->n_leafs) {
                        args[j] = result->leafs[arg_idx];
                    } else {
                        args[j] = result->nodes[arg_idx - result->n_leafs];
                    }
                }

                // create the tensor
                // "view" operations are handled differently
                // TODO: handle inplace ops - currently a copy is always made

                struct ggml_tensor * tensor = NULL;

                switch (eop) {
                    // TODO: implement other view ops
                    case GGML_OP_RESHAPE:
                        {
                            tensor = ggml_reshape_4d(*ctx_eval, args[0], ne[0], ne[1], ne[2], ne[3]);
                        } break;
                    case GGML_OP_VIEW:
                        {
                            tensor = ggml_view_4d(*ctx_eval, args[0], ne[0], ne[1], ne[2], ne[3], 0, 0, 0, 0);

                            size_t offs;
                            memcpy(&offs, ptr_op_params, sizeof(offs));

                            tensor->data = ((char *) tensor->data) + offs;
                        } break;
                    case GGML_OP_TRANSPOSE:
                        {
                            tensor = ggml_transpose(*ctx_eval, args[0]);
                        } break;
                    case GGML_OP_PERMUTE:
                        {
                            tensor = ggml_view_4d(*ctx_eval, args[0], ne[0], ne[1], ne[2], ne[3], 0, 0, 0, 0);
                        } break;
                    default:
                        {
                            tensor = ggml_new_tensor(*ctx_eval, (enum ggml_type) type, n_dims, ne);

                            tensor->op = eop;
                        } break;
                }

                memcpy(tensor->name,      ptr_name,      GGML_MAX_NAME);
                memcpy(tensor->op_params, ptr_op_params, GGML_MAX_OP_PARAMS);

                for (int j = 0; j < GGML_MAX_DIMS; ++j) {
                    tensor->nb[j] = nb[j];
                }

                for (int j = 0; j < GGML_MAX_SRC; ++j) {
                    tensor->src[j] = args[j];
                }

                result->nodes[i] = tensor;

                fprintf(stderr, "%s: loaded node %d: '%16s', %3d dims, %9zu bytes\n", __func__, i, tensor->name, n_dims, ggml_nbytes(tensor));
            }
        }
    }

    return result;
}
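
// Illustrative sketch: loading the exported graph back and evaluating it. The two
// contexts returned via out-parameters own the raw file bytes and the rebuilt
// tensors, and must be freed by the caller. Note that ctx_eval is sized for the
// tensors only, so a sketch like this assumes spare room for the work buffer
// (otherwise use ggml_graph_plan with a caller-owned buffer).
//
//     struct ggml_context * ctx_data = NULL;
//     struct ggml_context * ctx_eval = NULL;
//     struct ggml_cgraph  * gi = ggml_graph_import("expr.ggml", &ctx_data, &ctx_eval);
//     ggml_graph_compute_with_ctx(ctx_eval, gi, /*n_threads=*/1);
//     ggml_free(ctx_eval);
//     ggml_free(ctx_data);
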
void ggml_graph_print(const struct ggml_cgraph * cgraph) {
    int64_t perf_total_per_op_us[GGML_OP_COUNT] = {0};

    GGML_PRINT("=== GRAPH ===\n");

    GGML_PRINT("n_nodes = %d\n", cgraph->n_nodes);
    for (int i = 0; i < cgraph->n_nodes; i++) {
        struct ggml_tensor * node = cgraph->nodes[i];

        perf_total_per_op_us[node->op] += MAX(1, node->perf_time_us);

        GGML_PRINT(" - %3d: [ %5" PRId64 ", %5" PRId64 ", %5" PRId64 "] %16s %s (%3d) cpu = %7.3f / %7.3f ms, wall = %7.3f / %7.3f ms\n",
                i,
                node->ne[0], node->ne[1], node->ne[2],
                ggml_op_name(node->op), node->is_param ? "x" : node->grad ? "g" : " ", node->perf_runs,
                (double) node->perf_cycles  / (double) ggml_cycles_per_ms(),
                (double) node->perf_cycles  / (double) ggml_cycles_per_ms() / (double) node->perf_runs,
                (double) node->perf_time_us / 1000.0,
                (double) node->perf_time_us / 1000.0 / node->perf_runs);
    }

    GGML_PRINT("n_leafs = %d\n", cgraph->n_leafs);
    for (int i = 0; i < cgraph->n_leafs; i++) {
        struct ggml_tensor * node = cgraph->leafs[i];

        GGML_PRINT(" - %3d: [ %5" PRId64 ", %5" PRId64 "] %8s %16s\n",
                i,
                node->ne[0], node->ne[1],
                ggml_op_name(node->op),
                ggml_get_name(node));
    }

    for (int i = 0; i < GGML_OP_COUNT; i++) {
        if (perf_total_per_op_us[i] == 0) {
            continue;
        }

        GGML_PRINT("perf_total_per_op_us[%16s] = %7.3f ms\n", ggml_op_name(i), (double) perf_total_per_op_us[i] / 1000.0);
    }

    GGML_PRINT("========================================\n");
}
// check if node is part of the graph
static bool ggml_graph_find(const struct ggml_cgraph * cgraph, const struct ggml_tensor * node) {
    if (cgraph == NULL) {
        return true;
    }

    for (int i = 0; i < cgraph->n_nodes; i++) {
        if (cgraph->nodes[i] == node) {
            return true;
        }
    }

    return false;
}

static struct ggml_tensor * ggml_graph_get_parent(const struct ggml_cgraph * cgraph, const struct ggml_tensor * node) {
    for (int i = 0; i < cgraph->n_nodes; i++) {
        struct ggml_tensor * parent = cgraph->nodes[i];

        if (parent->grad == node) {
            return parent;
        }
    }

    return NULL;
}

static void ggml_graph_dump_dot_node_edge(FILE * fp, const struct ggml_cgraph * gb, struct ggml_tensor * node, struct ggml_tensor * parent, const char * label) {
    struct ggml_tensor * gparent  = ggml_graph_get_parent(gb, node);
    struct ggml_tensor * gparent0 = ggml_graph_get_parent(gb, parent);
    fprintf(fp, "  \"%p\":%s -> \"%p\":%s [ arrowhead = %s; style = %s; label = \"%s\"; ]\n",
            gparent0 ? (void *) gparent0 : (void *) parent,
            gparent0 ? "g" : "x",
            gparent ? (void *) gparent : (void *) node,
            gparent ? "g" : "x",
            gparent ? "empty" : "vee",
            gparent ? "dashed" : "solid",
            label);
}

static void ggml_graph_dump_dot_leaf_edge(FILE * fp, struct ggml_tensor * node, struct ggml_tensor * parent, const char * label) {
    fprintf(fp, "  \"%p\":%s -> \"%p\":%s [ label = \"%s\"; ]\n",
            (void *) parent, "x",
            (void *) node, "x",
            label);
}
  14235. void ggml_graph_dump_dot(const struct ggml_cgraph * gb, const struct ggml_cgraph * gf, const char * filename) {
  14236. char color[16];
  14237. FILE * fp = fopen(filename, "w");
  14238. GGML_ASSERT(fp);
  14239. fprintf(fp, "digraph G {\n");
  14240. fprintf(fp, " newrank = true;\n");
  14241. fprintf(fp, " rankdir = LR;\n");
  14242. for (int i = 0; i < gb->n_nodes; i++) {
  14243. struct ggml_tensor * node = gb->nodes[i];
  14244. if (ggml_graph_get_parent(gb, node) != NULL) {
  14245. continue;
  14246. }
  14247. if (node->is_param) {
  14248. snprintf(color, sizeof(color), "yellow");
  14249. } else if (node->grad) {
  14250. if (ggml_graph_find(gf, node)) {
  14251. snprintf(color, sizeof(color), "green");
  14252. } else {
  14253. snprintf(color, sizeof(color), "lightblue");
  14254. }
  14255. } else {
  14256. snprintf(color, sizeof(color), "white");
  14257. }
  14258. fprintf(fp, " \"%p\" [ "
  14259. "style = filled; fillcolor = %s; shape = record; "
  14260. "label=\"",
  14261. (void *) node, color);
  14262. if (strlen(node->name) > 0) {
  14263. fprintf(fp, "%s (%s)|", node->name, ggml_type_name(node->type));
  14264. } else {
  14265. fprintf(fp, "(%s)|", ggml_type_name(node->type));
  14266. }
  14267. if (node->n_dims == 2) {
  14268. fprintf(fp, "%d [%" PRId64 ", %" PRId64 "] | <x>%s", i, node->ne[0], node->ne[1], ggml_op_symbol(node->op));
  14269. } else {
  14270. fprintf(fp, "%d [%" PRId64 ", %" PRId64 ", %" PRId64 "] | <x>%s", i, node->ne[0], node->ne[1], node->ne[2], ggml_op_symbol(node->op));
  14271. }
  14272. if (node->grad) {
  14273. fprintf(fp, " | <g>%s\"; ]\n", ggml_op_symbol(node->grad->op));
  14274. } else {
  14275. fprintf(fp, "\"; ]\n");
  14276. }
  14277. }
  14278. for (int i = 0; i < gb->n_leafs; i++) {
  14279. struct ggml_tensor * node = gb->leafs[i];
  14280. snprintf(color, sizeof(color), "pink");
  14281. fprintf(fp, " \"%p\" [ "
  14282. "style = filled; fillcolor = %s; shape = record; "
  14283. "label=\"<x>",
  14284. (void *) node, color);
  14285. if (strlen(node->name) > 0) {
  14286. fprintf(fp, "%s (%s)|", node->name, ggml_type_name(node->type));
  14287. } else {
  14288. fprintf(fp, "(%s)|", ggml_type_name(node->type));
  14289. }
  14290. fprintf(fp, "CONST %d [%" PRId64 ", %" PRId64 "]", i, node->ne[0], node->ne[1]);
  14291. if (ggml_nelements(node) < 5) {
  14292. fprintf(fp, " | (");
  14293. for (int j = 0; j < ggml_nelements(node); j++) {
  14294. if (node->type == GGML_TYPE_I8 || node->type == GGML_TYPE_I16 || node->type == GGML_TYPE_I32) {
  14295. fprintf(fp, "%d", ggml_get_i32_1d(node, j));
  14296. }
  14297. else if (node->type == GGML_TYPE_F32 || node->type == GGML_TYPE_F16) {
  14298. fprintf(fp, "%.1e", (double)ggml_get_f32_1d(node, j));
  14299. }
  14300. else {
  14301. fprintf(fp, "#");
  14302. }
  14303. if (j < ggml_nelements(node) - 1) {
  14304. fprintf(fp, ", ");
  14305. }
  14306. }
  14307. fprintf(fp, ")");
  14308. }
  14309. fprintf(fp, "\"; ]\n");
  14310. }
  14311. for (int i = 0; i < gb->n_nodes; i++) {
  14312. struct ggml_tensor * node = gb->nodes[i];
  14313. for (int j = 0; j < GGML_MAX_SRC; j++) {
  14314. if (node->src[j]) {
  14315. char label[16];
  14316. snprintf(label, sizeof(label), "src %d", j);
  14317. ggml_graph_dump_dot_node_edge(fp, gb, node, node->src[j], label);
  14318. }
  14319. }
  14320. }
  14321. for (int i = 0; i < gb->n_leafs; i++) {
  14322. struct ggml_tensor * node = gb->leafs[i];
  14323. for (int j = 0; j < GGML_MAX_SRC; j++) {
  14324. if (node->src[j]) {
  14325. char label[16];
  14326. snprintf(label, sizeof(label), "src %d", j);
  14327. ggml_graph_dump_dot_leaf_edge(fp, node, node->src[j], label);
  14328. }
  14329. }
  14330. }
  14331. fprintf(fp, "}\n");
  14332. fclose(fp);
  14333. GGML_PRINT("%s: dot -Tpng %s -o %s.png && open %s.png\n", __func__, filename, filename, filename);
  14334. }
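// Usage sketch (illustrative, not part of the library): render a graph and
// convert the dump with graphviz. ggml_new_graph_custom() and
// ggml_build_forward_expand() are used the same way in ggml_opt_resume() below.
//
//     struct ggml_cgraph * gf = ggml_new_graph_custom(ctx, GGML_DEFAULT_GRAPH_SIZE, false);
//     ggml_build_forward_expand(gf, f);            // f is the output tensor
//     ggml_graph_print   (gf);                     // per-op perf table (above)
//     ggml_graph_dump_dot(gf, NULL, "graph.dot");  // then: dot -Tpng graph.dot -o graph.png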
////////////////////////////////////////////////////////////////////////////////

static void ggml_opt_set_params(int np, struct ggml_tensor * const ps[], const float * x) {
    int i = 0;
    for (int p = 0; p < np; ++p) {
        const int64_t ne = ggml_nelements(ps[p]);
        // TODO: add function to set tensor from array
        for (int64_t j = 0; j < ne; ++j) {
            ggml_set_f32_1d(ps[p], j, x[i++]);
        }
    }
}

static void ggml_opt_get_params(int np, struct ggml_tensor * const ps[], float * x) {
    int i = 0;
    for (int p = 0; p < np; ++p) {
        const int64_t ne = ggml_nelements(ps[p]);
        // TODO: add function to get all elements at once
        for (int64_t j = 0; j < ne; ++j) {
            x[i++] = ggml_get_f32_1d(ps[p], j);
        }
    }
}

static void ggml_opt_get_grad(int np, struct ggml_tensor * const ps[], float * g) {
    int64_t i = 0;
    for (int p = 0; p < np; ++p) {
        const int64_t ne = ggml_nelements(ps[p]);
        // TODO: add function to get all elements at once
        for (int64_t j = 0; j < ne; ++j) {
            g[i++] = ggml_get_f32_1d(ps[p]->grad, j);
        }
    }
}

static void ggml_opt_acc_grad(int np, struct ggml_tensor * const ps[], float * g, float scale) {
    int64_t i = 0;
    for (int p = 0; p < np; ++p) {
        const int64_t ne = ggml_nelements(ps[p]);
        // TODO: add function to get all elements at once
        for (int64_t j = 0; j < ne; ++j) {
            g[i++] += ggml_get_f32_1d(ps[p]->grad, j) * scale;
        }
    }
}

//
// ADAM
//
//   ref: https://arxiv.org/pdf/1412.6980.pdf
//
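// One Adam step as implemented below, for parameter x with (clipped) gradient g,
// first/second moments m, v and iteration count t; the bias correction is folded
// into the beta1h/beta2h constants:
//
//     m  = m*beta1 + g*(1 - beta1)
//     v  = v*beta2 + g*g*(1 - beta2)
//     mh = m * alpha*sched/(1 - beta1^t)      // = m*beta1h
//     vh = sqrt(v / (1 - beta2^t)) + eps      // = sqrt(v*beta2h) + eps
//     x  = x*(1 - p_decay) - mh/vh            // p_decay: per-tensor weight decay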
static enum ggml_opt_result ggml_opt_adam(
        struct ggml_context * ctx,
        struct ggml_opt_context * opt,
        struct ggml_opt_params params,
        struct ggml_tensor * f,
        struct ggml_cgraph * gf,
        struct ggml_cgraph * gb,
        ggml_opt_callback callback,
        void * callback_data) {
    GGML_ASSERT(ggml_is_scalar(f));

    // these will store the parameters we want to optimize
    struct ggml_tensor * ps[GGML_MAX_PARAMS];

    int np = 0;
    int64_t nx = 0;
    for (int i = 0; i < gf->n_nodes; ++i) {
        if (gf->nodes[i]->is_param) {
            GGML_PRINT_DEBUG("found param %d: grad->op = %d\n", np, gf->nodes[i]->grad->op);

            GGML_ASSERT(np < GGML_MAX_PARAMS);

            ps[np++] = gf->nodes[i];
            nx += ggml_nelements(gf->nodes[i]);
        }
    }

    if ((opt->params.type != params.type) || (opt->nx != nx) || (opt->params.past != params.past)) {
        int iter = opt->iter;
        ggml_opt_init(opt->ctx, opt, params, nx);
        opt->iter = iter;
    }

    // constants
    float sched = params.adam.sched;
    const float alpha = params.adam.alpha;
    const float decay = params.adam.decay * alpha;
    const float beta1 = params.adam.beta1;
    const float beta2 = params.adam.beta2;
    const float eps   = params.adam.eps;
    const float gclip = params.adam.gclip;
    const int decay_min_ndim = params.adam.decay_min_ndim;
    const int n_accum = MAX(1, params.n_gradient_accumulation);
    const float accum_norm = 1.0f / (float) n_accum;

    float * g = opt->adam.g->data; // gradients
    float * m = opt->adam.m->data; // first moment
    float * v = opt->adam.v->data; // second moment

    float * pf = params.past > 0 ? opt->adam.pf->data : NULL; // past function values

    struct ggml_cplan cplan = ggml_graph_plan(gb, params.n_threads);
    struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_WORK_BUFFER, cplan.work_size);
    cplan.work_data = (uint8_t *)ctx->mem_buffer + obj->offs;

    bool cancel = false;

    // compute the function value
    float fx = 0;
    ggml_set_zero(opt->adam.g);
    for (int accum_step = 0; accum_step < n_accum; ++accum_step) {
        if (callback) {
            callback(callback_data, accum_step, &sched, &cancel);
            if (cancel) {
                return GGML_OPT_CANCEL;
            }
        }
        // ggml_graph_reset  (gf);
        ggml_set_f32      (f->grad, 1.0f);
        ggml_graph_compute(gb, &cplan);
        ggml_opt_acc_grad(np, ps, g, accum_norm);
        fx += ggml_get_f32_1d(f, 0);
    }
    fx *= accum_norm;

    opt->adam.fx_prev = fx;
    opt->adam.fx_best = opt->adam.fx_prev;
    if (pf) {
        pf[opt->iter % params.past] = opt->adam.fx_prev;
    }

    opt->loss_before = opt->adam.fx_prev;
    opt->loss_after  = opt->adam.fx_prev;

    // initialize
    if (opt->just_initialized) {
        opt->adam.n_no_improvement = 0;
        opt->just_initialized = false;
    }

    float * fx_best = &opt->adam.fx_best;
    float * fx_prev = &opt->adam.fx_prev;
    int * n_no_improvement = &opt->adam.n_no_improvement;

    int iter0 = opt->iter;

    // run the optimizer
    for (int t = 0; t < params.adam.n_iter; ++t) {
        opt->iter = iter0 + t + 1;
        GGML_PRINT_DEBUG  ("=== iter %d ===\n", t);

        GGML_PRINT_DEBUG  ("f      = %10.6f\n", ggml_get_f32_1d(f, 0));
        GGML_PRINT_DEBUG_5("df/dx0 = %10.6f\n", ggml_get_f32_1d(ps[0]->grad, 0));
        GGML_PRINT_DEBUG_5("df/dx1 = %10.6f\n", ggml_get_f32_1d(ps[1]->grad, 0));

        for (int i = 0; i < np; ++i) {
            GGML_PRINT_DEBUG("param %d: %10.6f, g = %10.6f\n", i,
                    ggml_get_f32_1d(ps[i], 0), ggml_get_f32_1d(ps[i]->grad, 0));
        }

        const int64_t t_start_wall = ggml_time_us();
        const int64_t t_start_cpu  = ggml_cycles();
        UNUSED(t_start_wall);
        UNUSED(t_start_cpu);

        {
            float gnorm = 1.0f;
            if (gclip > 0.0f) {
                // gradient clipping
                ggml_float sum = 0.0;
                for (int64_t i = 0; i < nx; ++i) {
                    sum += (ggml_float)(g[i]*g[i]);
                }
                ggml_float norm = sqrt(sum);
                if (norm > (ggml_float) gclip) {
                    gnorm = (float) ((ggml_float) gclip / norm);
                }
            }
            const float beta1h = alpha*sched/(1.0f - powf(beta1, opt->iter));
            const float beta2h =        1.0f/(1.0f - powf(beta2, opt->iter));
            int64_t i = 0;
            for (int p = 0; p < np; ++p) {
                const int64_t ne = ggml_nelements(ps[p]);
                const float p_decay = ((ps[p]->n_dims >= decay_min_ndim) ? decay : 0.0f) * sched;
                for (int64_t j = 0; j < ne; ++j) {
                    float x  = ggml_get_f32_1d(ps[p], j);
                    float g_ = g[i]*gnorm;
                    m[i] = m[i]*beta1 +    g_*(1.0f - beta1);
                    v[i] = v[i]*beta2 + g_*g_*(1.0f - beta2);
                    float mh = m[i]*beta1h;
                    float vh = v[i]*beta2h;
                    vh = sqrtf(vh) + eps;
                    x  = x*(1.0f - p_decay) - mh/vh;
                    ggml_set_f32_1d(ps[p], j, x);
                    ++i;
                }
            }
        }

        fx = 0;
        ggml_set_zero(opt->adam.g);
        for (int accum_step = 0; accum_step < n_accum; ++accum_step) {
            if (callback) {
                callback(callback_data, accum_step, &sched, &cancel);
                if (cancel) {
                    return GGML_OPT_CANCEL;
                }
            }
            // ggml_graph_reset  (gf);
            ggml_set_f32      (f->grad, 1.0f);
            ggml_graph_compute(gb, &cplan);
            ggml_opt_acc_grad(np, ps, g, accum_norm);
            fx += ggml_get_f32_1d(f, 0);
        }
        fx *= accum_norm;

        opt->loss_after = fx;

        // check convergence
        if (fabsf(fx - fx_prev[0])/fx < params.adam.eps_f) {
            GGML_PRINT_DEBUG("converged\n");

            return GGML_OPT_OK;
        }

        // delta-based convergence test
        if (pf != NULL) {
            // need at least params.past iterations to start checking for convergence
            if (params.past <= iter0 + t) {
                const float rate = (pf[(iter0 + t)%params.past] - fx)/fx;

                if (fabsf(rate) < params.delta) {
                    return GGML_OPT_OK;
                }
            }

            pf[(iter0 + t)%params.past] = fx;
        }

        // check for improvement
        if (params.max_no_improvement > 0) {
            if (fx_best[0] > fx) {
                fx_best[0] = fx;
                n_no_improvement[0] = 0;
            } else {
                ++n_no_improvement[0];

                if (n_no_improvement[0] >= params.max_no_improvement) {
                    return GGML_OPT_OK;
                }
            }
        }

        fx_prev[0] = fx;

        {
            const int64_t t_end_cpu = ggml_cycles();
            GGML_PRINT_DEBUG("time iter:      %5.3f s\n", ((float)(t_end_cpu - t_start_cpu))/CLOCKS_PER_SEC);
            UNUSED(t_end_cpu);

            const int64_t t_end_wall = ggml_time_us();
            GGML_PRINT_DEBUG("wall time iter: %5.3f s\n", (t_end_wall - t_start_wall)/1e6);
            UNUSED(t_end_wall);
        }
    }

    return GGML_OPT_DID_NOT_CONVERGE;
}
//
// L-BFGS
//
// the L-BFGS implementation below is based on the following implementation:
//
//   https://github.com/chokkan/liblbfgs
//
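// In brief, matching the alpha/beta loops at the bottom of ggml_opt_lbfgs:
// instead of a full inverse Hessian, the last m update pairs
// s_k = x_{k+1} - x_k and y_k = g_{k+1} - g_k are kept, and the search
// direction d ~= -H*g is rebuilt each iteration with the two-loop recursion:
//
//     d = -g
//     for j = newest..oldest:  alpha_j = (s_j . d)/ys_j;  d -= alpha_j*y_j
//     d *= ys/yy                                 // initial Hessian scaling
//     for j = oldest..newest:  beta = (y_j . d)/ys_j;  d += (alpha_j - beta)*s_j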
struct ggml_lbfgs_iteration_data {
    float alpha;
    float ys;
    float * s;
    float * y;
};

static enum ggml_opt_result linesearch_backtracking(
        const struct ggml_opt_params * params,
        int nx,
        float * x,
        float * fx,
        float * g,
        float * d,
        float * step,
        const float * xp,
        struct ggml_tensor * f,
        struct ggml_cgraph * gb,
        struct ggml_cplan  * cplan,
        const int np,
        struct ggml_tensor * ps[],
        bool * cancel,
        ggml_opt_callback callback,
        void * callback_data) {
    int count = 0;

    float width  = 0.0f;
    float dg     = 0.0f;
    float finit  = 0.0f;
    float dginit = 0.0f;
    float dgtest = 0.0f;

    const float dec = 0.5f;
    const float inc = 2.1f;

    const int n_accum = MAX(1, params->n_gradient_accumulation);
    const float accum_norm = 1.0f / (float) n_accum;

    if (*step <= 0.f) {
        return GGML_LINESEARCH_INVALID_PARAMETERS;
    }

    // compute the initial gradient in the search direction
    ggml_vec_dot_f32(nx, &dginit, g, d);

    // make sure that d points to a descent direction
    if (0 < dginit) {
        return GGML_LINESEARCH_FAIL;
    }

    // initialize local variables
    finit = *fx;
    dgtest = params->lbfgs.ftol*dginit;

    while (true) {
        ggml_vec_cpy_f32(nx, x, xp);
        ggml_vec_mad_f32(nx, x, d, *step);

        // evaluate the function and gradient values
        {
            ggml_opt_set_params(np, ps, x);

            *fx = 0;
            memset(g, 0, sizeof(float)*nx);
            for (int accum_step = 0; accum_step < n_accum; ++accum_step) {
                if (callback) {
                    // L-BFGS does not support learning rate -> ignore learning schedule
                    float sched = 0;
                    callback(callback_data, accum_step, &sched, cancel);
                    if (*cancel) {
                        return GGML_OPT_CANCEL;
                    }
                }
                // ggml_graph_reset  (gf);
                ggml_set_f32      (f->grad, 1.0f);
                ggml_graph_compute(gb, cplan);
                ggml_opt_acc_grad(np, ps, g, accum_norm);
                *fx += ggml_get_f32_1d(f, 0);
            }
            *fx *= accum_norm;
        }

        ++count;

        if (*fx > finit + (*step)*dgtest) {
            width = dec;
        } else {
            // Armijo condition is satisfied
            if (params->lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_ARMIJO) {
                return count;
            }

            ggml_vec_dot_f32(nx, &dg, g, d);

            // check the Wolfe condition
            if (dg < params->lbfgs.wolfe * dginit) {
                width = inc;
            } else {
                if (params->lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_WOLFE) {
                    // regular Wolfe conditions
                    return count;
                }

                if (dg > -params->lbfgs.wolfe*dginit) {
                    width = dec;
                } else {
                    // strong Wolfe condition (GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE)
                    return count;
                }
            }
        }

        if (*step < params->lbfgs.min_step) {
            return GGML_LINESEARCH_MINIMUM_STEP;
        }
        if (*step > params->lbfgs.max_step) {
            return GGML_LINESEARCH_MAXIMUM_STEP;
        }
        if (params->lbfgs.max_linesearch <= count) {
            return GGML_LINESEARCH_MAXIMUM_ITERATIONS;
        }

        (*step) *= width;
    }

    GGML_UNREACHABLE();
}
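// Summary of the conditions checked above, written for phi(t) = f(x + t*d),
// where phi'(0) = dginit < 0:
//
//     sufficient decrease (Armijo): phi(t)   <= phi(0) + ftol*t*phi'(0)
//     curvature (Wolfe):            phi'(t)  >= wolfe*phi'(0)
//     strong Wolfe:                 |phi'(t)| <= -wolfe*phi'(0)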
static enum ggml_opt_result ggml_opt_lbfgs(
        struct ggml_context * ctx,
        struct ggml_opt_context * opt,
        struct ggml_opt_params params,
        struct ggml_tensor * f,
        struct ggml_cgraph * gf,
        struct ggml_cgraph * gb,
        ggml_opt_callback callback,
        void * callback_data) {
    if (params.lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_WOLFE ||
        params.lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE) {
        if (params.lbfgs.wolfe <= params.lbfgs.ftol || 1.f <= params.lbfgs.wolfe) {
            return GGML_OPT_INVALID_WOLFE;
        }
    }

    const int m = params.lbfgs.m;

    // these will store the parameters we want to optimize
    struct ggml_tensor * ps[GGML_MAX_PARAMS];

    int np = 0;
    int nx = 0;
    for (int i = 0; i < gf->n_nodes; ++i) {
        if (gf->nodes[i]->is_param) {
            GGML_PRINT_DEBUG("found param %d: grad->op = %d\n", np, gf->nodes[i]->grad->op);

            GGML_ASSERT(np < GGML_MAX_PARAMS);

            ps[np++] = gf->nodes[i];
            nx += ggml_nelements(gf->nodes[i]);
        }
    }

    if ((opt->params.type != params.type) || (opt->nx != nx) || (opt->params.past != params.past) || (opt->params.lbfgs.m != params.lbfgs.m)) {
        int iter = opt->iter;
        ggml_opt_init(ctx, opt, params, nx);
        opt->iter = iter;
    }

    struct ggml_cplan cplan = ggml_graph_plan(gb, params.n_threads);
    struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_WORK_BUFFER, cplan.work_size);
    cplan.work_data = (uint8_t *)ctx->mem_buffer + obj->offs;

    float * x  = opt->lbfgs.x->data;  // current parameters
    float * xp = opt->lbfgs.xp->data; // previous parameters
    float * g  = opt->lbfgs.g->data;  // current gradient
    float * gp = opt->lbfgs.gp->data; // previous gradient
    float * d  = opt->lbfgs.d->data;  // search direction

    float * pf = params.past > 0 ? opt->lbfgs.pf->data : NULL; // past function values

    const int n_accum = MAX(1, params.n_gradient_accumulation);
    const float accum_norm = 1.0f / (float) n_accum;

    float fx    = 0.0f; // cost function value
    float xnorm = 0.0f; // ||x||
    float gnorm = 0.0f; // ||g||

    // initialize x from the graph nodes
    ggml_opt_get_params(np, ps, x);

    // the L-BFGS memory
    float * lm_alpha = opt->lbfgs.lmal->data;
    float * lm_ys    = opt->lbfgs.lmys->data;
    float * lm_s     = opt->lbfgs.lms->data;
    float * lm_y     = opt->lbfgs.lmy->data;

    bool cancel = false;

    // evaluate the function value and its gradient
    {
        ggml_opt_set_params(np, ps, x);

        fx = 0;
        memset(g, 0, sizeof(float)*nx);
        for (int accum_step = 0; accum_step < n_accum; ++accum_step) {
            if (callback) {
                // L-BFGS does not support learning rate -> ignore learning schedule
                float sched = 0;
                callback(callback_data, accum_step, &sched, &cancel);
                if (cancel) {
                    return GGML_OPT_CANCEL;
                }
            }
            // ggml_graph_reset  (gf);
            ggml_set_f32      (f->grad, 1.0f);
            ggml_graph_compute(gb, &cplan);
            ggml_opt_acc_grad(np, ps, g, accum_norm);
            fx += ggml_get_f32_1d(f, 0);
        }
        fx *= accum_norm;

        opt->loss_before = fx;
        opt->loss_after  = fx;
    }

    // search direction = -gradient
    ggml_vec_neg_f32(nx, d, g);

    // ||x||, ||g||
    ggml_vec_norm_f32(nx, &xnorm, x);
    ggml_vec_norm_f32(nx, &gnorm, g);

    if (xnorm < 1.0f) {
        xnorm = 1.0f;
    }

    // already optimized
    if (gnorm/xnorm <= params.lbfgs.eps) {
        return GGML_OPT_OK;
    }

    if (opt->just_initialized) {
        if (pf) {
            pf[0] = fx;
        }
        opt->lbfgs.fx_best = fx;

        // initial step
        ggml_vec_norm_inv_f32(nx, &opt->lbfgs.step, d);
        opt->lbfgs.j                = 0;
        opt->lbfgs.k                = 1;
        opt->lbfgs.end              = 0;
        opt->lbfgs.n_no_improvement = 0;
        opt->just_initialized       = false;
    }

    float * fx_best        = &opt->lbfgs.fx_best;
    float * step           = &opt->lbfgs.step;
    int * j                = &opt->lbfgs.j;
    int * k                = &opt->lbfgs.k;
    int * end              = &opt->lbfgs.end;
    int * n_no_improvement = &opt->lbfgs.n_no_improvement;

    int ls    = 0;
    int bound = 0;

    float ys   = 0.0f;
    float yy   = 0.0f;
    float beta = 0.0f;

    int it = 0;

    while (true) {
        // store the current position and gradient vectors
        ggml_vec_cpy_f32(nx, xp, x);
        ggml_vec_cpy_f32(nx, gp, g);

        // TODO: instead of passing &cancel here, use the return code of the linesearch
        //       to determine if the optimization should be cancelled
        //       this is a simple change, but not doing this atm, since I don't have a nice
        //       way to test and don't want to break something with so many changes lined up
        ls = linesearch_backtracking(&params, nx, x, &fx, g, d, step, xp, f, gb, &cplan, np, ps, &cancel, callback, callback_data);
        if (cancel) {
            return GGML_OPT_CANCEL;
        }

        if (ls < 0) {
            // linesearch failed - go back to the previous point and return
            ggml_vec_cpy_f32(nx, x, xp);
            ggml_vec_cpy_f32(nx, g, gp);

            return ls;
        }

        opt->loss_after = fx;

        ggml_vec_norm_f32(nx, &xnorm, x);
        ggml_vec_norm_f32(nx, &gnorm, g);

        GGML_PRINT_DEBUG("f = %10.6f\n", ggml_get_f32_1d(f, 0));

        if (xnorm < 1.0f) {
            xnorm = 1.0f;
        }
        if (gnorm/xnorm <= params.lbfgs.eps) {
            // converged
            return GGML_OPT_OK;
        }

        // delta-based convergence test
        if (pf != NULL) {
            // need at least params.past iterations to start checking for convergence
            if (params.past <= k[0]) {
                const float rate = (pf[k[0]%params.past] - fx)/fx;

                if (fabsf(rate) < params.delta) {
                    return GGML_OPT_OK;
                }
            }

            pf[k[0]%params.past] = fx;
        }

        // check for improvement
        if (params.max_no_improvement > 0) {
            if (fx < fx_best[0]) {
                fx_best[0] = fx;
                n_no_improvement[0] = 0;
            } else {
                n_no_improvement[0]++;

                if (n_no_improvement[0] >= params.max_no_improvement) {
                    return GGML_OPT_OK;
                }
            }
        }

        if (params.lbfgs.n_iter != 0 && params.lbfgs.n_iter < it + 1) {
            // reached the maximum number of iterations
            return GGML_OPT_DID_NOT_CONVERGE;
        }

        // update vectors s and y:
        //   s_{k+1} = x_{k+1} - x_{k} = \step * d_{k}.
        //   y_{k+1} = g_{k+1} - g_{k}.
        //
        ggml_vec_sub_f32(nx, &lm_s[end[0]*nx], x, xp);
        ggml_vec_sub_f32(nx, &lm_y[end[0]*nx], g, gp);

        // compute scalars ys and yy:
        //   ys = y^t \cdot s    -> 1 / \rho.
        //   yy = y^t \cdot y.
        //
        ggml_vec_dot_f32(nx, &ys, &lm_y[end[0]*nx], &lm_s[end[0]*nx]);
        ggml_vec_dot_f32(nx, &yy, &lm_y[end[0]*nx], &lm_y[end[0]*nx]);

        lm_ys[end[0]] = ys;

        // find new search direction
        //   ref: https://en.wikipedia.org/wiki/Limited-memory_BFGS

        bound = (m <= k[0]) ? m : k[0];
        k[0]++;
        it++;
        end[0] = (end[0] + 1)%m;

        // initialize search direction with -g
        ggml_vec_neg_f32(nx, d, g);

        j[0] = end[0];
        for (int i = 0; i < bound; ++i) {
            j[0] = (j[0] + m - 1) % m;
            // \alpha_{j} = \rho_{j} s^{t}_{j} \cdot q_{k+1}
            ggml_vec_dot_f32(nx, &lm_alpha[j[0]], &lm_s[j[0]*nx], d);
            lm_alpha[j[0]] /= lm_ys[j[0]];
            // q_{i} = q_{i+1} - \alpha_{i} y_{i}
            ggml_vec_mad_f32(nx, d, &lm_y[j[0]*nx], -lm_alpha[j[0]]);
        }

        ggml_vec_scale_f32(nx, d, ys/yy);

        for (int i = 0; i < bound; ++i) {
            // \beta_{j} = \rho_{j} y^t_{j} \cdot \gamma_{i}
            ggml_vec_dot_f32(nx, &beta, &lm_y[j[0]*nx], d);
            beta /= lm_ys[j[0]];
            // \gamma_{i+1} = \gamma_{i} + (\alpha_{j} - \beta_{j}) s_{j}
            ggml_vec_mad_f32(nx, d, &lm_s[j[0]*nx], lm_alpha[j[0]] - beta);
            j[0] = (j[0] + 1)%m;
        }

        step[0] = 1.0;
    }

    GGML_UNREACHABLE();
}
struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type) {
    struct ggml_opt_params result;

    switch (type) {
        case GGML_OPT_ADAM:
            {
                result = (struct ggml_opt_params) {
                    .type       = GGML_OPT_ADAM,
                    .graph_size = GGML_DEFAULT_GRAPH_SIZE,
                    .n_threads  = 1, // FIXME: GGML_DEFAULT_N_THREADS ?
                    .past       = 0,
                    .delta      = 1e-5f,

                    .max_no_improvement = 100,

                    .print_forward_graph  = true,
                    .print_backward_graph = true,

                    .n_gradient_accumulation = 1,

                    .adam = {
                        .n_iter = 10000,
                        .sched  = 1.000f,
                        .decay  = 0.0f,
                        .decay_min_ndim = 2,
                        .alpha  = 0.001f,
                        .beta1  = 0.9f,
                        .beta2  = 0.999f,
                        .eps    = 1e-8f,
                        .eps_f  = 1e-5f,
                        .eps_g  = 1e-3f,
                        .gclip  = 0.0f,
                    },
                };
            } break;
        case GGML_OPT_LBFGS:
            {
                result = (struct ggml_opt_params) {
                    .type       = GGML_OPT_LBFGS,
                    .graph_size = GGML_DEFAULT_GRAPH_SIZE,
                    .n_threads  = 1,
                    .past       = 0,
                    .delta      = 1e-5f,

                    .max_no_improvement = 0,

                    .print_forward_graph  = true,
                    .print_backward_graph = true,

                    .n_gradient_accumulation = 1,

                    .lbfgs = {
                        .m              = 6,
                        .n_iter         = 100,
                        .max_linesearch = 20,

                        .eps      = 1e-5f,
                        .ftol     = 1e-4f,
                        .wolfe    = 0.9f,
                        .min_step = 1e-20f,
                        .max_step = 1e+20f,

                        .linesearch = GGML_LINESEARCH_DEFAULT,
                    },
                };
            } break;
    }

    return result;
}
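// Usage sketch (illustrative): callers normally start from the defaults and
// override individual fields before invoking the optimizer:
//
//     struct ggml_opt_params opt_params = ggml_opt_default_params(GGML_OPT_ADAM);
//     opt_params.adam.n_iter = 500;    // fewer iterations
//     opt_params.adam.alpha  = 1e-2f;  // larger learning rate
//     opt_params.n_threads   = 4;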
GGML_API void ggml_opt_init(
        struct ggml_context * ctx,
        struct ggml_opt_context * opt,
        struct ggml_opt_params params,
        int64_t nx) {
    opt->ctx = ctx;
    opt->params = params;
    opt->iter = 0;
    opt->nx = nx;
    opt->just_initialized = true;
    if (opt->ctx == NULL) {
        struct ggml_init_params ctx_opt_params;
        if (opt->params.type == GGML_OPT_ADAM) {
            ctx_opt_params.mem_size = GGML_MEM_ALIGN*3 + ggml_tensor_overhead()*3 + ggml_type_size(GGML_TYPE_F32)*nx*3;
            if (opt->params.past > 0) {
                ctx_opt_params.mem_size += GGML_MEM_ALIGN + ggml_tensor_overhead() + ggml_type_size(GGML_TYPE_F32)*opt->params.past;
            }
        } else if (opt->params.type == GGML_OPT_LBFGS) {
            ctx_opt_params.mem_size = GGML_MEM_ALIGN*9 + ggml_tensor_overhead()*9 + ggml_type_size(GGML_TYPE_F32)*(nx*5 + opt->params.lbfgs.m*2 + nx*opt->params.lbfgs.m*2);
            if (opt->params.past > 0) {
                ctx_opt_params.mem_size += GGML_MEM_ALIGN + ggml_tensor_overhead() + ggml_type_size(GGML_TYPE_F32)*opt->params.past;
            }
        }
        ctx_opt_params.mem_buffer = NULL;
        ctx_opt_params.no_alloc   = false;

        opt->ctx = ggml_init(ctx_opt_params);
    }
    switch (opt->params.type) {
        case GGML_OPT_ADAM:
            {
                opt->adam.g  = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
                opt->adam.m  = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
                opt->adam.v  = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
                opt->adam.pf = params.past > 0
                    ? ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, params.past)
                    : NULL;
                ggml_set_zero(opt->adam.m);
                ggml_set_zero(opt->adam.v);
                if (opt->adam.pf) {
                    ggml_set_zero(opt->adam.pf);
                }
            } break;
        case GGML_OPT_LBFGS:
            {
                opt->lbfgs.x  = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
                opt->lbfgs.xp = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
                opt->lbfgs.g  = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
                opt->lbfgs.gp = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
                opt->lbfgs.d  = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
                opt->lbfgs.pf = params.past > 0
                    ? ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, params.past)
                    : NULL;
                opt->lbfgs.lmal = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, params.lbfgs.m);
                opt->lbfgs.lmys = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, params.lbfgs.m);
                opt->lbfgs.lms  = ggml_new_tensor_2d(opt->ctx, GGML_TYPE_F32, nx, params.lbfgs.m);
                opt->lbfgs.lmy  = ggml_new_tensor_2d(opt->ctx, GGML_TYPE_F32, nx, params.lbfgs.m);
                ggml_set_zero(opt->lbfgs.x);
                ggml_set_zero(opt->lbfgs.xp);
                ggml_set_zero(opt->lbfgs.g);
                ggml_set_zero(opt->lbfgs.gp);
                ggml_set_zero(opt->lbfgs.d);
                if (opt->lbfgs.pf) {
                    ggml_set_zero(opt->lbfgs.pf);
                }
                ggml_set_zero(opt->lbfgs.lmal);
                ggml_set_zero(opt->lbfgs.lmys);
                ggml_set_zero(opt->lbfgs.lms);
                ggml_set_zero(opt->lbfgs.lmy);
            } break;
    }
}

enum ggml_opt_result ggml_opt(
        struct ggml_context * ctx,
        struct ggml_opt_params params,
        struct ggml_tensor * f) {
    bool free_ctx = false;
    if (ctx == NULL) {
        struct ggml_init_params params_ctx = {
            .mem_size   = 16*1024*1024,
            .mem_buffer = NULL,
            .no_alloc   = false,
        };

        ctx = ggml_init(params_ctx);
        if (ctx == NULL) {
            return GGML_OPT_NO_CONTEXT;
        }

        free_ctx = true;
    }

    enum ggml_opt_result result = GGML_OPT_OK;

    struct ggml_opt_context * opt = (struct ggml_opt_context *) alloca(sizeof(struct ggml_opt_context));

    ggml_opt_init(ctx, opt, params, 0);
    result = ggml_opt_resume(ctx, opt, f);

    if (free_ctx) {
        ggml_free(ctx);
    }

    return result;
}
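// Minimal end-to-end sketch (illustrative; assumes the usual ggml tensor API,
// not code from this file): minimize sum(x^2) over a small parameter vector
// with the default Adam settings.
//
//     struct ggml_init_params ip = { /*.mem_size =*/ 16*1024*1024, /*.mem_buffer =*/ NULL, /*.no_alloc =*/ false };
//     struct ggml_context * ctx0 = ggml_init(ip);
//     struct ggml_tensor  * x    = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 8);
//     ggml_set_f32(x, 1.0f);
//     ggml_set_param(ctx0, x);                                      // mark x as trainable
//     struct ggml_tensor * f = ggml_sum(ctx0, ggml_sqr(ctx0, x));   // scalar loss
//     enum ggml_opt_result res = ggml_opt(ctx0, ggml_opt_default_params(GGML_OPT_ADAM), f);
//     ggml_free(ctx0);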
enum ggml_opt_result ggml_opt_resume(
        struct ggml_context * ctx,
        struct ggml_opt_context * opt,
        struct ggml_tensor * f) {
    // build forward + backward compute graphs
    struct ggml_cgraph * gf = ggml_new_graph_custom(ctx, opt->params.graph_size, true);
    ggml_build_forward_expand(gf, f);

    struct ggml_cgraph * gb = ggml_graph_dup(ctx, gf);
    ggml_build_backward_expand(ctx, gf, gb, true);

    return ggml_opt_resume_g(ctx, opt, f, gf, gb, NULL, NULL);
}

enum ggml_opt_result ggml_opt_resume_g(
        struct ggml_context * ctx,
        struct ggml_opt_context * opt,
        struct ggml_tensor * f,
        struct ggml_cgraph * gf,
        struct ggml_cgraph * gb,
        ggml_opt_callback callback,
        void * callback_data) {
    // build forward + backward compute graphs
    enum ggml_opt_result result = GGML_OPT_OK;

    switch (opt->params.type) {
        case GGML_OPT_ADAM:
            {
                result = ggml_opt_adam(ctx, opt, opt->params, f, gf, gb, callback, callback_data);
            } break;
        case GGML_OPT_LBFGS:
            {
                result = ggml_opt_lbfgs(ctx, opt, opt->params, f, gf, gb, callback, callback_data);
            } break;
    }

    if (opt->params.print_forward_graph) {
        ggml_graph_print   (gf);
        ggml_graph_dump_dot(gf, NULL, "opt-forward.dot");
    }

    if (opt->params.print_backward_graph) {
        ggml_graph_print   (gb);
        ggml_graph_dump_dot(gb, gf, "opt-backward.dot");
    }

    return result;
}

////////////////////////////////////////////////////////////////////////////////
size_t ggml_quantize_q4_0(const float * src, void * dst, int n, int k, int64_t * hist) {
    assert(k % QK4_0 == 0);
    const int nb = k / QK4_0;

    for (int b = 0; b < n; b += k) {
        block_q4_0 * restrict y = (block_q4_0 *) dst + b/QK4_0;

        quantize_row_q4_0_reference(src + b, y, k);

        for (int i = 0; i < nb; i++) {
            for (int j = 0; j < QK4_0; j += 2) {
                const uint8_t vi0 = y[i].qs[j/2] & 0x0F;
                const uint8_t vi1 = y[i].qs[j/2] >> 4;

                hist[vi0]++;
                hist[vi1]++;
            }
        }
    }

    return (n/QK4_0*sizeof(block_q4_0));
}

size_t ggml_quantize_q4_1(const float * src, void * dst, int n, int k, int64_t * hist) {
    assert(k % QK4_1 == 0);
    const int nb = k / QK4_1;

    for (int b = 0; b < n; b += k) {
        block_q4_1 * restrict y = (block_q4_1 *) dst + b/QK4_1;

        quantize_row_q4_1_reference(src + b, y, k);

        for (int i = 0; i < nb; i++) {
            for (int j = 0; j < QK4_1; j += 2) {
                const uint8_t vi0 = y[i].qs[j/2] & 0x0F;
                const uint8_t vi1 = y[i].qs[j/2] >> 4;

                hist[vi0]++;
                hist[vi1]++;
            }
        }
    }

    return (n/QK4_1*sizeof(block_q4_1));
}

size_t ggml_quantize_q5_0(const float * src, void * dst, int n, int k, int64_t * hist) {
    assert(k % QK5_0 == 0);
    const int nb = k / QK5_0;

    for (int b = 0; b < n; b += k) {
        block_q5_0 * restrict y = (block_q5_0 *)dst + b/QK5_0;

        quantize_row_q5_0_reference(src + b, y, k);

        for (int i = 0; i < nb; i++) {
            uint32_t qh;
            memcpy(&qh, &y[i].qh, sizeof(qh));

            for (int j = 0; j < QK5_0; j += 2) {
                const uint8_t vh0 = ((qh & (1u << (j/2 + 0 ))) >> (j/2 + 0 )) << 4;
                const uint8_t vh1 = ((qh & (1u << (j/2 + 16))) >> (j/2 + 12));

                // cast to 16 bins
                const uint8_t vi0 = ((y[i].qs[j/2] & 0x0F) | vh0) / 2;
                const uint8_t vi1 = ((y[i].qs[j/2] >>   4) | vh1) / 2;

                hist[vi0]++;
                hist[vi1]++;
            }
        }
    }

    return (n/QK5_0*sizeof(block_q5_0));
}

size_t ggml_quantize_q5_1(const float * src, void * dst, int n, int k, int64_t * hist) {
    assert(k % QK5_1 == 0);
    const int nb = k / QK5_1;

    for (int b = 0; b < n; b += k) {
        block_q5_1 * restrict y = (block_q5_1 *)dst + b/QK5_1;

        quantize_row_q5_1_reference(src + b, y, k);

        for (int i = 0; i < nb; i++) {
            uint32_t qh;
            memcpy(&qh, &y[i].qh, sizeof(qh));

            for (int j = 0; j < QK5_1; j += 2) {
                const uint8_t vh0 = ((qh & (1u << (j/2 + 0 ))) >> (j/2 + 0 )) << 4;
                const uint8_t vh1 = ((qh & (1u << (j/2 + 16))) >> (j/2 + 12));

                // cast to 16 bins
                const uint8_t vi0 = ((y[i].qs[j/2] & 0x0F) | vh0) / 2;
                const uint8_t vi1 = ((y[i].qs[j/2] >>   4) | vh1) / 2;

                hist[vi0]++;
                hist[vi1]++;
            }
        }
    }

    return (n/QK5_1*sizeof(block_q5_1));
}

size_t ggml_quantize_q8_0(const float * src, void * dst, int n, int k, int64_t * hist) {
    assert(k % QK8_0 == 0);
    const int nb = k / QK8_0;

    for (int b = 0; b < n; b += k) {
        block_q8_0 * restrict y = (block_q8_0 *)dst + b/QK8_0;

        quantize_row_q8_0_reference(src + b, y, k);

        for (int i = 0; i < nb; i++) {
            for (int j = 0; j < QK8_0; ++j) {
                const int8_t vi = y[i].qs[j];

                hist[vi/16 + 8]++;
            }
        }
    }

    return (n/QK8_0*sizeof(block_q8_0));
}
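// Usage sketch (illustrative): the functions above quantize a row-aligned f32
// buffer and fill a 16-bin histogram of the produced quants. src and dst are
// assumed to be allocated by the caller:
//
//     int64_t hist[16] = {0};
//     const int n = 4096;  // total number of elements, a multiple of the block size
//     size_t bytes = ggml_quantize_q8_0(src, dst, n, n, hist);  // src: n floats, dst: n/QK8_0 blocks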
size_t ggml_quantize_chunk(enum ggml_type type, const float * src, void * dst, int start, int n, int64_t * hist) {
    size_t result = 0;
    switch (type) {
        case GGML_TYPE_Q4_0:
            {
                GGML_ASSERT(start % QK4_0 == 0);
                block_q4_0 * block = (block_q4_0*)dst + start / QK4_0;
                result = ggml_quantize_q4_0(src + start, block, n, n, hist);
            } break;
        case GGML_TYPE_Q4_1:
            {
                GGML_ASSERT(start % QK4_1 == 0);
                block_q4_1 * block = (block_q4_1*)dst + start / QK4_1;
                result = ggml_quantize_q4_1(src + start, block, n, n, hist);
            } break;
        case GGML_TYPE_Q5_0:
            {
                GGML_ASSERT(start % QK5_0 == 0);
                block_q5_0 * block = (block_q5_0*)dst + start / QK5_0;
                result = ggml_quantize_q5_0(src + start, block, n, n, hist);
            } break;
        case GGML_TYPE_Q5_1:
            {
                GGML_ASSERT(start % QK5_1 == 0);
                block_q5_1 * block = (block_q5_1*)dst + start / QK5_1;
                result = ggml_quantize_q5_1(src + start, block, n, n, hist);
            } break;
        case GGML_TYPE_Q8_0:
            {
                GGML_ASSERT(start % QK8_0 == 0);
                block_q8_0 * block = (block_q8_0*)dst + start / QK8_0;
                result = ggml_quantize_q8_0(src + start, block, n, n, hist);
            } break;
        case GGML_TYPE_Q2_K:
            {
                GGML_ASSERT(start % QK_K == 0);
                block_q2_K * block = (block_q2_K*)dst + start / QK_K;
                result = ggml_quantize_q2_K(src + start, block, n, n, hist);
            } break;
        case GGML_TYPE_Q3_K:
            {
                GGML_ASSERT(start % QK_K == 0);
                block_q3_K * block = (block_q3_K*)dst + start / QK_K;
                result = ggml_quantize_q3_K(src + start, block, n, n, hist);
            } break;
        case GGML_TYPE_Q4_K:
            {
                GGML_ASSERT(start % QK_K == 0);
                block_q4_K * block = (block_q4_K*)dst + start / QK_K;
                result = ggml_quantize_q4_K(src + start, block, n, n, hist);
            } break;
        case GGML_TYPE_Q5_K:
            {
                GGML_ASSERT(start % QK_K == 0);
                block_q5_K * block = (block_q5_K*)dst + start / QK_K;
                result = ggml_quantize_q5_K(src + start, block, n, n, hist);
            } break;
        case GGML_TYPE_Q6_K:
            {
                GGML_ASSERT(start % QK_K == 0);
                block_q6_K * block = (block_q6_K*)dst + start / QK_K;
                result = ggml_quantize_q6_K(src + start, block, n, n, hist);
            } break;
        case GGML_TYPE_F16:
            {
                int elemsize = sizeof(ggml_fp16_t);
                ggml_fp32_to_fp16_row(src + start, (ggml_fp16_t *)dst + start, n);
                result = n * elemsize;
            } break;
        case GGML_TYPE_F32:
            {
                int elemsize = sizeof(float);
                result = n * elemsize;
                memcpy((uint8_t *)dst + start * elemsize, src + start, result);
            } break;
        default:
            assert(false);
    }
    return result;
}
////////////////////////////////////////////////////////////////////////////////

struct gguf_str {
    uint64_t n;  // GGUFv2
    char * data;
};

static const size_t GGUF_TYPE_SIZE[GGUF_TYPE_COUNT] = {
    [GGUF_TYPE_UINT8]   = sizeof(uint8_t),
    [GGUF_TYPE_INT8]    = sizeof(int8_t),
    [GGUF_TYPE_UINT16]  = sizeof(uint16_t),
    [GGUF_TYPE_INT16]   = sizeof(int16_t),
    [GGUF_TYPE_UINT32]  = sizeof(uint32_t),
    [GGUF_TYPE_INT32]   = sizeof(int32_t),
    [GGUF_TYPE_FLOAT32] = sizeof(float),
    [GGUF_TYPE_BOOL]    = sizeof(bool),
    [GGUF_TYPE_STRING]  = sizeof(struct gguf_str),
    [GGUF_TYPE_UINT64]  = sizeof(uint64_t),
    [GGUF_TYPE_INT64]   = sizeof(int64_t),
    [GGUF_TYPE_FLOAT64] = sizeof(double),
    [GGUF_TYPE_ARRAY]   = 0, // undefined
};
static_assert(GGUF_TYPE_COUNT == 13, "GGUF_TYPE_COUNT != 13");

static const char * GGUF_TYPE_NAME[GGUF_TYPE_COUNT] = {
    [GGUF_TYPE_UINT8]   = "u8",
    [GGUF_TYPE_INT8]    = "i8",
    [GGUF_TYPE_UINT16]  = "u16",
    [GGUF_TYPE_INT16]   = "i16",
    [GGUF_TYPE_UINT32]  = "u32",
    [GGUF_TYPE_INT32]   = "i32",
    [GGUF_TYPE_FLOAT32] = "f32",
    [GGUF_TYPE_BOOL]    = "bool",
    [GGUF_TYPE_STRING]  = "str",
    [GGUF_TYPE_ARRAY]   = "arr",
    [GGUF_TYPE_UINT64]  = "u64",
    [GGUF_TYPE_INT64]   = "i64",
    [GGUF_TYPE_FLOAT64] = "f64",
};
static_assert(GGUF_TYPE_COUNT == 13, "GGUF_TYPE_COUNT != 13");

union gguf_value {
    uint8_t  uint8;
    int8_t   int8;
    uint16_t uint16;
    int16_t  int16;
    uint32_t uint32;
    int32_t  int32;
    float    float32;
    uint64_t uint64;
    int64_t  int64;
    double   float64;
    bool     bool_;

    struct gguf_str str;

    struct {
        enum gguf_type type;

        uint64_t n;  // GGUFv2
        void * data;
    } arr;
};

struct gguf_kv {
    struct gguf_str key;

    enum  gguf_type  type;
    union gguf_value value;
};

struct gguf_header {
    char magic[4];
    uint32_t version;
    uint64_t n_tensors; // GGUFv2
    uint64_t n_kv;      // GGUFv2
};

struct gguf_tensor_info {
    struct gguf_str name;

    uint32_t n_dims;
    uint64_t ne[GGML_MAX_DIMS];

    enum ggml_type type;

    uint64_t offset; // offset from start of `data`, must be a multiple of `ALIGNMENT`

    // for writing API
    const void * data;
    size_t size;
};

struct gguf_context {
    struct gguf_header header;

    struct gguf_kv          * kv;
    struct gguf_tensor_info * infos;

    size_t alignment;
    size_t offset;    // offset of `data` from beginning of file
    size_t size;      // size of `data` in bytes

    //uint8_t * padding;
    void * data;
};

static bool gguf_fread_el(FILE * file, void * dst, size_t size, size_t * offset) {
    const size_t n = fread(dst, 1, size, file);
    *offset += n;
    return n == size;
}

static bool gguf_fread_str(FILE * file, struct gguf_str * p, size_t * offset) {
    p->n    = 0;
    p->data = NULL;

    bool ok = true;

    ok = ok && gguf_fread_el(file, &p->n, sizeof(p->n), offset);
    p->data = calloc(p->n + 1, 1);
    ok = ok && gguf_fread_el(file, p->data, p->n, offset);

    return ok;
}

struct gguf_context * gguf_init_empty(void) {
    struct gguf_context * ctx = GGML_ALIGNED_MALLOC(sizeof(struct gguf_context));

    memcpy(ctx->header.magic, GGUF_MAGIC, sizeof(ctx->header.magic));
    ctx->header.version   = GGUF_VERSION;
    ctx->header.n_tensors = 0;
    ctx->header.n_kv      = 0;

    ctx->kv    = NULL;
    ctx->infos = NULL;

    ctx->alignment = GGUF_DEFAULT_ALIGNMENT;
    ctx->offset    = 0;
    ctx->size      = 0;

    ctx->data = NULL;

    return ctx;
}
struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_params params) {
    FILE * file = fopen(fname, "rb");
    if (!file) {
        return NULL;
    }

    // offset from start of file
    size_t offset = 0;

    char magic[4];

    // check the magic before making allocations
    {
        gguf_fread_el(file, &magic, sizeof(magic), &offset);

        for (uint32_t i = 0; i < sizeof(magic); i++) {
            if (magic[i] != GGUF_MAGIC[i]) {
                fprintf(stderr, "%s: invalid magic characters '%c%c%c%c'\n", __func__, magic[0], magic[1], magic[2], magic[3]);
                fclose(file);
                return NULL;
            }
        }
    }

    bool ok = true;

    struct gguf_context * ctx = GGML_ALIGNED_MALLOC(sizeof(struct gguf_context));

    // read the header
    {
        strncpy(ctx->header.magic, magic, 4);

        ctx->kv    = NULL;
        ctx->infos = NULL;
        ctx->data  = NULL;

        ok = ok && gguf_fread_el(file, &ctx->header.version,   sizeof(ctx->header.version),   &offset);
        ok = ok && gguf_fread_el(file, &ctx->header.n_tensors, sizeof(ctx->header.n_tensors), &offset);
        ok = ok && gguf_fread_el(file, &ctx->header.n_kv,      sizeof(ctx->header.n_kv),      &offset);

        if (ctx->header.version == 1) {
            fprintf(stderr, "%s: GGUFv1 is no longer supported. please use a more up-to-date version\n", __func__);
            fclose(file);
            gguf_free(ctx);
            return NULL;
        }

        if (!ok) {
            fprintf(stderr, "%s: failed to read header\n", __func__);
            fclose(file);
            gguf_free(ctx);
            return NULL;
        }
    }

    // read the kv pairs
    {
        ctx->kv = malloc(ctx->header.n_kv * sizeof(struct gguf_kv));

        for (uint64_t i = 0; i < ctx->header.n_kv; ++i) {
            struct gguf_kv * kv = &ctx->kv[i];

            //fprintf(stderr, "%s: reading kv %d\n", __func__, i);

            ok = ok && gguf_fread_str(file, &kv->key,                    &offset);
            ok = ok && gguf_fread_el (file, &kv->type, sizeof(kv->type), &offset);

            //fprintf(stderr, "%s: reading kv with key %s\n", __func__, kv->key.data);

            switch (kv->type) {
                case GGUF_TYPE_UINT8:   ok = ok && gguf_fread_el (file, &kv->value.uint8,   sizeof(kv->value.uint8),   &offset); break;
                case GGUF_TYPE_INT8:    ok = ok && gguf_fread_el (file, &kv->value.int8,    sizeof(kv->value.int8),    &offset); break;
                case GGUF_TYPE_UINT16:  ok = ok && gguf_fread_el (file, &kv->value.uint16,  sizeof(kv->value.uint16),  &offset); break;
                case GGUF_TYPE_INT16:   ok = ok && gguf_fread_el (file, &kv->value.int16,   sizeof(kv->value.int16),   &offset); break;
                case GGUF_TYPE_UINT32:  ok = ok && gguf_fread_el (file, &kv->value.uint32,  sizeof(kv->value.uint32),  &offset); break;
                case GGUF_TYPE_INT32:   ok = ok && gguf_fread_el (file, &kv->value.int32,   sizeof(kv->value.int32),   &offset); break;
                case GGUF_TYPE_FLOAT32: ok = ok && gguf_fread_el (file, &kv->value.float32, sizeof(kv->value.float32), &offset); break;
                case GGUF_TYPE_UINT64:  ok = ok && gguf_fread_el (file, &kv->value.uint64,  sizeof(kv->value.uint64),  &offset); break;
                case GGUF_TYPE_INT64:   ok = ok && gguf_fread_el (file, &kv->value.int64,   sizeof(kv->value.int64),   &offset); break;
                case GGUF_TYPE_FLOAT64: ok = ok && gguf_fread_el (file, &kv->value.float64, sizeof(kv->value.float64), &offset); break;
                case GGUF_TYPE_BOOL:    ok = ok && gguf_fread_el (file, &kv->value.bool_,   sizeof(kv->value.bool_),   &offset); break;
                case GGUF_TYPE_STRING:  ok = ok && gguf_fread_str(file, &kv->value.str,                                &offset); break;
                case GGUF_TYPE_ARRAY:
                    {
                        ok = ok && gguf_fread_el(file, &kv->value.arr.type, sizeof(kv->value.arr.type), &offset);
                        ok = ok && gguf_fread_el(file, &kv->value.arr.n,    sizeof(kv->value.arr.n),    &offset);

                        switch (kv->value.arr.type) {
                            case GGUF_TYPE_UINT8:
                            case GGUF_TYPE_INT8:
                            case GGUF_TYPE_UINT16:
                            case GGUF_TYPE_INT16:
                            case GGUF_TYPE_UINT32:
                            case GGUF_TYPE_INT32:
                            case GGUF_TYPE_FLOAT32:
                            case GGUF_TYPE_UINT64:
                            case GGUF_TYPE_INT64:
                            case GGUF_TYPE_FLOAT64:
                            case GGUF_TYPE_BOOL:
                                {
                                    kv->value.arr.data = malloc(kv->value.arr.n * GGUF_TYPE_SIZE[kv->value.arr.type]);
                                    ok = ok && gguf_fread_el(file, kv->value.arr.data, kv->value.arr.n * GGUF_TYPE_SIZE[kv->value.arr.type], &offset);
                                } break;
                            case GGUF_TYPE_STRING:
                                {
                                    kv->value.arr.data = malloc(kv->value.arr.n * sizeof(struct gguf_str));
                                    for (uint64_t j = 0; j < kv->value.arr.n; ++j) {
                                        ok = ok && gguf_fread_str(file, &((struct gguf_str *) kv->value.arr.data)[j], &offset);
                                    }
                                } break;
                            case GGUF_TYPE_ARRAY:
                            case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type"); break;
                        }
                    } break;
                case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type");
            }

            if (!ok) {
                break;
            }
        }

        if (!ok) {
            fprintf(stderr, "%s: failed to read key-value pairs\n", __func__);
            fclose(file);
            gguf_free(ctx);
            return NULL;
        }
    }

    // read the tensor infos
    {
        ctx->infos = malloc(ctx->header.n_tensors * sizeof(struct gguf_tensor_info));

        for (uint64_t i = 0; i < ctx->header.n_tensors; ++i) {
            struct gguf_tensor_info * info = &ctx->infos[i];

            for (int j = 0; j < GGML_MAX_DIMS; ++j) {
                info->ne[j] = 1;
            }

            ok = ok && gguf_fread_str(file, &info->name,                         &offset);
            ok = ok && gguf_fread_el (file, &info->n_dims, sizeof(info->n_dims), &offset);
            for (uint32_t j = 0; j < info->n_dims; ++j) {
                ok = ok && gguf_fread_el(file, &info->ne[j], sizeof(info->ne[j]), &offset);
            }
            ok = ok && gguf_fread_el (file, &info->type,   sizeof(info->type),   &offset);
            ok = ok && gguf_fread_el (file, &info->offset, sizeof(info->offset), &offset);

            if (!ok) {
                fprintf(stderr, "%s: failed to read tensor info\n", __func__);
                fclose(file);
                gguf_free(ctx);
                return NULL;
            }
        }
    }

    ctx->alignment = GGUF_DEFAULT_ALIGNMENT;

    int alignment_idx = gguf_find_key(ctx, "general.alignment");
    if (alignment_idx != -1) {
        ctx->alignment = gguf_get_val_u32(ctx, alignment_idx);
    }

    // we require the data section to be aligned, so take into account any padding
    {
        const size_t offset_pad = offset % ctx->alignment;

        if (offset_pad != 0) {
            offset += ctx->alignment - offset_pad;
            fseek(file, offset, SEEK_SET);
        }
    }

    // store the current file offset - this is where the data section starts
    ctx->offset = offset;

    // compute the total size of the data section, taking into account the alignment
    {
        ctx->size = 0;
        for (uint64_t i = 0; i < ctx->header.n_tensors; ++i) {
            struct gguf_tensor_info * info = &ctx->infos[i];

            const int64_t ne =
                (int64_t) info->ne[0] *
                (int64_t) info->ne[1] *
                (int64_t) info->ne[2] *
                (int64_t) info->ne[3];

            if (ne % ggml_blck_size(info->type) != 0) {
                fprintf(stderr, "%s: tensor '%s' number of elements (%" PRId64 ") is not a multiple of block size (%d)\n",
                        __func__, info->name.data, ne, ggml_blck_size(info->type));
                fclose(file);
                gguf_free(ctx);
                return NULL;
            }

            const size_t size_cur = (ne*ggml_type_size(info->type))/ggml_blck_size(info->type);

            ctx->size += GGML_PAD(size_cur, ctx->alignment);
        }
    }

    // load the tensor data only if requested
    if (params.ctx != NULL) {
        // if the provided gguf_context is no_alloc, then we create "empty" tensors and do not read the binary blob
        // otherwise, we load the binary blob into the created ggml_context as well, and point the "data" members of
        // the ggml_tensor structs to the appropriate locations in the binary blob

        // compute the exact size needed for the new ggml_context
        const size_t mem_size =
            params.no_alloc ?
            (ctx->header.n_tensors    )*ggml_tensor_overhead() :
            (ctx->header.n_tensors + 1)*ggml_tensor_overhead() + ctx->size;

        struct ggml_init_params pdata = {
            .mem_size   = mem_size,
            .mem_buffer = NULL,
            .no_alloc   = params.no_alloc,
        };

        *params.ctx = ggml_init(pdata);

        struct ggml_context * ctx_data = *params.ctx;

        struct ggml_tensor * data = NULL;

        if (!params.no_alloc) {
            data = ggml_new_tensor_1d(ctx_data, GGML_TYPE_I8, ctx->size);

            ok = ok && data != NULL;

            // read the binary blob with the tensor data
            ok = ok && gguf_fread_el(file, data->data, ctx->size, &offset);

            if (!ok) {
                fprintf(stderr, "%s: failed to read tensor data\n", __func__);
                fclose(file);
                ggml_free(ctx_data);
                gguf_free(ctx);
                return NULL;
            }

            ctx->data = data->data;
        }

        ggml_set_no_alloc(ctx_data, true);

        // create the tensors
        for (uint64_t i = 0; i < ctx->header.n_tensors; ++i) {
            const int64_t ne[GGML_MAX_DIMS] = {
                ctx->infos[i].ne[0],
                ctx->infos[i].ne[1],
                ctx->infos[i].ne[2],
                ctx->infos[i].ne[3],
            };

            struct ggml_tensor * cur = ggml_new_tensor(ctx_data, ctx->infos[i].type, ctx->infos[i].n_dims, ne);

            ok = ok && cur != NULL;

            ggml_set_name(cur, ctx->infos[i].name.data);

            if (!ok) {
                break;
            }

            // point the data member to the appropriate location in the binary blob using the tensor infos
            if (!params.no_alloc) {
                //cur->data = (char *) data->data + ctx->infos[i].offset - ctx->offset; // offset from start of file
                cur->data = (char *) data->data + ctx->infos[i].offset;                 // offset from data
            }
        }

        if (!ok) {
            fprintf(stderr, "%s: failed to read the tensor data\n", __func__);
            fclose(file);
            ggml_free(ctx_data);
            gguf_free(ctx);
            return NULL;
        }

        ggml_set_no_alloc(ctx_data, params.no_alloc);
    }

    fclose(file);

    return ctx;
}
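// Usage sketch (illustrative): load a GGUF file together with its tensor data
// into a fresh ggml context, then enumerate the metadata:
//
//     struct ggml_context * ctx_data = NULL;
//     struct gguf_init_params gparams = { /*.no_alloc =*/ false, /*.ctx =*/ &ctx_data };
//     struct gguf_context * gctx = gguf_init_from_file("model.gguf", gparams);
//     if (gctx) {
//         for (int i = 0; i < gguf_get_n_kv(gctx); ++i) {
//             printf("%s: %s\n", gguf_get_key(gctx, i), gguf_type_name(gguf_get_kv_type(gctx, i)));
//         }
//         gguf_free(gctx);
//         ggml_free(ctx_data);
//     }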
void gguf_free(struct gguf_context * ctx) {
    if (ctx == NULL) {
        return;
    }

    if (ctx->kv) {
        // free string memory - not great..
        for (uint32_t i = 0; i < ctx->header.n_kv; ++i) {
            struct gguf_kv * kv = &ctx->kv[i];

            if (kv->key.data) {
                free(kv->key.data);
            }

            if (kv->type == GGUF_TYPE_STRING) {
                if (kv->value.str.data) {
                    free(kv->value.str.data);
                }
            }

            if (kv->type == GGUF_TYPE_ARRAY) {
                if (kv->value.arr.data) {
                    if (kv->value.arr.type == GGUF_TYPE_STRING) {
                        for (uint32_t j = 0; j < kv->value.arr.n; ++j) {
                            struct gguf_str * str = &((struct gguf_str *) kv->value.arr.data)[j];
                            if (str->data) {
                                free(str->data);
                            }
                        }
                    }

                    free(kv->value.arr.data);
                }
            }
        }

        free(ctx->kv);
    }

    if (ctx->infos) {
        for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) {
            struct gguf_tensor_info * info = &ctx->infos[i];

            if (info->name.data) {
                free(info->name.data);
            }
        }

        free(ctx->infos);
    }

    GGML_ALIGNED_FREE(ctx);
}

const char * gguf_type_name(enum gguf_type type) {
    return GGUF_TYPE_NAME[type];
}

int gguf_get_version(const struct gguf_context * ctx) {
    return ctx->header.version;
}

size_t gguf_get_alignment(const struct gguf_context * ctx) {
    return ctx->alignment;
}

size_t gguf_get_data_offset(const struct gguf_context * ctx) {
    return ctx->offset;
}

void * gguf_get_data(const struct gguf_context * ctx) {
    return ctx->data;
}

int gguf_get_n_kv(const struct gguf_context * ctx) {
    return ctx->header.n_kv;
}

int gguf_find_key(const struct gguf_context * ctx, const char * key) {
    // return -1 if key not found
    int keyfound = -1;

    const int n_kv = gguf_get_n_kv(ctx);

    for (int i = 0; i < n_kv; ++i) {
        if (strcmp(key, gguf_get_key(ctx, i)) == 0) {
            keyfound = i;
            break;
        }
    }

    return keyfound;
}
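// Usage sketch: this mirrors how gguf_init_from_file resolves "general.alignment" above:
//
//     const int idx = gguf_find_key(gctx, "general.alignment");
//     const uint32_t alignment = idx == -1 ? GGUF_DEFAULT_ALIGNMENT : gguf_get_val_u32(gctx, idx);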
const char * gguf_get_key(const struct gguf_context * ctx, int key_id) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    return ctx->kv[key_id].key.data;
}

enum gguf_type gguf_get_kv_type(const struct gguf_context * ctx, int key_id) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    return ctx->kv[key_id].type;
}

enum gguf_type gguf_get_arr_type(const struct gguf_context * ctx, int key_id) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_ARRAY);
    return ctx->kv[key_id].value.arr.type;
}

const void * gguf_get_arr_data(const struct gguf_context * ctx, int key_id) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_ARRAY);
    return ctx->kv[key_id].value.arr.data;
}

const char * gguf_get_arr_str(const struct gguf_context * ctx, int key_id, int i) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_ARRAY);
    struct gguf_kv * kv = &ctx->kv[key_id];
    struct gguf_str * str = &((struct gguf_str *) kv->value.arr.data)[i];
    return str->data;
}

int gguf_get_arr_n(const struct gguf_context * ctx, int key_id) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_ARRAY);
    return ctx->kv[key_id].value.arr.n;
}

uint8_t gguf_get_val_u8(const struct gguf_context * ctx, int key_id) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_UINT8);
    return ctx->kv[key_id].value.uint8;
}

int8_t gguf_get_val_i8(const struct gguf_context * ctx, int key_id) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_INT8);
    return ctx->kv[key_id].value.int8;
}

uint16_t gguf_get_val_u16(const struct gguf_context * ctx, int key_id) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_UINT16);
    return ctx->kv[key_id].value.uint16;
}

int16_t gguf_get_val_i16(const struct gguf_context * ctx, int key_id) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_INT16);
    return ctx->kv[key_id].value.int16;
}

uint32_t gguf_get_val_u32(const struct gguf_context * ctx, int key_id) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_UINT32);
    return ctx->kv[key_id].value.uint32;
}

int32_t gguf_get_val_i32(const struct gguf_context * ctx, int key_id) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_INT32);
    return ctx->kv[key_id].value.int32;
}

float gguf_get_val_f32(const struct gguf_context * ctx, int key_id) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_FLOAT32);
    return ctx->kv[key_id].value.float32;
}

uint64_t gguf_get_val_u64(const struct gguf_context * ctx, int key_id) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_UINT64);
    return ctx->kv[key_id].value.uint64;
}

int64_t gguf_get_val_i64(const struct gguf_context * ctx, int key_id) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_INT64);
    return ctx->kv[key_id].value.int64;
}

double gguf_get_val_f64(const struct gguf_context * ctx, int key_id) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_FLOAT64);
    return ctx->kv[key_id].value.float64;
}

bool gguf_get_val_bool(const struct gguf_context * ctx, int key_id) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_BOOL);
  15768. return ctx->kv[key_id].value.bool_;
  15769. }
  15770. const char * gguf_get_val_str(const struct gguf_context * ctx, int key_id) {
  15771. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  15772. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_STRING);
  15773. return ctx->kv[key_id].value.str.data;
  15774. }
  15775. const void * gguf_get_val_data(const struct gguf_context * ctx, int key_id) {
  15776. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  15777. GGML_ASSERT(ctx->kv[key_id].type != GGUF_TYPE_ARRAY);
  15778. GGML_ASSERT(ctx->kv[key_id].type != GGUF_TYPE_STRING);
  15779. return &ctx->kv[key_id].value;
  15780. }
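
// Checked, typed read of a KV pair (a minimal sketch; "general.file_type"
// is a placeholder key). The getters assert on a type mismatch, so check
// the type first when the input file is untrusted:
//
//     const int kid = gguf_find_key(ctx, "general.file_type");
//     if (kid >= 0 && gguf_get_kv_type(ctx, kid) == GGUF_TYPE_UINT32) {
//         const uint32_t ftype = gguf_get_val_u32(ctx, kid);
//         // ...
//     }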
int gguf_get_n_tensors(const struct gguf_context * ctx) {
    return ctx->header.n_tensors;
}

int gguf_find_tensor(const struct gguf_context * ctx, const char * name) {
    // return -1 if tensor not found
    int tensorfound = -1;

    const int n_tensors = gguf_get_n_tensors(ctx);

    for (int i = 0; i < n_tensors; ++i) {
        if (strcmp(name, gguf_get_tensor_name(ctx, i)) == 0) {
            tensorfound = i;
            break;
        }
    }

    return tensorfound;
}

size_t gguf_get_tensor_offset(const struct gguf_context * ctx, int i) {
    return ctx->infos[i].offset;
}

char * gguf_get_tensor_name(const struct gguf_context * ctx, int i) {
    return ctx->infos[i].name.data;
}
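
// Enumerating tensors (a minimal sketch). Offsets are relative to the start
// of the data section, so when the file was loaded with no_alloc == false the
// payload of tensor i starts at gguf_get_data(ctx) + its offset:
//
//     const int n = gguf_get_n_tensors(ctx);
//     for (int i = 0; i < n; ++i) {
//         const char * name = gguf_get_tensor_name(ctx, i);
//         const void * data = (const char *) gguf_get_data(ctx) + gguf_get_tensor_offset(ctx, i);
//         // ...
//     }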
// returns the index
static int gguf_get_or_add_key(struct gguf_context * ctx, const char * key) {
    const int idx = gguf_find_key(ctx, key);
    if (idx >= 0) {
        return idx;
    }

    const int n_kv = gguf_get_n_kv(ctx);

    ctx->kv = realloc(ctx->kv, (n_kv + 1) * sizeof(struct gguf_kv));
    ctx->kv[n_kv].key.n    = strlen(key);
    ctx->kv[n_kv].key.data = strdup(key);
    ctx->header.n_kv++;

    return n_kv;
}
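
// note: since gguf_get_or_add_key() returns the existing index when the key
// is already present, calling any of the setters below twice with the same
// key overwrites the value in place (the previous value is not freed)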
void gguf_set_val_u8(struct gguf_context * ctx, const char * key, uint8_t val) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type        = GGUF_TYPE_UINT8;
    ctx->kv[idx].value.uint8 = val;
}

void gguf_set_val_i8(struct gguf_context * ctx, const char * key, int8_t val) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type       = GGUF_TYPE_INT8;
    ctx->kv[idx].value.int8 = val;
}

void gguf_set_val_u16(struct gguf_context * ctx, const char * key, uint16_t val) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type         = GGUF_TYPE_UINT16;
    ctx->kv[idx].value.uint16 = val;
}

void gguf_set_val_i16(struct gguf_context * ctx, const char * key, int16_t val) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type        = GGUF_TYPE_INT16;
    ctx->kv[idx].value.int16 = val;
}

void gguf_set_val_u32(struct gguf_context * ctx, const char * key, uint32_t val) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type         = GGUF_TYPE_UINT32;
    ctx->kv[idx].value.uint32 = val;
}

void gguf_set_val_i32(struct gguf_context * ctx, const char * key, int32_t val) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type        = GGUF_TYPE_INT32;
    ctx->kv[idx].value.int32 = val;
}

void gguf_set_val_f32(struct gguf_context * ctx, const char * key, float val) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type          = GGUF_TYPE_FLOAT32;
    ctx->kv[idx].value.float32 = val;
}

void gguf_set_val_u64(struct gguf_context * ctx, const char * key, uint64_t val) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type         = GGUF_TYPE_UINT64;
    ctx->kv[idx].value.uint64 = val;
}

void gguf_set_val_i64(struct gguf_context * ctx, const char * key, int64_t val) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type        = GGUF_TYPE_INT64;
    ctx->kv[idx].value.int64 = val;
}

void gguf_set_val_f64(struct gguf_context * ctx, const char * key, double val) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type          = GGUF_TYPE_FLOAT64;
    ctx->kv[idx].value.float64 = val;
}

void gguf_set_val_bool(struct gguf_context * ctx, const char * key, bool val) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type        = GGUF_TYPE_BOOL;
    ctx->kv[idx].value.bool_ = val;
}

void gguf_set_val_str(struct gguf_context * ctx, const char * key, const char * val) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type           = GGUF_TYPE_STRING;
    ctx->kv[idx].value.str.n    = strlen(val);
    ctx->kv[idx].value.str.data = strdup(val);
}

void gguf_set_arr_data(struct gguf_context * ctx, const char * key, enum gguf_type type, const void * data, int n) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type           = GGUF_TYPE_ARRAY;
    ctx->kv[idx].value.arr.type = type;
    ctx->kv[idx].value.arr.n    = n;
    ctx->kv[idx].value.arr.data = malloc(n*GGUF_TYPE_SIZE[type]);
    memcpy(ctx->kv[idx].value.arr.data, data, n*GGUF_TYPE_SIZE[type]);
}

void gguf_set_arr_str(struct gguf_context * ctx, const char * key, const char ** data, int n) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type           = GGUF_TYPE_ARRAY;
    ctx->kv[idx].value.arr.type = GGUF_TYPE_STRING;
    ctx->kv[idx].value.arr.n    = n;
    ctx->kv[idx].value.arr.data = malloc(n*sizeof(struct gguf_str));
    for (int i = 0; i < n; i++) {
        struct gguf_str * str = &((struct gguf_str *)ctx->kv[idx].value.arr.data)[i];
        str->n    = strlen(data[i]);
        str->data = strdup(data[i]);
    }
}
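
// Building metadata with the setters above (a minimal sketch; the keys and
// values are placeholders). Scalars are stored by value, strings and arrays
// are deep-copied into the context:
//
//     gguf_set_val_u32(ctx, "example.count", 3);
//     gguf_set_val_str(ctx, "example.name",  "demo");
//
//     const float scales[3] = { 0.1f, 0.2f, 0.3f };
//     gguf_set_arr_data(ctx, "example.scales", GGUF_TYPE_FLOAT32, scales, 3);
//
//     const char * labels[2] = { "a", "b" };
//     gguf_set_arr_str(ctx, "example.labels", labels, 2);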
// set or add KV pairs from another context
void gguf_set_kv(struct gguf_context * ctx, struct gguf_context * src) {
    for (uint32_t i = 0; i < src->header.n_kv; i++) {
        switch (src->kv[i].type) {
            case GGUF_TYPE_UINT8:   gguf_set_val_u8  (ctx, src->kv[i].key.data, src->kv[i].value.uint8);    break;
            case GGUF_TYPE_INT8:    gguf_set_val_i8  (ctx, src->kv[i].key.data, src->kv[i].value.int8);     break;
            case GGUF_TYPE_UINT16:  gguf_set_val_u16 (ctx, src->kv[i].key.data, src->kv[i].value.uint16);   break;
            case GGUF_TYPE_INT16:   gguf_set_val_i16 (ctx, src->kv[i].key.data, src->kv[i].value.int16);    break;
            case GGUF_TYPE_UINT32:  gguf_set_val_u32 (ctx, src->kv[i].key.data, src->kv[i].value.uint32);   break;
            case GGUF_TYPE_INT32:   gguf_set_val_i32 (ctx, src->kv[i].key.data, src->kv[i].value.int32);    break;
            case GGUF_TYPE_FLOAT32: gguf_set_val_f32 (ctx, src->kv[i].key.data, src->kv[i].value.float32);  break;
            case GGUF_TYPE_UINT64:  gguf_set_val_u64 (ctx, src->kv[i].key.data, src->kv[i].value.uint64);   break;
            case GGUF_TYPE_INT64:   gguf_set_val_i64 (ctx, src->kv[i].key.data, src->kv[i].value.int64);    break;
            case GGUF_TYPE_FLOAT64: gguf_set_val_f64 (ctx, src->kv[i].key.data, src->kv[i].value.float64);  break;
            case GGUF_TYPE_BOOL:    gguf_set_val_bool(ctx, src->kv[i].key.data, src->kv[i].value.bool_);    break;
            case GGUF_TYPE_STRING:  gguf_set_val_str (ctx, src->kv[i].key.data, src->kv[i].value.str.data); break;
            case GGUF_TYPE_ARRAY:
                {
                    if (src->kv[i].value.arr.type == GGUF_TYPE_STRING) {
                        const char ** data = malloc(src->kv[i].value.arr.n*sizeof(char *));
                        for (uint32_t j = 0; j < src->kv[i].value.arr.n; j++) {
                            data[j] = ((struct gguf_str *)src->kv[i].value.arr.data)[j].data;
                        }
                        gguf_set_arr_str(ctx, src->kv[i].key.data, data, src->kv[i].value.arr.n);
                        free(data);
                    } else if (src->kv[i].value.arr.type == GGUF_TYPE_ARRAY) {
                        GGML_ASSERT(false && "nested arrays not supported");
                    } else {
                        gguf_set_arr_data(ctx, src->kv[i].key.data, src->kv[i].value.arr.type, src->kv[i].value.arr.data, src->kv[i].value.arr.n);
                    }
                } break;
            case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type"); break;
        }
    }
}
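
// Copying all KV pairs from a loaded file into a new output context
// (a minimal sketch; ctx_in/ctx_out are placeholders):
//
//     gguf_set_kv(ctx_out, ctx_in); // deep-copies every key/value from ctx_in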
void gguf_add_tensor(
        struct gguf_context * ctx,
        const struct ggml_tensor * tensor) {
    const int idx = ctx->header.n_tensors;
    ctx->infos = realloc(ctx->infos, (idx + 1)*sizeof(struct gguf_tensor_info));

    ctx->infos[idx].name.n    = strlen(tensor->name);
    ctx->infos[idx].name.data = strdup(tensor->name);

    for (int i = 0; i < GGML_MAX_DIMS; ++i) {
        ctx->infos[idx].ne[i] = 1;
    }

    ctx->infos[idx].n_dims = tensor->n_dims;
    for (int i = 0; i < tensor->n_dims; i++) {
        ctx->infos[idx].ne[i] = tensor->ne[i];
    }

    ctx->infos[idx].type   = tensor->type;
    ctx->infos[idx].offset = 0;
    ctx->infos[idx].data   = tensor->data;
    ctx->infos[idx].size   = ggml_nbytes(tensor);

    if (ctx->header.n_tensors > 0) {
        ctx->infos[idx].offset = ctx->infos[idx - 1].offset + GGML_PAD(ctx->infos[idx - 1].size, ctx->alignment);
    }

    ctx->header.n_tensors++;
}

void gguf_set_tensor_type(struct gguf_context * ctx, const char * name, enum ggml_type type) {
    const int idx = gguf_find_tensor(ctx, name);
    if (idx < 0) {
        GGML_ASSERT(false && "tensor not found");
    }

    ctx->infos[idx].type = type;
}

void gguf_set_tensor_data(struct gguf_context * ctx, const char * name, const void * data, size_t size) {
    const int idx = gguf_find_tensor(ctx, name);
    if (idx < 0) {
        GGML_ASSERT(false && "tensor not found");
    }

    ctx->infos[idx].data = data;
    ctx->infos[idx].size = size;

    // update offsets
    for (uint32_t i = idx + 1; i < ctx->header.n_tensors; ++i) {
        ctx->infos[i].offset = ctx->infos[i - 1].offset + GGML_PAD(ctx->infos[i - 1].size, ctx->alignment);
    }
}
//static void gguf_fwrite_str(FILE * file, const struct gguf_str * val) {
//    fwrite(&val->n, sizeof(val->n), 1, file);
//    fwrite(val->data, sizeof(char), val->n, file);
//}
//
//static void gguf_fwrite_el(FILE * file, const void * val, size_t size) {
//    fwrite(val, sizeof(char), size, file);
//}

struct gguf_buf {
    void * data;
    size_t size;
    size_t offset;
};
static struct gguf_buf gguf_buf_init(size_t size) {
    struct gguf_buf buf = {
        /*buf.data   =*/ size == 0 ? NULL : malloc(size),
        /*buf.size   =*/ size,
        /*buf.offset =*/ 0,
    };

    return buf;
}

static void gguf_buf_free(struct gguf_buf buf) {
    if (buf.data) {
        free(buf.data);
    }
}

static void gguf_buf_grow(struct gguf_buf * buf, size_t size) {
    if (buf->offset + size > buf->size) {
        buf->size = 1.5*(buf->offset + size);
        if (buf->data) {
            buf->data = realloc(buf->data, buf->size);
        }
    }
}
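
// note: when buf->data is NULL (i.e. the buffer was created with
// gguf_buf_init(0)), gguf_buf_grow() intentionally skips the realloc and the
// writers below only advance buf->offset without copying any bytes -
// gguf_get_meta_size() relies on this to measure the serialized size
// without allocating memory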
static void gguf_bwrite_str(struct gguf_buf * buf, const struct gguf_str * val) {
    gguf_buf_grow(buf, sizeof(val->n) + val->n);

    if (buf->data) {
        memcpy((char *) buf->data + buf->offset, &val->n, sizeof(val->n));
    }
    buf->offset += sizeof(val->n);

    if (buf->data) {
        memcpy((char *) buf->data + buf->offset, val->data, val->n);
    }
    buf->offset += val->n;
}

static void gguf_bwrite_el(struct gguf_buf * buf, const void * val, size_t el_size) {
    gguf_buf_grow(buf, el_size);

    if (buf->data) {
        memcpy((char *) buf->data + buf->offset, val, el_size);
    }
    buf->offset += el_size;
}
static void gguf_write_to_buf(const struct gguf_context * ctx, struct gguf_buf * buf, bool only_meta) {
    // write header
    gguf_bwrite_el(buf, &ctx->header.magic,     sizeof(ctx->header.magic));
    gguf_bwrite_el(buf, &ctx->header.version,   sizeof(ctx->header.version));
    gguf_bwrite_el(buf, &ctx->header.n_tensors, sizeof(ctx->header.n_tensors));
    gguf_bwrite_el(buf, &ctx->header.n_kv,      sizeof(ctx->header.n_kv));

    // write key-value pairs
    for (uint32_t i = 0; i < ctx->header.n_kv; ++i) {
        struct gguf_kv * kv = &ctx->kv[i];

        gguf_bwrite_str(buf, &kv->key);
        gguf_bwrite_el (buf, &kv->type, sizeof(kv->type));

        switch (kv->type) {
            case GGUF_TYPE_UINT8:   gguf_bwrite_el (buf, &kv->value.uint8,   sizeof(kv->value.uint8)  ); break;
            case GGUF_TYPE_INT8:    gguf_bwrite_el (buf, &kv->value.int8,    sizeof(kv->value.int8)   ); break;
            case GGUF_TYPE_UINT16:  gguf_bwrite_el (buf, &kv->value.uint16,  sizeof(kv->value.uint16) ); break;
            case GGUF_TYPE_INT16:   gguf_bwrite_el (buf, &kv->value.int16,   sizeof(kv->value.int16)  ); break;
            case GGUF_TYPE_UINT32:  gguf_bwrite_el (buf, &kv->value.uint32,  sizeof(kv->value.uint32) ); break;
            case GGUF_TYPE_INT32:   gguf_bwrite_el (buf, &kv->value.int32,   sizeof(kv->value.int32)  ); break;
            case GGUF_TYPE_FLOAT32: gguf_bwrite_el (buf, &kv->value.float32, sizeof(kv->value.float32)); break;
            case GGUF_TYPE_UINT64:  gguf_bwrite_el (buf, &kv->value.uint64,  sizeof(kv->value.uint64) ); break;
            case GGUF_TYPE_INT64:   gguf_bwrite_el (buf, &kv->value.int64,   sizeof(kv->value.int64)  ); break;
            case GGUF_TYPE_FLOAT64: gguf_bwrite_el (buf, &kv->value.float64, sizeof(kv->value.float64)); break;
            case GGUF_TYPE_BOOL:    gguf_bwrite_el (buf, &kv->value.bool_,   sizeof(kv->value.bool_)  ); break;
            case GGUF_TYPE_STRING:  gguf_bwrite_str(buf, &kv->value.str                              ); break;
            case GGUF_TYPE_ARRAY:
                {
                    gguf_bwrite_el(buf, &kv->value.arr.type, sizeof(kv->value.arr.type));
                    gguf_bwrite_el(buf, &kv->value.arr.n,    sizeof(kv->value.arr.n)   );

                    switch (kv->value.arr.type) {
                        case GGUF_TYPE_UINT8:
                        case GGUF_TYPE_INT8:
                        case GGUF_TYPE_UINT16:
                        case GGUF_TYPE_INT16:
                        case GGUF_TYPE_UINT32:
                        case GGUF_TYPE_INT32:
                        case GGUF_TYPE_FLOAT32:
                        case GGUF_TYPE_UINT64:
                        case GGUF_TYPE_INT64:
                        case GGUF_TYPE_FLOAT64:
                        case GGUF_TYPE_BOOL:
                            {
                                gguf_bwrite_el(buf, kv->value.arr.data, kv->value.arr.n * GGUF_TYPE_SIZE[kv->value.arr.type]);
                            } break;
                        case GGUF_TYPE_STRING:
                            {
                                for (uint32_t j = 0; j < kv->value.arr.n; ++j) {
                                    gguf_bwrite_str(buf, &((struct gguf_str *) kv->value.arr.data)[j]);
                                }
                            } break;
                        case GGUF_TYPE_ARRAY:
                        case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type"); break;
                    }
                } break;
            case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type");
        }
    }

    // write tensor infos
    for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) {
        struct gguf_tensor_info * info = &ctx->infos[i];

        gguf_bwrite_str(buf, &info->name);
        gguf_bwrite_el (buf, &info->n_dims, sizeof(info->n_dims));
        for (uint32_t j = 0; j < info->n_dims; ++j) {
            gguf_bwrite_el(buf, &info->ne[j], sizeof(info->ne[j]));
        }
        gguf_bwrite_el(buf, &info->type,   sizeof(info->type));
        gguf_bwrite_el(buf, &info->offset, sizeof(info->offset));
    }

    // we require the data section to be aligned, so take into account any padding
    {
        const size_t offset     = buf->offset;
        const size_t offset_pad = GGML_PAD(offset, ctx->alignment);

        if (offset_pad != offset) {
            uint8_t pad = 0;
            for (size_t i = 0; i < offset_pad - offset; ++i) {
                gguf_bwrite_el(buf, &pad, sizeof(pad));
            }
        }
    }

    if (only_meta) {
        return;
    }

    size_t offset = 0;

    // write tensor data
    for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) {
        struct gguf_tensor_info * info = &ctx->infos[i];

        const size_t size     = info->size;
        const size_t size_pad = GGML_PAD(size, ctx->alignment);

        gguf_bwrite_el(buf, info->data, size);

        if (size_pad != size) {
            uint8_t pad = 0;
            for (size_t j = 0; j < size_pad - size; ++j) {
                gguf_bwrite_el(buf, &pad, sizeof(pad));
            }
        }

        GGML_ASSERT(offset == info->offset);

        offset += size_pad;
    }
}
void gguf_write_to_file(const struct gguf_context * ctx, const char * fname, bool only_meta) {
    FILE * file = fopen(fname, "wb");
    if (!file) {
        GGML_ASSERT(false && "failed to open file for writing");
    }

    struct gguf_buf buf = gguf_buf_init(16*1024);

    gguf_write_to_buf(ctx, &buf, only_meta);

    fwrite(buf.data, 1, buf.offset, file);

    gguf_buf_free(buf);

    fclose(file);
}
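
// Writing a new GGUF file end-to-end (a minimal sketch; the key is a
// placeholder and "t" is assumed to be a struct ggml_tensor * prepared by
// the caller):
//
//     struct gguf_context * ctx_out = gguf_init_empty();
//
//     gguf_set_val_str(ctx_out, "example.name", "demo");
//     gguf_add_tensor (ctx_out, t);
//
//     gguf_write_to_file(ctx_out, "out.gguf", false); // false = include tensor data
//     gguf_free(ctx_out);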
size_t gguf_get_meta_size(const struct gguf_context * ctx) {
    // no allocs - only compute size
    struct gguf_buf buf = gguf_buf_init(0);

    gguf_write_to_buf(ctx, &buf, true);

    return buf.offset;
}

void gguf_get_meta_data(const struct gguf_context * ctx, void * data) {
    struct gguf_buf buf = gguf_buf_init(16*1024);

    gguf_write_to_buf(ctx, &buf, true);

    memcpy(data, buf.data, buf.offset);

    gguf_buf_free(buf);
}
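
// Serializing just the metadata into caller-owned memory (a minimal sketch):
//
//     const size_t meta_size = gguf_get_meta_size(ctx);
//     void * meta = malloc(meta_size);
//     gguf_get_meta_data(ctx, meta);
//     // ... use meta ...
//     free(meta);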
////////////////////////////////////////////////////////////////////////////////

int ggml_cpu_has_avx(void) {
#if defined(__AVX__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_avx2(void) {
#if defined(__AVX2__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_avx512(void) {
#if defined(__AVX512F__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_avx512_vbmi(void) {
#if defined(__AVX512VBMI__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_avx512_vnni(void) {
#if defined(__AVX512VNNI__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_fma(void) {
#if defined(__FMA__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_neon(void) {
#if defined(__ARM_NEON)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_arm_fma(void) {
#if defined(__ARM_FEATURE_FMA)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_metal(void) {
#if defined(GGML_USE_METAL)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_f16c(void) {
#if defined(__F16C__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_fp16_va(void) {
#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_wasm_simd(void) {
#if defined(__wasm_simd128__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_blas(void) {
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) || defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_cublas(void) {
#if defined(GGML_USE_CUBLAS)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_clblast(void) {
#if defined(GGML_USE_CLBLAST)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_gpublas(void) {
    return ggml_cpu_has_cublas() || ggml_cpu_has_clblast();
}

int ggml_cpu_has_sse3(void) {
#if defined(__SSE3__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_ssse3(void) {
#if defined(__SSSE3__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_vsx(void) {
#if defined(__POWER9_VECTOR__)
    return 1;
#else
    return 0;
#endif
}
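
// note: these report the feature flags the library was compiled with, not
// runtime CPU detection. Querying them (a minimal sketch):
//
//     printf("AVX = %d | AVX2 = %d | NEON = %d | BLAS = %d\n",
//            ggml_cpu_has_avx(), ggml_cpu_has_avx2(),
//            ggml_cpu_has_neon(), ggml_cpu_has_blas());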
////////////////////////////////////////////////////////////////////////////////