ggml.c 654 KB

(file viewer line-number gutter, 1–14308; ggml.c spans roughly 14,308 lines — source listing not reproduced here)
430914310143111431214313143141431514316143171431814319143201432114322143231432414325143261432714328143291433014331143321433314334143351433614337143381433914340143411434214343143441434514346143471434814349143501435114352143531435414355143561435714358143591436014361143621436314364143651436614367143681436914370143711437214373143741437514376143771437814379143801438114382143831438414385143861438714388143891439014391143921439314394143951439614397143981439914400144011440214403144041440514406144071440814409144101441114412144131441414415144161441714418144191442014421144221442314424144251442614427144281442914430144311443214433144341443514436144371443814439144401444114442144431444414445144461444714448144491445014451144521445314454144551445614457144581445914460144611446214463144641446514466144671446814469144701447114472144731447414475144761447714478144791448014481144821448314484144851448614487144881448914490144911449214493144941449514496144971449814499145001450114502145031450414505145061450714508145091451014511145121451314514145151451614517145181451914520145211452214523145241452514526145271452814529145301453114532145331453414535145361453714538145391454014541145421454314544145451454614547145481454914550145511455214553145541455514556145571455814559145601456114562145631456414565145661456714568145691457014571145721457314574145751457614577145781457914580145811458214583145841458514586145871458814589145901459114592145931459414595145961459714598145991460014601146021460314604146051460614607146081460914610146111461214613146141461514616146171461814619146201462114622146231462414625146261462714628146291463014631146321463314634146351463614637146381463914640146411464214643146441464514646146471464814649146501465114652146531465414655146561465714658146591466014661146621466314664146651466614667146681466914670146711467214673146741467514676146771467814679146801468114682146831468414685146861468714688146891469014691146921469314694146951469614697146981469914700147011470214703147041470514706147071470814709147101471114712147131471414715147161471714718147191472014721147221472314724147251472614727147281472914730147311473214733147341473514736147371473814739147401474114742147431474414745147461474714748147491475014751147521475314754147551475614757147581475914760147611476214763147641476514766147671476814769147701477114772147731477414775147761477714778147791478014781147821478314784147851478614787147881478914790147911479214793147941479514796147971479814799148001480114802148031480414805148061480714808148091481014811148121481314814148151481614817148181481914820148211482214823148241482514826148271482814829148301483114832148331483414835148361483714838148391484014841148421484314844148451484614847148481484914850148511485214853148541485514856148571485814859148601486114862148631486414865148661486714868148691487014871148721487314874148751487614877148781487914880148811488214883148841488514886148871488814889148901489114892148931489414895148961489714898148991490014901149021490314904149051490614907149081490914910149111491214913149141491514916149171491814919149201492114922149231492414925149261492714928149291493014931149321493314934149351493614937149381493914940149411494214943149441494514946149471494814949149501495114952149531495414955149561495714958149591496014961149621496314964149651496614967149681496914970149711497214973149741497514976149771497814979149801498114982149831498414985149861498714988149891499014991149921499314994149951499614997149981499915000150011500215003150041500515006150071500815009150101501115012150131501415015150161501715018150191
502015021150221502315024150251502615027150281502915030150311503215033150341503515036150371503815039150401504115042150431504415045150461504715048150491505015051150521505315054150551505615057150581505915060150611506215063150641506515066150671506815069150701507115072150731507415075150761507715078150791508015081150821508315084150851508615087150881508915090150911509215093150941509515096150971509815099151001510115102151031510415105151061510715108151091511015111151121511315114151151511615117151181511915120151211512215123151241512515126151271512815129151301513115132151331513415135151361513715138151391514015141151421514315144151451514615147151481514915150151511515215153151541515515156151571515815159151601516115162151631516415165151661516715168151691517015171151721517315174151751517615177151781517915180151811518215183151841518515186151871518815189151901519115192151931519415195151961519715198151991520015201152021520315204152051520615207152081520915210152111521215213152141521515216152171521815219152201522115222152231522415225152261522715228152291523015231152321523315234152351523615237152381523915240152411524215243152441524515246152471524815249152501525115252152531525415255152561525715258152591526015261152621526315264152651526615267152681526915270152711527215273152741527515276152771527815279152801528115282152831528415285152861528715288152891529015291152921529315294152951529615297152981529915300153011530215303153041530515306153071530815309153101531115312153131531415315153161531715318153191532015321153221532315324153251532615327153281532915330153311533215333153341533515336153371533815339153401534115342153431534415345153461534715348153491535015351153521535315354153551535615357153581535915360153611536215363153641536515366153671536815369153701537115372153731537415375153761537715378153791538015381153821538315384153851538615387153881538915390153911539215393153941539515396153971539815399154001540115402154031540415405154061540715408154091541015411154121541315414154151541615417154181541915420154211542215423154241542515426154271542815429154301543115432154331543415435154361543715438154391544015441154421544315444154451544615447154481544915450154511545215453154541545515456154571545815459154601546115462154631546415465154661546715468154691547015471154721547315474154751547615477154781547915480154811548215483154841548515486154871548815489154901549115492154931549415495154961549715498154991550015501155021550315504155051550615507155081550915510155111551215513155141551515516155171551815519155201552115522155231552415525155261552715528155291553015531155321553315534155351553615537155381553915540155411554215543155441554515546155471554815549155501555115552155531555415555155561555715558155591556015561155621556315564155651556615567155681556915570155711557215573155741557515576155771557815579155801558115582155831558415585155861558715588155891559015591155921559315594155951559615597155981559915600156011560215603156041560515606156071560815609156101561115612156131561415615156161561715618156191562015621156221562315624156251562615627156281562915630156311563215633156341563515636156371563815639156401564115642156431564415645156461564715648156491565015651156521565315654156551565615657156581565915660156611566215663156641566515666156671566815669156701567115672156731567415675156761567715678156791568015681156821568315684156851568615687156881568915690156911569215693156941569515696156971569815699157001570115702157031570415705157061570715708157091571015711157121571315714157151571615717157181571915720157211572215723157241572515726157271572815729157301
573115732157331573415735157361573715738157391574015741157421574315744157451574615747157481574915750157511575215753157541575515756157571575815759157601576115762157631576415765157661576715768157691577015771157721577315774157751577615777157781577915780157811578215783157841578515786157871578815789157901579115792157931579415795157961579715798157991580015801158021580315804158051580615807158081580915810158111581215813158141581515816158171581815819158201582115822158231582415825158261582715828158291583015831158321583315834158351583615837158381583915840158411584215843158441584515846158471584815849158501585115852158531585415855158561585715858158591586015861158621586315864158651586615867158681586915870158711587215873158741587515876158771587815879158801588115882158831588415885158861588715888158891589015891158921589315894158951589615897158981589915900159011590215903159041590515906159071590815909159101591115912159131591415915159161591715918159191592015921159221592315924159251592615927159281592915930159311593215933159341593515936159371593815939159401594115942159431594415945159461594715948159491595015951159521595315954159551595615957159581595915960159611596215963159641596515966159671596815969159701597115972159731597415975159761597715978159791598015981159821598315984159851598615987159881598915990159911599215993159941599515996159971599815999160001600116002160031600416005160061600716008160091601016011160121601316014160151601616017160181601916020160211602216023160241602516026160271602816029160301603116032160331603416035160361603716038160391604016041160421604316044160451604616047160481604916050160511605216053160541605516056160571605816059160601606116062160631606416065160661606716068160691607016071160721607316074160751607616077160781607916080160811608216083160841608516086160871608816089160901609116092160931609416095160961609716098160991610016101161021610316104161051610616107161081610916110161111611216113161141611516116161171611816119161201612116122161231612416125161261612716128161291613016131161321613316134161351613616137161381613916140161411614216143161441614516146161471614816149161501615116152161531615416155161561615716158161591616016161161621616316164161651616616167161681616916170161711617216173161741617516176161771617816179161801618116182161831618416185161861618716188161891619016191161921619316194161951619616197161981619916200162011620216203162041620516206162071620816209162101621116212162131621416215162161621716218162191622016221162221622316224162251622616227162281622916230162311623216233162341623516236162371623816239162401624116242162431624416245162461624716248162491625016251162521625316254162551625616257162581625916260162611626216263162641626516266162671626816269162701627116272162731627416275162761627716278162791628016281162821628316284162851628616287162881628916290162911629216293162941629516296162971629816299163001630116302163031630416305163061630716308163091631016311163121631316314163151631616317163181631916320163211632216323163241632516326163271632816329163301633116332163331633416335163361633716338163391634016341163421634316344163451634616347163481634916350163511635216353163541635516356163571635816359163601636116362163631636416365163661636716368163691637016371163721637316374163751637616377163781637916380163811638216383163841638516386163871638816389163901639116392163931639416395163961639716398163991640016401164021640316404164051640616407164081640916410164111641216413164141641516416164171641816419164201642116422164231642416425164261642716428164291643016431164321643316434164351643616437164381643916440164411
644216443164441644516446164471644816449164501645116452164531645416455164561645716458164591646016461164621646316464164651646616467164681646916470164711647216473164741647516476164771647816479164801648116482164831648416485164861648716488164891649016491164921649316494164951649616497164981649916500165011650216503165041650516506165071650816509165101651116512165131651416515165161651716518165191652016521165221652316524165251652616527165281652916530165311653216533165341653516536165371653816539165401654116542165431654416545165461654716548165491655016551165521655316554165551655616557165581655916560165611656216563165641656516566165671656816569165701657116572165731657416575165761657716578165791658016581165821658316584165851658616587165881658916590165911659216593165941659516596165971659816599166001660116602166031660416605166061660716608166091661016611166121661316614166151661616617166181661916620166211662216623166241662516626166271662816629166301663116632166331663416635166361663716638166391664016641166421664316644166451664616647166481664916650166511665216653166541665516656166571665816659166601666116662166631666416665166661666716668166691667016671166721667316674166751667616677166781667916680166811668216683166841668516686166871668816689166901669116692166931669416695166961669716698166991670016701167021670316704167051670616707167081670916710167111671216713167141671516716167171671816719167201672116722167231672416725167261672716728167291673016731167321673316734167351673616737167381673916740167411674216743167441674516746167471674816749167501675116752167531675416755167561675716758167591676016761167621676316764167651676616767167681676916770167711677216773167741677516776167771677816779167801678116782167831678416785167861678716788167891679016791167921679316794167951679616797167981679916800168011680216803168041680516806168071680816809168101681116812168131681416815168161681716818168191682016821168221682316824168251682616827168281682916830168311683216833168341683516836168371683816839168401684116842168431684416845168461684716848168491685016851168521685316854168551685616857168581685916860168611686216863168641686516866168671686816869168701687116872168731687416875168761687716878168791688016881168821688316884168851688616887168881688916890168911689216893168941689516896168971689816899169001690116902169031690416905169061690716908169091691016911169121691316914169151691616917169181691916920169211692216923169241692516926169271692816929169301693116932169331693416935169361693716938169391694016941169421694316944169451694616947169481694916950169511695216953169541695516956169571695816959169601696116962169631696416965169661696716968169691697016971169721697316974169751697616977169781697916980169811698216983169841698516986169871698816989169901699116992169931699416995169961699716998169991700017001170021700317004170051700617007170081700917010170111701217013170141701517016170171701817019170201702117022170231702417025170261702717028170291703017031170321703317034170351703617037170381703917040170411704217043170441704517046170471704817049170501705117052170531705417055170561705717058170591706017061170621706317064170651706617067170681706917070170711707217073170741707517076170771707817079170801708117082170831708417085170861708717088170891709017091170921709317094170951709617097170981709917100171011710217103171041710517106171071710817109171101711117112171131711417115171161711717118171191712017121171221712317124171251712617127171281712917130171311713217133171341713517136171371713817139171401714117142171431714417145171461714717148171491715017151171521
715317154171551715617157171581715917160171611716217163171641716517166171671716817169171701717117172171731717417175171761717717178171791718017181171821718317184171851718617187171881718917190171911719217193171941719517196171971719817199172001720117202172031720417205172061720717208172091721017211172121721317214172151721617217172181721917220172211722217223172241722517226172271722817229172301723117232172331723417235172361723717238172391724017241172421724317244172451724617247172481724917250172511725217253172541725517256172571725817259172601726117262172631726417265172661726717268172691727017271172721727317274172751727617277172781727917280172811728217283172841728517286172871728817289172901729117292172931729417295172961729717298172991730017301173021730317304173051730617307173081730917310173111731217313173141731517316173171731817319173201732117322173231732417325173261732717328173291733017331173321733317334173351733617337173381733917340173411734217343173441734517346173471734817349173501735117352173531735417355173561735717358173591736017361173621736317364173651736617367173681736917370173711737217373173741737517376173771737817379173801738117382173831738417385173861738717388173891739017391173921739317394173951739617397173981739917400174011740217403174041740517406174071740817409174101741117412174131741417415174161741717418174191742017421174221742317424174251742617427174281742917430174311743217433174341743517436174371743817439174401744117442174431744417445174461744717448174491745017451174521745317454174551745617457174581745917460174611746217463174641746517466174671746817469174701747117472174731747417475174761747717478174791748017481174821748317484174851748617487174881748917490174911749217493174941749517496174971749817499175001750117502175031750417505175061750717508175091751017511175121751317514175151751617517175181751917520175211752217523175241752517526175271752817529175301753117532175331753417535175361753717538175391754017541175421754317544175451754617547175481754917550175511755217553175541755517556175571755817559175601756117562175631756417565175661756717568175691757017571175721757317574175751757617577175781757917580175811758217583175841758517586175871758817589175901759117592175931759417595175961759717598175991760017601176021760317604176051760617607176081760917610176111761217613176141761517616176171761817619176201762117622176231762417625176261762717628176291763017631176321763317634176351763617637176381763917640176411764217643176441764517646176471764817649176501765117652176531765417655176561765717658176591766017661176621766317664176651766617667176681766917670176711767217673176741767517676176771767817679176801768117682176831768417685176861768717688176891769017691176921769317694176951769617697176981769917700177011770217703177041770517706177071770817709177101771117712177131771417715177161771717718177191772017721177221772317724177251772617727177281772917730177311773217733177341773517736177371773817739177401774117742177431774417745177461774717748177491775017751177521775317754177551775617757177581775917760177611776217763177641776517766177671776817769177701777117772177731777417775177761777717778177791778017781177821778317784177851778617787177881778917790177911779217793177941779517796177971779817799178001780117802178031780417805178061780717808178091781017811178121781317814178151781617817178181781917820178211782217823178241782517826178271782817829178301783117832178331783417835178361783717838178391784017841178421784317844178451784617847178481784917850178511785217853178541785517856178571785817859178601786117862178631
786417865178661786717868178691787017871178721787317874178751787617877178781787917880178811788217883178841788517886178871788817889178901789117892178931789417895178961789717898178991790017901179021790317904179051790617907179081790917910179111791217913179141791517916179171791817919179201792117922179231792417925179261792717928179291793017931179321793317934179351793617937179381793917940179411794217943179441794517946179471794817949179501795117952179531795417955179561795717958179591796017961179621796317964179651796617967179681796917970179711797217973179741797517976179771797817979179801798117982179831798417985179861798717988179891799017991179921799317994179951799617997179981799918000180011800218003180041800518006180071800818009180101801118012180131801418015180161801718018180191802018021180221802318024180251802618027180281802918030180311803218033180341803518036180371803818039180401804118042180431804418045180461804718048180491805018051180521805318054180551805618057180581805918060180611806218063180641806518066180671806818069180701807118072180731807418075180761807718078180791808018081180821808318084180851808618087180881808918090180911809218093180941809518096180971809818099181001810118102181031810418105181061810718108181091811018111181121811318114181151811618117181181811918120181211812218123181241812518126181271812818129181301813118132181331813418135181361813718138181391814018141181421814318144181451814618147181481814918150181511815218153181541815518156181571815818159181601816118162181631816418165181661816718168181691817018171181721817318174181751817618177181781817918180181811818218183181841818518186181871818818189181901819118192181931819418195181961819718198181991820018201182021820318204182051820618207182081820918210182111821218213182141821518216182171821818219182201822118222182231822418225182261822718228182291823018231182321823318234182351823618237182381823918240182411824218243182441824518246182471824818249182501825118252182531825418255182561825718258182591826018261182621826318264182651826618267182681826918270182711827218273182741827518276182771827818279182801828118282182831828418285182861828718288182891829018291182921829318294182951829618297182981829918300183011830218303183041830518306183071830818309183101831118312183131831418315183161831718318183191832018321183221832318324183251832618327183281832918330183311833218333183341833518336183371833818339183401834118342183431834418345183461834718348183491835018351183521835318354183551835618357183581835918360183611836218363183641836518366183671836818369183701837118372183731837418375183761837718378183791838018381183821838318384183851838618387183881838918390183911839218393183941839518396183971839818399184001840118402184031840418405184061840718408184091841018411184121841318414184151841618417184181841918420184211842218423184241842518426184271842818429184301843118432184331843418435184361843718438184391844018441184421844318444184451844618447184481844918450184511845218453184541845518456184571845818459184601846118462184631846418465184661846718468184691847018471184721847318474184751847618477184781847918480184811848218483184841848518486184871848818489184901849118492184931849418495184961849718498184991850018501185021850318504185051850618507185081850918510185111851218513185141851518516185171851818519185201852118522185231852418525185261852718528185291853018531185321853318534185351853618537185381853918540185411854218543185441854518546185471854818549185501855118552185531855418555185561855718558185591856018561185621856318564185651856618567185681856918570185711857218573185741
857518576185771857818579185801858118582185831858418585185861858718588185891859018591185921859318594185951859618597185981859918600186011860218603186041860518606186071860818609186101861118612186131861418615186161861718618186191862018621186221862318624186251862618627186281862918630186311863218633186341863518636186371863818639186401864118642186431864418645186461864718648186491865018651186521865318654186551865618657186581865918660186611866218663186641866518666186671866818669186701867118672186731867418675186761867718678186791868018681186821868318684186851868618687186881868918690186911869218693186941869518696186971869818699187001870118702187031870418705187061870718708187091871018711187121871318714187151871618717187181871918720187211872218723187241872518726187271872818729187301873118732187331873418735187361873718738187391874018741187421874318744187451874618747187481874918750187511875218753187541875518756187571875818759187601876118762187631876418765187661876718768187691877018771187721877318774187751877618777187781877918780187811878218783187841878518786187871878818789187901879118792187931879418795187961879718798187991880018801188021880318804188051880618807188081880918810188111881218813188141881518816188171881818819188201882118822188231882418825188261882718828188291883018831188321883318834188351883618837188381883918840188411884218843188441884518846188471884818849188501885118852188531885418855188561885718858188591886018861188621886318864188651886618867188681886918870188711887218873188741887518876188771887818879188801888118882188831888418885188861888718888188891889018891188921889318894188951889618897188981889918900189011890218903189041890518906189071890818909189101891118912189131891418915189161891718918189191892018921189221892318924189251892618927189281892918930189311893218933189341893518936189371893818939189401894118942189431894418945189461894718948189491895018951189521895318954189551895618957189581895918960189611896218963189641896518966189671896818969189701897118972189731897418975189761897718978189791898018981189821898318984189851898618987189881898918990189911899218993189941899518996189971899818999190001900119002190031900419005190061900719008190091901019011190121901319014190151901619017190181901919020190211902219023190241902519026190271902819029190301903119032190331903419035190361903719038190391904019041190421904319044190451904619047190481904919050190511905219053190541905519056190571905819059190601906119062190631906419065190661906719068190691907019071190721907319074190751907619077190781907919080190811908219083190841908519086190871908819089190901909119092190931909419095190961909719098190991910019101191021910319104191051910619107191081910919110191111911219113191141911519116191171911819119191201912119122191231912419125191261912719128191291913019131191321913319134191351913619137191381913919140191411914219143191441914519146191471914819149191501915119152191531915419155191561915719158191591916019161191621916319164191651916619167191681916919170191711917219173191741917519176191771917819179191801918119182191831918419185191861918719188191891919019191191921919319194191951919619197191981919919200192011920219203192041920519206192071920819209192101921119212192131921419215192161921719218192191922019221192221922319224192251922619227192281922919230192311923219233192341923519236192371923819239192401924119242192431924419245192461924719248192491925019251192521925319254192551925619257192581925919260192611926219263192641926519266192671926819269192701927119272192731927419275192761927719278192791928019281192821928319284192851
928619287192881928919290192911929219293192941929519296192971929819299193001930119302193031930419305193061930719308193091931019311193121931319314193151931619317193181931919320193211932219323193241932519326193271932819329193301933119332193331933419335193361933719338193391934019341193421934319344193451934619347193481934919350193511935219353193541935519356193571935819359193601936119362193631936419365193661936719368193691937019371193721937319374193751937619377193781937919380193811938219383193841938519386193871938819389193901939119392193931939419395193961939719398193991940019401194021940319404194051940619407194081940919410194111941219413194141941519416194171941819419194201942119422194231942419425194261942719428194291943019431194321943319434194351943619437194381943919440194411944219443194441944519446194471944819449194501945119452194531945419455194561945719458194591946019461194621946319464194651946619467194681946919470194711947219473194741947519476194771947819479194801948119482194831948419485194861948719488194891949019491194921949319494194951949619497194981949919500195011950219503195041950519506195071950819509195101951119512195131951419515195161951719518195191952019521195221952319524195251952619527195281952919530195311953219533195341953519536195371953819539195401954119542195431954419545195461954719548195491955019551195521955319554195551955619557195581955919560195611956219563195641956519566195671956819569195701957119572195731957419575195761957719578195791958019581195821958319584195851958619587195881958919590195911959219593195941959519596195971959819599196001960119602196031960419605196061960719608196091961019611196121961319614196151961619617196181961919620196211962219623196241962519626196271962819629196301963119632196331963419635196361963719638196391964019641196421964319644196451964619647196481964919650196511965219653196541965519656196571965819659196601966119662196631966419665196661966719668196691967019671196721967319674196751967619677196781967919680196811968219683196841968519686196871968819689196901969119692196931969419695196961969719698196991970019701197021970319704197051970619707197081970919710197111971219713197141971519716197171971819719197201972119722197231972419725197261972719728197291973019731197321973319734197351973619737197381973919740197411974219743197441974519746197471974819749197501975119752197531975419755197561975719758197591976019761197621976319764197651976619767197681976919770197711977219773197741977519776197771977819779197801978119782197831978419785197861978719788197891979019791197921979319794197951979619797197981979919800198011980219803198041980519806198071980819809198101981119812198131981419815198161981719818198191982019821198221982319824198251982619827198281982919830198311983219833198341983519836198371983819839198401984119842198431984419845198461984719848198491985019851198521985319854198551985619857198581985919860198611986219863198641986519866198671986819869198701987119872198731987419875198761987719878198791988019881198821988319884198851988619887198881988919890198911989219893198941989519896198971989819899199001990119902199031990419905199061990719908199091991019911199121991319914199151991619917199181991919920199211992219923199241992519926199271992819929199301993119932199331993419935199361993719938199391994019941199421994319944199451994619947199481994919950199511995219953199541995519956199571995819959199601996119962199631996419965199661996719968199691997019971199721997319974199751997619977199781997919980199811998219983199841998519986199871998819989199901999119992199931999419995199961
99971999819999200002000120002200032000420005200062000720008200092001020011200122001320014200152001620017200182001920020200212002220023200242002520026200272002820029200302003120032200332003420035200362003720038200392004020041200422004320044200452004620047200482004920050200512005220053200542005520056200572005820059200602006120062200632006420065200662006720068200692007020071200722007320074200752007620077200782007920080200812008220083200842008520086200872008820089200902009120092200932009420095200962009720098200992010020101201022010320104201052010620107201082010920110201112011220113201142011520116201172011820119201202012120122201232012420125201262012720128201292013020131201322013320134201352013620137201382013920140201412014220143201442014520146201472014820149201502015120152201532015420155201562015720158201592016020161201622016320164201652016620167201682016920170201712017220173201742017520176201772017820179201802018120182201832018420185201862018720188201892019020191201922019320194201952019620197201982019920200202012020220203202042020520206202072020820209202102021120212202132021420215202162021720218202192022020221202222022320224202252022620227202282022920230202312023220233202342023520236202372023820239202402024120242202432024420245202462024720248202492025020251202522025320254202552025620257202582025920260202612026220263202642026520266202672026820269202702027120272202732027420275202762027720278202792028020281202822028320284202852028620287202882028920290202912029220293202942029520296202972029820299203002030120302203032030420305203062030720308203092031020311203122031320314203152031620317203182031920320203212032220323203242032520326203272032820329
#define _CRT_SECURE_NO_DEPRECATE // Disables ridiculous "unsafe" warnings on Windows
#define _USE_MATH_DEFINES // For M_PI on MSVC

#include "ggml-impl.h"
#include "ggml-quants.h"

#if defined(_MSC_VER) || defined(__MINGW32__)
#include <malloc.h> // using malloc.h with MSC/MINGW
#elif !defined(__FreeBSD__) && !defined(__NetBSD__) && !defined(__OpenBSD__)
#include <alloca.h>
#endif

#include <assert.h>
#include <errno.h>
#include <time.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>
#include <float.h>
#include <limits.h>
#include <stdarg.h>
#include <signal.h>

#ifdef GGML_USE_METAL
#include <unistd.h>
#endif

#if defined(_MSC_VER)
// disable "possible loss of data" to avoid hundreds of casts
// we should just be careful :)
#pragma warning(disable: 4244 4267)

// disable POSIX deprecation warnings
// these functions are never going away, anyway
#pragma warning(disable: 4996)
#endif

#if defined(_WIN32)

#include <windows.h>

typedef volatile LONG atomic_int;
typedef atomic_int atomic_bool;

static void atomic_store(atomic_int * ptr, LONG val) {
    InterlockedExchange(ptr, val);
}
static LONG atomic_load(atomic_int * ptr) {
    return InterlockedCompareExchange(ptr, 0, 0);
}
static LONG atomic_fetch_add(atomic_int * ptr, LONG inc) {
    return InterlockedExchangeAdd(ptr, inc);
}
static LONG atomic_fetch_sub(atomic_int * ptr, LONG dec) {
    return atomic_fetch_add(ptr, -(dec));
}

typedef HANDLE pthread_t;

typedef DWORD thread_ret_t;
static int pthread_create(pthread_t * out, void * unused, thread_ret_t(*func)(void *), void * arg) {
    (void) unused;
    HANDLE handle = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE) func, arg, 0, NULL);
    if (handle == NULL)
    {
        return EAGAIN;
    }

    *out = handle;
    return 0;
}

static int pthread_join(pthread_t thread, void * unused) {
    (void) unused;
    int ret = (int) WaitForSingleObject(thread, INFINITE);
    CloseHandle(thread);
    return ret;
}

static int sched_yield (void) {
    Sleep (0);
    return 0;
}
#else
#include <pthread.h>
#include <stdatomic.h>

typedef void * thread_ret_t;

#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

#endif
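// --- illustrative sketch (editor's addition, not part of the original file) ---
// With the shim above, the same thread-spawning pattern compiles on both the
// Win32 branch (HANDLE/CreateThread) and the POSIX branch (<pthread.h>).
// The names example_worker and example_spawn_join are hypothetical.
static thread_ret_t example_worker(void * arg) {
    int * value = (int *) arg;
    *value += 1;              // do some work
    return 0;
}

static int example_spawn_join(void) {
    int value = 41;
    pthread_t th;
    if (pthread_create(&th, NULL, example_worker, &value) != 0) {
        return -1;            // EAGAIN from the Win32 shim, an errno-style code on POSIX
    }
    pthread_join(th, NULL);   // blocks until the worker returns
    return value;             // 42 if the worker ran
}
// --- end of sketch ---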
#ifdef GGML_USE_CPU_HBM
#include <hbwmalloc.h>
#endif

#if defined(__APPLE__)
#include <TargetConditionals.h>
#endif

#if (defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)) && \
    (!defined(TARGET_OS_TV) && !defined(TARGET_OS_WATCH))

#include <sys/wait.h>

void ggml_print_backtrace(void) {
    /*
    #include <execinfo.h>
    #include <dlfcn.h>

    void * trace[100];

    int nptrs = backtrace(trace, sizeof(trace)/sizeof(trace[0]));

    backtrace_symbols_fd(trace, nptrs, STDERR_FILENO);
    */

    // backtrace_symbols does not show line numbers, use gdb instead
    char attach[32];
    snprintf(attach, sizeof(attach), "attach %d", getpid());
    int pid = fork();
    if (pid == 0) {
        execlp("gdb", "gdb", "--batch",
            "-ex", "set style enabled on",
            "-ex", attach,
            "-ex", "bt -frame-info source-and-location",
            "-ex", "detach",
            "-ex", "quit",
            (char *) NULL);
    } else {
        waitpid(pid, NULL, 0);
    }
}
#else
void ggml_print_backtrace(void) {
    // platform not supported
}
#endif
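// --- illustrative sketch (editor's addition, not part of the original file) ---
// ggml_print_backtrace() forks a child gdb that attaches to this process and
// prints a source-annotated backtrace. One hypothetical use is a fatal-signal
// handler for debugging builds (the handler names below are illustrative only;
// <signal.h> is already included above). Note that forking gdb from a signal
// handler is not strictly async-signal-safe, so this is a debug aid, not
// production error handling.
static void ggml_example_abort_handler(int sig) {
    (void) sig;
    ggml_print_backtrace();
    _Exit(1); // do not return into the faulting code
}

static void ggml_example_install_abort_handler(void) {
    signal(SIGSEGV, ggml_example_abort_handler);
    signal(SIGABRT, ggml_example_abort_handler);
}
// --- end of sketch ---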
/*#define GGML_PERF*/
#define GGML_DEBUG 0
#define GGML_GELU_FP16
#define GGML_GELU_QUICK_FP16
#define GGML_SILU_FP16
// #define GGML_CROSS_ENTROPY_EXP_FP16
// #define GGML_FLASH_ATTN_EXP_FP16

#define GGML_SOFT_MAX_UNROLL 4
#define GGML_VEC_DOT_UNROLL  2
#define GGML_VEC_MAD_UNROLL  32

//
// logging
//

#if (GGML_DEBUG >= 1)
#define GGML_PRINT_DEBUG(...) printf(__VA_ARGS__)
#else
#define GGML_PRINT_DEBUG(...)
#endif

#if (GGML_DEBUG >= 5)
#define GGML_PRINT_DEBUG_5(...) printf(__VA_ARGS__)
#else
#define GGML_PRINT_DEBUG_5(...)
#endif

#if (GGML_DEBUG >= 10)
#define GGML_PRINT_DEBUG_10(...) printf(__VA_ARGS__)
#else
#define GGML_PRINT_DEBUG_10(...)
#endif

#define GGML_PRINT(...) printf(__VA_ARGS__)

//
// end of logging block
//

#ifdef GGML_USE_ACCELERATE
// uncomment to use vDSP for soft max computation
// note: not sure if it is actually faster
//#define GGML_SOFT_MAX_ACCELERATE
#endif

#if defined(_MSC_VER) || defined(__MINGW32__)
#define GGML_ALIGNED_MALLOC(size) _aligned_malloc(size, GGML_MEM_ALIGN)
#define GGML_ALIGNED_FREE(ptr)    _aligned_free(ptr)
#else
inline static void * ggml_aligned_malloc(size_t size) {
    if (size == 0) {
        GGML_PRINT("WARNING: Behavior may be unexpected when allocating 0 bytes for ggml_aligned_malloc!\n");
        return NULL;
    }
    void * aligned_memory = NULL;
#ifdef GGML_USE_CPU_HBM
    int result = hbw_posix_memalign(&aligned_memory, 16, size);
#elif GGML_USE_METAL
    int result = posix_memalign(&aligned_memory, sysconf(_SC_PAGESIZE), size);
#else
    int result = posix_memalign(&aligned_memory, GGML_MEM_ALIGN, size);
#endif
    if (result != 0) {
        // Handle allocation failure
        const char *error_desc = "unknown allocation error";
        switch (result) {
            case EINVAL:
                error_desc = "invalid alignment value";
                break;
            case ENOMEM:
                error_desc = "insufficient memory";
                break;
        }
        GGML_PRINT("%s: %s (attempted to allocate %6.2f MB)\n", __func__, error_desc, size/(1024.0*1024.0));
        return NULL;
    }
    return aligned_memory;
}
#define GGML_ALIGNED_MALLOC(size) ggml_aligned_malloc(size)
#ifdef GGML_USE_CPU_HBM
#define GGML_ALIGNED_FREE(ptr)    if(NULL != ptr) hbw_free(ptr)
#else
#define GGML_ALIGNED_FREE(ptr)    free(ptr)
#endif
#endif
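// --- illustrative sketch (editor's addition, not part of the original file) ---
// GGML_ALIGNED_MALLOC/GGML_ALIGNED_FREE give one portable spelling for aligned
// allocation: _aligned_malloc on MSVC/MinGW, posix_memalign elsewhere, and
// hbw_posix_memalign when GGML_USE_CPU_HBM routes buffers to high-bandwidth
// memory. A hypothetical caller (ggml_example_aligned_alloc_demo) would be:
static int ggml_example_aligned_alloc_demo(size_t n_floats) {
    float * buf = (float *) GGML_ALIGNED_MALLOC(n_floats * sizeof(float));
    if (buf == NULL) {
        return 1; // failure is also reported by ggml_aligned_malloc itself
    }
    for (size_t i = 0; i < n_floats; ++i) {
        buf[i] = 0.0f; // use the aligned buffer
    }
    GGML_ALIGNED_FREE(buf);
    return 0;
}
// --- end of sketch ---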
#define UNUSED GGML_UNUSED

#define SWAP(x, y, T) do { T SWAP = x; x = y; y = SWAP; } while (0)

#if defined(GGML_USE_ACCELERATE)
#include <Accelerate/Accelerate.h>
#if defined(GGML_USE_CLBLAST) // allow usage of CLBlast alongside Accelerate functions
#include "ggml-opencl.h"
#endif
#elif defined(GGML_USE_OPENBLAS)
#if defined(GGML_BLAS_USE_MKL)
#include <mkl.h>
#else
#include <cblas.h>
#endif
#elif defined(GGML_USE_CUBLAS)
#include "ggml-cuda.h"
#elif defined(GGML_USE_CLBLAST)
#include "ggml-opencl.h"
#endif

// floating point type used to accumulate sums
typedef double ggml_float;

#undef MIN
#undef MAX

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

//
// global data
//

// precomputed gelu table for f16 (128 KB)
static ggml_fp16_t ggml_table_gelu_f16[1 << 16];

// precomputed quick gelu table for f16 (128 KB)
static ggml_fp16_t ggml_table_gelu_quick_f16[1 << 16];

// precomputed silu table for f16 (128 KB)
static ggml_fp16_t ggml_table_silu_f16[1 << 16];

// precomputed exp table for f16 (128 KB)
static ggml_fp16_t ggml_table_exp_f16[1 << 16];

// precomputed f32 table for f16 (256 KB) (ggml-impl.h)
float ggml_table_f32_f16[1 << 16];

// note: do not use these inside ggml.c
// these are meant to be used via the ggml.h API
float ggml_fp16_to_fp32(ggml_fp16_t x) {
    return (float) GGML_FP16_TO_FP32(x);
}

ggml_fp16_t ggml_fp32_to_fp16(float x) {
    return GGML_FP32_TO_FP16(x);
}

void ggml_fp16_to_fp32_row(const ggml_fp16_t * x, float * y, int n) {
    for (int i = 0; i < n; i++) {
        y[i] = GGML_FP16_TO_FP32(x[i]);
    }
}

void ggml_fp32_to_fp16_row(const float * x, ggml_fp16_t * y, int n) {
    int i = 0;
#if defined(__F16C__)
    for (; i + 7 < n; i += 8) {
        __m256 x_vec = _mm256_loadu_ps(x + i);
        __m128i y_vec = _mm256_cvtps_ph(x_vec, _MM_FROUND_TO_NEAREST_INT);
        _mm_storeu_si128((__m128i *)(y + i), y_vec);
    }
    for(; i + 3 < n; i += 4) {
        __m128 x_vec = _mm_loadu_ps(x + i);
        __m128i y_vec = _mm_cvtps_ph(x_vec, _MM_FROUND_TO_NEAREST_INT);
        _mm_storel_epi64((__m128i *)(y + i), y_vec);
    }
#endif
    for (; i < n; i++) {
        y[i] = GGML_FP32_TO_FP16(x[i]);
    }
}
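// --- illustrative sketch (editor's addition, not part of the original file) ---
// The row converters above translate whole buffers between f32 and f16; on x86
// with F16C the f32 -> f16 direction is vectorized 8 (then 4) elements at a time
// and the scalar tail handles the remainder. A hypothetical roundtrip helper
// (ggml_example_fp16_roundtrip_max_err) looks like this; some precision loss is
// expected because f16 has only ~3 decimal digits of mantissa.
static float ggml_example_fp16_roundtrip_max_err(const float * src, float * dst, ggml_fp16_t * tmp, int n) {
    ggml_fp32_to_fp16_row(src, tmp, n); // f32 -> f16
    ggml_fp16_to_fp32_row(tmp, dst, n); // f16 -> f32
    float max_err = 0.0f;
    for (int i = 0; i < n; ++i) {
        const float err = fabsf(src[i] - dst[i]);
        max_err = MAX(max_err, err);
    }
    return max_err;
}
// --- end of sketch ---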
//
// timing
//

#if defined(_MSC_VER) || defined(__MINGW32__)
static int64_t timer_freq, timer_start;
void ggml_time_init(void) {
    LARGE_INTEGER t;
    QueryPerformanceFrequency(&t);
    timer_freq = t.QuadPart;

    // The multiplication by 1000 or 1000000 below can cause an overflow if timer_freq
    // and the uptime are high enough.
    // We subtract the program start time to reduce the likelihood of that happening.
    QueryPerformanceCounter(&t);
    timer_start = t.QuadPart;
}
int64_t ggml_time_ms(void) {
    LARGE_INTEGER t;
    QueryPerformanceCounter(&t);
    return ((t.QuadPart-timer_start) * 1000) / timer_freq;
}
int64_t ggml_time_us(void) {
    LARGE_INTEGER t;
    QueryPerformanceCounter(&t);
    return ((t.QuadPart-timer_start) * 1000000) / timer_freq;
}
#else
void ggml_time_init(void) {}
int64_t ggml_time_ms(void) {
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (int64_t)ts.tv_sec*1000 + (int64_t)ts.tv_nsec/1000000;
}

int64_t ggml_time_us(void) {
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (int64_t)ts.tv_sec*1000000 + (int64_t)ts.tv_nsec/1000;
}
#endif

int64_t ggml_cycles(void) {
    return clock();
}

int64_t ggml_cycles_per_ms(void) {
    return CLOCKS_PER_SEC/1000;
}

#ifdef GGML_PERF
#define ggml_perf_time_ms()       ggml_time_ms()
#define ggml_perf_time_us()       ggml_time_us()
#define ggml_perf_cycles()        ggml_cycles()
#define ggml_perf_cycles_per_ms() ggml_cycles_per_ms()
#else
#define ggml_perf_time_ms()       0
#define ggml_perf_time_us()       0
#define ggml_perf_cycles()        0
#define ggml_perf_cycles_per_ms() 0
#endif
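// --- illustrative sketch (editor's addition, not part of the original file) ---
// ggml_time_us()/ggml_time_ms() are monotonic (QueryPerformanceCounter on
// Windows, CLOCK_MONOTONIC elsewhere), so a section is measured by differencing
// two samples; the ggml_perf_* wrappers compile to the same calls when GGML_PERF
// is defined and to a constant 0 otherwise. Hypothetical usage:
static int64_t ggml_example_time_block_us(void (*fn)(void)) {
    const int64_t t_start_us = ggml_time_us();
    fn();                                // the work being measured
    return ggml_time_us() - t_start_us;  // elapsed wall time in microseconds
}
// --- end of sketch ---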
//
// cache line
//

#if defined(__cpp_lib_hardware_interference_size)
#define CACHE_LINE_SIZE hardware_destructive_interference_size
#else
#if defined(__POWER9_VECTOR__)
#define CACHE_LINE_SIZE 128
#else
#define CACHE_LINE_SIZE 64
#endif
#endif

static const size_t CACHE_LINE_SIZE_F32 = CACHE_LINE_SIZE/sizeof(float);

static void ggml_vec_dot_f32(const int n, float * restrict s, const float * restrict x, const float * restrict y);
static void ggml_vec_dot_f16(const int n, float * restrict s, ggml_fp16_t * restrict x, ggml_fp16_t * restrict y);

static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = {
    [GGML_TYPE_I8] = {
        .type_name = "i8",
        .blck_size = 1,
        .type_size = sizeof(int8_t),
        .is_quantized = false,
    },
    [GGML_TYPE_I16] = {
        .type_name = "i16",
        .blck_size = 1,
        .type_size = sizeof(int16_t),
        .is_quantized = false,
    },
    [GGML_TYPE_I32] = {
        .type_name = "i32",
        .blck_size = 1,
        .type_size = sizeof(int32_t),
        .is_quantized = false,
    },
    [GGML_TYPE_F32] = {
        .type_name = "f32",
        .blck_size = 1,
        .type_size = sizeof(float),
        .is_quantized = false,
        .vec_dot = (ggml_vec_dot_t) ggml_vec_dot_f32,
        .vec_dot_type = GGML_TYPE_F32,
    },
    [GGML_TYPE_F16] = {
        .type_name = "f16",
        .blck_size = 1,
        .type_size = sizeof(ggml_fp16_t),
        .is_quantized = false,
        .to_float = (ggml_to_float_t) ggml_fp16_to_fp32_row,
        .from_float = (ggml_from_float_t) ggml_fp32_to_fp16_row,
        .from_float_reference = (ggml_from_float_t) ggml_fp32_to_fp16_row,
        .vec_dot = (ggml_vec_dot_t) ggml_vec_dot_f16,
        .vec_dot_type = GGML_TYPE_F16,
    },
    [GGML_TYPE_Q4_0] = {
        .type_name = "q4_0",
        .blck_size = QK4_0,
        .type_size = sizeof(block_q4_0),
        .is_quantized = true,
        .to_float = (ggml_to_float_t) dequantize_row_q4_0,
        .from_float = quantize_row_q4_0,
        .from_float_reference = (ggml_from_float_t) quantize_row_q4_0_reference,
        .vec_dot = ggml_vec_dot_q4_0_q8_0,
        .vec_dot_type = GGML_TYPE_Q8_0,
    },
    [GGML_TYPE_Q4_1] = {
        .type_name = "q4_1",
        .blck_size = QK4_1,
        .type_size = sizeof(block_q4_1),
        .is_quantized = true,
        .to_float = (ggml_to_float_t) dequantize_row_q4_1,
        .from_float = quantize_row_q4_1,
        .from_float_reference = (ggml_from_float_t) quantize_row_q4_1_reference,
        .vec_dot = ggml_vec_dot_q4_1_q8_1,
        .vec_dot_type = GGML_TYPE_Q8_1,
    },
    [4] = { // GGML_TYPE_Q4_2
        .type_name = "DEPRECATED",
        .blck_size = 0,
        .type_size = 0,
        .is_quantized = false,
        .to_float = NULL,
        .from_float = NULL,
        .from_float_reference = NULL,
        .vec_dot = NULL,
        .vec_dot_type = GGML_TYPE_COUNT,
    },
    [5] = { // GGML_TYPE_Q4_3
        .type_name = "DEPRECATED",
        .blck_size = 0,
        .type_size = 0,
        .is_quantized = false,
        .to_float = NULL,
        .from_float = NULL,
        .from_float_reference = NULL,
        .vec_dot = NULL,
        .vec_dot_type = GGML_TYPE_COUNT,
    },
    [GGML_TYPE_Q5_0] = {
        .type_name = "q5_0",
        .blck_size = QK5_0,
        .type_size = sizeof(block_q5_0),
        .is_quantized = true,
        .to_float = (ggml_to_float_t) dequantize_row_q5_0,
        .from_float = quantize_row_q5_0,
        .from_float_reference = (ggml_from_float_t) quantize_row_q5_0_reference,
        .vec_dot = ggml_vec_dot_q5_0_q8_0,
        .vec_dot_type = GGML_TYPE_Q8_0,
    },
    [GGML_TYPE_Q5_1] = {
        .type_name = "q5_1",
        .blck_size = QK5_1,
        .type_size = sizeof(block_q5_1),
        .is_quantized = true,
        .to_float = (ggml_to_float_t) dequantize_row_q5_1,
        .from_float = quantize_row_q5_1,
        .from_float_reference = (ggml_from_float_t) quantize_row_q5_1_reference,
        .vec_dot = ggml_vec_dot_q5_1_q8_1,
        .vec_dot_type = GGML_TYPE_Q8_1,
    },
    [GGML_TYPE_Q8_0] = {
        .type_name = "q8_0",
        .blck_size = QK8_0,
        .type_size = sizeof(block_q8_0),
        .is_quantized = true,
        .to_float = (ggml_to_float_t) dequantize_row_q8_0,
        .from_float = quantize_row_q8_0,
        .from_float_reference = (ggml_from_float_t) quantize_row_q8_0_reference,
        .vec_dot = ggml_vec_dot_q8_0_q8_0,
        .vec_dot_type = GGML_TYPE_Q8_0,
    },
    [GGML_TYPE_Q8_1] = {
        .type_name = "q8_1",
        .blck_size = QK8_1,
        .type_size = sizeof(block_q8_1),
        .is_quantized = true,
        .from_float = quantize_row_q8_1,
        .from_float_reference = (ggml_from_float_t) quantize_row_q8_1_reference,
        .vec_dot_type = GGML_TYPE_Q8_1,
    },
    [GGML_TYPE_Q2_K] = {
        .type_name = "q2_K",
        .blck_size = QK_K,
        .type_size = sizeof(block_q2_K),
        .is_quantized = true,
        .to_float = (ggml_to_float_t) dequantize_row_q2_K,
        .from_float = quantize_row_q2_K,
        .from_float_reference = (ggml_from_float_t) quantize_row_q2_K_reference,
        .vec_dot = ggml_vec_dot_q2_K_q8_K,
        .vec_dot_type = GGML_TYPE_Q8_K,
    },
    [GGML_TYPE_Q3_K] = {
        .type_name = "q3_K",
        .blck_size = QK_K,
        .type_size = sizeof(block_q3_K),
        .is_quantized = true,
        .to_float = (ggml_to_float_t) dequantize_row_q3_K,
        .from_float = quantize_row_q3_K,
        .from_float_reference = (ggml_from_float_t) quantize_row_q3_K_reference,
        .vec_dot = ggml_vec_dot_q3_K_q8_K,
        .vec_dot_type = GGML_TYPE_Q8_K,
    },
    [GGML_TYPE_Q4_K] = {
        .type_name = "q4_K",
        .blck_size = QK_K,
        .type_size = sizeof(block_q4_K),
        .is_quantized = true,
        .to_float = (ggml_to_float_t) dequantize_row_q4_K,
        .from_float = quantize_row_q4_K,
        .from_float_reference = (ggml_from_float_t) quantize_row_q4_K_reference,
        .vec_dot = ggml_vec_dot_q4_K_q8_K,
        .vec_dot_type = GGML_TYPE_Q8_K,
    },
    [GGML_TYPE_Q5_K] = {
        .type_name = "q5_K",
        .blck_size = QK_K,
        .type_size = sizeof(block_q5_K),
        .is_quantized = true,
        .to_float = (ggml_to_float_t) dequantize_row_q5_K,
        .from_float = quantize_row_q5_K,
        .from_float_reference = (ggml_from_float_t) quantize_row_q5_K_reference,
        .vec_dot = ggml_vec_dot_q5_K_q8_K,
        .vec_dot_type = GGML_TYPE_Q8_K,
    },
    [GGML_TYPE_Q6_K] = {
        .type_name = "q6_K",
        .blck_size = QK_K,
        .type_size = sizeof(block_q6_K),
        .is_quantized = true,
        .to_float = (ggml_to_float_t) dequantize_row_q6_K,
        .from_float = quantize_row_q6_K,
        .from_float_reference = (ggml_from_float_t) quantize_row_q6_K_reference,
        .vec_dot = ggml_vec_dot_q6_K_q8_K,
        .vec_dot_type = GGML_TYPE_Q8_K,
    },
    [GGML_TYPE_IQ2_XXS] = {
        .type_name = "iq2_xxs",
        .blck_size = QK_K,
        .type_size = sizeof(block_iq2_xxs),
        .is_quantized = true,
        .to_float = (ggml_to_float_t) dequantize_row_iq2_xxs,
        .from_float = NULL,
        .from_float_reference = NULL,
        .vec_dot = ggml_vec_dot_iq2_xxs_q8_K,
        .vec_dot_type = GGML_TYPE_Q8_K,
    },
    [GGML_TYPE_IQ2_XS] = {
        .type_name = "iq2_xs",
        .blck_size = QK_K,
        .type_size = sizeof(block_iq2_xs),
        .is_quantized = true,
        .to_float = (ggml_to_float_t) dequantize_row_iq2_xs,
        .from_float = NULL,
        .from_float_reference = NULL,
        .vec_dot = ggml_vec_dot_iq2_xs_q8_K,
        .vec_dot_type = GGML_TYPE_Q8_K,
    },
    [GGML_TYPE_Q8_K] = {
        .type_name = "q8_K",
        .blck_size = QK_K,
        .type_size = sizeof(block_q8_K),
        .is_quantized = true,
        .from_float = quantize_row_q8_K,
    }
};

// For internal test use
ggml_type_traits_t ggml_internal_get_type_traits(enum ggml_type type) {
    GGML_ASSERT(type < GGML_TYPE_COUNT);
    return type_traits[type];
}
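// --- illustrative sketch (editor's addition, not part of the original file) ---
// type_traits[] is the per-type dispatch table: blck_size/type_size describe the
// storage layout, to_float/from_float (de)quantize one row, and
// vec_dot/vec_dot_type select the dot-product kernel and the format the other
// operand must be quantized to (e.g. q4_0 rows are dotted against q8_0 rows).
// A hypothetical helper that dequantizes one row of any type with a to_float
// handler:
static bool ggml_example_row_to_f32(enum ggml_type type, const void * row, float * dst, int n) {
    const ggml_type_traits_t traits = ggml_internal_get_type_traits(type);
    if (traits.to_float == NULL) {
        return false; // e.g. q8_1 and q8_K only define from_float
    }
    traits.to_float(row, dst, n); // n should be a multiple of traits.blck_size
    return true;
}
// --- end of sketch ---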
//
// simd mappings
//

#if defined(__ARM_NEON)
#if !defined(__aarch64__)

// 64-bit compatibility

inline static float vaddvq_f32(float32x4_t v) {
    return vgetq_lane_f32(v, 0) + vgetq_lane_f32(v, 1) + vgetq_lane_f32(v, 2) + vgetq_lane_f32(v, 3);
}

#endif
#endif

// we define a common set of C macros which map to specific intrinsics based on the current architecture
// we then implement the fundamental computation operations below using only these macros
// adding support for new architectures requires defining the corresponding SIMD macros
//
// GGML_F32_STEP / GGML_F16_STEP
//   number of elements to process in a single step
//
// GGML_F32_EPR / GGML_F16_EPR
//   number of elements to fit in a single register
//
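// --- illustrative sketch (editor's addition, not part of the original file) ---
// Regardless of architecture, the kernels below use these macros in the same
// shape. For example, the main loop of ggml_vec_dot_f32 further down the file
// is roughly:
//
//     GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO };   // GGML_F32_ARR = GGML_F32_STEP/GGML_F32_EPR
//     GGML_F32_VEC ax[GGML_F32_ARR];
//     GGML_F32_VEC ay[GGML_F32_ARR];
//     const int np = (n & ~(GGML_F32_STEP - 1));                // elements covered by full steps
//     for (int i = 0; i < np; i += GGML_F32_STEP) {
//         for (int j = 0; j < GGML_F32_ARR; j++) {
//             ax[j]  = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR);
//             ay[j]  = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
//             sum[j] = GGML_F32_VEC_FMA(sum[j], ax[j], ay[j]);
//         }
//     }
//     GGML_F32_VEC_REDUCE(sumf, sum);                           // horizontal add of all accumulators
//     for (int i = np; i < n; ++i) { sumf += x[i]*y[i]; }       // scalar leftovers
//
// so a new architecture only needs to provide the GGML_F32x*/GGML_F16x* macros.
// --- end of sketch ---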
#if defined(__ARM_NEON) && defined(__ARM_FEATURE_FMA)

#define GGML_SIMD

// F32 NEON

#define GGML_F32_STEP 16
#define GGML_F32_EPR  4

#define GGML_F32x4              float32x4_t
#define GGML_F32x4_ZERO         vdupq_n_f32(0.0f)
#define GGML_F32x4_SET1(x)      vdupq_n_f32(x)
#define GGML_F32x4_LOAD         vld1q_f32
#define GGML_F32x4_STORE        vst1q_f32
#define GGML_F32x4_FMA(a, b, c) vfmaq_f32(a, b, c)
#define GGML_F32x4_ADD          vaddq_f32
#define GGML_F32x4_MUL          vmulq_f32
#define GGML_F32x4_REDUCE_ONE(x) vaddvq_f32(x)
#define GGML_F32x4_REDUCE(res, x)            \
{                                            \
    int offset = GGML_F32_ARR >> 1;          \
    for (int i = 0; i < offset; ++i) {       \
        x[i] = vaddq_f32(x[i], x[offset+i]); \
    }                                        \
    offset >>= 1;                            \
    for (int i = 0; i < offset; ++i) {       \
        x[i] = vaddq_f32(x[i], x[offset+i]); \
    }                                        \
    offset >>= 1;                            \
    for (int i = 0; i < offset; ++i) {       \
        x[i] = vaddq_f32(x[i], x[offset+i]); \
    }                                        \
    res = GGML_F32x4_REDUCE_ONE(x[0]);       \
}

#define GGML_F32_VEC        GGML_F32x4
#define GGML_F32_VEC_ZERO   GGML_F32x4_ZERO
#define GGML_F32_VEC_SET1   GGML_F32x4_SET1
#define GGML_F32_VEC_LOAD   GGML_F32x4_LOAD
#define GGML_F32_VEC_STORE  GGML_F32x4_STORE
#define GGML_F32_VEC_FMA    GGML_F32x4_FMA
#define GGML_F32_VEC_ADD    GGML_F32x4_ADD
#define GGML_F32_VEC_MUL    GGML_F32x4_MUL
#define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE

// F16 NEON

#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
    #define GGML_F16_STEP 32
    #define GGML_F16_EPR  8

    #define GGML_F16x8              float16x8_t
    #define GGML_F16x8_ZERO         vdupq_n_f16(0.0f)
    #define GGML_F16x8_SET1(x)      vdupq_n_f16(x)
    #define GGML_F16x8_LOAD         vld1q_f16
    #define GGML_F16x8_STORE        vst1q_f16
    #define GGML_F16x8_FMA(a, b, c) vfmaq_f16(a, b, c)
    #define GGML_F16x8_ADD          vaddq_f16
    #define GGML_F16x8_MUL          vmulq_f16
    #define GGML_F16x8_REDUCE(res, x)                             \
    do {                                                          \
        int offset = GGML_F16_ARR >> 1;                           \
        for (int i = 0; i < offset; ++i) {                        \
            x[i] = vaddq_f16(x[i], x[offset+i]);                  \
        }                                                         \
        offset >>= 1;                                             \
        for (int i = 0; i < offset; ++i) {                        \
            x[i] = vaddq_f16(x[i], x[offset+i]);                  \
        }                                                         \
        offset >>= 1;                                             \
        for (int i = 0; i < offset; ++i) {                        \
            x[i] = vaddq_f16(x[i], x[offset+i]);                  \
        }                                                         \
        const float32x4_t t0 = vcvt_f32_f16(vget_low_f16 (x[0])); \
        const float32x4_t t1 = vcvt_f32_f16(vget_high_f16(x[0])); \
        res = (ggml_float) vaddvq_f32(vaddq_f32(t0, t1));         \
    } while (0)

    #define GGML_F16_VEC                GGML_F16x8
    #define GGML_F16_VEC_ZERO           GGML_F16x8_ZERO
    #define GGML_F16_VEC_SET1           GGML_F16x8_SET1
    #define GGML_F16_VEC_LOAD(p, i)     GGML_F16x8_LOAD(p)
    #define GGML_F16_VEC_STORE(p, r, i) GGML_F16x8_STORE(p, r[i])
    #define GGML_F16_VEC_FMA            GGML_F16x8_FMA
    #define GGML_F16_VEC_ADD            GGML_F16x8_ADD
    #define GGML_F16_VEC_MUL            GGML_F16x8_MUL
    #define GGML_F16_VEC_REDUCE         GGML_F16x8_REDUCE
#else
    // if FP16 vector arithmetic is not supported, we use FP32 instead
    // and take advantage of the vcvt_ functions to convert to/from FP16

    #define GGML_F16_STEP 16
    #define GGML_F16_EPR  4

    #define GGML_F32Cx4              float32x4_t
    #define GGML_F32Cx4_ZERO         vdupq_n_f32(0.0f)
    #define GGML_F32Cx4_SET1(x)      vdupq_n_f32(x)
    #define GGML_F32Cx4_LOAD(x)      vcvt_f32_f16(vld1_f16(x))
    #define GGML_F32Cx4_STORE(x, y)  vst1_f16(x, vcvt_f16_f32(y))
    #define GGML_F32Cx4_FMA(a, b, c) vfmaq_f32(a, b, c)
    #define GGML_F32Cx4_ADD          vaddq_f32
    #define GGML_F32Cx4_MUL          vmulq_f32
    #define GGML_F32Cx4_REDUCE       GGML_F32x4_REDUCE

    #define GGML_F16_VEC                GGML_F32Cx4
    #define GGML_F16_VEC_ZERO           GGML_F32Cx4_ZERO
    #define GGML_F16_VEC_SET1           GGML_F32Cx4_SET1
    #define GGML_F16_VEC_LOAD(p, i)     GGML_F32Cx4_LOAD(p)
    #define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx4_STORE(p, r[i])
    #define GGML_F16_VEC_FMA            GGML_F32Cx4_FMA
    #define GGML_F16_VEC_ADD            GGML_F32Cx4_ADD
    #define GGML_F16_VEC_MUL            GGML_F32Cx4_MUL
    #define GGML_F16_VEC_REDUCE         GGML_F32Cx4_REDUCE
#endif

#elif defined(__AVX__)

#define GGML_SIMD

// F32 AVX

#define GGML_F32_STEP 32
#define GGML_F32_EPR  8

#define GGML_F32x8         __m256
#define GGML_F32x8_ZERO    _mm256_setzero_ps()
#define GGML_F32x8_SET1(x) _mm256_set1_ps(x)
#define GGML_F32x8_LOAD    _mm256_loadu_ps
#define GGML_F32x8_STORE   _mm256_storeu_ps
#if defined(__FMA__)
    #define GGML_F32x8_FMA(a, b, c) _mm256_fmadd_ps(b, c, a)
#else
    #define GGML_F32x8_FMA(a, b, c) _mm256_add_ps(_mm256_mul_ps(b, c), a)
#endif
#define GGML_F32x8_ADD     _mm256_add_ps
#define GGML_F32x8_MUL     _mm256_mul_ps
#define GGML_F32x8_REDUCE(res, x)                                 \
do {                                                              \
    int offset = GGML_F32_ARR >> 1;                               \
    for (int i = 0; i < offset; ++i) {                            \
        x[i] = _mm256_add_ps(x[i], x[offset+i]);                  \
    }                                                             \
    offset >>= 1;                                                 \
    for (int i = 0; i < offset; ++i) {                            \
        x[i] = _mm256_add_ps(x[i], x[offset+i]);                  \
    }                                                             \
    offset >>= 1;                                                 \
    for (int i = 0; i < offset; ++i) {                            \
        x[i] = _mm256_add_ps(x[i], x[offset+i]);                  \
    }                                                             \
    const __m128 t0 = _mm_add_ps(_mm256_castps256_ps128(x[0]),    \
                                 _mm256_extractf128_ps(x[0], 1)); \
    const __m128 t1 = _mm_hadd_ps(t0, t0);                        \
    res = _mm_cvtss_f32(_mm_hadd_ps(t1, t1));                     \
} while (0)
// TODO: is this optimal ?

#define GGML_F32_VEC        GGML_F32x8
#define GGML_F32_VEC_ZERO   GGML_F32x8_ZERO
#define GGML_F32_VEC_SET1   GGML_F32x8_SET1
#define GGML_F32_VEC_LOAD   GGML_F32x8_LOAD
#define GGML_F32_VEC_STORE  GGML_F32x8_STORE
#define GGML_F32_VEC_FMA    GGML_F32x8_FMA
#define GGML_F32_VEC_ADD    GGML_F32x8_ADD
#define GGML_F32_VEC_MUL    GGML_F32x8_MUL
#define GGML_F32_VEC_REDUCE GGML_F32x8_REDUCE

// F16 AVX

#define GGML_F16_STEP 32
#define GGML_F16_EPR  8

// F16 arithmetic is not supported by AVX, so we use F32 instead

#define GGML_F32Cx8         __m256
#define GGML_F32Cx8_ZERO    _mm256_setzero_ps()
#define GGML_F32Cx8_SET1(x) _mm256_set1_ps(x)

#if defined(__F16C__)
// the _mm256_cvt intrinsics require F16C
#define GGML_F32Cx8_LOAD(x)     _mm256_cvtph_ps(_mm_loadu_si128((__m128i *)(x)))
#define GGML_F32Cx8_STORE(x, y) _mm_storeu_si128((__m128i *)(x), _mm256_cvtps_ph(y, 0))
#else
static inline __m256 __avx_f32cx8_load(ggml_fp16_t *x) {
    float tmp[8];

    for (int i = 0; i < 8; i++) {
        tmp[i] = GGML_FP16_TO_FP32(x[i]);
    }

    return _mm256_loadu_ps(tmp);
  734. }
  735. static inline void __avx_f32cx8_store(ggml_fp16_t *x, __m256 y) {
  736. float arr[8];
  737. _mm256_storeu_ps(arr, y);
  738. for (int i = 0; i < 8; i++)
  739. x[i] = GGML_FP32_TO_FP16(arr[i]);
  740. }
  741. #define GGML_F32Cx8_LOAD(x) __avx_f32cx8_load(x)
  742. #define GGML_F32Cx8_STORE(x, y) __avx_f32cx8_store(x, y)
  743. #endif
  744. #define GGML_F32Cx8_FMA GGML_F32x8_FMA
  745. #define GGML_F32Cx8_ADD _mm256_add_ps
  746. #define GGML_F32Cx8_MUL _mm256_mul_ps
  747. #define GGML_F32Cx8_REDUCE GGML_F32x8_REDUCE
  748. #define GGML_F16_VEC GGML_F32Cx8
  749. #define GGML_F16_VEC_ZERO GGML_F32Cx8_ZERO
  750. #define GGML_F16_VEC_SET1 GGML_F32Cx8_SET1
  751. #define GGML_F16_VEC_LOAD(p, i) GGML_F32Cx8_LOAD(p)
  752. #define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx8_STORE(p, r[i])
  753. #define GGML_F16_VEC_FMA GGML_F32Cx8_FMA
  754. #define GGML_F16_VEC_ADD GGML_F32Cx8_ADD
  755. #define GGML_F16_VEC_MUL GGML_F32Cx8_MUL
  756. #define GGML_F16_VEC_REDUCE GGML_F32Cx8_REDUCE
  757. #elif defined(__POWER9_VECTOR__)
  758. #define GGML_SIMD
  759. // F32 POWER9
  760. #define GGML_F32_STEP 32
  761. #define GGML_F32_EPR 4
  762. #define GGML_F32x4 vector float
  763. #define GGML_F32x4_ZERO 0.0f
  764. #define GGML_F32x4_SET1 vec_splats
  765. #define GGML_F32x4_LOAD(p) vec_xl(0, p)
  766. #define GGML_F32x4_STORE(p, r) vec_xst(r, 0, p)
  767. #define GGML_F32x4_FMA(a, b, c) vec_madd(b, c, a)
  768. #define GGML_F32x4_ADD vec_add
  769. #define GGML_F32x4_MUL vec_mul
  770. #define GGML_F32x4_REDUCE(res, x) \
  771. { \
  772. int offset = GGML_F32_ARR >> 1; \
  773. for (int i = 0; i < offset; ++i) { \
  774. x[i] = vec_add(x[i], x[offset+i]); \
  775. } \
  776. offset >>= 1; \
  777. for (int i = 0; i < offset; ++i) { \
  778. x[i] = vec_add(x[i], x[offset+i]); \
  779. } \
  780. offset >>= 1; \
  781. for (int i = 0; i < offset; ++i) { \
  782. x[i] = vec_add(x[i], x[offset+i]); \
  783. } \
  784. res = vec_extract(x[0], 0) + \
  785. vec_extract(x[0], 1) + \
  786. vec_extract(x[0], 2) + \
  787. vec_extract(x[0], 3); \
  788. }
  789. #define GGML_F32_VEC GGML_F32x4
  790. #define GGML_F32_VEC_ZERO GGML_F32x4_ZERO
  791. #define GGML_F32_VEC_SET1 GGML_F32x4_SET1
  792. #define GGML_F32_VEC_LOAD GGML_F32x4_LOAD
  793. #define GGML_F32_VEC_STORE GGML_F32x4_STORE
  794. #define GGML_F32_VEC_FMA GGML_F32x4_FMA
  795. #define GGML_F32_VEC_ADD GGML_F32x4_ADD
  796. #define GGML_F32_VEC_MUL GGML_F32x4_MUL
  797. #define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE
  798. // F16 POWER9
  799. #define GGML_F16_STEP GGML_F32_STEP
  800. #define GGML_F16_EPR GGML_F32_EPR
  801. #define GGML_F16_VEC GGML_F32x4
  802. #define GGML_F16_VEC_ZERO GGML_F32x4_ZERO
  803. #define GGML_F16_VEC_SET1 GGML_F32x4_SET1
  804. #define GGML_F16_VEC_FMA GGML_F32x4_FMA
  805. #define GGML_F16_VEC_REDUCE GGML_F32x4_REDUCE
  806. // Use vec_xl, not vec_ld, in case the load address is not aligned.
  807. #define GGML_F16_VEC_LOAD(p, i) (i & 0x1) ? \
  808. vec_extract_fp32_from_shorth(vec_xl(0, p - GGML_F16_EPR)) : \
  809. vec_extract_fp32_from_shortl(vec_xl(0, p))
  810. #define GGML_ENDIAN_BYTE(i) ((unsigned char *)&(uint16_t){1})[i]
  811. #define GGML_F16_VEC_STORE(p, r, i) \
  812. if (i & 0x1) \
  813. vec_xst(vec_pack_to_short_fp32(r[i - GGML_ENDIAN_BYTE(1)], \
  814. r[i - GGML_ENDIAN_BYTE(0)]), \
  815. 0, p - GGML_F16_EPR)
  816. #elif defined(__wasm_simd128__)
  817. #define GGML_SIMD
  818. // F32 WASM
  819. #define GGML_F32_STEP 16
  820. #define GGML_F32_EPR 4
  821. #define GGML_F32x4 v128_t
  822. #define GGML_F32x4_ZERO wasm_f32x4_splat(0.0f)
  823. #define GGML_F32x4_SET1(x) wasm_f32x4_splat(x)
  824. #define GGML_F32x4_LOAD wasm_v128_load
  825. #define GGML_F32x4_STORE wasm_v128_store
  826. #define GGML_F32x4_FMA(a, b, c) wasm_f32x4_add(wasm_f32x4_mul(b, c), a)
  827. #define GGML_F32x4_ADD wasm_f32x4_add
  828. #define GGML_F32x4_MUL wasm_f32x4_mul
  829. #define GGML_F32x4_REDUCE(res, x) \
  830. { \
  831. int offset = GGML_F32_ARR >> 1; \
  832. for (int i = 0; i < offset; ++i) { \
  833. x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
  834. } \
  835. offset >>= 1; \
  836. for (int i = 0; i < offset; ++i) { \
  837. x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
  838. } \
  839. offset >>= 1; \
  840. for (int i = 0; i < offset; ++i) { \
  841. x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
  842. } \
  843. res = wasm_f32x4_extract_lane(x[0], 0) + \
  844. wasm_f32x4_extract_lane(x[0], 1) + \
  845. wasm_f32x4_extract_lane(x[0], 2) + \
  846. wasm_f32x4_extract_lane(x[0], 3); \
  847. }
  848. #define GGML_F32_VEC GGML_F32x4
  849. #define GGML_F32_VEC_ZERO GGML_F32x4_ZERO
  850. #define GGML_F32_VEC_SET1 GGML_F32x4_SET1
  851. #define GGML_F32_VEC_LOAD GGML_F32x4_LOAD
  852. #define GGML_F32_VEC_STORE GGML_F32x4_STORE
  853. #define GGML_F32_VEC_FMA GGML_F32x4_FMA
  854. #define GGML_F32_VEC_ADD GGML_F32x4_ADD
  855. #define GGML_F32_VEC_MUL GGML_F32x4_MUL
  856. #define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE
  857. // F16 WASM
  858. #define GGML_F16_STEP 16
  859. #define GGML_F16_EPR 4
  860. inline static v128_t __wasm_f16x4_load(const ggml_fp16_t * p) {
  861. float tmp[4];
  862. tmp[0] = GGML_FP16_TO_FP32(p[0]);
  863. tmp[1] = GGML_FP16_TO_FP32(p[1]);
  864. tmp[2] = GGML_FP16_TO_FP32(p[2]);
  865. tmp[3] = GGML_FP16_TO_FP32(p[3]);
  866. return wasm_v128_load(tmp);
  867. }
  868. inline static void __wasm_f16x4_store(ggml_fp16_t * p, v128_t x) {
  869. float tmp[4];
  870. wasm_v128_store(tmp, x);
  871. p[0] = GGML_FP32_TO_FP16(tmp[0]);
  872. p[1] = GGML_FP32_TO_FP16(tmp[1]);
  873. p[2] = GGML_FP32_TO_FP16(tmp[2]);
  874. p[3] = GGML_FP32_TO_FP16(tmp[3]);
  875. }
  876. #define GGML_F16x4 v128_t
  877. #define GGML_F16x4_ZERO wasm_f32x4_splat(0.0f)
  878. #define GGML_F16x4_SET1(x) wasm_f32x4_splat(x)
  879. #define GGML_F16x4_LOAD(x) __wasm_f16x4_load(x)
  880. #define GGML_F16x4_STORE(x, y) __wasm_f16x4_store(x, y)
  881. #define GGML_F16x4_FMA GGML_F32x4_FMA
  882. #define GGML_F16x4_ADD wasm_f32x4_add
  883. #define GGML_F16x4_MUL wasm_f32x4_mul
  884. #define GGML_F16x4_REDUCE(res, x) \
  885. { \
  886. int offset = GGML_F16_ARR >> 1; \
  887. for (int i = 0; i < offset; ++i) { \
  888. x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
  889. } \
  890. offset >>= 1; \
  891. for (int i = 0; i < offset; ++i) { \
  892. x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
  893. } \
  894. offset >>= 1; \
  895. for (int i = 0; i < offset; ++i) { \
  896. x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
  897. } \
  898. res = wasm_f32x4_extract_lane(x[0], 0) + \
  899. wasm_f32x4_extract_lane(x[0], 1) + \
  900. wasm_f32x4_extract_lane(x[0], 2) + \
  901. wasm_f32x4_extract_lane(x[0], 3); \
  902. }
  903. #define GGML_F16_VEC GGML_F16x4
  904. #define GGML_F16_VEC_ZERO GGML_F16x4_ZERO
  905. #define GGML_F16_VEC_SET1 GGML_F16x4_SET1
  906. #define GGML_F16_VEC_LOAD(p, i) GGML_F16x4_LOAD(p)
  907. #define GGML_F16_VEC_STORE(p, r, i) GGML_F16x4_STORE(p, r[i])
  908. #define GGML_F16_VEC_FMA GGML_F16x4_FMA
  909. #define GGML_F16_VEC_ADD GGML_F16x4_ADD
  910. #define GGML_F16_VEC_MUL GGML_F16x4_MUL
  911. #define GGML_F16_VEC_REDUCE GGML_F16x4_REDUCE
  912. #elif defined(__SSE3__)
  913. #define GGML_SIMD
  914. // F32 SSE
  915. #define GGML_F32_STEP 32
  916. #define GGML_F32_EPR 4
  917. #define GGML_F32x4 __m128
  918. #define GGML_F32x4_ZERO _mm_setzero_ps()
  919. #define GGML_F32x4_SET1(x) _mm_set1_ps(x)
  920. #define GGML_F32x4_LOAD _mm_loadu_ps
  921. #define GGML_F32x4_STORE _mm_storeu_ps
  922. #if defined(__FMA__)
  923. // TODO: Does this work?
  924. #define GGML_F32x4_FMA(a, b, c) _mm_fmadd_ps(b, c, a)
  925. #else
  926. #define GGML_F32x4_FMA(a, b, c) _mm_add_ps(_mm_mul_ps(b, c), a)
  927. #endif
  928. #define GGML_F32x4_ADD _mm_add_ps
  929. #define GGML_F32x4_MUL _mm_mul_ps
  930. #define GGML_F32x4_REDUCE(res, x) \
  931. { \
  932. int offset = GGML_F32_ARR >> 1; \
  933. for (int i = 0; i < offset; ++i) { \
  934. x[i] = _mm_add_ps(x[i], x[offset+i]); \
  935. } \
  936. offset >>= 1; \
  937. for (int i = 0; i < offset; ++i) { \
  938. x[i] = _mm_add_ps(x[i], x[offset+i]); \
  939. } \
  940. offset >>= 1; \
  941. for (int i = 0; i < offset; ++i) { \
  942. x[i] = _mm_add_ps(x[i], x[offset+i]); \
  943. } \
  944. const __m128 t0 = _mm_hadd_ps(x[0], x[0]); \
  945. res = _mm_cvtss_f32(_mm_hadd_ps(t0, t0)); \
  946. }
  947. // TODO: is this optimal ?
  948. #define GGML_F32_VEC GGML_F32x4
  949. #define GGML_F32_VEC_ZERO GGML_F32x4_ZERO
  950. #define GGML_F32_VEC_SET1 GGML_F32x4_SET1
  951. #define GGML_F32_VEC_LOAD GGML_F32x4_LOAD
  952. #define GGML_F32_VEC_STORE GGML_F32x4_STORE
  953. #define GGML_F32_VEC_FMA GGML_F32x4_FMA
  954. #define GGML_F32_VEC_ADD GGML_F32x4_ADD
  955. #define GGML_F32_VEC_MUL GGML_F32x4_MUL
  956. #define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE
  957. // F16 SSE
  958. #define GGML_F16_STEP 32
  959. #define GGML_F16_EPR 4
  960. static inline __m128 __sse_f16x4_load(ggml_fp16_t *x) {
  961. float tmp[4];
  962. tmp[0] = GGML_FP16_TO_FP32(x[0]);
  963. tmp[1] = GGML_FP16_TO_FP32(x[1]);
  964. tmp[2] = GGML_FP16_TO_FP32(x[2]);
  965. tmp[3] = GGML_FP16_TO_FP32(x[3]);
  966. return _mm_loadu_ps(tmp);
  967. }
  968. static inline void __sse_f16x4_store(ggml_fp16_t *x, __m128 y) {
  969. float arr[4];
  970. _mm_storeu_ps(arr, y);
  971. x[0] = GGML_FP32_TO_FP16(arr[0]);
  972. x[1] = GGML_FP32_TO_FP16(arr[1]);
  973. x[2] = GGML_FP32_TO_FP16(arr[2]);
  974. x[3] = GGML_FP32_TO_FP16(arr[3]);
  975. }
  976. #define GGML_F32Cx4 __m128
  977. #define GGML_F32Cx4_ZERO _mm_setzero_ps()
  978. #define GGML_F32Cx4_SET1(x) _mm_set1_ps(x)
  979. #define GGML_F32Cx4_LOAD(x) __sse_f16x4_load(x)
  980. #define GGML_F32Cx4_STORE(x, y) __sse_f16x4_store(x, y)
  981. #define GGML_F32Cx4_FMA GGML_F32x4_FMA
  982. #define GGML_F32Cx4_ADD _mm_add_ps
  983. #define GGML_F32Cx4_MUL _mm_mul_ps
  984. #define GGML_F32Cx4_REDUCE GGML_F32x4_REDUCE
  985. #define GGML_F16_VEC GGML_F32Cx4
  986. #define GGML_F16_VEC_ZERO GGML_F32Cx4_ZERO
  987. #define GGML_F16_VEC_SET1 GGML_F32Cx4_SET1
  988. #define GGML_F16_VEC_LOAD(p, i) GGML_F32Cx4_LOAD(p)
  989. #define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx4_STORE(p, r[i])
  990. #define GGML_F16_VEC_FMA GGML_F32Cx4_FMA
  991. #define GGML_F16_VEC_ADD GGML_F32Cx4_ADD
  992. #define GGML_F16_VEC_MUL GGML_F32Cx4_MUL
  993. #define GGML_F16_VEC_REDUCE GGML_F32Cx4_REDUCE
  994. #endif
  995. // GGML_F32_ARR / GGML_F16_ARR
  996. // number of registers to use per step
  997. #ifdef GGML_SIMD
  998. #define GGML_F32_ARR (GGML_F32_STEP/GGML_F32_EPR)
  999. #define GGML_F16_ARR (GGML_F16_STEP/GGML_F16_EPR)
  1000. #endif
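// Worked example (illustrative, derived from the definitions above): with AVX,
// GGML_F32_STEP = 32 and GGML_F32_EPR = 8, so GGML_F32_ARR = 32/8 = 4 accumulator
// registers per step; with NEON F32, GGML_F32_ARR = 16/4 = 4 as well.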
  1001. //
  1002. // fundamental operations
  1003. //
  1004. inline static void ggml_vec_set_i8(const int n, int8_t * x, const int8_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
  1005. inline static void ggml_vec_set_i16(const int n, int16_t * x, const int16_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
  1006. inline static void ggml_vec_set_i32(const int n, int32_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
  1007. inline static void ggml_vec_set_f16(const int n, ggml_fp16_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
  1008. inline static void ggml_vec_add_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i] + y[i]; }
  1009. inline static void ggml_vec_add1_f32(const int n, float * z, const float * x, const float v) { for (int i = 0; i < n; ++i) z[i] = x[i] + v; }
  1010. inline static void ggml_vec_acc_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] += x[i]; }
  1011. inline static void ggml_vec_acc1_f32(const int n, float * y, const float v) { for (int i = 0; i < n; ++i) y[i] += v; }
  1012. inline static void ggml_vec_sub_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i] - y[i]; }
  1013. inline static void ggml_vec_set_f32 (const int n, float * x, const float v) { for (int i = 0; i < n; ++i) x[i] = v; }
  1014. inline static void ggml_vec_cpy_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i]; }
  1015. inline static void ggml_vec_neg_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = -x[i]; }
  1016. inline static void ggml_vec_mul_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]*y[i]; }
  1017. inline static void ggml_vec_div_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]/y[i]; }
  1018. static void ggml_vec_dot_f32(const int n, float * restrict s, const float * restrict x, const float * restrict y) {
  1019. #ifdef GGML_SIMD
  1020. float sumf = 0.0f;
  1021. const int np = (n & ~(GGML_F32_STEP - 1));
  1022. GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO };
  1023. GGML_F32_VEC ax[GGML_F32_ARR];
  1024. GGML_F32_VEC ay[GGML_F32_ARR];
  1025. for (int i = 0; i < np; i += GGML_F32_STEP) {
  1026. for (int j = 0; j < GGML_F32_ARR; j++) {
  1027. ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR);
  1028. ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
  1029. sum[j] = GGML_F32_VEC_FMA(sum[j], ax[j], ay[j]);
  1030. }
  1031. }
  1032. // reduce sum0..sum3 to sum0
  1033. GGML_F32_VEC_REDUCE(sumf, sum);
  1034. // leftovers
  1035. for (int i = np; i < n; ++i) {
  1036. sumf += x[i]*y[i];
  1037. }
  1038. #else
  1039. // scalar
  1040. ggml_float sumf = 0.0;
  1041. for (int i = 0; i < n; ++i) {
  1042. sumf += (ggml_float)(x[i]*y[i]);
  1043. }
  1044. #endif
  1045. *s = sumf;
  1046. }
  1047. static void ggml_vec_dot_f16(const int n, float * restrict s, ggml_fp16_t * restrict x, ggml_fp16_t * restrict y) {
  1048. ggml_float sumf = 0.0;
  1049. #if defined(GGML_SIMD)
  1050. const int np = (n & ~(GGML_F16_STEP - 1));
  1051. GGML_F16_VEC sum[GGML_F16_ARR] = { GGML_F16_VEC_ZERO };
  1052. GGML_F16_VEC ax[GGML_F16_ARR];
  1053. GGML_F16_VEC ay[GGML_F16_ARR];
  1054. for (int i = 0; i < np; i += GGML_F16_STEP) {
  1055. for (int j = 0; j < GGML_F16_ARR; j++) {
  1056. ax[j] = GGML_F16_VEC_LOAD(x + i + j*GGML_F16_EPR, j);
  1057. ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j);
  1058. sum[j] = GGML_F16_VEC_FMA(sum[j], ax[j], ay[j]);
  1059. }
  1060. }
  1061. // reduce sum0..sum3 to sum0
  1062. GGML_F16_VEC_REDUCE(sumf, sum);
  1063. // leftovers
  1064. for (int i = np; i < n; ++i) {
  1065. sumf += (ggml_float)(GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i]));
  1066. }
  1067. #else
  1068. for (int i = 0; i < n; ++i) {
  1069. sumf += (ggml_float)(GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i]));
  1070. }
  1071. #endif
  1072. *s = sumf;
  1073. }
  1074. // compute GGML_VEC_DOT_UNROLL dot products at once
  1075. // xs - x row stride in bytes
  1076. inline static void ggml_vec_dot_f16_unroll(const int n, const int xs, float * restrict s, void * restrict xv, ggml_fp16_t * restrict y) {
  1077. ggml_float sumf[GGML_VEC_DOT_UNROLL] = { 0.0 };
  1078. ggml_fp16_t * restrict x[GGML_VEC_DOT_UNROLL];
  1079. for (int i = 0; i < GGML_VEC_DOT_UNROLL; ++i) {
  1080. x[i] = (ggml_fp16_t *) ((char *) xv + i*xs);
  1081. }
  1082. #if defined(GGML_SIMD)
  1083. const int np = (n & ~(GGML_F16_STEP - 1));
  1084. GGML_F16_VEC sum[GGML_VEC_DOT_UNROLL][GGML_F16_ARR] = { { GGML_F16_VEC_ZERO } };
  1085. GGML_F16_VEC ax[GGML_F16_ARR];
  1086. GGML_F16_VEC ay[GGML_F16_ARR];
  1087. for (int i = 0; i < np; i += GGML_F16_STEP) {
  1088. for (int j = 0; j < GGML_F16_ARR; j++) {
  1089. ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j);
  1090. for (int k = 0; k < GGML_VEC_DOT_UNROLL; ++k) {
  1091. ax[j] = GGML_F16_VEC_LOAD(x[k] + i + j*GGML_F16_EPR, j);
  1092. sum[k][j] = GGML_F16_VEC_FMA(sum[k][j], ax[j], ay[j]);
  1093. }
  1094. }
  1095. }
  1096. // reduce sum0..sum3 to sum0
  1097. for (int k = 0; k < GGML_VEC_DOT_UNROLL; ++k) {
  1098. GGML_F16_VEC_REDUCE(sumf[k], sum[k]);
  1099. }
  1100. // leftovers
  1101. for (int i = np; i < n; ++i) {
  1102. for (int j = 0; j < GGML_VEC_DOT_UNROLL; ++j) {
  1103. sumf[j] += (ggml_float)(GGML_FP16_TO_FP32(x[j][i])*GGML_FP16_TO_FP32(y[i]));
  1104. }
  1105. }
  1106. #else
  1107. for (int i = 0; i < n; ++i) {
  1108. for (int j = 0; j < GGML_VEC_DOT_UNROLL; ++j) {
  1109. sumf[j] += (ggml_float)(GGML_FP16_TO_FP32(x[j][i])*GGML_FP16_TO_FP32(y[i]));
  1110. }
  1111. }
  1112. #endif
  1113. for (int i = 0; i < GGML_VEC_DOT_UNROLL; ++i) {
  1114. s[i] = sumf[i];
  1115. }
  1116. }
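// Illustrative usage sketch (not in the original source; the variable names are hypothetical):
// computing GGML_VEC_DOT_UNROLL dot products of consecutive f16 rows against one y vector.
//
//   float     s[GGML_VEC_DOT_UNROLL];
//   const int row_size = (int) ggml_row_size(GGML_TYPE_F16, n);   // row stride in bytes
//   // x points at GGML_VEC_DOT_UNROLL consecutive rows of n f16 values each
//   ggml_vec_dot_f16_unroll(n, row_size, s, (void *) x, y);
//   // s[k] now holds the dot product of row k of x with y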
  1117. inline static void ggml_vec_mad_f32(const int n, float * restrict y, const float * restrict x, const float v) {
  1118. #if defined(GGML_SIMD)
  1119. const int np = (n & ~(GGML_F32_STEP - 1));
  1120. GGML_F32_VEC vx = GGML_F32_VEC_SET1(v);
  1121. GGML_F32_VEC ax[GGML_F32_ARR];
  1122. GGML_F32_VEC ay[GGML_F32_ARR];
  1123. for (int i = 0; i < np; i += GGML_F32_STEP) {
  1124. for (int j = 0; j < GGML_F32_ARR; j++) {
  1125. ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR);
  1126. ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
  1127. ay[j] = GGML_F32_VEC_FMA(ay[j], ax[j], vx);
  1128. GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]);
  1129. }
  1130. }
  1131. // leftovers
  1132. for (int i = np; i < n; ++i) {
  1133. y[i] += x[i]*v;
  1134. }
  1135. #else
  1136. // scalar
  1137. for (int i = 0; i < n; ++i) {
  1138. y[i] += x[i]*v;
  1139. }
  1140. #endif
  1141. }
  1142. // xs and vs are byte strides of x and v
  1143. inline static void ggml_vec_mad_f32_unroll(const int n, const int xs, const int vs, float * restrict y, const float * restrict xv, const float * restrict vv) {
  1144. const float * restrict x[GGML_VEC_MAD_UNROLL];
  1145. const float * restrict v[GGML_VEC_MAD_UNROLL];
  1146. for (int i = 0; i < GGML_VEC_MAD_UNROLL; ++i) {
  1147. x[i] = (const float *) ((const char *) xv + i*xs);
  1148. v[i] = (const float *) ((const char *) vv + i*vs);
  1149. }
  1150. #if defined(GGML_SIMD)
  1151. const int np = (n & ~(GGML_F32_STEP - 1));
  1152. GGML_F32_VEC vx[GGML_VEC_MAD_UNROLL];
  1153. for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) {
  1154. vx[k] = GGML_F32_VEC_SET1(v[k][0]);
  1155. }
  1156. GGML_F32_VEC ax[GGML_VEC_MAD_UNROLL][GGML_F32_ARR];
  1157. GGML_F32_VEC ay[GGML_F32_ARR];
  1158. for (int i = 0; i < np; i += GGML_F32_STEP) {
  1159. for (int j = 0; j < GGML_F32_ARR; j++) {
  1160. ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
  1161. for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) {
  1162. ax[k][j] = GGML_F32_VEC_LOAD(x[k] + i + j*GGML_F32_EPR);
  1163. ay[j] = GGML_F32_VEC_FMA(ay[j], ax[k][j], vx[k]);
  1164. }
  1165. GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]);
  1166. }
  1167. }
  1168. // leftovers
  1169. for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) {
  1170. for (int i = np; i < n; ++i) {
  1171. y[i] += x[k][i]*v[k][0];
  1172. }
  1173. }
  1174. #else
  1175. // scalar
  1176. for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) {
  1177. for (int i = 0; i < n; ++i) {
  1178. y[i] += x[k][i]*v[k][0];
  1179. }
  1180. }
  1181. #endif
  1182. }
  1183. //inline static void ggml_vec_scale_f32(const int n, float * y, const float v) { for (int i = 0; i < n; ++i) y[i] *= v; }
  1184. inline static void ggml_vec_scale_f32(const int n, float * y, const float v) {
  1185. #if defined(GGML_USE_ACCELERATE)
  1186. vDSP_vsmul(y, 1, &v, y, 1, n);
  1187. #elif defined(GGML_SIMD)
  1188. const int np = (n & ~(GGML_F32_STEP - 1));
  1189. GGML_F32_VEC vx = GGML_F32_VEC_SET1(v);
  1190. GGML_F32_VEC ay[GGML_F32_ARR];
  1191. for (int i = 0; i < np; i += GGML_F32_STEP) {
  1192. for (int j = 0; j < GGML_F32_ARR; j++) {
  1193. ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
  1194. ay[j] = GGML_F32_VEC_MUL(ay[j], vx);
  1195. GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]);
  1196. }
  1197. }
  1198. // leftovers
  1199. for (int i = np; i < n; ++i) {
  1200. y[i] *= v;
  1201. }
  1202. #else
  1203. // scalar
  1204. for (int i = 0; i < n; ++i) {
  1205. y[i] *= v;
  1206. }
  1207. #endif
  1208. }
  1209. inline static void ggml_vec_norm_f32 (const int n, float * s, const float * x) { ggml_vec_dot_f32(n, s, x, x); *s = sqrtf(*s); }
  1210. inline static void ggml_vec_sqr_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i]*x[i]; }
  1211. inline static void ggml_vec_sqrt_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = sqrtf(x[i]); }
  1212. inline static void ggml_vec_log_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = logf(x[i]); }
  1213. inline static void ggml_vec_abs_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = fabsf(x[i]); }
  1214. inline static void ggml_vec_sgn_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 1.f : ((x[i] < 0.f) ? -1.f : 0.f); }
  1215. inline static void ggml_vec_step_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 1.f : 0.f; }
  1216. inline static void ggml_vec_tanh_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = tanhf(x[i]); }
  1217. inline static void ggml_vec_elu_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? x[i] : expf(x[i])-1; }
  1218. inline static void ggml_vec_relu_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? x[i] : 0.f; }
  1219. inline static void ggml_vec_leaky_relu_f32 (const int n, float * y, const float * x, const float ns) { for (int i = 0; i < n; ++i) y[i] = ((x[i] > 0.f) ? x[i] : 0.f) + ns * ((x[i] < 0.0f) ? x[i] : 0.f); }
  1220. // TODO: optimize performance
  1221. inline static void ggml_vec_hardswish_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i] * fminf(1.0f, fmaxf(0.0f, (x[i] + 3.0f) / 6.0f)); }
  1222. inline static void ggml_vec_hardsigmoid_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = fminf(1.0f, fmaxf(0.0f, (x[i] + 3.0f) / 6.0f)); }
  1223. static const float GELU_COEF_A = 0.044715f;
  1224. static const float GELU_QUICK_COEF = -1.702f;
  1225. static const float SQRT_2_OVER_PI = 0.79788456080286535587989211986876f;
  1226. inline static float ggml_gelu_f32(float x) {
  1227. return 0.5f*x*(1.0f + tanhf(SQRT_2_OVER_PI*x*(1.0f + GELU_COEF_A*x*x)));
  1228. }
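// The expression above is the usual tanh approximation of GELU:
//   GELU(x) ~= 0.5 * x * (1 + tanh( sqrt(2/pi) * (x + 0.044715 * x^3) ))
// with sqrt(2/pi) = SQRT_2_OVER_PI and 0.044715 = GELU_COEF_A as defined above.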
  1229. inline static void ggml_vec_gelu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
  1230. const uint16_t * i16 = (const uint16_t *) x;
  1231. for (int i = 0; i < n; ++i) {
  1232. y[i] = ggml_table_gelu_f16[i16[i]];
  1233. }
  1234. }
  1235. #ifdef GGML_GELU_FP16
  1236. inline static void ggml_vec_gelu_f32(const int n, float * y, const float * x) {
  1237. uint16_t t;
  1238. for (int i = 0; i < n; ++i) {
  1239. ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
  1240. memcpy(&t, &fp16, sizeof(uint16_t));
  1241. y[i] = GGML_FP16_TO_FP32(ggml_table_gelu_f16[t]);
  1242. }
  1243. }
  1244. #else
  1245. inline static void ggml_vec_gelu_f32(const int n, float * y, const float * x) {
  1246. for (int i = 0; i < n; ++i) {
  1247. y[i] = ggml_gelu_f32(x[i]);
  1248. }
  1249. }
  1250. #endif
  1251. inline static float ggml_gelu_quick_f32(float x) {
  1252. return x*(1.0f/(1.0f+expf(GELU_QUICK_COEF*x)));
  1253. }
  1254. //inline static void ggml_vec_gelu_quick_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
  1255. // const uint16_t * i16 = (const uint16_t *) x;
  1256. // for (int i = 0; i < n; ++i) {
  1257. // y[i] = ggml_table_gelu_quick_f16[i16[i]];
  1258. // }
  1259. //}
  1260. #ifdef GGML_GELU_QUICK_FP16
  1261. inline static void ggml_vec_gelu_quick_f32(const int n, float * y, const float * x) {
  1262. uint16_t t;
  1263. for (int i = 0; i < n; ++i) {
  1264. ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
  1265. memcpy(&t, &fp16, sizeof(uint16_t));
  1266. y[i] = GGML_FP16_TO_FP32(ggml_table_gelu_quick_f16[t]);
  1267. }
  1268. }
  1269. #else
  1270. inline static void ggml_vec_gelu_quick_f32(const int n, float * y, const float * x) {
  1271. for (int i = 0; i < n; ++i) {
  1272. y[i] = ggml_gelu_quick_f32(x[i]);
  1273. }
  1274. }
  1275. #endif
  1276. // Sigmoid Linear Unit (SiLU) function
  1277. inline static float ggml_silu_f32(float x) {
  1278. return x/(1.0f + expf(-x));
  1279. }
  1280. //inline static void ggml_vec_silu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
  1281. // const uint16_t * i16 = (const uint16_t *) x;
  1282. // for (int i = 0; i < n; ++i) {
  1283. // y[i] = ggml_table_silu_f16[i16[i]];
  1284. // }
  1285. //}
  1286. #ifdef GGML_SILU_FP16
  1287. inline static void ggml_vec_silu_f32(const int n, float * y, const float * x) {
  1288. uint16_t t;
  1289. for (int i = 0; i < n; ++i) {
  1290. ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
  1291. memcpy(&t, &fp16, sizeof(uint16_t));
  1292. y[i] = GGML_FP16_TO_FP32(ggml_table_silu_f16[t]);
  1293. }
  1294. }
  1295. #else
  1296. inline static void ggml_vec_silu_f32(const int n, float * y, const float * x) {
  1297. for (int i = 0; i < n; ++i) {
  1298. y[i] = ggml_silu_f32(x[i]);
  1299. }
  1300. }
  1301. #endif
  1302. inline static float ggml_silu_backward_f32(float x, float dy) {
  1303. const float s = 1.0f/(1.0f + expf(-x));
  1304. return dy*s*(1.0f + x*(1.0f - s));
  1305. }
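// Derivation note (illustrative): with s = 1/(1 + exp(-x)) and silu(x) = x*s,
// d/dx silu(x) = s + x*s*(1 - s) = s*(1 + x*(1 - s)), which is the factor applied to dy above.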
  1306. #ifdef GGML_SILU_FP16
  1307. inline static void ggml_vec_silu_backward_f32(const int n, float * dx, const float * x, const float * dy) {
  1308. for (int i = 0; i < n; ++i) {
// the forward silu was computed from the f16-rounded value of x[i], not from x[i] itself
// so take the derivative at the f16 of x[i]:
  1311. ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
  1312. float usedx = GGML_FP16_TO_FP32(fp16);
  1313. dx[i] = ggml_silu_backward_f32(usedx, dy[i]);
  1314. }
  1315. }
  1316. #else
  1317. inline static void ggml_vec_silu_backward_f32(const int n, float * dx, const float * x, const float * dy) {
  1318. for (int i = 0; i < n; ++i) {
  1319. dx[i] = ggml_silu_backward_f32(x[i], dy[i]);
  1320. }
  1321. }
  1322. #endif
  1323. inline static void ggml_vec_sum_f32(const int n, float * s, const float * x) {
  1324. #ifndef GGML_USE_ACCELERATE
  1325. ggml_float sum = 0.0;
  1326. for (int i = 0; i < n; ++i) {
  1327. sum += (ggml_float)x[i];
  1328. }
  1329. *s = sum;
  1330. #else
  1331. vDSP_sve(x, 1, s, n);
  1332. #endif
  1333. }
  1334. inline static void ggml_vec_sum_f32_ggf(const int n, ggml_float * s, const float * x) {
  1335. ggml_float sum = 0.0;
  1336. for (int i = 0; i < n; ++i) {
  1337. sum += (ggml_float)x[i];
  1338. }
  1339. *s = sum;
  1340. }
  1341. inline static void ggml_vec_sum_f16_ggf(const int n, float * s, const ggml_fp16_t * x) {
  1342. float sum = 0.0f;
  1343. for (int i = 0; i < n; ++i) {
  1344. sum += GGML_FP16_TO_FP32(x[i]);
  1345. }
  1346. *s = sum;
  1347. }
  1348. inline static void ggml_vec_max_f32(const int n, float * s, const float * x) {
  1349. #ifndef GGML_USE_ACCELERATE
  1350. float max = -INFINITY;
  1351. for (int i = 0; i < n; ++i) {
  1352. max = MAX(max, x[i]);
  1353. }
  1354. *s = max;
  1355. #else
  1356. vDSP_maxv(x, 1, s, n);
  1357. #endif
  1358. }
  1359. inline static void ggml_vec_norm_inv_f32(const int n, float * s, const float * x) {
  1360. ggml_vec_norm_f32(n, s, x);
  1361. *s = 1.f/(*s);
  1362. }
  1363. inline static void ggml_vec_argmax_f32(const int n, int * s, const float * x) {
  1364. float max = -INFINITY;
  1365. int idx = 0;
  1366. for (int i = 0; i < n; ++i) {
  1367. max = MAX(max, x[i]);
  1368. if (max == x[i]) { idx = i; }
  1369. }
  1370. *s = idx;
  1371. }
  1372. //
  1373. // data types
  1374. //
  1375. static const char * GGML_OP_NAME[GGML_OP_COUNT] = {
  1376. "NONE",
  1377. "DUP",
  1378. "ADD",
  1379. "ADD1",
  1380. "ACC",
  1381. "SUB",
  1382. "MUL",
  1383. "DIV",
  1384. "SQR",
  1385. "SQRT",
  1386. "LOG",
  1387. "SUM",
  1388. "SUM_ROWS",
  1389. "MEAN",
  1390. "ARGMAX",
  1391. "REPEAT",
  1392. "REPEAT_BACK",
  1393. "CONCAT",
  1394. "SILU_BACK",
  1395. "NORM",
  1396. "RMS_NORM",
  1397. "RMS_NORM_BACK",
  1398. "GROUP_NORM",
  1399. "MUL_MAT",
  1400. "MUL_MAT_ID",
  1401. "OUT_PROD",
  1402. "SCALE",
  1403. "SET",
  1404. "CPY",
  1405. "CONT",
  1406. "RESHAPE",
  1407. "VIEW",
  1408. "PERMUTE",
  1409. "TRANSPOSE",
  1410. "GET_ROWS",
  1411. "GET_ROWS_BACK",
  1412. "DIAG",
  1413. "DIAG_MASK_INF",
  1414. "DIAG_MASK_ZERO",
  1415. "SOFT_MAX",
  1416. "SOFT_MAX_BACK",
  1417. "ROPE",
  1418. "ROPE_BACK",
  1419. "ALIBI",
  1420. "CLAMP",
  1421. "CONV_TRANSPOSE_1D",
  1422. "IM2COL",
  1423. "CONV_TRANSPOSE_2D",
  1424. "POOL_1D",
  1425. "POOL_2D",
  1426. "UPSCALE",
  1427. "PAD",
  1428. "ARGSORT",
  1429. "LEAKY_RELU",
  1430. "FLASH_ATTN",
  1431. "FLASH_FF",
  1432. "FLASH_ATTN_BACK",
  1433. "WIN_PART",
  1434. "WIN_UNPART",
  1435. "GET_REL_POS",
  1436. "ADD_REL_POS",
  1437. "UNARY",
  1438. "MAP_UNARY",
  1439. "MAP_BINARY",
  1440. "MAP_CUSTOM1_F32",
  1441. "MAP_CUSTOM2_F32",
  1442. "MAP_CUSTOM3_F32",
  1443. "MAP_CUSTOM1",
  1444. "MAP_CUSTOM2",
  1445. "MAP_CUSTOM3",
  1446. "CROSS_ENTROPY_LOSS",
  1447. "CROSS_ENTROPY_LOSS_BACK",
  1448. };
  1449. static_assert(GGML_OP_COUNT == 72, "GGML_OP_COUNT != 72");
  1450. static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = {
  1451. "none",
  1452. "x",
  1453. "x+y",
  1454. "x+y",
  1455. "view(x,nb,offset)+=y->x",
  1456. "x-y",
  1457. "x*y",
  1458. "x/y",
  1459. "x^2",
  1460. "√x",
  1461. "log(x)",
  1462. "Σx",
  1463. "Σx_k",
  1464. "Σx/n",
  1465. "argmax(x)",
  1466. "repeat(x)",
  1467. "repeat_back(x)",
  1468. "concat(x, y)",
  1469. "silu_back(x)",
  1470. "norm(x)",
  1471. "rms_norm(x)",
  1472. "rms_norm_back(x)",
  1473. "group_norm(x)",
  1474. "X*Y",
  1475. "X[i]*Y",
  1476. "X*Y",
  1477. "x*v",
  1478. "y-\\>view(x)",
  1479. "x-\\>y",
  1480. "cont(x)",
  1481. "reshape(x)",
  1482. "view(x)",
  1483. "permute(x)",
  1484. "transpose(x)",
  1485. "get_rows(x)",
  1486. "get_rows_back(x)",
  1487. "diag(x)",
  1488. "diag_mask_inf(x)",
  1489. "diag_mask_zero(x)",
  1490. "soft_max(x)",
  1491. "soft_max_back(x)",
  1492. "rope(x)",
  1493. "rope_back(x)",
  1494. "alibi(x)",
  1495. "clamp(x)",
  1496. "conv_transpose_1d(x)",
  1497. "im2col(x)",
  1498. "conv_transpose_2d(x)",
  1499. "pool_1d(x)",
  1500. "pool_2d(x)",
  1501. "upscale(x)",
  1502. "pad(x)",
  1503. "argsort(x)",
  1504. "leaky_relu(x)",
  1505. "flash_attn(x)",
  1506. "flash_ff(x)",
  1507. "flash_attn_back(x)",
  1508. "win_part(x)",
  1509. "win_unpart(x)",
  1510. "get_rel_pos(x)",
  1511. "add_rel_pos(x)",
  1512. "unary(x)",
  1513. "f(x)",
  1514. "f(x,y)",
  1515. "custom_f32(x)",
  1516. "custom_f32(x,y)",
  1517. "custom_f32(x,y,z)",
  1518. "custom(x)",
  1519. "custom(x,y)",
  1520. "custom(x,y,z)",
  1521. "cross_entropy_loss(x,y)",
  1522. "cross_entropy_loss_back(x,y)",
  1523. };
  1524. static_assert(GGML_OP_COUNT == 72, "GGML_OP_COUNT != 72");
  1525. static_assert(GGML_OP_POOL_COUNT == 2, "GGML_OP_POOL_COUNT != 2");
  1526. static const char * GGML_UNARY_OP_NAME[GGML_UNARY_OP_COUNT] = {
  1527. "ABS",
  1528. "SGN",
  1529. "NEG",
  1530. "STEP",
  1531. "TANH",
  1532. "ELU",
  1533. "RELU",
  1534. "GELU",
  1535. "GELU_QUICK",
  1536. "SILU",
  1537. "HARDSWISH",
  1538. "HARDSIGMOID",
  1539. };
  1540. static_assert(GGML_UNARY_OP_COUNT == 12, "GGML_UNARY_OP_COUNT != 12");
  1541. static_assert(sizeof(struct ggml_object)%GGML_MEM_ALIGN == 0, "ggml_object size must be a multiple of GGML_MEM_ALIGN");
  1542. static_assert(sizeof(struct ggml_tensor)%GGML_MEM_ALIGN == 0, "ggml_tensor size must be a multiple of GGML_MEM_ALIGN");
// WARN:
// Mis-configuration can lead to problems that are hard to reason about:
// * At best it crashes or produces nonsense output.
// * At worst the output is subtly wrong and hard to notice.
//
// An op has to enable INIT or FINALIZE when any of its branches needs that pass.
// Take care about compile options (e.g., GGML_USE_xxx).
  1550. static bool GGML_OP_HAS_INIT [GGML_OP_COUNT] = { 0 };
  1551. static bool GGML_OP_HAS_FINALIZE[GGML_OP_COUNT] = { 0 };
  1552. static void ggml_setup_op_has_task_pass(void) {
  1553. { // INIT
  1554. bool * p = GGML_OP_HAS_INIT;
  1555. p[GGML_OP_ACC ] = true;
  1556. p[GGML_OP_MUL_MAT ] = true;
  1557. p[GGML_OP_MUL_MAT_ID ] = true;
  1558. p[GGML_OP_OUT_PROD ] = true;
  1559. p[GGML_OP_SET ] = true;
  1560. p[GGML_OP_GET_ROWS_BACK ] = true;
  1561. p[GGML_OP_DIAG_MASK_INF ] = true;
  1562. p[GGML_OP_DIAG_MASK_ZERO ] = true;
  1563. p[GGML_OP_CONV_TRANSPOSE_1D ] = true;
  1564. p[GGML_OP_CONV_TRANSPOSE_2D ] = true;
  1565. p[GGML_OP_FLASH_ATTN_BACK ] = true;
  1566. p[GGML_OP_CROSS_ENTROPY_LOSS ] = true;
  1567. p[GGML_OP_ADD_REL_POS ] = true;
  1568. }
  1569. { // FINALIZE
  1570. bool * p = GGML_OP_HAS_FINALIZE;
  1571. p[GGML_OP_CROSS_ENTROPY_LOSS ] = true;
  1572. }
  1573. }
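// Illustrative sketch (not in the original source): an op that needs the INIT and/or
// FINALIZE pass must be flagged in ggml_setup_op_has_task_pass() above. For example, a
// hypothetical new op GGML_OP_FOO whose compute kernel expects a zero-initialized
// destination would add, inside the INIT block:
//
//   p[GGML_OP_FOO] = true;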
  1574. //
  1575. // ggml context
  1576. //
  1577. struct ggml_context {
  1578. size_t mem_size;
  1579. void * mem_buffer;
  1580. bool mem_buffer_owned;
  1581. bool no_alloc;
  1582. bool no_alloc_save; // this is used to save the no_alloc state when using scratch buffers
  1583. int n_objects;
  1584. struct ggml_object * objects_begin;
  1585. struct ggml_object * objects_end;
  1586. struct ggml_scratch scratch;
  1587. struct ggml_scratch scratch_save;
  1588. };
  1589. struct ggml_context_container {
  1590. bool used;
  1591. struct ggml_context context;
  1592. };
  1593. //
  1594. // NUMA support
  1595. //
  1596. #define GGML_NUMA_MAX_NODES 8
  1597. #define GGML_NUMA_MAX_CPUS 512
  1598. struct ggml_numa_node {
  1599. uint32_t cpus[GGML_NUMA_MAX_CPUS]; // hardware threads on this node
  1600. uint32_t n_cpus;
  1601. };
  1602. struct ggml_numa_nodes {
  1603. struct ggml_numa_node nodes[GGML_NUMA_MAX_NODES];
  1604. uint32_t n_nodes;
  1605. uint32_t total_cpus; // hardware threads on system
  1606. };
  1607. //
  1608. // ggml state
  1609. //
  1610. struct ggml_state {
  1611. struct ggml_context_container contexts[GGML_MAX_CONTEXTS];
  1612. struct ggml_numa_nodes numa;
  1613. };
  1614. // global state
  1615. static struct ggml_state g_state;
  1616. static atomic_int g_state_barrier = 0;
  1617. // barrier via spin lock
  1618. inline static void ggml_critical_section_start(void) {
  1619. int processing = atomic_fetch_add(&g_state_barrier, 1);
  1620. while (processing > 0) {
  1621. // wait for other threads to finish
  1622. atomic_fetch_sub(&g_state_barrier, 1);
  1623. sched_yield(); // TODO: reconsider this
  1624. processing = atomic_fetch_add(&g_state_barrier, 1);
  1625. }
  1626. }
  1627. // TODO: make this somehow automatically executed
  1628. // some sort of "sentry" mechanism
  1629. inline static void ggml_critical_section_end(void) {
  1630. atomic_fetch_sub(&g_state_barrier, 1);
  1631. }
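// Illustrative usage sketch (not in the original source): code that reads or mutates the
// global g_state should be bracketed by the spin-lock pair, as ggml_init() and ggml_free()
// do further below:
//
//   ggml_critical_section_start();
//   // ... read/modify g_state ...
//   ggml_critical_section_end();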
  1632. void ggml_numa_init(void) {
  1633. if (g_state.numa.n_nodes > 0) {
  1634. fprintf(stderr, "ggml_numa_init: NUMA already initialized\n");
  1635. return;
  1636. }
  1637. #ifdef __linux__
  1638. struct stat st;
  1639. char path[256];
  1640. int rv;
  1641. // enumerate nodes
  1642. while (g_state.numa.n_nodes < GGML_NUMA_MAX_NODES) {
  1643. rv = snprintf(path, sizeof(path), "/sys/devices/system/node/node%u", g_state.numa.n_nodes);
  1644. GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path));
  1645. if (stat(path, &st) != 0) { break; }
  1646. ++g_state.numa.n_nodes;
  1647. }
  1648. // enumerate CPUs
  1649. while (g_state.numa.total_cpus < GGML_NUMA_MAX_CPUS) {
  1650. rv = snprintf(path, sizeof(path), "/sys/devices/system/cpu/cpu%u", g_state.numa.total_cpus);
  1651. GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path));
  1652. if (stat(path, &st) != 0) { break; }
  1653. ++g_state.numa.total_cpus;
  1654. }
  1655. GGML_PRINT_DEBUG("found %u numa nodes, %u CPUs\n", g_state.numa.n_nodes, g_state.numa.total_cpus);
  1656. if (g_state.numa.n_nodes < 1 || g_state.numa.total_cpus < 1) {
  1657. g_state.numa.n_nodes = 0;
  1658. return;
  1659. }
  1660. for (uint32_t n = 0; n < g_state.numa.n_nodes; ++n) {
  1661. struct ggml_numa_node * node = &g_state.numa.nodes[n];
  1662. GGML_PRINT_DEBUG("CPUs on node %u:", n);
  1663. node->n_cpus = 0;
  1664. for (uint32_t c = 0; c < g_state.numa.total_cpus; ++c) {
  1665. rv = snprintf(path, sizeof(path), "/sys/devices/system/node/node%u/cpu%u", n, c);
  1666. GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path));
  1667. if (stat(path, &st) == 0) {
  1668. node->cpus[node->n_cpus++] = c;
  1669. GGML_PRINT_DEBUG(" %u", c);
  1670. }
  1671. }
  1672. GGML_PRINT_DEBUG("\n");
  1673. }
  1674. if (ggml_is_numa()) {
  1675. FILE *fptr = fopen("/proc/sys/kernel/numa_balancing", "r");
  1676. if (fptr != NULL) {
  1677. char buf[42];
  1678. if (fgets(buf, sizeof(buf), fptr) && strncmp(buf, "0\n", sizeof(buf)) != 0) {
  1679. GGML_PRINT("WARNING: /proc/sys/kernel/numa_balancing is enabled, this has been observed to impair performance\n");
  1680. }
  1681. fclose(fptr);
  1682. }
  1683. }
  1684. #else
  1685. // TODO
  1686. #endif
  1687. }
  1688. bool ggml_is_numa(void) {
  1689. return g_state.numa.n_nodes > 1;
  1690. }
  1691. ////////////////////////////////////////////////////////////////////////////////
  1692. void ggml_print_object(const struct ggml_object * obj) {
  1693. GGML_PRINT(" - ggml_object: type = %d, offset = %zu, size = %zu, next = %p\n",
  1694. obj->type, obj->offs, obj->size, (const void *) obj->next);
  1695. }
  1696. void ggml_print_objects(const struct ggml_context * ctx) {
  1697. struct ggml_object * obj = ctx->objects_begin;
  1698. GGML_PRINT("%s: objects in context %p:\n", __func__, (const void *) ctx);
  1699. while (obj != NULL) {
  1700. ggml_print_object(obj);
  1701. obj = obj->next;
  1702. }
  1703. GGML_PRINT("%s: --- end ---\n", __func__);
  1704. }
  1705. GGML_CALL int64_t ggml_nelements(const struct ggml_tensor * tensor) {
  1706. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  1707. return tensor->ne[0]*tensor->ne[1]*tensor->ne[2]*tensor->ne[3];
  1708. }
  1709. GGML_CALL int64_t ggml_nrows(const struct ggml_tensor * tensor) {
  1710. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  1711. return tensor->ne[1]*tensor->ne[2]*tensor->ne[3];
  1712. }
  1713. GGML_CALL size_t ggml_nbytes(const struct ggml_tensor * tensor) {
  1714. size_t nbytes;
  1715. size_t blck_size = ggml_blck_size(tensor->type);
  1716. if (blck_size == 1) {
  1717. nbytes = ggml_type_size(tensor->type);
  1718. for (int i = 0; i < GGML_MAX_DIMS; ++i) {
  1719. nbytes += (tensor->ne[i] - 1)*tensor->nb[i];
  1720. }
  1721. }
  1722. else {
  1723. nbytes = tensor->ne[0]*tensor->nb[0]/blck_size;
  1724. for (int i = 1; i < GGML_MAX_DIMS; ++i) {
  1725. nbytes += (tensor->ne[i] - 1)*tensor->nb[i];
  1726. }
  1727. }
  1728. return nbytes;
  1729. }
  1730. size_t ggml_nbytes_pad(const struct ggml_tensor * tensor) {
  1731. return GGML_PAD(ggml_nbytes(tensor), GGML_MEM_ALIGN);
  1732. }
  1733. GGML_CALL int ggml_blck_size(enum ggml_type type) {
  1734. return type_traits[type].blck_size;
  1735. }
  1736. GGML_CALL size_t ggml_type_size(enum ggml_type type) {
  1737. return type_traits[type].type_size;
  1738. }
  1739. GGML_CALL size_t ggml_row_size(enum ggml_type type, int64_t ne) {
  1740. assert(ne % ggml_blck_size(type) == 0);
  1741. return ggml_type_size(type)*ne/ggml_blck_size(type);
  1742. }
  1743. double ggml_type_sizef(enum ggml_type type) {
  1744. return ((double)(type_traits[type].type_size))/type_traits[type].blck_size;
  1745. }
  1746. GGML_CALL const char * ggml_type_name(enum ggml_type type) {
  1747. return type_traits[type].type_name;
  1748. }
  1749. GGML_CALL bool ggml_is_quantized(enum ggml_type type) {
  1750. return type_traits[type].is_quantized;
  1751. }
  1752. GGML_CALL const char * ggml_op_name(enum ggml_op op) {
  1753. return GGML_OP_NAME[op];
  1754. }
  1755. const char * ggml_op_symbol(enum ggml_op op) {
  1756. return GGML_OP_SYMBOL[op];
  1757. }
  1758. const char * ggml_unary_op_name(enum ggml_unary_op op) {
  1759. return GGML_UNARY_OP_NAME[op];
  1760. }
  1761. GGML_CALL const char * ggml_op_desc(const struct ggml_tensor * t) {
  1762. if (t->op == GGML_OP_UNARY) {
  1763. enum ggml_unary_op uop = ggml_get_unary_op(t);
  1764. return ggml_unary_op_name(uop);
  1765. }
  1766. else {
  1767. return ggml_op_name(t->op);
  1768. }
  1769. }
  1770. GGML_CALL size_t ggml_element_size(const struct ggml_tensor * tensor) {
  1771. return ggml_type_size(tensor->type);
  1772. }
  1773. bool ggml_is_scalar(const struct ggml_tensor * tensor) {
  1774. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  1775. return tensor->ne[0] == 1 && tensor->ne[1] == 1 && tensor->ne[2] == 1 && tensor->ne[3] == 1;
  1776. }
  1777. bool ggml_is_vector(const struct ggml_tensor * tensor) {
  1778. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  1779. return tensor->ne[1] == 1 && tensor->ne[2] == 1 && tensor->ne[3] == 1;
  1780. }
  1781. bool ggml_is_matrix(const struct ggml_tensor * tensor) {
  1782. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  1783. return tensor->ne[2] == 1 && tensor->ne[3] == 1;
  1784. }
  1785. bool ggml_is_3d(const struct ggml_tensor * tensor) {
  1786. return tensor->ne[3] == 1;
  1787. }
  1788. int ggml_n_dims(const struct ggml_tensor * tensor) {
  1789. for (int i = GGML_MAX_DIMS - 1; i >= 1; --i) {
  1790. if (tensor->ne[i] > 1) {
  1791. return i + 1;
  1792. }
  1793. }
  1794. return 1;
  1795. }
  1796. static inline bool ggml_can_mul_mat(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
  1797. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  1798. return (t0->ne[0] == t1->ne[0]) &&
  1799. (t1->ne[2]%t0->ne[2] == 0) && // verify t0 is broadcastable
  1800. (t1->ne[3]%t0->ne[3] == 0);
  1801. }
  1802. static inline bool ggml_can_out_prod(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
  1803. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  1804. return (t0->ne[1] == t1->ne[1]) &&
  1805. (t1->ne[2]%t0->ne[2] == 0) && // verify t0 is broadcastable
  1806. (t1->ne[3]%t0->ne[3] == 0);
  1807. }
  1808. enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype) {
  1809. enum ggml_type wtype = GGML_TYPE_COUNT;
  1810. switch (ftype) {
  1811. case GGML_FTYPE_ALL_F32: wtype = GGML_TYPE_F32; break;
  1812. case GGML_FTYPE_MOSTLY_F16: wtype = GGML_TYPE_F16; break;
  1813. case GGML_FTYPE_MOSTLY_Q4_0: wtype = GGML_TYPE_Q4_0; break;
  1814. case GGML_FTYPE_MOSTLY_Q4_1: wtype = GGML_TYPE_Q4_1; break;
  1815. case GGML_FTYPE_MOSTLY_Q5_0: wtype = GGML_TYPE_Q5_0; break;
  1816. case GGML_FTYPE_MOSTLY_Q5_1: wtype = GGML_TYPE_Q5_1; break;
  1817. case GGML_FTYPE_MOSTLY_Q8_0: wtype = GGML_TYPE_Q8_0; break;
  1818. case GGML_FTYPE_MOSTLY_Q2_K: wtype = GGML_TYPE_Q2_K; break;
  1819. case GGML_FTYPE_MOSTLY_Q3_K: wtype = GGML_TYPE_Q3_K; break;
  1820. case GGML_FTYPE_MOSTLY_Q4_K: wtype = GGML_TYPE_Q4_K; break;
  1821. case GGML_FTYPE_MOSTLY_Q5_K: wtype = GGML_TYPE_Q5_K; break;
  1822. case GGML_FTYPE_MOSTLY_Q6_K: wtype = GGML_TYPE_Q6_K; break;
  1823. case GGML_FTYPE_MOSTLY_IQ2_XXS: wtype = GGML_TYPE_IQ2_XXS; break;
  1824. case GGML_FTYPE_MOSTLY_IQ2_XS: wtype = GGML_TYPE_IQ2_XS; break;
  1825. case GGML_FTYPE_UNKNOWN: wtype = GGML_TYPE_COUNT; break;
  1826. case GGML_FTYPE_MOSTLY_Q4_1_SOME_F16: wtype = GGML_TYPE_COUNT; break;
  1827. }
  1828. GGML_ASSERT(wtype != GGML_TYPE_COUNT);
  1829. return wtype;
  1830. }
  1831. size_t ggml_tensor_overhead(void) {
  1832. return GGML_OBJECT_SIZE + GGML_TENSOR_SIZE;
  1833. }
  1834. GGML_CALL bool ggml_is_transposed(const struct ggml_tensor * tensor) {
  1835. return tensor->nb[0] > tensor->nb[1];
  1836. }
  1837. GGML_CALL bool ggml_is_contiguous(const struct ggml_tensor * tensor) {
  1838. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  1839. return
  1840. tensor->nb[0] == ggml_type_size(tensor->type) &&
  1841. tensor->nb[1] == (tensor->nb[0]*tensor->ne[0])/ggml_blck_size(tensor->type) &&
  1842. tensor->nb[2] == tensor->nb[1]*tensor->ne[1] &&
  1843. tensor->nb[3] == tensor->nb[2]*tensor->ne[2];
  1844. }
  1845. static inline bool ggml_is_contiguous_except_dim_1(const struct ggml_tensor * tensor) {
  1846. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  1847. return
  1848. tensor->nb[0] == ggml_type_size(tensor->type) &&
  1849. tensor->nb[2] == tensor->nb[1]*tensor->ne[1] &&
  1850. tensor->nb[3] == tensor->nb[2]*tensor->ne[2];
  1851. }
  1852. GGML_CALL bool ggml_is_permuted(const struct ggml_tensor * tensor) {
  1853. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  1854. return tensor->nb[0] > tensor->nb[1] || tensor->nb[1] > tensor->nb[2] || tensor->nb[2] > tensor->nb[3];
  1855. }
  1856. static inline bool ggml_is_padded_1d(const struct ggml_tensor * tensor) {
  1857. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  1858. return
  1859. tensor->nb[0] == ggml_type_size(tensor->type) &&
  1860. tensor->nb[2] == tensor->nb[1]*tensor->ne[1] &&
  1861. tensor->nb[3] == tensor->nb[2]*tensor->ne[2];
  1862. }
  1863. bool ggml_are_same_shape(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
  1864. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  1865. return
  1866. (t0->ne[0] == t1->ne[0] ) &&
  1867. (t0->ne[1] == t1->ne[1] ) &&
  1868. (t0->ne[2] == t1->ne[2] ) &&
  1869. (t0->ne[3] == t1->ne[3] );
  1870. }
// check if t1 can be represented as a repetition of t0
  1872. static inline bool ggml_can_repeat(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
  1873. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  1874. return
  1875. (t1->ne[0]%t0->ne[0] == 0) &&
  1876. (t1->ne[1]%t0->ne[1] == 0) &&
  1877. (t1->ne[2]%t0->ne[2] == 0) &&
  1878. (t1->ne[3]%t0->ne[3] == 0);
  1879. }
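// Worked example (illustrative): t0 with ne = {2, 3, 1, 1} can be repeated into t1 with
// ne = {4, 6, 5, 1} since every t1 dimension is a multiple of the matching t0 dimension,
// but not into ne = {4, 7, 5, 1} because 7 % 3 != 0.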
  1880. static inline bool ggml_can_repeat_rows(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
  1881. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  1882. return (t0->ne[0] == t1->ne[0]) && ggml_can_repeat(t0, t1);
  1883. }
  1884. static inline int ggml_up32(int n) {
  1885. return (n + 31) & ~31;
  1886. }
  1887. //static inline int ggml_up64(int n) {
  1888. // return (n + 63) & ~63;
  1889. //}
  1890. static inline int ggml_up(int n, int m) {
  1891. // assert m is a power of 2
  1892. GGML_ASSERT((m & (m - 1)) == 0);
  1893. return (n + m - 1) & ~(m - 1);
  1894. }
  1895. // assert that pointer is aligned to GGML_MEM_ALIGN
  1896. #define ggml_assert_aligned(ptr) \
  1897. GGML_ASSERT(((uintptr_t) (ptr))%GGML_MEM_ALIGN == 0)
  1898. ////////////////////////////////////////////////////////////////////////////////
  1899. struct ggml_context * ggml_init(struct ggml_init_params params) {
  1900. // make this function thread safe
  1901. ggml_critical_section_start();
  1902. static bool is_first_call = true;
  1903. if (is_first_call) {
  1904. // initialize time system (required on Windows)
  1905. ggml_time_init();
  1906. // initialize GELU, Quick GELU, SILU and EXP F32 tables
  1907. {
  1908. const uint64_t t_start = ggml_time_us(); UNUSED(t_start);
  1909. ggml_fp16_t ii;
  1910. for (int i = 0; i < (1 << 16); ++i) {
  1911. uint16_t ui = i;
  1912. memcpy(&ii, &ui, sizeof(ii));
  1913. const float f = ggml_table_f32_f16[i] = GGML_COMPUTE_FP16_TO_FP32(ii);
  1914. ggml_table_gelu_f16[i] = GGML_FP32_TO_FP16(ggml_gelu_f32(f));
  1915. ggml_table_gelu_quick_f16[i] = GGML_FP32_TO_FP16(ggml_gelu_quick_f32(f));
  1916. ggml_table_silu_f16[i] = GGML_FP32_TO_FP16(ggml_silu_f32(f));
  1917. ggml_table_exp_f16[i] = GGML_FP32_TO_FP16(expf(f));
  1918. }
  1919. const uint64_t t_end = ggml_time_us(); UNUSED(t_end);
  1920. GGML_PRINT_DEBUG("%s: GELU, Quick GELU, SILU and EXP tables initialized in %f ms\n", __func__, (t_end - t_start)/1000.0f);
  1921. }
  1922. // initialize g_state
  1923. {
  1924. const uint64_t t_start = ggml_time_us(); UNUSED(t_start);
  1925. g_state = (struct ggml_state) {
  1926. /*.contexts =*/ { { 0 } },
  1927. /*.numa =*/ {
  1928. .n_nodes = 0,
  1929. .total_cpus = 0,
  1930. },
  1931. };
  1932. for (int i = 0; i < GGML_MAX_CONTEXTS; ++i) {
  1933. g_state.contexts[i].used = false;
  1934. }
  1935. const uint64_t t_end = ggml_time_us(); UNUSED(t_end);
  1936. GGML_PRINT_DEBUG("%s: g_state initialized in %f ms\n", __func__, (t_end - t_start)/1000.0f);
  1937. }
  1938. #if defined(GGML_USE_CUBLAS)
  1939. ggml_init_cublas();
  1940. #elif defined(GGML_USE_CLBLAST)
  1941. ggml_cl_init();
  1942. #endif
  1943. ggml_setup_op_has_task_pass();
  1944. is_first_call = false;
  1945. }
  1946. // find non-used context in g_state
  1947. struct ggml_context * ctx = NULL;
  1948. for (int i = 0; i < GGML_MAX_CONTEXTS; i++) {
  1949. if (!g_state.contexts[i].used) {
  1950. g_state.contexts[i].used = true;
  1951. ctx = &g_state.contexts[i].context;
  1952. GGML_PRINT_DEBUG("%s: found unused context %d\n", __func__, i);
  1953. break;
  1954. }
  1955. }
  1956. if (ctx == NULL) {
  1957. GGML_PRINT_DEBUG("%s: no unused context found\n", __func__);
  1958. ggml_critical_section_end();
  1959. return NULL;
  1960. }
// allow calling ggml_init with 0 size
  1962. if (params.mem_size == 0) {
  1963. params.mem_size = GGML_MEM_ALIGN;
  1964. }
  1965. const size_t mem_size = params.mem_buffer ? params.mem_size : GGML_PAD(params.mem_size, GGML_MEM_ALIGN);
  1966. *ctx = (struct ggml_context) {
  1967. /*.mem_size =*/ mem_size,
  1968. /*.mem_buffer =*/ params.mem_buffer ? params.mem_buffer : GGML_ALIGNED_MALLOC(mem_size),
  1969. /*.mem_buffer_owned =*/ params.mem_buffer ? false : true,
  1970. /*.no_alloc =*/ params.no_alloc,
  1971. /*.no_alloc_save =*/ params.no_alloc,
  1972. /*.n_objects =*/ 0,
  1973. /*.objects_begin =*/ NULL,
  1974. /*.objects_end =*/ NULL,
  1975. /*.scratch =*/ { 0, 0, NULL, },
  1976. /*.scratch_save =*/ { 0, 0, NULL, },
  1977. };
  1978. GGML_ASSERT(ctx->mem_buffer != NULL);
  1979. ggml_assert_aligned(ctx->mem_buffer);
  1980. GGML_PRINT_DEBUG("%s: context initialized\n", __func__);
  1981. ggml_critical_section_end();
  1982. return ctx;
  1983. }
  1984. void ggml_free(struct ggml_context * ctx) {
  1985. if (ctx == NULL) {
  1986. return;
  1987. }
  1988. // make this function thread safe
  1989. ggml_critical_section_start();
  1990. bool found = false;
  1991. for (int i = 0; i < GGML_MAX_CONTEXTS; i++) {
  1992. if (&g_state.contexts[i].context == ctx) {
  1993. g_state.contexts[i].used = false;
  1994. GGML_PRINT_DEBUG("%s: context %d has been freed. memory used = %zu\n",
  1995. __func__, i, ggml_used_mem(ctx));
  1996. if (ctx->mem_buffer_owned) {
  1997. GGML_ALIGNED_FREE(ctx->mem_buffer);
  1998. }
  1999. found = true;
  2000. break;
  2001. }
  2002. }
  2003. if (!found) {
  2004. GGML_PRINT_DEBUG("%s: context not found\n", __func__);
  2005. }
  2006. ggml_critical_section_end();
  2007. }
  2008. size_t ggml_used_mem(const struct ggml_context * ctx) {
  2009. return ctx->objects_end == NULL ? 0 : ctx->objects_end->offs + ctx->objects_end->size;
  2010. }
  2011. size_t ggml_set_scratch(struct ggml_context * ctx, struct ggml_scratch scratch) {
  2012. const size_t result = ctx->scratch.data ? ctx->scratch.offs : 0;
  2013. ctx->scratch = scratch;
  2014. return result;
  2015. }
  2016. bool ggml_get_no_alloc(struct ggml_context * ctx) {
  2017. return ctx->no_alloc;
  2018. }
  2019. void ggml_set_no_alloc(struct ggml_context * ctx, bool no_alloc) {
  2020. ctx->no_alloc = no_alloc;
  2021. }
  2022. void * ggml_get_mem_buffer(const struct ggml_context * ctx) {
  2023. return ctx->mem_buffer;
  2024. }
  2025. size_t ggml_get_mem_size(const struct ggml_context * ctx) {
  2026. return ctx->mem_size;
  2027. }
  2028. size_t ggml_get_max_tensor_size(const struct ggml_context * ctx) {
  2029. size_t max_size = 0;
  2030. for (struct ggml_tensor * tensor = ggml_get_first_tensor(ctx); tensor != NULL; tensor = ggml_get_next_tensor(ctx, tensor)) {
  2031. max_size = MAX(max_size, ggml_nbytes(tensor));
  2032. }
  2033. return max_size;
  2034. }
// IMPORTANT:
// when creating "opt" tensors, always save and load the scratch buffer
// this is an error-prone process, but it is necessary to support inplace
// operators when using scratch buffers
// TODO: implement a better way
  2040. static void ggml_scratch_save(struct ggml_context * ctx) {
  2041. // this is needed to allow opt tensors to store their data
  2042. // TODO: again, need to find a better way
  2043. ctx->no_alloc_save = ctx->no_alloc;
  2044. ctx->no_alloc = false;
  2045. ctx->scratch_save = ctx->scratch;
  2046. ctx->scratch.data = NULL;
  2047. }
  2048. static void ggml_scratch_load(struct ggml_context * ctx) {
  2049. ctx->no_alloc = ctx->no_alloc_save;
  2050. ctx->scratch = ctx->scratch_save;
  2051. }
  2052. ////////////////////////////////////////////////////////////////////////////////
  2053. static struct ggml_object * ggml_new_object(struct ggml_context * ctx, enum ggml_object_type type, size_t size) {
  2054. // always insert objects at the end of the context's memory pool
  2055. struct ggml_object * obj_cur = ctx->objects_end;
  2056. const size_t cur_offs = obj_cur == NULL ? 0 : obj_cur->offs;
  2057. const size_t cur_size = obj_cur == NULL ? 0 : obj_cur->size;
  2058. const size_t cur_end = cur_offs + cur_size;
  2059. // align to GGML_MEM_ALIGN
  2060. size_t size_needed = GGML_PAD(size, GGML_MEM_ALIGN);
  2061. char * const mem_buffer = ctx->mem_buffer;
  2062. struct ggml_object * const obj_new = (struct ggml_object *)(mem_buffer + cur_end);
  2063. if (cur_end + size_needed + GGML_OBJECT_SIZE > ctx->mem_size) {
  2064. GGML_PRINT("%s: not enough space in the context's memory pool (needed %zu, available %zu)\n",
2065. __func__, cur_end + size_needed + GGML_OBJECT_SIZE, ctx->mem_size);
  2066. assert(false);
  2067. return NULL;
  2068. }
  2069. *obj_new = (struct ggml_object) {
  2070. .offs = cur_end + GGML_OBJECT_SIZE,
  2071. .size = size_needed,
  2072. .next = NULL,
  2073. .type = type,
  2074. };
  2075. ggml_assert_aligned(mem_buffer + obj_new->offs);
  2076. if (obj_cur != NULL) {
  2077. obj_cur->next = obj_new;
  2078. } else {
  2079. // this is the first object in this context
  2080. ctx->objects_begin = obj_new;
  2081. }
  2082. ctx->objects_end = obj_new;
  2083. //printf("%s: inserted new object at %zu, size = %zu\n", __func__, cur_end, obj_new->size);
  2084. return obj_new;
  2085. }
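// Resulting pool layout (sketch): objects are appended back-to-back, each one a
// GGML_OBJECT_SIZE header immediately followed by its padded payload. obj->offs is the
// payload offset relative to ctx->mem_buffer, so the next header starts at
// obj->offs + obj->size.
//
//   mem_buffer
//   |[hdr0][payload0........][hdr1][payload1....]...
//          ^ offs0 = GGML_OBJECT_SIZE
//                            next header at offs0 + size0, so offs1 = offs0 + size0 + GGML_OBJECT_SIZE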
  2086. static struct ggml_tensor * ggml_new_tensor_impl(
  2087. struct ggml_context * ctx,
  2088. enum ggml_type type,
  2089. int n_dims,
  2090. const int64_t * ne,
  2091. struct ggml_tensor * view_src,
  2092. size_t view_offs) {
  2093. assert(n_dims >= 1 && n_dims <= GGML_MAX_DIMS);
  2094. // find the base tensor and absolute offset
  2095. if (view_src != NULL && view_src->view_src != NULL) {
  2096. view_offs += view_src->view_offs;
  2097. view_src = view_src->view_src;
  2098. }
  2099. size_t data_size = ggml_row_size(type, ne[0]);
  2100. for (int i = 1; i < n_dims; i++) {
  2101. data_size *= ne[i];
  2102. }
  2103. GGML_ASSERT(view_src == NULL || data_size + view_offs <= ggml_nbytes(view_src));
  2104. void * data = view_src != NULL ? view_src->data : NULL;
  2105. if (data != NULL) {
  2106. data = (char *) data + view_offs;
  2107. }
  2108. size_t obj_alloc_size = 0;
  2109. if (view_src == NULL && !ctx->no_alloc) {
  2110. if (ctx->scratch.data != NULL) {
  2111. // allocate tensor data in the scratch buffer
  2112. if (ctx->scratch.offs + data_size > ctx->scratch.size) {
  2113. GGML_PRINT("%s: not enough space in the scratch memory pool (needed %zu, available %zu)\n",
  2114. __func__, ctx->scratch.offs + data_size, ctx->scratch.size);
  2115. assert(false);
  2116. return NULL;
  2117. }
  2118. data = (char * const) ctx->scratch.data + ctx->scratch.offs;
  2119. ctx->scratch.offs += data_size;
  2120. } else {
  2121. // allocate tensor data in the context's memory pool
  2122. obj_alloc_size = data_size;
  2123. }
  2124. }
  2125. struct ggml_object * const obj_new = ggml_new_object(ctx, GGML_OBJECT_TENSOR, GGML_TENSOR_SIZE + obj_alloc_size);
  2126. // TODO: for recoverable errors, we would need to free the data allocated from the scratch buffer here
  2127. struct ggml_tensor * const result = (struct ggml_tensor *)((char *)ctx->mem_buffer + obj_new->offs);
  2128. *result = (struct ggml_tensor) {
  2129. /*.type =*/ type,
  2130. /*.backend =*/ GGML_BACKEND_CPU,
  2131. /*.buffer =*/ NULL,
  2132. /*.ne =*/ { 1, 1, 1, 1 },
  2133. /*.nb =*/ { 0, 0, 0, 0 },
  2134. /*.op =*/ GGML_OP_NONE,
  2135. /*.op_params =*/ { 0 },
  2136. /*.is_param =*/ false,
  2137. /*.grad =*/ NULL,
  2138. /*.src =*/ { NULL },
  2139. /*.perf_runs =*/ 0,
  2140. /*.perf_cycles =*/ 0,
  2141. /*.perf_time_us =*/ 0,
  2142. /*.view_src =*/ view_src,
  2143. /*.view_offs =*/ view_offs,
  2144. /*.data =*/ obj_alloc_size > 0 ? (void *)(result + 1) : data,
  2145. /*.name =*/ { 0 },
  2146. /*.extra =*/ NULL,
  2147. /*.padding =*/ { 0 },
  2148. };
  2149. // TODO: this should not be needed as long as we don't rely on aligned SIMD loads
  2150. //ggml_assert_aligned(result->data);
  2151. for (int i = 0; i < n_dims; i++) {
  2152. result->ne[i] = ne[i];
  2153. }
  2154. result->nb[0] = ggml_type_size(type);
  2155. result->nb[1] = result->nb[0]*(result->ne[0]/ggml_blck_size(type));
  2156. for (int i = 2; i < GGML_MAX_DIMS; i++) {
  2157. result->nb[i] = result->nb[i - 1]*result->ne[i - 1];
  2158. }
  2159. ctx->n_objects++;
  2160. return result;
  2161. }
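// Stride example (sketch): for a GGML_TYPE_F32 tensor with ne = {4, 3, 2, 1} the loop
// above yields nb[0] = 4 (sizeof(float)), nb[1] = 16, nb[2] = 48, nb[3] = 96, i.e. the
// byte offset of element (i0,i1,i2,i3) is i0*nb[0] + i1*nb[1] + i2*nb[2] + i3*nb[3].
// For quantized types nb[1] is derived from the block size, so ne[0] is expected to be
// a multiple of ggml_blck_size(type) for the row stride to be exact.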
  2162. struct ggml_tensor * ggml_new_tensor(
  2163. struct ggml_context * ctx,
  2164. enum ggml_type type,
  2165. int n_dims,
  2166. const int64_t * ne) {
  2167. return ggml_new_tensor_impl(ctx, type, n_dims, ne, NULL, 0);
  2168. }
  2169. struct ggml_tensor * ggml_new_tensor_1d(
  2170. struct ggml_context * ctx,
  2171. enum ggml_type type,
  2172. int64_t ne0) {
  2173. return ggml_new_tensor(ctx, type, 1, &ne0);
  2174. }
  2175. struct ggml_tensor * ggml_new_tensor_2d(
  2176. struct ggml_context * ctx,
  2177. enum ggml_type type,
  2178. int64_t ne0,
  2179. int64_t ne1) {
  2180. const int64_t ne[2] = { ne0, ne1 };
  2181. return ggml_new_tensor(ctx, type, 2, ne);
  2182. }
  2183. struct ggml_tensor * ggml_new_tensor_3d(
  2184. struct ggml_context * ctx,
  2185. enum ggml_type type,
  2186. int64_t ne0,
  2187. int64_t ne1,
  2188. int64_t ne2) {
  2189. const int64_t ne[3] = { ne0, ne1, ne2 };
  2190. return ggml_new_tensor(ctx, type, 3, ne);
  2191. }
  2192. struct ggml_tensor * ggml_new_tensor_4d(
  2193. struct ggml_context * ctx,
  2194. enum ggml_type type,
  2195. int64_t ne0,
  2196. int64_t ne1,
  2197. int64_t ne2,
  2198. int64_t ne3) {
  2199. const int64_t ne[4] = { ne0, ne1, ne2, ne3 };
  2200. return ggml_new_tensor(ctx, type, 4, ne);
  2201. }
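// Usage sketch (illustrative): the _1d.._4d helpers just fill the ne[] array, with
// ne[0] being the fastest-varying (row) dimension. The sizes below are made up.
//
//   struct ggml_tensor * w = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 768, 3072);   // 3072 rows of 768
//   struct ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 3072);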
  2202. struct ggml_tensor * ggml_new_i32(struct ggml_context * ctx, int32_t value) {
  2203. ggml_scratch_save(ctx);
  2204. struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 1);
  2205. ggml_scratch_load(ctx);
  2206. ggml_set_i32(result, value);
  2207. return result;
  2208. }
  2209. struct ggml_tensor * ggml_new_f32(struct ggml_context * ctx, float value) {
  2210. ggml_scratch_save(ctx);
  2211. struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
  2212. ggml_scratch_load(ctx);
  2213. ggml_set_f32(result, value);
  2214. return result;
  2215. }
  2216. struct ggml_tensor * ggml_dup_tensor(struct ggml_context * ctx, const struct ggml_tensor * src) {
  2217. return ggml_new_tensor(ctx, src->type, GGML_MAX_DIMS, src->ne);
  2218. }
  2219. static void ggml_set_op_params(struct ggml_tensor * tensor, const void * params, size_t params_size) {
  2220. GGML_ASSERT(tensor != NULL); // silence -Warray-bounds warnings
  2221. assert(params_size <= GGML_MAX_OP_PARAMS);
  2222. memcpy(tensor->op_params, params, params_size);
  2223. }
  2224. static int32_t ggml_get_op_params_i32(const struct ggml_tensor * tensor, uint32_t i) {
  2225. assert(i < GGML_MAX_OP_PARAMS / sizeof(int32_t));
  2226. return ((const int32_t *)(tensor->op_params))[i];
  2227. }
  2228. static void ggml_set_op_params_i32(struct ggml_tensor * tensor, uint32_t i, int32_t value) {
  2229. assert(i < GGML_MAX_OP_PARAMS / sizeof(int32_t));
  2230. ((int32_t *)(tensor->op_params))[i] = value;
  2231. }
  2232. struct ggml_tensor * ggml_set_zero(struct ggml_tensor * tensor) {
  2233. memset(tensor->data, 0, ggml_nbytes(tensor));
  2234. return tensor;
  2235. }
  2236. struct ggml_tensor * ggml_set_i32 (struct ggml_tensor * tensor, int32_t value) {
  2237. const int n = ggml_nrows(tensor);
  2238. const int nc = tensor->ne[0];
  2239. const size_t n1 = tensor->nb[1];
  2240. char * const data = tensor->data;
  2241. switch (tensor->type) {
  2242. case GGML_TYPE_I8:
  2243. {
  2244. assert(tensor->nb[0] == sizeof(int8_t));
  2245. for (int i = 0; i < n; i++) {
  2246. ggml_vec_set_i8(nc, (int8_t *)(data + i*n1), value);
  2247. }
  2248. } break;
  2249. case GGML_TYPE_I16:
  2250. {
  2251. assert(tensor->nb[0] == sizeof(int16_t));
  2252. for (int i = 0; i < n; i++) {
  2253. ggml_vec_set_i16(nc, (int16_t *)(data + i*n1), value);
  2254. }
  2255. } break;
  2256. case GGML_TYPE_I32:
  2257. {
  2258. assert(tensor->nb[0] == sizeof(int32_t));
  2259. for (int i = 0; i < n; i++) {
  2260. ggml_vec_set_i32(nc, (int32_t *)(data + i*n1), value);
  2261. }
  2262. } break;
  2263. case GGML_TYPE_F16:
  2264. {
  2265. assert(tensor->nb[0] == sizeof(ggml_fp16_t));
  2266. for (int i = 0; i < n; i++) {
  2267. ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), GGML_FP32_TO_FP16(value));
  2268. }
  2269. } break;
  2270. case GGML_TYPE_F32:
  2271. {
  2272. assert(tensor->nb[0] == sizeof(float));
  2273. for (int i = 0; i < n; i++) {
  2274. ggml_vec_set_f32(nc, (float *)(data + i*n1), value);
  2275. }
  2276. } break;
  2277. default:
  2278. {
  2279. GGML_ASSERT(false);
  2280. } break;
  2281. }
  2282. return tensor;
  2283. }
  2284. struct ggml_tensor * ggml_set_f32(struct ggml_tensor * tensor, float value) {
  2285. const int n = ggml_nrows(tensor);
  2286. const int nc = tensor->ne[0];
  2287. const size_t n1 = tensor->nb[1];
  2288. char * const data = tensor->data;
  2289. switch (tensor->type) {
  2290. case GGML_TYPE_I8:
  2291. {
  2292. assert(tensor->nb[0] == sizeof(int8_t));
  2293. for (int i = 0; i < n; i++) {
  2294. ggml_vec_set_i8(nc, (int8_t *)(data + i*n1), value);
  2295. }
  2296. } break;
  2297. case GGML_TYPE_I16:
  2298. {
  2299. assert(tensor->nb[0] == sizeof(int16_t));
  2300. for (int i = 0; i < n; i++) {
  2301. ggml_vec_set_i16(nc, (int16_t *)(data + i*n1), value);
  2302. }
  2303. } break;
  2304. case GGML_TYPE_I32:
  2305. {
  2306. assert(tensor->nb[0] == sizeof(int32_t));
  2307. for (int i = 0; i < n; i++) {
  2308. ggml_vec_set_i32(nc, (int32_t *)(data + i*n1), value);
  2309. }
  2310. } break;
  2311. case GGML_TYPE_F16:
  2312. {
  2313. assert(tensor->nb[0] == sizeof(ggml_fp16_t));
  2314. for (int i = 0; i < n; i++) {
  2315. ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), GGML_FP32_TO_FP16(value));
  2316. }
  2317. } break;
  2318. case GGML_TYPE_F32:
  2319. {
  2320. assert(tensor->nb[0] == sizeof(float));
  2321. for (int i = 0; i < n; i++) {
  2322. ggml_vec_set_f32(nc, (float *)(data + i*n1), value);
  2323. }
  2324. } break;
  2325. default:
  2326. {
  2327. GGML_ASSERT(false);
  2328. } break;
  2329. }
  2330. return tensor;
  2331. }
  2332. void ggml_unravel_index(const struct ggml_tensor * tensor, int64_t i, int64_t * i0, int64_t * i1, int64_t * i2, int64_t * i3) {
  2333. const int64_t ne2 = tensor->ne[2];
  2334. const int64_t ne1 = tensor->ne[1];
  2335. const int64_t ne0 = tensor->ne[0];
  2336. const int64_t i3_ = (i/(ne2*ne1*ne0));
  2337. const int64_t i2_ = (i - i3_*ne2*ne1*ne0)/(ne1*ne0);
  2338. const int64_t i1_ = (i - i3_*ne2*ne1*ne0 - i2_*ne1*ne0)/ne0;
  2339. const int64_t i0_ = (i - i3_*ne2*ne1*ne0 - i2_*ne1*ne0 - i1_*ne0);
  2340. if (i0) {
  2341. * i0 = i0_;
  2342. }
  2343. if (i1) {
  2344. * i1 = i1_;
  2345. }
  2346. if (i2) {
  2347. * i2 = i2_;
  2348. }
  2349. if (i3) {
  2350. * i3 = i3_;
  2351. }
  2352. }
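// Worked example: for ne = {4, 3, 2, 1} and flat index i = 17,
//   i3 = 17/24 = 0, i2 = (17 - 0)/12 = 1, i1 = (17 - 12)/4 = 1, i0 = 17 - 12 - 4 = 1,
// i.e. element (1, 1, 1, 0), consistent with flat indexing that varies ne[0] fastest
// (i = i0 + i1*ne0 + i2*ne0*ne1 + i3*ne0*ne1*ne2).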
  2353. int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i) {
  2354. if (!ggml_is_contiguous(tensor)) {
  2355. int64_t id[4] = { 0, 0, 0, 0 };
  2356. ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]);
  2357. return ggml_get_i32_nd(tensor, id[0], id[1], id[2], id[3]);
  2358. }
  2359. switch (tensor->type) {
  2360. case GGML_TYPE_I8:
  2361. {
  2362. GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
  2363. return ((int8_t *)(tensor->data))[i];
  2364. }
  2365. case GGML_TYPE_I16:
  2366. {
  2367. GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
  2368. return ((int16_t *)(tensor->data))[i];
  2369. }
  2370. case GGML_TYPE_I32:
  2371. {
  2372. GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
  2373. return ((int32_t *)(tensor->data))[i];
  2374. }
  2375. case GGML_TYPE_F16:
  2376. {
  2377. GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
  2378. return GGML_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]);
  2379. }
  2380. case GGML_TYPE_F32:
  2381. {
  2382. GGML_ASSERT(tensor->nb[0] == sizeof(float));
  2383. return ((float *)(tensor->data))[i];
  2384. }
  2385. default:
  2386. {
  2387. GGML_ASSERT(false);
  2388. }
  2389. }
2390. return 0;
  2391. }
  2392. void ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value) {
  2393. if (!ggml_is_contiguous(tensor)) {
  2394. int64_t id[4] = { 0, 0, 0, 0 };
  2395. ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]);
  2396. ggml_set_i32_nd(tensor, id[0], id[1], id[2], id[3], value);
  2397. return;
  2398. }
  2399. switch (tensor->type) {
  2400. case GGML_TYPE_I8:
  2401. {
  2402. GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
  2403. ((int8_t *)(tensor->data))[i] = value;
  2404. } break;
  2405. case GGML_TYPE_I16:
  2406. {
  2407. GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
  2408. ((int16_t *)(tensor->data))[i] = value;
  2409. } break;
  2410. case GGML_TYPE_I32:
  2411. {
  2412. GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
  2413. ((int32_t *)(tensor->data))[i] = value;
  2414. } break;
  2415. case GGML_TYPE_F16:
  2416. {
  2417. GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
  2418. ((ggml_fp16_t *)(tensor->data))[i] = GGML_FP32_TO_FP16(value);
  2419. } break;
  2420. case GGML_TYPE_F32:
  2421. {
  2422. GGML_ASSERT(tensor->nb[0] == sizeof(float));
  2423. ((float *)(tensor->data))[i] = value;
  2424. } break;
  2425. default:
  2426. {
  2427. GGML_ASSERT(false);
  2428. } break;
  2429. }
  2430. }
  2431. int32_t ggml_get_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3) {
  2432. void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3];
  2433. switch (tensor->type) {
  2434. case GGML_TYPE_I8:
  2435. return ((int8_t *) data)[0];
  2436. case GGML_TYPE_I16:
  2437. return ((int16_t *) data)[0];
  2438. case GGML_TYPE_I32:
  2439. return ((int32_t *) data)[0];
  2440. case GGML_TYPE_F16:
  2441. return GGML_FP16_TO_FP32(((ggml_fp16_t *) data)[0]);
  2442. case GGML_TYPE_F32:
  2443. return ((float *) data)[0];
  2444. default:
  2445. GGML_ASSERT(false);
  2446. }
2447. return 0;
  2448. }
  2449. void ggml_set_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, int32_t value) {
  2450. void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3];
  2451. switch (tensor->type) {
  2452. case GGML_TYPE_I8:
  2453. {
  2454. ((int8_t *)(data))[0] = value;
  2455. } break;
  2456. case GGML_TYPE_I16:
  2457. {
  2458. ((int16_t *)(data))[0] = value;
  2459. } break;
  2460. case GGML_TYPE_I32:
  2461. {
  2462. ((int32_t *)(data))[0] = value;
  2463. } break;
  2464. case GGML_TYPE_F16:
  2465. {
  2466. ((ggml_fp16_t *)(data))[0] = GGML_FP32_TO_FP16(value);
  2467. } break;
  2468. case GGML_TYPE_F32:
  2469. {
  2470. ((float *)(data))[0] = value;
  2471. } break;
  2472. default:
  2473. {
  2474. GGML_ASSERT(false);
  2475. } break;
  2476. }
  2477. }
  2478. float ggml_get_f32_1d(const struct ggml_tensor * tensor, int i) {
  2479. if (!ggml_is_contiguous(tensor)) {
  2480. int64_t id[4] = { 0, 0, 0, 0 };
  2481. ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]);
  2482. return ggml_get_f32_nd(tensor, id[0], id[1], id[2], id[3]);
  2483. }
  2484. switch (tensor->type) {
  2485. case GGML_TYPE_I8:
  2486. {
  2487. GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
  2488. return ((int8_t *)(tensor->data))[i];
  2489. }
  2490. case GGML_TYPE_I16:
  2491. {
  2492. GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
  2493. return ((int16_t *)(tensor->data))[i];
  2494. }
  2495. case GGML_TYPE_I32:
  2496. {
  2497. GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
  2498. return ((int32_t *)(tensor->data))[i];
  2499. }
  2500. case GGML_TYPE_F16:
  2501. {
  2502. GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
  2503. return GGML_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]);
  2504. }
  2505. case GGML_TYPE_F32:
  2506. {
  2507. GGML_ASSERT(tensor->nb[0] == sizeof(float));
  2508. return ((float *)(tensor->data))[i];
  2509. }
  2510. default:
  2511. {
  2512. GGML_ASSERT(false);
  2513. }
  2514. }
  2515. return 0.0f;
  2516. }
  2517. void ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value) {
  2518. if (!ggml_is_contiguous(tensor)) {
  2519. int64_t id[4] = { 0, 0, 0, 0 };
  2520. ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]);
  2521. ggml_set_f32_nd(tensor, id[0], id[1], id[2], id[3], value);
  2522. return;
  2523. }
  2524. switch (tensor->type) {
  2525. case GGML_TYPE_I8:
  2526. {
  2527. GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
  2528. ((int8_t *)(tensor->data))[i] = value;
  2529. } break;
  2530. case GGML_TYPE_I16:
  2531. {
  2532. GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
  2533. ((int16_t *)(tensor->data))[i] = value;
  2534. } break;
  2535. case GGML_TYPE_I32:
  2536. {
  2537. GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
  2538. ((int32_t *)(tensor->data))[i] = value;
  2539. } break;
  2540. case GGML_TYPE_F16:
  2541. {
  2542. GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
  2543. ((ggml_fp16_t *)(tensor->data))[i] = GGML_FP32_TO_FP16(value);
  2544. } break;
  2545. case GGML_TYPE_F32:
  2546. {
  2547. GGML_ASSERT(tensor->nb[0] == sizeof(float));
  2548. ((float *)(tensor->data))[i] = value;
  2549. } break;
  2550. default:
  2551. {
  2552. GGML_ASSERT(false);
  2553. } break;
  2554. }
  2555. }
  2556. float ggml_get_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3) {
  2557. void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3];
  2558. switch (tensor->type) {
  2559. case GGML_TYPE_I8:
  2560. return ((int8_t *) data)[0];
  2561. case GGML_TYPE_I16:
  2562. return ((int16_t *) data)[0];
  2563. case GGML_TYPE_I32:
  2564. return ((int32_t *) data)[0];
  2565. case GGML_TYPE_F16:
  2566. return GGML_FP16_TO_FP32(((ggml_fp16_t *) data)[0]);
  2567. case GGML_TYPE_F32:
  2568. return ((float *) data)[0];
  2569. default:
  2570. GGML_ASSERT(false);
  2571. }
  2572. return 0.0f;
  2573. }
  2574. void ggml_set_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, float value) {
  2575. void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3];
  2576. switch (tensor->type) {
  2577. case GGML_TYPE_I8:
  2578. {
  2579. ((int8_t *)(data))[0] = value;
  2580. } break;
  2581. case GGML_TYPE_I16:
  2582. {
  2583. ((int16_t *)(data))[0] = value;
  2584. } break;
  2585. case GGML_TYPE_I32:
  2586. {
  2587. ((int32_t *)(data))[0] = value;
  2588. } break;
  2589. case GGML_TYPE_F16:
  2590. {
  2591. ((ggml_fp16_t *)(data))[0] = GGML_FP32_TO_FP16(value);
  2592. } break;
  2593. case GGML_TYPE_F32:
  2594. {
  2595. ((float *)(data))[0] = value;
  2596. } break;
  2597. default:
  2598. {
  2599. GGML_ASSERT(false);
  2600. } break;
  2601. }
  2602. }
  2603. void * ggml_get_data(const struct ggml_tensor * tensor) {
  2604. return tensor->data;
  2605. }
  2606. float * ggml_get_data_f32(const struct ggml_tensor * tensor) {
  2607. assert(tensor->type == GGML_TYPE_F32);
  2608. return (float *)(tensor->data);
  2609. }
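// Usage sketch (illustrative): element access helpers. The *_1d variants index the
// tensor as if flattened; for non-contiguous tensors they go through
// ggml_unravel_index and the *_nd accessors above.
//
//   struct ggml_tensor * t = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 4, 3);
//   ggml_set_f32(t, 0.0f);
//   ggml_set_f32_nd(t, 2, 1, 0, 0, 5.0f);     // element (i0=2, i1=1)
//   float v = ggml_get_f32_1d(t, 1*4 + 2);    // same element, flat index 6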
  2610. GGML_CALL enum ggml_unary_op ggml_get_unary_op(const struct ggml_tensor * tensor) {
  2611. GGML_ASSERT(tensor->op == GGML_OP_UNARY);
  2612. return (enum ggml_unary_op) ggml_get_op_params_i32(tensor, 0);
  2613. }
  2614. const char * ggml_get_name(const struct ggml_tensor * tensor) {
  2615. return tensor->name;
  2616. }
  2617. struct ggml_tensor * ggml_set_name(struct ggml_tensor * tensor, const char * name) {
  2618. strncpy(tensor->name, name, sizeof(tensor->name));
  2619. tensor->name[sizeof(tensor->name) - 1] = '\0';
  2620. return tensor;
  2621. }
  2622. struct ggml_tensor * ggml_format_name(struct ggml_tensor * tensor, const char * fmt, ...) {
  2623. va_list args;
  2624. va_start(args, fmt);
  2625. vsnprintf(tensor->name, sizeof(tensor->name), fmt, args);
  2626. va_end(args);
  2627. return tensor;
  2628. }
  2629. struct ggml_tensor * ggml_view_tensor(
  2630. struct ggml_context * ctx,
  2631. struct ggml_tensor * src) {
  2632. struct ggml_tensor * result = ggml_new_tensor_impl(ctx, src->type, GGML_MAX_DIMS, src->ne, src, 0);
  2633. ggml_format_name(result, "%s (view)", src->name);
  2634. for (int i = 0; i < GGML_MAX_DIMS; i++) {
  2635. result->nb[i] = src->nb[i];
  2636. }
  2637. return result;
  2638. }
  2639. struct ggml_tensor * ggml_get_first_tensor(const struct ggml_context * ctx) {
  2640. struct ggml_object * obj = ctx->objects_begin;
  2641. char * const mem_buffer = ctx->mem_buffer;
  2642. while (obj != NULL) {
  2643. if (obj->type == GGML_OBJECT_TENSOR) {
  2644. return (struct ggml_tensor *)(mem_buffer + obj->offs);
  2645. }
  2646. obj = obj->next;
  2647. }
  2648. return NULL;
  2649. }
  2650. struct ggml_tensor * ggml_get_next_tensor(const struct ggml_context * ctx, struct ggml_tensor * tensor) {
  2651. struct ggml_object * obj = (struct ggml_object *) ((char *)tensor - GGML_OBJECT_SIZE);
  2652. obj = obj->next;
  2653. char * const mem_buffer = ctx->mem_buffer;
  2654. while (obj != NULL) {
  2655. if (obj->type == GGML_OBJECT_TENSOR) {
  2656. return (struct ggml_tensor *)(mem_buffer + obj->offs);
  2657. }
  2658. obj = obj->next;
  2659. }
  2660. return NULL;
  2661. }
  2662. struct ggml_tensor * ggml_get_tensor(struct ggml_context * ctx, const char * name) {
  2663. struct ggml_object * obj = ctx->objects_begin;
  2664. char * const mem_buffer = ctx->mem_buffer;
  2665. while (obj != NULL) {
  2666. if (obj->type == GGML_OBJECT_TENSOR) {
  2667. struct ggml_tensor * cur = (struct ggml_tensor *)(mem_buffer + obj->offs);
  2668. if (strcmp(cur->name, name) == 0) {
  2669. return cur;
  2670. }
  2671. }
  2672. obj = obj->next;
  2673. }
  2674. return NULL;
  2675. }
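// Usage sketch (illustrative): names are the lookup key for ggml_get_tensor, which is
// how callers typically locate tensors again after building a context. The names here
// are made up.
//
//   ggml_set_name(w, "blk.0.attn_q.weight");
//   ggml_format_name(cache_k, "cache_k_l%d", 0);
//
//   struct ggml_tensor * q_w = ggml_get_tensor(ctx, "blk.0.attn_q.weight");   // == w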
  2676. ////////////////////////////////////////////////////////////////////////////////
  2677. // ggml_dup
  2678. static struct ggml_tensor * ggml_dup_impl(
  2679. struct ggml_context * ctx,
  2680. struct ggml_tensor * a,
  2681. bool inplace) {
  2682. bool is_node = false;
  2683. if (!inplace && (a->grad)) {
  2684. is_node = true;
  2685. }
  2686. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  2687. result->op = GGML_OP_DUP;
  2688. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  2689. result->src[0] = a;
  2690. return result;
  2691. }
  2692. struct ggml_tensor * ggml_dup(
  2693. struct ggml_context * ctx,
  2694. struct ggml_tensor * a) {
  2695. return ggml_dup_impl(ctx, a, false);
  2696. }
  2697. struct ggml_tensor * ggml_dup_inplace(
  2698. struct ggml_context * ctx,
  2699. struct ggml_tensor * a) {
  2700. return ggml_dup_impl(ctx, a, true);
  2701. }
  2702. // ggml_add
  2703. static struct ggml_tensor * ggml_add_impl(
  2704. struct ggml_context * ctx,
  2705. struct ggml_tensor * a,
  2706. struct ggml_tensor * b,
  2707. bool inplace) {
  2708. GGML_ASSERT(ggml_can_repeat(b, a));
  2709. bool is_node = false;
  2710. if (!inplace && (a->grad || b->grad)) {
  2711. // TODO: support backward pass for broadcasting
  2712. GGML_ASSERT(ggml_are_same_shape(a, b));
  2713. is_node = true;
  2714. }
  2715. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  2716. result->op = GGML_OP_ADD;
  2717. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  2718. result->src[0] = a;
  2719. result->src[1] = b;
  2720. return result;
  2721. }
  2722. struct ggml_tensor * ggml_add(
  2723. struct ggml_context * ctx,
  2724. struct ggml_tensor * a,
  2725. struct ggml_tensor * b) {
  2726. return ggml_add_impl(ctx, a, b, false);
  2727. }
  2728. struct ggml_tensor * ggml_add_inplace(
  2729. struct ggml_context * ctx,
  2730. struct ggml_tensor * a,
  2731. struct ggml_tensor * b) {
  2732. return ggml_add_impl(ctx, a, b, true);
  2733. }
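// Usage sketch (illustrative): the operator constructors below only record graph nodes
// (op, src[], grad); nothing is computed here. Evaluation goes through the graph API
// declared in ggml.h (e.g. ggml_new_graph / ggml_build_forward_expand /
// ggml_graph_compute_with_ctx) -- assumed here, defined elsewhere in this file.
//
//   struct ggml_tensor * y = ggml_add(ctx, ggml_mul_mat(ctx, w, x), b);
//
//   struct ggml_cgraph * gf = ggml_new_graph(ctx);
//   ggml_build_forward_expand(gf, y);
//   ggml_graph_compute_with_ctx(ctx, gf, /*n_threads =*/ 4);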
  2734. // ggml_add_cast
  2735. static struct ggml_tensor * ggml_add_cast_impl(
  2736. struct ggml_context * ctx,
  2737. struct ggml_tensor * a,
  2738. struct ggml_tensor * b,
  2739. enum ggml_type type) {
  2740. // TODO: support less-strict constraint
  2741. // GGML_ASSERT(ggml_can_repeat(b, a));
  2742. GGML_ASSERT(ggml_can_repeat_rows(b, a));
  2743. GGML_ASSERT(ggml_is_quantized(a->type) || a->type == GGML_TYPE_F16); // currently only supported for quantized input and f16
  2744. bool is_node = false;
  2745. if (a->grad || b->grad) {
  2746. // TODO: support backward pass for broadcasting
  2747. GGML_ASSERT(ggml_are_same_shape(a, b));
  2748. is_node = true;
  2749. }
  2750. struct ggml_tensor * result = ggml_new_tensor(ctx, type, GGML_MAX_DIMS, a->ne);
  2751. result->op = GGML_OP_ADD;
  2752. result->grad = is_node ? ggml_new_tensor(ctx, GGML_TYPE_F32, GGML_MAX_DIMS, a->ne) : NULL;
  2753. result->src[0] = a;
  2754. result->src[1] = b;
  2755. return result;
  2756. }
  2757. struct ggml_tensor * ggml_add_cast(
  2758. struct ggml_context * ctx,
  2759. struct ggml_tensor * a,
  2760. struct ggml_tensor * b,
  2761. enum ggml_type type) {
  2762. return ggml_add_cast_impl(ctx, a, b, type);
  2763. }
  2764. // ggml_add1
  2765. static struct ggml_tensor * ggml_add1_impl(
  2766. struct ggml_context * ctx,
  2767. struct ggml_tensor * a,
  2768. struct ggml_tensor * b,
  2769. bool inplace) {
  2770. GGML_ASSERT(ggml_is_scalar(b));
  2771. GGML_ASSERT(ggml_is_padded_1d(a));
  2772. bool is_node = false;
  2773. if (a->grad || b->grad) {
  2774. is_node = true;
  2775. }
  2776. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  2777. result->op = GGML_OP_ADD1;
  2778. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  2779. result->src[0] = a;
  2780. result->src[1] = b;
  2781. return result;
  2782. }
  2783. struct ggml_tensor * ggml_add1(
  2784. struct ggml_context * ctx,
  2785. struct ggml_tensor * a,
  2786. struct ggml_tensor * b) {
  2787. return ggml_add1_impl(ctx, a, b, false);
  2788. }
  2789. struct ggml_tensor * ggml_add1_inplace(
  2790. struct ggml_context * ctx,
  2791. struct ggml_tensor * a,
  2792. struct ggml_tensor * b) {
  2793. return ggml_add1_impl(ctx, a, b, true);
  2794. }
  2795. // ggml_acc
  2796. static struct ggml_tensor * ggml_acc_impl(
  2797. struct ggml_context * ctx,
  2798. struct ggml_tensor * a,
  2799. struct ggml_tensor * b,
  2800. size_t nb1,
  2801. size_t nb2,
  2802. size_t nb3,
  2803. size_t offset,
  2804. bool inplace) {
  2805. GGML_ASSERT(ggml_nelements(b) <= ggml_nelements(a));
  2806. GGML_ASSERT(ggml_is_contiguous(a));
  2807. GGML_ASSERT(a->type == GGML_TYPE_F32);
  2808. GGML_ASSERT(b->type == GGML_TYPE_F32);
  2809. bool is_node = false;
  2810. if (!inplace && (a->grad || b->grad)) {
  2811. is_node = true;
  2812. }
  2813. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  2814. int32_t params[] = { nb1, nb2, nb3, offset, inplace ? 1 : 0 };
  2815. ggml_set_op_params(result, params, sizeof(params));
  2816. result->op = GGML_OP_ACC;
  2817. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  2818. result->src[0] = a;
  2819. result->src[1] = b;
  2820. return result;
  2821. }
  2822. struct ggml_tensor * ggml_acc(
  2823. struct ggml_context * ctx,
  2824. struct ggml_tensor * a,
  2825. struct ggml_tensor * b,
  2826. size_t nb1,
  2827. size_t nb2,
  2828. size_t nb3,
  2829. size_t offset) {
  2830. return ggml_acc_impl(ctx, a, b, nb1, nb2, nb3, offset, false);
  2831. }
  2832. struct ggml_tensor * ggml_acc_inplace(
  2833. struct ggml_context * ctx,
  2834. struct ggml_tensor * a,
  2835. struct ggml_tensor * b,
  2836. size_t nb1,
  2837. size_t nb2,
  2838. size_t nb3,
  2839. size_t offset) {
  2840. return ggml_acc_impl(ctx, a, b, nb1, nb2, nb3, offset, true);
  2841. }
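// Semantics sketch: ggml_acc(ctx, a, b, nb1, nb2, nb3, offset) yields a tensor equal to
// a with the elements of b added into the sub-region that starts at byte `offset` and
// uses row/plane strides nb1/nb2/nb3 (all in bytes); the _inplace variant updates a
// view of a directly. Both a and b must be F32 here.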
  2842. // ggml_sub
  2843. static struct ggml_tensor * ggml_sub_impl(
  2844. struct ggml_context * ctx,
  2845. struct ggml_tensor * a,
  2846. struct ggml_tensor * b,
  2847. bool inplace) {
  2848. GGML_ASSERT(ggml_are_same_shape(a, b));
  2849. bool is_node = false;
  2850. if (!inplace && (a->grad || b->grad)) {
  2851. is_node = true;
  2852. }
  2853. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  2854. result->op = GGML_OP_SUB;
  2855. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  2856. result->src[0] = a;
  2857. result->src[1] = b;
  2858. return result;
  2859. }
  2860. struct ggml_tensor * ggml_sub(
  2861. struct ggml_context * ctx,
  2862. struct ggml_tensor * a,
  2863. struct ggml_tensor * b) {
  2864. return ggml_sub_impl(ctx, a, b, false);
  2865. }
  2866. struct ggml_tensor * ggml_sub_inplace(
  2867. struct ggml_context * ctx,
  2868. struct ggml_tensor * a,
  2869. struct ggml_tensor * b) {
  2870. return ggml_sub_impl(ctx, a, b, true);
  2871. }
  2872. // ggml_mul
  2873. static struct ggml_tensor * ggml_mul_impl(
  2874. struct ggml_context * ctx,
  2875. struct ggml_tensor * a,
  2876. struct ggml_tensor * b,
  2877. bool inplace) {
  2878. GGML_ASSERT(ggml_can_repeat(b, a));
  2879. bool is_node = false;
  2880. if (!inplace && (a->grad || b->grad)) {
  2881. // TODO: support backward pass for broadcasting
  2882. GGML_ASSERT(ggml_are_same_shape(a, b));
  2883. is_node = true;
  2884. }
  2885. if (inplace) {
  2886. GGML_ASSERT(!is_node);
  2887. }
  2888. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  2889. result->op = GGML_OP_MUL;
  2890. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  2891. result->src[0] = a;
  2892. result->src[1] = b;
  2893. return result;
  2894. }
  2895. struct ggml_tensor * ggml_mul(
  2896. struct ggml_context * ctx,
  2897. struct ggml_tensor * a,
  2898. struct ggml_tensor * b) {
  2899. return ggml_mul_impl(ctx, a, b, false);
  2900. }
  2901. struct ggml_tensor * ggml_mul_inplace(
  2902. struct ggml_context * ctx,
  2903. struct ggml_tensor * a,
  2904. struct ggml_tensor * b) {
  2905. return ggml_mul_impl(ctx, a, b, true);
  2906. }
  2907. // ggml_div
  2908. static struct ggml_tensor * ggml_div_impl(
  2909. struct ggml_context * ctx,
  2910. struct ggml_tensor * a,
  2911. struct ggml_tensor * b,
  2912. bool inplace) {
  2913. GGML_ASSERT(ggml_can_repeat(b, a));
  2914. bool is_node = false;
  2915. if (!inplace && (a->grad || b->grad)) {
  2916. is_node = true;
  2917. }
  2918. if (inplace) {
  2919. GGML_ASSERT(!is_node);
  2920. }
  2921. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  2922. result->op = GGML_OP_DIV;
  2923. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  2924. result->src[0] = a;
  2925. result->src[1] = b;
  2926. return result;
  2927. }
  2928. struct ggml_tensor * ggml_div(
  2929. struct ggml_context * ctx,
  2930. struct ggml_tensor * a,
  2931. struct ggml_tensor * b) {
  2932. return ggml_div_impl(ctx, a, b, false);
  2933. }
  2934. struct ggml_tensor * ggml_div_inplace(
  2935. struct ggml_context * ctx,
  2936. struct ggml_tensor * a,
  2937. struct ggml_tensor * b) {
  2938. return ggml_div_impl(ctx, a, b, true);
  2939. }
  2940. // ggml_sqr
  2941. static struct ggml_tensor * ggml_sqr_impl(
  2942. struct ggml_context * ctx,
  2943. struct ggml_tensor * a,
  2944. bool inplace) {
  2945. bool is_node = false;
  2946. if (!inplace && (a->grad)) {
  2947. is_node = true;
  2948. }
  2949. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  2950. result->op = GGML_OP_SQR;
  2951. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  2952. result->src[0] = a;
  2953. return result;
  2954. }
  2955. struct ggml_tensor * ggml_sqr(
  2956. struct ggml_context * ctx,
  2957. struct ggml_tensor * a) {
  2958. return ggml_sqr_impl(ctx, a, false);
  2959. }
  2960. struct ggml_tensor * ggml_sqr_inplace(
  2961. struct ggml_context * ctx,
  2962. struct ggml_tensor * a) {
  2963. return ggml_sqr_impl(ctx, a, true);
  2964. }
  2965. // ggml_sqrt
  2966. static struct ggml_tensor * ggml_sqrt_impl(
  2967. struct ggml_context * ctx,
  2968. struct ggml_tensor * a,
  2969. bool inplace) {
  2970. bool is_node = false;
  2971. if (!inplace && (a->grad)) {
  2972. is_node = true;
  2973. }
  2974. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  2975. result->op = GGML_OP_SQRT;
  2976. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  2977. result->src[0] = a;
  2978. return result;
  2979. }
  2980. struct ggml_tensor * ggml_sqrt(
  2981. struct ggml_context * ctx,
  2982. struct ggml_tensor * a) {
  2983. return ggml_sqrt_impl(ctx, a, false);
  2984. }
  2985. struct ggml_tensor * ggml_sqrt_inplace(
  2986. struct ggml_context * ctx,
  2987. struct ggml_tensor * a) {
  2988. return ggml_sqrt_impl(ctx, a, true);
  2989. }
  2990. // ggml_log
  2991. static struct ggml_tensor * ggml_log_impl(
  2992. struct ggml_context * ctx,
  2993. struct ggml_tensor * a,
  2994. bool inplace) {
  2995. bool is_node = false;
  2996. if (!inplace && (a->grad)) {
  2997. is_node = true;
  2998. }
  2999. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3000. result->op = GGML_OP_LOG;
  3001. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3002. result->src[0] = a;
  3003. return result;
  3004. }
  3005. struct ggml_tensor * ggml_log(
  3006. struct ggml_context * ctx,
  3007. struct ggml_tensor * a) {
  3008. return ggml_log_impl(ctx, a, false);
  3009. }
  3010. struct ggml_tensor * ggml_log_inplace(
  3011. struct ggml_context * ctx,
  3012. struct ggml_tensor * a) {
  3013. return ggml_log_impl(ctx, a, true);
  3014. }
  3015. // ggml_sum
  3016. struct ggml_tensor * ggml_sum(
  3017. struct ggml_context * ctx,
  3018. struct ggml_tensor * a) {
  3019. bool is_node = false;
  3020. if (a->grad) {
  3021. is_node = true;
  3022. }
  3023. struct ggml_tensor * result = ggml_new_tensor_1d(ctx, a->type, 1);
  3024. result->op = GGML_OP_SUM;
  3025. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3026. result->src[0] = a;
  3027. return result;
  3028. }
  3029. // ggml_sum_rows
  3030. struct ggml_tensor * ggml_sum_rows(
  3031. struct ggml_context * ctx,
  3032. struct ggml_tensor * a) {
  3033. bool is_node = false;
  3034. if (a->grad) {
  3035. is_node = true;
  3036. }
  3037. int64_t ne[GGML_MAX_DIMS] = { 1 };
  3038. for (int i = 1; i < GGML_MAX_DIMS; ++i) {
  3039. ne[i] = a->ne[i];
  3040. }
  3041. struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, GGML_MAX_DIMS, ne);
  3042. result->op = GGML_OP_SUM_ROWS;
  3043. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3044. result->src[0] = a;
  3045. return result;
  3046. }
  3047. // ggml_mean
  3048. struct ggml_tensor * ggml_mean(
  3049. struct ggml_context * ctx,
  3050. struct ggml_tensor * a) {
  3051. bool is_node = false;
  3052. if (a->grad) {
  3053. GGML_ASSERT(false); // TODO: implement
  3054. is_node = true;
  3055. }
  3056. int64_t ne[4] = { 1, a->ne[1], a->ne[2], a->ne[3] };
  3057. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);
  3058. result->op = GGML_OP_MEAN;
  3059. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3060. result->src[0] = a;
  3061. return result;
  3062. }
  3063. // ggml_argmax
  3064. struct ggml_tensor * ggml_argmax(
  3065. struct ggml_context * ctx,
  3066. struct ggml_tensor * a) {
  3067. GGML_ASSERT(ggml_is_matrix(a));
  3068. bool is_node = false;
  3069. if (a->grad) {
  3070. GGML_ASSERT(false);
  3071. is_node = true;
  3072. }
  3073. struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, a->ne[1]);
  3074. result->op = GGML_OP_ARGMAX;
  3075. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3076. result->src[0] = a;
  3077. return result;
  3078. }
  3079. // ggml_repeat
  3080. struct ggml_tensor * ggml_repeat(
  3081. struct ggml_context * ctx,
  3082. struct ggml_tensor * a,
  3083. struct ggml_tensor * b) {
  3084. GGML_ASSERT(ggml_can_repeat(a, b));
  3085. bool is_node = false;
  3086. if (a->grad) {
  3087. is_node = true;
  3088. }
  3089. struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, GGML_MAX_DIMS, b->ne);
  3090. result->op = GGML_OP_REPEAT;
  3091. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3092. result->src[0] = a;
  3093. return result;
  3094. }
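// Usage sketch (illustrative): ggml_repeat tiles a up to the shape of b (b only
// supplies the target shape). Several binary ops above also accept this implicitly via
// ggml_can_repeat(b, a), so an explicit repeat is mainly needed when the broadcast
// result itself is required. The shapes below are made up.
//
//   struct ggml_tensor * bias = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 768);       // ne = {768, 1, 1, 1}
//   struct ggml_tensor * x    = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 768, 32);   // ne = {768, 32, 1, 1}
//   struct ggml_tensor * bb   = ggml_repeat(ctx, bias, x);                          // ne = {768, 32, 1, 1}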
  3095. // ggml_repeat_back
  3096. struct ggml_tensor * ggml_repeat_back(
  3097. struct ggml_context * ctx,
  3098. struct ggml_tensor * a,
  3099. struct ggml_tensor * b) {
  3100. GGML_ASSERT(ggml_can_repeat(b, a));
  3101. bool is_node = false;
  3102. if (a->grad) {
  3103. is_node = true;
  3104. }
  3105. if (ggml_are_same_shape(a, b) && !is_node) {
  3106. return a;
  3107. }
  3108. struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, GGML_MAX_DIMS, b->ne);
  3109. result->op = GGML_OP_REPEAT_BACK;
  3110. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3111. result->src[0] = a;
  3112. return result;
  3113. }
  3114. // ggml_concat
  3115. struct ggml_tensor * ggml_concat(
3116. struct ggml_context * ctx,
3117. struct ggml_tensor * a,
3118. struct ggml_tensor * b) {
  3119. GGML_ASSERT(a->ne[0] == b->ne[0] && a->ne[1] == b->ne[1] && a->ne[3] == b->ne[3]);
  3120. bool is_node = false;
  3121. if (a->grad || b->grad) {
  3122. is_node = true;
  3123. }
  3124. struct ggml_tensor * result = ggml_new_tensor_4d(ctx, a->type, a->ne[0], a->ne[1], a->ne[2] + b->ne[2], a->ne[3]);
  3125. result->op = GGML_OP_CONCAT;
  3126. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3127. result->src[0] = a;
  3128. result->src[1] = b;
  3129. return result;
  3130. }
  3131. // ggml_abs
  3132. struct ggml_tensor * ggml_abs(
  3133. struct ggml_context * ctx,
  3134. struct ggml_tensor * a) {
  3135. return ggml_unary(ctx, a, GGML_UNARY_OP_ABS);
  3136. }
  3137. struct ggml_tensor * ggml_abs_inplace(
  3138. struct ggml_context * ctx,
  3139. struct ggml_tensor * a) {
  3140. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_ABS);
  3141. }
  3142. // ggml_sgn
  3143. struct ggml_tensor * ggml_sgn(
  3144. struct ggml_context * ctx,
  3145. struct ggml_tensor * a) {
  3146. return ggml_unary(ctx, a, GGML_UNARY_OP_SGN);
  3147. }
  3148. struct ggml_tensor * ggml_sgn_inplace(
  3149. struct ggml_context * ctx,
  3150. struct ggml_tensor * a) {
  3151. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_SGN);
  3152. }
  3153. // ggml_neg
  3154. struct ggml_tensor * ggml_neg(
  3155. struct ggml_context * ctx,
  3156. struct ggml_tensor * a) {
  3157. return ggml_unary(ctx, a, GGML_UNARY_OP_NEG);
  3158. }
  3159. struct ggml_tensor * ggml_neg_inplace(
  3160. struct ggml_context * ctx,
  3161. struct ggml_tensor * a) {
  3162. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_NEG);
  3163. }
  3164. // ggml_step
  3165. struct ggml_tensor * ggml_step(
  3166. struct ggml_context * ctx,
  3167. struct ggml_tensor * a) {
  3168. return ggml_unary(ctx, a, GGML_UNARY_OP_STEP);
  3169. }
  3170. struct ggml_tensor * ggml_step_inplace(
  3171. struct ggml_context * ctx,
  3172. struct ggml_tensor * a) {
  3173. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_STEP);
  3174. }
  3175. // ggml_tanh
  3176. struct ggml_tensor * ggml_tanh(
  3177. struct ggml_context * ctx,
  3178. struct ggml_tensor * a) {
  3179. return ggml_unary(ctx, a, GGML_UNARY_OP_TANH);
  3180. }
  3181. struct ggml_tensor * ggml_tanh_inplace(
  3182. struct ggml_context * ctx,
  3183. struct ggml_tensor * a) {
  3184. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_TANH);
  3185. }
  3186. // ggml_elu
  3187. struct ggml_tensor * ggml_elu(
  3188. struct ggml_context * ctx,
  3189. struct ggml_tensor * a) {
  3190. return ggml_unary(ctx, a, GGML_UNARY_OP_ELU);
  3191. }
  3192. struct ggml_tensor * ggml_elu_inplace(
  3193. struct ggml_context * ctx,
  3194. struct ggml_tensor * a) {
  3195. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_ELU);
  3196. }
  3197. // ggml_relu
  3198. struct ggml_tensor * ggml_relu(
  3199. struct ggml_context * ctx,
  3200. struct ggml_tensor * a) {
  3201. return ggml_unary(ctx, a, GGML_UNARY_OP_RELU);
  3202. }
  3203. struct ggml_tensor * ggml_relu_inplace(
  3204. struct ggml_context * ctx,
  3205. struct ggml_tensor * a) {
  3206. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_RELU);
  3207. }
  3208. // ggml_leaky_relu
  3209. struct ggml_tensor * ggml_leaky_relu(
  3210. struct ggml_context * ctx,
  3211. struct ggml_tensor * a, float negative_slope, bool inplace) {
  3212. bool is_node = false;
  3213. if (!inplace && (a->grad)) {
  3214. is_node = true;
  3215. }
  3216. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3217. ggml_set_op_params(result, &negative_slope, sizeof(negative_slope));
  3218. result->op = GGML_OP_LEAKY_RELU;
  3219. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3220. result->src[0] = a;
  3221. return result;
  3222. }
  3223. // ggml_gelu
  3224. struct ggml_tensor * ggml_gelu(
  3225. struct ggml_context * ctx,
  3226. struct ggml_tensor * a) {
  3227. return ggml_unary(ctx, a, GGML_UNARY_OP_GELU);
  3228. }
  3229. struct ggml_tensor * ggml_gelu_inplace(
  3230. struct ggml_context * ctx,
  3231. struct ggml_tensor * a) {
  3232. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_GELU);
  3233. }
  3234. // ggml_gelu_quick
  3235. struct ggml_tensor * ggml_gelu_quick(
  3236. struct ggml_context * ctx,
  3237. struct ggml_tensor * a) {
  3238. return ggml_unary(ctx, a, GGML_UNARY_OP_GELU_QUICK);
  3239. }
  3240. struct ggml_tensor * ggml_gelu_quick_inplace(
  3241. struct ggml_context * ctx,
  3242. struct ggml_tensor * a) {
  3243. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_GELU_QUICK);
  3244. }
  3245. // ggml_silu
  3246. struct ggml_tensor * ggml_silu(
  3247. struct ggml_context * ctx,
  3248. struct ggml_tensor * a) {
  3249. return ggml_unary(ctx, a, GGML_UNARY_OP_SILU);
  3250. }
  3251. struct ggml_tensor * ggml_silu_inplace(
  3252. struct ggml_context * ctx,
  3253. struct ggml_tensor * a) {
  3254. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_SILU);
  3255. }
  3256. // ggml_silu_back
  3257. struct ggml_tensor * ggml_silu_back(
  3258. struct ggml_context * ctx,
  3259. struct ggml_tensor * a,
  3260. struct ggml_tensor * b) {
  3261. bool is_node = false;
  3262. if (a->grad || b->grad) {
  3263. // TODO: implement backward
  3264. is_node = true;
  3265. }
  3266. struct ggml_tensor * result = ggml_dup_tensor(ctx, a);
  3267. result->op = GGML_OP_SILU_BACK;
  3268. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3269. result->src[0] = a;
  3270. result->src[1] = b;
  3271. return result;
  3272. }
3273. // ggml_hardswish
  3274. struct ggml_tensor * ggml_hardswish(
  3275. struct ggml_context * ctx,
  3276. struct ggml_tensor * a) {
  3277. return ggml_unary(ctx, a, GGML_UNARY_OP_HARDSWISH);
  3278. }
3279. // ggml_hardsigmoid
  3280. struct ggml_tensor * ggml_hardsigmoid(
  3281. struct ggml_context * ctx,
  3282. struct ggml_tensor * a) {
  3283. return ggml_unary(ctx, a, GGML_UNARY_OP_HARDSIGMOID);
  3284. }
  3285. // ggml_norm
  3286. static struct ggml_tensor * ggml_norm_impl(
  3287. struct ggml_context * ctx,
  3288. struct ggml_tensor * a,
  3289. float eps,
  3290. bool inplace) {
  3291. bool is_node = false;
  3292. if (!inplace && (a->grad)) {
  3293. GGML_ASSERT(false); // TODO: implement backward
  3294. is_node = true;
  3295. }
  3296. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3297. ggml_set_op_params(result, &eps, sizeof(eps));
  3298. result->op = GGML_OP_NORM;
  3299. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3300. result->src[0] = a;
  3301. return result;
  3302. }
  3303. struct ggml_tensor * ggml_norm(
  3304. struct ggml_context * ctx,
  3305. struct ggml_tensor * a,
  3306. float eps) {
  3307. return ggml_norm_impl(ctx, a, eps, false);
  3308. }
  3309. struct ggml_tensor * ggml_norm_inplace(
  3310. struct ggml_context * ctx,
  3311. struct ggml_tensor * a,
  3312. float eps) {
  3313. return ggml_norm_impl(ctx, a, eps, true);
  3314. }
  3315. // ggml_rms_norm
  3316. static struct ggml_tensor * ggml_rms_norm_impl(
  3317. struct ggml_context * ctx,
  3318. struct ggml_tensor * a,
  3319. float eps,
  3320. bool inplace) {
  3321. bool is_node = false;
  3322. if (!inplace && (a->grad)) {
  3323. is_node = true;
  3324. }
  3325. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3326. ggml_set_op_params(result, &eps, sizeof(eps));
  3327. result->op = GGML_OP_RMS_NORM;
  3328. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3329. result->src[0] = a;
  3330. return result;
  3331. }
  3332. struct ggml_tensor * ggml_rms_norm(
  3333. struct ggml_context * ctx,
  3334. struct ggml_tensor * a,
  3335. float eps) {
  3336. return ggml_rms_norm_impl(ctx, a, eps, false);
  3337. }
  3338. struct ggml_tensor * ggml_rms_norm_inplace(
  3339. struct ggml_context * ctx,
  3340. struct ggml_tensor * a,
  3341. float eps) {
  3342. return ggml_rms_norm_impl(ctx, a, eps, true);
  3343. }
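// Usage sketch (illustrative): a typical RMSNorm layer normalizes and then scales by a
// learned weight; eps is a small constant such as 1e-5f (an example value, not a
// library default).
//
//   struct ggml_tensor * cur = ggml_rms_norm(ctx, x, 1e-5f);
//   cur = ggml_mul(ctx, cur, norm_w);   // norm_w is broadcast over rows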
  3344. // ggml_rms_norm_back
  3345. struct ggml_tensor * ggml_rms_norm_back(
  3346. struct ggml_context * ctx,
  3347. struct ggml_tensor * a,
  3348. struct ggml_tensor * b,
  3349. float eps) {
  3350. bool is_node = false;
  3351. if (a->grad) {
  3352. // TODO: implement backward
  3353. is_node = true;
  3354. }
  3355. struct ggml_tensor * result = ggml_dup_tensor(ctx, a);
  3356. ggml_set_op_params(result, &eps, sizeof(eps));
  3357. result->op = GGML_OP_RMS_NORM_BACK;
  3358. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3359. result->src[0] = a;
  3360. result->src[1] = b;
  3361. return result;
  3362. }
  3363. // ggml_group_norm
  3364. static struct ggml_tensor * ggml_group_norm_impl(
  3365. struct ggml_context * ctx,
  3366. struct ggml_tensor * a,
  3367. int n_groups,
  3368. bool inplace) {
  3369. bool is_node = false;
  3370. if (!inplace && (a->grad)) {
  3371. GGML_ASSERT(false); // TODO: implement backward
  3372. is_node = true;
  3373. }
  3374. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3375. result->op_params[0] = n_groups;
  3376. result->op = GGML_OP_GROUP_NORM;
  3377. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3378. result->src[0] = a;
  3379. return result;
  3380. }
  3381. struct ggml_tensor * ggml_group_norm(
  3382. struct ggml_context * ctx,
  3383. struct ggml_tensor * a,
  3384. int n_groups) {
  3385. return ggml_group_norm_impl(ctx, a, n_groups, false);
  3386. }
  3387. struct ggml_tensor * ggml_group_norm_inplace(
  3388. struct ggml_context * ctx,
  3389. struct ggml_tensor * a,
  3390. int n_groups) {
  3391. return ggml_group_norm_impl(ctx, a, n_groups, true);
  3392. }
  3393. // ggml_mul_mat
  3394. struct ggml_tensor * ggml_mul_mat(
  3395. struct ggml_context * ctx,
  3396. struct ggml_tensor * a,
  3397. struct ggml_tensor * b) {
  3398. GGML_ASSERT(ggml_can_mul_mat(a, b));
  3399. GGML_ASSERT(!ggml_is_transposed(a));
  3400. bool is_node = false;
  3401. if (a->grad || b->grad) {
  3402. is_node = true;
  3403. }
  3404. const int64_t ne[4] = { a->ne[1], b->ne[1], b->ne[2], b->ne[3] };
  3405. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);
  3406. result->op = GGML_OP_MUL_MAT;
  3407. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3408. result->src[0] = a;
  3409. result->src[1] = b;
  3410. return result;
  3411. }
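// Shape note (sketch): with a->ne = {k, m, ...} and b->ne = {k, n, ...} the result has
// ne = {m, n, b->ne[2], b->ne[3]}: each of the n rows of b (length k) is dotted with
// the m rows of a, i.e. result[j][i] = sum_k a[i][k] * b[j][k]. a is broadcast over the
// two trailing (batch) dims of b, and the result is always F32.
//
//   // e.g. w->ne = {768, 3072}, x->ne = {768, 32}  ->  ggml_mul_mat(ctx, w, x)->ne = {3072, 32}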
  3412. void ggml_mul_mat_set_prec(
  3413. struct ggml_tensor * a,
  3414. enum ggml_prec prec) {
  3415. const int32_t prec_i32 = (int32_t) prec;
  3416. ggml_set_op_params_i32(a, 0, prec_i32);
  3417. }
  3418. // ggml_mul_mat_id
  3419. struct ggml_tensor * ggml_mul_mat_id(
  3420. struct ggml_context * ctx,
  3421. struct ggml_tensor * const as[],
  3422. int n_as,
  3423. struct ggml_tensor * ids,
  3424. int id,
  3425. struct ggml_tensor * b) {
  3426. GGML_ASSERT(ids->type == GGML_TYPE_I32);
  3427. GGML_ASSERT(ids->ne[2] == 1 && ids->ne[3] == 1);
  3428. GGML_ASSERT(ids->ne[1] == b->ne[1]);
  3429. GGML_ASSERT(ids->ne[2] == b->ne[2] && ids->ne[3] == b->ne[3]);
  3430. GGML_ASSERT(n_as > 0 && n_as <= GGML_MAX_SRC - 2);
  3431. GGML_ASSERT(id >= 0 && id < ids->ne[0]);
  3432. bool is_node = false;
  3433. if (as[0]->grad || b->grad) {
  3434. is_node = true;
  3435. }
  3436. const int64_t ne[4] = { as[0]->ne[1], b->ne[1], b->ne[2], b->ne[3] };
  3437. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);
  3438. ggml_set_op_params_i32(result, 0, id);
  3439. ggml_set_op_params_i32(result, 1, n_as);
  3440. result->op = GGML_OP_MUL_MAT_ID;
  3441. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3442. result->src[0] = ids;
  3443. result->src[1] = b;
  3444. for (int i = 0; i < n_as; i++) {
  3445. struct ggml_tensor * a = as[i];
  3446. GGML_ASSERT(ggml_are_same_shape(as[0], a));
  3447. GGML_ASSERT(ggml_can_mul_mat(a, b));
  3448. GGML_ASSERT(!ggml_is_transposed(a));
  3449. result->src[i + 2] = a;
  3450. }
  3451. return result;
  3452. }
  3453. // ggml_out_prod
  3454. struct ggml_tensor * ggml_out_prod(
  3455. struct ggml_context * ctx,
  3456. struct ggml_tensor * a,
  3457. struct ggml_tensor * b) {
  3458. GGML_ASSERT(ggml_can_out_prod(a, b));
  3459. GGML_ASSERT(!ggml_is_transposed(a));
  3460. bool is_node = false;
  3461. if (a->grad || b->grad) {
  3462. is_node = true;
  3463. }
  3464. // a is broadcastable to b for ne[2] and ne[3] -> use b->ne[2] and b->ne[3]
  3465. const int64_t ne[4] = { a->ne[0], b->ne[0], b->ne[2], b->ne[3] };
  3466. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);
  3467. result->op = GGML_OP_OUT_PROD;
  3468. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3469. result->src[0] = a;
  3470. result->src[1] = b;
  3471. return result;
  3472. }
  3473. // ggml_scale
  3474. static struct ggml_tensor * ggml_scale_impl(
  3475. struct ggml_context * ctx,
  3476. struct ggml_tensor * a,
  3477. float s,
  3478. bool inplace) {
  3479. GGML_ASSERT(ggml_is_padded_1d(a));
  3480. bool is_node = false;
  3481. if (a->grad) {
  3482. is_node = true;
  3483. }
  3484. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3485. ggml_set_op_params(result, &s, sizeof(s));
  3486. result->op = GGML_OP_SCALE;
  3487. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3488. result->src[0] = a;
  3489. return result;
  3490. }
  3491. struct ggml_tensor * ggml_scale(
  3492. struct ggml_context * ctx,
  3493. struct ggml_tensor * a,
  3494. float s) {
  3495. return ggml_scale_impl(ctx, a, s, false);
  3496. }
  3497. struct ggml_tensor * ggml_scale_inplace(
  3498. struct ggml_context * ctx,
  3499. struct ggml_tensor * a,
  3500. float s) {
  3501. return ggml_scale_impl(ctx, a, s, true);
  3502. }
  3503. // ggml_set
  3504. static struct ggml_tensor * ggml_set_impl(
  3505. struct ggml_context * ctx,
  3506. struct ggml_tensor * a,
  3507. struct ggml_tensor * b,
  3508. size_t nb1,
  3509. size_t nb2,
  3510. size_t nb3,
  3511. size_t offset,
  3512. bool inplace) {
  3513. GGML_ASSERT(ggml_nelements(a) >= ggml_nelements(b));
  3514. bool is_node = false;
  3515. if (a->grad || b->grad) {
  3516. is_node = true;
  3517. }
  3518. // make a view of the destination
  3519. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3520. int32_t params[] = { nb1, nb2, nb3, offset, inplace ? 1 : 0 };
  3521. ggml_set_op_params(result, params, sizeof(params));
  3522. result->op = GGML_OP_SET;
  3523. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3524. result->src[0] = a;
  3525. result->src[1] = b;
  3526. return result;
  3527. }
  3528. struct ggml_tensor * ggml_set(
  3529. struct ggml_context * ctx,
  3530. struct ggml_tensor * a,
  3531. struct ggml_tensor * b,
  3532. size_t nb1,
  3533. size_t nb2,
  3534. size_t nb3,
  3535. size_t offset) {
  3536. return ggml_set_impl(ctx, a, b, nb1, nb2, nb3, offset, false);
  3537. }
  3538. struct ggml_tensor * ggml_set_inplace(
  3539. struct ggml_context * ctx,
  3540. struct ggml_tensor * a,
  3541. struct ggml_tensor * b,
  3542. size_t nb1,
  3543. size_t nb2,
  3544. size_t nb3,
  3545. size_t offset) {
  3546. return ggml_set_impl(ctx, a, b, nb1, nb2, nb3, offset, true);
  3547. }
  3548. struct ggml_tensor * ggml_set_1d(
  3549. struct ggml_context * ctx,
  3550. struct ggml_tensor * a,
  3551. struct ggml_tensor * b,
  3552. size_t offset) {
  3553. return ggml_set_impl(ctx, a, b, a->nb[1], a->nb[2], a->nb[3], offset, false);
  3554. }
  3555. struct ggml_tensor * ggml_set_1d_inplace(
  3556. struct ggml_context * ctx,
  3557. struct ggml_tensor * a,
  3558. struct ggml_tensor * b,
  3559. size_t offset) {
  3560. return ggml_set_impl(ctx, a, b, a->nb[1], a->nb[2], a->nb[3], offset, true);
  3561. }
  3562. struct ggml_tensor * ggml_set_2d(
  3563. struct ggml_context * ctx,
  3564. struct ggml_tensor * a,
  3565. struct ggml_tensor * b,
  3566. size_t nb1,
  3567. size_t offset) {
  3568. return ggml_set_impl(ctx, a, b, nb1, a->nb[2], a->nb[3], offset, false);
  3569. }
  3570. struct ggml_tensor * ggml_set_2d_inplace(
  3571. struct ggml_context * ctx,
  3572. struct ggml_tensor * a,
  3573. struct ggml_tensor * b,
  3574. size_t nb1,
  3575. size_t offset) {
  3576. return ggml_set_impl(ctx, a, b, nb1, a->nb[2], a->nb[3], offset, true);
  3577. }
  3578. // ggml_cpy
  3579. static struct ggml_tensor * ggml_cpy_impl(
  3580. struct ggml_context * ctx,
  3581. struct ggml_tensor * a,
  3582. struct ggml_tensor * b) {
  3583. GGML_ASSERT(ggml_nelements(a) == ggml_nelements(b));
  3584. bool is_node = false;
  3585. if (a->grad || b->grad) {
3586. // inplace is false and either one has a grad
  3587. is_node = true;
  3588. }
  3589. // make a view of the destination
  3590. struct ggml_tensor * result = ggml_view_tensor(ctx, b);
  3591. if (strlen(b->name) > 0) {
  3592. ggml_format_name(result, "%s (copy of %s)", b->name, a->name);
  3593. } else {
  3594. ggml_format_name(result, "%s (copy)", a->name);
  3595. }
  3596. result->op = GGML_OP_CPY;
  3597. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3598. result->src[0] = a;
  3599. result->src[1] = b;
  3600. return result;
  3601. }
  3602. struct ggml_tensor * ggml_cpy(
  3603. struct ggml_context * ctx,
  3604. struct ggml_tensor * a,
  3605. struct ggml_tensor * b) {
  3606. return ggml_cpy_impl(ctx, a, b);
  3607. }
  3608. struct ggml_tensor * ggml_cast(
  3609. struct ggml_context * ctx,
  3610. struct ggml_tensor * a,
  3611. enum ggml_type type) {
  3612. bool is_node = false;
  3613. struct ggml_tensor * result = ggml_new_tensor(ctx, type, GGML_MAX_DIMS, a->ne);
  3614. ggml_format_name(result, "%s (copy)", a->name);
  3615. result->op = GGML_OP_CPY;
  3616. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3617. result->src[0] = a;
  3618. result->src[1] = result;
  3619. return result;
  3620. }
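// Usage sketch (illustrative): ggml_cpy copies/converts a into the shape, type and
// layout of an existing destination, while ggml_cast allocates a fresh contiguous
// tensor of the requested type.
//
//   struct ggml_tensor * dst16 = ggml_new_tensor_2d(ctx, GGML_TYPE_F16, a->ne[0], a->ne[1]);
//   struct ggml_tensor * c1 = ggml_cpy(ctx, a, dst16);            // F32 -> F16 into dst16's layout
//   struct ggml_tensor * c2 = ggml_cast(ctx, a, GGML_TYPE_F16);   // same conversion, new tensor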
  3621. // ggml_cont
  3622. static struct ggml_tensor * ggml_cont_impl(
  3623. struct ggml_context * ctx,
  3624. struct ggml_tensor * a) {
  3625. bool is_node = false;
  3626. if (a->grad) {
  3627. is_node = true;
  3628. }
  3629. struct ggml_tensor * result = ggml_dup_tensor(ctx, a);
  3630. ggml_format_name(result, "%s (cont)", a->name);
  3631. result->op = GGML_OP_CONT;
  3632. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3633. result->src[0] = a;
  3634. return result;
  3635. }
  3636. struct ggml_tensor * ggml_cont(
  3637. struct ggml_context * ctx,
  3638. struct ggml_tensor * a) {
  3639. return ggml_cont_impl(ctx, a);
  3640. }
  3641. // make contiguous, with new shape
  3642. GGML_API struct ggml_tensor * ggml_cont_1d(
  3643. struct ggml_context * ctx,
  3644. struct ggml_tensor * a,
  3645. int64_t ne0) {
  3646. return ggml_cont_4d(ctx, a, ne0, 1, 1, 1);
  3647. }
  3648. GGML_API struct ggml_tensor * ggml_cont_2d(
  3649. struct ggml_context * ctx,
  3650. struct ggml_tensor * a,
  3651. int64_t ne0,
  3652. int64_t ne1) {
  3653. return ggml_cont_4d(ctx, a, ne0, ne1, 1, 1);
  3654. }
  3655. GGML_API struct ggml_tensor * ggml_cont_3d(
  3656. struct ggml_context * ctx,
  3657. struct ggml_tensor * a,
  3658. int64_t ne0,
  3659. int64_t ne1,
  3660. int64_t ne2) {
  3661. return ggml_cont_4d(ctx, a, ne0, ne1, ne2, 1);
  3662. }
  3663. struct ggml_tensor * ggml_cont_4d(
  3664. struct ggml_context * ctx,
  3665. struct ggml_tensor * a,
  3666. int64_t ne0,
  3667. int64_t ne1,
  3668. int64_t ne2,
  3669. int64_t ne3) {
  3670. GGML_ASSERT(ggml_nelements(a) == (ne0*ne1*ne2*ne3));
  3671. bool is_node = false;
  3672. struct ggml_tensor * result = ggml_new_tensor_4d(ctx, a->type, ne0, ne1, ne2, ne3);
  3673. ggml_format_name(result, "%s (cont)", a->name);
  3674. result->op = GGML_OP_CONT;
  3675. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3676. result->src[0] = a;
  3677. return result;
  3678. }
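// usage sketch (illustrative): a transposed or permuted tensor is only a strided view;
// ggml_cont materializes it into its own row-major buffer so ops that need contiguity work:
//
//   struct ggml_tensor * w  = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 8, 4);
//   struct ggml_tensor * wt = ggml_cont(ctx, ggml_transpose(ctx, w)); // ne = {4, 8}, contiguous copy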
  3679. // ggml_reshape
  3680. struct ggml_tensor * ggml_reshape(
  3681. struct ggml_context * ctx,
  3682. struct ggml_tensor * a,
  3683. struct ggml_tensor * b) {
  3684. GGML_ASSERT(ggml_is_contiguous(a));
3685. // only the shape of b is relevant, not its memory layout, so b is allowed to be non-contiguous.
  3686. GGML_ASSERT(ggml_nelements(a) == ggml_nelements(b));
  3687. bool is_node = false;
  3688. if (a->grad) {
  3689. is_node = true;
  3690. }
  3691. if (b->grad) {
  3692. // gradient propagation is not supported
  3693. //GGML_ASSERT(false);
  3694. }
  3695. struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, GGML_MAX_DIMS, b->ne, a, 0);
  3696. ggml_format_name(result, "%s (reshaped)", a->name);
  3697. result->op = GGML_OP_RESHAPE;
  3698. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3699. result->src[0] = a;
  3700. return result;
  3701. }
  3702. struct ggml_tensor * ggml_reshape_1d(
  3703. struct ggml_context * ctx,
  3704. struct ggml_tensor * a,
  3705. int64_t ne0) {
  3706. GGML_ASSERT(ggml_is_contiguous(a));
  3707. GGML_ASSERT(ggml_nelements(a) == ne0);
  3708. bool is_node = false;
  3709. if (a->grad) {
  3710. is_node = true;
  3711. }
  3712. const int64_t ne[1] = { ne0 };
  3713. struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 1, ne, a, 0);
  3714. ggml_format_name(result, "%s (reshaped)", a->name);
  3715. result->op = GGML_OP_RESHAPE;
  3716. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3717. result->src[0] = a;
  3718. return result;
  3719. }
  3720. struct ggml_tensor * ggml_reshape_2d(
  3721. struct ggml_context * ctx,
  3722. struct ggml_tensor * a,
  3723. int64_t ne0,
  3724. int64_t ne1) {
  3725. GGML_ASSERT(ggml_is_contiguous(a));
  3726. GGML_ASSERT(ggml_nelements(a) == ne0*ne1);
  3727. bool is_node = false;
  3728. if (a->grad) {
  3729. is_node = true;
  3730. }
  3731. const int64_t ne[2] = { ne0, ne1 };
  3732. struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 2, ne, a, 0);
  3733. ggml_format_name(result, "%s (reshaped)", a->name);
  3734. result->op = GGML_OP_RESHAPE;
  3735. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3736. result->src[0] = a;
  3737. return result;
  3738. }
  3739. struct ggml_tensor * ggml_reshape_3d(
  3740. struct ggml_context * ctx,
  3741. struct ggml_tensor * a,
  3742. int64_t ne0,
  3743. int64_t ne1,
  3744. int64_t ne2) {
  3745. GGML_ASSERT(ggml_is_contiguous(a));
  3746. GGML_ASSERT(ggml_nelements(a) == ne0*ne1*ne2);
  3747. bool is_node = false;
  3748. if (a->grad) {
  3749. is_node = true;
  3750. }
  3751. const int64_t ne[3] = { ne0, ne1, ne2 };
  3752. struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 3, ne, a, 0);
  3753. ggml_format_name(result, "%s (reshaped)", a->name);
  3754. result->op = GGML_OP_RESHAPE;
  3755. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3756. result->src[0] = a;
  3757. return result;
  3758. }
  3759. struct ggml_tensor * ggml_reshape_4d(
  3760. struct ggml_context * ctx,
  3761. struct ggml_tensor * a,
  3762. int64_t ne0,
  3763. int64_t ne1,
  3764. int64_t ne2,
  3765. int64_t ne3) {
  3766. GGML_ASSERT(ggml_is_contiguous(a));
  3767. GGML_ASSERT(ggml_nelements(a) == ne0*ne1*ne2*ne3);
  3768. bool is_node = false;
  3769. if (a->grad) {
  3770. is_node = true;
  3771. }
  3772. const int64_t ne[4] = { ne0, ne1, ne2, ne3 };
  3773. struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 4, ne, a, 0);
  3774. ggml_format_name(result, "%s (reshaped)", a->name);
  3775. result->op = GGML_OP_RESHAPE;
  3776. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3777. result->src[0] = a;
  3778. return result;
  3779. }
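// usage sketch (illustrative): reshape only reinterprets a contiguous buffer, so the element
// counts must match (asserted above) and the result shares a's data:
//
//   struct ggml_tensor * x  = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 24);
//   struct ggml_tensor * x2 = ggml_reshape_2d(ctx, x, 6, 4); // ne = {6, 4}, no copy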
  3780. static struct ggml_tensor * ggml_view_impl(
  3781. struct ggml_context * ctx,
  3782. struct ggml_tensor * a,
  3783. int n_dims,
  3784. const int64_t * ne,
  3785. size_t offset) {
  3786. bool is_node = false;
  3787. if (a->grad) {
  3788. is_node = true;
  3789. }
  3790. struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, n_dims, ne, a, offset);
  3791. ggml_format_name(result, "%s (view)", a->name);
  3792. ggml_set_op_params(result, &offset, sizeof(offset));
  3793. result->op = GGML_OP_VIEW;
  3794. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3795. result->src[0] = a;
  3796. return result;
  3797. }
  3798. // ggml_view_1d
  3799. struct ggml_tensor * ggml_view_1d(
  3800. struct ggml_context * ctx,
  3801. struct ggml_tensor * a,
  3802. int64_t ne0,
  3803. size_t offset) {
  3804. struct ggml_tensor * result = ggml_view_impl(ctx, a, 1, &ne0, offset);
  3805. return result;
  3806. }
  3807. // ggml_view_2d
  3808. struct ggml_tensor * ggml_view_2d(
  3809. struct ggml_context * ctx,
  3810. struct ggml_tensor * a,
  3811. int64_t ne0,
  3812. int64_t ne1,
  3813. size_t nb1,
  3814. size_t offset) {
  3815. const int64_t ne[2] = { ne0, ne1 };
  3816. struct ggml_tensor * result = ggml_view_impl(ctx, a, 2, ne, offset);
  3817. result->nb[1] = nb1;
  3818. result->nb[2] = result->nb[1]*ne1;
  3819. result->nb[3] = result->nb[2];
  3820. return result;
  3821. }
  3822. // ggml_view_3d
  3823. struct ggml_tensor * ggml_view_3d(
  3824. struct ggml_context * ctx,
  3825. struct ggml_tensor * a,
  3826. int64_t ne0,
  3827. int64_t ne1,
  3828. int64_t ne2,
  3829. size_t nb1,
  3830. size_t nb2,
  3831. size_t offset) {
  3832. const int64_t ne[3] = { ne0, ne1, ne2 };
  3833. struct ggml_tensor * result = ggml_view_impl(ctx, a, 3, ne, offset);
  3834. result->nb[1] = nb1;
  3835. result->nb[2] = nb2;
  3836. result->nb[3] = result->nb[2]*ne2;
  3837. return result;
  3838. }
  3839. // ggml_view_4d
  3840. struct ggml_tensor * ggml_view_4d(
  3841. struct ggml_context * ctx,
  3842. struct ggml_tensor * a,
  3843. int64_t ne0,
  3844. int64_t ne1,
  3845. int64_t ne2,
  3846. int64_t ne3,
  3847. size_t nb1,
  3848. size_t nb2,
  3849. size_t nb3,
  3850. size_t offset) {
  3851. const int64_t ne[4] = { ne0, ne1, ne2, ne3 };
  3852. struct ggml_tensor * result = ggml_view_impl(ctx, a, 4, ne, offset);
  3853. result->nb[1] = nb1;
  3854. result->nb[2] = nb2;
  3855. result->nb[3] = nb3;
  3856. return result;
  3857. }
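// usage sketch (illustrative): a view selects a sub-block without copying; the caller passes
// the row stride in bytes (nb1) and a byte offset into the parent tensor:
//
//   struct ggml_tensor * m   = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 10, 4);
//   struct ggml_tensor * sub = ggml_view_2d(ctx, m, 6, 4, m->nb[1], 0); // first 6 columns of each row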
  3858. // ggml_permute
  3859. struct ggml_tensor * ggml_permute(
  3860. struct ggml_context * ctx,
  3861. struct ggml_tensor * a,
  3862. int axis0,
  3863. int axis1,
  3864. int axis2,
  3865. int axis3) {
  3866. GGML_ASSERT(axis0 >= 0 && axis0 < GGML_MAX_DIMS);
  3867. GGML_ASSERT(axis1 >= 0 && axis1 < GGML_MAX_DIMS);
  3868. GGML_ASSERT(axis2 >= 0 && axis2 < GGML_MAX_DIMS);
  3869. GGML_ASSERT(axis3 >= 0 && axis3 < GGML_MAX_DIMS);
  3870. GGML_ASSERT(axis0 != axis1);
  3871. GGML_ASSERT(axis0 != axis2);
  3872. GGML_ASSERT(axis0 != axis3);
  3873. GGML_ASSERT(axis1 != axis2);
  3874. GGML_ASSERT(axis1 != axis3);
  3875. GGML_ASSERT(axis2 != axis3);
  3876. bool is_node = false;
  3877. if (a->grad) {
  3878. is_node = true;
  3879. }
  3880. struct ggml_tensor * result = ggml_view_tensor(ctx, a);
  3881. ggml_format_name(result, "%s (permuted)", a->name);
3882. int64_t ne[GGML_MAX_DIMS]; // full-width types to avoid truncating large sizes/strides
3883. size_t nb[GGML_MAX_DIMS];
  3884. ne[axis0] = a->ne[0];
  3885. ne[axis1] = a->ne[1];
  3886. ne[axis2] = a->ne[2];
  3887. ne[axis3] = a->ne[3];
  3888. nb[axis0] = a->nb[0];
  3889. nb[axis1] = a->nb[1];
  3890. nb[axis2] = a->nb[2];
  3891. nb[axis3] = a->nb[3];
  3892. result->ne[0] = ne[0];
  3893. result->ne[1] = ne[1];
  3894. result->ne[2] = ne[2];
  3895. result->ne[3] = ne[3];
  3896. result->nb[0] = nb[0];
  3897. result->nb[1] = nb[1];
  3898. result->nb[2] = nb[2];
  3899. result->nb[3] = nb[3];
  3900. result->op = GGML_OP_PERMUTE;
  3901. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3902. result->src[0] = a;
  3903. int32_t params[] = { axis0, axis1, axis2, axis3 };
  3904. ggml_set_op_params(result, params, sizeof(params));
  3905. return result;
  3906. }
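// usage sketch (illustrative): each axis argument is the destination position of the
// corresponding source dimension; x below is a hypothetical [head_dim, n_head, n_tokens, 1] tensor:
//
//   struct ggml_tensor * xp = ggml_permute(ctx, x, 0, 2, 1, 3);
//   // xp->ne = {head_dim, n_tokens, n_head, 1}, still a view of x's data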
  3907. // ggml_transpose
  3908. struct ggml_tensor * ggml_transpose(
  3909. struct ggml_context * ctx,
  3910. struct ggml_tensor * a) {
  3911. bool is_node = false;
  3912. if (a->grad) {
  3913. is_node = true;
  3914. }
  3915. struct ggml_tensor * result = ggml_view_tensor(ctx, a);
  3916. ggml_format_name(result, "%s (transposed)", a->name);
  3917. result->ne[0] = a->ne[1];
  3918. result->ne[1] = a->ne[0];
  3919. result->nb[0] = a->nb[1];
  3920. result->nb[1] = a->nb[0];
  3921. result->op = GGML_OP_TRANSPOSE;
  3922. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3923. result->src[0] = a;
  3924. return result;
  3925. }
  3926. // ggml_get_rows
  3927. struct ggml_tensor * ggml_get_rows(
  3928. struct ggml_context * ctx,
  3929. struct ggml_tensor * a,
  3930. struct ggml_tensor * b) {
  3931. GGML_ASSERT(a->ne[2] == b->ne[1]);
  3932. GGML_ASSERT(b->ne[3] == 1);
  3933. GGML_ASSERT(b->type == GGML_TYPE_I32);
  3934. bool is_node = false;
  3935. if (a->grad || b->grad) {
  3936. is_node = true;
  3937. }
  3938. // TODO: implement non F32 return
  3939. enum ggml_type type = GGML_TYPE_F32;
  3940. if (a->type == GGML_TYPE_I32) {
  3941. type = a->type;
  3942. }
  3943. struct ggml_tensor * result = ggml_new_tensor_4d(ctx, type, a->ne[0], b->ne[0], b->ne[1], b->ne[2]);
  3944. result->op = GGML_OP_GET_ROWS;
  3945. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3946. result->src[0] = a;
  3947. result->src[1] = b;
  3948. return result;
  3949. }
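// usage sketch (illustrative): the typical use is an embedding lookup, where b holds token ids
// and one row of a is gathered per id (n_embd, n_vocab and n_tokens are hypothetical sizes):
//
//   struct ggml_tensor * tok_embd = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_vocab);
//   struct ggml_tensor * ids      = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, n_tokens);
//   struct ggml_tensor * cur      = ggml_get_rows(ctx, tok_embd, ids); // ne = {n_embd, n_tokens, 1, 1}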
  3950. // ggml_get_rows_back
  3951. struct ggml_tensor * ggml_get_rows_back(
  3952. struct ggml_context * ctx,
  3953. struct ggml_tensor * a,
  3954. struct ggml_tensor * b,
  3955. struct ggml_tensor * c) {
  3956. GGML_ASSERT(ggml_is_matrix(a) && ggml_is_vector(b) && b->type == GGML_TYPE_I32);
  3957. GGML_ASSERT(ggml_is_matrix(c) && (a->ne[0] == c->ne[0]));
  3958. bool is_node = false;
  3959. if (a->grad || b->grad) {
  3960. is_node = true;
  3961. }
  3962. // TODO: implement non F32 return
  3963. //struct ggml_tensor * result = ggml_new_tensor_2d(ctx, a->type, a->ne[0], b->ne[0]);
  3964. struct ggml_tensor * result = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, c->ne[0], c->ne[1]);
  3965. result->op = GGML_OP_GET_ROWS_BACK;
  3966. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3967. result->src[0] = a;
  3968. result->src[1] = b;
  3969. return result;
  3970. }
  3971. // ggml_diag
  3972. struct ggml_tensor * ggml_diag(
  3973. struct ggml_context * ctx,
  3974. struct ggml_tensor * a) {
  3975. GGML_ASSERT(a->ne[1] == 1);
  3976. bool is_node = false;
  3977. if (a->grad) {
  3978. is_node = true;
  3979. }
  3980. const int64_t ne[4] = { a->ne[0], a->ne[0], a->ne[2], a->ne[3] };
  3981. struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, 4, ne);
  3982. result->op = GGML_OP_DIAG;
  3983. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3984. result->src[0] = a;
  3985. return result;
  3986. }
  3987. // ggml_diag_mask_inf
  3988. static struct ggml_tensor * ggml_diag_mask_inf_impl(
  3989. struct ggml_context * ctx,
  3990. struct ggml_tensor * a,
  3991. int n_past,
  3992. bool inplace) {
  3993. bool is_node = false;
  3994. if (a->grad) {
  3995. is_node = true;
  3996. }
  3997. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3998. int32_t params[] = { n_past };
  3999. ggml_set_op_params(result, params, sizeof(params));
  4000. result->op = GGML_OP_DIAG_MASK_INF;
  4001. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4002. result->src[0] = a;
  4003. return result;
  4004. }
  4005. struct ggml_tensor * ggml_diag_mask_inf(
  4006. struct ggml_context * ctx,
  4007. struct ggml_tensor * a,
  4008. int n_past) {
  4009. return ggml_diag_mask_inf_impl(ctx, a, n_past, false);
  4010. }
  4011. struct ggml_tensor * ggml_diag_mask_inf_inplace(
  4012. struct ggml_context * ctx,
  4013. struct ggml_tensor * a,
  4014. int n_past) {
  4015. return ggml_diag_mask_inf_impl(ctx, a, n_past, true);
  4016. }
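// usage sketch (illustrative): typical use is masking future positions of causal attention
// scores before the softmax (kq is a hypothetical [n_kv, n_tokens, n_head, 1] score tensor):
//
//   kq = ggml_diag_mask_inf_inplace(ctx, kq, n_past); // entries with column > n_past + row become -INF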
  4017. // ggml_diag_mask_zero
  4018. static struct ggml_tensor * ggml_diag_mask_zero_impl(
  4019. struct ggml_context * ctx,
  4020. struct ggml_tensor * a,
  4021. int n_past,
  4022. bool inplace) {
  4023. bool is_node = false;
  4024. if (a->grad) {
  4025. is_node = true;
  4026. }
  4027. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4028. int32_t params[] = { n_past };
  4029. ggml_set_op_params(result, params, sizeof(params));
  4030. result->op = GGML_OP_DIAG_MASK_ZERO;
  4031. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4032. result->src[0] = a;
  4033. return result;
  4034. }
  4035. struct ggml_tensor * ggml_diag_mask_zero(
  4036. struct ggml_context * ctx,
  4037. struct ggml_tensor * a,
  4038. int n_past) {
  4039. return ggml_diag_mask_zero_impl(ctx, a, n_past, false);
  4040. }
  4041. struct ggml_tensor * ggml_diag_mask_zero_inplace(
  4042. struct ggml_context * ctx,
  4043. struct ggml_tensor * a,
  4044. int n_past) {
  4045. return ggml_diag_mask_zero_impl(ctx, a, n_past, true);
  4046. }
  4047. // ggml_soft_max
  4048. static struct ggml_tensor * ggml_soft_max_impl(
  4049. struct ggml_context * ctx,
  4050. struct ggml_tensor * a,
  4051. struct ggml_tensor * mask,
  4052. float scale,
  4053. bool inplace) {
  4054. GGML_ASSERT(ggml_is_contiguous(a));
  4055. if (mask) {
  4056. GGML_ASSERT(ggml_is_contiguous(mask));
  4057. GGML_ASSERT(mask->ne[2] == 1);
  4058. GGML_ASSERT(mask->ne[3] == 1);
  4059. GGML_ASSERT(ggml_can_repeat_rows(mask, a));
  4060. }
  4061. bool is_node = false;
  4062. if (a->grad) {
  4063. is_node = true;
  4064. }
  4065. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4066. float params[] = { scale };
  4067. ggml_set_op_params(result, params, sizeof(params));
  4068. result->op = GGML_OP_SOFT_MAX;
  4069. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4070. result->src[0] = a;
  4071. result->src[1] = mask;
  4072. return result;
  4073. }
  4074. struct ggml_tensor * ggml_soft_max(
  4075. struct ggml_context * ctx,
  4076. struct ggml_tensor * a) {
  4077. return ggml_soft_max_impl(ctx, a, NULL, 1.0f, false);
  4078. }
  4079. struct ggml_tensor * ggml_soft_max_inplace(
  4080. struct ggml_context * ctx,
  4081. struct ggml_tensor * a) {
  4082. return ggml_soft_max_impl(ctx, a, NULL, 1.0f, true);
  4083. }
  4084. struct ggml_tensor * ggml_soft_max_ext(
  4085. struct ggml_context * ctx,
  4086. struct ggml_tensor * a,
  4087. struct ggml_tensor * mask,
  4088. float scale) {
  4089. return ggml_soft_max_impl(ctx, a, mask, scale, false);
  4090. }
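// usage sketch (illustrative): ggml_soft_max_ext fuses the attention scaling and an additive
// mask with the softmax (kq, kq_mask and head_dim are hypothetical):
//
//   kq = ggml_soft_max_ext(ctx, kq, kq_mask, 1.0f/sqrtf((float)head_dim));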
  4091. // ggml_soft_max_back
  4092. static struct ggml_tensor * ggml_soft_max_back_impl(
  4093. struct ggml_context * ctx,
  4094. struct ggml_tensor * a,
  4095. struct ggml_tensor * b,
  4096. bool inplace) {
  4097. bool is_node = false;
  4098. if (a->grad || b->grad) {
4099. is_node = true; // TODO: implement backward pass
  4100. }
  4101. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4102. result->op = GGML_OP_SOFT_MAX_BACK;
  4103. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4104. result->src[0] = a;
  4105. result->src[1] = b;
  4106. return result;
  4107. }
  4108. struct ggml_tensor * ggml_soft_max_back(
  4109. struct ggml_context * ctx,
  4110. struct ggml_tensor * a,
  4111. struct ggml_tensor * b) {
  4112. return ggml_soft_max_back_impl(ctx, a, b, false);
  4113. }
  4114. struct ggml_tensor * ggml_soft_max_back_inplace(
  4115. struct ggml_context * ctx,
  4116. struct ggml_tensor * a,
  4117. struct ggml_tensor * b) {
  4118. return ggml_soft_max_back_impl(ctx, a, b, true);
  4119. }
  4120. // ggml_rope
  4121. static struct ggml_tensor * ggml_rope_impl(
  4122. struct ggml_context * ctx,
  4123. struct ggml_tensor * a,
  4124. struct ggml_tensor * b,
  4125. int n_dims,
  4126. int mode,
  4127. int n_ctx,
  4128. int n_orig_ctx,
  4129. float freq_base,
  4130. float freq_scale,
  4131. float ext_factor,
  4132. float attn_factor,
  4133. float beta_fast,
  4134. float beta_slow,
  4135. float xpos_base,
  4136. bool xpos_down,
  4137. bool inplace) {
  4138. GGML_ASSERT(ggml_is_vector(b));
  4139. GGML_ASSERT(b->type == GGML_TYPE_I32);
  4140. GGML_ASSERT(a->ne[2] == b->ne[0]);
  4141. bool is_node = false;
  4142. if (a->grad) {
  4143. is_node = true;
  4144. }
  4145. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4146. int32_t params[13] = { /*n_past*/ 0, n_dims, mode, n_ctx, n_orig_ctx };
  4147. memcpy(params + 5, &freq_base, sizeof(float));
  4148. memcpy(params + 6, &freq_scale, sizeof(float));
  4149. memcpy(params + 7, &ext_factor, sizeof(float));
  4150. memcpy(params + 8, &attn_factor, sizeof(float));
  4151. memcpy(params + 9, &beta_fast, sizeof(float));
  4152. memcpy(params + 10, &beta_slow, sizeof(float));
  4153. memcpy(params + 11, &xpos_base, sizeof(float));
  4154. memcpy(params + 12, &xpos_down, sizeof(bool));
  4155. ggml_set_op_params(result, params, sizeof(params));
  4156. result->op = GGML_OP_ROPE;
  4157. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4158. result->src[0] = a;
  4159. result->src[1] = b;
  4160. return result;
  4161. }
  4162. struct ggml_tensor * ggml_rope(
  4163. struct ggml_context * ctx,
  4164. struct ggml_tensor * a,
  4165. struct ggml_tensor * b,
  4166. int n_dims,
  4167. int mode,
  4168. int n_ctx) {
  4169. return ggml_rope_impl(
  4170. ctx, a, b, n_dims, mode, n_ctx, 0, 10000.0f, 1.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, false, false
  4171. );
  4172. }
  4173. struct ggml_tensor * ggml_rope_inplace(
  4174. struct ggml_context * ctx,
  4175. struct ggml_tensor * a,
  4176. struct ggml_tensor * b,
  4177. int n_dims,
  4178. int mode,
  4179. int n_ctx) {
  4180. return ggml_rope_impl(
  4181. ctx, a, b, n_dims, mode, n_ctx, 0, 10000.0f, 1.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, false, true
  4182. );
  4183. }
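// usage sketch (illustrative): b holds one int32 position per entry of a's third dimension,
// matching the assert a->ne[2] == b->ne[0] above (q, n_tokens and n_rot are hypothetical):
//
//   struct ggml_tensor * pos = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, n_tokens);
//   q = ggml_rope(ctx, q, pos, n_rot, 0 /*mode*/, 0 /*n_ctx*/);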
  4184. struct ggml_tensor * ggml_rope_custom(
  4185. struct ggml_context * ctx,
  4186. struct ggml_tensor * a,
  4187. struct ggml_tensor * b,
  4188. int n_dims,
  4189. int mode,
  4190. int n_ctx,
  4191. int n_orig_ctx,
  4192. float freq_base,
  4193. float freq_scale,
  4194. float ext_factor,
  4195. float attn_factor,
  4196. float beta_fast,
  4197. float beta_slow) {
  4198. return ggml_rope_impl(
  4199. ctx, a, b, n_dims, mode, n_ctx, n_orig_ctx, freq_base, freq_scale,
  4200. ext_factor, attn_factor, beta_fast, beta_slow, 0.0f, false, false
  4201. );
  4202. }
  4203. struct ggml_tensor * ggml_rope_custom_inplace(
  4204. struct ggml_context * ctx,
  4205. struct ggml_tensor * a,
  4206. struct ggml_tensor * b,
  4207. int n_dims,
  4208. int mode,
  4209. int n_ctx,
  4210. int n_orig_ctx,
  4211. float freq_base,
  4212. float freq_scale,
  4213. float ext_factor,
  4214. float attn_factor,
  4215. float beta_fast,
  4216. float beta_slow) {
  4217. return ggml_rope_impl(
  4218. ctx, a, b, n_dims, mode, n_ctx, n_orig_ctx, freq_base, freq_scale,
  4219. ext_factor, attn_factor, beta_fast, beta_slow, 0.0f, false, true
  4220. );
  4221. }
  4222. struct ggml_tensor * ggml_rope_xpos_inplace(
  4223. struct ggml_context * ctx,
  4224. struct ggml_tensor * a,
  4225. struct ggml_tensor * b,
  4226. int n_dims,
  4227. float base,
  4228. bool down) {
  4229. return ggml_rope_impl(ctx, a, b, n_dims, 0, 0, 0, 10000.0f, 1.0f, 0.0f, 1.0f, 0.0f, 0.0f, base, down, true);
  4230. }
  4231. // ggml_rope_back
  4232. struct ggml_tensor * ggml_rope_back(
  4233. struct ggml_context * ctx,
  4234. struct ggml_tensor * a,
  4235. struct ggml_tensor * b,
  4236. int n_dims,
  4237. int mode,
  4238. int n_ctx,
  4239. int n_orig_ctx,
  4240. float freq_base,
  4241. float freq_scale,
  4242. float ext_factor,
  4243. float attn_factor,
  4244. float beta_fast,
  4245. float beta_slow,
  4246. float xpos_base,
  4247. bool xpos_down) {
  4248. GGML_ASSERT(ggml_is_vector(b));
  4249. GGML_ASSERT(b->type == GGML_TYPE_I32);
  4250. GGML_ASSERT(a->ne[2] == b->ne[0]);
  4251. GGML_ASSERT((mode & 4) == 0 && "ggml_rope_back() for ChatGLM not implemented yet");
  4252. bool is_node = false;
  4253. if (a->grad) {
  4254. is_node = false; // TODO: implement backward
  4255. }
  4256. struct ggml_tensor * result = ggml_dup_tensor(ctx, a);
  4257. int32_t params[13] = { /*n_past*/ 0, n_dims, mode, n_ctx, n_orig_ctx };
  4258. memcpy(params + 5, &freq_base, sizeof(float));
  4259. memcpy(params + 6, &freq_scale, sizeof(float));
  4260. memcpy(params + 7, &ext_factor, sizeof(float));
  4261. memcpy(params + 8, &attn_factor, sizeof(float));
  4262. memcpy(params + 9, &beta_fast, sizeof(float));
  4263. memcpy(params + 10, &beta_slow, sizeof(float));
  4264. memcpy(params + 11, &xpos_base, sizeof(float));
  4265. memcpy(params + 12, &xpos_down, sizeof(bool));
  4266. ggml_set_op_params(result, params, sizeof(params));
  4267. result->op = GGML_OP_ROPE_BACK;
  4268. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4269. result->src[0] = a;
  4270. result->src[1] = b;
  4271. return result;
  4272. }
  4273. // ggml_alibi
  4274. struct ggml_tensor * ggml_alibi(
  4275. struct ggml_context * ctx,
  4276. struct ggml_tensor * a,
  4277. int n_past,
  4278. int n_head,
  4279. float bias_max) {
  4280. GGML_ASSERT(n_past >= 0);
  4281. bool is_node = false;
  4282. if (a->grad) {
  4283. GGML_ASSERT(false); // TODO: implement backward
  4284. is_node = true;
  4285. }
4286. // TODO: when implementing backward, fix this:
  4287. //struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4288. struct ggml_tensor * result = ggml_view_tensor(ctx, a);
  4289. int32_t op_params[3] = { n_past, n_head };
  4290. memcpy(op_params + 2, &bias_max, sizeof(float));
  4291. ggml_set_op_params(result, op_params, sizeof(op_params));
  4292. result->op = GGML_OP_ALIBI;
  4293. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4294. result->src[0] = a;
  4295. return result;
  4296. }
  4297. // ggml_clamp
  4298. struct ggml_tensor * ggml_clamp(
  4299. struct ggml_context * ctx,
  4300. struct ggml_tensor * a,
  4301. float min,
  4302. float max) {
  4303. bool is_node = false;
  4304. if (a->grad) {
  4305. GGML_ASSERT(false); // TODO: implement backward
  4306. is_node = true;
  4307. }
4308. // TODO: when implementing backward, fix this:
  4309. struct ggml_tensor * result = ggml_view_tensor(ctx, a);
  4310. float params[] = { min, max };
  4311. ggml_set_op_params(result, params, sizeof(params));
  4312. result->op = GGML_OP_CLAMP;
  4313. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4314. result->src[0] = a;
  4315. return result;
  4316. }
  4317. // ggml_conv_1d
  4318. static int64_t ggml_calc_conv_output_size(int64_t ins, int64_t ks, int s, int p, int d) {
  4319. return (ins + 2 * p - d * (ks - 1) - 1) / s + 1;
  4320. }
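// worked example (illustrative): for input length 10, kernel size 3, stride 1, padding 1 and
// dilation 1 the formula gives (10 + 2*1 - 1*(3 - 1) - 1)/1 + 1 = 10, i.e. a "same" convolution.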
  4321. GGML_API struct ggml_tensor * ggml_conv_1d(
  4322. struct ggml_context * ctx,
  4323. struct ggml_tensor * a,
  4324. struct ggml_tensor * b,
  4325. int s0,
  4326. int p0,
  4327. int d0) {
  4328. struct ggml_tensor * im2col = ggml_im2col(ctx, a, b, s0, 0, p0, 0, d0, 0, false); // [N, OL, IC * K]
  4329. struct ggml_tensor * result =
  4330. ggml_mul_mat(ctx,
  4331. ggml_reshape_2d(ctx, im2col, im2col->ne[0], (im2col->ne[2] * im2col->ne[1])), // [N, OL, IC * K] => [N*OL, IC * K]
  4332. ggml_reshape_2d(ctx, a, (a->ne[0] * a->ne[1]), a->ne[2])); // [OC,IC, K] => [OC, IC * K]
  4333. result = ggml_reshape_3d(ctx, result, im2col->ne[1], a->ne[2], im2col->ne[2]); // [N, OC, OL]
  4334. return result;
  4335. }
  4336. // ggml_conv_1d_ph
4337. struct ggml_tensor * ggml_conv_1d_ph(
  4338. struct ggml_context * ctx,
  4339. struct ggml_tensor * a,
  4340. struct ggml_tensor * b,
  4341. int s,
  4342. int d) {
  4343. return ggml_conv_1d(ctx, a, b, s, a->ne[0] / 2, d);
  4344. }
  4345. // ggml_conv_transpose_1d
  4346. static int64_t ggml_calc_conv_transpose_1d_output_size(int64_t ins, int64_t ks, int s, int p, int d) {
  4347. return (ins - 1) * s - 2 * p + d * (ks - 1) + 1;
  4348. }
  4349. GGML_API struct ggml_tensor * ggml_conv_transpose_1d(
  4350. struct ggml_context * ctx,
  4351. struct ggml_tensor * a,
  4352. struct ggml_tensor * b,
  4353. int s0,
  4354. int p0,
  4355. int d0) {
  4356. GGML_ASSERT(ggml_is_matrix(b));
  4357. GGML_ASSERT(a->ne[2] == b->ne[1]);
  4358. GGML_ASSERT(a->ne[3] == 1);
  4359. GGML_ASSERT(p0 == 0);
  4360. GGML_ASSERT(d0 == 1);
  4361. bool is_node = false;
  4362. if (a->grad || b->grad) {
  4363. GGML_ASSERT(false); // TODO: implement backward
  4364. is_node = true;
  4365. }
  4366. const int64_t ne[4] = {
  4367. ggml_calc_conv_transpose_1d_output_size(b->ne[0], a->ne[0], s0, 0 /*p0*/, 1 /*d0*/),
  4368. a->ne[1], b->ne[2], 1,
  4369. };
  4370. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);
  4371. int32_t params[] = { s0, p0, d0 };
  4372. ggml_set_op_params(result, params, sizeof(params));
  4373. result->op = GGML_OP_CONV_TRANSPOSE_1D;
  4374. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4375. result->src[0] = a;
  4376. result->src[1] = b;
  4377. return result;
  4378. }
  4379. // ggml_conv_depthwise
  4380. struct ggml_tensor * ggml_conv_depthwise_2d(
  4381. struct ggml_context * ctx,
  4382. struct ggml_tensor * a,
  4383. struct ggml_tensor * b,
  4384. int s0,
  4385. int s1,
  4386. int p0,
  4387. int p1,
  4388. int d0,
  4389. int d1) {
  4390. struct ggml_tensor * new_a = ggml_reshape_4d(ctx, a, a->ne[0], a->ne[1], 1, a->ne[2] * a->ne[3]);
  4391. struct ggml_tensor * im2col = ggml_im2col(ctx, new_a,
  4392. ggml_reshape_4d(ctx, b, b->ne[0], b->ne[1], 1, b->ne[2] * b->ne[3]),
  4393. s0, s1, p0, p1, d0, d1, true); // [N * IC, OH, OW, KH * KW]
  4394. struct ggml_tensor * result =
  4395. ggml_mul_mat(ctx,
  4396. ggml_reshape_4d(ctx, new_a, (new_a->ne[0] * new_a->ne[1]), new_a->ne[2], new_a->ne[3], 1), // [OC,1, KH, KW] => [1, OC, 1, KH * KW]
  4397. ggml_reshape_4d(ctx, im2col, im2col->ne[0], im2col->ne[2] * im2col->ne[1], b->ne[2], b->ne[3])); // [N * IC, OH, OW, KH * KW] => [N, IC, OH * OW, KH * KW]
  4398. result = ggml_reshape_4d(ctx, result, im2col->ne[1], im2col->ne[2], b->ne[2], b->ne[3]); // [N, OC, OH, OW]
  4399. return result;
  4400. }
  4401. // ggml_conv_2d
  4402. // im2col: [N, IC, IH, IW] => [N, OH, OW, IC*KH*KW]
4403. // a: [OC, IC, KH, KW]
  4404. // b: [N, IC, IH, IW]
  4405. // result: [N, OH, OW, IC*KH*KW]
  4406. struct ggml_tensor * ggml_im2col(
  4407. struct ggml_context * ctx,
  4408. struct ggml_tensor * a,
  4409. struct ggml_tensor * b,
  4410. int s0,
  4411. int s1,
  4412. int p0,
  4413. int p1,
  4414. int d0,
  4415. int d1,
  4416. bool is_2D) {
  4417. if(is_2D) {
  4418. GGML_ASSERT(a->ne[2] == b->ne[2]);
  4419. } else {
  4420. GGML_ASSERT(a->ne[1] == b->ne[1]);
  4421. }
  4422. bool is_node = false;
  4423. if (a->grad || b->grad) {
  4424. GGML_ASSERT(false); // TODO: implement backward
  4425. is_node = true;
  4426. }
  4427. const int64_t OH = is_2D ? ggml_calc_conv_output_size(b->ne[1], a->ne[1], s1, p1, d1) : 0;
  4428. const int64_t OW = ggml_calc_conv_output_size(b->ne[0], a->ne[0], s0, p0, d0);
  4429. const int64_t ne[4] = {
  4430. is_2D ? (a->ne[2] * a->ne[1] * a->ne[0]) : a->ne[1] * a->ne[0],
  4431. OW,
  4432. is_2D ? OH : b->ne[2],
  4433. is_2D ? b->ne[3] : 1,
  4434. };
  4435. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F16, 4, ne);
  4436. int32_t params[] = { s0, s1, p0, p1, d0, d1, (is_2D ? 1 : 0) };
  4437. ggml_set_op_params(result, params, sizeof(params));
  4438. result->op = GGML_OP_IM2COL;
  4439. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4440. result->src[0] = a;
  4441. result->src[1] = b;
  4442. return result;
  4443. }
  4444. // a: [OC,IC, KH, KW]
  4445. // b: [N, IC, IH, IW]
  4446. // result: [N, OC, OH, OW]
  4447. struct ggml_tensor * ggml_conv_2d(
  4448. struct ggml_context * ctx,
  4449. struct ggml_tensor * a,
  4450. struct ggml_tensor * b,
  4451. int s0,
  4452. int s1,
  4453. int p0,
  4454. int p1,
  4455. int d0,
  4456. int d1) {
  4457. struct ggml_tensor * im2col = ggml_im2col(ctx, a, b, s0, s1, p0, p1, d0, d1, true); // [N, OH, OW, IC * KH * KW]
  4458. struct ggml_tensor * result =
  4459. ggml_mul_mat(ctx,
  4460. ggml_reshape_2d(ctx, im2col, im2col->ne[0], im2col->ne[3] * im2col->ne[2] * im2col->ne[1]), // [N, OH, OW, IC * KH * KW] => [N*OH*OW, IC * KH * KW]
  4461. ggml_reshape_2d(ctx, a, (a->ne[0] * a->ne[1] * a->ne[2]), a->ne[3])); // [OC,IC, KH, KW] => [OC, IC * KH * KW]
  4462. result = ggml_reshape_4d(ctx, result, im2col->ne[1], im2col->ne[2], a->ne[3], im2col->ne[3]); // [N, OC, OH, OW]
  4463. return result;
  4464. }
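// usage sketch (illustrative): a 3x3 "same" convolution with stride 1, padding 1, dilation 1
// (kernel is a hypothetical [OC, IC, 3, 3] weight tensor, cur a [N, IC, H, W] input):
//
//   cur = ggml_conv_2d(ctx, kernel, cur, 1, 1, 1, 1, 1, 1);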
  4465. // ggml_conv_2d_sk_p0
  4466. struct ggml_tensor * ggml_conv_2d_sk_p0(
  4467. struct ggml_context * ctx,
  4468. struct ggml_tensor * a,
  4469. struct ggml_tensor * b) {
  4470. return ggml_conv_2d(ctx, a, b, a->ne[0], a->ne[1], 0, 0, 1, 1);
  4471. }
  4472. // ggml_conv_2d_s1_ph
  4473. struct ggml_tensor * ggml_conv_2d_s1_ph(
  4474. struct ggml_context * ctx,
  4475. struct ggml_tensor * a,
  4476. struct ggml_tensor * b) {
  4477. return ggml_conv_2d(ctx, a, b, 1, 1, a->ne[0] / 2, a->ne[1] / 2, 1, 1);
  4478. }
  4479. // ggml_conv_transpose_2d_p0
  4480. static int64_t ggml_calc_conv_transpose_output_size(int64_t ins, int64_t ks, int s, int p) {
  4481. return (ins - 1) * s - 2 * p + ks;
  4482. }
  4483. struct ggml_tensor * ggml_conv_transpose_2d_p0(
  4484. struct ggml_context * ctx,
  4485. struct ggml_tensor * a,
  4486. struct ggml_tensor * b,
  4487. int stride) {
  4488. GGML_ASSERT(a->ne[3] == b->ne[2]);
  4489. bool is_node = false;
  4490. if (a->grad || b->grad) {
  4491. GGML_ASSERT(false); // TODO: implement backward
  4492. is_node = true;
  4493. }
  4494. const int64_t ne[4] = {
  4495. ggml_calc_conv_transpose_output_size(b->ne[0], a->ne[0], stride, 0 /*p0*/),
  4496. ggml_calc_conv_transpose_output_size(b->ne[1], a->ne[1], stride, 0 /*p1*/),
  4497. a->ne[2], b->ne[3],
  4498. };
  4499. struct ggml_tensor* result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);
  4500. ggml_set_op_params_i32(result, 0, stride);
  4501. result->op = GGML_OP_CONV_TRANSPOSE_2D;
  4502. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4503. result->src[0] = a;
  4504. result->src[1] = b;
  4505. return result;
  4506. }
  4507. // ggml_pool_*
  4508. static int64_t ggml_calc_pool_output_size(int64_t ins, int ks, int s, float p) {
  4509. return (ins + 2 * p - ks) / s + 1;
  4510. }
  4511. // ggml_pool_1d
  4512. struct ggml_tensor * ggml_pool_1d(
  4513. struct ggml_context * ctx,
  4514. struct ggml_tensor * a,
  4515. enum ggml_op_pool op,
  4516. int k0,
  4517. int s0,
  4518. int p0) {
  4519. bool is_node = false;
  4520. if (a->grad) {
  4521. GGML_ASSERT(false); // TODO: implement backward
  4522. is_node = true;
  4523. }
  4524. const int64_t ne[2] = {
  4525. ggml_calc_pool_output_size(a->ne[0], k0, s0, p0),
  4526. a->ne[1],
  4527. };
  4528. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 2, ne);
  4529. int32_t params[] = { op, k0, s0, p0 };
  4530. ggml_set_op_params(result, params, sizeof(params));
  4531. result->op = GGML_OP_POOL_1D;
  4532. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4533. result->src[0] = a;
  4534. return result;
  4535. }
  4536. // ggml_pool_2d
  4537. struct ggml_tensor * ggml_pool_2d(
  4538. struct ggml_context * ctx,
  4539. struct ggml_tensor * a,
  4540. enum ggml_op_pool op,
  4541. int k0,
  4542. int k1,
  4543. int s0,
  4544. int s1,
  4545. float p0,
  4546. float p1) {
  4547. bool is_node = false;
  4548. if (a->grad) {
  4549. GGML_ASSERT(false); // TODO: implement backward
  4550. is_node = true;
  4551. }
  4552. const int64_t ne[3] = {
  4553. ggml_calc_pool_output_size(a->ne[0], k0, s0, p0),
  4554. ggml_calc_pool_output_size(a->ne[1], k1, s1, p1),
  4555. a->ne[2],
  4556. };
  4557. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 3, ne);
  4558. int32_t params[] = { op, k0, k1, s0, s1, p0, p1 };
  4559. ggml_set_op_params(result, params, sizeof(params));
  4560. result->op = GGML_OP_POOL_2D;
  4561. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4562. result->src[0] = a;
  4563. return result;
  4564. }
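// worked example (illustrative): average-pooling a 224x224 feature map with k0 = k1 = 2,
// s0 = s1 = 2, p0 = p1 = 0 gives (224 + 0 - 2)/2 + 1 = 112 along each spatial dimension:
//
//   cur = ggml_pool_2d(ctx, cur, GGML_OP_POOL_AVG, 2, 2, 2, 2, 0, 0);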
  4565. // ggml_upscale
  4566. static struct ggml_tensor * ggml_upscale_impl(
  4567. struct ggml_context * ctx,
  4568. struct ggml_tensor * a,
  4569. int scale_factor) {
  4570. bool is_node = false;
  4571. if (a->grad) {
  4572. GGML_ASSERT(false); // TODO: implement backward
  4573. is_node = true;
  4574. }
  4575. struct ggml_tensor * result = ggml_new_tensor_4d(ctx, a->type,
  4576. a->ne[0] * scale_factor,
  4577. a->ne[1] * scale_factor,
  4578. a->ne[2], a->ne[3]);
  4579. result->op = GGML_OP_UPSCALE;
  4580. result->op_params[0] = scale_factor;
  4581. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4582. result->src[0] = a;
  4583. return result;
  4584. }
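// ggml_pad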
  4585. struct ggml_tensor * ggml_pad(
  4586. struct ggml_context * ctx,
  4587. struct ggml_tensor * a,
  4588. int p0, int p1, int p2, int p3) {
  4589. bool is_node = false;
  4590. if (a->grad) {
  4591. GGML_ASSERT(false); // TODO: implement backward
  4592. is_node = true;
  4593. }
  4594. struct ggml_tensor * result = ggml_new_tensor_4d(ctx, a->type,
  4595. a->ne[0] + p0,
  4596. a->ne[1] + p1,
  4597. a->ne[2] + p2,
  4598. a->ne[3] + p3);
  4599. result->op = GGML_OP_PAD;
  4600. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4601. result->src[0] = a;
  4602. return result;
  4603. }
  4604. struct ggml_tensor * ggml_upscale(
  4605. struct ggml_context * ctx,
  4606. struct ggml_tensor * a,
  4607. int scale_factor) {
  4608. return ggml_upscale_impl(ctx, a, scale_factor);
  4609. }
  4610. // ggml_argsort
  4611. struct ggml_tensor * ggml_argsort(
  4612. struct ggml_context * ctx,
  4613. struct ggml_tensor * a,
  4614. enum ggml_sort_order order) {
  4615. bool is_node = false;
  4616. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_I32, GGML_MAX_DIMS, a->ne);
  4617. ggml_set_op_params_i32(result, 0, (int32_t) order);
  4618. result->op = GGML_OP_ARGSORT;
  4619. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4620. result->src[0] = a;
  4621. return result;
  4622. }
  4623. // ggml_top_k
  4624. struct ggml_tensor * ggml_top_k(
  4625. struct ggml_context * ctx,
  4626. struct ggml_tensor * a,
  4627. int k) {
  4628. GGML_ASSERT(a->ne[0] >= k);
  4629. struct ggml_tensor * result = ggml_argsort(ctx, a, GGML_SORT_DESC);
  4630. result = ggml_view_4d(ctx, result,
  4631. k, result->ne[1], result->ne[2], result->ne[3],
  4632. result->nb[1], result->nb[2], result->nb[3],
  4633. 0);
  4634. return result;
  4635. }
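// usage sketch (illustrative): the view keeps the parent's row stride, so each result row holds
// the indices of the k largest values of the corresponding row of a (logits is hypothetical):
//
//   struct ggml_tensor * best4 = ggml_top_k(ctx, logits, 4); // I32 indices, best4->ne[0] == 4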
  4636. // ggml_flash_attn
  4637. struct ggml_tensor * ggml_flash_attn(
  4638. struct ggml_context * ctx,
  4639. struct ggml_tensor * q,
  4640. struct ggml_tensor * k,
  4641. struct ggml_tensor * v,
  4642. bool masked) {
  4643. GGML_ASSERT(ggml_can_mul_mat(k, q));
  4644. // TODO: check if vT can be multiplied by (k*qT)
  4645. bool is_node = false;
  4646. if (q->grad || k->grad || v->grad) {
  4647. is_node = true;
  4648. }
  4649. //struct ggml_tensor * result = ggml_dup_tensor(ctx, q);
  4650. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, GGML_MAX_DIMS, q->ne);
  4651. int32_t t = masked ? 1 : 0;
  4652. ggml_set_op_params(result, &t, sizeof(t));
  4653. result->op = GGML_OP_FLASH_ATTN;
  4654. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4655. result->src[0] = q;
  4656. result->src[1] = k;
  4657. result->src[2] = v;
  4658. return result;
  4659. }
  4660. // ggml_flash_ff
  4661. struct ggml_tensor * ggml_flash_ff(
  4662. struct ggml_context * ctx,
  4663. struct ggml_tensor * a,
  4664. struct ggml_tensor * b0,
  4665. struct ggml_tensor * b1,
  4666. struct ggml_tensor * c0,
  4667. struct ggml_tensor * c1) {
  4668. GGML_ASSERT(ggml_can_mul_mat(b0, a));
  4669. // TODO: more checks
  4670. bool is_node = false;
  4671. if (a->grad || b0->grad || b1->grad || c0->grad || c1->grad) {
  4672. is_node = true;
  4673. }
  4674. //struct ggml_tensor * result = ggml_dup_tensor(ctx, a);
  4675. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, GGML_MAX_DIMS, a->ne);
  4676. result->op = GGML_OP_FLASH_FF;
  4677. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4678. result->src[0] = a;
  4679. result->src[1] = b0;
  4680. result->src[2] = b1;
  4681. result->src[3] = c0;
  4682. result->src[4] = c1;
  4683. return result;
  4684. }
  4685. // ggml_flash_attn_back
  4686. struct ggml_tensor * ggml_flash_attn_back(
  4687. struct ggml_context * ctx,
  4688. struct ggml_tensor * q,
  4689. struct ggml_tensor * k,
  4690. struct ggml_tensor * v,
  4691. struct ggml_tensor * d,
  4692. bool masked) {
  4693. GGML_ASSERT(ggml_can_mul_mat(k, q));
  4694. // TODO: check if vT can be multiplied by (k*qT)
  4695. // d shape [D,N,ne2,ne3]
  4696. // q shape [D,N,ne2,ne3]
  4697. // k shape [D,M,kvne2,ne3]
  4698. // v shape [M,D,kvne2,ne3]
  4699. const int64_t D = q->ne[0];
  4700. const int64_t N = q->ne[1];
  4701. const int64_t M = k->ne[1];
  4702. const int64_t ne2 = q->ne[2];
  4703. const int64_t ne3 = q->ne[3];
  4704. const int64_t kvne2 = k->ne[2];
  4705. GGML_ASSERT(k->ne[0] == D);
  4706. GGML_ASSERT(v->ne[0] == M);
  4707. GGML_ASSERT(v->ne[1] == D);
  4708. GGML_ASSERT(d->ne[0] == D);
  4709. GGML_ASSERT(d->ne[1] == N);
  4710. GGML_ASSERT(k->ne[2] == kvne2);
  4711. GGML_ASSERT(k->ne[3] == ne3);
  4712. GGML_ASSERT(v->ne[2] == kvne2);
  4713. GGML_ASSERT(v->ne[3] == ne3);
  4714. GGML_ASSERT(d->ne[2] == ne2);
  4715. GGML_ASSERT(d->ne[3] == ne3);
  4716. GGML_ASSERT(ne2 % kvne2 == 0);
  4717. bool is_node = false;
  4718. if (q->grad || k->grad || v->grad) {
4719. // when using this operation (in the backward pass) these grads are already set.
4720. // we don't want to create a (big) grad of our result, so is_node is false.
  4721. is_node = false;
  4722. }
4723. // store gradients of q, k and v as contiguous tensors concatenated in result.
  4724. // note: v and gradv are actually transposed, i.e. v->ne[0] != D.
  4725. const int64_t elem_q = ggml_nelements(q);
  4726. const int64_t elem_k = ggml_nelements(k);
  4727. const int64_t elem_v = ggml_nelements(v);
  4728. enum ggml_type result_type = GGML_TYPE_F32;
  4729. GGML_ASSERT(ggml_blck_size(result_type) == 1);
  4730. const size_t tsize = ggml_type_size(result_type);
  4731. const size_t offs_q = 0;
  4732. const size_t offs_k = offs_q + GGML_PAD(elem_q * tsize, GGML_MEM_ALIGN);
  4733. const size_t offs_v = offs_k + GGML_PAD(elem_k * tsize, GGML_MEM_ALIGN);
  4734. const size_t end = offs_v + GGML_PAD(elem_v * tsize, GGML_MEM_ALIGN);
  4735. const size_t nelements = (end + tsize - 1)/tsize;
  4736. struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nelements);
  4737. int32_t masked_i = masked ? 1 : 0;
  4738. ggml_set_op_params(result, &masked_i, sizeof(masked_i));
  4739. result->op = GGML_OP_FLASH_ATTN_BACK;
  4740. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4741. result->src[0] = q;
  4742. result->src[1] = k;
  4743. result->src[2] = v;
  4744. result->src[3] = d;
  4745. return result;
  4746. }
  4747. // ggml_win_part
  4748. struct ggml_tensor * ggml_win_part(
  4749. struct ggml_context * ctx,
  4750. struct ggml_tensor * a,
  4751. int w) {
  4752. GGML_ASSERT(a->ne[3] == 1);
  4753. GGML_ASSERT(a->type == GGML_TYPE_F32);
  4754. bool is_node = false;
  4755. if (a->grad) {
  4756. GGML_ASSERT(false); // TODO: implement backward
  4757. is_node = true;
  4758. }
  4759. // padding
  4760. const int px = (w - a->ne[1]%w)%w;
  4761. const int py = (w - a->ne[2]%w)%w;
  4762. const int npx = (px + a->ne[1])/w;
  4763. const int npy = (py + a->ne[2])/w;
  4764. const int np = npx*npy;
  4765. const int64_t ne[4] = { a->ne[0], w, w, np, };
  4766. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);
  4767. int32_t params[] = { npx, npy, w };
  4768. ggml_set_op_params(result, params, sizeof(params));
  4769. result->op = GGML_OP_WIN_PART;
  4770. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4771. result->src[0] = a;
  4772. return result;
  4773. }
  4774. // ggml_win_unpart
  4775. struct ggml_tensor * ggml_win_unpart(
  4776. struct ggml_context * ctx,
  4777. struct ggml_tensor * a,
  4778. int w0,
  4779. int h0,
  4780. int w) {
  4781. GGML_ASSERT(a->type == GGML_TYPE_F32);
  4782. bool is_node = false;
  4783. if (a->grad) {
  4784. GGML_ASSERT(false); // TODO: implement backward
  4785. is_node = true;
  4786. }
  4787. const int64_t ne[4] = { a->ne[0], w0, h0, 1, };
  4788. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 3, ne);
  4789. int32_t params[] = { w };
  4790. ggml_set_op_params(result, params, sizeof(params));
  4791. result->op = GGML_OP_WIN_UNPART;
  4792. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4793. result->src[0] = a;
  4794. return result;
  4795. }
  4796. // ggml_get_rel_pos
  4797. struct ggml_tensor * ggml_get_rel_pos(
  4798. struct ggml_context * ctx,
  4799. struct ggml_tensor * a,
  4800. int qh,
  4801. int kh) {
  4802. GGML_ASSERT(qh == kh);
  4803. GGML_ASSERT(2*MAX(qh, kh) - 1 == a->ne[1]);
  4804. bool is_node = false;
  4805. if (a->grad) {
  4806. GGML_ASSERT(false); // TODO: implement backward
  4807. is_node = true;
  4808. }
  4809. const int64_t ne[4] = { a->ne[0], kh, qh, 1, };
  4810. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F16, 3, ne);
  4811. result->op = GGML_OP_GET_REL_POS;
  4812. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4813. result->src[0] = a;
  4814. return result;
  4815. }
  4816. // ggml_add_rel_pos
  4817. static struct ggml_tensor * ggml_add_rel_pos_impl(
  4818. struct ggml_context * ctx,
  4819. struct ggml_tensor * a,
  4820. struct ggml_tensor * pw,
  4821. struct ggml_tensor * ph,
  4822. bool inplace) {
  4823. GGML_ASSERT(ggml_are_same_shape(pw, ph));
  4824. GGML_ASSERT(ggml_is_contiguous(a));
  4825. GGML_ASSERT(ggml_is_contiguous(pw));
  4826. GGML_ASSERT(ggml_is_contiguous(ph));
  4827. GGML_ASSERT(ph->type == GGML_TYPE_F32);
  4828. GGML_ASSERT(pw->type == GGML_TYPE_F32);
  4829. GGML_ASSERT(pw->ne[3] == a->ne[2]);
  4830. GGML_ASSERT(pw->ne[0]*pw->ne[0] == a->ne[0]);
  4831. GGML_ASSERT(pw->ne[1]*pw->ne[2] == a->ne[1]);
  4832. bool is_node = false;
  4833. if (!inplace && (a->grad || pw->grad || ph->grad)) {
  4834. is_node = true;
  4835. }
  4836. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4837. ggml_set_op_params_i32(result, 0, inplace ? 1 : 0);
  4838. result->op = GGML_OP_ADD_REL_POS;
  4839. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4840. result->src[0] = a;
  4841. result->src[1] = pw;
  4842. result->src[2] = ph;
  4843. return result;
  4844. }
  4845. struct ggml_tensor * ggml_add_rel_pos(
  4846. struct ggml_context * ctx,
  4847. struct ggml_tensor * a,
  4848. struct ggml_tensor * pw,
  4849. struct ggml_tensor * ph) {
  4850. return ggml_add_rel_pos_impl(ctx, a, pw, ph, false);
  4851. }
  4852. struct ggml_tensor * ggml_add_rel_pos_inplace(
  4853. struct ggml_context * ctx,
  4854. struct ggml_tensor * a,
  4855. struct ggml_tensor * pw,
  4856. struct ggml_tensor * ph) {
  4857. return ggml_add_rel_pos_impl(ctx, a, pw, ph, true);
  4858. }
4859. // ggml_unary
  4860. static struct ggml_tensor * ggml_unary_impl(
  4861. struct ggml_context * ctx,
  4862. struct ggml_tensor * a,
  4863. enum ggml_unary_op op,
  4864. bool inplace) {
  4865. bool is_node = false;
  4866. if (!inplace && (a->grad)) {
  4867. is_node = true;
  4868. }
  4869. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4870. ggml_set_op_params_i32(result, 0, (int32_t) op);
  4871. result->op = GGML_OP_UNARY;
  4872. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4873. result->src[0] = a;
  4874. return result;
  4875. }
  4876. struct ggml_tensor * ggml_unary(
  4877. struct ggml_context * ctx,
  4878. struct ggml_tensor * a,
  4879. enum ggml_unary_op op) {
  4880. return ggml_unary_impl(ctx, a, op, false);
  4881. }
  4882. struct ggml_tensor * ggml_unary_inplace(
  4883. struct ggml_context * ctx,
  4884. struct ggml_tensor * a,
  4885. enum ggml_unary_op op) {
  4886. return ggml_unary_impl(ctx, a, op, true);
  4887. }
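// usage sketch (illustrative): the element-wise activations are all dispatched through this
// single GGML_OP_UNARY node, selected by the enum value, e.g.:
//
//   cur = ggml_unary(ctx, cur, GGML_UNARY_OP_GELU);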
  4888. // ggml_map_unary
  4889. static struct ggml_tensor * ggml_map_unary_impl_f32(
  4890. struct ggml_context * ctx,
  4891. struct ggml_tensor * a,
  4892. const ggml_unary_op_f32_t fun,
  4893. bool inplace) {
  4894. bool is_node = false;
  4895. if (!inplace && a->grad) {
  4896. is_node = true;
  4897. }
  4898. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4899. ggml_set_op_params(result, (const void *) &fun, sizeof(fun));
  4900. result->op = GGML_OP_MAP_UNARY;
  4901. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4902. result->src[0] = a;
  4903. return result;
  4904. }
  4905. struct ggml_tensor * ggml_map_unary_f32(
  4906. struct ggml_context * ctx,
  4907. struct ggml_tensor * a,
  4908. const ggml_unary_op_f32_t fun) {
  4909. return ggml_map_unary_impl_f32(ctx, a, fun, false);
  4910. }
  4911. struct ggml_tensor * ggml_map_unary_inplace_f32(
  4912. struct ggml_context * ctx,
  4913. struct ggml_tensor * a,
  4914. const ggml_unary_op_f32_t fun) {
  4915. return ggml_map_unary_impl_f32(ctx, a, fun, true);
  4916. }
  4917. // ggml_map_binary
  4918. static struct ggml_tensor * ggml_map_binary_impl_f32(
  4919. struct ggml_context * ctx,
  4920. struct ggml_tensor * a,
  4921. struct ggml_tensor * b,
  4922. const ggml_binary_op_f32_t fun,
  4923. bool inplace) {
  4924. GGML_ASSERT(ggml_are_same_shape(a, b));
  4925. bool is_node = false;
  4926. if (!inplace && (a->grad || b->grad)) {
  4927. is_node = true;
  4928. }
  4929. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4930. ggml_set_op_params(result, (const void *) &fun, sizeof(fun));
  4931. result->op = GGML_OP_MAP_BINARY;
  4932. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4933. result->src[0] = a;
  4934. result->src[1] = b;
  4935. return result;
  4936. }
  4937. struct ggml_tensor * ggml_map_binary_f32(
  4938. struct ggml_context * ctx,
  4939. struct ggml_tensor * a,
  4940. struct ggml_tensor * b,
  4941. const ggml_binary_op_f32_t fun) {
  4942. return ggml_map_binary_impl_f32(ctx, a, b, fun, false);
  4943. }
  4944. struct ggml_tensor * ggml_map_binary_inplace_f32(
  4945. struct ggml_context * ctx,
  4946. struct ggml_tensor * a,
  4947. struct ggml_tensor * b,
  4948. const ggml_binary_op_f32_t fun) {
  4949. return ggml_map_binary_impl_f32(ctx, a, b, fun, true);
  4950. }
  4951. // ggml_map_custom1_f32
  4952. static struct ggml_tensor * ggml_map_custom1_impl_f32(
  4953. struct ggml_context * ctx,
  4954. struct ggml_tensor * a,
  4955. const ggml_custom1_op_f32_t fun,
  4956. bool inplace) {
  4957. bool is_node = false;
  4958. if (!inplace && a->grad) {
  4959. is_node = true;
  4960. }
  4961. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4962. ggml_set_op_params(result, (const void *) &fun, sizeof(fun));
  4963. result->op = GGML_OP_MAP_CUSTOM1_F32;
  4964. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4965. result->src[0] = a;
  4966. return result;
  4967. }
  4968. struct ggml_tensor * ggml_map_custom1_f32(
  4969. struct ggml_context * ctx,
  4970. struct ggml_tensor * a,
  4971. const ggml_custom1_op_f32_t fun) {
  4972. return ggml_map_custom1_impl_f32(ctx, a, fun, false);
  4973. }
  4974. struct ggml_tensor * ggml_map_custom1_inplace_f32(
  4975. struct ggml_context * ctx,
  4976. struct ggml_tensor * a,
  4977. const ggml_custom1_op_f32_t fun) {
  4978. return ggml_map_custom1_impl_f32(ctx, a, fun, true);
  4979. }
  4980. // ggml_map_custom2_f32
  4981. static struct ggml_tensor * ggml_map_custom2_impl_f32(
  4982. struct ggml_context * ctx,
  4983. struct ggml_tensor * a,
  4984. struct ggml_tensor * b,
  4985. const ggml_custom2_op_f32_t fun,
  4986. bool inplace) {
  4987. bool is_node = false;
  4988. if (!inplace && (a->grad || b->grad)) {
  4989. is_node = true;
  4990. }
  4991. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4992. ggml_set_op_params(result, (const void *) &fun, sizeof(fun));
  4993. result->op = GGML_OP_MAP_CUSTOM2_F32;
  4994. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4995. result->src[0] = a;
  4996. result->src[1] = b;
  4997. return result;
  4998. }
  4999. struct ggml_tensor * ggml_map_custom2_f32(
  5000. struct ggml_context * ctx,
  5001. struct ggml_tensor * a,
  5002. struct ggml_tensor * b,
  5003. const ggml_custom2_op_f32_t fun) {
  5004. return ggml_map_custom2_impl_f32(ctx, a, b, fun, false);
  5005. }
  5006. struct ggml_tensor * ggml_map_custom2_inplace_f32(
  5007. struct ggml_context * ctx,
  5008. struct ggml_tensor * a,
  5009. struct ggml_tensor * b,
  5010. const ggml_custom2_op_f32_t fun) {
  5011. return ggml_map_custom2_impl_f32(ctx, a, b, fun, true);
  5012. }
  5013. // ggml_map_custom3_f32
  5014. static struct ggml_tensor * ggml_map_custom3_impl_f32(
  5015. struct ggml_context * ctx,
  5016. struct ggml_tensor * a,
  5017. struct ggml_tensor * b,
  5018. struct ggml_tensor * c,
  5019. const ggml_custom3_op_f32_t fun,
  5020. bool inplace) {
  5021. bool is_node = false;
  5022. if (!inplace && (a->grad || b->grad || c->grad)) {
  5023. is_node = true;
  5024. }
  5025. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  5026. ggml_set_op_params(result, (const void *) &fun, sizeof(fun));
  5027. result->op = GGML_OP_MAP_CUSTOM3_F32;
  5028. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5029. result->src[0] = a;
  5030. result->src[1] = b;
  5031. result->src[2] = c;
  5032. return result;
  5033. }
  5034. struct ggml_tensor * ggml_map_custom3_f32(
  5035. struct ggml_context * ctx,
  5036. struct ggml_tensor * a,
  5037. struct ggml_tensor * b,
  5038. struct ggml_tensor * c,
  5039. const ggml_custom3_op_f32_t fun) {
  5040. return ggml_map_custom3_impl_f32(ctx, a, b, c, fun, false);
  5041. }
  5042. struct ggml_tensor * ggml_map_custom3_inplace_f32(
  5043. struct ggml_context * ctx,
  5044. struct ggml_tensor * a,
  5045. struct ggml_tensor * b,
  5046. struct ggml_tensor * c,
  5047. const ggml_custom3_op_f32_t fun) {
  5048. return ggml_map_custom3_impl_f32(ctx, a, b, c, fun, true);
  5049. }
  5050. // ggml_map_custom1
  5051. struct ggml_map_custom1_op_params {
  5052. ggml_custom1_op_t fun;
  5053. int n_tasks;
  5054. void * userdata;
  5055. };
  5056. static struct ggml_tensor * ggml_map_custom1_impl(
  5057. struct ggml_context * ctx,
  5058. struct ggml_tensor * a,
  5059. const ggml_custom1_op_t fun,
  5060. int n_tasks,
  5061. void * userdata,
  5062. bool inplace) {
  5063. GGML_ASSERT(n_tasks == GGML_N_TASKS_MAX || n_tasks > 0);
  5064. bool is_node = false;
  5065. if (!inplace && a->grad) {
  5066. is_node = true;
  5067. }
  5068. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  5069. struct ggml_map_custom1_op_params params = {
  5070. /*.fun =*/ fun,
  5071. /*.n_tasks =*/ n_tasks,
  5072. /*.userdata =*/ userdata
  5073. };
  5074. ggml_set_op_params(result, (const void *) &params, sizeof(params));
  5075. result->op = GGML_OP_MAP_CUSTOM1;
  5076. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5077. result->src[0] = a;
  5078. return result;
  5079. }
  5080. struct ggml_tensor * ggml_map_custom1(
  5081. struct ggml_context * ctx,
  5082. struct ggml_tensor * a,
  5083. const ggml_custom1_op_t fun,
  5084. int n_tasks,
  5085. void * userdata) {
  5086. return ggml_map_custom1_impl(ctx, a, fun, n_tasks, userdata, false);
  5087. }
  5088. struct ggml_tensor * ggml_map_custom1_inplace(
  5089. struct ggml_context * ctx,
  5090. struct ggml_tensor * a,
  5091. const ggml_custom1_op_t fun,
  5092. int n_tasks,
  5093. void * userdata) {
  5094. return ggml_map_custom1_impl(ctx, a, fun, n_tasks, userdata, true);
  5095. }
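// usage sketch (illustrative): the callback receives the thread index ith of nth threads plus the
// userdata pointer and fills its slice of dst (my_fill is a hypothetical ggml_custom1_op_t):
//
//   static void my_fill(struct ggml_tensor * dst, const struct ggml_tensor * a,
//                       int ith, int nth, void * userdata) { /* write dst rows ith, ith+nth, ... */ }
//
//   cur = ggml_map_custom1(ctx, cur, my_fill, GGML_N_TASKS_MAX, NULL);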
  5096. // ggml_map_custom2
  5097. struct ggml_map_custom2_op_params {
  5098. ggml_custom2_op_t fun;
  5099. int n_tasks;
  5100. void * userdata;
  5101. };
  5102. static struct ggml_tensor * ggml_map_custom2_impl(
  5103. struct ggml_context * ctx,
  5104. struct ggml_tensor * a,
  5105. struct ggml_tensor * b,
  5106. const ggml_custom2_op_t fun,
  5107. int n_tasks,
  5108. void * userdata,
  5109. bool inplace) {
  5110. GGML_ASSERT(n_tasks == GGML_N_TASKS_MAX || n_tasks > 0);
  5111. bool is_node = false;
  5112. if (!inplace && (a->grad || b->grad)) {
  5113. is_node = true;
  5114. }
  5115. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  5116. struct ggml_map_custom2_op_params params = {
  5117. /*.fun =*/ fun,
  5118. /*.n_tasks =*/ n_tasks,
  5119. /*.userdata =*/ userdata
  5120. };
  5121. ggml_set_op_params(result, (const void *) &params, sizeof(params));
  5122. result->op = GGML_OP_MAP_CUSTOM2;
  5123. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5124. result->src[0] = a;
  5125. result->src[1] = b;
  5126. return result;
  5127. }
  5128. struct ggml_tensor * ggml_map_custom2(
  5129. struct ggml_context * ctx,
  5130. struct ggml_tensor * a,
  5131. struct ggml_tensor * b,
  5132. const ggml_custom2_op_t fun,
  5133. int n_tasks,
  5134. void * userdata) {
  5135. return ggml_map_custom2_impl(ctx, a, b, fun, n_tasks, userdata, false);
  5136. }
  5137. struct ggml_tensor * ggml_map_custom2_inplace(
  5138. struct ggml_context * ctx,
  5139. struct ggml_tensor * a,
  5140. struct ggml_tensor * b,
  5141. const ggml_custom2_op_t fun,
  5142. int n_tasks,
  5143. void * userdata) {
  5144. return ggml_map_custom2_impl(ctx, a, b, fun, n_tasks, userdata, true);
  5145. }
  5146. // ggml_map_custom3
  5147. struct ggml_map_custom3_op_params {
  5148. ggml_custom3_op_t fun;
  5149. int n_tasks;
  5150. void * userdata;
  5151. };
  5152. static struct ggml_tensor * ggml_map_custom3_impl(
  5153. struct ggml_context * ctx,
  5154. struct ggml_tensor * a,
  5155. struct ggml_tensor * b,
  5156. struct ggml_tensor * c,
  5157. const ggml_custom3_op_t fun,
  5158. int n_tasks,
  5159. void * userdata,
  5160. bool inplace) {
  5161. GGML_ASSERT(n_tasks == GGML_N_TASKS_MAX || n_tasks > 0);
  5162. bool is_node = false;
  5163. if (!inplace && (a->grad || b->grad || c->grad)) {
  5164. is_node = true;
  5165. }
  5166. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  5167. struct ggml_map_custom3_op_params params = {
  5168. /*.fun =*/ fun,
  5169. /*.n_tasks =*/ n_tasks,
  5170. /*.userdata =*/ userdata
  5171. };
  5172. ggml_set_op_params(result, (const void *) &params, sizeof(params));
  5173. result->op = GGML_OP_MAP_CUSTOM3;
  5174. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5175. result->src[0] = a;
  5176. result->src[1] = b;
  5177. result->src[2] = c;
  5178. return result;
  5179. }
  5180. struct ggml_tensor * ggml_map_custom3(
  5181. struct ggml_context * ctx,
  5182. struct ggml_tensor * a,
  5183. struct ggml_tensor * b,
  5184. struct ggml_tensor * c,
  5185. const ggml_custom3_op_t fun,
  5186. int n_tasks,
  5187. void * userdata) {
  5188. return ggml_map_custom3_impl(ctx, a, b, c, fun, n_tasks, userdata, false);
  5189. }
  5190. struct ggml_tensor * ggml_map_custom3_inplace(
  5191. struct ggml_context * ctx,
  5192. struct ggml_tensor * a,
  5193. struct ggml_tensor * b,
  5194. struct ggml_tensor * c,
  5195. const ggml_custom3_op_t fun,
  5196. int n_tasks,
  5197. void * userdata) {
  5198. return ggml_map_custom3_impl(ctx, a, b, c, fun, n_tasks, userdata, true);
  5199. }
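// The custom2/custom3 variants follow the same pattern with two and three
// source tensors; only the extra src[] entries and the op enum differ.
// `n_tasks` requests how many threads the callback runs on, and
// GGML_N_TASKS_MAX asks for all available threads. A minimal two-input
// callback (illustrative sketch, not part of ggml.c) that splits the work by
// rows instead of by elements:
//
//     static void rows_op(struct ggml_tensor * dst, const struct ggml_tensor * a,
//                         const struct ggml_tensor * b, int ith, int nth, void * userdata) {
//         (void) userdata;
//         const int64_t nr = ggml_nrows(a);
//         for (int64_t ir = ith; ir < nr; ir += nth) {   // interleave rows across threads
//             const float * xa = (const float *) ((const char *) a->data   + ir*a->nb[1]);
//             const float * xb = (const float *) ((const char *) b->data   + ir*b->nb[1]);
//             float       * xd = (float       *) ((char       *) dst->data + ir*dst->nb[1]);
//             for (int64_t i = 0; i < a->ne[0]; ++i) {
//                 xd[i] = xa[i]*xb[i];
//             }
//         }
//     }
//
//     struct ggml_tensor * y = ggml_map_custom2(ctx, a, b, rows_op, GGML_N_TASKS_MAX, NULL);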
  5200. // ggml_cross_entropy_loss
  5201. struct ggml_tensor * ggml_cross_entropy_loss(
  5202. struct ggml_context * ctx,
  5203. struct ggml_tensor * a,
  5204. struct ggml_tensor * b) {
  5205. GGML_ASSERT(ggml_are_same_shape(a, b));
  5206. bool is_node = false;
  5207. if (a->grad || b->grad) {
  5208. is_node = true;
  5209. }
  5210. struct ggml_tensor * result = ggml_new_tensor_1d(ctx, a->type, 1);
  5211. result->op = GGML_OP_CROSS_ENTROPY_LOSS;
  5212. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5213. result->src[0] = a;
  5214. result->src[1] = b;
  5215. return result;
  5216. }
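// Usage sketch (illustrative): `a` carries the predictions and `b` the targets,
// both with the same shape; the op reduces them to a single-element tensor, so
// after the graph has been computed the loss can be read back with
// ggml_get_f32_1d:
//
//     struct ggml_tensor * loss = ggml_cross_entropy_loss(ctx, logits, targets);
//     // ... build and compute the graph ...
//     const float l = ggml_get_f32_1d(loss, 0);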
  5217. // ggml_cross_entropy_loss_back
  5218. struct ggml_tensor * ggml_cross_entropy_loss_back(
  5219. struct ggml_context * ctx,
  5220. struct ggml_tensor * a,
  5221. struct ggml_tensor * b,
  5222. struct ggml_tensor * c) {
  5223. GGML_ASSERT(ggml_are_same_shape(a, b));
  5224. GGML_ASSERT(ggml_is_scalar(c));
  5225. struct ggml_tensor * result = ggml_dup_tensor(ctx, a);
  5226. result->op = GGML_OP_CROSS_ENTROPY_LOSS_BACK;
  5227. result->grad = NULL;
  5228. result->src[0] = a;
  5229. result->src[1] = b;
  5230. result->src[2] = c;
  5231. return result;
  5232. }
  5233. ////////////////////////////////////////////////////////////////////////////////
  5234. void ggml_set_param(
  5235. struct ggml_context * ctx,
  5236. struct ggml_tensor * tensor) {
  5237. tensor->is_param = true;
  5238. GGML_ASSERT(tensor->grad == NULL);
  5239. tensor->grad = ggml_dup_tensor(ctx, tensor);
  5240. ggml_format_name(tensor->grad, "%s (grad)", tensor->name);
  5241. }
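// Typical training setup (sketch): tensors that should receive gradients are
// marked as parameters before the backward graph is built, which is when the
// gradient tensor allocated here gets used:
//
//     struct ggml_tensor * w = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_in, n_out);
//     ggml_set_param(ctx, w); // allocates w->grad and names it "<name> (grad)"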
  5242. // ggml_compute_forward_dup
  5243. static void ggml_compute_forward_dup_same_cont(
  5244. const struct ggml_compute_params * params,
  5245. const struct ggml_tensor * src0,
  5246. struct ggml_tensor * dst) {
  5247. GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));
  5248. GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));
  5249. GGML_ASSERT(src0->type == dst->type);
  5250. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  5251. return;
  5252. }
  5253. const size_t nb00 = src0->nb[0];
  5254. const size_t nb0 = dst->nb[0];
  5255. const int ith = params->ith; // thread index
  5256. const int nth = params->nth; // number of threads
  5257. // parallelize by elements
  5258. const int ne = ggml_nelements(dst);
  5259. const int dr = (ne + nth - 1) / nth;
  5260. const int ie0 = dr * ith;
  5261. const int ie1 = MIN(ie0 + dr, ne);
  5262. if (ie0 < ie1) {
  5263. memcpy(
  5264. ((char *) dst->data + ie0*nb0),
  5265. ((char *) src0->data + ie0*nb00),
  5266. (ie1 - ie0) * ggml_type_size(src0->type));
  5267. }
  5268. }
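// Worked example of the split above: with ne = 10 elements and nth = 4 threads,
// dr = (10 + 3)/4 = 3, so the per-thread ranges [ie0, ie1) are [0,3), [3,6),
// [6,9) and [9,10); any thread whose ie0 lands past ne simply copies nothing.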
  5269. static void ggml_compute_forward_dup_f16(
  5270. const struct ggml_compute_params * params,
  5271. const struct ggml_tensor * src0,
  5272. struct ggml_tensor * dst) {
  5273. GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));
  5274. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  5275. return;
  5276. }
  5277. GGML_TENSOR_UNARY_OP_LOCALS
  5278. const int ith = params->ith; // thread index
  5279. const int nth = params->nth; // number of threads
  5280. if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst) && src0->type == dst->type) {
  5281. ggml_compute_forward_dup_same_cont(params, src0, dst);
  5282. return;
  5283. }
  5284. // parallelize by rows
  5285. const int nr = ne01;
  5286. // number of rows per thread
  5287. const int dr = (nr + nth - 1) / nth;
  5288. // row range for this thread
  5289. const int ir0 = dr * ith;
  5290. const int ir1 = MIN(ir0 + dr, nr);
  5291. if (src0->type == dst->type &&
  5292. ne00 == ne0 &&
  5293. nb00 == ggml_type_size(src0->type) && nb0 == ggml_type_size(dst->type)) {
  5294. // copy by rows
  5295. const size_t rs = ne00*nb00;
  5296. for (int64_t i03 = 0; i03 < ne03; i03++) {
  5297. for (int64_t i02 = 0; i02 < ne02; i02++) {
  5298. for (int64_t i01 = ir0; i01 < ir1; i01++) {
  5299. memcpy(
  5300. ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3),
  5301. ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03),
  5302. rs);
  5303. }
  5304. }
  5305. }
  5306. return;
  5307. }
  5308. // TODO: add more special-case implementations for tensor shapes/strides that can benefit from memcpy
  5309. if (ggml_is_contiguous(dst)) {
  5310. if (nb00 == sizeof(ggml_fp16_t)) {
  5311. if (dst->type == GGML_TYPE_F16) {
  5312. size_t id = 0;
  5313. const size_t rs = ne00 * nb00;
  5314. char * dst_ptr = (char *) dst->data;
  5315. for (int i03 = 0; i03 < ne03; i03++) {
  5316. for (int i02 = 0; i02 < ne02; i02++) {
  5317. id += rs * ir0;
  5318. for (int i01 = ir0; i01 < ir1; i01++) {
  5319. const char * src0_ptr = (char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03;
  5320. memcpy(dst_ptr + id, src0_ptr, rs);
  5321. id += rs;
  5322. }
  5323. id += rs * (ne01 - ir1);
  5324. }
  5325. }
  5326. } else if (dst->type == GGML_TYPE_F32) {
  5327. size_t id = 0;
  5328. float * dst_ptr = (float *) dst->data;
  5329. for (int i03 = 0; i03 < ne03; i03++) {
  5330. for (int i02 = 0; i02 < ne02; i02++) {
  5331. id += ne00 * ir0;
  5332. for (int i01 = ir0; i01 < ir1; i01++) {
  5333. const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
  5334. for (int i00 = 0; i00 < ne00; i00++) {
  5335. dst_ptr[id] = GGML_FP16_TO_FP32(src0_ptr[i00]);
  5336. id++;
  5337. }
  5338. }
  5339. id += ne00 * (ne01 - ir1);
  5340. }
  5341. }
  5342. } else if (type_traits[dst->type].from_float) {
  5343. ggml_from_float_t const quantize_row_q = type_traits[dst->type].from_float;
  5344. float * src0_f32 = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith;
  5345. size_t id = 0;
  5346. size_t rs = nb0 * (ne00 / ggml_blck_size(dst->type));
  5347. char * dst_ptr = (char *) dst->data;
  5348. for (int i03 = 0; i03 < ne03; i03++) {
  5349. for (int i02 = 0; i02 < ne02; i02++) {
  5350. id += rs * ir0;
  5351. for (int i01 = ir0; i01 < ir1; i01++) {
  5352. const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
  5353. for (int i00 = 0; i00 < ne00; i00++) {
  5354. src0_f32[i00] = GGML_FP16_TO_FP32(src0_ptr[i00]);
  5355. }
  5356. quantize_row_q(src0_f32, dst_ptr + id, ne00);
  5357. id += rs;
  5358. }
  5359. id += rs * (ne01 - ir1);
  5360. }
  5361. }
  5362. } else {
  5363. GGML_ASSERT(false); // TODO: implement
  5364. }
  5365. } else {
  5366. //printf("%s: this is not optimal - fix me\n", __func__);
  5367. if (dst->type == GGML_TYPE_F32) {
  5368. size_t id = 0;
  5369. float * dst_ptr = (float *) dst->data;
  5370. for (int i03 = 0; i03 < ne03; i03++) {
  5371. for (int i02 = 0; i02 < ne02; i02++) {
  5372. id += ne00 * ir0;
  5373. for (int i01 = ir0; i01 < ir1; i01++) {
  5374. for (int i00 = 0; i00 < ne00; i00++) {
  5375. const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  5376. dst_ptr[id] = GGML_FP16_TO_FP32(*src0_ptr);
  5377. id++;
  5378. }
  5379. }
  5380. id += ne00 * (ne01 - ir1);
  5381. }
  5382. }
  5383. } else if (dst->type == GGML_TYPE_F16) {
  5384. size_t id = 0;
  5385. ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data;
  5386. for (int i03 = 0; i03 < ne03; i03++) {
  5387. for (int i02 = 0; i02 < ne02; i02++) {
  5388. id += ne00 * ir0;
  5389. for (int i01 = ir0; i01 < ir1; i01++) {
  5390. for (int i00 = 0; i00 < ne00; i00++) {
  5391. const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  5392. dst_ptr[id] = *src0_ptr;
  5393. id++;
  5394. }
  5395. }
  5396. id += ne00 * (ne01 - ir1);
  5397. }
  5398. }
  5399. } else {
  5400. GGML_ASSERT(false); // TODO: implement
  5401. }
  5402. }
  5403. return;
  5404. }
  5405. // dst counters
  5406. int64_t i10 = 0;
  5407. int64_t i11 = 0;
  5408. int64_t i12 = 0;
  5409. int64_t i13 = 0;
  5410. if (dst->type == GGML_TYPE_F16) {
  5411. for (int64_t i03 = 0; i03 < ne03; i03++) {
  5412. for (int64_t i02 = 0; i02 < ne02; i02++) {
  5413. i10 += ne00 * ir0;
  5414. while (i10 >= ne0) {
  5415. i10 -= ne0;
  5416. if (++i11 == ne1) {
  5417. i11 = 0;
  5418. if (++i12 == ne2) {
  5419. i12 = 0;
  5420. if (++i13 == ne3) {
  5421. i13 = 0;
  5422. }
  5423. }
  5424. }
  5425. }
  5426. for (int64_t i01 = ir0; i01 < ir1; i01++) {
  5427. for (int64_t i00 = 0; i00 < ne00; i00++) {
  5428. const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  5429. char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3);
  5430. memcpy(dst_ptr, src0_ptr, sizeof(ggml_fp16_t));
5431. if (++i10 == ne0) { // wrap the dst cursor in dst coordinates (ne0..ne3), as in the f32 branch below
5432. i10 = 0;
5433. if (++i11 == ne1) {
5434. i11 = 0;
5435. if (++i12 == ne2) {
5436. i12 = 0;
5437. if (++i13 == ne3) {
  5438. i13 = 0;
  5439. }
  5440. }
  5441. }
  5442. }
  5443. }
  5444. }
  5445. i10 += ne00 * (ne01 - ir1);
  5446. while (i10 >= ne0) {
  5447. i10 -= ne0;
  5448. if (++i11 == ne1) {
  5449. i11 = 0;
  5450. if (++i12 == ne2) {
  5451. i12 = 0;
  5452. if (++i13 == ne3) {
  5453. i13 = 0;
  5454. }
  5455. }
  5456. }
  5457. }
  5458. }
  5459. }
  5460. } else if (dst->type == GGML_TYPE_F32) {
  5461. for (int64_t i03 = 0; i03 < ne03; i03++) {
  5462. for (int64_t i02 = 0; i02 < ne02; i02++) {
  5463. i10 += ne00 * ir0;
  5464. while (i10 >= ne0) {
  5465. i10 -= ne0;
  5466. if (++i11 == ne1) {
  5467. i11 = 0;
  5468. if (++i12 == ne2) {
  5469. i12 = 0;
  5470. if (++i13 == ne3) {
  5471. i13 = 0;
  5472. }
  5473. }
  5474. }
  5475. }
  5476. for (int64_t i01 = ir0; i01 < ir1; i01++) {
  5477. for (int64_t i00 = 0; i00 < ne00; i00++) {
  5478. const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  5479. char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3);
  5480. *(float *) dst_ptr = GGML_FP16_TO_FP32(*(const ggml_fp16_t *) src0_ptr);
  5481. if (++i10 == ne0) {
  5482. i10 = 0;
  5483. if (++i11 == ne1) {
  5484. i11 = 0;
  5485. if (++i12 == ne2) {
  5486. i12 = 0;
  5487. if (++i13 == ne3) {
  5488. i13 = 0;
  5489. }
  5490. }
  5491. }
  5492. }
  5493. }
  5494. }
  5495. i10 += ne00 * (ne01 - ir1);
  5496. while (i10 >= ne0) {
  5497. i10 -= ne0;
  5498. if (++i11 == ne1) {
  5499. i11 = 0;
  5500. if (++i12 == ne2) {
  5501. i12 = 0;
  5502. if (++i13 == ne3) {
  5503. i13 = 0;
  5504. }
  5505. }
  5506. }
  5507. }
  5508. }
  5509. }
  5510. } else {
  5511. GGML_ASSERT(false); // TODO: implement
  5512. }
  5513. }
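// Notes on the non-contiguous path above: i10..i13 form a write cursor into dst
// expressed in dst coordinates (ne0..ne3). Every source element advances the
// cursor by one position and the nested `if (++i1x == nex)` chain carries the
// overflow into the next dimension, so src0 and dst may have different shapes
// as long as they hold the same number of elements. The quantizing branch
// converts one F16 row at a time into the per-thread scratch buffer
// params->wdata (ne00 floats plus one cache line of padding per thread) before
// handing it to the destination type's from_float routine.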
  5514. static void ggml_compute_forward_dup_f32(
  5515. const struct ggml_compute_params * params,
  5516. const struct ggml_tensor * src0,
  5517. struct ggml_tensor * dst) {
  5518. GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));
  5519. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  5520. return;
  5521. }
  5522. GGML_TENSOR_UNARY_OP_LOCALS
  5523. const int ith = params->ith; // thread index
  5524. const int nth = params->nth; // number of threads
  5525. if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst) && src0->type == dst->type) {
  5526. ggml_compute_forward_dup_same_cont(params, src0, dst);
  5527. return;
  5528. }
  5529. // parallelize by rows
  5530. const int nr = ne01;
  5531. // number of rows per thread
  5532. const int dr = (nr + nth - 1) / nth;
  5533. // row range for this thread
  5534. const int ir0 = dr * ith;
  5535. const int ir1 = MIN(ir0 + dr, nr);
  5536. if (src0->type == dst->type &&
  5537. ne00 == ne0 &&
  5538. nb00 == ggml_type_size(src0->type) && nb0 == ggml_type_size(dst->type)) {
  5539. // copy by rows
  5540. const size_t rs = ne00*nb00;
  5541. for (int64_t i03 = 0; i03 < ne03; i03++) {
  5542. for (int64_t i02 = 0; i02 < ne02; i02++) {
  5543. for (int64_t i01 = ir0; i01 < ir1; i01++) {
  5544. memcpy(
  5545. ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3),
  5546. ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03),
  5547. rs);
  5548. }
  5549. }
  5550. }
  5551. return;
  5552. }
  5553. if (ggml_is_contiguous(dst)) {
  5554. // TODO: simplify
  5555. if (nb00 == sizeof(float)) {
  5556. if (dst->type == GGML_TYPE_F32) {
  5557. size_t id = 0;
  5558. const size_t rs = ne00 * nb00;
  5559. char * dst_ptr = (char *) dst->data;
  5560. for (int i03 = 0; i03 < ne03; i03++) {
  5561. for (int i02 = 0; i02 < ne02; i02++) {
  5562. id += rs * ir0;
  5563. for (int i01 = ir0; i01 < ir1; i01++) {
  5564. const char * src0_ptr = (char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03;
  5565. memcpy(dst_ptr + id, src0_ptr, rs);
  5566. id += rs;
  5567. }
  5568. id += rs * (ne01 - ir1);
  5569. }
  5570. }
  5571. } else if (type_traits[dst->type].from_float) {
  5572. ggml_from_float_t const quantize_row_q = type_traits[dst->type].from_float;
  5573. size_t id = 0;
  5574. size_t rs = nb0 * (ne00 / ggml_blck_size(dst->type));
  5575. char * dst_ptr = (char *) dst->data;
  5576. for (int i03 = 0; i03 < ne03; i03++) {
  5577. for (int i02 = 0; i02 < ne02; i02++) {
  5578. id += rs * ir0;
  5579. for (int i01 = ir0; i01 < ir1; i01++) {
  5580. const float * src0_ptr = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
  5581. quantize_row_q(src0_ptr, dst_ptr + id, ne00);
  5582. id += rs;
  5583. }
  5584. id += rs * (ne01 - ir1);
  5585. }
  5586. }
  5587. } else {
  5588. GGML_ASSERT(false); // TODO: implement
  5589. }
  5590. } else {
  5591. //printf("%s: this is not optimal - fix me\n", __func__);
  5592. if (dst->type == GGML_TYPE_F32) {
  5593. size_t id = 0;
  5594. float * dst_ptr = (float *) dst->data;
  5595. for (int i03 = 0; i03 < ne03; i03++) {
  5596. for (int i02 = 0; i02 < ne02; i02++) {
  5597. id += ne00 * ir0;
  5598. for (int i01 = ir0; i01 < ir1; i01++) {
  5599. for (int i00 = 0; i00 < ne00; i00++) {
  5600. const float * src0_ptr = (float *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  5601. dst_ptr[id] = *src0_ptr;
  5602. id++;
  5603. }
  5604. }
  5605. id += ne00 * (ne01 - ir1);
  5606. }
  5607. }
  5608. } else if (dst->type == GGML_TYPE_F16) {
  5609. size_t id = 0;
  5610. ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data;
  5611. for (int i03 = 0; i03 < ne03; i03++) {
  5612. for (int i02 = 0; i02 < ne02; i02++) {
  5613. id += ne00 * ir0;
  5614. for (int i01 = ir0; i01 < ir1; i01++) {
  5615. for (int i00 = 0; i00 < ne00; i00++) {
  5616. const float * src0_ptr = (float *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  5617. dst_ptr[id] = GGML_FP32_TO_FP16(*src0_ptr);
  5618. id++;
  5619. }
  5620. }
  5621. id += ne00 * (ne01 - ir1);
  5622. }
  5623. }
  5624. } else {
  5625. GGML_ASSERT(false); // TODO: implement
  5626. }
  5627. }
  5628. return;
  5629. }
  5630. // dst counters
  5631. int64_t i10 = 0;
  5632. int64_t i11 = 0;
  5633. int64_t i12 = 0;
  5634. int64_t i13 = 0;
  5635. if (dst->type == GGML_TYPE_F32) {
  5636. for (int64_t i03 = 0; i03 < ne03; i03++) {
  5637. for (int64_t i02 = 0; i02 < ne02; i02++) {
  5638. i10 += ne00 * ir0;
  5639. while (i10 >= ne0) {
  5640. i10 -= ne0;
  5641. if (++i11 == ne1) {
  5642. i11 = 0;
  5643. if (++i12 == ne2) {
  5644. i12 = 0;
  5645. if (++i13 == ne3) {
  5646. i13 = 0;
  5647. }
  5648. }
  5649. }
  5650. }
  5651. for (int64_t i01 = ir0; i01 < ir1; i01++) {
  5652. for (int64_t i00 = 0; i00 < ne00; i00++) {
  5653. const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  5654. char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3);
  5655. memcpy(dst_ptr, src0_ptr, sizeof(float));
  5656. if (++i10 == ne0) {
  5657. i10 = 0;
  5658. if (++i11 == ne1) {
  5659. i11 = 0;
  5660. if (++i12 == ne2) {
  5661. i12 = 0;
  5662. if (++i13 == ne3) {
  5663. i13 = 0;
  5664. }
  5665. }
  5666. }
  5667. }
  5668. }
  5669. }
  5670. i10 += ne00 * (ne01 - ir1);
  5671. while (i10 >= ne0) {
  5672. i10 -= ne0;
  5673. if (++i11 == ne1) {
  5674. i11 = 0;
  5675. if (++i12 == ne2) {
  5676. i12 = 0;
  5677. if (++i13 == ne3) {
  5678. i13 = 0;
  5679. }
  5680. }
  5681. }
  5682. }
  5683. }
  5684. }
  5685. } else if (dst->type == GGML_TYPE_F16) {
  5686. for (int64_t i03 = 0; i03 < ne03; i03++) {
  5687. for (int64_t i02 = 0; i02 < ne02; i02++) {
  5688. i10 += ne00 * ir0;
  5689. while (i10 >= ne0) {
  5690. i10 -= ne0;
  5691. if (++i11 == ne1) {
  5692. i11 = 0;
  5693. if (++i12 == ne2) {
  5694. i12 = 0;
  5695. if (++i13 == ne3) {
  5696. i13 = 0;
  5697. }
  5698. }
  5699. }
  5700. }
  5701. for (int64_t i01 = ir0; i01 < ir1; i01++) {
  5702. for (int64_t i00 = 0; i00 < ne00; i00++) {
  5703. const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  5704. char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3);
  5705. *(ggml_fp16_t *) dst_ptr = GGML_FP32_TO_FP16(*(const float *) src0_ptr);
  5706. if (++i10 == ne0) {
  5707. i10 = 0;
  5708. if (++i11 == ne1) {
  5709. i11 = 0;
  5710. if (++i12 == ne2) {
  5711. i12 = 0;
  5712. if (++i13 == ne3) {
  5713. i13 = 0;
  5714. }
  5715. }
  5716. }
  5717. }
  5718. }
  5719. }
  5720. i10 += ne00 * (ne01 - ir1);
  5721. while (i10 >= ne0) {
  5722. i10 -= ne0;
  5723. if (++i11 == ne1) {
  5724. i11 = 0;
  5725. if (++i12 == ne2) {
  5726. i12 = 0;
  5727. if (++i13 == ne3) {
  5728. i13 = 0;
  5729. }
  5730. }
  5731. }
  5732. }
  5733. }
  5734. }
  5735. } else {
  5736. GGML_ASSERT(false); // TODO: implement
  5737. }
  5738. }
  5739. // A simplified version of ggml_compute_forward_dup that doesn't do float upcasting, and just plain old memcpy.
  5740. static void ggml_compute_forward_dup_bytes(
  5741. const struct ggml_compute_params * params,
  5742. const struct ggml_tensor * src0,
  5743. struct ggml_tensor * dst) {
  5744. GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));
  5745. GGML_ASSERT(src0->type == dst->type);
  5746. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  5747. return;
  5748. }
  5749. if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst)) {
  5750. ggml_compute_forward_dup_same_cont(params, src0, dst);
  5751. return;
  5752. }
  5753. GGML_TENSOR_UNARY_OP_LOCALS;
  5754. const size_t type_size = ggml_type_size(src0->type);
  5755. const int ith = params->ith; // thread index
  5756. const int nth = params->nth; // number of threads
  5757. // parallelize by rows
  5758. const int nr = ne01;
  5759. // number of rows per thread
  5760. const int dr = (nr + nth - 1) / nth;
  5761. // row range for this thread
  5762. const int ir0 = dr * ith;
  5763. const int ir1 = MIN(ir0 + dr, nr);
  5764. if (src0->type == dst->type &&
  5765. ne00 == ne0 &&
  5766. nb00 == type_size && nb0 == type_size) {
  5767. // copy by rows
  5768. const size_t rs = ne00 * type_size;
  5769. for (int64_t i03 = 0; i03 < ne03; i03++) {
  5770. for (int64_t i02 = 0; i02 < ne02; i02++) {
  5771. for (int64_t i01 = ir0; i01 < ir1; i01++) {
  5772. memcpy(
  5773. ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3),
  5774. ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03),
  5775. rs);
  5776. }
  5777. }
  5778. }
  5779. return;
  5780. }
  5781. if (ggml_is_contiguous(dst)) {
  5782. size_t id = 0;
  5783. char * dst_ptr = (char *) dst->data;
  5784. const size_t rs = ne00 * type_size;
  5785. if (nb00 == type_size) {
5786. // src0 is contiguous on first dimension, copy by rows
  5787. for (int64_t i03 = 0; i03 < ne03; i03++) {
  5788. for (int64_t i02 = 0; i02 < ne02; i02++) {
  5789. id += rs * ir0;
  5790. for (int64_t i01 = ir0; i01 < ir1; i01++) {
  5791. const char * src0_ptr = (char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03;
  5792. memcpy(dst_ptr + id, src0_ptr, rs);
  5793. id += rs;
  5794. }
  5795. id += rs * (ne01 - ir1);
  5796. }
  5797. }
  5798. } else {
  5799. //printf("%s: this is not optimal - fix me\n", __func__);
  5800. for (int64_t i03 = 0; i03 < ne03; i03++) {
  5801. for (int64_t i02 = 0; i02 < ne02; i02++) {
  5802. id += rs * ir0;
  5803. for (int64_t i01 = ir0; i01 < ir1; i01++) {
  5804. for (int64_t i00 = 0; i00 < ne00; i00++) {
  5805. const char * src0_ptr = (char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03;
  5806. memcpy(dst_ptr + id, src0_ptr, type_size);
  5807. id += type_size;
  5808. }
  5809. }
  5810. id += rs * (ne01 - ir1);
  5811. }
  5812. }
  5813. }
  5814. return;
  5815. }
  5816. // dst counters
  5817. int64_t i10 = 0;
  5818. int64_t i11 = 0;
  5819. int64_t i12 = 0;
  5820. int64_t i13 = 0;
  5821. for (int64_t i03 = 0; i03 < ne03; i03++) {
  5822. for (int64_t i02 = 0; i02 < ne02; i02++) {
  5823. i10 += ne00 * ir0;
  5824. while (i10 >= ne0) {
  5825. i10 -= ne0;
  5826. if (++i11 == ne1) {
  5827. i11 = 0;
  5828. if (++i12 == ne2) {
  5829. i12 = 0;
  5830. if (++i13 == ne3) {
  5831. i13 = 0;
  5832. }
  5833. }
  5834. }
  5835. }
  5836. for (int64_t i01 = ir0; i01 < ir1; i01++) {
  5837. for (int64_t i00 = 0; i00 < ne00; i00++) {
  5838. const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  5839. char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3);
  5840. memcpy(dst_ptr, src0_ptr, type_size);
  5841. if (++i10 == ne0) {
  5842. i10 = 0;
  5843. if (++i11 == ne1) {
  5844. i11 = 0;
  5845. if (++i12 == ne2) {
  5846. i12 = 0;
  5847. if (++i13 == ne3) {
  5848. i13 = 0;
  5849. }
  5850. }
  5851. }
  5852. }
  5853. }
  5854. }
  5855. i10 += ne00 * (ne01 - ir1);
  5856. while (i10 >= ne0) {
  5857. i10 -= ne0;
  5858. if (++i11 == ne1) {
  5859. i11 = 0;
  5860. if (++i12 == ne2) {
  5861. i12 = 0;
  5862. if (++i13 == ne3) {
  5863. i13 = 0;
  5864. }
  5865. }
  5866. }
  5867. }
  5868. }
  5869. }
  5870. }
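// Design note: when src0 and dst share the same type there is nothing to
// convert, so this function can treat the data as opaque bytes and rely on
// memcpy alone; same-type copies of quantized tensors therefore also take this
// path instead of the F16/F32 conversion code above.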
  5871. static void ggml_compute_forward_dup(
  5872. const struct ggml_compute_params * params,
  5873. const struct ggml_tensor * src0,
  5874. struct ggml_tensor * dst) {
  5875. if (src0->type == dst->type) {
  5876. ggml_compute_forward_dup_bytes(params, src0, dst);
  5877. return;
  5878. }
  5879. switch (src0->type) {
  5880. case GGML_TYPE_F16:
  5881. {
  5882. ggml_compute_forward_dup_f16(params, src0, dst);
  5883. } break;
  5884. case GGML_TYPE_F32:
  5885. {
  5886. ggml_compute_forward_dup_f32(params, src0, dst);
  5887. } break;
  5888. default:
  5889. {
  5890. GGML_ASSERT(false);
  5891. } break;
  5892. }
  5893. }
  5894. // ggml_compute_forward_add
  5895. static void ggml_compute_forward_add_f32(
  5896. const struct ggml_compute_params * params,
  5897. const struct ggml_tensor * src0,
  5898. const struct ggml_tensor * src1,
  5899. struct ggml_tensor * dst) {
  5900. GGML_ASSERT(ggml_can_repeat(src1, src0) && ggml_are_same_shape(src0, dst));
  5901. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  5902. return;
  5903. }
  5904. const int ith = params->ith;
  5905. const int nth = params->nth;
  5906. #ifdef GGML_USE_CLBLAST
  5907. if (src1->backend == GGML_BACKEND_GPU) {
  5908. // TODO: OpenCL kernel support full broadcast
  5909. GGML_ASSERT(ggml_can_repeat_rows(src1, src0));
  5910. if (ith == 0) {
  5911. ggml_cl_add(src0, src1, dst);
  5912. }
  5913. return;
  5914. }
  5915. #endif
  5916. const int nr = ggml_nrows(src0);
  5917. GGML_TENSOR_BINARY_OP_LOCALS
  5918. GGML_ASSERT( nb0 == sizeof(float));
  5919. GGML_ASSERT(nb00 == sizeof(float));
  5920. // rows per thread
  5921. const int dr = (nr + nth - 1)/nth;
  5922. // row range for this thread
  5923. const int ir0 = dr*ith;
  5924. const int ir1 = MIN(ir0 + dr, nr);
  5925. if (nb10 == sizeof(float)) {
  5926. for (int ir = ir0; ir < ir1; ++ir) {
  5927. // src1 is broadcastable across src0 and dst in i1, i2, i3
  5928. const int64_t i03 = ir/(ne02*ne01);
  5929. const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
  5930. const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);
  5931. const int64_t i13 = i03 % ne13;
  5932. const int64_t i12 = i02 % ne12;
  5933. const int64_t i11 = i01 % ne11;
  5934. const int64_t nr0 = ne00 / ne10;
  5935. float * dst_ptr = (float *) ((char *) dst->data + i03*nb3 + i02*nb2 + i01*nb1 );
  5936. float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);
  5937. float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11);
  5938. for (int64_t r = 0; r < nr0; ++r) {
  5939. #ifdef GGML_USE_ACCELERATE
  5940. vDSP_vadd(src0_ptr + r*ne10, 1, src1_ptr, 1, dst_ptr + r*ne10, 1, ne10);
  5941. #else
  5942. ggml_vec_add_f32(ne10, dst_ptr + r*ne10, src0_ptr + r*ne10, src1_ptr);
  5943. #endif
  5944. }
  5945. }
  5946. } else {
  5947. // src1 is not contiguous
  5948. for (int ir = ir0; ir < ir1; ++ir) {
  5949. // src1 is broadcastable across src0 and dst in i1, i2, i3
  5950. const int64_t i03 = ir/(ne02*ne01);
  5951. const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
  5952. const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);
  5953. const int64_t i13 = i03 % ne13;
  5954. const int64_t i12 = i02 % ne12;
  5955. const int64_t i11 = i01 % ne11;
  5956. float * dst_ptr = (float *) ((char *) dst->data + i03*nb3 + i02*nb2 + i01*nb1 );
  5957. float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);
  5958. for (int64_t i0 = 0; i0 < ne0; ++i0) {
  5959. const int64_t i10 = i0 % ne10;
  5960. float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + i10*nb10);
  5961. dst_ptr[i0] = src0_ptr[i0] + *src1_ptr;
  5962. }
  5963. }
  5964. }
  5965. }
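// Broadcast handling above: ggml_can_repeat(src1, src0) guarantees that every
// src1 dimension divides the corresponding src0 dimension. Rows are matched
// with the modulo indices i11/i12/i13, and within a row src1 is repeated
// nr0 = ne00/ne10 times. For example, adding a bias of shape [4,1,1,1] to a
// tensor of shape [8,3,1,1] gives nr0 = 2, so ggml_vec_add_f32 (or vDSP_vadd)
// is invoked twice per row with the same 4 bias values.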
  5966. static void ggml_compute_forward_add_f16_f32(
  5967. const struct ggml_compute_params * params,
  5968. const struct ggml_tensor * src0,
  5969. const struct ggml_tensor * src1,
  5970. struct ggml_tensor * dst) {
  5971. GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
  5972. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  5973. return;
  5974. }
  5975. const int ith = params->ith;
  5976. const int nth = params->nth;
  5977. const int nr = ggml_nrows(src0);
  5978. GGML_TENSOR_BINARY_OP_LOCALS
  5979. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  5980. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  5981. if (dst->type == GGML_TYPE_F32) {
  5982. GGML_ASSERT( nb0 == sizeof(float));
  5983. }
  5984. else {
  5985. GGML_ASSERT(dst->type == GGML_TYPE_F16);
  5986. GGML_ASSERT( nb0 == sizeof(ggml_fp16_t));
  5987. }
  5988. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  5989. // rows per thread
  5990. const int dr = (nr + nth - 1)/nth;
  5991. // row range for this thread
  5992. const int ir0 = dr*ith;
  5993. const int ir1 = MIN(ir0 + dr, nr);
  5994. if (nb10 == sizeof(float)) {
  5995. if (dst->type == GGML_TYPE_F16) {
  5996. for (int ir = ir0; ir < ir1; ++ir) {
  5997. // src0, src1 and dst are same shape => same indices
  5998. const int i3 = ir/(ne2*ne1);
  5999. const int i2 = (ir - i3*ne2*ne1)/ne1;
  6000. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  6001. ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1);
  6002. ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
  6003. float * src1_ptr = (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11);
  6004. for (int i = 0; i < ne0; i++) {
  6005. dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + src1_ptr[i]);
  6006. }
  6007. }
  6008. } else {
  6009. for (int ir = ir0; ir < ir1; ++ir) {
  6010. // src0, src1 and dst are same shape => same indices
  6011. const int i3 = ir/(ne2*ne1);
  6012. const int i2 = (ir - i3*ne2*ne1)/ne1;
  6013. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  6014. float * dst_ptr = (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1);
  6015. ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
  6016. float * src1_ptr = (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11);
  6017. for (int i = 0; i < ne0; i++) {
  6018. dst_ptr[i] = GGML_FP16_TO_FP32(src0_ptr[i]) + src1_ptr[i];
  6019. }
  6020. }
  6021. }
  6022. }
  6023. else {
  6024. // src1 is not contiguous
  6025. GGML_ASSERT(false);
  6026. }
  6027. }
  6028. static void ggml_compute_forward_add_f16_f16(
  6029. const struct ggml_compute_params * params,
  6030. const struct ggml_tensor * src0,
  6031. const struct ggml_tensor * src1,
  6032. struct ggml_tensor * dst) {
  6033. GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
  6034. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  6035. return;
  6036. }
  6037. const int ith = params->ith;
  6038. const int nth = params->nth;
  6039. const int nr = ggml_nrows(src0);
  6040. GGML_TENSOR_BINARY_OP_LOCALS
  6041. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  6042. GGML_ASSERT(src1->type == GGML_TYPE_F16);
  6043. GGML_ASSERT(dst->type == GGML_TYPE_F16);
  6044. GGML_ASSERT( nb0 == sizeof(ggml_fp16_t));
  6045. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  6046. // rows per thread
  6047. const int dr = (nr + nth - 1)/nth;
  6048. // row range for this thread
  6049. const int ir0 = dr*ith;
  6050. const int ir1 = MIN(ir0 + dr, nr);
  6051. if (nb10 == sizeof(ggml_fp16_t)) {
  6052. for (int ir = ir0; ir < ir1; ++ir) {
  6053. // src0, src1 and dst are same shape => same indices
  6054. const int i3 = ir/(ne2*ne1);
  6055. const int i2 = (ir - i3*ne2*ne1)/ne1;
  6056. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  6057. ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1);
  6058. ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
  6059. ggml_fp16_t * src1_ptr = (ggml_fp16_t *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11);
  6060. for (int i = 0; i < ne0; i++) {
  6061. dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + GGML_FP16_TO_FP32(src1_ptr[i]));
  6062. }
  6063. }
  6064. }
  6065. else {
  6066. // src1 is not contiguous
  6067. GGML_ASSERT(false);
  6068. }
  6069. }
  6070. static void ggml_compute_forward_add_q_f32(
  6071. const struct ggml_compute_params * params,
  6072. const struct ggml_tensor * src0,
  6073. const struct ggml_tensor * src1,
  6074. struct ggml_tensor * dst) {
  6075. GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
  6076. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  6077. return;
  6078. }
  6079. const int nr = ggml_nrows(src0);
  6080. GGML_TENSOR_BINARY_OP_LOCALS
  6081. const int ith = params->ith;
  6082. const int nth = params->nth;
  6083. const enum ggml_type type = src0->type;
  6084. const enum ggml_type dtype = dst->type;
  6085. ggml_to_float_t const dequantize_row_q = type_traits[type].to_float;
  6086. ggml_from_float_t const quantize_row_q = type_traits[dtype].from_float;
  6087. // we don't support permuted src0 or src1
  6088. GGML_ASSERT(nb00 == ggml_type_size(type));
  6089. GGML_ASSERT(nb10 == sizeof(float));
  6090. // dst cannot be transposed or permuted
  6091. GGML_ASSERT(nb0 <= nb1);
  6092. GGML_ASSERT(nb1 <= nb2);
  6093. GGML_ASSERT(nb2 <= nb3);
  6094. GGML_ASSERT(ggml_is_quantized(src0->type));
  6095. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  6096. // rows per thread
  6097. const int dr = (nr + nth - 1)/nth;
  6098. // row range for this thread
  6099. const int ir0 = dr*ith;
  6100. const int ir1 = MIN(ir0 + dr, nr);
  6101. float * wdata = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith;
  6102. for (int ir = ir0; ir < ir1; ++ir) {
  6103. // src0 indices
  6104. const int i03 = ir/(ne02*ne01);
  6105. const int i02 = (ir - i03*ne02*ne01)/ne01;
  6106. const int i01 = (ir - i03*ne02*ne01 - i02*ne01);
  6107. // src1 and dst are same shape as src0 => same indices
  6108. const int i13 = i03;
  6109. const int i12 = i02;
  6110. const int i11 = i01;
  6111. const int i3 = i03;
  6112. const int i2 = i02;
  6113. const int i1 = i01;
  6114. void * src0_row = (void *) ((char *) src0->data + (i01*nb01 + i02*nb02 + i03*nb03));
  6115. float * src1_row = (float *)((char *) src1->data + (i11*nb11 + i12*nb12 + i13*nb13));
  6116. void * dst_row = (void *) ((char *) dst->data + ( i1*nb1 + i2*nb2 + i3*nb3));
  6117. assert(ne00 % 32 == 0);
6118. // dequantize row from src0 to temp buffer
  6119. dequantize_row_q(src0_row, wdata, ne00);
  6120. // add src1
  6121. ggml_vec_acc_f32(ne00, wdata, src1_row);
  6122. // quantize row to dst
  6123. if (quantize_row_q != NULL) {
  6124. quantize_row_q(wdata, dst_row, ne00);
  6125. } else {
  6126. memcpy(dst_row, wdata, ne0*nb0);
  6127. }
  6128. }
  6129. }
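// Row pipeline for quantized addition: each src0 row is dequantized into the
// per-thread scratch buffer wdata (offset by ne00 + CACHE_LINE_SIZE_F32 floats
// per thread so threads never share cache lines), src1 is accumulated in
// float, and the row is re-quantized into dst; when dst->type has no
// from_float routine (e.g. F32) the float row is copied out as-is.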
  6130. static void ggml_compute_forward_add(
  6131. const struct ggml_compute_params * params,
  6132. const struct ggml_tensor * src0,
  6133. const struct ggml_tensor * src1,
  6134. struct ggml_tensor * dst) {
  6135. switch (src0->type) {
  6136. case GGML_TYPE_F32:
  6137. {
  6138. ggml_compute_forward_add_f32(params, src0, src1, dst);
  6139. } break;
  6140. case GGML_TYPE_F16:
  6141. {
  6142. if (src1->type == GGML_TYPE_F16) {
  6143. ggml_compute_forward_add_f16_f16(params, src0, src1, dst);
  6144. }
  6145. else if (src1->type == GGML_TYPE_F32) {
  6146. ggml_compute_forward_add_f16_f32(params, src0, src1, dst);
  6147. }
  6148. else {
  6149. GGML_ASSERT(false);
  6150. }
  6151. } break;
  6152. case GGML_TYPE_Q4_0:
  6153. case GGML_TYPE_Q4_1:
  6154. case GGML_TYPE_Q5_0:
  6155. case GGML_TYPE_Q5_1:
  6156. case GGML_TYPE_Q8_0:
  6157. case GGML_TYPE_Q2_K:
  6158. case GGML_TYPE_Q3_K:
  6159. case GGML_TYPE_Q4_K:
  6160. case GGML_TYPE_Q5_K:
  6161. case GGML_TYPE_Q6_K:
  6162. case GGML_TYPE_IQ2_XXS:
  6163. case GGML_TYPE_IQ2_XS:
  6164. {
  6165. ggml_compute_forward_add_q_f32(params, src0, src1, dst);
  6166. } break;
  6167. default:
  6168. {
  6169. GGML_ASSERT(false);
  6170. } break;
  6171. }
  6172. }
  6173. // ggml_compute_forward_add1
  6174. static void ggml_compute_forward_add1_f32(
  6175. const struct ggml_compute_params * params,
  6176. const struct ggml_tensor * src0,
  6177. const struct ggml_tensor * src1,
  6178. struct ggml_tensor * dst) {
  6179. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  6180. GGML_ASSERT(ggml_is_scalar(src1));
  6181. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  6182. return;
  6183. }
  6184. const int ith = params->ith;
  6185. const int nth = params->nth;
  6186. const int nr = ggml_nrows(src0);
  6187. GGML_TENSOR_UNARY_OP_LOCALS
  6188. GGML_ASSERT( nb0 == sizeof(float));
  6189. GGML_ASSERT(nb00 == sizeof(float));
  6190. // rows per thread
  6191. const int dr = (nr + nth - 1)/nth;
  6192. // row range for this thread
  6193. const int ir0 = dr*ith;
  6194. const int ir1 = MIN(ir0 + dr, nr);
  6195. for (int ir = ir0; ir < ir1; ++ir) {
  6196. // src0 and dst are same shape => same indices
  6197. const int i3 = ir/(ne2*ne1);
  6198. const int i2 = (ir - i3*ne2*ne1)/ne1;
  6199. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  6200. #ifdef GGML_USE_ACCELERATE
  6201. UNUSED(ggml_vec_add1_f32);
  6202. vDSP_vadd(
  6203. (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01), 1,
  6204. (float *) ((char *) src1->data), 0,
  6205. (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ), 1,
  6206. ne0);
  6207. #else
  6208. ggml_vec_add1_f32(ne0,
  6209. (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ),
  6210. (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01),
  6211. *(float *) src1->data);
  6212. #endif
  6213. }
  6214. }
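// Note on the Accelerate path above: src1 is passed to vDSP_vadd with stride 0,
// so the same scalar is re-read for every output element, which is how the
// single value in src1 gets broadcast across the row; the generic fallback does
// the same through ggml_vec_add1_f32.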
  6215. static void ggml_compute_forward_add1_f16_f32(
  6216. const struct ggml_compute_params * params,
  6217. const struct ggml_tensor * src0,
  6218. const struct ggml_tensor * src1,
  6219. struct ggml_tensor * dst) {
  6220. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  6221. GGML_ASSERT(ggml_is_scalar(src1));
  6222. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  6223. return;
  6224. }
  6225. // scalar to add
  6226. const float v = *(float *) src1->data;
  6227. const int ith = params->ith;
  6228. const int nth = params->nth;
  6229. const int nr = ggml_nrows(src0);
  6230. GGML_TENSOR_UNARY_OP_LOCALS
  6231. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  6232. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  6233. GGML_ASSERT(dst->type == GGML_TYPE_F16);
  6234. GGML_ASSERT( nb0 == sizeof(ggml_fp16_t));
  6235. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  6236. // rows per thread
  6237. const int dr = (nr + nth - 1)/nth;
  6238. // row range for this thread
  6239. const int ir0 = dr*ith;
  6240. const int ir1 = MIN(ir0 + dr, nr);
  6241. for (int ir = ir0; ir < ir1; ++ir) {
  6242. // src0 and dst are same shape => same indices
  6243. const int i3 = ir/(ne2*ne1);
  6244. const int i2 = (ir - i3*ne2*ne1)/ne1;
  6245. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  6246. ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 );
  6247. ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
  6248. for (int i = 0; i < ne0; i++) {
  6249. dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + v);
  6250. }
  6251. }
  6252. }
  6253. static void ggml_compute_forward_add1_f16_f16(
  6254. const struct ggml_compute_params * params,
  6255. const struct ggml_tensor * src0,
  6256. const struct ggml_tensor * src1,
  6257. struct ggml_tensor * dst) {
  6258. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  6259. GGML_ASSERT(ggml_is_scalar(src1));
  6260. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  6261. return;
  6262. }
  6263. // scalar to add
  6264. const float v = GGML_FP16_TO_FP32(*(ggml_fp16_t *) src1->data);
  6265. const int ith = params->ith;
  6266. const int nth = params->nth;
  6267. const int nr = ggml_nrows(src0);
  6268. GGML_TENSOR_UNARY_OP_LOCALS
  6269. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  6270. GGML_ASSERT(src1->type == GGML_TYPE_F16);
  6271. GGML_ASSERT(dst->type == GGML_TYPE_F16);
  6272. GGML_ASSERT( nb0 == sizeof(ggml_fp16_t));
  6273. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  6274. // rows per thread
  6275. const int dr = (nr + nth - 1)/nth;
  6276. // row range for this thread
  6277. const int ir0 = dr*ith;
  6278. const int ir1 = MIN(ir0 + dr, nr);
  6279. for (int ir = ir0; ir < ir1; ++ir) {
  6280. // src0 and dst are same shape => same indices
  6281. const int i3 = ir/(ne2*ne1);
  6282. const int i2 = (ir - i3*ne2*ne1)/ne1;
  6283. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  6284. ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 );
  6285. ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
  6286. for (int i = 0; i < ne0; i++) {
  6287. dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + v);
  6288. }
  6289. }
  6290. }
  6291. static void ggml_compute_forward_add1_q_f32(
  6292. const struct ggml_compute_params * params,
  6293. const struct ggml_tensor * src0,
  6294. const struct ggml_tensor * src1,
  6295. struct ggml_tensor * dst) {
  6296. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  6297. GGML_ASSERT(ggml_is_scalar(src1));
  6298. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  6299. return;
  6300. }
  6301. // scalar to add
  6302. const float v = *(float *) src1->data;
  6303. const int ith = params->ith;
  6304. const int nth = params->nth;
  6305. const int nr = ggml_nrows(src0);
  6306. GGML_TENSOR_UNARY_OP_LOCALS
  6307. const enum ggml_type type = src0->type;
  6308. ggml_to_float_t const dequantize_row_q = type_traits[type].to_float;
  6309. ggml_from_float_t const quantize_row_q = type_traits[type].from_float;
  6310. // we don't support permuted src0
  6311. GGML_ASSERT(nb00 == ggml_type_size(type));
  6312. // dst cannot be transposed or permuted
  6313. GGML_ASSERT(nb0 <= nb1);
  6314. GGML_ASSERT(nb1 <= nb2);
  6315. GGML_ASSERT(nb2 <= nb3);
  6316. GGML_ASSERT(ggml_is_quantized(src0->type));
  6317. GGML_ASSERT(dst->type == src0->type);
  6318. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  6319. // rows per thread
  6320. const int dr = (nr + nth - 1)/nth;
  6321. // row range for this thread
  6322. const int ir0 = dr*ith;
  6323. const int ir1 = MIN(ir0 + dr, nr);
  6324. float * wdata = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32) * ith;
  6325. for (int ir = ir0; ir < ir1; ++ir) {
  6326. // src0 and dst are same shape => same indices
  6327. const int i3 = ir/(ne2*ne1);
  6328. const int i2 = (ir - i3*ne2*ne1)/ne1;
  6329. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  6330. void * src0_row = (void *) ((char *) src0->data + (i1*nb01 + i2*nb02 + i3*nb03));
6331. void * dst_row = (void *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3));
  6332. assert(ne0 % 32 == 0);
6333. // dequantize row from src0 to temp buffer
6334. dequantize_row_q(src0_row, wdata, ne0);
6335. // add the scalar value from src1
  6336. ggml_vec_acc1_f32(ne0, wdata, v);
  6337. // quantize row to dst
  6338. quantize_row_q(wdata, dst_row, ne0);
  6339. }
  6340. }
  6341. static void ggml_compute_forward_add1(
  6342. const struct ggml_compute_params * params,
  6343. const struct ggml_tensor * src0,
  6344. const struct ggml_tensor * src1,
  6345. struct ggml_tensor * dst) {
  6346. switch (src0->type) {
  6347. case GGML_TYPE_F32:
  6348. {
  6349. ggml_compute_forward_add1_f32(params, src0, src1, dst);
  6350. } break;
  6351. case GGML_TYPE_F16:
  6352. {
  6353. if (src1->type == GGML_TYPE_F16) {
  6354. ggml_compute_forward_add1_f16_f16(params, src0, src1, dst);
  6355. }
  6356. else if (src1->type == GGML_TYPE_F32) {
  6357. ggml_compute_forward_add1_f16_f32(params, src0, src1, dst);
  6358. }
  6359. else {
  6360. GGML_ASSERT(false);
  6361. }
  6362. } break;
  6363. case GGML_TYPE_Q4_0:
  6364. case GGML_TYPE_Q4_1:
  6365. case GGML_TYPE_Q5_0:
  6366. case GGML_TYPE_Q5_1:
  6367. case GGML_TYPE_Q8_0:
  6368. case GGML_TYPE_Q8_1:
  6369. case GGML_TYPE_Q2_K:
  6370. case GGML_TYPE_Q3_K:
  6371. case GGML_TYPE_Q4_K:
  6372. case GGML_TYPE_Q5_K:
  6373. case GGML_TYPE_Q6_K:
  6374. case GGML_TYPE_IQ2_XXS:
  6375. case GGML_TYPE_IQ2_XS:
  6376. {
  6377. ggml_compute_forward_add1_q_f32(params, src0, src1, dst);
  6378. } break;
  6379. default:
  6380. {
  6381. GGML_ASSERT(false);
  6382. } break;
  6383. }
  6384. }
  6385. // ggml_compute_forward_acc
  6386. static void ggml_compute_forward_acc_f32(
  6387. const struct ggml_compute_params * params,
  6388. const struct ggml_tensor * src0,
  6389. const struct ggml_tensor * src1,
  6390. struct ggml_tensor * dst) {
  6391. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  6392. GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));
6393. // view src0 and dst with these strides and data offset in bytes during acc
  6394. // nb0 is implicitly element_size because src0 and dst are contiguous
  6395. size_t nb1 = ((int32_t *) dst->op_params)[0];
  6396. size_t nb2 = ((int32_t *) dst->op_params)[1];
  6397. size_t nb3 = ((int32_t *) dst->op_params)[2];
  6398. size_t offset = ((int32_t *) dst->op_params)[3];
  6399. bool inplace = (bool) ((int32_t *) dst->op_params)[4];
  6400. if (!inplace && (params->type == GGML_TASK_INIT)) {
  6401. if (params->ith != 0) {
  6402. return;
  6403. }
  6404. // memcpy needs to be synchronized across threads to avoid race conditions.
  6405. // => do it in INIT phase
  6406. memcpy(
  6407. ((char *) dst->data),
  6408. ((char *) src0->data),
  6409. ggml_nbytes(dst));
  6410. }
  6411. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  6412. return;
  6413. }
  6414. const int ith = params->ith;
  6415. const int nth = params->nth;
  6416. const int nr = ggml_nrows(src1);
  6417. const int nc = src1->ne[0];
  6418. GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne)
  6419. GGML_TENSOR_LOCALS(size_t, nb1, src1, nb)
  6420. // src0 and dst as viewed during acc
  6421. const size_t nb0 = ggml_element_size(src0);
  6422. const size_t nb00 = nb0;
  6423. const size_t nb01 = nb1;
  6424. const size_t nb02 = nb2;
  6425. const size_t nb03 = nb3;
  6426. GGML_ASSERT(offset + (ne10 == 0 ? 0 : ne10-1)*nb0 + (ne11 == 0 ? 0 : ne11-1)*nb1 + (ne12 == 0 ? 0 : ne12-1)*nb2 + (ne13 == 0 ? 0 : ne13-1)*nb3 < ggml_nbytes(dst));
  6427. GGML_ASSERT(offset + (ne10 == 0 ? 0 : ne10-1)*nb00 + (ne11 == 0 ? 0 : ne11-1)*nb01 + (ne12 == 0 ? 0 : ne12-1)*nb02 + (ne13 == 0 ? 0 : ne13-1)*nb03 < ggml_nbytes(src0));
  6428. GGML_ASSERT(nb10 == sizeof(float));
  6429. // rows per thread
  6430. const int dr = (nr + nth - 1)/nth;
  6431. // row range for this thread
  6432. const int ir0 = dr*ith;
  6433. const int ir1 = MIN(ir0 + dr, nr);
  6434. for (int ir = ir0; ir < ir1; ++ir) {
  6435. // src0 and dst are viewed with shape of src1 and offset
  6436. // => same indices
  6437. const int i3 = ir/(ne12*ne11);
  6438. const int i2 = (ir - i3*ne12*ne11)/ne11;
  6439. const int i1 = (ir - i3*ne12*ne11 - i2*ne11);
  6440. #ifdef GGML_USE_ACCELERATE
  6441. vDSP_vadd(
  6442. (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + offset), 1,
  6443. (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11), 1,
  6444. (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + offset), 1, nc);
  6445. #else
  6446. ggml_vec_add_f32(nc,
  6447. (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + offset),
  6448. (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + offset),
  6449. (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11));
  6450. #endif
  6451. }
  6452. }
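// op_params layout for GGML_OP_ACC, as decoded above: [0]=nb1, [1]=nb2,
// [2]=nb3, [3]=byte offset, [4]=inplace flag. A hypothetical call that
// accumulates a small F32 tensor `patch` into `big`, starting at element 5 of
// each row, would be set up through the public API roughly like this (sketch):
//
//     struct ggml_tensor * out = ggml_acc(ctx, big, patch,
//             big->nb[1], big->nb[2], big->nb[3], 5*sizeof(float));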
  6453. static void ggml_compute_forward_acc(
  6454. const struct ggml_compute_params * params,
  6455. const struct ggml_tensor * src0,
  6456. const struct ggml_tensor * src1,
  6457. struct ggml_tensor * dst) {
  6458. switch (src0->type) {
  6459. case GGML_TYPE_F32:
  6460. {
  6461. ggml_compute_forward_acc_f32(params, src0, src1, dst);
  6462. } break;
  6463. case GGML_TYPE_F16:
  6464. case GGML_TYPE_Q4_0:
  6465. case GGML_TYPE_Q4_1:
  6466. case GGML_TYPE_Q5_0:
  6467. case GGML_TYPE_Q5_1:
  6468. case GGML_TYPE_Q8_0:
  6469. case GGML_TYPE_Q8_1:
  6470. case GGML_TYPE_Q2_K:
  6471. case GGML_TYPE_Q3_K:
  6472. case GGML_TYPE_Q4_K:
  6473. case GGML_TYPE_Q5_K:
  6474. case GGML_TYPE_Q6_K:
  6475. case GGML_TYPE_IQ2_XXS:
  6476. case GGML_TYPE_IQ2_XS:
  6477. default:
  6478. {
  6479. GGML_ASSERT(false);
  6480. } break;
  6481. }
  6482. }
  6483. // ggml_compute_forward_sub
  6484. static void ggml_compute_forward_sub_f32(
  6485. const struct ggml_compute_params * params,
  6486. const struct ggml_tensor * src0,
  6487. const struct ggml_tensor * src1,
  6488. struct ggml_tensor * dst) {
  6489. assert(params->ith == 0);
  6490. assert(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
  6491. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  6492. return;
  6493. }
  6494. const int nr = ggml_nrows(src0);
  6495. GGML_TENSOR_BINARY_OP_LOCALS
  6496. GGML_ASSERT( nb0 == sizeof(float));
  6497. GGML_ASSERT(nb00 == sizeof(float));
  6498. if (nb10 == sizeof(float)) {
  6499. for (int ir = 0; ir < nr; ++ir) {
  6500. // src0, src1 and dst are same shape => same indices
  6501. const int i3 = ir/(ne2*ne1);
  6502. const int i2 = (ir - i3*ne2*ne1)/ne1;
  6503. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  6504. #ifdef GGML_USE_ACCELERATE
  6505. vDSP_vsub(
  6506. (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11), 1,
  6507. (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01), 1,
  6508. (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ), 1,
  6509. ne0);
  6510. #else
  6511. ggml_vec_sub_f32(ne0,
  6512. (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ),
  6513. (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01),
  6514. (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11));
  6515. #endif
  6518. }
  6519. } else {
  6520. // src1 is not contiguous
  6521. for (int ir = 0; ir < nr; ++ir) {
  6522. // src0, src1 and dst are same shape => same indices
  6523. const int i3 = ir/(ne2*ne1);
  6524. const int i2 = (ir - i3*ne2*ne1)/ne1;
  6525. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  6526. float * dst_ptr = (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 );
  6527. float * src0_ptr = (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
  6528. for (int i0 = 0; i0 < ne0; i0++) {
  6529. float * src1_ptr = (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11 + i0*nb10);
  6530. dst_ptr[i0] = src0_ptr[i0] - *src1_ptr;
  6531. }
  6532. }
  6533. }
  6534. }
  6535. static void ggml_compute_forward_sub(
  6536. const struct ggml_compute_params * params,
  6537. const struct ggml_tensor * src0,
  6538. const struct ggml_tensor * src1,
  6539. struct ggml_tensor * dst) {
  6540. switch (src0->type) {
  6541. case GGML_TYPE_F32:
  6542. {
  6543. ggml_compute_forward_sub_f32(params, src0, src1, dst);
  6544. } break;
  6545. default:
  6546. {
  6547. GGML_ASSERT(false);
  6548. } break;
  6549. }
  6550. }
  6551. // ggml_compute_forward_mul
  6552. static void ggml_compute_forward_mul_f32(
  6553. const struct ggml_compute_params * params,
  6554. const struct ggml_tensor * src0,
  6555. const struct ggml_tensor * src1,
  6556. struct ggml_tensor * dst) {
  6557. GGML_ASSERT(ggml_can_repeat(src1, src0) && ggml_are_same_shape(src0, dst));
  6558. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  6559. return;
  6560. }
  6561. const int ith = params->ith;
  6562. const int nth = params->nth;
  6563. #ifdef GGML_USE_CLBLAST
  6564. if (src1->backend == GGML_BACKEND_GPU) {
  6565. // TODO: OpenCL kernel support full broadcast
  6566. GGML_ASSERT(ggml_can_repeat_rows(src1, src0));
  6567. if (ith == 0) {
  6568. ggml_cl_mul(src0, src1, dst);
  6569. }
  6570. return;
  6571. }
  6572. #endif
  6573. const int64_t nr = ggml_nrows(src0);
  6574. GGML_TENSOR_BINARY_OP_LOCALS
  6575. GGML_ASSERT( nb0 == sizeof(float));
  6576. GGML_ASSERT(nb00 == sizeof(float));
  6577. if (nb10 == sizeof(float)) {
  6578. for (int64_t ir = ith; ir < nr; ir += nth) {
  6579. // src0 and dst are same shape => same indices
  6580. const int64_t i03 = ir/(ne02*ne01);
  6581. const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
  6582. const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);
  6583. const int64_t i13 = i03 % ne13;
  6584. const int64_t i12 = i02 % ne12;
  6585. const int64_t i11 = i01 % ne11;
  6586. const int64_t nr0 = ne00 / ne10;
  6587. float * dst_ptr = (float *) ((char *) dst->data + i03*nb3 + i02*nb2 + i01*nb1 );
  6588. float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);
  6589. float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11);
  6590. for (int64_t r = 0 ; r < nr0; ++r) {
  6591. #ifdef GGML_USE_ACCELERATE
  6592. UNUSED(ggml_vec_mul_f32);
  6593. vDSP_vmul(src0_ptr + r*ne10, 1, src1_ptr, 1, dst_ptr + r*ne10, 1, ne10);
  6594. #else
  6595. ggml_vec_mul_f32(ne10, dst_ptr + r*ne10, src0_ptr + r*ne10, src1_ptr);
  6596. #endif
  6597. }
  6598. }
  6599. } else {
  6600. // src1 is not contiguous
  6601. for (int64_t ir = ith; ir < nr; ir += nth) {
  6602. // src0 and dst are same shape => same indices
  6603. // src1 is broadcastable across src0 and dst in i1, i2, i3
  6604. const int64_t i03 = ir/(ne02*ne01);
  6605. const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
  6606. const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);
  6607. const int64_t i13 = i03 % ne13;
  6608. const int64_t i12 = i02 % ne12;
  6609. const int64_t i11 = i01 % ne11;
  6610. float * dst_ptr = (float *) ((char *) dst->data + i03*nb3 + i02*nb2 + i01*nb1 );
  6611. float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);
  6612. for (int64_t i0 = 0; i0 < ne00; ++i0) {
  6613. const int64_t i10 = i0 % ne10;
  6614. float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + i10*nb10);
  6615. dst_ptr[i0] = src0_ptr[i0] * (*src1_ptr);
  6616. }
  6617. }
  6618. }
  6619. }
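// Design note: unlike ggml_compute_forward_add_f32, which hands each thread a
// contiguous block of rows, the mul kernel above and the div kernel below
// interleave rows across threads with `ir += nth`; both schemes visit every
// row exactly once and differ only in which rows land on which thread.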
  6620. static void ggml_compute_forward_mul(
  6621. const struct ggml_compute_params * params,
  6622. const struct ggml_tensor * src0,
  6623. const struct ggml_tensor * src1,
  6624. struct ggml_tensor * dst) {
  6625. GGML_ASSERT(src1->type == GGML_TYPE_F32 && "only f32 src1 supported for now");
  6626. switch (src0->type) {
  6627. case GGML_TYPE_F32:
  6628. {
  6629. ggml_compute_forward_mul_f32(params, src0, src1, dst);
  6630. } break;
  6631. default:
  6632. {
  6633. GGML_ASSERT(false);
  6634. } break;
  6635. }
  6636. }
  6637. // ggml_compute_forward_div
  6638. static void ggml_compute_forward_div_f32(
  6639. const struct ggml_compute_params * params,
  6640. const struct ggml_tensor * src0,
  6641. const struct ggml_tensor * src1,
  6642. struct ggml_tensor * dst) {
  6643. GGML_ASSERT(ggml_can_repeat(src1, src0) && ggml_are_same_shape(src0, dst));
  6644. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  6645. return;
  6646. }
  6647. const int ith = params->ith;
  6648. const int nth = params->nth;
  6649. const int64_t nr = ggml_nrows(src0);
  6650. GGML_TENSOR_BINARY_OP_LOCALS
  6651. GGML_ASSERT( nb0 == sizeof(float));
  6652. GGML_ASSERT(nb00 == sizeof(float));
  6653. if (nb10 == sizeof(float)) {
  6654. for (int64_t ir = ith; ir < nr; ir += nth) {
  6655. // src0 and dst are same shape => same indices
  6656. const int64_t i03 = ir/(ne02*ne01);
  6657. const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
  6658. const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);
  6659. const int64_t i13 = i03 % ne13;
  6660. const int64_t i12 = i02 % ne12;
  6661. const int64_t i11 = i01 % ne11;
  6662. const int64_t nr0 = ne00 / ne10;
  6663. float * dst_ptr = (float *) ((char *) dst->data + i03*nb3 + i02*nb2 + i01*nb1 );
  6664. float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);
  6665. float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11);
  6666. for (int64_t r = 0; r < nr0; ++r) {
  6667. #ifdef GGML_USE_ACCELERATE
  6668. UNUSED(ggml_vec_div_f32);
  6669. vDSP_vdiv(src1_ptr, 1, src0_ptr + r*ne10, 1, dst_ptr + r*ne10, 1, ne10);
  6670. #else
  6671. ggml_vec_div_f32(ne10, dst_ptr + r*ne10, src0_ptr + r*ne10, src1_ptr);
  6672. #endif
  6673. }
  6674. }
  6675. } else {
  6676. // src1 is not contiguous
  6677. for (int64_t ir = ith; ir < nr; ir += nth) {
  6678. // src0 and dst are same shape => same indices
  6679. // src1 is broadcastable across src0 and dst in i1, i2, i3
  6680. const int64_t i03 = ir/(ne02*ne01);
  6681. const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
  6682. const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);
  6683. const int64_t i13 = i03 % ne13;
  6684. const int64_t i12 = i02 % ne12;
  6685. const int64_t i11 = i01 % ne11;
  6686. float * dst_ptr = (float *) ((char *) dst->data + i03*nb3 + i02*nb2 + i01*nb1 );
  6687. float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);
  6688. for (int64_t i0 = 0; i0 < ne00; ++i0) {
  6689. const int64_t i10 = i0 % ne10;
  6690. float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + i10*nb10);
  6691. dst_ptr[i0] = src0_ptr[i0] / (*src1_ptr);
  6692. }
  6693. }
  6694. }
  6695. }
  6696. static void ggml_compute_forward_div(
  6697. const struct ggml_compute_params * params,
  6698. const struct ggml_tensor * src0,
  6699. const struct ggml_tensor * src1,
  6700. struct ggml_tensor * dst) {
  6701. switch (src0->type) {
  6702. case GGML_TYPE_F32:
  6703. {
  6704. ggml_compute_forward_div_f32(params, src0, src1, dst);
  6705. } break;
  6706. default:
  6707. {
  6708. GGML_ASSERT(false);
  6709. } break;
  6710. }
  6711. }
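// Illustrative sketch (not part of ggml): the mul/div kernels above all recover
// (i01, i02, i03) from a flat row index ir and then wrap each index with % so a
// smaller src1 is broadcast across src0. The helper below is hypothetical and only
// demonstrates that decomposition for one concrete choice of shapes.
static void example_broadcast_row_indices(void) {
    const int64_t ne01 = 3, ne02 = 2, ne03 = 2;  // src0: 3*2*2 = 12 rows
    const int64_t ne11 = 3, ne12 = 1, ne13 = 1;  // src1: repeated across dims 2 and 3
    for (int64_t ir = 0; ir < ne01*ne02*ne03; ++ir) {
        const int64_t i03 = ir/(ne02*ne01);
        const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
        const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);
        // row of src1 read for this row of src0:
        const int64_t i13 = i03 % ne13;  // always 0 for these shapes
        const int64_t i12 = i02 % ne12;  // always 0 for these shapes
        const int64_t i11 = i01 % ne11;  // tracks i01
        UNUSED(i13); UNUSED(i12); UNUSED(i11);
    }
}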
  6712. // ggml_compute_forward_sqr
  6713. static void ggml_compute_forward_sqr_f32(
  6714. const struct ggml_compute_params * params,
  6715. const struct ggml_tensor * src0,
  6716. struct ggml_tensor * dst) {
  6717. assert(params->ith == 0);
  6718. assert(ggml_are_same_shape(src0, dst));
  6719. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  6720. return;
  6721. }
  6722. const int n = ggml_nrows(src0);
  6723. const int nc = src0->ne[0];
  6724. assert( dst->nb[0] == sizeof(float));
  6725. assert(src0->nb[0] == sizeof(float));
  6726. for (int i = 0; i < n; i++) {
  6727. ggml_vec_sqr_f32(nc,
  6728. (float *) ((char *) dst->data + i*( dst->nb[1])),
  6729. (float *) ((char *) src0->data + i*(src0->nb[1])));
  6730. }
  6731. }
  6732. static void ggml_compute_forward_sqr(
  6733. const struct ggml_compute_params * params,
  6734. const struct ggml_tensor * src0,
  6735. struct ggml_tensor * dst) {
  6736. switch (src0->type) {
  6737. case GGML_TYPE_F32:
  6738. {
  6739. ggml_compute_forward_sqr_f32(params, src0, dst);
  6740. } break;
  6741. default:
  6742. {
  6743. GGML_ASSERT(false);
  6744. } break;
  6745. }
  6746. }
  6747. // ggml_compute_forward_sqrt
  6748. static void ggml_compute_forward_sqrt_f32(
  6749. const struct ggml_compute_params * params,
  6750. const struct ggml_tensor * src0,
  6751. struct ggml_tensor * dst) {
  6752. assert(params->ith == 0);
  6753. assert(ggml_are_same_shape(src0, dst));
  6754. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  6755. return;
  6756. }
  6757. const int n = ggml_nrows(src0);
  6758. const int nc = src0->ne[0];
  6759. assert( dst->nb[0] == sizeof(float));
  6760. assert(src0->nb[0] == sizeof(float));
  6761. for (int i = 0; i < n; i++) {
  6762. ggml_vec_sqrt_f32(nc,
  6763. (float *) ((char *) dst->data + i*( dst->nb[1])),
  6764. (float *) ((char *) src0->data + i*(src0->nb[1])));
  6765. }
  6766. }
  6767. static void ggml_compute_forward_sqrt(
  6768. const struct ggml_compute_params * params,
  6769. const struct ggml_tensor * src0,
  6770. struct ggml_tensor * dst) {
  6771. switch (src0->type) {
  6772. case GGML_TYPE_F32:
  6773. {
  6774. ggml_compute_forward_sqrt_f32(params, src0, dst);
  6775. } break;
  6776. default:
  6777. {
  6778. GGML_ASSERT(false);
  6779. } break;
  6780. }
  6781. }
  6782. // ggml_compute_forward_log
  6783. static void ggml_compute_forward_log_f32(
  6784. const struct ggml_compute_params * params,
  6785. const struct ggml_tensor * src0,
  6786. struct ggml_tensor * dst) {
  6787. GGML_ASSERT(params->ith == 0);
  6788. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  6789. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  6790. return;
  6791. }
  6792. const int n = ggml_nrows(src0);
  6793. const int nc = src0->ne[0];
  6794. GGML_ASSERT( dst->nb[0] == sizeof(float));
  6795. GGML_ASSERT(src0->nb[0] == sizeof(float));
  6796. for (int i = 0; i < n; i++) {
  6797. ggml_vec_log_f32(nc,
  6798. (float *) ((char *) dst->data + i*( dst->nb[1])),
  6799. (float *) ((char *) src0->data + i*(src0->nb[1])));
  6800. }
  6801. }
  6802. static void ggml_compute_forward_log(
  6803. const struct ggml_compute_params * params,
  6804. const struct ggml_tensor * src0,
  6805. struct ggml_tensor * dst) {
  6806. switch (src0->type) {
  6807. case GGML_TYPE_F32:
  6808. {
  6809. ggml_compute_forward_log_f32(params, src0, dst);
  6810. } break;
  6811. default:
  6812. {
  6813. GGML_ASSERT(false);
  6814. } break;
  6815. }
  6816. }
  6817. // ggml_compute_forward_sum
  6818. static void ggml_compute_forward_sum_f32(
  6819. const struct ggml_compute_params * params,
  6820. const struct ggml_tensor * src0,
  6821. struct ggml_tensor * dst) {
  6822. assert(params->ith == 0);
  6823. assert(ggml_is_scalar(dst));
  6824. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  6825. return;
  6826. }
  6827. assert(ggml_is_scalar(dst));
  6828. assert(src0->nb[0] == sizeof(float));
  6829. GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne)
  6830. GGML_TENSOR_LOCALS(size_t, nb0, src0, nb)
  6831. ggml_float sum = 0;
  6832. ggml_float row_sum = 0;
  6833. for (int64_t i03 = 0; i03 < ne03; i03++) {
  6834. for (int64_t i02 = 0; i02 < ne02; i02++) {
  6835. for (int64_t i01 = 0; i01 < ne01; i01++) {
  6836. ggml_vec_sum_f32_ggf(ne00,
  6837. &row_sum,
  6838. (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03));
  6839. sum += row_sum;
  6840. }
  6841. }
  6842. }
  6843. ((float *) dst->data)[0] = sum;
  6844. }
  6845. static void ggml_compute_forward_sum_f16(
  6846. const struct ggml_compute_params * params,
  6847. const struct ggml_tensor * src0,
  6848. struct ggml_tensor * dst) {
  6849. assert(params->ith == 0);
  6850. assert(ggml_is_scalar(dst));
  6851. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  6852. return;
  6853. }
  6854. assert(src0->nb[0] == sizeof(ggml_fp16_t));
  6855. GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne)
  6856. GGML_TENSOR_LOCALS(size_t, nb0, src0, nb)
  6857. float sum = 0;
  6858. float row_sum = 0;
  6859. for (int64_t i03 = 0; i03 < ne03; i03++) {
  6860. for (int64_t i02 = 0; i02 < ne02; i02++) {
  6861. for (int64_t i01 = 0; i01 < ne01; i01++) {
  6862. ggml_vec_sum_f16_ggf(ne00,
  6863. &row_sum,
  6864. (ggml_fp16_t *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03));
  6865. sum += row_sum;
  6866. }
  6867. }
  6868. }
  6869. ((ggml_fp16_t *) dst->data)[0] = GGML_FP32_TO_FP16(sum);
  6870. }
  6871. static void ggml_compute_forward_sum(
  6872. const struct ggml_compute_params * params,
  6873. const struct ggml_tensor * src0,
  6874. struct ggml_tensor * dst) {
  6875. switch (src0->type) {
  6876. case GGML_TYPE_F32:
  6877. {
  6878. ggml_compute_forward_sum_f32(params, src0, dst);
  6879. } break;
  6880. case GGML_TYPE_F16:
  6881. {
  6882. ggml_compute_forward_sum_f16(params, src0, dst);
  6883. } break;
  6884. default:
  6885. {
  6886. GGML_ASSERT(false);
  6887. } break;
  6888. }
  6889. }
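// Illustrative sketch (not part of ggml): the f32 sum above accumulates per row and
// across rows in ggml_float, a wider accumulator, before narrowing to the f32 scalar
// result. A plain-C reference of the same reduction over one contiguous buffer
// (the example_* name is hypothetical):
static float example_sum_f32_reference(const float * x, int64_t n) {
    double acc = 0.0;           // wide accumulator, mirroring ggml_float above
    for (int64_t i = 0; i < n; ++i) {
        acc += (double) x[i];
    }
    return (float) acc;         // final narrowing to the scalar dst
}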
  6890. // ggml_compute_forward_sum_rows
  6891. static void ggml_compute_forward_sum_rows_f32(
  6892. const struct ggml_compute_params * params,
  6893. const struct ggml_tensor * src0,
  6894. struct ggml_tensor * dst) {
  6895. GGML_ASSERT(params->ith == 0);
  6896. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  6897. return;
  6898. }
  6899. GGML_ASSERT(src0->nb[0] == sizeof(float));
  6900. GGML_ASSERT(dst->nb[0] == sizeof(float));
  6901. GGML_TENSOR_UNARY_OP_LOCALS
  6902. GGML_ASSERT(ne0 == 1);
  6903. GGML_ASSERT(ne1 == ne01);
  6904. GGML_ASSERT(ne2 == ne02);
  6905. GGML_ASSERT(ne3 == ne03);
  6906. for (int64_t i3 = 0; i3 < ne03; i3++) {
  6907. for (int64_t i2 = 0; i2 < ne02; i2++) {
  6908. for (int64_t i1 = 0; i1 < ne01; i1++) {
  6909. float * src_row = (float *) ((char *) src0->data + i1*nb01 + i2*nb02 + i3*nb03);
  6910. float * dst_row = (float *) ((char *) dst->data + i1*nb1 + i2*nb2 + i3*nb3);
  6911. float row_sum = 0;
  6912. ggml_vec_sum_f32(ne00, &row_sum, src_row);
  6913. dst_row[0] = row_sum;
  6914. }
  6915. }
  6916. }
  6917. }
  6918. static void ggml_compute_forward_sum_rows(
  6919. const struct ggml_compute_params * params,
  6920. const struct ggml_tensor * src0,
  6921. struct ggml_tensor * dst) {
  6922. switch (src0->type) {
  6923. case GGML_TYPE_F32:
  6924. {
  6925. ggml_compute_forward_sum_rows_f32(params, src0, dst);
  6926. } break;
  6927. default:
  6928. {
  6929. GGML_ASSERT(false);
  6930. } break;
  6931. }
  6932. }
  6933. // ggml_compute_forward_mean
  6934. static void ggml_compute_forward_mean_f32(
  6935. const struct ggml_compute_params * params,
  6936. const struct ggml_tensor * src0,
  6937. struct ggml_tensor * dst) {
  6938. assert(params->ith == 0);
  6939. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  6940. return;
  6941. }
  6942. assert(src0->nb[0] == sizeof(float));
  6943. GGML_TENSOR_UNARY_OP_LOCALS
  6944. assert(ne0 == 1);
  6945. assert(ne1 == ne01);
  6946. assert(ne2 == ne02);
  6947. assert(ne3 == ne03);
  6948. UNUSED(ne0);
  6949. UNUSED(ne1);
  6950. UNUSED(ne2);
  6951. UNUSED(ne3);
  6952. for (int64_t i03 = 0; i03 < ne03; i03++) {
  6953. for (int64_t i02 = 0; i02 < ne02; i02++) {
  6954. for (int64_t i01 = 0; i01 < ne01; i01++) {
  6955. ggml_vec_sum_f32(ne00,
  6956. (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3),
  6957. (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03));
  6958. *(float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3) /= (float) ne00;
  6959. }
  6960. }
  6961. }
  6962. }
  6963. static void ggml_compute_forward_mean(
  6964. const struct ggml_compute_params * params,
  6965. const struct ggml_tensor * src0,
  6966. struct ggml_tensor * dst) {
  6967. switch (src0->type) {
  6968. case GGML_TYPE_F32:
  6969. {
  6970. ggml_compute_forward_mean_f32(params, src0, dst);
  6971. } break;
  6972. default:
  6973. {
  6974. GGML_ASSERT(false);
  6975. } break;
  6976. }
  6977. }
  6978. // ggml_compute_forward_argmax
  6979. static void ggml_compute_forward_argmax_f32(
  6980. const struct ggml_compute_params * params,
  6981. const struct ggml_tensor * src0,
  6982. struct ggml_tensor * dst) {
  6983. assert(params->ith == 0);
  6984. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  6985. return;
  6986. }
  6987. assert(src0->nb[0] == sizeof(float));
  6988. assert(dst->nb[0] == sizeof(float));
  6989. const int64_t ne00 = src0->ne[0];
  6990. const int64_t ne01 = src0->ne[1];
  6991. const size_t nb01 = src0->nb[1];
  6992. const size_t nb0 = dst->nb[0];
  6993. for (int64_t i1 = 0; i1 < ne01; i1++) {
  6994. float * src = (float *) ((char *) src0->data + i1*nb01);
  6995. int32_t * dst_ = (int32_t *) ((char *) dst->data + i1*nb0);
  6996. int v = 0;
  6997. ggml_vec_argmax_f32(ne00, &v, src);
  6998. dst_[0] = v;
  6999. }
  7000. }
  7001. static void ggml_compute_forward_argmax(
  7002. const struct ggml_compute_params * params,
  7003. const struct ggml_tensor * src0,
  7004. struct ggml_tensor * dst) {
  7005. switch (src0->type) {
  7006. case GGML_TYPE_F32:
  7007. {
  7008. ggml_compute_forward_argmax_f32(params, src0, dst);
  7009. } break;
  7010. default:
  7011. {
  7012. GGML_ASSERT(false);
  7013. } break;
  7014. }
  7015. }
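// Illustrative sketch (not part of ggml): for each row, argmax stores the index of
// the largest element as an int32 (the sizeof(float) assert still holds because
// sizeof(float) == sizeof(int32_t)). A scalar reference of the per-row reduction,
// with ties resolved to the first maximum (the example_* name is hypothetical):
static int example_argmax_reference(const float * x, int n) {
    // assumes n >= 1
    int   best_i = 0;
    float best_v = x[0];
    for (int i = 1; i < n; ++i) {
        if (x[i] > best_v) {
            best_v = x[i];
            best_i = i;
        }
    }
    return best_i;
}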
  7016. // ggml_compute_forward_repeat
  7017. static void ggml_compute_forward_repeat_f32(
  7018. const struct ggml_compute_params * params,
  7019. const struct ggml_tensor * src0,
  7020. struct ggml_tensor * dst) {
  7021. GGML_ASSERT(params->ith == 0);
  7022. GGML_ASSERT(ggml_can_repeat(src0, dst));
  7023. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7024. return;
  7025. }
  7026. GGML_TENSOR_UNARY_OP_LOCALS
  7027. // guaranteed to be an integer due to the check in ggml_can_repeat
  7028. const int nr0 = (int)(ne0/ne00);
  7029. const int nr1 = (int)(ne1/ne01);
  7030. const int nr2 = (int)(ne2/ne02);
  7031. const int nr3 = (int)(ne3/ne03);
  7032. // TODO: support for transposed / permuted tensors
  7033. GGML_ASSERT(nb0 == sizeof(float));
  7034. GGML_ASSERT(nb00 == sizeof(float));
  7035. // TODO: maybe this is not optimal?
  7036. for (int i3 = 0; i3 < nr3; i3++) {
  7037. for (int k3 = 0; k3 < ne03; k3++) {
  7038. for (int i2 = 0; i2 < nr2; i2++) {
  7039. for (int k2 = 0; k2 < ne02; k2++) {
  7040. for (int i1 = 0; i1 < nr1; i1++) {
  7041. for (int k1 = 0; k1 < ne01; k1++) {
  7042. for (int i0 = 0; i0 < nr0; i0++) {
  7043. ggml_vec_cpy_f32(ne00,
  7044. (float *) ((char *) dst->data + (i3*ne03 + k3)*nb3 + (i2*ne02 + k2)*nb2 + (i1*ne01 + k1)*nb1 + (i0*ne00)*nb0),
  7045. (float *) ((char *) src0->data + ( k3)*nb03 + ( k2)*nb02 + ( k1)*nb01));
  7046. }
  7047. }
  7048. }
  7049. }
  7050. }
  7051. }
  7052. }
  7053. }
  7054. static void ggml_compute_forward_repeat_f16(
  7055. const struct ggml_compute_params * params,
  7056. const struct ggml_tensor * src0,
  7057. struct ggml_tensor * dst) {
  7058. GGML_ASSERT(params->ith == 0);
  7059. GGML_ASSERT(ggml_can_repeat(src0, dst));
  7060. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7061. return;
  7062. }
  7063. GGML_TENSOR_UNARY_OP_LOCALS
  7064. // guaranteed to be an integer due to the check in ggml_can_repeat
  7065. const int nr0 = (int)(ne0/ne00);
  7066. const int nr1 = (int)(ne1/ne01);
  7067. const int nr2 = (int)(ne2/ne02);
  7068. const int nr3 = (int)(ne3/ne03);
  7069. // TODO: support for transposed / permuted tensors
  7070. GGML_ASSERT(nb0 == sizeof(ggml_fp16_t));
  7071. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  7072. // TODO: maybe this is not optimal?
  7073. for (int i3 = 0; i3 < nr3; i3++) {
  7074. for (int k3 = 0; k3 < ne03; k3++) {
  7075. for (int i2 = 0; i2 < nr2; i2++) {
  7076. for (int k2 = 0; k2 < ne02; k2++) {
  7077. for (int i1 = 0; i1 < nr1; i1++) {
  7078. for (int k1 = 0; k1 < ne01; k1++) {
  7079. for (int i0 = 0; i0 < nr0; i0++) {
  7080. ggml_fp16_t * y = (ggml_fp16_t *) ((char *) dst->data + (i3*ne03 + k3)*nb3 + (i2*ne02 + k2)*nb2 + (i1*ne01 + k1)*nb1 + (i0*ne00)*nb0);
  7081. ggml_fp16_t * x = (ggml_fp16_t *) ((char *) src0->data + ( k3)*nb03 + ( k2)*nb02 + ( k1)*nb01);
  7082. // ggml_vec_cpy_f16(ne00, y, x)
  7083. for (int i = 0; i < ne00; ++i) {
  7084. y[i] = x[i];
  7085. }
  7086. }
  7087. }
  7088. }
  7089. }
  7090. }
  7091. }
  7092. }
  7093. }
  7094. static void ggml_compute_forward_repeat(
  7095. const struct ggml_compute_params * params,
  7096. const struct ggml_tensor * src0,
  7097. struct ggml_tensor * dst) {
  7098. switch (src0->type) {
  7099. case GGML_TYPE_F16:
  7100. case GGML_TYPE_I16:
  7101. {
  7102. ggml_compute_forward_repeat_f16(params, src0, dst);
  7103. } break;
  7104. case GGML_TYPE_F32:
  7105. case GGML_TYPE_I32:
  7106. {
  7107. ggml_compute_forward_repeat_f32(params, src0, dst);
  7108. } break;
  7109. default:
  7110. {
  7111. GGML_ASSERT(false);
  7112. } break;
  7113. }
  7114. }
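// Illustrative sketch (not part of ggml): repeat tiles src0 across dst nr0..nr3 times
// along the four dimensions; the innermost ggml_vec_cpy_f32 copies one whole src0 row
// per (i0, k1, k2, k3). A 1-D analogue for ne00 = 3 repeated nr0 = 2 times
// (the example_* name is hypothetical):
static void example_repeat_1d(const float * src, float * dst) {
    const int ne00 = 3;
    const int nr0  = 2;
    for (int i0 = 0; i0 < nr0; ++i0) {
        for (int k = 0; k < ne00; ++k) {
            dst[i0*ne00 + k] = src[k];   // dst = {s0, s1, s2, s0, s1, s2}
        }
    }
}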
  7115. // ggml_compute_forward_repeat_back
  7116. static void ggml_compute_forward_repeat_back_f32(
  7117. const struct ggml_compute_params * params,
  7118. const struct ggml_tensor * src0,
  7119. struct ggml_tensor * dst) {
  7120. GGML_ASSERT(params->ith == 0);
  7121. GGML_ASSERT(ggml_can_repeat(dst, src0));
  7122. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7123. return;
  7124. }
  7125. GGML_TENSOR_UNARY_OP_LOCALS
  7126. // guaranteed to be an integer due to the check in ggml_can_repeat
  7127. const int nr0 = (int)(ne00/ne0);
  7128. const int nr1 = (int)(ne01/ne1);
  7129. const int nr2 = (int)(ne02/ne2);
  7130. const int nr3 = (int)(ne03/ne3);
  7131. // TODO: support for transposed / permuted tensors
  7132. GGML_ASSERT(nb0 == sizeof(float));
  7133. GGML_ASSERT(nb00 == sizeof(float));
  7134. if (ggml_is_contiguous(dst)) {
  7135. ggml_vec_set_f32(ne0*ne1*ne2*ne3, dst->data, 0);
  7136. } else {
  7137. for (int k3 = 0; k3 < ne3; k3++) {
  7138. for (int k2 = 0; k2 < ne2; k2++) {
  7139. for (int k1 = 0; k1 < ne1; k1++) {
  7140. ggml_vec_set_f32(ne0,
  7141. (float *) ((char *) dst->data + k1*nb1 + k2*nb2 + k3*nb3),
  7142. 0);
  7143. }
  7144. }
  7145. }
  7146. }
  7147. // TODO: maybe this is not optimal?
  7148. for (int i3 = 0; i3 < nr3; i3++) {
  7149. for (int k3 = 0; k3 < ne3; k3++) {
  7150. for (int i2 = 0; i2 < nr2; i2++) {
  7151. for (int k2 = 0; k2 < ne2; k2++) {
  7152. for (int i1 = 0; i1 < nr1; i1++) {
  7153. for (int k1 = 0; k1 < ne1; k1++) {
  7154. for (int i0 = 0; i0 < nr0; i0++) {
  7155. ggml_vec_acc_f32(ne0,
  7156. (float *) ((char *) dst->data + ( k3)*nb3 + ( k2)*nb2 + ( k1)*nb1),
  7157. (float *) ((char *) src0->data + (i3*ne3 + k3)*nb03 + (i2*ne2 + k2)*nb02 + (i1*ne1 + k1)*nb01 + (i0*ne0)*nb00));
  7158. }
  7159. }
  7160. }
  7161. }
  7162. }
  7163. }
  7164. }
  7165. }
  7166. static void ggml_compute_forward_repeat_back(
  7167. const struct ggml_compute_params * params,
  7168. const struct ggml_tensor * src0,
  7169. struct ggml_tensor * dst) {
  7170. switch (src0->type) {
  7171. case GGML_TYPE_F32:
  7172. {
  7173. ggml_compute_forward_repeat_back_f32(params, src0, dst);
  7174. } break;
  7175. default:
  7176. {
  7177. GGML_ASSERT(false);
  7178. } break;
  7179. }
  7180. }
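// Illustrative sketch (not part of ggml): repeat_back is the reduction that pairs with
// repeat above: every copy that repeat would have written is accumulated back into the
// smaller dst, which is exactly what the gradient of a broadcast/repeat needs. The 1-D
// counterpart of the repeat example (the example_* name is hypothetical):
static void example_repeat_back_1d(const float * src, float * dst) {
    const int ne0 = 3;                   // dst row length
    const int nr0 = 2;                   // src is nr0 times longer
    for (int k = 0; k < ne0; ++k) {
        dst[k] = 0.0f;                   // matches the ggml_vec_set_f32 zeroing above
    }
    for (int i0 = 0; i0 < nr0; ++i0) {
        for (int k = 0; k < ne0; ++k) {
            dst[k] += src[i0*ne0 + k];   // dst[k] = src[k] + src[k + ne0]
        }
    }
}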
  7181. // ggml_compute_forward_concat
  7182. static void ggml_compute_forward_concat_f32(
  7183. const struct ggml_compute_params * params,
  7184. const struct ggml_tensor * src0,
  7185. const struct ggml_tensor * src1,
  7186. struct ggml_tensor * dst) {
  7187. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7188. return;
  7189. }
  7190. GGML_ASSERT(src0->nb[0] == sizeof(float));
  7191. const int ith = params->ith;
  7192. const int nth = params->nth;
  7193. GGML_TENSOR_BINARY_OP_LOCALS
  7194. // TODO: support for transposed / permuted tensors
  7195. GGML_ASSERT(nb0 == sizeof(float));
  7196. GGML_ASSERT(nb00 == sizeof(float));
  7197. GGML_ASSERT(nb10 == sizeof(float));
  7198. for (int i3 = 0; i3 < ne3; i3++) {
  7199. for (int i2 = ith; i2 < ne2; i2 += nth) {
  7200. if (i2 < ne02) { // src0
  7201. for (int i1 = 0; i1 < ne1; i1++) {
  7202. for (int i0 = 0; i0 < ne0; i0++) {
  7203. const float * x = (float *)((char *) src0->data + i0 * nb00 + i1 * nb01 + i2 * nb02 + i3 * nb03);
  7204. float * y = (float *)((char *)dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3);
  7205. *y = *x;
  7206. }
  7207. }
7208. } // else: the remaining ne2 - ne02 slices come from src1
  7209. else {
  7210. for (int i1 = 0; i1 < ne1; i1++) {
  7211. for (int i0 = 0; i0 < ne0; i0++) {
  7212. const float * x = (float *)((char *) src1->data + i0 * nb10 + i1 * nb11 + (i2 - ne02) * nb12 + i3 * nb13);
  7213. float * y = (float *)((char *)dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3);
  7214. *y = *x;
  7215. }
  7216. }
  7217. }
  7218. }
  7219. }
  7220. }
  7221. static void ggml_compute_forward_concat(
  7222. const struct ggml_compute_params* params,
  7223. const struct ggml_tensor* src0,
  7224. const struct ggml_tensor* src1,
  7225. struct ggml_tensor* dst) {
  7226. switch (src0->type) {
  7227. case GGML_TYPE_F32:
  7228. case GGML_TYPE_I32:
  7229. {
  7230. ggml_compute_forward_concat_f32(params, src0, src1, dst);
  7231. } break;
  7232. default:
  7233. {
  7234. GGML_ASSERT(false);
  7235. } break;
  7236. }
  7237. }
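// Illustrative sketch (not part of ggml): as written, the f32 path above concatenates
// along dimension 2 only: slices with i2 < ne02 are copied from src0 and the remaining
// ne2 - ne02 slices come from src1. A 1-D analogue of that split (the example_* name is
// hypothetical):
static void example_concat_reference(const float * a, int na, const float * b, int nb, float * dst) {
    for (int i = 0; i < na + nb; ++i) {
        dst[i] = (i < na) ? a[i] : b[i - na];
    }
}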
  7238. // ggml_compute_forward_abs
  7239. static void ggml_compute_forward_abs_f32(
  7240. const struct ggml_compute_params * params,
  7241. const struct ggml_tensor * src0,
  7242. struct ggml_tensor * dst) {
  7243. assert(params->ith == 0);
  7244. assert(ggml_are_same_shape(src0, dst));
  7245. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7246. return;
  7247. }
  7248. const int n = ggml_nrows(src0);
  7249. const int nc = src0->ne[0];
  7250. assert(dst->nb[0] == sizeof(float));
  7251. assert(src0->nb[0] == sizeof(float));
  7252. for (int i = 0; i < n; i++) {
  7253. ggml_vec_abs_f32(nc,
  7254. (float *) ((char *) dst->data + i*( dst->nb[1])),
  7255. (float *) ((char *) src0->data + i*(src0->nb[1])));
  7256. }
  7257. }
  7258. static void ggml_compute_forward_abs(
  7259. const struct ggml_compute_params * params,
  7260. const struct ggml_tensor * src0,
  7261. struct ggml_tensor * dst) {
  7262. switch (src0->type) {
  7263. case GGML_TYPE_F32:
  7264. {
  7265. ggml_compute_forward_abs_f32(params, src0, dst);
  7266. } break;
  7267. default:
  7268. {
  7269. GGML_ASSERT(false);
  7270. } break;
  7271. }
  7272. }
  7273. // ggml_compute_forward_sgn
  7274. static void ggml_compute_forward_sgn_f32(
  7275. const struct ggml_compute_params * params,
  7276. const struct ggml_tensor * src0,
  7277. struct ggml_tensor * dst) {
  7278. assert(params->ith == 0);
  7279. assert(ggml_are_same_shape(src0, dst));
  7280. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7281. return;
  7282. }
  7283. const int n = ggml_nrows(src0);
  7284. const int nc = src0->ne[0];
  7285. assert(dst->nb[0] == sizeof(float));
  7286. assert(src0->nb[0] == sizeof(float));
  7287. for (int i = 0; i < n; i++) {
  7288. ggml_vec_sgn_f32(nc,
  7289. (float *) ((char *) dst->data + i*( dst->nb[1])),
  7290. (float *) ((char *) src0->data + i*(src0->nb[1])));
  7291. }
  7292. }
  7293. static void ggml_compute_forward_sgn(
  7294. const struct ggml_compute_params * params,
  7295. const struct ggml_tensor * src0,
  7296. struct ggml_tensor * dst) {
  7297. switch (src0->type) {
  7298. case GGML_TYPE_F32:
  7299. {
  7300. ggml_compute_forward_sgn_f32(params, src0, dst);
  7301. } break;
  7302. default:
  7303. {
  7304. GGML_ASSERT(false);
  7305. } break;
  7306. }
  7307. }
  7308. // ggml_compute_forward_neg
  7309. static void ggml_compute_forward_neg_f32(
  7310. const struct ggml_compute_params * params,
  7311. const struct ggml_tensor * src0,
  7312. struct ggml_tensor * dst) {
  7313. assert(params->ith == 0);
  7314. assert(ggml_are_same_shape(src0, dst));
  7315. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7316. return;
  7317. }
  7318. const int n = ggml_nrows(src0);
  7319. const int nc = src0->ne[0];
  7320. assert(dst->nb[0] == sizeof(float));
  7321. assert(src0->nb[0] == sizeof(float));
  7322. for (int i = 0; i < n; i++) {
  7323. ggml_vec_neg_f32(nc,
  7324. (float *) ((char *) dst->data + i*( dst->nb[1])),
  7325. (float *) ((char *) src0->data + i*(src0->nb[1])));
  7326. }
  7327. }
  7328. static void ggml_compute_forward_neg(
  7329. const struct ggml_compute_params * params,
  7330. const struct ggml_tensor * src0,
  7331. struct ggml_tensor * dst) {
  7332. switch (src0->type) {
  7333. case GGML_TYPE_F32:
  7334. {
  7335. ggml_compute_forward_neg_f32(params, src0, dst);
  7336. } break;
  7337. default:
  7338. {
  7339. GGML_ASSERT(false);
  7340. } break;
  7341. }
  7342. }
  7343. // ggml_compute_forward_step
  7344. static void ggml_compute_forward_step_f32(
  7345. const struct ggml_compute_params * params,
  7346. const struct ggml_tensor * src0,
  7347. struct ggml_tensor * dst) {
  7348. assert(params->ith == 0);
  7349. assert(ggml_are_same_shape(src0, dst));
  7350. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7351. return;
  7352. }
  7353. const int n = ggml_nrows(src0);
  7354. const int nc = src0->ne[0];
  7355. assert(dst->nb[0] == sizeof(float));
  7356. assert(src0->nb[0] == sizeof(float));
  7357. for (int i = 0; i < n; i++) {
  7358. ggml_vec_step_f32(nc,
  7359. (float *) ((char *) dst->data + i*( dst->nb[1])),
  7360. (float *) ((char *) src0->data + i*(src0->nb[1])));
  7361. }
  7362. }
  7363. static void ggml_compute_forward_step(
  7364. const struct ggml_compute_params * params,
  7365. const struct ggml_tensor * src0,
  7366. struct ggml_tensor * dst) {
  7367. switch (src0->type) {
  7368. case GGML_TYPE_F32:
  7369. {
  7370. ggml_compute_forward_step_f32(params, src0, dst);
  7371. } break;
  7372. default:
  7373. {
  7374. GGML_ASSERT(false);
  7375. } break;
  7376. }
  7377. }
  7378. // ggml_compute_forward_tanh
  7379. static void ggml_compute_forward_tanh_f32(
  7380. const struct ggml_compute_params * params,
  7381. const struct ggml_tensor * src0,
  7382. struct ggml_tensor * dst) {
  7383. assert(params->ith == 0);
  7384. assert(ggml_are_same_shape(src0, dst));
  7385. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7386. return;
  7387. }
  7388. const int n = ggml_nrows(src0);
  7389. const int nc = src0->ne[0];
  7390. assert(dst->nb[0] == sizeof(float));
  7391. assert(src0->nb[0] == sizeof(float));
  7392. for (int i = 0; i < n; i++) {
  7393. ggml_vec_tanh_f32(nc,
  7394. (float *) ((char *) dst->data + i*( dst->nb[1])),
  7395. (float *) ((char *) src0->data + i*(src0->nb[1])));
  7396. }
  7397. }
  7398. static void ggml_compute_forward_tanh(
  7399. const struct ggml_compute_params * params,
  7400. const struct ggml_tensor * src0,
  7401. struct ggml_tensor * dst) {
  7402. switch (src0->type) {
  7403. case GGML_TYPE_F32:
  7404. {
  7405. ggml_compute_forward_tanh_f32(params, src0, dst);
  7406. } break;
  7407. default:
  7408. {
  7409. GGML_ASSERT(false);
  7410. } break;
  7411. }
  7412. }
  7413. // ggml_compute_forward_elu
  7414. static void ggml_compute_forward_elu_f32(
  7415. const struct ggml_compute_params * params,
  7416. const struct ggml_tensor * src0,
  7417. struct ggml_tensor * dst) {
  7418. assert(params->ith == 0);
  7419. assert(ggml_are_same_shape(src0, dst));
  7420. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7421. return;
  7422. }
  7423. const int n = ggml_nrows(src0);
  7424. const int nc = src0->ne[0];
  7425. assert(dst->nb[0] == sizeof(float));
  7426. assert(src0->nb[0] == sizeof(float));
  7427. for (int i = 0; i < n; i++) {
  7428. ggml_vec_elu_f32(nc,
  7429. (float *) ((char *) dst->data + i*( dst->nb[1])),
  7430. (float *) ((char *) src0->data + i*(src0->nb[1])));
  7431. }
  7432. }
  7433. static void ggml_compute_forward_elu(
  7434. const struct ggml_compute_params * params,
  7435. const struct ggml_tensor * src0,
  7436. struct ggml_tensor * dst) {
  7437. switch (src0->type) {
  7438. case GGML_TYPE_F32:
  7439. {
  7440. ggml_compute_forward_elu_f32(params, src0, dst);
  7441. } break;
  7442. default:
  7443. {
  7444. GGML_ASSERT(false);
  7445. } break;
  7446. }
  7447. }
  7448. // ggml_compute_forward_relu
  7449. static void ggml_compute_forward_relu_f32(
  7450. const struct ggml_compute_params * params,
  7451. const struct ggml_tensor * src0,
  7452. struct ggml_tensor * dst) {
  7453. assert(params->ith == 0);
  7454. assert(ggml_are_same_shape(src0, dst));
  7455. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7456. return;
  7457. }
  7458. const int n = ggml_nrows(src0);
  7459. const int nc = src0->ne[0];
  7460. assert(dst->nb[0] == sizeof(float));
  7461. assert(src0->nb[0] == sizeof(float));
  7462. for (int i = 0; i < n; i++) {
  7463. ggml_vec_relu_f32(nc,
  7464. (float *) ((char *) dst->data + i*( dst->nb[1])),
  7465. (float *) ((char *) src0->data + i*(src0->nb[1])));
  7466. }
  7467. }
  7468. static void ggml_compute_forward_relu(
  7469. const struct ggml_compute_params * params,
  7470. const struct ggml_tensor * src0,
  7471. struct ggml_tensor * dst) {
  7472. switch (src0->type) {
  7473. case GGML_TYPE_F32:
  7474. {
  7475. ggml_compute_forward_relu_f32(params, src0, dst);
  7476. } break;
  7477. default:
  7478. {
  7479. GGML_ASSERT(false);
  7480. } break;
  7481. }
  7482. }
  7483. // ggml_compute_forward_gelu
  7484. static void ggml_compute_forward_gelu_f32(
  7485. const struct ggml_compute_params * params,
  7486. const struct ggml_tensor * src0,
  7487. struct ggml_tensor * dst) {
  7488. GGML_ASSERT(ggml_is_contiguous_except_dim_1(src0));
  7489. GGML_ASSERT(ggml_is_contiguous_except_dim_1(dst));
  7490. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  7491. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7492. return;
  7493. }
  7494. const int ith = params->ith;
  7495. const int nth = params->nth;
  7496. const int nc = src0->ne[0];
  7497. const int nr = ggml_nrows(src0);
  7498. // rows per thread
  7499. const int dr = (nr + nth - 1)/nth;
  7500. // row range for this thread
  7501. const int ir0 = dr*ith;
  7502. const int ir1 = MIN(ir0 + dr, nr);
  7503. for (int i1 = ir0; i1 < ir1; i1++) {
  7504. ggml_vec_gelu_f32(nc,
  7505. (float *) ((char *) dst->data + i1*( dst->nb[1])),
  7506. (float *) ((char *) src0->data + i1*(src0->nb[1])));
  7507. #ifndef NDEBUG
  7508. for (int k = 0; k < nc; k++) {
  7509. const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k];
  7510. UNUSED(x);
  7511. assert(!isnan(x));
  7512. assert(!isinf(x));
  7513. }
  7514. #endif
  7515. }
  7516. }
  7517. static void ggml_compute_forward_gelu(
  7518. const struct ggml_compute_params * params,
  7519. const struct ggml_tensor * src0,
  7520. struct ggml_tensor * dst) {
  7521. switch (src0->type) {
  7522. case GGML_TYPE_F32:
  7523. {
  7524. ggml_compute_forward_gelu_f32(params, src0, dst);
  7525. } break;
  7526. default:
  7527. {
  7528. GGML_ASSERT(false);
  7529. } break;
  7530. }
  7531. }
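// Illustrative sketch (not part of ggml): gelu, gelu_quick, silu and silu_back all
// split work the same way: nr rows are divided into ceil(nr/nth) chunks and thread
// ith handles rows [ir0, ir1). Per element, ggml_vec_gelu_f32 applies the usual
// tanh-based GELU approximation; the constants live with that helper. A minimal
// illustration of the row partitioning only (the example_* name is hypothetical):
static void example_row_partition(int nr, int nth) {
    for (int ith = 0; ith < nth; ++ith) {
        const int dr  = (nr + nth - 1)/nth;   // rows per thread, rounded up
        const int ir0 = dr*ith;
        const int ir1 = MIN(ir0 + dr, nr);    // the last thread may get fewer rows
        UNUSED(ir0); UNUSED(ir1);
    }
}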
  7532. // ggml_compute_forward_gelu_quick
  7533. static void ggml_compute_forward_gelu_quick_f32(
  7534. const struct ggml_compute_params * params,
  7535. const struct ggml_tensor * src0,
  7536. struct ggml_tensor * dst) {
  7537. GGML_ASSERT(ggml_is_contiguous_except_dim_1(src0));
  7538. GGML_ASSERT(ggml_is_contiguous_except_dim_1(dst));
  7539. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  7540. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7541. return;
  7542. }
  7543. const int ith = params->ith;
  7544. const int nth = params->nth;
  7545. const int nc = src0->ne[0];
  7546. const int nr = ggml_nrows(src0);
  7547. // rows per thread
  7548. const int dr = (nr + nth - 1)/nth;
  7549. // row range for this thread
  7550. const int ir0 = dr*ith;
  7551. const int ir1 = MIN(ir0 + dr, nr);
  7552. for (int i1 = ir0; i1 < ir1; i1++) {
  7553. ggml_vec_gelu_quick_f32(nc,
  7554. (float *) ((char *) dst->data + i1*( dst->nb[1])),
  7555. (float *) ((char *) src0->data + i1*(src0->nb[1])));
  7556. #ifndef NDEBUG
  7557. for (int k = 0; k < nc; k++) {
  7558. const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k];
  7559. UNUSED(x);
  7560. assert(!isnan(x));
  7561. assert(!isinf(x));
  7562. }
  7563. #endif
  7564. }
  7565. }
  7566. static void ggml_compute_forward_gelu_quick(
  7567. const struct ggml_compute_params * params,
  7568. const struct ggml_tensor * src0,
  7569. struct ggml_tensor * dst) {
  7570. switch (src0->type) {
  7571. case GGML_TYPE_F32:
  7572. {
  7573. ggml_compute_forward_gelu_quick_f32(params, src0, dst);
  7574. } break;
  7575. default:
  7576. {
  7577. GGML_ASSERT(false);
  7578. } break;
  7579. }
  7580. }
  7581. // ggml_compute_forward_silu
  7582. static void ggml_compute_forward_silu_f32(
  7583. const struct ggml_compute_params * params,
  7584. const struct ggml_tensor * src0,
  7585. struct ggml_tensor * dst) {
  7586. GGML_ASSERT(ggml_is_contiguous_except_dim_1(src0));
  7587. GGML_ASSERT(ggml_is_contiguous_except_dim_1(dst));
  7588. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  7589. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7590. return;
  7591. }
  7592. const int ith = params->ith;
  7593. const int nth = params->nth;
  7594. const int nc = src0->ne[0];
  7595. const int nr = ggml_nrows(src0);
  7596. // rows per thread
  7597. const int dr = (nr + nth - 1)/nth;
  7598. // row range for this thread
  7599. const int ir0 = dr*ith;
  7600. const int ir1 = MIN(ir0 + dr, nr);
  7601. for (int i1 = ir0; i1 < ir1; i1++) {
  7602. ggml_vec_silu_f32(nc,
  7603. (float *) ((char *) dst->data + i1*( dst->nb[1])),
  7604. (float *) ((char *) src0->data + i1*(src0->nb[1])));
  7605. #ifndef NDEBUG
  7606. for (int k = 0; k < nc; k++) {
  7607. const float x = ((float *) ((char *) dst->data + i1*(dst->nb[1])))[k];
  7608. UNUSED(x);
  7609. assert(!isnan(x));
  7610. assert(!isinf(x));
  7611. }
  7612. #endif
  7613. }
  7614. }
  7615. static void ggml_compute_forward_silu(
  7616. const struct ggml_compute_params * params,
  7617. const struct ggml_tensor * src0,
  7618. struct ggml_tensor * dst) {
  7619. switch (src0->type) {
  7620. case GGML_TYPE_F32:
  7621. {
  7622. ggml_compute_forward_silu_f32(params, src0, dst);
  7623. } break;
  7624. default:
  7625. {
  7626. GGML_ASSERT(false);
  7627. } break;
  7628. }
  7629. }
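// Illustrative sketch (not part of ggml): per element the SiLU kernel computes
// x * sigmoid(x); the vectorized form lives in ggml_vec_silu_f32. A scalar reference
// (the example_* name is hypothetical):
static float example_silu_reference(float x) {
    return x / (1.0f + expf(-x));   // equals x * sigmoid(x)
}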
  7630. // ggml_compute_forward_leaky_relu
  7631. static void ggml_compute_forward_leaky_relu_f32(
  7632. const struct ggml_compute_params * params,
  7633. const struct ggml_tensor * src0,
  7634. struct ggml_tensor * dst) {
  7635. assert(params->ith == 0);
  7636. assert(ggml_are_same_shape(src0, dst));
  7637. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7638. return;
  7639. }
  7640. const int n = ggml_nrows(src0);
  7641. const int nc = src0->ne[0];
  7642. float negative_slope;
  7643. memcpy(&negative_slope, dst->op_params, sizeof(float));
  7644. assert(dst->nb[0] == sizeof(float));
  7645. assert(src0->nb[0] == sizeof(float));
  7646. for (int i = 0; i < n; i++) {
  7647. ggml_vec_leaky_relu_f32(nc,
  7648. (float *) ((char *) dst->data + i*( dst->nb[1])),
  7649. (float *) ((char *) src0->data + i*(src0->nb[1])), negative_slope);
  7650. }
  7651. }
  7652. static void ggml_compute_forward_leaky_relu(
  7653. const struct ggml_compute_params * params,
  7654. const struct ggml_tensor * src0,
  7655. struct ggml_tensor * dst) {
  7656. switch (src0->type) {
  7657. case GGML_TYPE_F32:
  7658. {
  7659. ggml_compute_forward_leaky_relu_f32(params, src0, dst);
  7660. } break;
  7661. default:
  7662. {
  7663. GGML_ASSERT(false);
  7664. } break;
  7665. }
  7666. }
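// Illustrative sketch (not part of ggml): the slope is carried in dst->op_params and
// copied out with memcpy above; per element the op evaluates the usual leaky ReLU
// (the example_* name is hypothetical):
static float example_leaky_relu_reference(float x, float negative_slope) {
    return x > 0.0f ? x : negative_slope*x;
}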
  7667. // ggml_compute_forward_silu_back
  7668. static void ggml_compute_forward_silu_back_f32(
  7669. const struct ggml_compute_params * params,
  7670. const struct ggml_tensor * src0,
  7671. const struct ggml_tensor * grad,
  7672. struct ggml_tensor * dst) {
  7673. GGML_ASSERT(ggml_is_contiguous_except_dim_1(grad));
  7674. GGML_ASSERT(ggml_is_contiguous_except_dim_1(src0));
  7675. GGML_ASSERT(ggml_is_contiguous_except_dim_1(dst));
  7676. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  7677. GGML_ASSERT(ggml_are_same_shape(src0, grad));
  7678. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7679. return;
  7680. }
  7681. const int ith = params->ith;
  7682. const int nth = params->nth;
  7683. const int nc = src0->ne[0];
  7684. const int nr = ggml_nrows(src0);
  7685. // rows per thread
  7686. const int dr = (nr + nth - 1)/nth;
  7687. // row range for this thread
  7688. const int ir0 = dr*ith;
  7689. const int ir1 = MIN(ir0 + dr, nr);
  7690. for (int i1 = ir0; i1 < ir1; i1++) {
  7691. ggml_vec_silu_backward_f32(nc,
  7692. (float *) ((char *) dst->data + i1*( dst->nb[1])),
  7693. (float *) ((char *) src0->data + i1*(src0->nb[1])),
  7694. (float *) ((char *) grad->data + i1*(grad->nb[1])));
  7695. #ifndef NDEBUG
  7696. for (int k = 0; k < nc; k++) {
  7697. const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k];
  7698. UNUSED(x);
  7699. assert(!isnan(x));
  7700. assert(!isinf(x));
  7701. }
  7702. #endif
  7703. }
  7704. }
  7705. static void ggml_compute_forward_silu_back(
  7706. const struct ggml_compute_params * params,
  7707. const struct ggml_tensor * src0,
  7708. const struct ggml_tensor * grad,
  7709. struct ggml_tensor * dst) {
  7710. switch (src0->type) {
  7711. case GGML_TYPE_F32:
  7712. {
  7713. ggml_compute_forward_silu_back_f32(params, src0, grad, dst);
  7714. } break;
  7715. default:
  7716. {
  7717. GGML_ASSERT(false);
  7718. } break;
  7719. }
  7720. }
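// Illustrative sketch (not part of ggml): silu_back applies the SiLU derivative to the
// incoming gradient. With s = sigmoid(x), d/dx [x*s] = s*(1 + x*(1 - s)). Assuming src0
// carries the forward input x and grad the incoming gradient, each output element is
// grad * s * (1 + x*(1 - s)). A scalar reference (the example_* name is hypothetical):
static float example_silu_backward_reference(float x, float grad) {
    const float s = 1.0f/(1.0f + expf(-x));
    return grad*s*(1.0f + x*(1.0f - s));
}
// ggml_compute_forward_hardswish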
  7721. static void ggml_compute_forward_hardswish_f32(
  7722. const struct ggml_compute_params * params,
  7723. const struct ggml_tensor * src0,
  7724. struct ggml_tensor * dst) {
  7725. assert(params->ith == 0);
  7726. assert(ggml_are_same_shape(src0, dst));
  7727. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7728. return;
  7729. }
  7730. const int n = ggml_nrows(src0);
  7731. const int nc = src0->ne[0];
  7732. assert(dst->nb[0] == sizeof(float));
  7733. assert(src0->nb[0] == sizeof(float));
  7734. for (int i = 0; i < n; i++) {
  7735. ggml_vec_hardswish_f32(nc,
  7736. (float *) ((char *) dst->data + i*( dst->nb[1])),
  7737. (float *) ((char *) src0->data + i*(src0->nb[1])));
  7738. }
  7739. }
  7740. static void ggml_compute_forward_hardswish(
  7741. const struct ggml_compute_params * params,
  7742. const struct ggml_tensor * src0,
  7743. struct ggml_tensor * dst) {
  7744. switch (src0->type) {
  7745. case GGML_TYPE_F32:
  7746. {
  7747. ggml_compute_forward_hardswish_f32(params, src0, dst);
  7748. } break;
  7749. default:
  7750. {
  7751. GGML_ASSERT(false);
  7752. } break;
  7753. }
  7754. }
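// Illustrative sketch (not part of ggml): hardswish is commonly defined as the
// piecewise-linear approximation x * clamp((x + 3)/6, 0, 1); the exact vector form used
// here lives in ggml_vec_hardswish_f32. A scalar reference under that assumption
// (the example_* name is hypothetical):
static float example_hardswish_reference(float x) {
    float t = (x + 3.0f)/6.0f;
    t = t < 0.0f ? 0.0f : (t > 1.0f ? 1.0f : t);
    return x*t;
}
// ggml_compute_forward_hardsigmoid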
  7755. static void ggml_compute_forward_hardsigmoid_f32(
  7756. const struct ggml_compute_params * params,
  7757. const struct ggml_tensor * src0,
  7758. struct ggml_tensor * dst) {
  7759. assert(params->ith == 0);
  7760. assert(ggml_are_same_shape(src0, dst));
  7761. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7762. return;
  7763. }
  7764. const int n = ggml_nrows(src0);
  7765. const int nc = src0->ne[0];
  7766. assert(dst->nb[0] == sizeof(float));
  7767. assert(src0->nb[0] == sizeof(float));
  7768. for (int i = 0; i < n; i++) {
  7769. ggml_vec_hardsigmoid_f32(nc,
  7770. (float *) ((char *) dst->data + i*( dst->nb[1])),
  7771. (float *) ((char *) src0->data + i*(src0->nb[1])));
  7772. }
  7773. }
  7774. static void ggml_compute_forward_hardsigmoid(
  7775. const struct ggml_compute_params * params,
  7776. const struct ggml_tensor * src0,
  7777. struct ggml_tensor * dst) {
  7778. switch (src0->type) {
  7779. case GGML_TYPE_F32:
  7780. {
  7781. ggml_compute_forward_hardsigmoid_f32(params, src0, dst);
  7782. } break;
  7783. default:
  7784. {
  7785. GGML_ASSERT(false);
  7786. } break;
  7787. }
  7788. }
  7789. // ggml_compute_forward_norm
  7790. static void ggml_compute_forward_norm_f32(
  7791. const struct ggml_compute_params * params,
  7792. const struct ggml_tensor * src0,
  7793. struct ggml_tensor * dst) {
  7794. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  7795. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7796. return;
  7797. }
  7798. GGML_ASSERT(src0->nb[0] == sizeof(float));
  7799. const int ith = params->ith;
  7800. const int nth = params->nth;
  7801. GGML_TENSOR_UNARY_OP_LOCALS
  7802. float eps;
  7803. memcpy(&eps, dst->op_params, sizeof(float));
  7804. GGML_ASSERT(eps > 0.0f);
  7805. // TODO: optimize
  7806. for (int64_t i03 = 0; i03 < ne03; i03++) {
  7807. for (int64_t i02 = 0; i02 < ne02; i02++) {
  7808. for (int64_t i01 = ith; i01 < ne01; i01 += nth) {
  7809. const float * x = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
  7810. ggml_float sum = 0.0;
  7811. for (int64_t i00 = 0; i00 < ne00; i00++) {
  7812. sum += (ggml_float)x[i00];
  7813. }
  7814. float mean = sum/ne00;
  7815. float * y = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3);
  7816. ggml_float sum2 = 0.0;
  7817. for (int64_t i00 = 0; i00 < ne00; i00++) {
  7818. float v = x[i00] - mean;
  7819. y[i00] = v;
  7820. sum2 += (ggml_float)(v*v);
  7821. }
  7822. float variance = sum2/ne00;
  7823. const float scale = 1.0f/sqrtf(variance + eps);
  7824. ggml_vec_scale_f32(ne00, y, scale);
  7825. }
  7826. }
  7827. }
  7828. }
  7829. static void ggml_compute_forward_norm(
  7830. const struct ggml_compute_params * params,
  7831. const struct ggml_tensor * src0,
  7832. struct ggml_tensor * dst) {
  7833. switch (src0->type) {
  7834. case GGML_TYPE_F32:
  7835. {
  7836. ggml_compute_forward_norm_f32(params, src0, dst);
  7837. } break;
  7838. default:
  7839. {
  7840. GGML_ASSERT(false);
  7841. } break;
  7842. }
  7843. }
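// Illustrative sketch (not part of ggml): per row, norm subtracts the mean and scales
// by 1/sqrt(variance + eps), the same two passes as the loop above, with eps taken from
// dst->op_params. A contiguous-row reference (the example_* name is hypothetical):
static void example_norm_reference(const float * x, float * y, int64_t n, float eps) {
    double sum = 0.0;
    for (int64_t i = 0; i < n; ++i) {
        sum += (double) x[i];
    }
    const float mean = (float)(sum/n);
    double sum2 = 0.0;
    for (int64_t i = 0; i < n; ++i) {
        const float v = x[i] - mean;
        y[i] = v;
        sum2 += (double)(v*v);
    }
    const float scale = 1.0f/sqrtf((float)(sum2/n) + eps);
    for (int64_t i = 0; i < n; ++i) {
        y[i] *= scale;
    }
}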
7844. // ggml_compute_forward_rms_norm
  7845. static void ggml_compute_forward_rms_norm_f32(
  7846. const struct ggml_compute_params * params,
  7847. const struct ggml_tensor * src0,
  7848. struct ggml_tensor * dst) {
  7849. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  7850. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7851. return;
  7852. }
  7853. GGML_ASSERT(src0->nb[0] == sizeof(float));
  7854. const int ith = params->ith;
  7855. const int nth = params->nth;
  7856. GGML_TENSOR_UNARY_OP_LOCALS
  7857. float eps;
  7858. memcpy(&eps, dst->op_params, sizeof(float));
  7859. GGML_ASSERT(eps > 0.0f);
  7860. // TODO: optimize
  7861. for (int64_t i03 = 0; i03 < ne03; i03++) {
  7862. for (int64_t i02 = 0; i02 < ne02; i02++) {
  7863. for (int64_t i01 = ith; i01 < ne01; i01 += nth) {
  7864. const float * x = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
  7865. ggml_float sum = 0.0;
  7866. for (int64_t i00 = 0; i00 < ne00; i00++) {
  7867. sum += (ggml_float)(x[i00] * x[i00]);
  7868. }
  7869. const float mean = sum/ne00;
  7870. float * y = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3);
  7871. memcpy(y, x, ne00 * sizeof(float));
  7872. // for (int i00 = 0; i00 < ne00; i00++) {
  7873. // y[i00] = x[i00];
  7874. // }
  7875. const float scale = 1.0f/sqrtf(mean + eps);
  7876. ggml_vec_scale_f32(ne00, y, scale);
  7877. }
  7878. }
  7879. }
  7880. }
  7881. static void ggml_compute_forward_rms_norm(
  7882. const struct ggml_compute_params * params,
  7883. const struct ggml_tensor * src0,
  7884. struct ggml_tensor * dst) {
  7885. switch (src0->type) {
  7886. case GGML_TYPE_F32:
  7887. {
  7888. ggml_compute_forward_rms_norm_f32(params, src0, dst);
  7889. } break;
  7890. default:
  7891. {
  7892. GGML_ASSERT(false);
  7893. } break;
  7894. }
  7895. }
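// Illustrative sketch (not part of ggml): rms_norm skips the mean subtraction and
// scales each row by 1/sqrt(mean(x^2) + eps), as in the loop above. A contiguous-row
// reference (the example_* name is hypothetical):
static void example_rms_norm_reference(const float * x, float * y, int64_t n, float eps) {
    double sum = 0.0;
    for (int64_t i = 0; i < n; ++i) {
        sum += (double)(x[i]*x[i]);
    }
    const float scale = 1.0f/sqrtf((float)(sum/n) + eps);
    for (int64_t i = 0; i < n; ++i) {
        y[i] = x[i]*scale;
    }
}
// ggml_compute_forward_rms_norm_back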
  7896. static void ggml_compute_forward_rms_norm_back_f32(
  7897. const struct ggml_compute_params * params,
  7898. const struct ggml_tensor * src0,
  7899. const struct ggml_tensor * src1,
  7900. struct ggml_tensor * dst) {
  7901. GGML_ASSERT(ggml_are_same_shape(src0, dst) && ggml_are_same_shape(src0, src1));
  7902. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7903. return;
  7904. }
  7905. GGML_ASSERT(src0->nb[0] == sizeof(float));
  7906. const int ith = params->ith;
  7907. const int nth = params->nth;
  7908. GGML_TENSOR_BINARY_OP_LOCALS
  7909. float eps;
  7910. memcpy(&eps, dst->op_params, sizeof(float));
  7911. // TODO: optimize
  7912. for (int64_t i03 = 0; i03 < ne03; i03++) {
  7913. for (int64_t i02 = 0; i02 < ne02; i02++) {
  7914. for (int64_t i01 = ith; i01 < ne01; i01 += nth) {
  7915. // src1 is same shape as src0 => same indices
  7916. const int64_t i11 = i01;
  7917. const int64_t i12 = i02;
  7918. const int64_t i13 = i03;
  7919. const float * x = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
  7920. const float * dz = (float *) ((char *) src1->data + i11*nb11 + i12*nb12 + i13*nb13);
  7921. ggml_float sum_xx = 0.0;
  7922. ggml_float sum_xdz = 0.0;
  7923. for (int64_t i00 = 0; i00 < ne00; i00++) {
  7924. sum_xx += (ggml_float)(x[i00] * x[i00]);
  7925. sum_xdz += (ggml_float)(x[i00] * dz[i00]);
  7926. }
  7927. //const float mean = (float)(sum_xx)/ne00;
  7928. const float mean_eps = (float)(sum_xx)/ne00 + eps;
  7929. const float sum_eps = (float)(sum_xx) + eps*ne00;
  7930. //const float mean_xdz = (float)(sum_xdz)/ne00;
  7931. // we could cache rms from forward pass to improve performance.
  7932. // to do this implement ggml_rms and compose ggml_rms_norm using ggml_rms.
  7933. //const float rms = sqrtf(mean_eps);
  7934. const float rrms = 1.0f / sqrtf(mean_eps);
  7935. //const float scale = -rrms/(ne00 * mean_eps); // -1/(n*rms**3)
  7936. {
  7937. // z = rms_norm(x)
  7938. //
  7939. // rms_norm(src0) =
  7940. // scale(
  7941. // src0,
  7942. // div(
  7943. // 1,
  7944. // sqrt(
  7945. // add(
  7946. // scale(
  7947. // sum(
  7948. // sqr(
  7949. // src0)),
  7950. // (1.0/N)),
  7951. // eps))));
  7952. // postorder:
  7953. // ## op args grad
  7954. // 00 param src0 grad[#00]
  7955. // 01 const 1
  7956. // 02 sqr (#00) grad[#02]
  7957. // 03 sum (#02) grad[#03]
  7958. // 04 const 1/N
  7959. // 05 scale (#03, #04) grad[#05]
  7960. // 06 const eps
  7961. // 07 add (#05, #06) grad[#07]
  7962. // 08 sqrt (#07) grad[#08]
  7963. // 09 div (#01,#08) grad[#09]
  7964. // 10 scale (#00,#09) grad[#10]
  7965. //
  7966. // backward pass, given grad[#10]
  7967. // #10: scale
  7968. // grad[#00] += scale(grad[#10],#09)
  7969. // grad[#09] += sum(mul(grad[#10],#00))
  7970. // #09: div
  7971. // grad[#08] += neg(mul(grad[#09], div(#09,#08)))
  7972. // #08: sqrt
  7973. // grad[#07] += mul(grad[#08], div(0.5, #08))
  7974. // #07: add
  7975. // grad[#05] += grad[#07]
  7976. // #05: scale
  7977. // grad[#03] += scale(grad[#05],#04)
  7978. // #03: sum
  7979. // grad[#02] += repeat(grad[#03], #02)
  7980. // #02:
  7981. // grad[#00] += scale(mul(#00, grad[#02]), 2.0)
  7982. //
  7983. // substitute and simplify:
  7984. // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, grad[#02]), 2.0)
  7985. // grad[#02] = repeat(grad[#03], #02)
  7986. // grad[#02] = repeat(scale(grad[#05],#04), #02)
  7987. // grad[#02] = repeat(scale(grad[#07],#04), #02)
  7988. // grad[#02] = repeat(scale(mul(grad[#08], div(0.5, #08)),#04), #02)
  7989. // grad[#02] = repeat(scale(mul(neg(mul(grad[#09], div(#09,#08))), div(0.5, #08)),#04), #02)
  7990. // grad[#02] = repeat(scale(mul(neg(mul(sum(mul(grad[#10],#00)), div(#09,#08))), div(0.5, #08)),#04), #02)
  7991. // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(#09,#08) * div(0.5, #08) * (1/N)), #02)
  7992. // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(div(#01,#08),#08) * div(0.5, #08) * (1/N)), #02)
  7993. // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(1,#08*#08) * div(0.5, #08) * (1/N)), #02)
  7994. // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N)), #02)
  7995. // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, grad[#02]), 2.0)
  7996. // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, repeat(-(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N)), #02)), 2.0)
  7997. // grad[#00] = scale(grad(#10), #09) + scale(scale(#00, -(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N))), 2.0)
  7998. // grad[#00] = scale(grad(#10), #09) + scale(#00, -(sum(mul(grad[#10],#00)) * div(1,#07) * div(1,#08) * (1/N)))
  7999. // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(1,#07*#08) * (-1/N))
  8000. // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(1,#07*#08) * (-1/N))
  8001. // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(1,mean_eps*rms) * (-1/N))
  8002. // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*mean_eps))
  8003. // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*(sum_xx/N+eps)))
  8004. // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*sum_xx+rms*N*eps))
  8005. // grad[#00] = scale(dz, rrms) + scale(x, sum(mul(dz,x)) * div(-1,rms*N*mean_eps))
  8006. // grad[#00] = scale(dz, rrms) + scale(x, sum_xdz * div(-1,rms*N*mean_eps))
  8007. // a = b*c + d*e
  8008. // a = b*c*f/f + d*e*f/f
  8009. // a = (b*c*f + d*e*f)*(1/f)
  8010. // a = (b*c*(1/c) + d*e*(1/c))*(1/(1/c))
  8011. // a = (b + d*e/c)*c
  8012. // b = dz, c = rrms, d = x, e = sum_xdz * div(-1,rms*N*mean_eps)
  8013. // a = (dz + x*sum_xdz * div(-1,rms*N*mean_eps)/rrms)*rrms
  8014. // a = (dz + x*sum_xdz * div(-1,rms*N*mean_eps)*rms)*rrms
  8015. // a = (dz + x*sum_xdz * div(-rms,rms*N*mean_eps))*rrms
  8016. // a = (dz + x*sum_xdz * div(-1,N*mean_eps))*rrms
  8017. // a = (dz + x*div(-sum_xdz,N*mean_eps))*rrms
  8018. // a = (dz + x*div(-mean_xdz,mean_eps))*rrms
  8019. // grad[#00] = scale(dz + scale(x, div(-mean_xdz,mean_eps)),rrms)
  8020. // grad[#00] = scale(dz + scale(x, -mean_xdz/mean_eps),rrms)
  8021. // dx = scale(dz + scale(x, -mean_xdz/mean_eps),rrms)
  8022. }
  8023. // dx = scale(dz + scale(x, -mean_xdz/mean_eps),rrms)
  8024. // post-order:
  8025. // dx := x
  8026. // dx := scale(dx,-mean_xdz/mean_eps)
  8027. // dx := add(dx, dz)
  8028. // dx := scale(dx, rrms)
  8029. float * dx = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3);
  8030. ggml_vec_cpy_f32 (ne00, dx, x);
  8031. // ggml_vec_scale_f32(ne00, dx, -mean_xdz/mean_eps);
  8032. ggml_vec_scale_f32(ne00, dx, (float)(-sum_xdz)/sum_eps);
  8033. ggml_vec_acc_f32 (ne00, dx, dz);
  8034. ggml_vec_scale_f32(ne00, dx, rrms);
  8035. }
  8036. }
  8037. }
  8038. }
  8039. static void ggml_compute_forward_rms_norm_back(
  8040. const struct ggml_compute_params * params,
  8041. const struct ggml_tensor * src0,
  8042. const struct ggml_tensor * src1,
  8043. struct ggml_tensor * dst) {
  8044. switch (src0->type) {
  8045. case GGML_TYPE_F32:
  8046. {
  8047. ggml_compute_forward_rms_norm_back_f32(params, src0, src1, dst);
  8048. } break;
  8049. default:
  8050. {
  8051. GGML_ASSERT(false);
  8052. } break;
  8053. }
  8054. }
  8055. // ggml_compute_forward_group_norm
  8056. static void ggml_compute_forward_group_norm_f32(
  8057. const struct ggml_compute_params * params,
  8058. const struct ggml_tensor * src0,
  8059. struct ggml_tensor * dst) {
  8060. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  8061. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8062. return;
  8063. }
  8064. GGML_ASSERT(src0->nb[0] == sizeof(float));
  8065. const int ith = params->ith;
  8066. const int nth = params->nth;
  8067. GGML_TENSOR_UNARY_OP_LOCALS
  8068. const float eps = 1e-6f; // TODO: make this a parameter
  8069. // TODO: optimize
  8070. int n_channels = src0->ne[2];
  8071. int n_groups = dst->op_params[0];
  8072. int n_channels_per_group = (n_channels + n_groups - 1) / n_groups;
  8073. for (int i = ith; i < n_groups; i+=nth) {
  8074. int start = i * n_channels_per_group;
  8075. int end = start + n_channels_per_group;
  8076. if (end > n_channels) {
  8077. end = n_channels;
  8078. }
  8079. int step = end - start;
  8080. for (int64_t i03 = 0; i03 < ne03; i03++) {
  8081. ggml_float sum = 0.0;
  8082. for (int64_t i02 = start; i02 < end; i02++) {
  8083. for (int64_t i01 = 0; i01 < ne01; i01++) {
  8084. const float * x = (float *)((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03);
  8085. for (int64_t i00 = 0; i00 < ne00; i00++) {
  8086. sum += (ggml_float)x[i00];
  8087. }
  8088. }
  8089. }
  8090. float mean = sum / (ne00 * ne01 * step);
  8091. ggml_float sum2 = 0.0;
  8092. for (int64_t i02 = start; i02 < end; i02++) {
  8093. for (int64_t i01 = 0; i01 < ne01; i01++) {
  8094. const float * x = (float *)((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03);
  8095. float * y = (float *)((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3);
  8096. for (int64_t i00 = 0; i00 < ne00; i00++) {
  8097. float v = x[i00] - mean;
  8098. y[i00] = v;
  8099. sum2 += (ggml_float)(v * v);
  8100. }
  8101. }
  8102. }
  8103. float variance = sum2 / (ne00 * ne01 * step);
  8104. const float scale = 1.0f / sqrtf(variance + eps);
  8105. for (int64_t i02 = start; i02 < end; i02++) {
  8106. for (int64_t i01 = 0; i01 < ne01; i01++) {
  8107. float * y = (float *)((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3);
  8108. ggml_vec_scale_f32(ne00, y, scale);
  8109. }
  8110. }
  8111. }
  8112. }
  8113. }
  8114. static void ggml_compute_forward_group_norm(
  8115. const struct ggml_compute_params * params,
  8116. const struct ggml_tensor * src0,
  8117. struct ggml_tensor * dst) {
  8118. switch (src0->type) {
  8119. case GGML_TYPE_F32:
  8120. {
  8121. ggml_compute_forward_group_norm_f32(params, src0, dst);
  8122. } break;
  8123. default:
  8124. {
  8125. GGML_ASSERT(false);
  8126. } break;
  8127. }
  8128. }
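// Note on the group_norm kernel above (not part of the original source): for each index
// along dim 3 it normalizes one group of channels at a time, taking the mean and variance
// over all of dims 0 and 1 plus the `step` channels in the group; eps is currently
// hard-coded to 1e-6f (see the TODO). With n_groups == n_channels each channel is
// normalized on its own, and with n_groups == 1 the whole ne00*ne01*ne02 slab is
// normalized together.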
  8129. // ggml_compute_forward_mul_mat
  8130. #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
  8131. // helper function to determine if it is better to use BLAS or not
  8132. // for large matrices, BLAS is faster
  8133. static bool ggml_compute_forward_mul_mat_use_blas(struct ggml_tensor * dst) {
  8134. const struct ggml_tensor * src0 = dst->src[0];
  8135. const struct ggml_tensor * src1 = dst->src[1];
  8136. //const int64_t ne00 = src0->ne[0];
  8137. //const int64_t ne01 = src0->ne[1];
  8138. const int64_t ne10 = src1->ne[0];
  8139. const int64_t ne0 = dst->ne[0];
  8140. const int64_t ne1 = dst->ne[1];
  8141. // NOTE: with GGML_OP_MUL_MAT_ID we don't want to go through the BLAS branch because it will dequantize (to_float)
  8142. // all the experts for each batch element and the processing would become incredibly slow
  8143. // TODO: find the optimal values for these
  8144. if (dst->op != GGML_OP_MUL_MAT_ID &&
  8145. ggml_is_contiguous(src0) &&
  8146. ggml_is_contiguous(src1) &&
  8147. //src0->type == GGML_TYPE_F32 &&
  8148. src1->type == GGML_TYPE_F32 &&
  8149. (ne0 >= 32 && ne1 >= 32 && ne10 >= 32)) {
  8150. /*printf("BLAS: %d %d %d %d %d\n", ne0, ne1, ne10, ne00, ne01);*/
  8151. return true;
  8152. }
  8153. return false;
  8154. }
  8155. #endif
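// Illustrative sketch (not part of ggml): with dst sized as asserted below (ne0 == ne01,
// ne1 == ne11) and the shared inner dimension ne10 == ne00, mul_mat computes
//   dst[i1][i0] = dot(row i0 of src0, row i1 of src1)
// i.e. dst = src1 * src0^T in row-major terms, which is also how the cblas_sgemm call
// below is set up (CblasNoTrans for src1, CblasTrans for src0). A naive reference for a
// single 2-D plane (the example_* name is hypothetical):
static void example_mul_mat_reference(const float * src0, const float * src1, float * dst,
                                      int64_t ne00, int64_t ne01, int64_t ne11) {
    for (int64_t i1 = 0; i1 < ne11; ++i1) {
        for (int64_t i0 = 0; i0 < ne01; ++i0) {
            double acc = 0.0;
            for (int64_t k = 0; k < ne00; ++k) {
                acc += (double) src0[i0*ne00 + k] * (double) src1[i1*ne00 + k];
            }
            dst[i1*ne01 + i0] = (float) acc;
        }
    }
}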
  8156. static void ggml_compute_forward_mul_mat(
  8157. const struct ggml_compute_params * params,
  8158. const struct ggml_tensor * src0,
  8159. const struct ggml_tensor * src1,
  8160. struct ggml_tensor * dst) {
  8161. int64_t t0 = ggml_perf_time_us();
  8162. UNUSED(t0);
  8163. GGML_TENSOR_BINARY_OP_LOCALS
  8164. const int ith = params->ith;
  8165. const int nth = params->nth;
  8166. const enum ggml_type type = src0->type;
  8167. const bool src1_cont = ggml_is_contiguous(src1);
  8168. ggml_vec_dot_t const vec_dot = type_traits[type].vec_dot;
  8169. enum ggml_type const vec_dot_type = type_traits[type].vec_dot_type;
  8170. ggml_from_float_t const from_float_to_vec_dot = type_traits[vec_dot_type].from_float;
  8171. GGML_ASSERT(ne0 == ne01);
  8172. GGML_ASSERT(ne1 == ne11);
  8173. GGML_ASSERT(ne2 == ne12);
  8174. GGML_ASSERT(ne3 == ne13);
  8175. // we don't support permuted src0 or src1
  8176. GGML_ASSERT(nb00 == ggml_type_size(type));
  8177. GGML_ASSERT(nb10 == ggml_type_size(src1->type));
  8178. // dst cannot be transposed or permuted
  8179. GGML_ASSERT(nb0 == sizeof(float));
  8180. GGML_ASSERT(nb0 <= nb1);
  8181. GGML_ASSERT(nb1 <= nb2);
  8182. GGML_ASSERT(nb2 <= nb3);
  8183. // broadcast factors
  8184. const int64_t r2 = ne12/ne02;
  8185. const int64_t r3 = ne13/ne03;
  8186. // nb01 >= nb00 - src0 is not transposed
  8187. // compute by src0 rows
  8188. #if defined(GGML_USE_CLBLAST)
  8189. if (ggml_cl_can_mul_mat(src0, src1, dst)) {
  8190. if (params->ith == 0 && params->type == GGML_TASK_COMPUTE) {
  8191. ggml_cl_mul_mat(src0, src1, dst, params->wdata, params->wsize);
  8192. }
  8193. return;
  8194. }
  8195. #endif
  8196. #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
  8197. if (ggml_compute_forward_mul_mat_use_blas(dst)) {
  8198. const int64_t ne_plane = ne01*ne00;
  8199. const int64_t desired_wsize = ne13*ne12*ne_plane*sizeof(float);
  8200. UNUSED(desired_wsize);
  8201. if (params->type == GGML_TASK_INIT) {
  8202. if (type != GGML_TYPE_F32) {
  8203. assert(params->wsize >= desired_wsize);
  8204. // parallelize by src0 rows
  8205. for (int64_t i13 = 0; i13 < ne13; i13++) {
  8206. for (int64_t i12 = 0; i12 < ne12; i12++) {
8207. // broadcast src0 across the 2nd and 3rd dimensions of src1
  8208. const int64_t i03 = i13/r3;
  8209. const int64_t i02 = i12/r2;
  8210. const void * x = (char *) src0->data + i02*nb02 + i03*nb03;
  8211. float * const wdata = (float *) params->wdata + i13*ne12*ne_plane + i12*ne_plane;
  8212. ggml_to_float_t const to_float = type_traits[type].to_float;
  8213. for (int64_t i01 = ith; i01 < ne01; i01 += nth) {
  8214. to_float((const char *) x + i01*nb01, wdata + i01*ne00, ne00);
  8215. }
  8216. }
  8217. }
  8218. }
  8219. return;
  8220. }
  8221. if (params->type == GGML_TASK_FINALIZE) {
  8222. return;
  8223. }
  8224. // perform sgemm, parallelization controlled by blas lib
  8225. if (ith != 0) {
  8226. return;
  8227. }
  8228. //const int64_t tgemm0 = ggml_perf_time_us();
  8229. for (int64_t i13 = 0; i13 < ne13; i13++) {
  8230. for (int64_t i12 = 0; i12 < ne12; i12++) {
  8231. const int64_t i03 = i13/r3;
  8232. const int64_t i02 = i12/r2;
  8233. const void * x = (char *) src0->data + i02*nb02 + i03*nb03;
  8234. const float * y = (float *) ((char *) src1->data + i12*nb12 + i13*nb13);
  8235. float * d = (float *) ((char *) dst->data + i12*nb2 + i13*nb3);
  8236. if (type != GGML_TYPE_F32) {
  8237. x = (float *) params->wdata + i13*ne12*ne_plane + i12*ne_plane;
  8238. }
  8239. cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans,
  8240. ne1, ne01, ne10,
  8241. 1.0f, y, ne10,
  8242. x, ne00,
  8243. 0.0f, d, ne01);
  8244. }
  8245. }
  8246. //printf("cblas_sgemm = %.3f ms, %lld flops\n", (ggml_perf_time_us() - tgemm0)/1000.0, ne13*ne12*ne1*ne01*ne10*2);
  8247. //printf("CBLAS = %f ms, %d x %d x %d x %d\n", (ggml_perf_time_us() - t0)/1000.0, ne0, ne1, ne2, ne3);
  8248. return;
  8249. }
  8250. #endif
  8251. if (params->type == GGML_TASK_INIT) {
  8252. if (ith != 0) {
  8253. return;
  8254. }
  8255. if (src1->type != vec_dot_type) {
  8256. char * wdata = params->wdata;
  8257. const size_t row_size = ggml_row_size(vec_dot_type, ne10);
  8258. assert(params->wsize >= ne11*ne12*ne13*row_size);
  8259. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  8260. for (int64_t i13 = 0; i13 < ne13; ++i13) {
  8261. for (int64_t i12 = 0; i12 < ne12; ++i12) {
  8262. for (int64_t i11 = 0; i11 < ne11; ++i11) {
  8263. from_float_to_vec_dot((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11), (void *) wdata, ne10);
  8264. wdata += row_size;
  8265. }
  8266. }
  8267. }
  8268. }
  8269. return;
  8270. }
  8271. if (params->type == GGML_TASK_FINALIZE) {
  8272. return;
  8273. }
  8274. const void * wdata = (src1->type == vec_dot_type) ? src1->data : params->wdata;
  8275. const size_t row_size = ggml_row_size(vec_dot_type, ne10);
  8276. const int64_t nr0 = ne01; // src0 rows
  8277. const int64_t nr1 = ne1*ne12*ne13; // src1 rows
  8278. //printf("nr0 = %lld, nr1 = %lld\n", nr0, nr1);
  8279. // distribute the thread work across the inner or outer loop based on which one is larger
  8280. const int64_t nth0 = nr0 > nr1 ? nth : 1; // parallelize by src0 rows
  8281. const int64_t nth1 = nr0 > nr1 ? 1 : nth; // parallelize by src1 rows
  8282. const int64_t ith0 = ith % nth0;
  8283. const int64_t ith1 = ith / nth0;
  8284. const int64_t dr0 = (nr0 + nth0 - 1)/nth0;
  8285. const int64_t dr1 = (nr1 + nth1 - 1)/nth1;
  8286. const int64_t ir010 = dr0*ith0;
  8287. const int64_t ir011 = MIN(ir010 + dr0, nr0);
  8288. const int64_t ir110 = dr1*ith1;
  8289. const int64_t ir111 = MIN(ir110 + dr1, nr1);
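// worked example of the split above, assuming nth = 4, nr0 = 4096, nr1 = 32:
// nr0 > nr1 so nth0 = 4, nth1 = 1, dr0 = 1024, dr1 = 32; thread ith = 2 gets ith0 = 2, ith1 = 0,
// i.e. src0 rows [2048, 3072) and all 32 src1 rows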
  8290. //printf("ir010 = %6lld, ir011 = %6lld, ir110 = %6lld, ir111 = %6lld\n", ir010, ir011, ir110, ir111);
  8291. // threads with no work simply yield (not sure if it helps)
  8292. if (ir010 >= ir011 || ir110 >= ir111) {
  8293. sched_yield();
  8294. return;
  8295. }
  8296. assert(ne12 % ne02 == 0);
  8297. assert(ne13 % ne03 == 0);
  8298. // block-tiling attempt
  8299. const int64_t blck_0 = 16;
  8300. const int64_t blck_1 = 16;
  8301. // attempt to reduce false-sharing (does not seem to make a difference)
  8302. float tmp[16];
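// tmp buffers up to blck_0 dot products for one src1 column; they are flushed to dst with a single memcpy
// per block below, so each thread touches dst in 16-float chunks rather than one float at a time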
  8303. for (int64_t iir1 = ir110; iir1 < ir111; iir1 += blck_1) {
  8304. for (int64_t iir0 = ir010; iir0 < ir011; iir0 += blck_0) {
  8305. for (int64_t ir1 = iir1; ir1 < iir1 + blck_1 && ir1 < ir111; ++ir1) {
  8306. const int64_t i13 = (ir1/(ne12*ne1));
  8307. const int64_t i12 = (ir1 - i13*ne12*ne1)/ne1;
  8308. const int64_t i11 = (ir1 - i13*ne12*ne1 - i12*ne1);
  8309. // broadcast src0 into src1
  8310. const int64_t i03 = i13/r3;
  8311. const int64_t i02 = i12/r2;
  8312. const int64_t i1 = i11;
  8313. const int64_t i2 = i12;
  8314. const int64_t i3 = i13;
  8315. const char * src0_row = (const char *) src0->data + (0 + i02*nb02 + i03*nb03);
8316. // desc: when src1 is neither contiguous nor already in vec_dot_type we have to compute the offset from its strides;
8317. // otherwise the rows are contiguous (either the original src1 data or the converted copy in params->wdata),
8318. // so we can index them directly by row
  8319. // TODO: this is a bit of a hack, we should probably have a better way to handle this
  8320. const char * src1_col = (const char *) wdata +
  8321. (src1_cont || src1->type != vec_dot_type
  8322. ? (i11 + i12*ne11 + i13*ne12*ne11)*row_size
  8323. : (i11*nb11 + i12*nb12 + i13*nb13));
  8324. float * dst_col = (float *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3));
  8325. //for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir011; ++ir0) {
  8326. // vec_dot(ne00, &dst_col[ir0], src0_row + ir0*nb01, src1_col);
  8327. //}
  8328. for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir011; ++ir0) {
  8329. vec_dot(ne00, &tmp[ir0 - iir0], src0_row + ir0*nb01, src1_col);
  8330. }
  8331. memcpy(&dst_col[iir0], tmp, (MIN(iir0 + blck_0, ir011) - iir0)*sizeof(float));
  8332. }
  8333. }
  8334. }
  8335. }
  8336. // ggml_compute_forward_mul_mat_id
  8337. static void ggml_compute_forward_mul_mat_id(
  8338. const struct ggml_compute_params * params,
  8339. const struct ggml_tensor * ids,
  8340. const struct ggml_tensor * src1,
  8341. struct ggml_tensor * dst) {
  8342. const struct ggml_tensor * src0 = dst->src[2]; // only for GGML_TENSOR_BINARY_OP_LOCALS
  8343. GGML_TENSOR_BINARY_OP_LOCALS
  8344. const int ith = params->ith;
  8345. const int nth = params->nth;
  8346. const enum ggml_type type = src0->type;
  8347. const bool src1_cont = ggml_is_contiguous(src1);
  8348. ggml_vec_dot_t const vec_dot = type_traits[type].vec_dot;
  8349. enum ggml_type const vec_dot_type = type_traits[type].vec_dot_type;
  8350. ggml_from_float_t const from_float_to_vec_dot = type_traits[vec_dot_type].from_float;
  8351. GGML_ASSERT(ne0 == ne01);
  8352. GGML_ASSERT(ne1 == ne11);
  8353. GGML_ASSERT(ne2 == ne12);
  8354. GGML_ASSERT(ne3 == ne13);
  8355. // we don't support permuted src0 or src1
  8356. GGML_ASSERT(nb00 == ggml_type_size(type));
  8357. GGML_ASSERT(nb10 == ggml_type_size(src1->type));
  8358. // dst cannot be transposed or permuted
  8359. GGML_ASSERT(nb0 == sizeof(float));
  8360. GGML_ASSERT(nb0 <= nb1);
  8361. GGML_ASSERT(nb1 <= nb2);
  8362. GGML_ASSERT(nb2 <= nb3);
  8363. // broadcast factors
  8364. const int64_t r2 = ne12/ne02;
  8365. const int64_t r3 = ne13/ne03;
  8366. // row groups
  8367. const int id = ggml_get_op_params_i32(dst, 0);
  8368. const int n_as = ggml_get_op_params_i32(dst, 1);
  8369. char * wdata_src1_end = (src1->type == vec_dot_type) ?
  8370. (char *) params->wdata :
  8371. (char *) params->wdata + GGML_PAD(ggml_row_size(vec_dot_type, ggml_nelements(src1)), sizeof(int64_t));
  8372. int64_t * matrix_row_counts = (int64_t *) (wdata_src1_end); // [n_as]
  8373. int64_t * matrix_rows = matrix_row_counts + n_as; // [n_as][ne11]
  8374. #define MMID_MATRIX_ROW(row_id, i1) matrix_rows[(row_id)*ne11 + (i1)]
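// wdata layout for mul_mat_id: [converted src1 rows, only if src1 needs conversion]
// [matrix_row_counts: n_as counters][matrix_rows: n_as x ne11 row indices];
// MMID_MATRIX_ROW(a, i) is the i-th src1 row assigned to expert a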
  8375. if (params->type == GGML_TASK_INIT) {
  8376. if (ith != 0) {
  8377. return;
  8378. }
  8379. char * wdata = params->wdata;
  8380. if (src1->type != vec_dot_type) {
  8381. const size_t row_size = ggml_row_size(vec_dot_type, ne10);
  8382. assert(params->wsize >= ne11*ne12*ne13*row_size);
  8383. assert(src1->type == GGML_TYPE_F32);
  8384. for (int64_t i13 = 0; i13 < ne13; ++i13) {
  8385. for (int64_t i12 = 0; i12 < ne12; ++i12) {
  8386. for (int64_t i11 = 0; i11 < ne11; ++i11) {
  8387. from_float_to_vec_dot((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11), (void *) wdata, ne10);
  8388. wdata += row_size;
  8389. }
  8390. }
  8391. }
  8392. }
  8393. // initialize matrix_row_counts
  8394. GGML_ASSERT(wdata == wdata_src1_end);
  8395. memset(matrix_row_counts, 0, n_as*sizeof(int64_t));
  8396. // group rows by src0 matrix
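// small example of the grouping below, assuming n_as = 2 and the ids entries for slot `id` are {1, 0, 1}
// over three tokens: matrix_row_counts becomes {1, 2}, matrix_rows[0] = {1} and matrix_rows[1] = {0, 2},
// so each expert later multiplies only its own rows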
  8397. for (int64_t i01 = 0; i01 < ids->ne[1]; i01++) {
  8398. const int32_t row_id = *(const int32_t *) ((const char *) ids->data + i01*ids->nb[1] + id*ids->nb[0]);
  8399. GGML_ASSERT(row_id >= 0 && row_id < n_as);
  8400. MMID_MATRIX_ROW(row_id, matrix_row_counts[row_id]) = i01;
  8401. matrix_row_counts[row_id] += 1;
  8402. }
  8403. return;
  8404. }
  8405. if (params->type == GGML_TASK_FINALIZE) {
  8406. return;
  8407. }
  8408. // compute each matrix multiplication in sequence
  8409. for (int cur_a = 0; cur_a < n_as; ++cur_a) {
  8410. const int64_t cne1 = matrix_row_counts[cur_a];
  8411. if (cne1 == 0) {
  8412. continue;
  8413. }
  8414. const struct ggml_tensor * src0_cur = dst->src[cur_a + 2];
  8415. const void * wdata = (src1->type == vec_dot_type) ? src1->data : params->wdata;
  8416. const size_t row_size = ggml_row_size(vec_dot_type, ne10);
  8417. const int64_t nr0 = ne01; // src0 rows
  8418. const int64_t nr1 = cne1*ne12*ne13; // src1 rows
  8419. //printf("nr0 = %lld, nr1 = %lld\n", nr0, nr1);
  8420. // distribute the thread work across the inner or outer loop based on which one is larger
  8421. const int64_t nth0 = nr0 > nr1 ? nth : 1; // parallelize by src0 rows
  8422. const int64_t nth1 = nr0 > nr1 ? 1 : nth; // parallelize by src1 rows
  8423. const int64_t ith0 = ith % nth0;
  8424. const int64_t ith1 = ith / nth0;
  8425. const int64_t dr0 = (nr0 + nth0 - 1)/nth0;
  8426. const int64_t dr1 = (nr1 + nth1 - 1)/nth1;
  8427. const int64_t ir010 = dr0*ith0;
  8428. const int64_t ir011 = MIN(ir010 + dr0, nr0);
  8429. const int64_t ir110 = dr1*ith1;
  8430. const int64_t ir111 = MIN(ir110 + dr1, nr1);
  8431. //printf("ir010 = %6lld, ir011 = %6lld, ir110 = %6lld, ir111 = %6lld\n", ir010, ir011, ir110, ir111);
  8432. // threads with no work simply yield (not sure if it helps)
  8433. if (ir010 >= ir011 || ir110 >= ir111) {
  8434. sched_yield();
  8435. continue;
  8436. }
  8437. assert(ne12 % ne02 == 0);
  8438. assert(ne13 % ne03 == 0);
  8439. // block-tiling attempt
  8440. const int64_t blck_0 = 16;
  8441. const int64_t blck_1 = 16;
  8442. // attempt to reduce false-sharing (does not seem to make a difference)
  8443. float tmp[16];
  8444. for (int64_t iir1 = ir110; iir1 < ir111; iir1 += blck_1) {
  8445. for (int64_t iir0 = ir010; iir0 < ir011; iir0 += blck_0) {
  8446. for (int64_t ir1 = iir1; ir1 < iir1 + blck_1 && ir1 < ir111; ++ir1) {
  8447. const int64_t i13 = (ir1/(ne12*cne1)); // Note: currently, src1 is always a matrix
  8448. const int64_t i12 = (ir1 - i13*ne12*cne1)/cne1;
  8449. const int64_t _i11 = (ir1 - i13*ne12*cne1 - i12*cne1);
  8450. const int64_t i11 = MMID_MATRIX_ROW(cur_a, _i11);
  8451. // broadcast src0 into src1
  8452. const int64_t i03 = i13/r3;
  8453. const int64_t i02 = i12/r2;
  8454. const int64_t i1 = i11;
  8455. const int64_t i2 = i12;
  8456. const int64_t i3 = i13;
  8457. const char * src0_row = (const char *) src0_cur->data + (0 + i02*nb02 + i03*nb03);
8458. // desc: when src1 is neither contiguous nor already in vec_dot_type we have to compute the offset from its strides;
8459. // otherwise the rows are contiguous (either the original src1 data or the converted copy in params->wdata),
8460. // so we can index them directly by row
  8461. // TODO: this is a bit of a hack, we should probably have a better way to handle this
  8462. const char * src1_col = (const char *) wdata +
  8463. (src1_cont || src1->type != vec_dot_type
  8464. ? (i11 + i12*ne11 + i13*ne12*ne11)*row_size
  8465. : (i11*nb11 + i12*nb12 + i13*nb13));
  8466. float * dst_col = (float *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3));
  8467. //for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir011; ++ir0) {
  8468. // vec_dot(ne00, &dst_col[ir0], src0_row + ir0*nb01, src1_col);
  8469. //}
  8470. for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir011; ++ir0) {
  8471. vec_dot(ne00, &tmp[ir0 - iir0], src0_row + ir0*nb01, src1_col);
  8472. }
  8473. memcpy(&dst_col[iir0], tmp, (MIN(iir0 + blck_0, ir011) - iir0)*sizeof(float));
  8474. }
  8475. }
  8476. }
  8477. }
  8478. #undef MMID_MATRIX_ROW
  8479. }
  8480. // ggml_compute_forward_out_prod
  8481. static void ggml_compute_forward_out_prod_f32(
  8482. const struct ggml_compute_params * params,
  8483. const struct ggml_tensor * src0,
  8484. const struct ggml_tensor * src1,
  8485. struct ggml_tensor * dst) {
  8486. // int64_t t0 = ggml_perf_time_us();
  8487. // UNUSED(t0);
  8488. GGML_TENSOR_BINARY_OP_LOCALS
  8489. const int ith = params->ith;
  8490. const int nth = params->nth;
  8491. GGML_ASSERT(ne0 == ne00);
  8492. GGML_ASSERT(ne1 == ne10);
  8493. GGML_ASSERT(ne2 == ne02);
  8494. GGML_ASSERT(ne02 == ne12);
  8495. GGML_ASSERT(ne3 == ne13);
  8496. GGML_ASSERT(ne03 == ne13);
  8497. // we don't support permuted src0 or src1
  8498. GGML_ASSERT(nb00 == sizeof(float));
  8499. // dst cannot be transposed or permuted
  8500. GGML_ASSERT(nb0 == sizeof(float));
  8501. // GGML_ASSERT(nb0 <= nb1);
  8502. // GGML_ASSERT(nb1 <= nb2);
  8503. // GGML_ASSERT(nb2 <= nb3);
  8504. // nb01 >= nb00 - src0 is not transposed
  8505. // compute by src0 rows
  8506. // TODO: #if defined(GGML_USE_CUBLAS) ggml_cuda_out_prod
  8507. // TODO: #if defined(GGML_USE_CLBLAST)
  8508. #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
  8509. bool use_blas = ggml_is_matrix(src0) &&
  8510. ggml_is_matrix(src1) &&
  8511. ggml_is_contiguous(src0) &&
  8512. (ggml_is_contiguous(src1) || ggml_is_transposed(src1));
  8513. #endif
  8514. if (params->type == GGML_TASK_INIT) {
  8515. #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) // gemm beta will zero dst
  8516. if (use_blas) {
  8517. return;
  8518. }
  8519. #endif
  8520. if (ith != 0) {
  8521. return;
  8522. }
  8523. ggml_vec_set_f32(ne0*ne1*ne2*ne3, dst->data, 0);
  8524. return;
  8525. }
  8526. if (params->type == GGML_TASK_FINALIZE) {
  8527. return;
  8528. }
  8529. #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
  8530. if (use_blas) {
  8531. if (params->ith != 0) { // All threads other than the first do no work.
  8532. return;
  8533. }
  8534. // Arguments to ggml_compute_forward_out_prod (expressed as major,minor)
  8535. // src0: (k,n)
  8536. // src1: (k,m)
  8537. // dst: (m,n)
  8538. //
  8539. // Arguments to sgemm (see https://github.com/Reference-LAPACK/lapack/blob/master/BLAS/SRC/sgemm.f)
  8540. // Also expressed as (major,minor)
  8541. // a: (m,k): so src1 transposed
  8542. // b: (k,n): so src0
  8543. // c: (m,n)
  8544. //
  8545. // However, if ggml_is_transposed(src1) is true, then
  8546. // src1->data already contains a transposed version, so sgemm mustn't
  8547. // transpose it further.
  8548. int n = src0->ne[0];
  8549. int k = src0->ne[1];
  8550. int m = src1->ne[0];
  8551. int transposeA, lda;
  8552. if (!ggml_is_transposed(src1)) {
  8553. transposeA = CblasTrans;
  8554. lda = m;
  8555. } else {
  8556. transposeA = CblasNoTrans;
  8557. lda = k;
  8558. }
  8559. float * a = (float *) ((char *) src1->data);
  8560. float * b = (float *) ((char *) src0->data);
  8561. float * c = (float *) ((char *) dst->data);
  8562. cblas_sgemm(CblasRowMajor, transposeA, CblasNoTrans, m, n, k, 1.0, a, lda, b, n, 0.0, c, n);
  8563. return;
  8564. }
  8565. #endif
  8566. // dst[:,:,:,:] = 0
  8567. // for i2,i3:
  8568. // for i1:
  8569. // for i01:
  8570. // for i0:
  8571. // dst[i0,i1,i2,i3] += src0[i0,i01,i2,i3] * src1[i1,i01,i2,i3]
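// equivalently: for every (i2, i3) plane, dst accumulates the outer product of src0 row i01 (length ne00)
// with src1 row i01 (length ne10), summed over i01; the ggml_vec_mad_f32 calls below add one scaled src0 row
// per (i1, i01) pair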
  8572. // parallelize by last three dimensions
  8573. // total rows in dst
  8574. const int64_t nr = ne1*ne2*ne3;
  8575. // rows per thread
  8576. const int64_t dr = (nr + nth - 1)/nth;
  8577. // row range for this thread
  8578. const int64_t ir0 = dr*ith;
  8579. const int64_t ir1 = MIN(ir0 + dr, nr);
  8580. // block-tiling attempt
  8581. const int64_t blck_0 = MAX(GGML_VEC_MAD_UNROLL, 32);
  8582. const int64_t blck_1 = 16;
  8583. for (int64_t bir = ir0; bir < ir1; bir += blck_1) {
  8584. const int64_t bir1 = MIN(bir + blck_1, ir1);
  8585. for (int64_t bi01 = 0; bi01 < ne01; bi01 += blck_0) {
  8586. const int64_t bne01 = MIN(bi01 + blck_0, ne01);
  8587. for (int64_t ir = bir; ir < bir1; ++ir) {
  8588. // dst indices
  8589. const int64_t i3 = ir/(ne2*ne1);
  8590. const int64_t i2 = (ir - i3*ne2*ne1)/ne1;
  8591. const int64_t i1 = (ir - i3*ne2*ne1 - i2*ne1);
  8592. const int64_t i02 = i2;
  8593. const int64_t i03 = i3;
  8594. //const int64_t i10 = i1;
  8595. const int64_t i12 = i2;
  8596. const int64_t i13 = i3;
  8597. #if GGML_VEC_MAD_UNROLL > 2
  8598. const int64_t bne01_unroll = bne01 - (bne01 % GGML_VEC_MAD_UNROLL);
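// bne01_unroll rounds the block end down to a multiple of GGML_VEC_MAD_UNROLL; the first loop fuses that many
// consecutive i01 updates into one ggml_vec_mad_f32_unroll call (stepping s0/s1 by nb01/nb11), and the loop
// after it handles the remainder one row at a time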
  8599. for (int64_t i01 = bi01; i01 < bne01_unroll; i01 += GGML_VEC_MAD_UNROLL) {
  8600. const int64_t i11 = i01;
  8601. float * s0 = (float *) ((char *) src0->data + ( i01*nb01 + i02*nb02 + i03*nb03));
  8602. float * s1 = (float *) ((char *) src1->data + (i1*nb10 + i11*nb11 + i12*nb12 + i13*nb13));
  8603. float * d = (float *) ((char *) dst->data + ( i1*nb1 + i2*nb2 + i3*nb3));
  8604. ggml_vec_mad_f32_unroll(ne0, nb01, nb11, d, s0, s1);
  8605. }
  8606. for (int64_t i01 = bne01_unroll; i01 < bne01; ++i01) {
  8607. const int64_t i11 = i01;
  8608. float * s0 = (float *) ((char *) src0->data + ( i01*nb01 + i02*nb02 + i03*nb03));
  8609. float * s1 = (float *) ((char *) src1->data + (i1*nb10 + i11*nb11 + i12*nb12 + i13*nb13));
  8610. float * d = (float *) ((char *) dst->data + ( i1*nb1 + i2*nb2 + i3*nb3));
  8611. ggml_vec_mad_f32(ne0, d, s0, *s1);
  8612. }
  8613. #else
  8614. for (int64_t i01 = bi01; i01 < bne01; ++i01) {
  8615. const int64_t i11 = i01;
  8616. float * s0 = (float *) ((char *) src0->data + ( i01*nb01 + i02*nb02 + i03*nb03));
  8617. float * s1 = (float *) ((char *) src1->data + (i1*nb10 + i11*nb11 + i12*nb12 + i13*nb13));
  8618. float * d = (float *) ((char *) dst->data + ( i1*nb1 + i2*nb2 + i3*nb3));
  8619. ggml_vec_mad_f32(ne0, d, s0, *s1);
  8620. }
  8621. #endif
  8622. }
  8623. }
  8624. }
  8625. //int64_t t1 = ggml_perf_time_us();
  8626. //static int64_t acc = 0;
  8627. //acc += t1 - t0;
  8628. //if (t1 - t0 > 10) {
  8629. // printf("\n");
  8630. // printf("ne00 = %5d, ne01 = %5d, ne02 = %5d, ne03 = %5d\n", ne00, ne01, ne02, ne03);
  8631. // printf("nb00 = %5d, nb01 = %5d, nb02 = %5d, nb03 = %5d\n", nb00, nb01, nb02, nb03);
  8632. // printf("ne10 = %5d, ne11 = %5d, ne12 = %5d, ne13 = %5d\n", ne10, ne11, ne12, ne13);
  8633. // printf("nb10 = %5d, nb11 = %5d, nb12 = %5d, nb13 = %5d\n", nb10, nb11, nb12, nb13);
  8634. // printf("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX task %d/%d: %d us, acc = %d\n", ith, nth, (int) (t1 - t0), (int) acc);
  8635. //}
  8636. }
  8637. static void ggml_compute_forward_out_prod_q_f32(
  8638. const struct ggml_compute_params * params,
  8639. const struct ggml_tensor * src0,
  8640. const struct ggml_tensor * src1,
  8641. struct ggml_tensor * dst) {
  8642. // int64_t t0 = ggml_perf_time_us();
  8643. // UNUSED(t0);
  8644. GGML_TENSOR_BINARY_OP_LOCALS;
  8645. const int ith = params->ith;
  8646. const int nth = params->nth;
  8647. const enum ggml_type type = src0->type;
  8648. ggml_to_float_t const dequantize_row_q = type_traits[type].to_float;
  8649. GGML_ASSERT(ne02 == ne12);
  8650. GGML_ASSERT(ne03 == ne13);
  8651. GGML_ASSERT(ne2 == ne12);
  8652. GGML_ASSERT(ne3 == ne13);
  8653. // we don't support permuted src0 dim0
  8654. GGML_ASSERT(nb00 == ggml_type_size(type));
  8655. // dst dim0 cannot be transposed or permuted
  8656. GGML_ASSERT(nb0 == sizeof(float));
  8657. // GGML_ASSERT(nb0 <= nb1);
  8658. // GGML_ASSERT(nb1 <= nb2);
  8659. // GGML_ASSERT(nb2 <= nb3);
  8660. GGML_ASSERT(ne0 == ne00);
  8661. GGML_ASSERT(ne1 == ne10);
  8662. GGML_ASSERT(ne2 == ne02);
  8663. GGML_ASSERT(ne3 == ne03);
  8664. // nb01 >= nb00 - src0 is not transposed
  8665. // compute by src0 rows
  8666. // TODO: #if defined(GGML_USE_CUBLAS) ggml_cuda_out_prod
  8667. // TODO: #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) || defined(GGML_USE_CLBLAST)
  8668. if (params->type == GGML_TASK_INIT) {
  8669. if (ith != 0) {
  8670. return;
  8671. }
  8672. ggml_vec_set_f32(ne0*ne1*ne2*ne3, dst->data, 0);
  8673. return;
  8674. }
  8675. if (params->type == GGML_TASK_FINALIZE) {
  8676. return;
  8677. }
  8678. // parallelize by last three dimensions
  8679. // total rows in dst
  8680. const int64_t nr = ne1*ne2*ne3;
  8681. // rows per thread
  8682. const int64_t dr = (nr + nth - 1)/nth;
  8683. // row range for this thread
  8684. const int64_t ir0 = dr*ith;
  8685. const int64_t ir1 = MIN(ir0 + dr, nr);
  8686. // dst[:,:,:,:] = 0
  8687. // for i2,i3:
  8688. // for i1:
  8689. // for i01:
  8690. // for i0:
  8691. // dst[i0,i1,i2,i3] += src0[i0,i01,i2,i3] * src1[i1,i01,i2,i3]
  8692. float * wdata = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32) * ith;
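// each thread gets its own ne0-float scratch row in params->wdata, padded by CACHE_LINE_SIZE_F32 floats so
// neighbouring threads do not share cache lines; quantized src0 rows are dequantized into it right before
// the vec_mad below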
  8693. for (int64_t ir = ir0; ir < ir1; ++ir) {
  8694. // dst indices
  8695. const int64_t i3 = ir/(ne2*ne1);
  8696. const int64_t i2 = (ir - i3*ne2*ne1)/ne1;
  8697. const int64_t i1 = (ir - i3*ne2*ne1 - i2*ne1);
  8698. const int64_t i02 = i2;
  8699. const int64_t i03 = i3;
  8700. //const int64_t i10 = i1;
  8701. const int64_t i12 = i2;
  8702. const int64_t i13 = i3;
  8703. for (int64_t i01 = 0; i01 < ne01; ++i01) {
  8704. const int64_t i11 = i01;
  8705. float * s0 = (float *) ((char *) src0->data + ( i01*nb01 + i02*nb02 + i03*nb03));
  8706. float * s1 = (float *) ((char *) src1->data + (i1*nb10 + i11*nb11 + i12*nb12 + i13*nb13));
  8707. float * d = (float *) ((char *) dst->data + ( i1*nb1 + i2*nb2 + i3*nb3));
  8708. dequantize_row_q(s0, wdata, ne0);
  8709. ggml_vec_mad_f32(ne0, d, wdata, *s1);
  8710. }
  8711. }
  8712. //int64_t t1 = ggml_perf_time_us();
  8713. //static int64_t acc = 0;
  8714. //acc += t1 - t0;
  8715. //if (t1 - t0 > 10) {
  8716. // printf("\n");
  8717. // printf("ne00 = %5d, ne01 = %5d, ne02 = %5d, ne03 = %5d\n", ne00, ne01, ne02, ne03);
  8718. // printf("nb00 = %5d, nb01 = %5d, nb02 = %5d, nb03 = %5d\n", nb00, nb01, nb02, nb03);
  8719. // printf("ne10 = %5d, ne11 = %5d, ne12 = %5d, ne13 = %5d\n", ne10, ne11, ne12, ne13);
  8720. // printf("nb10 = %5d, nb11 = %5d, nb12 = %5d, nb13 = %5d\n", nb10, nb11, nb12, nb13);
  8721. // printf("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX task %d/%d: %d us, acc = %d\n", ith, nth, (int) (t1 - t0), (int) acc);
  8722. //}
  8723. }
  8724. static void ggml_compute_forward_out_prod(
  8725. const struct ggml_compute_params * params,
  8726. const struct ggml_tensor * src0,
  8727. const struct ggml_tensor * src1,
  8728. struct ggml_tensor * dst) {
  8729. switch (src0->type) {
  8730. case GGML_TYPE_Q4_0:
  8731. case GGML_TYPE_Q4_1:
  8732. case GGML_TYPE_Q5_0:
  8733. case GGML_TYPE_Q5_1:
  8734. case GGML_TYPE_Q8_0:
  8735. case GGML_TYPE_Q2_K:
  8736. case GGML_TYPE_Q3_K:
  8737. case GGML_TYPE_Q4_K:
  8738. case GGML_TYPE_Q5_K:
  8739. case GGML_TYPE_Q6_K:
  8740. case GGML_TYPE_IQ2_XXS:
  8741. case GGML_TYPE_IQ2_XS:
  8742. {
  8743. ggml_compute_forward_out_prod_q_f32(params, src0, src1, dst);
  8744. } break;
  8745. case GGML_TYPE_F16:
  8746. {
  8747. GGML_ASSERT(false); // todo
  8748. // ggml_compute_forward_out_prod_f16_f32(params, src0, src1, dst);
  8749. } break;
  8750. case GGML_TYPE_F32:
  8751. {
  8752. ggml_compute_forward_out_prod_f32(params, src0, src1, dst);
  8753. } break;
  8754. default:
  8755. {
  8756. GGML_ASSERT(false);
  8757. } break;
  8758. }
  8759. }
  8760. // ggml_compute_forward_scale
  8761. static void ggml_compute_forward_scale_f32(
  8762. const struct ggml_compute_params * params,
  8763. const struct ggml_tensor * src0,
  8764. struct ggml_tensor * dst) {
  8765. GGML_ASSERT(ggml_is_contiguous(src0));
  8766. GGML_ASSERT(ggml_is_contiguous(dst));
  8767. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  8768. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8769. return;
  8770. }
  8771. // scale factor
  8772. float v;
  8773. memcpy(&v, dst->op_params, sizeof(float));
  8774. const int ith = params->ith;
  8775. const int nth = params->nth;
  8776. const int nc = src0->ne[0];
  8777. const int nr = ggml_nrows(src0);
  8778. // rows per thread
  8779. const int dr = (nr + nth - 1)/nth;
  8780. // row range for this thread
  8781. const int ir0 = dr*ith;
  8782. const int ir1 = MIN(ir0 + dr, nr);
  8783. const size_t nb01 = src0->nb[1];
  8784. const size_t nb1 = dst->nb[1];
  8785. for (int i1 = ir0; i1 < ir1; i1++) {
  8786. if (dst->data != src0->data) {
  8787. // src0 is same shape as dst => same indices
  8788. memcpy((char *)dst->data + i1*nb1, (char *)src0->data + i1*nb01, nc * sizeof(float));
  8789. }
  8790. ggml_vec_scale_f32(nc, (float *) ((char *) dst->data + i1*nb1), v);
  8791. }
  8792. }
  8793. static void ggml_compute_forward_scale(
  8794. const struct ggml_compute_params * params,
  8795. const struct ggml_tensor * src0,
  8796. struct ggml_tensor * dst) {
  8797. switch (src0->type) {
  8798. case GGML_TYPE_F32:
  8799. {
  8800. ggml_compute_forward_scale_f32(params, src0, dst);
  8801. } break;
  8802. default:
  8803. {
  8804. GGML_ASSERT(false);
  8805. } break;
  8806. }
  8807. }
  8808. // ggml_compute_forward_set
  8809. static void ggml_compute_forward_set_f32(
  8810. const struct ggml_compute_params * params,
  8811. const struct ggml_tensor * src0,
  8812. const struct ggml_tensor * src1,
  8813. struct ggml_tensor * dst) {
  8814. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  8815. GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));
8816. // view src0 and dst with these strides and data offset in bytes during set
  8817. // nb0 is implicitly element_size because src0 and dst are contiguous
  8818. size_t nb1 = ((int32_t *) dst->op_params)[0];
  8819. size_t nb2 = ((int32_t *) dst->op_params)[1];
  8820. size_t nb3 = ((int32_t *) dst->op_params)[2];
  8821. size_t offset = ((int32_t *) dst->op_params)[3];
  8822. bool inplace = (bool) ((int32_t *) dst->op_params)[4];
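// op_params layout for GGML_OP_SET: { nb1, nb2, nb3, offset, inplace }; dst is first copied from src0
// (unless inplace) and then the region described by these strides/offset, shaped like src1, is overwritten
// with src1 below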
  8823. if (!inplace && (params->type == GGML_TASK_INIT)) {
  8824. if (params->ith != 0) {
  8825. return;
  8826. }
  8827. // memcpy needs to be synchronized across threads to avoid race conditions.
  8828. // => do it in INIT phase
  8829. memcpy(
  8830. ((char *) dst->data),
  8831. ((char *) src0->data),
  8832. ggml_nbytes(dst));
  8833. }
  8834. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8835. return;
  8836. }
  8837. const int ith = params->ith;
  8838. const int nth = params->nth;
  8839. const int nr = ggml_nrows(src1);
  8840. const int nc = src1->ne[0];
  8841. GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne)
  8842. GGML_TENSOR_LOCALS(size_t, nb1, src1, nb)
  8843. // src0 and dst as viewed during set
  8844. const size_t nb0 = ggml_element_size(src0);
  8845. const int im0 = (ne10 == 0 ? 0 : ne10-1);
  8846. const int im1 = (ne11 == 0 ? 0 : ne11-1);
  8847. const int im2 = (ne12 == 0 ? 0 : ne12-1);
  8848. const int im3 = (ne13 == 0 ? 0 : ne13-1);
  8849. GGML_ASSERT(offset + im0*nb0 + im1*nb1 + im2*nb2 + im3*nb3 <= ggml_nbytes(dst));
  8850. GGML_ASSERT(nb10 == sizeof(float));
  8851. // rows per thread
  8852. const int dr = (nr + nth - 1)/nth;
  8853. // row range for this thread
  8854. const int ir0 = dr*ith;
  8855. const int ir1 = MIN(ir0 + dr, nr);
  8856. for (int ir = ir0; ir < ir1; ++ir) {
  8857. // src0 and dst are viewed with shape of src1 and offset
  8858. // => same indices
  8859. const int i3 = ir/(ne12*ne11);
  8860. const int i2 = (ir - i3*ne12*ne11)/ne11;
  8861. const int i1 = (ir - i3*ne12*ne11 - i2*ne11);
  8862. ggml_vec_cpy_f32(nc,
  8863. (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + offset),
  8864. (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11));
  8865. }
  8866. }
  8867. static void ggml_compute_forward_set(
  8868. const struct ggml_compute_params * params,
  8869. const struct ggml_tensor * src0,
  8870. const struct ggml_tensor * src1,
  8871. struct ggml_tensor * dst) {
  8872. switch (src0->type) {
  8873. case GGML_TYPE_F32:
  8874. {
  8875. ggml_compute_forward_set_f32(params, src0, src1, dst);
  8876. } break;
  8877. case GGML_TYPE_F16:
  8878. case GGML_TYPE_Q4_0:
  8879. case GGML_TYPE_Q4_1:
  8880. case GGML_TYPE_Q5_0:
  8881. case GGML_TYPE_Q5_1:
  8882. case GGML_TYPE_Q8_0:
  8883. case GGML_TYPE_Q8_1:
  8884. case GGML_TYPE_Q2_K:
  8885. case GGML_TYPE_Q3_K:
  8886. case GGML_TYPE_Q4_K:
  8887. case GGML_TYPE_Q5_K:
  8888. case GGML_TYPE_Q6_K:
  8889. case GGML_TYPE_IQ2_XXS:
  8890. case GGML_TYPE_IQ2_XS:
  8891. default:
  8892. {
  8893. GGML_ASSERT(false);
  8894. } break;
  8895. }
  8896. }
  8897. // ggml_compute_forward_cpy
  8898. static void ggml_compute_forward_cpy(
  8899. const struct ggml_compute_params * params,
  8900. const struct ggml_tensor * src0,
  8901. struct ggml_tensor * dst) {
  8902. ggml_compute_forward_dup(params, src0, dst);
  8903. }
  8904. // ggml_compute_forward_cont
  8905. static void ggml_compute_forward_cont(
  8906. const struct ggml_compute_params * params,
  8907. const struct ggml_tensor * src0,
  8908. struct ggml_tensor * dst) {
  8909. ggml_compute_forward_dup(params, src0, dst);
  8910. }
  8911. // ggml_compute_forward_reshape
  8912. static void ggml_compute_forward_reshape(
  8913. const struct ggml_compute_params * params,
  8914. const struct ggml_tensor * src0,
  8915. struct ggml_tensor * dst) {
  8916. // NOP
  8917. UNUSED(params);
  8918. UNUSED(src0);
  8919. UNUSED(dst);
  8920. }
  8921. // ggml_compute_forward_view
  8922. static void ggml_compute_forward_view(
  8923. const struct ggml_compute_params * params,
  8924. const struct ggml_tensor * src0) {
  8925. // NOP
  8926. UNUSED(params);
  8927. UNUSED(src0);
  8928. }
  8929. // ggml_compute_forward_permute
  8930. static void ggml_compute_forward_permute(
  8931. const struct ggml_compute_params * params,
  8932. const struct ggml_tensor * src0) {
  8933. // NOP
  8934. UNUSED(params);
  8935. UNUSED(src0);
  8936. }
  8937. // ggml_compute_forward_transpose
  8938. static void ggml_compute_forward_transpose(
  8939. const struct ggml_compute_params * params,
  8940. const struct ggml_tensor * src0) {
  8941. // NOP
  8942. UNUSED(params);
  8943. UNUSED(src0);
  8944. }
  8945. // ggml_compute_forward_get_rows
  8946. static void ggml_compute_forward_get_rows_q(
  8947. const struct ggml_compute_params * params,
  8948. const struct ggml_tensor * src0,
  8949. const struct ggml_tensor * src1,
  8950. struct ggml_tensor * dst) {
  8951. assert(params->ith == 0);
  8952. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8953. return;
  8954. }
  8955. GGML_TENSOR_BINARY_OP_LOCALS
  8956. const int64_t nc = ne00;
  8957. const int64_t nr = ggml_nelements(src1); GGML_UNUSED(nr);
  8958. const enum ggml_type type = src0->type;
  8959. ggml_to_float_t const dequantize_row_q = type_traits[type].to_float;
  8960. assert(ne0 == nc);
  8961. assert(ne02 == ne11);
  8962. assert(nb00 == ggml_type_size(type));
  8963. assert(ggml_nrows(dst) == nr);
  8964. // TODO: multi-thread
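// src1 holds int32 row indices: for each (i10, i11, i12) the selected src0 row i01 is dequantized straight
// into the matching dst row; note i11/i12 pick the src0 plane (nb02/nb03), which is why ne02 == ne11 is
// asserted above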
  8965. for (int64_t i12 = 0; i12 < ne12; ++i12) {
  8966. for (int64_t i11 = 0; i11 < ne11; ++i11) {
  8967. for (int64_t i10 = 0; i10 < ne10; ++i10) {
  8968. const int64_t i01 = *(int32_t *) ((char *) src1->data + i10*nb10 + i11*nb11 + i12*nb12);
  8969. dequantize_row_q(
  8970. (const void *) ((char *) src0->data + i01*nb01 + i11*nb02 + i12*nb03),
  8971. (float *) ((char *) dst->data + i10*nb1 + i11*nb2 + i12*nb3), nc);
  8972. }
  8973. }
  8974. }
  8975. }
  8976. static void ggml_compute_forward_get_rows_f16(
  8977. const struct ggml_compute_params * params,
  8978. const struct ggml_tensor * src0,
  8979. const struct ggml_tensor * src1,
  8980. struct ggml_tensor * dst) {
  8981. assert(params->ith == 0);
  8982. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8983. return;
  8984. }
  8985. GGML_TENSOR_BINARY_OP_LOCALS
  8986. const int64_t nc = ne00;
  8987. const int64_t nr = ggml_nelements(src1); GGML_UNUSED(nr);
  8988. assert(ne0 == nc);
  8989. assert(ne02 == ne11);
  8990. assert(nb00 == sizeof(ggml_fp16_t));
  8991. assert(ggml_nrows(dst) == nr);
  8992. // TODO: multi-thread
  8993. for (int64_t i12 = 0; i12 < ne12; ++i12) {
  8994. for (int64_t i11 = 0; i11 < ne11; ++i11) {
  8995. for (int64_t i10 = 0; i10 < ne10; ++i10) {
  8996. const int64_t i01 = *(int32_t *) ((char *) src1->data + i10*nb10 + i11*nb11 + i12*nb12);
  8997. ggml_fp16_to_fp32_row(
  8998. (const void *) ((char *) src0->data + i01*nb01 + i11*nb02 + i12*nb03),
  8999. (float *) ((char *) dst->data + i10*nb1 + i11*nb2 + i12*nb3), nc);
  9000. }
  9001. }
  9002. }
  9003. }
  9004. static void ggml_compute_forward_get_rows_f32(
  9005. const struct ggml_compute_params * params,
  9006. const struct ggml_tensor * src0,
  9007. const struct ggml_tensor * src1,
  9008. struct ggml_tensor * dst) {
  9009. assert(params->ith == 0);
  9010. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9011. return;
  9012. }
  9013. GGML_TENSOR_BINARY_OP_LOCALS
  9014. const int64_t nc = ne00;
  9015. const int64_t nr = ggml_nelements(src1); GGML_UNUSED(nr);
  9016. assert(ne0 == nc);
  9017. assert(ne02 == ne11);
  9018. assert(nb00 == sizeof(float));
  9019. assert(ggml_nrows(dst) == nr);
  9020. // TODO: multi-thread
  9021. for (int64_t i12 = 0; i12 < ne12; ++i12) {
  9022. for (int64_t i11 = 0; i11 < ne11; ++i11) {
  9023. for (int64_t i10 = 0; i10 < ne10; ++i10) {
  9024. const int64_t i01 = *(int32_t *) ((char *) src1->data + i10*nb10 + i11*nb11 + i12*nb12);
  9025. ggml_vec_cpy_f32(nc,
  9026. (float *) ((char *) dst->data + i10*nb1 + i11*nb2 + i12*nb3),
  9027. (float *) ((char *) src0->data + i01*nb01 + i11*nb02 + i12*nb03));
  9028. }
  9029. }
  9030. }
  9031. }
  9032. static void ggml_compute_forward_get_rows(
  9033. const struct ggml_compute_params * params,
  9034. const struct ggml_tensor * src0,
  9035. const struct ggml_tensor * src1,
  9036. struct ggml_tensor * dst) {
  9037. switch (src0->type) {
  9038. case GGML_TYPE_Q4_0:
  9039. case GGML_TYPE_Q4_1:
  9040. case GGML_TYPE_Q5_0:
  9041. case GGML_TYPE_Q5_1:
  9042. case GGML_TYPE_Q8_0:
  9043. case GGML_TYPE_Q8_1:
  9044. case GGML_TYPE_Q2_K:
  9045. case GGML_TYPE_Q3_K:
  9046. case GGML_TYPE_Q4_K:
  9047. case GGML_TYPE_Q5_K:
  9048. case GGML_TYPE_Q6_K:
  9049. case GGML_TYPE_IQ2_XXS:
  9050. case GGML_TYPE_IQ2_XS:
  9051. {
  9052. ggml_compute_forward_get_rows_q(params, src0, src1, dst);
  9053. } break;
  9054. case GGML_TYPE_F16:
  9055. {
  9056. ggml_compute_forward_get_rows_f16(params, src0, src1, dst);
  9057. } break;
  9058. case GGML_TYPE_F32:
  9059. case GGML_TYPE_I32:
  9060. {
  9061. ggml_compute_forward_get_rows_f32(params, src0, src1, dst);
  9062. } break;
  9063. default:
  9064. {
  9065. GGML_ASSERT(false);
  9066. } break;
  9067. }
  9068. //static bool first = true;
  9069. //printf("ne0 = %d, ne1 = %d, ne2 = %d\n", dst->ne[0], dst->ne[1], dst->ne[2]);
  9070. //if (first) {
  9071. // first = false;
  9072. //} else {
  9073. // for (int k = 0; k < dst->ne[1]; ++k) {
  9074. // for (int j = 0; j < dst->ne[0]/16; ++j) {
  9075. // for (int i = 0; i < 16; ++i) {
  9076. // printf("%8.4f ", ((float *) dst->data)[k*dst->ne[0] + j*16 + i]);
  9077. // }
  9078. // printf("\n");
  9079. // }
  9080. // printf("\n");
  9081. // }
  9082. // printf("\n");
  9083. // exit(0);
  9084. //}
  9085. }
  9086. // ggml_compute_forward_get_rows_back
  9087. static void ggml_compute_forward_get_rows_back_f32_f16(
  9088. const struct ggml_compute_params * params,
  9089. const struct ggml_tensor * src0,
  9090. const struct ggml_tensor * src1,
  9091. struct ggml_tensor * dst) {
  9092. GGML_ASSERT(params->ith == 0);
  9093. GGML_ASSERT(ggml_is_contiguous(dst));
  9094. // ggml_compute_forward_dup_same_cont(params, opt0, dst);
  9095. if (params->type == GGML_TASK_INIT) {
  9096. if (params->ith != 0) {
  9097. return;
  9098. }
  9099. memset(dst->data, 0, ggml_nbytes(dst));
  9100. }
  9101. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9102. return;
  9103. }
  9104. const int nc = src0->ne[0];
  9105. const int nr = ggml_nelements(src1);
  9106. GGML_ASSERT( dst->ne[0] == nc);
  9107. GGML_ASSERT(src0->nb[0] == sizeof(ggml_fp16_t));
  9108. for (int i = 0; i < nr; ++i) {
  9109. const int r = ((int32_t *) src1->data)[i];
  9110. for (int j = 0; j < nc; ++j) {
  9111. ggml_fp16_t v = ((ggml_fp16_t *) ((char *) src0->data + i*src0->nb[1]))[j];
  9112. ((float *) ((char *) dst->data + r*dst->nb[1]))[j] += GGML_FP16_TO_FP32(v);
  9113. }
  9114. }
  9115. }
  9116. static void ggml_compute_forward_get_rows_back_f32(
  9117. const struct ggml_compute_params * params,
  9118. const struct ggml_tensor * src0,
  9119. const struct ggml_tensor * src1,
  9120. struct ggml_tensor * dst) {
  9121. GGML_ASSERT(params->ith == 0);
  9122. GGML_ASSERT(ggml_is_contiguous(dst));
  9123. // ggml_compute_forward_dup_same_cont(params, opt0, dst);
  9124. if (params->type == GGML_TASK_INIT) {
  9125. if (params->ith != 0) {
  9126. return;
  9127. }
  9128. memset(dst->data, 0, ggml_nbytes(dst));
  9129. }
  9130. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9131. return;
  9132. }
  9133. const int nc = src0->ne[0];
  9134. const int nr = ggml_nelements(src1);
  9135. GGML_ASSERT( dst->ne[0] == nc);
  9136. GGML_ASSERT(src0->nb[0] == sizeof(float));
  9137. for (int i = 0; i < nr; ++i) {
  9138. const int r = ((int32_t *) src1->data)[i];
  9139. ggml_vec_add_f32(nc,
  9140. (float *) ((char *) dst->data + r*dst->nb[1]),
  9141. (float *) ((char *) dst->data + r*dst->nb[1]),
  9142. (float *) ((char *) src0->data + i*src0->nb[1]));
  9143. }
  9144. }
  9145. static void ggml_compute_forward_get_rows_back(
  9146. const struct ggml_compute_params * params,
  9147. const struct ggml_tensor * src0,
  9148. const struct ggml_tensor * src1,
  9149. struct ggml_tensor * dst) {
  9150. switch (src0->type) {
  9151. case GGML_TYPE_F16:
  9152. {
  9153. ggml_compute_forward_get_rows_back_f32_f16(params, src0, src1, dst);
  9154. } break;
  9155. case GGML_TYPE_F32:
  9156. {
  9157. ggml_compute_forward_get_rows_back_f32(params, src0, src1, dst);
  9158. } break;
  9159. default:
  9160. {
  9161. GGML_ASSERT(false);
  9162. } break;
  9163. }
  9164. //static bool first = true;
  9165. //printf("ne0 = %d, ne1 = %d, ne2 = %d\n", dst->ne[0], dst->ne[1], dst->ne[2]);
  9166. //if (first) {
  9167. // first = false;
  9168. //} else {
  9169. // for (int k = 0; k < dst->ne[1]; ++k) {
  9170. // for (int j = 0; j < dst->ne[0]/16; ++j) {
  9171. // for (int i = 0; i < 16; ++i) {
  9172. // printf("%8.4f ", ((float *) dst->data)[k*dst->ne[0] + j*16 + i]);
  9173. // }
  9174. // printf("\n");
  9175. // }
  9176. // printf("\n");
  9177. // }
  9178. // printf("\n");
  9179. // exit(0);
  9180. //}
  9181. }
  9182. // ggml_compute_forward_diag
  9183. static void ggml_compute_forward_diag_f32(
  9184. const struct ggml_compute_params * params,
  9185. const struct ggml_tensor * src0,
  9186. struct ggml_tensor * dst) {
  9187. GGML_ASSERT(params->ith == 0);
  9188. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9189. return;
  9190. }
  9191. // TODO: handle transposed/permuted matrices
  9192. GGML_TENSOR_UNARY_OP_LOCALS
  9193. GGML_ASSERT(ne00 == ne0);
  9194. GGML_ASSERT(ne00 == ne1);
  9195. GGML_ASSERT(ne01 == 1);
  9196. GGML_ASSERT(ne02 == ne2);
  9197. GGML_ASSERT(ne03 == ne3);
  9198. GGML_ASSERT(nb00 == sizeof(float));
  9199. GGML_ASSERT(nb0 == sizeof(float));
  9200. for (int i3 = 0; i3 < ne3; i3++) {
  9201. for (int i2 = 0; i2 < ne2; i2++) {
  9202. for (int i1 = 0; i1 < ne1; i1++) {
  9203. float * d = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1);
  9204. float * s = (float *)((char *) src0->data + i3*nb03 + i2*nb02);
  9205. for (int i0 = 0; i0 < i1; i0++) {
  9206. d[i0] = 0;
  9207. }
  9208. d[i1] = s[i1];
  9209. for (int i0 = i1+1; i0 < ne0; i0++) {
  9210. d[i0] = 0;
  9211. }
  9212. }
  9213. }
  9214. }
  9215. }
  9216. static void ggml_compute_forward_diag(
  9217. const struct ggml_compute_params * params,
  9218. const struct ggml_tensor * src0,
  9219. struct ggml_tensor * dst) {
  9220. switch (src0->type) {
  9221. case GGML_TYPE_F32:
  9222. {
  9223. ggml_compute_forward_diag_f32(params, src0, dst);
  9224. } break;
  9225. default:
  9226. {
  9227. GGML_ASSERT(false);
  9228. } break;
  9229. }
  9230. }
  9231. // ggml_compute_forward_diag_mask_inf
  9232. static void ggml_compute_forward_diag_mask_f32(
  9233. const struct ggml_compute_params * params,
  9234. const struct ggml_tensor * src0,
  9235. struct ggml_tensor * dst,
  9236. const float value) {
  9237. const int ith = params->ith;
  9238. const int nth = params->nth;
  9239. const int n_past = ((int32_t *) dst->op_params)[0];
  9240. const bool inplace = src0->data == dst->data;
  9241. GGML_ASSERT(n_past >= 0);
  9242. if (!inplace && (params->type == GGML_TASK_INIT)) {
  9243. if (ith != 0) {
  9244. return;
  9245. }
  9246. // memcpy needs to be synchronized across threads to avoid race conditions.
  9247. // => do it in INIT phase
  9248. GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));
  9249. GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));
  9250. memcpy(
  9251. ((char *) dst->data),
  9252. ((char *) src0->data),
  9253. ggml_nbytes(dst));
  9254. }
  9255. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9256. return;
  9257. }
  9258. // TODO: handle transposed/permuted matrices
  9259. const int n = ggml_nrows(src0);
  9260. const int nc = src0->ne[0];
  9261. const int nr = src0->ne[1];
  9262. const int nz = n/nr;
  9263. GGML_ASSERT( dst->nb[0] == sizeof(float));
  9264. GGML_ASSERT(src0->nb[0] == sizeof(float));
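// masking rule applied below: in every row j (of each of the nz planes), all columns i with i > n_past + j
// are overwritten with `value` (-INFINITY for diag_mask_inf, 0 for diag_mask_zero); columns up to n_past + j
// are left untouched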
  9265. for (int k = 0; k < nz; k++) {
  9266. for (int j = ith; j < nr; j += nth) {
  9267. for (int i = n_past; i < nc; i++) {
  9268. if (i > n_past + j) {
  9269. *(float *)((char *) dst->data + k*dst->nb[2] + j*dst->nb[1] + i*dst->nb[0]) = value;
  9270. }
  9271. }
  9272. }
  9273. }
  9274. }
  9275. static void ggml_compute_forward_diag_mask_inf(
  9276. const struct ggml_compute_params * params,
  9277. const struct ggml_tensor * src0,
  9278. struct ggml_tensor * dst) {
  9279. switch (src0->type) {
  9280. case GGML_TYPE_F32:
  9281. {
  9282. ggml_compute_forward_diag_mask_f32(params, src0, dst, -INFINITY);
  9283. } break;
  9284. default:
  9285. {
  9286. GGML_ASSERT(false);
  9287. } break;
  9288. }
  9289. }
  9290. static void ggml_compute_forward_diag_mask_zero(
  9291. const struct ggml_compute_params * params,
  9292. const struct ggml_tensor * src0,
  9293. struct ggml_tensor * dst) {
  9294. switch (src0->type) {
  9295. case GGML_TYPE_F32:
  9296. {
  9297. ggml_compute_forward_diag_mask_f32(params, src0, dst, 0);
  9298. } break;
  9299. default:
  9300. {
  9301. GGML_ASSERT(false);
  9302. } break;
  9303. }
  9304. }
  9305. // ggml_compute_forward_soft_max
  9306. static void ggml_compute_forward_soft_max_f32(
  9307. const struct ggml_compute_params * params,
  9308. const struct ggml_tensor * src0,
  9309. const struct ggml_tensor * src1,
  9310. struct ggml_tensor * dst) {
  9311. assert(ggml_is_contiguous(dst));
  9312. assert(ggml_are_same_shape(src0, dst));
  9313. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9314. return;
  9315. }
  9316. float scale = 1.0f;
  9317. memcpy(&scale, (float *) dst->op_params + 0, sizeof(float));
  9318. // TODO: handle transposed/permuted matrices
  9319. const int ith = params->ith;
  9320. const int nth = params->nth;
  9321. const int64_t ne11 = src1 ? src1->ne[1] : 1;
  9322. const int nc = src0->ne[0];
  9323. const int nr = ggml_nrows(src0);
  9324. // rows per thread
  9325. const int dr = (nr + nth - 1)/nth;
  9326. // row range for this thread
  9327. const int ir0 = dr*ith;
  9328. const int ir1 = MIN(ir0 + dr, nr);
  9329. float * wp = (float *) params->wdata + (nc + CACHE_LINE_SIZE_F32) * ith;
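// per-thread scratch: wp holds one scaled (and optionally masked) row; the loop below then runs the
// numerically stable softmax on it: subtract the row max, exponentiate via the ggml_table_exp_f16 lookup,
// accumulate the sum and scale by 1/sum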
  9330. for (int i1 = ir0; i1 < ir1; i1++) {
  9331. float * sp = (float *)((char *) src0->data + i1*src0->nb[1]);
  9332. float * dp = (float *)((char *) dst->data + i1*dst->nb[1]);
  9333. // broadcast the mask across rows
  9334. float * mp = src1 ? (float *)((char *) src1->data + (i1%ne11)*src1->nb[1]) : NULL;
  9335. ggml_vec_cpy_f32 (nc, wp, sp);
  9336. ggml_vec_scale_f32(nc, wp, scale);
  9337. if (mp) {
  9338. ggml_vec_acc_f32(nc, wp, mp);
  9339. }
  9340. #ifndef NDEBUG
  9341. for (int i = 0; i < nc; ++i) {
  9342. //printf("p[%d] = %f\n", i, p[i]);
  9343. assert(!isnan(wp[i]));
  9344. }
  9345. #endif
  9346. float max = -INFINITY;
  9347. ggml_vec_max_f32(nc, &max, wp);
  9348. ggml_float sum = 0.0;
  9349. uint16_t scvt;
  9350. for (int i = 0; i < nc; i++) {
  9351. if (wp[i] == -INFINITY) {
  9352. dp[i] = 0.0f;
  9353. } else {
  9354. // const float val = (wp[i] == -INFINITY) ? 0.0 : exp(wp[i] - max);
  9355. ggml_fp16_t s = GGML_FP32_TO_FP16(wp[i] - max);
  9356. memcpy(&scvt, &s, sizeof(scvt));
  9357. const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt]);
  9358. sum += (ggml_float)val;
  9359. dp[i] = val;
  9360. }
  9361. }
  9362. assert(sum > 0.0);
  9363. sum = 1.0/sum;
  9364. ggml_vec_scale_f32(nc, dp, sum);
  9365. #ifndef NDEBUG
  9366. for (int i = 0; i < nc; ++i) {
  9367. assert(!isnan(dp[i]));
  9368. assert(!isinf(dp[i]));
  9369. }
  9370. #endif
  9371. }
  9372. }
  9373. static void ggml_compute_forward_soft_max(
  9374. const struct ggml_compute_params * params,
  9375. const struct ggml_tensor * src0,
  9376. const struct ggml_tensor * src1,
  9377. struct ggml_tensor * dst) {
  9378. switch (src0->type) {
  9379. case GGML_TYPE_F32:
  9380. {
  9381. ggml_compute_forward_soft_max_f32(params, src0, src1, dst);
  9382. } break;
  9383. default:
  9384. {
  9385. GGML_ASSERT(false);
  9386. } break;
  9387. }
  9388. }
  9389. // ggml_compute_forward_soft_max_back
  9390. static void ggml_compute_forward_soft_max_back_f32(
  9391. const struct ggml_compute_params * params,
  9392. const struct ggml_tensor * src0,
  9393. const struct ggml_tensor * src1,
  9394. struct ggml_tensor * dst) {
  9395. GGML_ASSERT(ggml_is_contiguous(src0));
  9396. GGML_ASSERT(ggml_is_contiguous(src1));
  9397. GGML_ASSERT(ggml_is_contiguous(dst));
  9398. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  9399. GGML_ASSERT(ggml_are_same_shape(src1, dst));
  9400. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9401. return;
  9402. }
  9403. // TODO: handle transposed/permuted matrices
  9404. const int ith = params->ith;
  9405. const int nth = params->nth;
  9406. const int nc = src0->ne[0];
  9407. const int nr = ggml_nrows(src0);
  9408. // rows per thread
  9409. const int dr = (nr + nth - 1)/nth;
  9410. // row range for this thread
  9411. const int ir0 = dr*ith;
  9412. const int ir1 = MIN(ir0 + dr, nr);
  9413. for (int i1 = ir0; i1 < ir1; i1++) {
  9414. float *dy = (float *)((char *) src0->data + i1*src0->nb[1]);
  9415. float *y = (float *)((char *) src1->data + i1*src1->nb[1]);
  9416. float *dx = (float *)((char *) dst->data + i1*dst->nb[1]);
  9417. #ifndef NDEBUG
  9418. for (int i = 0; i < nc; ++i) {
  9419. //printf("p[%d] = %f\n", i, p[i]);
  9420. assert(!isnan(dy[i]));
  9421. assert(!isnan(y[i]));
  9422. }
  9423. #endif
  9424. // Jii = yi - yi*yi
  9425. // Jij = -yi*yj
  9426. // J = diag(y)-y.T*y
  9427. // dx = J * dy
  9428. // dxk = sum_i(Jki * dyi)
  9429. // dxk = sum_i(-yk*yi * dyi) - (-yk*yk)*dyk + (yk - yk*yk)*dyk
  9430. // dxk = sum_i(-yk*yi * dyi) + yk*yk*dyk + yk*dyk - yk*yk*dyk
  9431. // dxk = sum_i(-yk*yi * dyi) + yk*dyk
  9432. // dxk = -yk * sum_i(yi * dyi) + yk*dyk
  9433. // dxk = -yk * dot(y, dy) + yk*dyk
  9434. // dxk = yk * (- dot(y, dy) + dyk)
  9435. // dxk = yk * (dyk - dot(y, dy))
  9436. //
  9437. // post-order:
  9438. // dot_y_dy := dot(y, dy)
  9439. // dx := dy
  9440. // dx := dx - dot_y_dy
  9441. // dx := dx * y
  9442. // linear runtime, no additional memory
  9443. float dot_y_dy = 0;
  9444. ggml_vec_dot_f32 (nc, &dot_y_dy, y, dy);
  9445. ggml_vec_cpy_f32 (nc, dx, dy);
  9446. ggml_vec_acc1_f32(nc, dx, -dot_y_dy);
  9447. ggml_vec_mul_f32 (nc, dx, dx, y);
  9448. #ifndef NDEBUG
  9449. for (int i = 0; i < nc; ++i) {
  9450. assert(!isnan(dx[i]));
  9451. assert(!isinf(dx[i]));
  9452. }
  9453. #endif
  9454. }
  9455. }
  9456. static void ggml_compute_forward_soft_max_back(
  9457. const struct ggml_compute_params * params,
  9458. const struct ggml_tensor * src0,
  9459. const struct ggml_tensor * src1,
  9460. struct ggml_tensor * dst) {
  9461. switch (src0->type) {
  9462. case GGML_TYPE_F32:
  9463. {
  9464. ggml_compute_forward_soft_max_back_f32(params, src0, src1, dst);
  9465. } break;
  9466. default:
  9467. {
  9468. GGML_ASSERT(false);
  9469. } break;
  9470. }
  9471. }
  9472. // ggml_compute_forward_alibi
  9473. static void ggml_compute_forward_alibi_f32(
  9474. const struct ggml_compute_params * params,
  9475. const struct ggml_tensor * src0,
  9476. struct ggml_tensor * dst) {
  9477. assert(params->ith == 0);
  9478. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9479. return;
  9480. }
  9481. //const int n_past = ((int32_t *) dst->op_params)[0];
  9482. const int n_head = ((int32_t *) dst->op_params)[1];
  9483. float max_bias;
  9484. memcpy(&max_bias, (int32_t *) dst->op_params + 2, sizeof(float));
  9485. const int64_t ne0 = src0->ne[0]; // all_seq_len = n_past + ne1
  9486. const int64_t ne1 = src0->ne[1]; // seq_len_without_past
  9487. const int64_t ne2 = src0->ne[2]; // n_head -> this is k
  9488. //const int64_t ne3 = src0->ne[3]; // 1 -> bsz
  9489. const int64_t n = ggml_nrows(src0);
  9490. const int64_t ne2_ne3 = n/ne1; // ne2*ne3
  9491. const size_t nb0 = src0->nb[0];
  9492. const size_t nb1 = src0->nb[1];
  9493. const size_t nb2 = src0->nb[2];
  9494. //const int nb3 = src0->nb[3];
  9495. GGML_ASSERT(nb0 == sizeof(float));
  9496. GGML_ASSERT(n_head == ne2);
  9497. // add alibi to src0 (KQ_scaled)
  9498. const int n_heads_log2_floor = 1 << (int) floor(log2(n_head));
  9499. const float m0 = powf(2.0f, -(max_bias) / n_heads_log2_floor);
  9500. const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_heads_log2_floor);
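// ALiBi slopes: heads k < n_heads_log2_floor use m_k = m0^(k+1), the remaining heads use m1 with odd
// exponents, and the bias added to each logit is i * m_k; e.g. n_head = 8, max_bias = 8 gives m0 = 0.5 and
// slopes 1/2, 1/4, ..., 1/256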
  9501. for (int64_t i = 0; i < ne0; i++) {
  9502. for (int64_t j = 0; j < ne1; j++) {
  9503. for (int64_t k = 0; k < ne2_ne3; k++) {
  9504. float * const src = (float *)((char *) src0->data + i*nb0 + j*nb1 + k*nb2);
  9505. float * pdst = (float *)((char *) dst->data + i*nb0 + j*nb1 + k*nb2);
  9506. // TODO: k*nb2 or k*nb3
  9507. float m_k;
  9508. if (k < n_heads_log2_floor) {
  9509. m_k = powf(m0, k + 1);
  9510. } else {
  9511. m_k = powf(m1, 2 * (k - n_heads_log2_floor) + 1);
  9512. }
  9513. pdst[0] = i * m_k + src[0];
  9514. }
  9515. }
  9516. }
  9517. }
  9518. static void ggml_compute_forward_alibi_f16(
  9519. const struct ggml_compute_params * params,
  9520. const struct ggml_tensor * src0,
  9521. struct ggml_tensor * dst) {
  9522. assert(params->ith == 0);
  9523. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9524. return;
  9525. }
  9526. //const int n_past = ((int32_t *) dst->op_params)[0];
  9527. const int n_head = ((int32_t *) dst->op_params)[1];
  9528. float max_bias;
  9529. memcpy(&max_bias, (int32_t *) dst->op_params + 2, sizeof(float));
  9530. const int ne0 = src0->ne[0]; // all_seq_len = n_past + ne1
  9531. const int ne1 = src0->ne[1]; // seq_len_without_past
  9532. const int ne2 = src0->ne[2]; // n_head -> this is k
  9533. //const int ne3 = src0->ne[3]; // 1 -> bsz
  9534. const int n = ggml_nrows(src0);
  9535. const int ne2_ne3 = n/ne1; // ne2*ne3
  9536. const int nb0 = src0->nb[0];
  9537. const int nb1 = src0->nb[1];
  9538. const int nb2 = src0->nb[2];
  9539. //const int nb3 = src0->nb[3];
  9540. GGML_ASSERT(nb0 == sizeof(ggml_fp16_t));
  9541. //GGML_ASSERT(ne1 + n_past == ne0); (void) n_past;
  9542. GGML_ASSERT(n_head == ne2);
  9543. // add alibi to src0 (KQ_scaled)
  9544. const int n_heads_log2_floor = 1 << (int) floor(log2(n_head));
  9545. const float m0 = powf(2.0f, -(max_bias) / n_heads_log2_floor);
  9546. const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_heads_log2_floor);
  9547. for (int i = 0; i < ne0; i++) {
  9548. for (int j = 0; j < ne1; j++) {
  9549. for (int k = 0; k < ne2_ne3; k++) {
  9550. ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i*nb0 + j*nb1 + k*nb2);
  9551. float * pdst = (float *)((char *) dst->data + i*nb0 + j*nb1 + k*nb2);
  9552. // TODO: k*nb2 or k*nb3
  9553. float m_k;
  9554. if (k < n_heads_log2_floor) {
  9555. m_k = powf(m0, k + 1);
  9556. } else {
  9557. m_k = powf(m1, 2 * (k - n_heads_log2_floor) + 1);
  9558. }
  9559. // we return F32
  9560. pdst[0] = i * m_k + GGML_FP16_TO_FP32(src[0]);
  9561. }
  9562. }
  9563. }
  9564. }
  9565. static void ggml_compute_forward_alibi(
  9566. const struct ggml_compute_params * params,
  9567. const struct ggml_tensor * src0,
  9568. struct ggml_tensor * dst) {
  9569. switch (src0->type) {
  9570. case GGML_TYPE_F16:
  9571. {
  9572. ggml_compute_forward_alibi_f16(params, src0, dst);
  9573. } break;
  9574. case GGML_TYPE_F32:
  9575. {
  9576. ggml_compute_forward_alibi_f32(params, src0, dst);
  9577. } break;
  9578. case GGML_TYPE_Q4_0:
  9579. case GGML_TYPE_Q4_1:
  9580. case GGML_TYPE_Q5_0:
  9581. case GGML_TYPE_Q5_1:
  9582. case GGML_TYPE_Q8_0:
  9583. case GGML_TYPE_Q8_1:
  9584. case GGML_TYPE_Q2_K:
  9585. case GGML_TYPE_Q3_K:
  9586. case GGML_TYPE_Q4_K:
  9587. case GGML_TYPE_Q5_K:
  9588. case GGML_TYPE_Q6_K:
  9589. case GGML_TYPE_IQ2_XXS:
  9590. case GGML_TYPE_IQ2_XS:
  9591. case GGML_TYPE_Q8_K:
  9592. case GGML_TYPE_I8:
  9593. case GGML_TYPE_I16:
  9594. case GGML_TYPE_I32:
  9595. case GGML_TYPE_COUNT:
  9596. {
  9597. GGML_ASSERT(false);
  9598. } break;
  9599. }
  9600. }
  9601. // ggml_compute_forward_clamp
  9602. static void ggml_compute_forward_clamp_f32(
  9603. const struct ggml_compute_params * params,
  9604. const struct ggml_tensor * src0,
  9605. struct ggml_tensor * dst) {
  9606. assert(params->ith == 0);
  9607. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9608. return;
  9609. }
  9610. float min;
  9611. float max;
  9612. memcpy(&min, (float *) dst->op_params + 0, sizeof(float));
  9613. memcpy(&max, (float *) dst->op_params + 1, sizeof(float));
  9614. const int ith = params->ith;
  9615. const int nth = params->nth;
  9616. const int n = ggml_nrows(src0);
  9617. const int nc = src0->ne[0];
  9618. const size_t nb00 = src0->nb[0];
  9619. const size_t nb01 = src0->nb[1];
  9620. const size_t nb0 = dst->nb[0];
  9621. const size_t nb1 = dst->nb[1];
  9622. GGML_ASSERT( nb0 == sizeof(float));
  9623. GGML_ASSERT(nb00 == sizeof(float));
  9624. for (int j = ith; j < n; j += nth) {
  9625. float * dst_ptr = (float *) ((char *) dst->data + j*nb1);
  9626. float * src0_ptr = (float *) ((char *) src0->data + j*nb01);
  9627. for (int i = 0; i < nc; i++) {
  9628. dst_ptr[i] = MAX(MIN(src0_ptr[i], max), min);
  9629. }
  9630. }
  9631. }
  9632. static void ggml_compute_forward_clamp(
  9633. const struct ggml_compute_params * params,
  9634. const struct ggml_tensor * src0,
  9635. struct ggml_tensor * dst) {
  9636. switch (src0->type) {
  9637. case GGML_TYPE_F32:
  9638. {
  9639. ggml_compute_forward_clamp_f32(params, src0, dst);
  9640. } break;
  9641. case GGML_TYPE_F16:
  9642. case GGML_TYPE_Q4_0:
  9643. case GGML_TYPE_Q4_1:
  9644. case GGML_TYPE_Q5_0:
  9645. case GGML_TYPE_Q5_1:
  9646. case GGML_TYPE_Q8_0:
  9647. case GGML_TYPE_Q8_1:
  9648. case GGML_TYPE_Q2_K:
  9649. case GGML_TYPE_Q3_K:
  9650. case GGML_TYPE_Q4_K:
  9651. case GGML_TYPE_Q5_K:
  9652. case GGML_TYPE_Q6_K:
  9653. case GGML_TYPE_IQ2_XXS:
  9654. case GGML_TYPE_IQ2_XS:
  9655. case GGML_TYPE_Q8_K:
  9656. case GGML_TYPE_I8:
  9657. case GGML_TYPE_I16:
  9658. case GGML_TYPE_I32:
  9659. case GGML_TYPE_COUNT:
  9660. {
  9661. GGML_ASSERT(false);
  9662. } break;
  9663. }
  9664. }
// ggml_compute_forward_rope

static float rope_yarn_ramp(const float low, const float high, const int i0) {
    const float y = (i0 / 2 - low) / MAX(0.001f, high - low);
    return 1 - MIN(1, MAX(0, y));
}

// YaRN algorithm based on LlamaYaRNScaledRotaryEmbedding.py from https://github.com/jquesnelle/yarn
// MIT licensed. Copyright (c) 2023 Jeffrey Quesnelle and Bowen Peng.
static void rope_yarn(
    float theta_extrap, float freq_scale, float corr_dims[2], int64_t i0, float ext_factor, float mscale,
    float * cos_theta, float * sin_theta
) {
    // Get n-d rotational scaling corrected for extrapolation
    float theta_interp = freq_scale * theta_extrap;
    float theta = theta_interp;
    if (ext_factor != 0.0f) {
        float ramp_mix = rope_yarn_ramp(corr_dims[0], corr_dims[1], i0) * ext_factor;
        theta = theta_interp * (1 - ramp_mix) + theta_extrap * ramp_mix;

        // Get n-d magnitude scaling corrected for interpolation
        mscale *= 1.0f + 0.1f * logf(1.0f / freq_scale);
    }
    *cos_theta = cosf(theta) * mscale;
    *sin_theta = sinf(theta) * mscale;
}

// Dimension pair x completes `max_pos_emb / (2pi * base^(2x / n_dims))` full rotations over the
// original context; solving that expression = n_rot for x gives
// `corr_dim(n_rot) = n_dims * log(max_pos_emb / (n_rot * 2pi)) / (2 * log(base))`
static float ggml_rope_yarn_corr_dim(int n_dims, int n_orig_ctx, float n_rot, float base) {
    return n_dims * logf(n_orig_ctx / (n_rot * 2 * (float)M_PI)) / (2 * logf(base));
}

static void ggml_rope_cache_init(
    float theta_base, float freq_scale, float corr_dims[2], int64_t ne0, float ext_factor, float mscale,
    float * cache, float sin_sign, float theta_scale
) {
    float theta = theta_base;
    for (int64_t i0 = 0; i0 < ne0; i0 += 2) {
        rope_yarn(
            theta, freq_scale, corr_dims, i0, ext_factor, mscale, &cache[i0 + 0], &cache[i0 + 1]
        );
        cache[i0 + 1] *= sin_sign;

        theta *= theta_scale;
    }
}

GGML_CALL void ggml_rope_yarn_corr_dims(
    int n_dims, int n_orig_ctx, float freq_base, float beta_fast, float beta_slow, float dims[2]
) {
    // start and end correction dims
    dims[0] = MAX(0, floorf(ggml_rope_yarn_corr_dim(n_dims, n_orig_ctx, beta_fast, freq_base)));
    dims[1] = MIN(n_dims - 1, ceilf(ggml_rope_yarn_corr_dim(n_dims, n_orig_ctx, beta_slow, freq_base)));
}
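// Illustration (hypothetical usage, not called from ggml itself): a caller working with
// LLaMA-style hyperparameters could query the YaRN correction range like this; the numbers
// below are placeholders, not taken from any particular model:
//
//     float corr_dims[2];
//     ggml_rope_yarn_corr_dims(/*n_dims*/ 128, /*n_orig_ctx*/ 4096, /*freq_base*/ 10000.0f,
//                              /*beta_fast*/ 32.0f, /*beta_slow*/ 1.0f, corr_dims);
//
// Dimension pairs below corr_dims[0] keep the original (extrapolated) angle (for ext_factor = 1),
// pairs above corr_dims[1] use the fully interpolated angle, and rope_yarn_ramp() blends in between.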
  9713. static void ggml_compute_forward_rope_f32(
  9714. const struct ggml_compute_params * params,
  9715. const struct ggml_tensor * src0,
  9716. const struct ggml_tensor * src1,
  9717. struct ggml_tensor * dst,
  9718. const bool forward) {
  9719. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9720. return;
  9721. }
  9722. float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow;
  9723. // these two only relevant for xPos RoPE:
  9724. float xpos_base;
  9725. bool xpos_down;
  9726. //const int n_past = ((int32_t *) dst->op_params)[0];
  9727. const int n_dims = ((int32_t *) dst->op_params)[1];
  9728. const int mode = ((int32_t *) dst->op_params)[2];
  9729. const int n_ctx = ((int32_t *) dst->op_params)[3];
  9730. const int n_orig_ctx = ((int32_t *) dst->op_params)[4];
  9731. memcpy(&freq_base, (int32_t *) dst->op_params + 5, sizeof(float));
  9732. memcpy(&freq_scale, (int32_t *) dst->op_params + 6, sizeof(float));
  9733. memcpy(&ext_factor, (int32_t *) dst->op_params + 7, sizeof(float));
  9734. memcpy(&attn_factor, (int32_t *) dst->op_params + 8, sizeof(float));
  9735. memcpy(&beta_fast, (int32_t *) dst->op_params + 9, sizeof(float));
  9736. memcpy(&beta_slow, (int32_t *) dst->op_params + 10, sizeof(float));
  9737. memcpy(&xpos_base, (int32_t *) dst->op_params + 11, sizeof(float));
  9738. memcpy(&xpos_down, (int32_t *) dst->op_params + 12, sizeof(bool));
  9739. GGML_TENSOR_UNARY_OP_LOCALS
  9740. //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
  9741. //printf("n_past = %d, ne2 = %d\n", n_past, ne2);
  9742. GGML_ASSERT(nb00 == sizeof(float));
  9743. const int ith = params->ith;
  9744. const int nth = params->nth;
  9745. const int nr = ggml_nrows(dst);
  9746. GGML_ASSERT(n_dims <= ne0);
  9747. GGML_ASSERT(n_dims % 2 == 0);
  9748. // rows per thread
  9749. const int dr = (nr + nth - 1)/nth;
  9750. // row range for this thread
  9751. const int ir0 = dr*ith;
  9752. const int ir1 = MIN(ir0 + dr, nr);
  9753. // row index used to determine which thread to use
  9754. int ir = 0;
  9755. const float theta_scale = powf(freq_base, -2.0f/n_dims);
  9756. const float inv_ndims = -1.f/n_dims;
  9757. float corr_dims[2];
  9758. ggml_rope_yarn_corr_dims(n_dims, n_orig_ctx, freq_base, beta_fast, beta_slow, corr_dims);
  9759. const bool is_neox = mode & 2;
  9760. const bool is_glm = mode & 4;
  9761. // backward process uses inverse rotation by cos and sin.
  9762. // cos and sin build a rotation matrix, where the inverse is the transpose.
  9763. // this essentially just switches the sign of sin.
  9764. const float sin_sign = forward ? 1.0f : -1.0f;
  9765. const int32_t * pos = (const int32_t *) src1->data;
  9766. for (int64_t i3 = 0; i3 < ne3; i3++) {
  9767. for (int64_t i2 = 0; i2 < ne2; i2++) {
  9768. const int64_t p = pos[i2];
  9769. float * cache = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32)*ith;
  9770. if (!is_glm && !is_neox) { // TODO: cache sin/cos for glm, neox
  9771. ggml_rope_cache_init(p, freq_scale, corr_dims, ne0, ext_factor, attn_factor, cache, sin_sign, theta_scale);
  9772. }
  9773. for (int64_t i1 = 0; i1 < ne1; i1++) {
  9774. if (ir++ < ir0) continue;
  9775. if (ir > ir1) break;
  9776. float theta_base = (float)p;
  9777. if (is_glm) {
  9778. theta_base = MIN(p, n_ctx - 2);
  9779. float block_theta = MAX(p - (n_ctx - 2), 0);
  9780. for (int64_t i0 = 0; i0 < ne0 / 4; i0++) {
  9781. const float cos_theta = cosf(theta_base);
  9782. const float sin_theta = sinf(theta_base) * sin_sign;
  9783. const float cos_block_theta = cosf(block_theta);
  9784. const float sin_block_theta = sinf(block_theta) * sin_sign;
  9785. theta_base *= theta_scale;
  9786. block_theta *= theta_scale;
  9787. const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  9788. float * dst_data = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  9789. const float x0 = src[0];
  9790. const float x1 = src[n_dims/2];
  9791. const float x2 = src[n_dims];
  9792. const float x3 = src[n_dims/2*3];
  9793. dst_data[0] = x0*cos_theta - x1*sin_theta;
  9794. dst_data[n_dims/2] = x0*sin_theta + x1*cos_theta;
  9795. dst_data[n_dims] = x2*cos_block_theta - x3*sin_block_theta;
  9796. dst_data[n_dims/2*3] = x2*sin_block_theta + x3*cos_block_theta;
  9797. }
  9798. } else if (!is_neox) {
  9799. for (int64_t i0 = 0; i0 < ne0; i0 += 2) {
  9800. const float cos_theta = cache[i0 + 0];
  9801. const float sin_theta = cache[i0 + 1];
  9802. // zeta scaling for xPos only:
  9803. float zeta = xpos_base != 0.0f ? powf((i0 + 0.4f * ne0) / (1.4f * ne0), p / xpos_base) : 1.0f;
  9804. if (xpos_down) zeta = 1.0f / zeta;
  9805. const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  9806. float * dst_data = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  9807. const float x0 = src[0];
  9808. const float x1 = src[1];
  9809. dst_data[0] = x0*cos_theta*zeta - x1*sin_theta*zeta;
  9810. dst_data[1] = x0*sin_theta*zeta + x1*cos_theta*zeta;
  9811. }
  9812. } else {
  9813. // TODO: this might be wrong for ne0 != n_dims - need double check
  9814. // it seems we have to rope just the first n_dims elements and do nothing with the rest
  9815. // ref: https://github.com/ml-explore/mlx/blob/dc2edc762c797e3b8de50b1dad4dc0a131691033/benchmarks/python/llama_jax_bench.py#L11-L26
  9816. theta_base *= freq_scale;
  9817. for (int64_t ic = 0; ic < ne0; ic += 2) {
  9818. if (ic < n_dims) {
  9819. const int64_t ib = 0;
  9820. // simplified from `(ib * n_dims + ic) * inv_ndims`
  9821. float cur_rot = inv_ndims * ic - ib;
  9822. float cos_theta, sin_theta;
  9823. rope_yarn(
  9824. theta_base, freq_scale, corr_dims, cur_rot, ext_factor, attn_factor,
  9825. &cos_theta, &sin_theta
  9826. );
  9827. sin_theta *= sin_sign;
  9828. theta_base *= theta_scale;
  9829. const int64_t i0 = ib*n_dims + ic/2;
  9830. const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  9831. float * dst_data = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  9832. const float x0 = src[0];
  9833. const float x1 = src[n_dims/2];
  9834. dst_data[0] = x0*cos_theta - x1*sin_theta;
  9835. dst_data[n_dims/2] = x0*sin_theta + x1*cos_theta;
  9836. } else {
  9837. const int64_t i0 = ic;
  9838. const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  9839. float * dst_data = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  9840. dst_data[0] = src[0];
  9841. dst_data[1] = src[1];
  9842. }
  9843. }
  9844. }
  9845. }
  9846. }
  9847. }
  9848. }
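// Illustration (a minimal sketch, not part of the kernel above): in the default mode
// (!is_neox && !is_glm) each consecutive pair (x[i0], x[i0 + 1]) is rotated by the angle
// cached in ggml_rope_cache_init(), i.e. theta = p * freq_base^(-i0/n_dims) before the
// YaRN/xPos corrections. Ignoring those corrections, the inner step amounts to:
//
//     static void rope_rotate_pair_example(float p, float freq_base, int n_dims,
//                                          int64_t i0, const float * src, float * dst) {
//         const float theta = p * powf(freq_base, -(float) i0 / n_dims);
//         const float c = cosf(theta);
//         const float s = sinf(theta);
//         dst[0] = src[0]*c - src[1]*s;
//         dst[1] = src[0]*s + src[1]*c;
//     }
//
// The neox branch pairs element i with element i + n_dims/2 instead, and the backward pass
// reuses the same code with sin_sign = -1.0f (the transpose of the rotation matrix).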
  9849. static void ggml_compute_forward_rope_f16(
  9850. const struct ggml_compute_params * params,
  9851. const struct ggml_tensor * src0,
  9852. const struct ggml_tensor * src1,
  9853. struct ggml_tensor * dst,
  9854. const bool forward) {
  9855. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9856. return;
  9857. }
  9858. float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow;
  9859. //const int n_past = ((int32_t *) dst->op_params)[0];
  9860. const int n_dims = ((int32_t *) dst->op_params)[1];
  9861. const int mode = ((int32_t *) dst->op_params)[2];
  9862. const int n_ctx = ((int32_t *) dst->op_params)[3];
  9863. const int n_orig_ctx = ((int32_t *) dst->op_params)[4];
  9864. memcpy(&freq_base, (int32_t *) dst->op_params + 5, sizeof(float));
  9865. memcpy(&freq_scale, (int32_t *) dst->op_params + 6, sizeof(float));
  9866. memcpy(&ext_factor, (int32_t *) dst->op_params + 7, sizeof(float));
  9867. memcpy(&attn_factor, (int32_t *) dst->op_params + 8, sizeof(float));
  9868. memcpy(&beta_fast, (int32_t *) dst->op_params + 9, sizeof(float));
  9869. memcpy(&beta_slow, (int32_t *) dst->op_params + 10, sizeof(float));
  9870. GGML_TENSOR_UNARY_OP_LOCALS
  9871. //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
  9872. //printf("n_past = %d, ne2 = %d\n", n_past, ne2);
  9873. GGML_ASSERT(nb0 == sizeof(ggml_fp16_t));
  9874. const int ith = params->ith;
  9875. const int nth = params->nth;
  9876. const int nr = ggml_nrows(dst);
  9877. GGML_ASSERT(n_dims <= ne0);
  9878. GGML_ASSERT(n_dims % 2 == 0);
  9879. // rows per thread
  9880. const int dr = (nr + nth - 1)/nth;
  9881. // row range for this thread
  9882. const int ir0 = dr*ith;
  9883. const int ir1 = MIN(ir0 + dr, nr);
  9884. // row index used to determine which thread to use
  9885. int ir = 0;
  9886. const float theta_scale = powf(freq_base, -2.0f/n_dims);
  9887. const float inv_ndims = -1.f/n_dims;
  9888. float corr_dims[2];
  9889. ggml_rope_yarn_corr_dims(n_dims, n_orig_ctx, freq_base, beta_fast, beta_slow, corr_dims);
  9890. const bool is_neox = mode & 2;
  9891. const bool is_glm = mode & 4;
  9892. // backward process uses inverse rotation by cos and sin.
  9893. // cos and sin build a rotation matrix, where the inverse is the transpose.
  9894. // this essentially just switches the sign of sin.
  9895. const float sin_sign = forward ? 1.0f : -1.0f;
  9896. const int32_t * pos = (const int32_t *) src1->data;
  9897. for (int64_t i3 = 0; i3 < ne3; i3++) {
  9898. for (int64_t i2 = 0; i2 < ne2; i2++) {
  9899. const int64_t p = pos[i2];
  9900. float * cache = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32)*ith;
  9901. if (!is_glm && !is_neox) { // TODO: cache sin/cos for glm, neox
  9902. ggml_rope_cache_init(p, freq_scale, corr_dims, ne0, ext_factor, attn_factor, cache, sin_sign, theta_scale);
  9903. }
  9904. for (int64_t i1 = 0; i1 < ne1; i1++) {
  9905. if (ir++ < ir0) continue;
  9906. if (ir > ir1) break;
  9907. float theta_base = (float)p;
  9908. if (is_glm) {
  9909. theta_base = MIN(p, n_ctx - 2);
  9910. float block_theta = MAX(p - (n_ctx - 2), 0);
  9911. for (int64_t i0 = 0; i0 < ne0 / 4; i0++) {
  9912. const float cos_theta = cosf(theta_base);
  9913. const float sin_theta = sinf(theta_base) * sin_sign;
  9914. const float cos_block_theta = cosf(block_theta);
  9915. const float sin_block_theta = sinf(block_theta) * sin_sign;
  9916. theta_base *= theta_scale;
  9917. block_theta *= theta_scale;
  9918. const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  9919. ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  9920. const float x0 = GGML_FP16_TO_FP32(src[0]);
  9921. const float x1 = GGML_FP16_TO_FP32(src[n_dims/2]);
  9922. const float x2 = GGML_FP16_TO_FP32(src[n_dims]);
  9923. const float x3 = GGML_FP16_TO_FP32(src[n_dims/2*3]);
  9924. dst_data[0] = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta);
  9925. dst_data[n_dims/2] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta);
  9926. dst_data[n_dims] = GGML_FP32_TO_FP16(x2*cos_block_theta - x3*sin_block_theta);
  9927. dst_data[n_dims/2*3] = GGML_FP32_TO_FP16(x2*sin_block_theta + x3*cos_block_theta);
  9928. }
  9929. } else if (!is_neox) {
  9930. for (int64_t i0 = 0; i0 < ne0; i0 += 2) {
  9931. const float cos_theta = cache[i0 + 0];
  9932. const float sin_theta = cache[i0 + 1];
  9933. const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  9934. ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  9935. const float x0 = GGML_FP16_TO_FP32(src[0]);
  9936. const float x1 = GGML_FP16_TO_FP32(src[1]);
  9937. dst_data[0] = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta);
  9938. dst_data[1] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta);
  9939. }
  9940. } else {
  9941. // TODO: this might be wrong for ne0 != n_dims - need double check
  9942. // it seems we have to rope just the first n_dims elements and do nothing with the rest
  9943. // ref: https://github.com/ml-explore/mlx/blob/dc2edc762c797e3b8de50b1dad4dc0a131691033/benchmarks/python/llama_jax_bench.py#L11-L26
  9944. theta_base *= freq_scale;
  9945. for (int64_t ic = 0; ic < ne0; ic += 2) {
  9946. if (ic < n_dims) {
  9947. const int64_t ib = 0;
  9948. // simplified from `(ib * n_dims + ic) * inv_ndims`
  9949. float cur_rot = inv_ndims * ic - ib;
  9950. float cos_theta, sin_theta;
  9951. rope_yarn(
  9952. theta_base, freq_scale, corr_dims, cur_rot, ext_factor, attn_factor,
  9953. &cos_theta, &sin_theta
  9954. );
  9955. sin_theta *= sin_sign;
  9956. theta_base *= theta_scale;
  9957. const int64_t i0 = ib*n_dims + ic/2;
  9958. const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  9959. ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  9960. const float x0 = GGML_FP16_TO_FP32(src[0]);
  9961. const float x1 = GGML_FP16_TO_FP32(src[n_dims/2]);
  9962. dst_data[0] = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta);
  9963. dst_data[n_dims/2] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta);
  9964. } else {
  9965. const int64_t i0 = ic;
  9966. const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  9967. ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  9968. dst_data[0] = src[0];
  9969. dst_data[1] = src[1];
  9970. }
  9971. }
  9972. }
  9973. }
  9974. }
  9975. }
  9976. }
  9977. static void ggml_compute_forward_rope(
  9978. const struct ggml_compute_params * params,
  9979. const struct ggml_tensor * src0,
  9980. const struct ggml_tensor * src1,
  9981. struct ggml_tensor * dst) {
  9982. switch (src0->type) {
  9983. case GGML_TYPE_F16:
  9984. {
  9985. ggml_compute_forward_rope_f16(params, src0, src1, dst, true);
  9986. } break;
  9987. case GGML_TYPE_F32:
  9988. {
  9989. ggml_compute_forward_rope_f32(params, src0, src1, dst, true);
  9990. } break;
  9991. default:
  9992. {
  9993. GGML_ASSERT(false);
  9994. } break;
  9995. }
  9996. }
  9997. // ggml_compute_forward_rope_back
  9998. static void ggml_compute_forward_rope_back(
  9999. const struct ggml_compute_params * params,
  10000. const struct ggml_tensor * src0,
  10001. const struct ggml_tensor * src1,
  10002. struct ggml_tensor * dst) {
  10003. switch (src0->type) {
  10004. case GGML_TYPE_F16:
  10005. {
  10006. ggml_compute_forward_rope_f16(params, src0, src1, dst, false);
  10007. } break;
  10008. case GGML_TYPE_F32:
  10009. {
  10010. ggml_compute_forward_rope_f32(params, src0, src1, dst, false);
  10011. } break;
  10012. default:
  10013. {
  10014. GGML_ASSERT(false);
  10015. } break;
  10016. }
  10017. }
  10018. // ggml_compute_forward_conv_transpose_1d
  10019. static void ggml_compute_forward_conv_transpose_1d_f16_f32(
  10020. const struct ggml_compute_params * params,
  10021. const struct ggml_tensor * src0,
  10022. const struct ggml_tensor * src1,
  10023. struct ggml_tensor * dst) {
  10024. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  10025. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  10026. GGML_ASSERT( dst->type == GGML_TYPE_F32);
  10027. int64_t t0 = ggml_perf_time_us();
  10028. UNUSED(t0);
  10029. GGML_TENSOR_BINARY_OP_LOCALS
  10030. const int ith = params->ith;
  10031. const int nth = params->nth;
  10032. const int nk = ne00*ne01*ne02;
  10033. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  10034. GGML_ASSERT(nb10 == sizeof(float));
  10035. if (params->type == GGML_TASK_INIT) {
  10036. if (ith != 0) {
  10037. return;
  10038. }
  10039. memset(params->wdata, 0, params->wsize);
  10040. // permute kernel data (src0) from (K x Cout x Cin) to (Cin x K x Cout)
  10041. {
  10042. ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;
  10043. for (int64_t i02 = 0; i02 < ne02; i02++) {
  10044. for (int64_t i01 = 0; i01 < ne01; i01++) {
  10045. const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i02*nb02 + i01*nb01);
  10046. ggml_fp16_t * dst_data = wdata + i01*ne00*ne02;
  10047. for (int64_t i00 = 0; i00 < ne00; i00++) {
  10048. dst_data[i00*ne02 + i02] = src[i00];
  10049. }
  10050. }
  10051. }
  10052. }
  10053. // permute source data (src1) from (L x Cin) to (Cin x L)
  10054. {
  10055. ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + nk;
  10056. ggml_fp16_t * dst_data = wdata;
  10057. for (int64_t i11 = 0; i11 < ne11; i11++) {
  10058. const float * const src = (float *)((char *) src1->data + i11*nb11);
  10059. for (int64_t i10 = 0; i10 < ne10; i10++) {
  10060. dst_data[i10*ne11 + i11] = GGML_FP32_TO_FP16(src[i10]);
  10061. }
  10062. }
  10063. }
  10064. // need to zero dst since we are accumulating into it
  10065. memset(dst->data, 0, ggml_nbytes(dst));
  10066. return;
  10067. }
  10068. if (params->type == GGML_TASK_FINALIZE) {
  10069. return;
  10070. }
  10071. const int32_t s0 = ((const int32_t*)(dst->op_params))[0];
  10072. // total rows in dst
  10073. const int nr = ne1;
  10074. // rows per thread
  10075. const int dr = (nr + nth - 1)/nth;
  10076. // row range for this thread
  10077. const int ir0 = dr*ith;
  10078. const int ir1 = MIN(ir0 + dr, nr);
  10079. ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;
  10080. ggml_fp16_t * const wdata_src = wdata + nk;
  10081. for (int i1 = ir0; i1 < ir1; i1++) {
  10082. float * dst_data = (float *)((char *) dst->data + i1*nb1);
  10083. ggml_fp16_t * wdata_kernel = wdata + i1*ne02*ne00;
  10084. for (int i10 = 0; i10 < ne10; i10++) {
  10085. const int i1n = i10*ne11;
  10086. for (int i00 = 0; i00 < ne00; i00++) {
  10087. float v = 0;
  10088. ggml_vec_dot_f16(ne02, &v,
  10089. (ggml_fp16_t *) wdata_src + i1n,
  10090. (ggml_fp16_t *) wdata_kernel + i00*ne02);
  10091. dst_data[i10*s0 + i00] += v;
  10092. }
  10093. }
  10094. }
  10095. }
  10096. static void ggml_compute_forward_conv_transpose_1d_f32(
  10097. const struct ggml_compute_params * params,
  10098. const struct ggml_tensor * src0,
  10099. const struct ggml_tensor * src1,
  10100. struct ggml_tensor * dst) {
  10101. GGML_ASSERT(src0->type == GGML_TYPE_F32);
  10102. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  10103. GGML_ASSERT( dst->type == GGML_TYPE_F32);
  10104. int64_t t0 = ggml_perf_time_us();
  10105. UNUSED(t0);
  10106. GGML_TENSOR_BINARY_OP_LOCALS
  10107. const int ith = params->ith;
  10108. const int nth = params->nth;
  10109. const int nk = ne00*ne01*ne02;
  10110. GGML_ASSERT(nb00 == sizeof(float));
  10111. GGML_ASSERT(nb10 == sizeof(float));
  10112. if (params->type == GGML_TASK_INIT) {
  10113. if (ith != 0) {
  10114. return;
  10115. }
  10116. memset(params->wdata, 0, params->wsize);
// permute kernel data (src0) from (K x Cout x Cin) to (Cin x K x Cout)
  10118. {
  10119. float * const wdata = (float *) params->wdata + 0;
  10120. for (int64_t i02 = 0; i02 < ne02; i02++) {
  10121. for (int64_t i01 = 0; i01 < ne01; i01++) {
  10122. const float * const src = (float *)((char *) src0->data + i02*nb02 + i01*nb01);
  10123. float * dst_data = wdata + i01*ne00*ne02;
  10124. for (int64_t i00 = 0; i00 < ne00; i00++) {
  10125. dst_data[i00*ne02 + i02] = src[i00];
  10126. }
  10127. }
  10128. }
  10129. }
// permute source data (src1) from (L x Cin) to (Cin x L)
  10131. {
  10132. float * const wdata = (float *) params->wdata + nk;
  10133. float * dst_data = wdata;
  10134. for (int64_t i11 = 0; i11 < ne11; i11++) {
  10135. const float * const src = (float *)((char *) src1->data + i11*nb11);
  10136. for (int64_t i10 = 0; i10 < ne10; i10++) {
  10137. dst_data[i10*ne11 + i11] = src[i10];
  10138. }
  10139. }
  10140. }
  10141. // need to zero dst since we are accumulating into it
  10142. memset(dst->data, 0, ggml_nbytes(dst));
  10143. return;
  10144. }
  10145. if (params->type == GGML_TASK_FINALIZE) {
  10146. return;
  10147. }
  10148. const int32_t s0 = ((const int32_t*)(dst->op_params))[0];
  10149. // total rows in dst
  10150. const int nr = ne1;
  10151. // rows per thread
  10152. const int dr = (nr + nth - 1)/nth;
  10153. // row range for this thread
  10154. const int ir0 = dr*ith;
  10155. const int ir1 = MIN(ir0 + dr, nr);
  10156. float * const wdata = (float *) params->wdata + 0;
  10157. float * const wdata_src = wdata + nk;
  10158. for (int i1 = ir0; i1 < ir1; i1++) {
  10159. float * dst_data = (float *)((char *) dst->data + i1*nb1);
  10160. float * wdata_kernel = wdata + i1*ne02*ne00;
  10161. for (int i10 = 0; i10 < ne10; i10++) {
  10162. const int i1n = i10*ne11;
  10163. for (int i00 = 0; i00 < ne00; i00++) {
  10164. float v = 0;
  10165. ggml_vec_dot_f32(ne02, &v,
  10166. wdata_src + i1n,
  10167. wdata_kernel + i00*ne02);
  10168. dst_data[i10*s0 + i00] += v;
  10169. }
  10170. }
  10171. }
  10172. }
  10173. static void ggml_compute_forward_conv_transpose_1d(
  10174. const struct ggml_compute_params * params,
  10175. const struct ggml_tensor * src0,
  10176. const struct ggml_tensor * src1,
  10177. struct ggml_tensor * dst) {
  10178. switch (src0->type) {
  10179. case GGML_TYPE_F16:
  10180. {
  10181. ggml_compute_forward_conv_transpose_1d_f16_f32(params, src0, src1, dst);
  10182. } break;
  10183. case GGML_TYPE_F32:
  10184. {
  10185. ggml_compute_forward_conv_transpose_1d_f32(params, src0, src1, dst);
  10186. } break;
  10187. default:
  10188. {
  10189. GGML_ASSERT(false);
  10190. } break;
  10191. }
  10192. }
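// Restatement of the accumulation above (a sketch only, assuming the (K x Cout x Cin) kernel
// layout described in the INIT phase): transposed convolution scatters each input sample into
// the output at stride s0, i.e. in plain loops
//
//     // for every output channel oc, input position l, input channel ic, kernel tap k:
//     //     dst[oc][l*s0 + k] += src1[ic][l] * src0[k][oc][ic]
//
// The INIT phase permutes both operands so that the sum over ic becomes a single contiguous
// ggml_vec_dot over ne02 elements.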
  10193. // src0: kernel [OC, IC, KH, KW]
  10194. // src1: image [N, IC, IH, IW]
  10195. // dst: result [N, OH, OW, IC*KH*KW]
  10196. static void ggml_compute_forward_im2col_f16(
  10197. const struct ggml_compute_params * params,
  10198. const struct ggml_tensor * src0,
  10199. const struct ggml_tensor * src1,
  10200. struct ggml_tensor * dst) {
  10201. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  10202. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  10203. GGML_ASSERT( dst->type == GGML_TYPE_F16);
  10204. int64_t t0 = ggml_perf_time_us();
  10205. UNUSED(t0);
  10206. GGML_TENSOR_BINARY_OP_LOCALS;
  10207. const int32_t s0 = ((const int32_t *)(dst->op_params))[0];
  10208. const int32_t s1 = ((const int32_t *)(dst->op_params))[1];
  10209. const int32_t p0 = ((const int32_t *)(dst->op_params))[2];
  10210. const int32_t p1 = ((const int32_t *)(dst->op_params))[3];
  10211. const int32_t d0 = ((const int32_t *)(dst->op_params))[4];
  10212. const int32_t d1 = ((const int32_t *)(dst->op_params))[5];
  10213. const bool is_2D = ((const int32_t *)(dst->op_params))[6] == 1;
  10214. const int ith = params->ith;
  10215. const int nth = params->nth;
  10216. const int64_t N = is_2D ? ne13 : ne12;
  10217. const int64_t IC = is_2D ? ne12 : ne11;
  10218. const int64_t IH = is_2D ? ne11 : 1;
  10219. const int64_t IW = ne10;
  10220. const int64_t KH = is_2D ? ne01 : 1;
  10221. const int64_t KW = ne00;
  10222. const int64_t OH = is_2D ? ne2 : 1;
  10223. const int64_t OW = ne1;
  10224. int ofs0 = is_2D ? nb13 : nb12;
  10225. int ofs1 = is_2D ? nb12 : nb11;
  10226. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  10227. GGML_ASSERT(nb10 == sizeof(float));
  10228. if (params->type == GGML_TASK_INIT) {
  10229. return;
  10230. }
  10231. if (params->type == GGML_TASK_FINALIZE) {
  10232. return;
  10233. }
  10234. // im2col: [N, IC, IH, IW] => [N, OH, OW, IC*KH*KW]
  10235. {
  10236. ggml_fp16_t * const wdata = (ggml_fp16_t *) dst->data;
  10237. for (int64_t in = 0; in < N; in++) {
  10238. for (int64_t ioh = 0; ioh < OH; ioh++) { // 1
  10239. for (int64_t iow = 0; iow < OW; iow++) {
  10240. for (int64_t iic = ith; iic < IC; iic += nth) {
  10241. // micro kernel
  10242. ggml_fp16_t * dst_data = wdata + (in*OH*OW + ioh*OW + iow)*(IC*KH*KW); // [IC, KH, KW]
  10243. const float * const src_data = (float *)((char *) src1->data + in*ofs0 + iic*ofs1); // [IH, IW]
  10244. for (int64_t ikh = 0; ikh < KH; ikh++) { // 1
  10245. for (int64_t ikw = 0; ikw < KW; ikw++) {
  10246. const int64_t iiw = iow*s0 + ikw*d0 - p0;
  10247. const int64_t iih = ioh*s1 + ikh*d1 - p1;
  10248. if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) {
  10249. dst_data[iic*(KH*KW) + ikh*KW + ikw] = 0;
  10250. } else {
  10251. dst_data[iic*(KH*KW) + ikh*KW + ikw] = GGML_FP32_TO_FP16(src_data[iih*IW + iiw]);
  10252. }
  10253. }
  10254. }
  10255. }
  10256. }
  10257. }
  10258. }
  10259. }
  10260. }
  10261. static void ggml_compute_forward_im2col(
  10262. const struct ggml_compute_params * params,
  10263. const struct ggml_tensor * src0,
  10264. const struct ggml_tensor * src1,
  10265. struct ggml_tensor * dst) {
  10266. switch (src0->type) {
  10267. case GGML_TYPE_F16:
  10268. {
  10269. ggml_compute_forward_im2col_f16(params, src0, src1, dst);
  10270. } break;
  10271. case GGML_TYPE_F32:
  10272. {
  10273. GGML_ASSERT(false);
  10274. } break;
  10275. default:
  10276. {
  10277. GGML_ASSERT(false);
  10278. } break;
  10279. }
  10280. }
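// Note (hedged summary; the matrix multiplication itself is a separate op in the graph):
// im2col turns a convolution into a matmul. For every output location (oh, ow) the loops
// above gather the IC*KH*KW input samples touched by the kernel window,
//
//     iiw = ow*s0 + kw*d0 - p0,   iih = oh*s1 + kh*d1 - p1
//
// zero-filling taps that fall outside the image (implicit padding). A convolution then
// reduces to multiplying this [N*OH*OW, IC*KH*KW] matrix by the kernel reshaped to
// [OC, IC*KH*KW].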
  10281. // ggml_compute_forward_conv_transpose_2d
  10282. static void ggml_compute_forward_conv_transpose_2d(
  10283. const struct ggml_compute_params * params,
  10284. const struct ggml_tensor * src0,
  10285. const struct ggml_tensor * src1,
  10286. struct ggml_tensor * dst) {
  10287. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  10288. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  10289. GGML_ASSERT( dst->type == GGML_TYPE_F32);
  10290. int64_t t0 = ggml_perf_time_us();
  10291. UNUSED(t0);
  10292. GGML_TENSOR_BINARY_OP_LOCALS
  10293. const int ith = params->ith;
  10294. const int nth = params->nth;
  10295. const int nk = ne00*ne01*ne02*ne03;
  10296. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  10297. GGML_ASSERT(nb10 == sizeof(float));
  10298. if (params->type == GGML_TASK_INIT) {
  10299. if (ith != 0) {
  10300. return;
  10301. }
  10302. memset(params->wdata, 0, params->wsize);
  10303. // permute kernel data (src0) from (Kw x Kh x Cout x Cin) to (Cin x Kw x Kh x Cout)
  10304. {
  10305. ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;
  10306. for (int64_t i03 = 0; i03 < ne03; i03++) {
  10307. for (int64_t i02 = 0; i02 < ne02; i02++) {
  10308. const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i03*nb03 + i02*nb02);
  10309. ggml_fp16_t * dst_data = wdata + i02*ne01*ne00*ne03;
  10310. for (int64_t i01 = 0; i01 < ne01; i01++) {
  10311. for (int64_t i00 = 0; i00 < ne00; i00++) {
  10312. dst_data[i01*ne00*ne03 + i00*ne03 + i03] = src[i01 * ne00 + i00];
  10313. }
  10314. }
  10315. }
  10316. }
  10317. }
  10318. // permute source data (src1) from (Sw x Sh x Cin) to (Cin x Sw x Sh)
  10319. {
  10320. ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + nk;
  10321. for (int i12 = 0; i12 < ne12; i12++) {
  10322. for (int i11 = 0; i11 < ne11; i11++) {
  10323. const float * const src = (float *)((char *) src1->data + i12*nb12 + i11*nb11);
  10324. ggml_fp16_t * dst_data = wdata + i11*ne10*ne12;
  10325. for (int i10 = 0; i10 < ne10; i10++) {
  10326. dst_data[i10*ne12 + i12] = GGML_FP32_TO_FP16(src[i10]);
  10327. }
  10328. }
  10329. }
  10330. }
  10331. memset(dst->data, 0, ggml_nbytes(dst));
  10332. return;
  10333. }
  10334. if (params->type == GGML_TASK_FINALIZE) {
  10335. return;
  10336. }
  10337. const int32_t stride = ggml_get_op_params_i32(dst, 0);
  10338. // total patches in dst
  10339. const int np = ne2;
  10340. // patches per thread
  10341. const int dp = (np + nth - 1)/nth;
  10342. // patch range for this thread
  10343. const int ip0 = dp*ith;
  10344. const int ip1 = MIN(ip0 + dp, np);
  10345. ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;
  10346. ggml_fp16_t * const wdata_src = wdata + nk;
  10347. for (int i2 = ip0; i2 < ip1; i2++) { // Cout
  10348. float * dst_data = (float *)((char *) dst->data + i2*nb2);
  10349. ggml_fp16_t * wdata_kernel = wdata + i2*ne01*ne00*ne03;
  10350. for (int i11 = 0; i11 < ne11; i11++) {
  10351. for (int i10 = 0; i10 < ne10; i10++) {
  10352. const int i1n = i11*ne10*ne12 + i10*ne12;
  10353. for (int i01 = 0; i01 < ne01; i01++) {
  10354. for (int i00 = 0; i00 < ne00; i00++) {
  10355. float v = 0;
  10356. ggml_vec_dot_f16(ne03, &v,
  10357. wdata_src + i1n,
  10358. wdata_kernel + i01*ne00*ne03 + i00*ne03);
  10359. dst_data[(i11*stride + i01)*ne0 + i10*stride + i00] += v;
  10360. }
  10361. }
  10362. }
  10363. }
  10364. }
  10365. }
  10366. // ggml_compute_forward_pool_1d_sk_p0
  10367. static void ggml_compute_forward_pool_1d_sk_p0(
  10368. const struct ggml_compute_params * params,
  10369. const enum ggml_op_pool op,
  10370. const struct ggml_tensor * src,
  10371. const int k,
  10372. struct ggml_tensor * dst) {
  10373. assert(src->type == GGML_TYPE_F32);
  10374. assert(params->ith == 0);
  10375. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  10376. return;
  10377. }
  10378. const char * cdata = (const char *)src->data;
  10379. const char * const data_end = cdata + ggml_nbytes(src);
  10380. float * drow = (float *)dst->data;
  10381. const int64_t rs = dst->ne[0];
  10382. while (cdata < data_end) {
  10383. const float * const srow = (const float *)cdata;
  10384. int j = 0;
  10385. for (int64_t i = 0; i < rs; ++i) {
  10386. switch (op) {
  10387. case GGML_OP_POOL_AVG: drow[i] = 0; break;
  10388. case GGML_OP_POOL_MAX: drow[i] = -FLT_MAX; break;
  10389. case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
  10390. }
  10391. for (int ki = 0; ki < k; ++ki) {
  10392. switch (op) {
  10393. case GGML_OP_POOL_AVG: drow[i] += srow[j]; break;
  10394. case GGML_OP_POOL_MAX: if (srow[j] > drow[i]) drow[i] = srow[j]; break;
  10395. case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
  10396. }
  10397. ++j;
  10398. }
  10399. switch (op) {
  10400. case GGML_OP_POOL_AVG: drow[i] /= k; break;
  10401. case GGML_OP_POOL_MAX: break;
  10402. case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
  10403. }
  10404. }
  10405. cdata += src->nb[1];
  10406. drow += rs;
  10407. }
  10408. }
  10409. // ggml_compute_forward_pool_1d
  10410. static void ggml_compute_forward_pool_1d(
  10411. const struct ggml_compute_params * params,
  10412. const struct ggml_tensor * src0,
  10413. struct ggml_tensor * dst) {
  10414. const int32_t * opts = (const int32_t *)dst->op_params;
  10415. enum ggml_op_pool op = opts[0];
  10416. const int k0 = opts[1];
  10417. const int s0 = opts[2];
  10418. const int p0 = opts[3];
  10419. GGML_ASSERT(p0 == 0); // padding not supported
  10420. GGML_ASSERT(k0 == s0); // only s = k supported
  10421. ggml_compute_forward_pool_1d_sk_p0(params, op, src0, k0, dst);
  10422. }
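// Example (illustrative values only): with op = GGML_OP_POOL_AVG, k0 = s0 = 2 and p0 = 0,
// a source row [1, 2, 3, 4] pools to [1.5, 3.5]; with GGML_OP_POOL_MAX it pools to [2, 4].
// The k0 == s0 and p0 == 0 restrictions are what let the _sk_p0 kernel walk the input
// strictly left to right, without overlapping windows or bounds checks.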
  10423. // ggml_compute_forward_pool_2d
  10424. static void ggml_compute_forward_pool_2d(
  10425. const struct ggml_compute_params * params,
  10426. const struct ggml_tensor * src,
  10427. struct ggml_tensor * dst) {
  10428. assert(src->type == GGML_TYPE_F32);
  10429. assert(params->ith == 0);
  10430. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  10431. return;
  10432. }
  10433. const int32_t * opts = (const int32_t *)dst->op_params;
  10434. enum ggml_op_pool op = opts[0];
  10435. const int k0 = opts[1];
  10436. const int k1 = opts[2];
  10437. const int s0 = opts[3];
  10438. const int s1 = opts[4];
  10439. const int p0 = opts[5];
  10440. const int p1 = opts[6];
  10441. const char * cdata = (const char*)src->data;
  10442. const char * const data_end = cdata + ggml_nbytes(src);
  10443. const int64_t px = dst->ne[0];
  10444. const int64_t py = dst->ne[1];
  10445. const int64_t pa = px * py;
  10446. float * dplane = (float *)dst->data;
  10447. const int ka = k0 * k1;
  10448. const int offset0 = -p0;
  10449. const int offset1 = -p1;
  10450. while (cdata < data_end) {
  10451. for (int oy = 0; oy < py; ++oy) {
  10452. float * const drow = dplane + oy * px;
  10453. for (int ox = 0; ox < px; ++ox) {
  10454. float * const out = drow + ox;
  10455. switch (op) {
  10456. case GGML_OP_POOL_AVG: *out = 0; break;
  10457. case GGML_OP_POOL_MAX: *out = -FLT_MAX; break;
  10458. case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
  10459. }
  10460. const int ix = offset0 + ox * s0;
  10461. const int iy = offset1 + oy * s1;
  10462. for (int ky = 0; ky < k1; ++ky) {
  10463. if (iy + ky < 0 || iy + ky >= src->ne[1]) continue;
  10464. const float * const srow = (const float *)(cdata + src->nb[1] * (iy + ky));
  10465. for (int kx = 0; kx < k0; ++kx) {
  10466. int j = ix + kx;
  10467. if (j < 0 || j >= src->ne[0]) continue;
  10468. switch (op) {
  10469. case GGML_OP_POOL_AVG: *out += srow[j]; break;
  10470. case GGML_OP_POOL_MAX: if (srow[j] > *out) *out = srow[j]; break;
  10471. case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
  10472. }
  10473. }
  10474. }
  10475. switch (op) {
  10476. case GGML_OP_POOL_AVG: *out /= ka; break;
  10477. case GGML_OP_POOL_MAX: break;
  10478. case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
  10479. }
  10480. }
  10481. }
  10482. cdata += src->nb[2];
  10483. dplane += pa;
  10484. }
  10485. }
  10486. // ggml_compute_forward_upscale
  10487. static void ggml_compute_forward_upscale_f32(
  10488. const struct ggml_compute_params * params,
  10489. const struct ggml_tensor * src0,
  10490. struct ggml_tensor * dst) {
  10491. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  10492. return;
  10493. }
  10494. GGML_ASSERT(src0->nb[0] == sizeof(float));
  10495. const int ith = params->ith;
  10496. const int nth = params->nth;
  10497. GGML_TENSOR_UNARY_OP_LOCALS
  10498. const int scale_factor = dst->op_params[0];
  10499. // TODO: optimize
  10500. for (int64_t i3 = 0; i3 < ne3; i3++) {
  10501. const int64_t i03 = i3;
  10502. for (int64_t i2 = ith; i2 < ne2; i2 += nth) {
  10503. const int64_t i02 = i2;
  10504. for (int64_t i1 = 0; i1 < ne1; i1++) {
  10505. const int64_t i01 = i1 / scale_factor;
  10506. for (int64_t i0 = 0; i0 < ne0; i0++) {
  10507. const int64_t i00 = i0 / scale_factor;
  10508. const float * x = (float *)((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  10509. float * y = (float *)((char *) dst->data + i0*nb0 + i1*nb1 + i2*nb2 + i3*nb3);
  10510. *y = *x;
  10511. }
  10512. }
  10513. }
  10514. }
  10515. }
  10516. static void ggml_compute_forward_upscale(
  10517. const struct ggml_compute_params * params,
  10518. const struct ggml_tensor * src0,
  10519. struct ggml_tensor * dst) {
  10520. switch (src0->type) {
  10521. case GGML_TYPE_F32:
  10522. {
  10523. ggml_compute_forward_upscale_f32(params, src0, dst);
  10524. } break;
  10525. default:
  10526. {
  10527. GGML_ASSERT(false);
  10528. } break;
  10529. }
  10530. }
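// Example (illustrative): this is nearest-neighbour upscaling - destination element i reads
// source element i / scale_factor along ne0 and ne1. With scale_factor = 2 a 2x2 plane
//
//     [a b]            [a a b b]
//     [c d]  becomes   [a a b b]
//                      [c c d d]
//                      [c c d d]
//
// i.e. every source element is repeated scale_factor times in each of the two dimensions.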
  10531. // ggml_compute_forward_pad
  10532. static void ggml_compute_forward_pad_f32(
  10533. const struct ggml_compute_params * params,
  10534. const struct ggml_tensor * src0,
  10535. struct ggml_tensor * dst) {
  10536. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  10537. return;
  10538. }
  10539. GGML_ASSERT(src0->nb[0] == sizeof(float));
  10540. GGML_ASSERT( dst->nb[0] == sizeof(float));
  10541. const int ith = params->ith;
  10542. const int nth = params->nth;
  10543. GGML_TENSOR_UNARY_OP_LOCALS
  10544. float * dst_ptr = (float *) dst->data;
  10545. // TODO: optimize
  10546. for (int64_t i2 = 0; i2 < ne2; ++i2) {
  10547. for (int64_t i1 = ith; i1 < ne1; i1 += nth) {
  10548. for (int64_t i0 = 0; i0 < ne0; ++i0) {
  10549. for (int64_t i3 = 0; i3 < ne3; ++i3) {
  10550. const int64_t dst_idx = i3*(ne0*ne1*ne2) + i2*(ne0*ne1) + i1*ne0 + i0;
  10551. const float * src_ptr = (const float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  10552. if (i0 < ne00 && i1 < ne01 && i2 < ne02 && i3 < ne03) {
  10553. dst_ptr[dst_idx] = *src_ptr;
  10554. } else {
  10555. dst_ptr[dst_idx] = 0;
  10556. }
  10557. }
  10558. }
  10559. }
  10560. }
  10561. }
  10562. static void ggml_compute_forward_pad(
  10563. const struct ggml_compute_params * params,
  10564. const struct ggml_tensor * src0,
  10565. struct ggml_tensor * dst) {
  10566. switch (src0->type) {
  10567. case GGML_TYPE_F32:
  10568. {
  10569. ggml_compute_forward_pad_f32(params, src0, dst);
  10570. } break;
  10571. default:
  10572. {
  10573. GGML_ASSERT(false);
  10574. } break;
  10575. }
  10576. }
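// Note (sketch of the behaviour above): dst is written as a dense f32 tensor (dst_idx is
// computed directly from ne0..ne3), source values are copied where they exist and every
// position outside the src0 extent is set to 0.0f - e.g. padding a 2x2 matrix to 3x3 keeps
// the original values in the top-left corner and zero-fills the added row and column.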
  10577. // ggml_compute_forward_argsort
  10578. static void ggml_compute_forward_argsort_f32(
  10579. const struct ggml_compute_params * params,
  10580. const struct ggml_tensor * src0,
  10581. struct ggml_tensor * dst) {
  10582. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  10583. return;
  10584. }
  10585. GGML_TENSOR_UNARY_OP_LOCALS
  10586. GGML_ASSERT(nb0 == sizeof(float));
  10587. const int ith = params->ith;
  10588. const int nth = params->nth;
  10589. const int64_t nr = ggml_nrows(src0);
  10590. enum ggml_sort_order order = (enum ggml_sort_order) ggml_get_op_params_i32(dst, 0);
  10591. for (int64_t i = ith; i < nr; i += nth) {
  10592. int32_t * dst_data = (int32_t *)((char *) dst->data + i*nb1);
  10593. const float * src_data = (float *)((char *) src0->data + i*nb01);
  10594. for (int64_t j = 0; j < ne0; j++) {
  10595. dst_data[j] = j;
  10596. }
// C's qsort() has no portable way to pass the row's src_data to the comparator, so use a
// simple O(ne0^2) exchange sort of the indices instead
  10598. for (int64_t j = 0; j < ne0; j++) {
  10599. for (int64_t k = j + 1; k < ne0; k++) {
  10600. if ((order == GGML_SORT_ASC && src_data[dst_data[j]] > src_data[dst_data[k]]) ||
  10601. (order == GGML_SORT_DESC && src_data[dst_data[j]] < src_data[dst_data[k]])) {
  10602. int32_t tmp = dst_data[j];
  10603. dst_data[j] = dst_data[k];
  10604. dst_data[k] = tmp;
  10605. }
  10606. }
  10607. }
  10608. }
  10609. }
  10610. static void ggml_compute_forward_argsort(
  10611. const struct ggml_compute_params * params,
  10612. const struct ggml_tensor * src0,
  10613. struct ggml_tensor * dst) {
  10614. switch (src0->type) {
  10615. case GGML_TYPE_F32:
  10616. {
  10617. ggml_compute_forward_argsort_f32(params, src0, dst);
  10618. } break;
  10619. default:
  10620. {
  10621. GGML_ASSERT(false);
  10622. } break;
  10623. }
  10624. }
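// Example (illustrative): argsort returns indices, not values. For a row [0.3, 0.1, 0.2],
// GGML_SORT_ASC yields [1, 2, 0] (src[1] <= src[2] <= src[0]) and GGML_SORT_DESC yields
// [0, 2, 1]. The O(ne0^2) exchange sort above is adequate for the short rows this op is
// currently used with, and would be the first thing to replace for long rows.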
  10625. // ggml_compute_forward_flash_attn
  10626. static void ggml_compute_forward_flash_attn_f32(
  10627. const struct ggml_compute_params * params,
  10628. const struct ggml_tensor * q,
  10629. const struct ggml_tensor * k,
  10630. const struct ggml_tensor * v,
  10631. const bool masked,
  10632. struct ggml_tensor * dst) {
  10633. int64_t t0 = ggml_perf_time_us();
  10634. UNUSED(t0);
  10635. GGML_TENSOR_LOCALS(int64_t, neq, q, ne)
  10636. GGML_TENSOR_LOCALS(size_t, nbq, q, nb)
  10637. GGML_TENSOR_LOCALS(int64_t, nek, k, ne)
  10638. GGML_TENSOR_LOCALS(size_t, nbk, k, nb)
  10639. GGML_TENSOR_LOCALS(int64_t, nev, v, ne)
  10640. GGML_TENSOR_LOCALS(size_t, nbv, v, nb)
  10641. GGML_TENSOR_LOCALS(int64_t, ne, dst, ne)
  10642. GGML_TENSOR_LOCALS(size_t, nb, dst, nb)
  10643. const int ith = params->ith;
  10644. const int nth = params->nth;
  10645. const int64_t D = neq0;
  10646. const int64_t N = neq1;
  10647. const int64_t P = nek1 - N;
  10648. const int64_t M = P + N;
  10649. const int Mup = ggml_up(M, GGML_SOFT_MAX_UNROLL);
  10650. GGML_ASSERT(ne0 == D);
  10651. GGML_ASSERT(ne1 == N);
  10652. GGML_ASSERT(P >= 0);
  10653. GGML_ASSERT(nbq0 == sizeof(float));
  10654. GGML_ASSERT(nbk0 == sizeof(float));
  10655. GGML_ASSERT(nbv0 == sizeof(float));
  10656. GGML_ASSERT(neq0 == D);
  10657. GGML_ASSERT(nek0 == D);
  10658. GGML_ASSERT(nev1 == D);
  10659. GGML_ASSERT(neq1 == N);
  10660. GGML_ASSERT(nek1 == N + P);
  10661. GGML_ASSERT(nev1 == D);
  10662. // dst cannot be transposed or permuted
  10663. GGML_ASSERT(nb0 == sizeof(float));
  10664. GGML_ASSERT(nb0 <= nb1);
  10665. GGML_ASSERT(nb1 <= nb2);
  10666. GGML_ASSERT(nb2 <= nb3);
  10667. if (params->type == GGML_TASK_INIT) {
  10668. return;
  10669. }
  10670. if (params->type == GGML_TASK_FINALIZE) {
  10671. return;
  10672. }
  10673. // parallelize by q rows using ggml_vec_dot_f32
  10674. // total rows in q
  10675. const int nr = neq1*neq2*neq3;
  10676. // rows per thread
  10677. const int dr = (nr + nth - 1)/nth;
  10678. // row range for this thread
  10679. const int ir0 = dr*ith;
  10680. const int ir1 = MIN(ir0 + dr, nr);
  10681. const float scale = 1.0f/sqrtf(D);
  10682. //printf("P=%d N=%d D=%d ir0=%d ir1=%d scale = %f\n", P, N, D, ir0, ir1, scale);
  10683. for (int ir = ir0; ir < ir1; ++ir) {
  10684. // q indices
  10685. const int iq3 = ir/(neq2*neq1);
  10686. const int iq2 = (ir - iq3*neq2*neq1)/neq1;
  10687. const int iq1 = (ir - iq3*neq2*neq1 - iq2*neq1);
  10688. float * S = (float *) params->wdata + ith*(Mup + CACHE_LINE_SIZE_F32);
  10689. for (int i = M; i < Mup; ++i) {
  10690. S[i] = -INFINITY;
  10691. }
  10692. const int64_t masked_begin = masked ? (P + iq1 + 1) : M;
  10693. for (int64_t ic = 0; ic < masked_begin; ++ic) {
  10694. // k indices
  10695. const int ik3 = iq3;
  10696. const int ik2 = iq2 % nek2;
  10697. const int ik1 = ic;
  10698. // S indices
  10699. const int i1 = ik1;
  10700. ggml_vec_dot_f32(neq0,
  10701. S + i1,
  10702. (float *) ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)),
  10703. (float *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)));
  10704. }
  10705. // scale
  10706. ggml_vec_scale_f32(masked_begin, S, scale);
  10707. for (int64_t i = masked_begin; i < M; i++) {
  10708. S[i] = -INFINITY;
  10709. }
  10710. // softmax
// exclude known -INF S[..] values from the max and from the loop
// don't forget to set their softmax weights (SW) to zero
  10713. {
  10714. float max = -INFINITY;
  10715. ggml_vec_max_f32(masked_begin, &max, S);
  10716. ggml_float sum = 0.0;
  10717. {
  10718. #ifdef GGML_SOFT_MAX_ACCELERATE
  10719. max = -max;
  10720. vDSP_vsadd(S, 1, &max, S, 1, Mup);
  10721. vvexpf(S, S, &Mup);
  10722. ggml_vec_sum_f32(Mup, &sum, S);
  10723. #else
  10724. uint16_t scvt[GGML_SOFT_MAX_UNROLL]; UNUSED(scvt);
  10725. ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 };
  10726. for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) {
  10727. if (i >= masked_begin) {
  10728. break;
  10729. }
  10730. float * SS = S + i;
  10731. for (int j = 0; j < GGML_SOFT_MAX_UNROLL; ++j) {
  10732. if (i + j >= masked_begin) {
  10733. break;
  10734. } else if (SS[j] == -INFINITY) {
  10735. SS[j] = 0.0f;
  10736. } else {
  10737. #ifndef GGML_FLASH_ATTN_EXP_FP16
  10738. const float val = expf(SS[j] - max);
  10739. #else
  10740. ggml_fp16_t s = GGML_FP32_TO_FP16(SS[j] - max);
  10741. memcpy(&scvt[j], &s, sizeof(uint16_t));
  10742. const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt[j]]);
  10743. #endif
  10744. sump[j] += (ggml_float)val;
  10745. SS[j] = val;
  10746. }
  10747. }
  10748. }
  10749. for (int i = 0; i < GGML_SOFT_MAX_UNROLL; i++) {
  10750. sum += sump[i];
  10751. }
  10752. #endif
  10753. }
  10754. assert(sum > 0.0);
  10755. sum = 1.0/sum;
  10756. ggml_vec_scale_f32(masked_begin, S, sum);
  10757. #ifndef NDEBUG
  10758. for (int i = 0; i < masked_begin; ++i) {
  10759. assert(!isnan(S[i]));
  10760. assert(!isinf(S[i]));
  10761. }
  10762. #endif
  10763. }
  10764. for (int64_t ic = 0; ic < nev1; ++ic) {
  10765. // dst indices
  10766. const int i1 = iq1;
  10767. const int i2 = iq2;
  10768. const int i3 = iq3;
  10769. // v indices
  10770. const int iv2 = iq2 % nev2;
  10771. const int iv3 = iq3;
  10772. ggml_vec_dot_f32(masked_begin,
  10773. (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)),
  10774. (float *) ((char *) v->data + ( ic*nbv1 + iv2*nbv2 + iv3*nbv3)),
  10775. S);
  10776. }
  10777. }
  10778. }
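// Summary of the kernel above (a plain restatement, no additional functionality): for each
// query row q, the scratch row S goes through three stages,
//
//     S[j]   = (1/sqrt(D)) * dot(K[j], q)            for j <  masked_begin
//     S[j]   = -INF (-> 0 after softmax)             for j >= masked_begin (causal mask)
//     S      = softmax(S)                            over the unmasked prefix
//     dst[i] = sum_j S[j] * V[i][j]                  for i in [0, D)
//
// masked_begin = P + iq1 + 1 is what implements the causal mask: query iq1 attends to the
// P cached positions plus itself and the earlier new positions only.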
  10779. static void ggml_compute_forward_flash_attn_f16(
  10780. const struct ggml_compute_params * params,
  10781. const struct ggml_tensor * q,
  10782. const struct ggml_tensor * k,
  10783. const struct ggml_tensor * v,
  10784. const bool masked,
  10785. struct ggml_tensor * dst) {
  10786. int64_t t0 = ggml_perf_time_us();
  10787. UNUSED(t0);
  10788. GGML_TENSOR_LOCALS(int64_t, neq, q, ne)
  10789. GGML_TENSOR_LOCALS(size_t, nbq, q, nb)
  10790. GGML_TENSOR_LOCALS(int64_t, nek, k, ne)
  10791. GGML_TENSOR_LOCALS(size_t, nbk, k, nb)
  10792. GGML_TENSOR_LOCALS(int64_t, nev, v, ne)
  10793. GGML_TENSOR_LOCALS(size_t, nbv, v, nb)
  10794. GGML_TENSOR_LOCALS(int64_t, ne, dst, ne)
  10795. GGML_TENSOR_LOCALS(size_t, nb, dst, nb)
  10796. const int ith = params->ith;
  10797. const int nth = params->nth;
  10798. const int64_t D = neq0;
  10799. const int64_t N = neq1;
  10800. const int64_t P = nek1 - N;
  10801. const int64_t M = P + N;
  10802. const int Mup = ggml_up(M, GGML_SOFT_MAX_UNROLL);
  10803. GGML_ASSERT(ne0 == D);
  10804. GGML_ASSERT(ne1 == N);
  10805. GGML_ASSERT(P >= 0);
  10806. GGML_ASSERT(nbq0 == sizeof(ggml_fp16_t));
  10807. GGML_ASSERT(nbk0 == sizeof(ggml_fp16_t));
  10808. GGML_ASSERT(nbv0 == sizeof(ggml_fp16_t));
  10809. GGML_ASSERT(neq0 == D);
  10810. GGML_ASSERT(nek0 == D);
  10811. GGML_ASSERT(nev1 == D);
  10812. GGML_ASSERT(neq1 == N);
  10813. GGML_ASSERT(nek1 == N + P);
  10814. GGML_ASSERT(nev1 == D);
  10815. // dst cannot be transposed or permuted
  10816. GGML_ASSERT(nb0 == sizeof(float));
  10817. GGML_ASSERT(nb0 <= nb1);
  10818. GGML_ASSERT(nb1 <= nb2);
  10819. GGML_ASSERT(nb2 <= nb3);
  10820. if (params->type == GGML_TASK_INIT) {
  10821. return;
  10822. }
  10823. if (params->type == GGML_TASK_FINALIZE) {
  10824. return;
  10825. }
// parallelize by q rows using ggml_vec_dot_f16
  10827. // total rows in q
  10828. const int nr = neq1*neq2*neq3;
  10829. // rows per thread
  10830. const int dr = (nr + nth - 1)/nth;
  10831. // row range for this thread
  10832. const int ir0 = dr*ith;
  10833. const int ir1 = MIN(ir0 + dr, nr);
  10834. const float scale = 1.0f/sqrtf(D);
  10835. //printf("P=%d N=%d D=%d ir0=%d ir1=%d scale = %f\n", P, N, D, ir0, ir1, scale);
  10836. for (int ir = ir0; ir < ir1; ++ir) {
  10837. // q indices
  10838. const int iq3 = ir/(neq2*neq1);
  10839. const int iq2 = (ir - iq3*neq2*neq1)/neq1;
  10840. const int iq1 = (ir - iq3*neq2*neq1 - iq2*neq1);
  10841. float * S = (float *) params->wdata + ith*(2*Mup + CACHE_LINE_SIZE_F32);
  10842. for (int i = M; i < Mup; ++i) {
  10843. S[i] = -INFINITY;
  10844. }
  10845. if (GGML_VEC_DOT_UNROLL > 2 || nek1 % GGML_VEC_DOT_UNROLL != 0) {
  10846. for (int64_t ic = 0; ic < nek1; ++ic) {
  10847. // k indices
  10848. const int ik3 = iq3;
  10849. const int ik2 = iq2 % nek2;
  10850. const int ik1 = ic;
  10851. // S indices
  10852. const int i1 = ik1;
  10853. ggml_vec_dot_f16(neq0,
  10854. S + i1,
  10855. (ggml_fp16_t *) ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)),
  10856. (ggml_fp16_t *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)));
  10857. }
  10858. } else {
  10859. for (int64_t ic = 0; ic < nek1; ic += GGML_VEC_DOT_UNROLL) {
  10860. // k indices
  10861. const int ik3 = iq3;
  10862. const int ik2 = iq2 % nek2;
  10863. const int ik1 = ic;
  10864. // S indices
  10865. const int i1 = ik1;
  10866. ggml_vec_dot_f16_unroll(neq0, nbk1,
  10867. S + i1,
  10868. ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)),
  10869. (ggml_fp16_t *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)));
  10870. }
  10871. }
  10872. // scale
  10873. ggml_vec_scale_f32(nek1, S, scale);
  10874. if (masked) {
  10875. for (int64_t i = P; i < M; i++) {
  10876. if (i > P + iq1) {
  10877. S[i] = -INFINITY;
  10878. }
  10879. }
  10880. }
  10881. // softmax
// TODO: exclude known -INF S[..] values from the max and from the loop, assuming their results to be zero.
// don't forget to set their S values to zero
  10884. {
  10885. float max = -INFINITY;
  10886. ggml_vec_max_f32(M, &max, S);
  10887. ggml_float sum = 0.0;
  10888. {
  10889. #ifdef GGML_SOFT_MAX_ACCELERATE
  10890. max = -max;
  10891. vDSP_vsadd(S, 1, &max, S, 1, Mup);
  10892. vvexpf(S, S, &Mup);
  10893. ggml_vec_sum_f32(Mup, &sum, S);
  10894. #else
  10895. uint16_t scvt[GGML_SOFT_MAX_UNROLL];
  10896. ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 };
  10897. for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) {
  10898. float * SS = S + i;
  10899. for (int j = 0; j < GGML_SOFT_MAX_UNROLL; ++j) {
  10900. if (SS[j] == -INFINITY) {
  10901. SS[j] = 0.0f;
  10902. } else {
  10903. ggml_fp16_t s = GGML_FP32_TO_FP16(SS[j] - max);
  10904. memcpy(&scvt[j], &s, sizeof(uint16_t));
  10905. const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt[j]]);
  10906. sump[j] += (ggml_float)val;
  10907. SS[j] = val;
  10908. }
  10909. }
  10910. }
  10911. for (int i = 0; i < GGML_SOFT_MAX_UNROLL; i++) {
  10912. sum += sump[i];
  10913. }
  10914. #endif
  10915. }
  10916. assert(sum > 0.0);
  10917. sum = 1.0/sum;
  10918. ggml_vec_scale_f32(M, S, sum);
  10919. #ifndef NDEBUG
  10920. for (int i = 0; i < M; ++i) {
  10921. assert(!isnan(S[i]));
  10922. assert(!isinf(S[i]));
  10923. }
  10924. #endif
  10925. }
  10926. ggml_fp16_t * S16 = (ggml_fp16_t *) ((float *) params->wdata + ith*(2*Mup + CACHE_LINE_SIZE_F32) + Mup);
  10927. for (int64_t i = 0; i < M; i++) {
  10928. S16[i] = GGML_FP32_TO_FP16(S[i]);
  10929. }
// TODO: exclude known zero S[..] values from the dot product (reducing nev0 and advancing the start of v and S16).
  10931. if (GGML_VEC_DOT_UNROLL == 1 || (nev1 % GGML_VEC_DOT_UNROLL != 0)) {
  10932. for (int64_t ic = 0; ic < nev1; ++ic) {
  10933. // dst indices
  10934. const int i1 = iq1;
  10935. const int i2 = iq2;
  10936. const int i3 = iq3;
  10937. // v indices
  10938. const int iv2 = iq2 % nev2;
  10939. const int iv3 = iq3;
  10940. ggml_vec_dot_f16(nev0,
  10941. (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)),
  10942. (ggml_fp16_t *) ((char *) v->data + ( ic*nbv1 + iv2*nbv2 + iv3*nbv3)),
  10943. S16);
  10944. }
  10945. } else {
  10946. for (int64_t ic = 0; ic < nev1; ic += GGML_VEC_DOT_UNROLL) {
  10947. // dst indices
  10948. const int i1 = iq1;
  10949. const int i2 = iq2;
  10950. const int i3 = iq3;
  10951. // v indices
  10952. const int iv2 = iq2 % nev2;
  10953. const int iv3 = iq3;
  10954. ggml_vec_dot_f16_unroll(nev0, nbv1,
  10955. (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)),
  10956. ((char *) v->data + ( ic*nbv1 + iv2*nbv2 + iv3*nbv3)),
  10957. S16);
  10958. }
  10959. }
  10960. }
  10961. }
  10962. static void ggml_compute_forward_flash_attn(
  10963. const struct ggml_compute_params * params,
  10964. const struct ggml_tensor * q,
  10965. const struct ggml_tensor * k,
  10966. const struct ggml_tensor * v,
  10967. const bool masked,
  10968. struct ggml_tensor * dst) {
  10969. switch (q->type) {
  10970. case GGML_TYPE_F16:
  10971. {
  10972. ggml_compute_forward_flash_attn_f16(params, q, k, v, masked, dst);
  10973. } break;
  10974. case GGML_TYPE_F32:
  10975. {
  10976. ggml_compute_forward_flash_attn_f32(params, q, k, v, masked, dst);
  10977. } break;
  10978. default:
  10979. {
  10980. GGML_ASSERT(false);
  10981. } break;
  10982. }
  10983. }
  10984. // ggml_compute_forward_flash_ff
  10985. static void ggml_compute_forward_flash_ff_f16(
  10986. const struct ggml_compute_params * params,
  10987. const struct ggml_tensor * a, // F16
  10988. const struct ggml_tensor * b0, // F16 fc_w
  10989. const struct ggml_tensor * b1, // F32 fc_b
  10990. const struct ggml_tensor * c0, // F16 proj_w
  10991. const struct ggml_tensor * c1, // F32 proj_b
  10992. struct ggml_tensor * dst) {
  10993. int64_t t0 = ggml_perf_time_us();
  10994. UNUSED(t0);
  10995. GGML_TENSOR_LOCALS(int64_t, nea, a, ne)
  10996. GGML_TENSOR_LOCALS(size_t, nba, a, nb)
  10997. GGML_TENSOR_LOCALS(int64_t, neb0, b0, ne)
  10998. GGML_TENSOR_LOCALS(size_t, nbb0, b0, nb)
  10999. GGML_TENSOR_LOCALS(int64_t, neb1, b1, ne)
  11000. GGML_TENSOR_LOCALS(size_t, nbb1, b1, nb)
  11001. GGML_TENSOR_LOCALS(int64_t, nec0, c0, ne)
  11002. GGML_TENSOR_LOCALS(size_t, nbc0, c0, nb)
  11003. GGML_TENSOR_LOCALS(int64_t, nec1, c1, ne)
  11004. GGML_TENSOR_LOCALS(size_t, nbc1, c1, nb)
  11005. GGML_TENSOR_LOCALS(int64_t, ne, dst, ne)
  11006. GGML_TENSOR_LOCALS(size_t, nb, dst, nb)
  11007. const int ith = params->ith;
  11008. const int nth = params->nth;
  11009. const int64_t D = nea0;
  11010. //const int64_t N = nea1;
  11011. const int64_t M = neb01;
  11012. GGML_ASSERT(ne0 == nea0);
  11013. GGML_ASSERT(ne1 == nea1);
  11014. GGML_ASSERT(ne2 == nea2);
  11015. GGML_ASSERT(nba0 == sizeof(ggml_fp16_t));
  11016. GGML_ASSERT(nbb00 == sizeof(ggml_fp16_t));
  11017. GGML_ASSERT(nbb10 == sizeof(float));
  11018. GGML_ASSERT(nbc00 == sizeof(ggml_fp16_t));
  11019. GGML_ASSERT(nbc10 == sizeof(float));
  11020. GGML_ASSERT(neb00 == D);
  11021. GGML_ASSERT(neb01 == M);
  11022. GGML_ASSERT(neb10 == M);
  11023. GGML_ASSERT(neb11 == 1);
  11024. GGML_ASSERT(nec00 == M);
  11025. GGML_ASSERT(nec01 == D);
  11026. GGML_ASSERT(nec10 == D);
  11027. GGML_ASSERT(nec11 == 1);
  11028. // dst cannot be transposed or permuted
  11029. GGML_ASSERT(nb0 == sizeof(float));
  11030. GGML_ASSERT(nb0 <= nb1);
  11031. GGML_ASSERT(nb1 <= nb2);
  11032. GGML_ASSERT(nb2 <= nb3);
  11033. if (params->type == GGML_TASK_INIT) {
  11034. return;
  11035. }
  11036. if (params->type == GGML_TASK_FINALIZE) {
  11037. return;
  11038. }
// parallelize by a rows using ggml_vec_dot_f16
  11040. // total rows in a
  11041. const int nr = nea1*nea2*nea3;
  11042. // rows per thread
  11043. const int dr = (nr + nth - 1)/nth;
  11044. // row range for this thread
  11045. const int ir0 = dr*ith;
  11046. const int ir1 = MIN(ir0 + dr, nr);
  11047. for (int ir = ir0; ir < ir1; ++ir) {
  11048. // a indices
  11049. const int ia3 = ir/(nea2*nea1);
  11050. const int ia2 = (ir - ia3*nea2*nea1)/nea1;
  11051. const int ia1 = (ir - ia3*nea2*nea1 - ia2*nea1);
  11052. float * S = (float *) params->wdata + ith*(2*M + CACHE_LINE_SIZE_F32);
  11053. for (int64_t ic = 0; ic < neb01; ++ic) {
  11054. // b0 indices
  11055. const int ib03 = ia3;
  11056. const int ib02 = ia2;
  11057. const int ib01 = ic;
  11058. // S indices
  11059. const int i1 = ib01;
  11060. ggml_vec_dot_f16(nea0,
  11061. S + i1,
  11062. (ggml_fp16_t *) ((char *) b0->data + (ib01*nbb01 + ib02*nbb02 + ib03*nbb03)),
  11063. (ggml_fp16_t *) ((char *) a->data + ( ia1*nba1 + ia2*nba2 + ia3*nba3)));
  11064. }
  11065. ggml_vec_add_f32(neb01, S, S, (float *) b1->data);
  11066. //ggml_vec_gelu_f32(neb01, S, S);
  11067. ggml_fp16_t * S16 = (ggml_fp16_t *) ((float *) params->wdata + ith*(2*M + CACHE_LINE_SIZE_F32) + M);
  11068. for (int64_t i = 0; i < M; i++) {
  11069. S16[i] = GGML_FP32_TO_FP16(S[i]);
  11070. }
  11071. ggml_vec_gelu_f16(neb01, S16, S16);
  11072. {
  11073. // dst indices
  11074. const int i1 = ia1;
  11075. const int i2 = ia2;
  11076. const int i3 = ia3;
  11077. for (int64_t ic = 0; ic < nec01; ++ic) {
  11078. ggml_vec_dot_f16(neb01,
  11079. (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)),
  11080. (ggml_fp16_t *) ((char *) c0->data + ( ic*nbc01 + i2*nbc02 + i3*nbc03)),
  11081. S16);
  11082. }
  11083. ggml_vec_add_f32(nec01,
  11084. (float *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3)),
  11085. (float *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3)),
  11086. (float *) c1->data);
  11087. }
  11088. }
  11089. }
  11090. static void ggml_compute_forward_flash_ff(
  11091. const struct ggml_compute_params * params,
  11092. const struct ggml_tensor * a,
  11093. const struct ggml_tensor * b0,
  11094. const struct ggml_tensor * b1,
  11095. const struct ggml_tensor * c0,
  11096. const struct ggml_tensor * c1,
  11097. struct ggml_tensor * dst) {
  11098. switch (b0->type) {
  11099. case GGML_TYPE_F16:
  11100. {
  11101. ggml_compute_forward_flash_ff_f16(params, a, b0, b1, c0, c1, dst);
  11102. } break;
  11103. case GGML_TYPE_F32:
  11104. {
  11105. GGML_ASSERT(false); // TODO
  11106. } break;
  11107. default:
  11108. {
  11109. GGML_ASSERT(false);
  11110. } break;
  11111. }
  11112. }
  11113. // ggml_compute_forward_flash_attn_back
  11114. static void ggml_compute_forward_flash_attn_back_f32(
  11115. const struct ggml_compute_params * params,
  11116. const struct ggml_tensor * q,
  11117. const struct ggml_tensor * k,
  11118. const struct ggml_tensor * v,
  11119. const struct ggml_tensor * d,
  11120. const bool masked,
  11121. struct ggml_tensor * dst) {
  11122. int64_t t0 = ggml_perf_time_us();
  11123. UNUSED(t0);
  11124. GGML_TENSOR_LOCALS(int64_t, neq, q, ne)
  11125. GGML_TENSOR_LOCALS(size_t, nbq, q, nb)
  11126. GGML_TENSOR_LOCALS(int64_t, nek, k, ne)
  11127. GGML_TENSOR_LOCALS(size_t, nbk, k, nb)
  11128. GGML_TENSOR_LOCALS(int64_t, nev, v, ne)
  11129. GGML_TENSOR_LOCALS(size_t, nbv, v, nb)
  11130. GGML_TENSOR_LOCALS(int64_t, ned, d, ne)
  11131. GGML_TENSOR_LOCALS(size_t, nbd, d, nb)
  11132. GGML_TENSOR_LOCALS(int64_t, ne, dst, ne)
  11133. GGML_TENSOR_LOCALS(size_t, nb, dst, nb)
  11134. const int ith = params->ith;
  11135. const int nth = params->nth;
  11136. const int64_t D = neq0;
  11137. const int64_t N = neq1;
  11138. const int64_t P = nek1 - N;
  11139. const int64_t M = P + N;
  11140. const int Mup = ggml_up(M, GGML_SOFT_MAX_UNROLL);
  11141. const int mxDM = MAX(D, Mup);
  11142. // GGML_ASSERT(ne0 == D);
  11143. // GGML_ASSERT(ne1 == N);
  11144. GGML_ASSERT(P >= 0);
  11145. GGML_ASSERT(nbq0 == sizeof(float));
  11146. GGML_ASSERT(nbk0 == sizeof(float));
  11147. GGML_ASSERT(nbv0 == sizeof(float));
  11148. GGML_ASSERT(neq0 == D);
  11149. GGML_ASSERT(nek0 == D);
  11150. GGML_ASSERT(nev1 == D);
  11151. GGML_ASSERT(ned0 == D);
  11152. GGML_ASSERT(neq1 == N);
  11153. GGML_ASSERT(nek1 == N + P);
  11154. GGML_ASSERT(nev1 == D);
  11155. GGML_ASSERT(ned1 == N);
  11156. // dst cannot be transposed or permuted
  11157. GGML_ASSERT(nb0 == sizeof(float));
  11158. GGML_ASSERT(nb0 <= nb1);
  11159. GGML_ASSERT(nb1 <= nb2);
  11160. GGML_ASSERT(nb2 <= nb3);
  11161. if (params->type == GGML_TASK_INIT) {
  11162. if (ith == 0) {
  11163. memset(dst->data, 0, nb0*ne0*ne1*ne2*ne3);
  11164. }
  11165. return;
  11166. }
  11167. if (params->type == GGML_TASK_FINALIZE) {
  11168. return;
  11169. }
  11170. const int64_t elem_q = ggml_nelements(q);
  11171. const int64_t elem_k = ggml_nelements(k);
  11172. enum ggml_type result_type = dst->type;
  11173. GGML_ASSERT(ggml_blck_size(result_type) == 1);
  11174. const size_t tsize = ggml_type_size(result_type);
  11175. const size_t offs_q = 0;
  11176. const size_t offs_k = offs_q + GGML_PAD(elem_q * tsize, GGML_MEM_ALIGN);
  11177. const size_t offs_v = offs_k + GGML_PAD(elem_k * tsize, GGML_MEM_ALIGN);
  11178. void * grad_q = (char *) dst->data;
  11179. void * grad_k = (char *) dst->data + offs_k;
  11180. void * grad_v = (char *) dst->data + offs_v;
  11181. const size_t nbgq1 = nb0*neq0;
  11182. const size_t nbgq2 = nb0*neq0*neq1;
  11183. const size_t nbgq3 = nb0*neq0*neq1*neq2;
  11184. const size_t nbgk1 = nb0*nek0;
  11185. const size_t nbgk2 = nb0*nek0*nek1;
  11186. const size_t nbgk3 = nb0*nek0*nek1*neq2;
  11187. const size_t nbgv1 = nb0*nev0;
  11188. const size_t nbgv2 = nb0*nev0*nev1;
  11189. const size_t nbgv3 = nb0*nev0*nev1*neq2;
  11190. // parallelize by k rows using ggml_vec_dot_f32
  11191. // total rows in k
  11192. const int nr = nek2*nek3;
  11193. // rows per thread
  11194. const int dr = (nr + nth - 1)/nth;
  11195. // row range for this thread
  11196. const int ir0 = dr*ith;
  11197. const int ir1 = MIN(ir0 + dr, nr);
  11198. const float scale = 1.0f/sqrtf(D);
  11199. //printf("P=%d N=%d D=%d ir0=%d ir1=%d scale = %f\n", P, N, D, ir0, ir1, scale);
  11200. // how often k2 (and v2) is repeated in q2
  11201. int nrep = neq2/nek2;
  11202. for (int ir = ir0; ir < ir1; ++ir) {
11203. // k indices
  11204. const int ik3 = ir/(nek2);
  11205. const int ik2 = ir - ik3*nek2;
  11206. const int iq3 = ik3;
  11207. const int id3 = ik3;
  11208. const int iv3 = ik3;
  11209. const int iv2 = ik2;
  11210. for (int irep = 0; irep < nrep; ++irep) {
  11211. const int iq2 = ik2 + irep*nek2;
  11212. const int id2 = iq2;
  11213. // (ik2 + irep*nek2) % nek2 == ik2
  11214. for (int iq1 = 0; iq1 < neq1; ++iq1) {
  11215. const int id1 = iq1;
11216. // not sure about CACHE_LINE_SIZE_F32 here:
11217. // - maybe it should not be multiplied by 2 and should be excluded from the SM offset, i.e. the 1*(..) term?
  11218. float * S = (float *) params->wdata + ith*2*(mxDM + CACHE_LINE_SIZE_F32) + 0*(mxDM+CACHE_LINE_SIZE_F32);
  11219. float * SM = (float *) params->wdata + ith*2*(mxDM + CACHE_LINE_SIZE_F32) + 1*(mxDM+CACHE_LINE_SIZE_F32);
  11220. for (int i = M; i < Mup; ++i) {
  11221. S[i] = -INFINITY;
  11222. }
  11223. const int64_t masked_begin = masked ? (P + iq1 + 1) : M;
  11224. for (int64_t ic = 0; ic < masked_begin; ++ic) {
  11225. // k indices
  11226. const int ik1 = ic;
  11227. // S indices
  11228. const int i1 = ik1;
  11229. ggml_vec_dot_f32(neq0,
  11230. S + i1,
  11231. (float *) ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)),
  11232. (float *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)));
  11233. }
  11234. // scale
  11235. ggml_vec_scale_f32(masked_begin, S, scale);
  11236. for (int64_t i = masked_begin; i < M; i++) {
  11237. S[i] = -INFINITY;
  11238. }
  11239. // softmax
  11240. // exclude known -INF S[..] values from max and loop
11241. // don't forget to set their SM values to zero
  11242. {
  11243. float max = -INFINITY;
  11244. ggml_vec_max_f32(masked_begin, &max, S);
  11245. ggml_float sum = 0.0;
  11246. {
  11247. #ifdef GGML_SOFT_MAX_ACCELERATE
  11248. max = -max;
  11249. vDSP_vsadd(SM, 1, &max, SM, 1, Mup);
  11250. vvexpf(SM, SM, &Mup);
  11251. ggml_vec_sum_f32(Mup, &sum, SM);
  11252. #else
  11253. uint16_t scvt[GGML_SOFT_MAX_UNROLL]; UNUSED(scvt);
  11254. ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 };
  11255. for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) {
  11256. if (i >= masked_begin) {
  11257. break;
  11258. }
  11259. float * SR = S + i;
  11260. float * SW = SM + i;
  11261. for (int j = 0; j < GGML_SOFT_MAX_UNROLL; ++j) {
  11262. if (i + j >= masked_begin) {
  11263. break;
  11264. } else if (SR[j] == -INFINITY) {
  11265. SW[j] = 0.0f;
  11266. } else {
  11267. #ifndef GGML_FLASH_ATTN_EXP_FP16
  11268. const float val = expf(SR[j] - max);
  11269. #else
  11270. ggml_fp16_t s = GGML_FP32_TO_FP16(SR[j] - max);
  11271. memcpy(&scvt[j], &s, sizeof(uint16_t));
  11272. const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt[j]]);
  11273. #endif
  11274. sump[j] += (ggml_float)val;
  11275. SW[j] = val;
  11276. }
  11277. }
  11278. }
  11279. for (int i = 0; i < GGML_SOFT_MAX_UNROLL; i++) {
  11280. sum += sump[i];
  11281. }
  11282. #endif
  11283. }
  11284. assert(sum > 0.0);
  11285. sum = 1.0/sum;
  11286. ggml_vec_scale_f32(masked_begin, SM, sum);
  11287. }
  11288. // step-by-step explanation
  11289. {
  11290. // forward-process shape grads from backward process
  11291. // parallel_for ik2,ik3:
  11292. // for irep:
  11293. // iq2 = ik2 + irep*nek2
  11294. // k[:D,:M,:,:] [D,M,:,:] grad[k][:D,:M,ik2,ik3] += grad[kcur]
  11295. // q[:D,:N,:,:] [D,N,:,:] grad[q][:D,iq1,iq2,iq3] += grad[qcur]
  11296. // v[:M,:D,:,:] [M,D,:,:] grad[v][:M,:D,iv2,iv3] += grad[vcur]
  11297. // for iq1:
  11298. // kcur = k[:D,:M,ik2,ik3] [D,M,1,1] grad[kcur] = grad[S1].T @ qcur
  11299. // qcur = q[:D,iq1,iq2,iq3] [D,1,1,1] grad[qcur] = grad[S1] @ kcur
  11300. // vcur = v[:M,:D,iv2,iv3] [M,D,1,1] grad[vcur] = grad[S5].T @ S4
  11301. // S0 = -Inf [D,1,1,1]
  11302. // ~S1[i] = dot(kcur[:D,i], qcur)
  11303. // S1 = qcur @ kcur.T [M,1,1,1] grad[S1] = grad[S2] * scale
  11304. // S2 = S1 * scale [M,1,1,1] grad[S2] = diag_mask_zero(grad[S3], P)
  11305. // S3 = diag_mask_inf(S2, P) [M,1,1,1] grad[S3] = S4 * (grad[S4] - dot(S4, grad[S4]))
  11306. // S4 = softmax(S3) [M,1,1,1] grad[S4] = grad[S5] @ vcur
  11307. // ~S5[i] = dot(vcur[:,i], S4)
  11308. // S5 = S4 @ vcur.T [D,1,1,1] grad[S5] = d[:D,id1,id2,id3]
11309. // ~dst[i,iq1,iq2,iq3] = S5[i]
11310. // dst[:D,iq1,iq2,iq3] = S5
11311. // i.e. dst is S5 in the forward pass, and its incoming backward gradient is given: grad[dst[:D,iq1,iq2,iq3]] = d[:D,id1,id2,id3], so grad[dst] = d
  11312. //
  11313. // output gradients with their dependencies:
  11314. //
  11315. // grad[kcur] = grad[S1].T @ qcur
  11316. // grad[S1] = diag_mask_zero(grad[S3], P) * scale
  11317. // grad[S3] = S4 * (grad[S4] - dot(S4, grad[S4]))
  11318. // grad[S4] = grad[S5] @ vcur
  11319. // grad[S4] = d[:D,id1,id2,id3] @ vcur
  11320. // grad[qcur] = grad[S1] @ kcur
  11321. // grad[vcur] = grad[S5].T @ S4
  11322. // grad[vcur] = d[:D,id1,id2,id3].T @ S4
  11323. //
  11324. // in post-order:
  11325. //
  11326. // S1 = qcur @ kcur.T
  11327. // S2 = S1 * scale
  11328. // S3 = diag_mask_inf(S2, P)
  11329. // S4 = softmax(S3)
  11330. // grad[S4] = d[:D,id1,id2,id3] @ vcur
  11331. // grad[S3] = S4 * (grad[S4] - dot(S4, grad[S4]))
  11332. // grad[S1] = diag_mask_zero(grad[S3], P) * scale
  11333. // grad[qcur] = grad[S1] @ kcur
  11334. // grad[kcur] = grad[S1].T @ qcur
  11335. // grad[vcur] = d[:D,id1,id2,id3].T @ S4
  11336. //
  11337. // using less variables (SM=S4):
  11338. //
  11339. // S = diag_mask_inf(qcur @ kcur.T * scale, P)
  11340. // SM = softmax(S)
  11341. // S = d[:D,iq1,iq2,iq3] @ vcur
  11342. // dot_SM_gradSM = dot(SM, S)
  11343. // S = SM * (S - dot(SM, S))
  11344. // S = diag_mask_zero(S, P) * scale
  11345. //
  11346. // grad[q][:D,iq1,iq2,iq3] += S @ kcur
  11347. // grad[k][:D,:M,ik2,ik3] += S.T @ qcur
  11348. // grad[v][:M,:D,iv2,iv3] += d[:D,id1,id2,id3].T @ SM
  11349. }
  11350. // S = gradSM = d[:D,id1,id2,id3] @ vcur[:,:,iv2,iv3]
  11351. // S = d[:D,id1,id2,id3] @ vcur[:,:,iv2,iv3]
  11352. // for ic:
  11353. // S[:M] += vcur[:M,ic,iv2,iv3] * d[ic,id1,id2,id3]
  11354. // exclude known future zero S[..] values from operation
  11355. ggml_vec_set_f32(masked_begin, S, 0);
  11356. for (int64_t ic = 0; ic < D; ++ic) {
  11357. ggml_vec_mad_f32(masked_begin,
  11358. S,
  11359. (float *) ((char *) v->data + ( ic*nbv1 + iv2*nbv2 + iv3*nbv3)),
  11360. *(float *) ((char *) d->data + (ic*nbd0 + id1*nbd1 + id2*nbd2 + id3*nbd3)));
  11361. }
  11362. // S = SM * (S - dot(SM, S))
  11363. float dot_SM_gradSM = 0;
  11364. ggml_vec_dot_f32 (masked_begin, &dot_SM_gradSM, SM, S);
  11365. ggml_vec_acc1_f32(M, S, -dot_SM_gradSM);
  11366. ggml_vec_mul_f32 (masked_begin, S, S, SM);
  11367. // S = diag_mask_zero(S, P) * scale
  11368. // already done by above ggml_vec_set_f32
  11369. // exclude known zero S[..] values from operation
  11370. ggml_vec_scale_f32(masked_begin, S, scale);
  11371. // S shape [M,1]
  11372. // SM shape [M,1]
  11373. // kcur shape [D,M]
  11374. // qcur shape [D,1]
  11375. // vcur shape [M,D]
  11376. // grad[q][:D,iq1,iq2,iq3] += S @ kcur
  11377. // grad[q][:D,iq1,iq2,iq3] += shape[M,1] @ shape[D,M]
  11378. // for ic:
  11379. // grad[q][:D,iq1,iq2,iq3] += S[ic] * kcur[:D,ic,ik2,ik3]
  11380. // exclude known zero S[..] values from loop
  11381. for (int64_t ic = 0; ic < masked_begin; ++ic) {
  11382. ggml_vec_mad_f32(D,
  11383. (float *) ((char *) grad_q + (iq1*nbgq1 + iq2*nbgq2 + iq3*nbgq3)),
  11384. (float *) ((char *) k->data + (ic*nbk1 + ik2*nbk2 + ik3*nbk3)),
  11385. S[ic]);
  11386. }
11387. // grad[k][:D,:M,ik2,ik3] += S.T @ qcur
11388. // for ic:
11389. // grad[k][:D,ic,ik2,ik3] += S.T[0,ic] * qcur[:D,0]
11390. // grad[k][:D,ic,ik2,ik3] += S[ic] * qcur[:D,0]
  11391. // exclude known zero S[..] values from loop
  11392. for (int64_t ic = 0; ic < masked_begin; ++ic) {
  11393. ggml_vec_mad_f32(D,
  11394. (float *) ((char *) grad_k + (ic*nbgk1 + ik2*nbgk2 + ik3*nbgk3)),
  11395. (float *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)),
  11396. S[ic]);
  11397. }
  11398. // grad[v][:M,:D,iv2,iv3] += d[:D,id1,id2,id3].T @ SM
  11399. // for ic:
  11400. // grad[v][:M,ic,iv2,iv3] += d[:D,id1,id2,id3].T[0,ic] * SM[:M]
  11401. // grad[v][:M,ic,iv2,iv3] += d[ic,id1,id2,id3] * SM[:M]
  11402. // exclude known zero SM[..] values from mad
  11403. for (int64_t ic = 0; ic < D; ++ic) {
  11404. ggml_vec_mad_f32(masked_begin,
  11405. (float *) ((char *) grad_v + ( ic*nbgv1 + iv2*nbgv2 + iv3*nbgv3)),
  11406. SM,
  11407. *(float *) ((char *) d->data + (ic*nbd0 + id1*nbd1 + id2*nbd2 + id3*nbd3)));
  11408. }
  11409. }
  11410. }
  11411. }
  11412. }
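// Editorial sketch (not part of ggml, not called anywhere): the backward pass above relies on
// the softmax Jacobian-vector product grad_x = y * (grad_y - dot(y, grad_y)), where
// y = softmax(x) (see "grad[S3] = S4 * (grad[S4] - dot(S4, grad[S4]))" in the comments).
// A minimal scalar reference version of that identity:
static inline void ggml_softmax_backward_ref(int n, float * grad_x, const float * y, const float * grad_y) {
    // dot(y, grad_y)
    float dot = 0.0f;
    for (int i = 0; i < n; ++i) {
        dot += y[i]*grad_y[i];
    }
    // grad_x[i] = y[i] * (grad_y[i] - dot(y, grad_y))
    for (int i = 0; i < n; ++i) {
        grad_x[i] = y[i]*(grad_y[i] - dot);
    }
}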
  11413. static void ggml_compute_forward_flash_attn_back(
  11414. const struct ggml_compute_params * params,
  11415. const struct ggml_tensor * q,
  11416. const struct ggml_tensor * k,
  11417. const struct ggml_tensor * v,
  11418. const struct ggml_tensor * d,
  11419. const bool masked,
  11420. struct ggml_tensor * dst) {
  11421. switch (q->type) {
  11422. case GGML_TYPE_F32:
  11423. {
  11424. ggml_compute_forward_flash_attn_back_f32(params, q, k, v, d, masked, dst);
  11425. } break;
  11426. default:
  11427. {
  11428. GGML_ASSERT(false);
  11429. } break;
  11430. }
  11431. }
  11432. // ggml_compute_forward_win_part
  11433. static void ggml_compute_forward_win_part_f32(
  11434. const struct ggml_compute_params * params,
  11435. const struct ggml_tensor * src0,
  11436. struct ggml_tensor * dst) {
  11437. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  11438. return;
  11439. }
  11440. GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne)
  11441. GGML_TENSOR_LOCALS(int64_t, ne, dst, ne)
  11442. const int32_t nep0 = ((const int32_t *)(dst->op_params))[0];
  11443. const int32_t nep1 = ((const int32_t *)(dst->op_params))[1];
  11444. const int32_t w = ((const int32_t *)(dst->op_params))[2];
  11445. assert(ne00 == ne0);
  11446. assert(ne3 == nep0*nep1);
  11447. // TODO: optimize / multi-thread
  11448. for (int py = 0; py < nep1; ++py) {
  11449. for (int px = 0; px < nep0; ++px) {
  11450. const int64_t i3 = py*nep0 + px;
  11451. for (int64_t i2 = 0; i2 < ne2; ++i2) {
  11452. for (int64_t i1 = 0; i1 < ne1; ++i1) {
  11453. for (int64_t i0 = 0; i0 < ne0; ++i0) {
  11454. const int64_t i02 = py*w + i2;
  11455. const int64_t i01 = px*w + i1;
  11456. const int64_t i00 = i0;
  11457. const int64_t i = i3*ne2*ne1*ne0 + i2*ne1*ne0 + i1*ne0 + i0;
  11458. const int64_t j = i02*ne01*ne00 + i01*ne00 + i00;
  11459. if (py*w + i2 >= ne02 || px*w + i1 >= ne01) {
  11460. ((float *) dst->data)[i] = 0.0f;
  11461. } else {
  11462. ((float *) dst->data)[i] = ((float *) src0->data)[j];
  11463. }
  11464. }
  11465. }
  11466. }
  11467. }
  11468. }
  11469. }
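// Editorial note (not in the original source): the function above partitions the spatial dims
// (ne01, ne02) of src0 into nep0 x nep1 non-overlapping windows of size w x w, zero-padding
// windows that extend past the input; ggml_compute_forward_win_unpart_f32 further below
// performs the inverse operation and drops the padding again.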
  11470. static void ggml_compute_forward_win_part(
  11471. const struct ggml_compute_params * params,
  11472. const struct ggml_tensor * src0,
  11473. struct ggml_tensor * dst) {
  11474. switch (src0->type) {
  11475. case GGML_TYPE_F32:
  11476. {
  11477. ggml_compute_forward_win_part_f32(params, src0, dst);
  11478. } break;
  11479. default:
  11480. {
  11481. GGML_ASSERT(false);
  11482. } break;
  11483. }
  11484. }
  11485. // ggml_compute_forward_win_unpart
  11486. static void ggml_compute_forward_win_unpart_f32(
  11487. const struct ggml_compute_params * params,
  11488. const struct ggml_tensor * src0,
  11489. struct ggml_tensor * dst) {
  11490. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  11491. return;
  11492. }
  11493. GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne)
  11494. GGML_TENSOR_LOCALS(int64_t, ne, dst, ne)
  11495. const int32_t w = ((const int32_t *)(dst->op_params))[0];
  11496. // padding
  11497. const int px = (w - ne1%w)%w;
  11498. //const int py = (w - ne2%w)%w;
  11499. const int npx = (px + ne1)/w;
  11500. //const int npy = (py + ne2)/w;
  11501. assert(ne0 == ne00);
  11502. // TODO: optimize / multi-thread
  11503. for (int64_t i2 = 0; i2 < ne2; ++i2) {
  11504. for (int64_t i1 = 0; i1 < ne1; ++i1) {
  11505. for (int64_t i0 = 0; i0 < ne0; ++i0) {
  11506. const int ip2 = i2/w;
  11507. const int ip1 = i1/w;
  11508. const int64_t i02 = i2%w;
  11509. const int64_t i01 = i1%w;
  11510. const int64_t i00 = i0;
  11511. const int64_t i = (ip2*npx + ip1)*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00 + i00;
  11512. const int64_t j = i2*ne1*ne0 + i1*ne0 + i0;
  11513. ((float *) dst->data)[j] = ((float *) src0->data)[i];
  11514. }
  11515. }
  11516. }
  11517. }
  11518. static void ggml_compute_forward_win_unpart(
  11519. const struct ggml_compute_params * params,
  11520. const struct ggml_tensor * src0,
  11521. struct ggml_tensor * dst) {
  11522. switch (src0->type) {
  11523. case GGML_TYPE_F32:
  11524. {
  11525. ggml_compute_forward_win_unpart_f32(params, src0, dst);
  11526. } break;
  11527. default:
  11528. {
  11529. GGML_ASSERT(false);
  11530. } break;
  11531. }
  11532. }
11533. // ggml_compute_forward_unary
  11534. static void ggml_compute_forward_unary(
  11535. const struct ggml_compute_params * params,
  11536. const struct ggml_tensor * src0,
  11537. struct ggml_tensor * dst) {
  11538. const enum ggml_unary_op op = ggml_get_unary_op(dst);
  11539. switch (op) {
  11540. case GGML_UNARY_OP_ABS:
  11541. {
  11542. ggml_compute_forward_abs(params, src0, dst);
  11543. } break;
  11544. case GGML_UNARY_OP_SGN:
  11545. {
  11546. ggml_compute_forward_sgn(params, src0, dst);
  11547. } break;
  11548. case GGML_UNARY_OP_NEG:
  11549. {
  11550. ggml_compute_forward_neg(params, src0, dst);
  11551. } break;
  11552. case GGML_UNARY_OP_STEP:
  11553. {
  11554. ggml_compute_forward_step(params, src0, dst);
  11555. } break;
  11556. case GGML_UNARY_OP_TANH:
  11557. {
  11558. ggml_compute_forward_tanh(params, src0, dst);
  11559. } break;
  11560. case GGML_UNARY_OP_ELU:
  11561. {
  11562. ggml_compute_forward_elu(params, src0, dst);
  11563. } break;
  11564. case GGML_UNARY_OP_RELU:
  11565. {
  11566. ggml_compute_forward_relu(params, src0, dst);
  11567. } break;
  11568. case GGML_UNARY_OP_GELU:
  11569. {
  11570. ggml_compute_forward_gelu(params, src0, dst);
  11571. } break;
  11572. case GGML_UNARY_OP_GELU_QUICK:
  11573. {
  11574. ggml_compute_forward_gelu_quick(params, src0, dst);
  11575. } break;
  11576. case GGML_UNARY_OP_SILU:
  11577. {
  11578. ggml_compute_forward_silu(params, src0, dst);
  11579. } break;
  11580. case GGML_UNARY_OP_HARDSWISH:
  11581. {
  11582. ggml_compute_forward_hardswish(params, src0, dst);
  11583. } break;
  11584. case GGML_UNARY_OP_HARDSIGMOID:
  11585. {
  11586. ggml_compute_forward_hardsigmoid(params, src0, dst);
  11587. } break;
  11588. default:
  11589. {
  11590. GGML_ASSERT(false);
  11591. } break;
  11592. }
  11593. }
  11594. // ggml_compute_forward_get_rel_pos
  11595. static void ggml_compute_forward_get_rel_pos_f16(
  11596. const struct ggml_compute_params * params,
  11597. const struct ggml_tensor * src0,
  11598. struct ggml_tensor * dst) {
  11599. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  11600. return;
  11601. }
  11602. // ref: https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/modeling/image_encoder.py#L292-L322
  11603. GGML_TENSOR_UNARY_OP_LOCALS
  11604. const int64_t w = ne1;
  11605. ggml_fp16_t * src0_data = (ggml_fp16_t *) src0->data;
  11606. ggml_fp16_t * dst_data = (ggml_fp16_t *) dst->data;
  11607. for (int64_t i2 = 0; i2 < ne2; ++i2) {
  11608. for (int64_t i1 = 0; i1 < ne1; ++i1) {
  11609. const int64_t pos = (w - i1 - 1) + i2;
  11610. for (int64_t i0 = 0; i0 < ne0; ++i0) {
  11611. dst_data[i2*ne1*ne0 + i1*ne0 + i0] = src0_data[pos*ne00 + i0];
  11612. }
  11613. }
  11614. }
  11615. }
  11616. static void ggml_compute_forward_get_rel_pos(
  11617. const struct ggml_compute_params * params,
  11618. const struct ggml_tensor * src0,
  11619. struct ggml_tensor * dst) {
  11620. switch (src0->type) {
  11621. case GGML_TYPE_F16:
  11622. {
  11623. ggml_compute_forward_get_rel_pos_f16(params, src0, dst);
  11624. } break;
  11625. default:
  11626. {
  11627. GGML_ASSERT(false);
  11628. } break;
  11629. }
  11630. }
  11631. // ggml_compute_forward_add_rel_pos
  11632. static void ggml_compute_forward_add_rel_pos_f32(
  11633. const struct ggml_compute_params * params,
  11634. const struct ggml_tensor * src0,
  11635. const struct ggml_tensor * src1,
  11636. const struct ggml_tensor * src2,
  11637. struct ggml_tensor * dst) {
  11638. const bool inplace = (bool) ((int32_t *) dst->op_params)[0];
  11639. if (!inplace && params->type == GGML_TASK_INIT) {
  11640. if (params->ith != 0) {
  11641. return;
  11642. }
  11643. memcpy((char *) dst->data, (char *) src0->data, ggml_nbytes(dst));
  11644. return;
  11645. }
  11646. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  11647. return;
  11648. }
  11649. int64_t t0 = ggml_perf_time_us();
  11650. UNUSED(t0);
  11651. // ref: https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/modeling/image_encoder.py#L357-L359
  11652. float * src1_data = (float *) src1->data;
  11653. float * src2_data = (float *) src2->data;
  11654. float * dst_data = (float *) dst->data;
  11655. const int64_t ne10 = src1->ne[0];
  11656. const int64_t ne11 = src1->ne[1];
  11657. const int64_t ne12 = src1->ne[2];
  11658. const int64_t ne13 = src1->ne[3];
  11659. const int ith = params->ith;
  11660. const int nth = params->nth;
  11661. // total patches in dst
  11662. const int np = ne13;
  11663. // patches per thread
  11664. const int dp = (np + nth - 1)/nth;
  11665. // patch range for this thread
  11666. const int ip0 = dp*ith;
  11667. const int ip1 = MIN(ip0 + dp, np);
  11668. for (int64_t i13 = ip0; i13 < ip1; ++i13) {
  11669. for (int64_t i12 = 0; i12 < ne12; ++i12) {
  11670. for (int64_t i11 = 0; i11 < ne11; ++i11) {
  11671. const int64_t jp1 = i13*ne12*ne11*ne10 + i12*ne11*ne10 + i11*ne10;
  11672. for (int64_t i10 = 0; i10 < ne10; ++i10) {
  11673. const int64_t jp0 = jp1 + i10;
  11674. const float src1_e = src1_data[jp0];
  11675. const float src2_e = src2_data[jp0];
  11676. const int64_t jdh = jp0 * ne10;
  11677. const int64_t jdw = jdh - (ne10 - 1) * i10;
  11678. for (int64_t j = 0; j < ne10; ++j) {
  11679. dst_data[jdh + j ] += src2_e;
  11680. dst_data[jdw + j*ne10] += src1_e;
  11681. }
  11682. }
  11683. }
  11684. }
  11685. }
  11686. }
  11687. static void ggml_compute_forward_add_rel_pos(
  11688. const struct ggml_compute_params * params,
  11689. const struct ggml_tensor * src0,
  11690. const struct ggml_tensor * src1,
  11691. const struct ggml_tensor * src2,
  11692. struct ggml_tensor * dst) {
  11693. switch (src0->type) {
  11694. case GGML_TYPE_F32:
  11695. {
  11696. ggml_compute_forward_add_rel_pos_f32(params, src0, src1, src2, dst);
  11697. } break;
  11698. default:
  11699. {
  11700. GGML_ASSERT(false);
  11701. } break;
  11702. }
  11703. }
  11704. // ggml_compute_forward_map_unary
  11705. static void ggml_compute_forward_map_unary_f32(
  11706. const struct ggml_compute_params * params,
  11707. const struct ggml_tensor * src0,
  11708. struct ggml_tensor * dst,
  11709. const ggml_unary_op_f32_t fun) {
  11710. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  11711. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  11712. return;
  11713. }
  11714. const int n = ggml_nrows(src0);
  11715. const int nc = src0->ne[0];
  11716. assert( dst->nb[0] == sizeof(float));
  11717. assert(src0->nb[0] == sizeof(float));
  11718. for (int i = 0; i < n; i++) {
  11719. fun(nc,
  11720. (float *) ((char *) dst->data + i*( dst->nb[1])),
  11721. (float *) ((char *) src0->data + i*(src0->nb[1])));
  11722. }
  11723. }
  11724. static void ggml_compute_forward_map_unary(
  11725. const struct ggml_compute_params * params,
  11726. const struct ggml_tensor * src0,
  11727. struct ggml_tensor * dst,
  11728. const ggml_unary_op_f32_t fun) {
  11729. switch (src0->type) {
  11730. case GGML_TYPE_F32:
  11731. {
  11732. ggml_compute_forward_map_unary_f32(params, src0, dst, fun);
  11733. } break;
  11734. default:
  11735. {
  11736. GGML_ASSERT(false);
  11737. } break;
  11738. }
  11739. }
  11740. // ggml_compute_forward_map_binary
  11741. static void ggml_compute_forward_map_binary_f32(
  11742. const struct ggml_compute_params * params,
  11743. const struct ggml_tensor * src0,
  11744. const struct ggml_tensor * src1,
  11745. struct ggml_tensor * dst,
  11746. const ggml_binary_op_f32_t fun) {
  11747. assert(params->ith == 0);
  11748. assert(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
  11749. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  11750. return;
  11751. }
  11752. const int n = ggml_nrows(src0);
  11753. const int nc = src0->ne[0];
  11754. assert( dst->nb[0] == sizeof(float));
  11755. assert(src0->nb[0] == sizeof(float));
  11756. assert(src1->nb[0] == sizeof(float));
  11757. for (int i = 0; i < n; i++) {
  11758. fun(nc,
  11759. (float *) ((char *) dst->data + i*( dst->nb[1])),
  11760. (float *) ((char *) src0->data + i*(src0->nb[1])),
  11761. (float *) ((char *) src1->data + i*(src1->nb[1])));
  11762. }
  11763. }
  11764. static void ggml_compute_forward_map_binary(
  11765. const struct ggml_compute_params * params,
  11766. const struct ggml_tensor * src0,
  11767. const struct ggml_tensor * src1,
  11768. struct ggml_tensor * dst,
  11769. const ggml_binary_op_f32_t fun) {
  11770. switch (src0->type) {
  11771. case GGML_TYPE_F32:
  11772. {
  11773. ggml_compute_forward_map_binary_f32(params, src0, src1, dst, fun);
  11774. } break;
  11775. default:
  11776. {
  11777. GGML_ASSERT(false);
  11778. } break;
  11779. }
  11780. }
  11781. // ggml_compute_forward_map_custom1
  11782. static void ggml_compute_forward_map_custom1_f32(
  11783. const struct ggml_compute_params * params,
  11784. const struct ggml_tensor * a,
  11785. struct ggml_tensor * dst,
  11786. const ggml_custom1_op_f32_t fun) {
  11787. assert(params->ith == 0);
  11788. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  11789. return;
  11790. }
  11791. fun(dst, a);
  11792. }
  11793. // ggml_compute_forward_map_custom2
  11794. static void ggml_compute_forward_map_custom2_f32(
  11795. const struct ggml_compute_params * params,
  11796. const struct ggml_tensor * a,
  11797. const struct ggml_tensor * b,
  11798. struct ggml_tensor * dst,
  11799. const ggml_custom2_op_f32_t fun) {
  11800. assert(params->ith == 0);
  11801. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  11802. return;
  11803. }
  11804. fun(dst, a, b);
  11805. }
  11806. // ggml_compute_forward_map_custom3
  11807. static void ggml_compute_forward_map_custom3_f32(
  11808. const struct ggml_compute_params * params,
  11809. const struct ggml_tensor * a,
  11810. const struct ggml_tensor * b,
  11811. const struct ggml_tensor * c,
  11812. struct ggml_tensor * dst,
  11813. const ggml_custom3_op_f32_t fun) {
  11814. assert(params->ith == 0);
  11815. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  11816. return;
  11817. }
  11818. fun(dst, a, b, c);
  11819. }
  11820. // ggml_compute_forward_map_custom1
  11821. static void ggml_compute_forward_map_custom1(
  11822. const struct ggml_compute_params * params,
  11823. const struct ggml_tensor * a,
  11824. struct ggml_tensor * dst) {
  11825. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  11826. return;
  11827. }
  11828. struct ggml_map_custom1_op_params * p = (struct ggml_map_custom1_op_params *) dst->op_params;
  11829. p->fun(dst, a, params->ith, params->nth, p->userdata);
  11830. }
  11831. // ggml_compute_forward_map_custom2
  11832. static void ggml_compute_forward_map_custom2(
  11833. const struct ggml_compute_params * params,
  11834. const struct ggml_tensor * a,
  11835. const struct ggml_tensor * b,
  11836. struct ggml_tensor * dst) {
  11837. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  11838. return;
  11839. }
  11840. struct ggml_map_custom2_op_params * p = (struct ggml_map_custom2_op_params *) dst->op_params;
  11841. p->fun(dst, a, b, params->ith, params->nth, p->userdata);
  11842. }
  11843. // ggml_compute_forward_map_custom3
  11844. static void ggml_compute_forward_map_custom3(
  11845. const struct ggml_compute_params * params,
  11846. const struct ggml_tensor * a,
  11847. const struct ggml_tensor * b,
  11848. const struct ggml_tensor * c,
  11849. struct ggml_tensor * dst) {
  11850. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  11851. return;
  11852. }
  11853. struct ggml_map_custom3_op_params * p = (struct ggml_map_custom3_op_params *) dst->op_params;
  11854. p->fun(dst, a, b, c, params->ith, params->nth, p->userdata);
  11855. }
  11856. // ggml_compute_forward_cross_entropy_loss
  11857. static void ggml_compute_forward_cross_entropy_loss_f32(
  11858. const struct ggml_compute_params * params,
  11859. const struct ggml_tensor * src0,
  11860. const struct ggml_tensor * src1,
  11861. struct ggml_tensor * dst) {
  11862. GGML_ASSERT(ggml_is_contiguous(src0));
  11863. GGML_ASSERT(ggml_is_contiguous(src1));
  11864. GGML_ASSERT(ggml_is_scalar(dst));
  11865. GGML_ASSERT(ggml_are_same_shape(src0, src1));
  11866. const int ith = params->ith;
  11867. const int nth = params->nth;
  11868. float * sums = (float *) params->wdata;
  11869. // TODO: handle transposed/permuted matrices
  11870. const int nc = src0->ne[0];
  11871. const int nr = ggml_nrows(src0);
  11872. GGML_ASSERT(params->wsize >= sizeof(float) * (nth + nth * nc));
  11873. if (params->type == GGML_TASK_INIT) {
  11874. if (ith == 0) {
  11875. memset(sums, 0, sizeof(float) * (nth + nth * nc));
  11876. }
  11877. return;
  11878. }
  11879. if (params->type == GGML_TASK_FINALIZE) {
  11880. if (ith == 0) {
  11881. float * dp = (float *) dst->data;
  11882. ggml_vec_sum_f32(nth, dp, sums);
  11883. dp[0] *= -1.0f / (float) nr;
  11884. }
  11885. return;
  11886. }
  11887. const double eps = 1e-9;
  11888. // rows per thread
  11889. const int dr = (nr + nth - 1)/nth;
  11890. // row range for this thread
  11891. const int ir0 = dr*ith;
  11892. const int ir1 = MIN(ir0 + dr, nr);
  11893. for (int i1 = ir0; i1 < ir1; i1++) {
  11894. float * s0 = (float *)((char *) src0->data + i1*src0->nb[1]);
  11895. float * s1 = (float *)((char *) src1->data + i1*src1->nb[1]);
  11896. float * st = ((float *) params->wdata) + nth + ith*nc;
  11897. #ifndef NDEBUG
  11898. for (int i = 0; i < nc; ++i) {
  11899. //printf("p[%d] = %f\n", i, p[i]);
  11900. assert(!isnan(s0[i]));
  11901. assert(!isnan(s1[i]));
  11902. }
  11903. #endif
  11904. // soft_max
  11905. ggml_float sum = 0.0;
  11906. {
  11907. float max = -INFINITY;
  11908. ggml_vec_max_f32(nc, &max, s0);
  11909. uint16_t scvt; UNUSED(scvt);
  11910. for (int i = 0; i < nc; i++) {
  11911. if (s0[i] == -INFINITY) {
  11912. st[i] = 0.0f;
  11913. } else {
  11914. #ifndef GGML_CROSS_ENTROPY_EXP_FP16
  11915. const float s = s0[i] - max;
  11916. const float val = expf(s);
  11917. #else
  11918. ggml_fp16_t s = GGML_FP32_TO_FP16(s0[i] - max);
  11919. memcpy(&scvt, &s, sizeof(scvt));
  11920. const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt]);
  11921. #endif
  11922. sum += (ggml_float)val;
  11923. st[i] = val;
  11924. }
  11925. }
  11926. assert(sum > 0.0);
  11927. // sum = 1.0/sum;
  11928. }
  11929. // avoid log(0) by rescaling from [0..1] to [eps..1]
  11930. sum = (1.0 - eps) / sum;
  11931. ggml_vec_scale_f32(nc, st, sum);
  11932. ggml_vec_add1_f32(nc, st, st, eps);
  11933. ggml_vec_log_f32(nc, st, st);
  11934. ggml_vec_mul_f32(nc, st, st, s1);
  11935. float st_sum = 0;
  11936. ggml_vec_sum_f32(nc, &st_sum, st);
  11937. sums[ith] += st_sum;
  11938. #ifndef NDEBUG
  11939. for (int i = 0; i < nc; ++i) {
  11940. assert(!isnan(st[i]));
  11941. assert(!isinf(st[i]));
  11942. }
  11943. #endif
  11944. }
  11945. }
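// Editorial note (not in the original source): per row i1 the loop above accumulates
//
//   sums[ith] += sum_i s1[i] * log((1 - eps)*softmax(s0)[i] + eps)
//
// and the FINALIZE step reduces this to dst = -(1/nr) * sum(sums), i.e. the mean
// cross-entropy over all rows, with eps guarding against log(0).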
  11946. static void ggml_compute_forward_cross_entropy_loss(
  11947. const struct ggml_compute_params * params,
  11948. const struct ggml_tensor * src0,
  11949. const struct ggml_tensor * src1,
  11950. struct ggml_tensor * dst) {
  11951. switch (src0->type) {
  11952. case GGML_TYPE_F32:
  11953. {
  11954. ggml_compute_forward_cross_entropy_loss_f32(params, src0, src1, dst);
  11955. } break;
  11956. default:
  11957. {
  11958. GGML_ASSERT(false);
  11959. } break;
  11960. }
  11961. }
  11962. // ggml_compute_forward_cross_entropy_loss_back
  11963. static void ggml_compute_forward_cross_entropy_loss_back_f32(
  11964. const struct ggml_compute_params * params,
  11965. const struct ggml_tensor * src0,
  11966. const struct ggml_tensor * src1,
  11967. const struct ggml_tensor * opt0,
  11968. struct ggml_tensor * dst) {
  11969. GGML_ASSERT(ggml_is_contiguous(dst));
  11970. GGML_ASSERT(ggml_is_contiguous(src0));
  11971. GGML_ASSERT(ggml_is_contiguous(src1));
  11972. GGML_ASSERT(ggml_is_contiguous(opt0));
  11973. GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
  11974. const int64_t ith = params->ith;
  11975. const int64_t nth = params->nth;
  11976. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  11977. return;
  11978. }
  11979. const double eps = 1e-9;
  11980. // TODO: handle transposed/permuted matrices
  11981. const int64_t nc = src0->ne[0];
  11982. const int64_t nr = ggml_nrows(src0);
  11983. // rows per thread
  11984. const int64_t dr = (nr + nth - 1)/nth;
  11985. // row range for this thread
  11986. const int64_t ir0 = dr*ith;
  11987. const int64_t ir1 = MIN(ir0 + dr, nr);
  11988. float * d = (float *) opt0->data;
  11989. for (int64_t i1 = ir0; i1 < ir1; i1++) {
  11990. float * ds0 = (float *)((char *) dst->data + i1*dst->nb[1]);
  11991. float * s0 = (float *)((char *) src0->data + i1*src0->nb[1]);
  11992. float * s1 = (float *)((char *) src1->data + i1*src1->nb[1]);
  11993. #ifndef NDEBUG
  11994. for (int i = 0; i < nc; ++i) {
  11995. //printf("p[%d] = %f\n", i, p[i]);
  11996. assert(!isnan(s0[i]));
  11997. assert(!isnan(s1[i]));
  11998. }
  11999. #endif
  12000. // soft_max
  12001. ggml_float sum = 0.0;
  12002. {
  12003. float max = -INFINITY;
  12004. ggml_vec_max_f32(nc, &max, s0);
  12005. uint16_t scvt; UNUSED(scvt);
  12006. for (int i = 0; i < nc; i++) {
  12007. if (s0[i] == -INFINITY) {
  12008. ds0[i] = 0.0f;
  12009. } else {
  12010. #ifndef GGML_CROSS_ENTROPY_EXP_FP16
  12011. const float s = s0[i] - max;
  12012. const float val = expf(s);
  12013. #else
  12014. ggml_fp16_t s = GGML_FP32_TO_FP16(s0[i] - max);
  12015. memcpy(&scvt, &s, sizeof(scvt));
  12016. const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt]);
  12017. #endif
  12018. sum += (ggml_float)val;
  12019. ds0[i] = val;
  12020. }
  12021. }
  12022. assert(sum > 0.0);
  12023. sum = (1.0 - eps)/sum;
  12024. }
  12025. // grad(src0) = (softmax(src0) - src1) * grad(cross_entropy_loss(src0, src1)) / nr
  12026. ggml_vec_scale_f32(nc, ds0, sum);
  12027. ggml_vec_add1_f32(nc, ds0, ds0, eps);
  12028. ggml_vec_sub_f32(nc, ds0, ds0, s1);
  12029. ggml_vec_scale_f32(nc, ds0, d[0] / (float) nr);
  12030. #ifndef NDEBUG
  12031. for (int i = 0; i < nc; ++i) {
  12032. assert(!isnan(ds0[i]));
  12033. assert(!isinf(ds0[i]));
  12034. }
  12035. #endif
  12036. }
  12037. }
  12038. static void ggml_compute_forward_cross_entropy_loss_back(
  12039. const struct ggml_compute_params * params,
  12040. const struct ggml_tensor * src0,
  12041. const struct ggml_tensor * src1,
  12042. const struct ggml_tensor * opt0,
  12043. struct ggml_tensor * dst) {
  12044. switch (src0->type) {
  12045. case GGML_TYPE_F32:
  12046. {
  12047. ggml_compute_forward_cross_entropy_loss_back_f32(params, src0, src1, opt0, dst);
  12048. } break;
  12049. default:
  12050. {
  12051. GGML_ASSERT(false);
  12052. } break;
  12053. }
  12054. }
  12055. /////////////////////////////////
  12056. static void ggml_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor) {
  12057. GGML_ASSERT(params);
  12058. if (tensor->op == GGML_OP_NONE) {
  12059. return;
  12060. }
  12061. #ifdef GGML_USE_CUBLAS
  12062. bool skip_cpu = ggml_cuda_compute_forward(params, tensor);
  12063. if (skip_cpu) {
  12064. return;
  12065. }
  12066. GGML_ASSERT(tensor->src[0] == NULL || tensor->src[0]->backend == GGML_BACKEND_CPU);
  12067. GGML_ASSERT(tensor->src[1] == NULL || tensor->src[1]->backend == GGML_BACKEND_CPU);
  12068. #endif // GGML_USE_CUBLAS
  12069. switch (tensor->op) {
  12070. case GGML_OP_DUP:
  12071. {
  12072. ggml_compute_forward_dup(params, tensor->src[0], tensor);
  12073. } break;
  12074. case GGML_OP_ADD:
  12075. {
  12076. ggml_compute_forward_add(params, tensor->src[0], tensor->src[1], tensor);
  12077. } break;
  12078. case GGML_OP_ADD1:
  12079. {
  12080. ggml_compute_forward_add1(params, tensor->src[0], tensor->src[1], tensor);
  12081. } break;
  12082. case GGML_OP_ACC:
  12083. {
  12084. ggml_compute_forward_acc(params, tensor->src[0], tensor->src[1], tensor);
  12085. } break;
  12086. case GGML_OP_SUB:
  12087. {
  12088. ggml_compute_forward_sub(params, tensor->src[0], tensor->src[1], tensor);
  12089. } break;
  12090. case GGML_OP_MUL:
  12091. {
  12092. ggml_compute_forward_mul(params, tensor->src[0], tensor->src[1], tensor);
  12093. } break;
  12094. case GGML_OP_DIV:
  12095. {
  12096. ggml_compute_forward_div(params, tensor->src[0], tensor->src[1], tensor);
  12097. } break;
  12098. case GGML_OP_SQR:
  12099. {
  12100. ggml_compute_forward_sqr(params, tensor->src[0], tensor);
  12101. } break;
  12102. case GGML_OP_SQRT:
  12103. {
  12104. ggml_compute_forward_sqrt(params, tensor->src[0], tensor);
  12105. } break;
  12106. case GGML_OP_LOG:
  12107. {
  12108. ggml_compute_forward_log(params, tensor->src[0], tensor);
  12109. } break;
  12110. case GGML_OP_SUM:
  12111. {
  12112. ggml_compute_forward_sum(params, tensor->src[0], tensor);
  12113. } break;
  12114. case GGML_OP_SUM_ROWS:
  12115. {
  12116. ggml_compute_forward_sum_rows(params, tensor->src[0], tensor);
  12117. } break;
  12118. case GGML_OP_MEAN:
  12119. {
  12120. ggml_compute_forward_mean(params, tensor->src[0], tensor);
  12121. } break;
  12122. case GGML_OP_ARGMAX:
  12123. {
  12124. ggml_compute_forward_argmax(params, tensor->src[0], tensor);
  12125. } break;
  12126. case GGML_OP_REPEAT:
  12127. {
  12128. ggml_compute_forward_repeat(params, tensor->src[0], tensor);
  12129. } break;
  12130. case GGML_OP_REPEAT_BACK:
  12131. {
  12132. ggml_compute_forward_repeat_back(params, tensor->src[0], tensor);
  12133. } break;
  12134. case GGML_OP_CONCAT:
  12135. {
  12136. ggml_compute_forward_concat(params, tensor->src[0], tensor->src[1], tensor);
  12137. } break;
  12138. case GGML_OP_SILU_BACK:
  12139. {
  12140. ggml_compute_forward_silu_back(params, tensor->src[0], tensor->src[1], tensor);
  12141. } break;
  12142. case GGML_OP_NORM:
  12143. {
  12144. ggml_compute_forward_norm(params, tensor->src[0], tensor);
  12145. } break;
  12146. case GGML_OP_RMS_NORM:
  12147. {
  12148. ggml_compute_forward_rms_norm(params, tensor->src[0], tensor);
  12149. } break;
  12150. case GGML_OP_RMS_NORM_BACK:
  12151. {
  12152. ggml_compute_forward_rms_norm_back(params, tensor->src[0], tensor->src[1], tensor);
  12153. } break;
  12154. case GGML_OP_GROUP_NORM:
  12155. {
  12156. ggml_compute_forward_group_norm(params, tensor->src[0], tensor);
  12157. } break;
  12158. case GGML_OP_MUL_MAT:
  12159. {
  12160. ggml_compute_forward_mul_mat(params, tensor->src[0], tensor->src[1], tensor);
  12161. } break;
  12162. case GGML_OP_MUL_MAT_ID:
  12163. {
  12164. ggml_compute_forward_mul_mat_id(params, tensor->src[0], tensor->src[1], tensor);
  12165. } break;
  12166. case GGML_OP_OUT_PROD:
  12167. {
  12168. ggml_compute_forward_out_prod(params, tensor->src[0], tensor->src[1], tensor);
  12169. } break;
  12170. case GGML_OP_SCALE:
  12171. {
  12172. ggml_compute_forward_scale(params, tensor->src[0], tensor);
  12173. } break;
  12174. case GGML_OP_SET:
  12175. {
  12176. ggml_compute_forward_set(params, tensor->src[0], tensor->src[1], tensor);
  12177. } break;
  12178. case GGML_OP_CPY:
  12179. {
  12180. ggml_compute_forward_cpy(params, tensor->src[0], tensor);
  12181. } break;
  12182. case GGML_OP_CONT:
  12183. {
  12184. ggml_compute_forward_cont(params, tensor->src[0], tensor);
  12185. } break;
  12186. case GGML_OP_RESHAPE:
  12187. {
  12188. ggml_compute_forward_reshape(params, tensor->src[0], tensor);
  12189. } break;
  12190. case GGML_OP_VIEW:
  12191. {
  12192. ggml_compute_forward_view(params, tensor->src[0]);
  12193. } break;
  12194. case GGML_OP_PERMUTE:
  12195. {
  12196. ggml_compute_forward_permute(params, tensor->src[0]);
  12197. } break;
  12198. case GGML_OP_TRANSPOSE:
  12199. {
  12200. ggml_compute_forward_transpose(params, tensor->src[0]);
  12201. } break;
  12202. case GGML_OP_GET_ROWS:
  12203. {
  12204. ggml_compute_forward_get_rows(params, tensor->src[0], tensor->src[1], tensor);
  12205. } break;
  12206. case GGML_OP_GET_ROWS_BACK:
  12207. {
  12208. ggml_compute_forward_get_rows_back(params, tensor->src[0], tensor->src[1], tensor);
  12209. } break;
  12210. case GGML_OP_DIAG:
  12211. {
  12212. ggml_compute_forward_diag(params, tensor->src[0], tensor);
  12213. } break;
  12214. case GGML_OP_DIAG_MASK_INF:
  12215. {
  12216. ggml_compute_forward_diag_mask_inf(params, tensor->src[0], tensor);
  12217. } break;
  12218. case GGML_OP_DIAG_MASK_ZERO:
  12219. {
  12220. ggml_compute_forward_diag_mask_zero(params, tensor->src[0], tensor);
  12221. } break;
  12222. case GGML_OP_SOFT_MAX:
  12223. {
  12224. ggml_compute_forward_soft_max(params, tensor->src[0], tensor->src[1], tensor);
  12225. } break;
  12226. case GGML_OP_SOFT_MAX_BACK:
  12227. {
  12228. ggml_compute_forward_soft_max_back(params, tensor->src[0], tensor->src[1], tensor);
  12229. } break;
  12230. case GGML_OP_ROPE:
  12231. {
  12232. ggml_compute_forward_rope(params, tensor->src[0], tensor->src[1], tensor);
  12233. } break;
  12234. case GGML_OP_ROPE_BACK:
  12235. {
  12236. ggml_compute_forward_rope_back(params, tensor->src[0], tensor->src[1], tensor);
  12237. } break;
  12238. case GGML_OP_ALIBI:
  12239. {
  12240. ggml_compute_forward_alibi(params, tensor->src[0], tensor);
  12241. } break;
  12242. case GGML_OP_CLAMP:
  12243. {
  12244. ggml_compute_forward_clamp(params, tensor->src[0], tensor);
  12245. } break;
  12246. case GGML_OP_CONV_TRANSPOSE_1D:
  12247. {
  12248. ggml_compute_forward_conv_transpose_1d(params, tensor->src[0], tensor->src[1], tensor);
  12249. } break;
  12250. case GGML_OP_IM2COL:
  12251. {
  12252. ggml_compute_forward_im2col(params, tensor->src[0], tensor->src[1], tensor);
  12253. } break;
  12254. case GGML_OP_CONV_TRANSPOSE_2D:
  12255. {
  12256. ggml_compute_forward_conv_transpose_2d(params, tensor->src[0], tensor->src[1], tensor);
  12257. } break;
  12258. case GGML_OP_POOL_1D:
  12259. {
  12260. ggml_compute_forward_pool_1d(params, tensor->src[0], tensor);
  12261. } break;
  12262. case GGML_OP_POOL_2D:
  12263. {
  12264. ggml_compute_forward_pool_2d(params, tensor->src[0], tensor);
  12265. } break;
  12266. case GGML_OP_UPSCALE:
  12267. {
  12268. ggml_compute_forward_upscale(params, tensor->src[0], tensor);
  12269. } break;
  12270. case GGML_OP_PAD:
  12271. {
  12272. ggml_compute_forward_pad(params, tensor->src[0], tensor);
  12273. } break;
  12274. case GGML_OP_ARGSORT:
  12275. {
  12276. ggml_compute_forward_argsort(params, tensor->src[0], tensor);
  12277. } break;
  12278. case GGML_OP_LEAKY_RELU:
  12279. {
  12280. ggml_compute_forward_leaky_relu(params, tensor->src[0], tensor);
  12281. } break;
  12282. case GGML_OP_FLASH_ATTN:
  12283. {
  12284. const int32_t t = ggml_get_op_params_i32(tensor, 0);
  12285. GGML_ASSERT(t == 0 || t == 1);
  12286. const bool masked = t != 0;
  12287. ggml_compute_forward_flash_attn(params, tensor->src[0], tensor->src[1], tensor->src[2], masked, tensor);
  12288. } break;
  12289. case GGML_OP_FLASH_FF:
  12290. {
  12291. ggml_compute_forward_flash_ff(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor->src[3], tensor->src[4], tensor);
  12292. } break;
  12293. case GGML_OP_FLASH_ATTN_BACK:
  12294. {
  12295. int32_t t = ggml_get_op_params_i32(tensor, 0);
  12296. GGML_ASSERT(t == 0 || t == 1);
  12297. bool masked = t != 0;
  12298. ggml_compute_forward_flash_attn_back(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor->src[3], masked, tensor);
  12299. } break;
  12300. case GGML_OP_WIN_PART:
  12301. {
  12302. ggml_compute_forward_win_part(params, tensor->src[0], tensor);
  12303. } break;
  12304. case GGML_OP_WIN_UNPART:
  12305. {
  12306. ggml_compute_forward_win_unpart(params, tensor->src[0], tensor);
  12307. } break;
  12308. case GGML_OP_UNARY:
  12309. {
  12310. ggml_compute_forward_unary(params, tensor->src[0], tensor);
  12311. } break;
  12312. case GGML_OP_GET_REL_POS:
  12313. {
  12314. ggml_compute_forward_get_rel_pos(params, tensor->src[0], tensor);
  12315. } break;
  12316. case GGML_OP_ADD_REL_POS:
  12317. {
  12318. ggml_compute_forward_add_rel_pos(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor);
  12319. } break;
  12320. case GGML_OP_MAP_UNARY:
  12321. {
  12322. ggml_unary_op_f32_t fun;
  12323. memcpy(&fun, tensor->op_params, sizeof(fun));
  12324. ggml_compute_forward_map_unary(params, tensor->src[0], tensor, fun);
  12325. }
  12326. break;
  12327. case GGML_OP_MAP_BINARY:
  12328. {
  12329. ggml_binary_op_f32_t fun;
  12330. memcpy(&fun, tensor->op_params, sizeof(fun));
  12331. ggml_compute_forward_map_binary(params, tensor->src[0], tensor->src[1], tensor, fun);
  12332. }
  12333. break;
  12334. case GGML_OP_MAP_CUSTOM1_F32:
  12335. {
  12336. ggml_custom1_op_f32_t fun;
  12337. memcpy(&fun, tensor->op_params, sizeof(fun));
  12338. ggml_compute_forward_map_custom1_f32(params, tensor->src[0], tensor, fun);
  12339. }
  12340. break;
  12341. case GGML_OP_MAP_CUSTOM2_F32:
  12342. {
  12343. ggml_custom2_op_f32_t fun;
  12344. memcpy(&fun, tensor->op_params, sizeof(fun));
  12345. ggml_compute_forward_map_custom2_f32(params, tensor->src[0], tensor->src[1], tensor, fun);
  12346. }
  12347. break;
  12348. case GGML_OP_MAP_CUSTOM3_F32:
  12349. {
  12350. ggml_custom3_op_f32_t fun;
  12351. memcpy(&fun, tensor->op_params, sizeof(fun));
  12352. ggml_compute_forward_map_custom3_f32(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor, fun);
  12353. }
  12354. break;
  12355. case GGML_OP_MAP_CUSTOM1:
  12356. {
  12357. ggml_compute_forward_map_custom1(params, tensor->src[0], tensor);
  12358. }
  12359. break;
  12360. case GGML_OP_MAP_CUSTOM2:
  12361. {
  12362. ggml_compute_forward_map_custom2(params, tensor->src[0], tensor->src[1], tensor);
  12363. }
  12364. break;
  12365. case GGML_OP_MAP_CUSTOM3:
  12366. {
  12367. ggml_compute_forward_map_custom3(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor);
  12368. }
  12369. break;
  12370. case GGML_OP_CROSS_ENTROPY_LOSS:
  12371. {
  12372. ggml_compute_forward_cross_entropy_loss(params, tensor->src[0], tensor->src[1], tensor);
  12373. }
  12374. break;
  12375. case GGML_OP_CROSS_ENTROPY_LOSS_BACK:
  12376. {
  12377. ggml_compute_forward_cross_entropy_loss_back(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor);
  12378. }
  12379. break;
  12380. case GGML_OP_NONE:
  12381. {
  12382. // nop
  12383. } break;
  12384. case GGML_OP_COUNT:
  12385. {
  12386. GGML_ASSERT(false);
  12387. } break;
  12388. }
  12389. }
  12390. ////////////////////////////////////////////////////////////////////////////////
  12391. static size_t ggml_hash_size(size_t min_sz) {
  12392. // next primes after powers of two
  12393. static const size_t primes[] = {
  12394. 2, 3, 5, 11, 17, 37, 67, 131, 257, 521, 1031,
  12395. 2053, 4099, 8209, 16411, 32771, 65537, 131101,
  12396. 262147, 524309, 1048583, 2097169, 4194319, 8388617,
  12397. 16777259, 33554467, 67108879, 134217757, 268435459,
  12398. 536870923, 1073741827, 2147483659
  12399. };
  12400. static const size_t n_primes = sizeof(primes)/sizeof(primes[0]);
12401. // find the smallest prime that is larger than or equal to min_sz
  12402. size_t l = 0;
  12403. size_t r = n_primes;
  12404. while (l < r) {
  12405. size_t m = (l + r)/2;
  12406. if (primes[m] < min_sz) {
  12407. l = m + 1;
  12408. } else {
  12409. r = m;
  12410. }
  12411. }
  12412. size_t sz = l < n_primes ? primes[l] : min_sz | 1;
  12413. return sz;
  12414. }
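// Editorial note (not in the original source): worked examples of the rounding above -
//   ggml_hash_size(4)    -> 5     (next prime in the table)
//   ggml_hash_size(1000) -> 1031
//   ggml_hash_size(3000000000) -> 3000000001  (beyond the table: min_sz | 1, i.e. made odd)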
  12415. static size_t ggml_hash(const void * p) {
  12416. return (size_t)p;
  12417. }
  12418. size_t ggml_hash_find(const struct ggml_hash_set hash_set, struct ggml_tensor * key) {
  12419. size_t h = ggml_hash(key) % hash_set.size;
  12420. // linear probing
  12421. size_t i = h;
  12422. while (hash_set.keys[i] != NULL && hash_set.keys[i] != key) {
  12423. i = (i + 1) % hash_set.size;
  12424. if (i == h) {
  12425. // visited all hash table entries -> not found
  12426. return GGML_HASHTABLE_FULL;
  12427. }
  12428. }
  12429. return i;
  12430. }
  12431. bool ggml_hash_contains(struct ggml_hash_set hash_set, struct ggml_tensor * key) {
  12432. size_t i = ggml_hash_find(hash_set, key);
  12433. return i != GGML_HASHTABLE_FULL && hash_set.keys[i] == key;
  12434. }
  12435. size_t ggml_hash_insert(struct ggml_hash_set hash_set, struct ggml_tensor * key) {
  12436. size_t i = ggml_hash_find(hash_set, key);
  12437. GGML_ASSERT(i != GGML_HASHTABLE_FULL);
  12438. if (hash_set.keys[i] == key) {
  12439. return GGML_HASHTABLE_ALREADY_EXISTS;
  12440. }
  12441. // insert
  12442. GGML_ASSERT(hash_set.keys[i] == NULL);
  12443. hash_set.keys[i] = key;
  12444. return i;
  12445. }
  12446. size_t ggml_hash_find_or_insert(struct ggml_hash_set hash_set, struct ggml_tensor * key) {
  12447. size_t i = ggml_hash_find(hash_set, key);
  12448. GGML_ASSERT(i != GGML_HASHTABLE_FULL);
  12449. hash_set.keys[i] = key;
  12450. return i;
  12451. }
  12452. struct ggml_hash_set ggml_hash_set_new(size_t size) {
  12453. size = ggml_hash_size(size);
  12454. struct ggml_hash_set result;
  12455. result.size = size;
  12456. result.keys = malloc(sizeof(struct ggml_tensor *) * size);
  12457. memset(result.keys, 0, sizeof(struct ggml_tensor *) * size);
  12458. return result;
  12459. }
  12460. static void ggml_hash_set_free(struct ggml_hash_set hash_set) {
  12461. free(hash_set.keys);
  12462. }
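// Editorial sketch (not part of ggml, not called anywhere): typical usage of the
// open-addressing hash set above - create, insert a key, test membership, free.
static inline bool ggml_hash_set_usage_example(struct ggml_tensor * key) {
    struct ggml_hash_set set = ggml_hash_set_new(16); // capacity is rounded up to a prime (17)
    ggml_hash_insert(set, key);                       // returns the slot index where key was placed
    const bool found = ggml_hash_contains(set, key);  // true - the key is present
    ggml_hash_set_free(set);
    return found;
}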
  12463. struct hash_map {
  12464. struct ggml_hash_set set;
  12465. struct ggml_tensor ** vals;
  12466. };
  12467. static struct hash_map * ggml_new_hash_map(size_t size) {
  12468. struct hash_map * result = malloc(sizeof(struct hash_map));
  12469. result->set = ggml_hash_set_new(size);
  12470. result->vals = malloc(sizeof(struct ggml_tensor *) * result->set.size);
  12471. memset(result->vals, 0, sizeof(struct ggml_tensor *) * result->set.size);
  12472. return result;
  12473. }
  12474. static void ggml_hash_map_free(struct hash_map * map) {
  12475. ggml_hash_set_free(map->set);
  12476. free(map->vals);
  12477. free(map);
  12478. }
  12479. // gradient checkpointing
  12480. static struct ggml_tensor * ggml_recompute_graph_node(
  12481. struct ggml_context * ctx,
  12482. struct ggml_cgraph * graph,
  12483. struct hash_map * replacements,
  12484. struct ggml_tensor * node) {
  12485. if (node == NULL) {
  12486. return NULL;
  12487. }
  12488. if (node->is_param) {
  12489. return node;
  12490. }
  12491. if (!ggml_hash_contains(graph->visited_hash_table, node)) {
  12492. return node;
  12493. }
  12494. int count_children = 0;
  12495. for (int k = 0; k < GGML_MAX_SRC; ++k) {
  12496. if (node->src[k]) {
  12497. ++count_children;
  12498. }
  12499. }
  12500. if (count_children == 0) {
  12501. return node;
  12502. }
  12503. size_t i = ggml_hash_find(replacements->set, node);
  12504. GGML_ASSERT(i != GGML_HASHTABLE_FULL); // assert that not full
  12505. if (replacements->set.keys[i] == node) {
  12506. return replacements->vals[i];
  12507. }
  12508. struct ggml_tensor * clone = ggml_new_tensor(ctx, node->type, GGML_MAX_DIMS, node->ne);
  12509. // insert clone into replacements
  12510. GGML_ASSERT(replacements->set.keys[i] == NULL); // assert that we don't overwrite
  12511. replacements->set.keys[i] = node;
  12512. replacements->vals[i] = clone;
  12513. clone->op = node->op;
  12514. clone->grad = node->grad;
  12515. clone->is_param = node->is_param;
  12516. clone->extra = node->extra;
  12517. for (int k = 0; k < GGML_MAX_DIMS; ++k) {
  12518. clone->nb[k] = node->nb[k];
  12519. }
  12520. for (int k = 0; k < GGML_MAX_SRC; ++k) {
  12521. clone->src[k] = ggml_recompute_graph_node(ctx, graph, replacements, node->src[k]);
  12522. }
  12523. if (node->view_src != NULL) {
  12524. clone->data = (node->view_src->data == NULL)
  12525. ? NULL // view_src not yet allocated
  12526. : (char *) node->view_src->data // view_src already allocated
  12527. + node->view_offs;
  12528. clone->view_src = node->view_src;
  12529. clone->view_offs = node->view_offs;
  12530. }
  12531. GGML_ASSERT(sizeof(node->op_params) == sizeof(int32_t) * (GGML_MAX_OP_PARAMS / sizeof(int32_t)));
  12532. GGML_ASSERT(sizeof(node->name) == GGML_MAX_NAME);
  12533. memcpy(clone->op_params, node->op_params, sizeof(node->op_params));
  12534. ggml_format_name(clone, "%s (clone)", ggml_get_name(node));
  12535. return clone;
  12536. }
  12537. void ggml_build_backward_gradient_checkpointing(
  12538. struct ggml_context * ctx,
  12539. struct ggml_cgraph * gf,
  12540. struct ggml_cgraph * gb,
  12541. struct ggml_cgraph * gb_tmp,
  12542. struct ggml_tensor * * checkpoints,
  12543. int n_checkpoints) {
  12544. ggml_graph_cpy(gf, gb_tmp);
  12545. ggml_build_backward_expand(ctx, gf, gb_tmp, true);
  12546. if (n_checkpoints <= 0) {
  12547. ggml_graph_cpy(gb_tmp, gb);
  12548. return;
  12549. }
  12550. struct hash_map * replacements = ggml_new_hash_map(gf->n_nodes + gf->n_leafs + n_checkpoints);
  12551. // insert checkpoints in replacements
  12552. for (int i = 0; i < n_checkpoints; ++i) {
  12553. size_t k = ggml_hash_find(replacements->set, checkpoints[i]);
  12554. GGML_ASSERT(k != GGML_HASHTABLE_FULL); // assert that not full
  12555. GGML_ASSERT(replacements->set.keys[k] == NULL); // assert that we don't overwrite
  12556. replacements->set.keys[k] = checkpoints[i];
  12557. replacements->vals[k] = checkpoints[i];
  12558. }
  12559. ggml_graph_cpy(gf, gb);
  12560. // rewrite gb_tmp->nodes[gf->n_nodes:gb_tmp->n_nodes],
  12561. // replacing references to gb_tmp->nodes[0:gf->n_nodes] ( == gf->nodes[0:gf->n_nodes]),
  12562. // by recomputing them from checkpoints
  12563. for (int i = gf->n_nodes; i<gb_tmp->n_nodes; ++i) {
  12564. struct ggml_tensor * node = gb_tmp->nodes[i];
  12565. for (int k = 0; k < GGML_MAX_SRC; ++k) {
12566. // insert new tensors recomputing src, reusing already made replacements,
12567. // remembering the new tensors with a mapping from the corresponding gf nodes,
12568. // and recursing into input tensors,
12569. // terminating when an input tensor is itself a replacement (like the checkpoints)
  12570. node->src[k] = ggml_recompute_graph_node(ctx, gf, replacements, node->src[k]);
  12571. }
  12572. // insert rewritten backward node with replacements made into resulting backward graph gb
  12573. ggml_build_forward_expand(gb, node);
  12574. }
  12575. ggml_hash_map_free(replacements);
  12576. }
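// Editorial usage sketch (not in the original source; loss, checkpoints and n_checkpoints are
// hypothetical caller-side names): a caller typically builds the forward graph, picks a few
// activation tensors as checkpoints, and lets everything in between be recomputed in gb:
//
//   struct ggml_cgraph * gf     = ggml_new_graph_custom(ctx, GGML_DEFAULT_GRAPH_SIZE, true);
//   struct ggml_cgraph * gb     = ggml_new_graph_custom(ctx, GGML_DEFAULT_GRAPH_SIZE, true);
//   struct ggml_cgraph * gb_tmp = ggml_new_graph_custom(ctx, GGML_DEFAULT_GRAPH_SIZE, true);
//   ggml_build_forward_expand(gf, loss);
//   ggml_build_backward_gradient_checkpointing(ctx, gf, gb, gb_tmp, checkpoints, n_checkpoints);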
12577. // functions to change gradients, handling the case that input a might be the initial gradient with zero value
  12578. static struct ggml_tensor * ggml_add_or_set(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, struct ggml_hash_set zero_table) {
  12579. if (ggml_hash_contains(zero_table, a)) {
  12580. return b;
  12581. } else {
  12582. return ggml_add_impl(ctx, a, b, false);
  12583. }
  12584. }
  12585. static struct ggml_tensor * ggml_acc_or_set(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, size_t nb1, size_t nb2, size_t nb3, size_t offset, struct ggml_hash_set zero_table) {
  12586. if (ggml_hash_contains(zero_table, a)) {
  12587. struct ggml_tensor * a_zero = ggml_scale(ctx, a, 0.0f);
  12588. return ggml_acc_impl(ctx, a_zero, b, nb1, nb2, nb3, offset, false);
  12589. } else {
  12590. return ggml_acc_impl(ctx, a, b, nb1, nb2, nb3, offset, false);
  12591. }
  12592. }
  12593. static struct ggml_tensor * ggml_add1_or_set(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, struct ggml_hash_set zero_table) {
  12594. if (ggml_hash_contains(zero_table, a)) {
  12595. return ggml_repeat(ctx, b, a);
  12596. } else {
  12597. return ggml_add1_impl(ctx, a, b, false);
  12598. }
  12599. }
  12600. static struct ggml_tensor * ggml_sub_or_set(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, struct ggml_hash_set zero_table) {
  12601. if (ggml_hash_contains(zero_table, a)) {
  12602. return ggml_neg(ctx, b);
  12603. } else {
  12604. return ggml_sub_impl(ctx, a, b, false);
  12605. }
  12606. }
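// summary of the helpers above, for the case where a is still the implicit zero gradient
// (i.e. a is found in zero_table):
//   ggml_add_or_set (a, b)        -> b
//   ggml_acc_or_set (a, b, nb..)  -> acc(scale(a, 0), b, nb..)   // a materialized as zeros
//   ggml_add1_or_set(a, b)        -> repeat(b, a)
//   ggml_sub_or_set (a, b)        -> neg(b)
// otherwise the regular add/acc/add1/sub is emitted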
  12607. static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor * tensor, struct ggml_hash_set zero_table) {
  12608. struct ggml_tensor * src0 = tensor->src[0];
  12609. struct ggml_tensor * src1 = tensor->src[1];
  12610. switch (tensor->op) {
  12611. case GGML_OP_DUP:
  12612. {
  12613. if (src0->grad) {
  12614. src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
  12615. }
  12616. } break;
  12617. case GGML_OP_ADD:
  12618. {
  12619. if (src0->grad) {
  12620. src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
  12621. }
  12622. if (src1->grad) {
  12623. src1->grad = ggml_add_or_set(ctx, src1->grad, tensor->grad, zero_table);
  12624. }
  12625. } break;
  12626. case GGML_OP_ADD1:
  12627. {
  12628. if (src0->grad) {
  12629. src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
  12630. }
  12631. if (src1->grad) {
  12632. src1->grad = ggml_add_or_set(ctx,
  12633. src1->grad,
  12634. ggml_mean(ctx, tensor->grad), // TODO: should probably be sum instead of mean
  12635. zero_table);
  12636. }
  12637. } break;
  12638. case GGML_OP_ACC:
  12639. {
  12640. if (src0->grad) {
  12641. src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
  12642. }
  12643. if (src1->grad) {
  12644. const size_t nb1 = ((int32_t *) tensor->op_params)[0];
  12645. const size_t nb2 = ((int32_t *) tensor->op_params)[1];
  12646. const size_t nb3 = ((int32_t *) tensor->op_params)[2];
  12647. const size_t offset = ((int32_t *) tensor->op_params)[3];
  12648. struct ggml_tensor * tensor_grad_view = ggml_view_4d(ctx,
  12649. tensor->grad,
  12650. src1->grad->ne[0],
  12651. src1->grad->ne[1],
  12652. src1->grad->ne[2],
  12653. src1->grad->ne[3],
  12654. nb1, nb2, nb3, offset);
  12655. src1->grad =
  12656. ggml_add_or_set(ctx,
  12657. src1->grad,
  12658. ggml_reshape(ctx,
  12659. ggml_cont(ctx, tensor_grad_view),
  12660. src1->grad),
  12661. zero_table);
  12662. }
  12663. } break;
  12664. case GGML_OP_SUB:
  12665. {
  12666. if (src0->grad) {
  12667. src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
  12668. }
  12669. if (src1->grad) {
  12670. src1->grad = ggml_sub_or_set(ctx, src1->grad, tensor->grad, zero_table);
  12671. }
  12672. } break;
  12673. case GGML_OP_MUL:
  12674. {
  12675. if (src0->grad) {
  12676. src0->grad =
  12677. ggml_add_or_set(ctx,
  12678. src0->grad,
  12679. ggml_mul(ctx, src1, tensor->grad),
  12680. zero_table);
  12681. }
  12682. if (src1->grad) {
  12683. src1->grad =
  12684. ggml_add_or_set(ctx,
  12685. src1->grad,
  12686. ggml_mul(ctx, src0, tensor->grad),
  12687. zero_table);
  12688. }
  12689. } break;
  12690. case GGML_OP_DIV:
  12691. {
  12692. if (src0->grad) {
  12693. src0->grad =
  12694. ggml_add_or_set(ctx,
  12695. src0->grad,
  12696. ggml_div(ctx, tensor->grad, src1),
  12697. zero_table);
  12698. }
  12699. if (src1->grad) {
  12700. src1->grad =
  12701. ggml_sub_or_set(ctx,
  12702. src1->grad,
  12703. ggml_mul(ctx,
  12704. tensor->grad,
  12705. ggml_div(ctx, tensor, src1)),
  12706. zero_table);
  12707. }
  12708. } break;
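// derivation for GGML_OP_DIV above: with t = a/b,
//   dt/da =  1/b    -> da += grad / b
//   dt/db = -a/b^2  -> db -= grad * (t / b)   (reusing the forward result t = a/b)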
  12709. case GGML_OP_SQR:
  12710. {
  12711. if (src0->grad) {
  12712. src0->grad =
  12713. ggml_add_or_set(ctx,
  12714. src0->grad,
  12715. ggml_scale(ctx,
  12716. ggml_mul(ctx, src0, tensor->grad),
  12717. 2.0f),
  12718. zero_table);
  12719. }
  12720. } break;
  12721. case GGML_OP_SQRT:
  12722. {
  12723. if (src0->grad) {
  12724. src0->grad =
  12725. ggml_add_or_set(ctx,
  12726. src0->grad,
  12727. ggml_scale(ctx,
  12728. ggml_div(ctx,
  12729. tensor->grad,
  12730. tensor),
  12731. 0.5f),
  12732. zero_table);
  12733. }
  12734. } break;
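// derivation for GGML_OP_SQRT above: with t = sqrt(a),
//   dt/da = 1/(2*sqrt(a)) = 0.5/t   -> da += 0.5 * grad / t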
  12735. case GGML_OP_LOG:
  12736. {
  12737. if (src0->grad) {
  12738. src0->grad =
  12739. ggml_add_or_set(ctx,
  12740. src0->grad,
  12741. ggml_div(ctx,
  12742. tensor->grad,
  12743. src0),
  12744. zero_table);
  12745. }
  12746. } break;
  12747. case GGML_OP_SUM:
  12748. {
  12749. if (src0->grad) {
  12750. src0->grad =
  12751. ggml_add1_or_set(ctx,
  12752. src0->grad,
  12753. tensor->grad,
  12754. zero_table);
  12755. }
  12756. } break;
  12757. case GGML_OP_SUM_ROWS:
  12758. {
  12759. if (src0->grad) {
  12760. src0->grad =
  12761. ggml_add_or_set(ctx,
  12762. src0->grad,
  12763. ggml_repeat(ctx,
  12764. tensor->grad,
  12765. src0->grad),
  12766. zero_table);
  12767. }
  12768. } break;
  12769. case GGML_OP_MEAN:
  12770. case GGML_OP_ARGMAX:
  12771. {
  12772. GGML_ASSERT(false); // TODO: implement
  12773. } break;
  12774. case GGML_OP_REPEAT:
  12775. {
  12776. // necessary for llama
  12777. if (src0->grad) {
  12778. src0->grad = ggml_add_or_set(ctx,
  12779. src0->grad,
  12780. ggml_repeat_back(ctx, tensor->grad, src0->grad),
  12781. zero_table);
  12782. }
  12783. } break;
  12784. case GGML_OP_REPEAT_BACK:
  12785. {
  12786. if (src0->grad) {
  12787. // TODO: test this
  12788. src0->grad = ggml_add_or_set(ctx,
  12789. src0->grad,
  12790. ggml_repeat(ctx, tensor->grad, src0->grad),
  12791. zero_table);
  12792. }
  12793. } break;
  12794. case GGML_OP_CONCAT:
  12795. {
  12796. GGML_ASSERT(false); // TODO: implement
  12797. } break;
  12798. case GGML_OP_SILU_BACK:
  12799. {
  12800. GGML_ASSERT(false); // TODO: not implemented
  12801. } break;
  12802. case GGML_OP_NORM:
  12803. {
  12804. GGML_ASSERT(false); // TODO: not implemented
  12805. } break;
  12806. case GGML_OP_RMS_NORM:
  12807. {
  12808. // necessary for llama
  12809. if (src0->grad) {
  12810. float eps;
  12811. memcpy(&eps, tensor->op_params, sizeof(float));
  12812. src0->grad = ggml_add_or_set(ctx,
  12813. src0->grad,
  12814. ggml_rms_norm_back(ctx, src0, tensor->grad, eps),
  12815. zero_table);
  12816. }
  12817. } break;
  12818. case GGML_OP_RMS_NORM_BACK:
  12819. {
  12820. GGML_ASSERT(false); // TODO: not implemented
  12821. } break;
  12822. case GGML_OP_GROUP_NORM:
  12823. {
  12824. GGML_ASSERT(false); // TODO: not implemented
  12825. } break;
  12826. case GGML_OP_MUL_MAT:
  12827. {
  12828. // https://cs231n.github.io/optimization-2/#staged
  12829. // # forward pass
  12830. // s0 = np.random.randn(5, 10)
  12831. // s1 = np.random.randn(10, 3)
  12832. // t = s0.dot(s1)
  12833. // # now suppose we had the gradient on t from above in the circuit
  12834. // dt = np.random.randn(*t.shape) # same shape as t
  12835. // ds0 = dt.dot(s1.T) #.T gives the transpose of the matrix
12836. // ds1 = s0.T.dot(dt)
  12837. // tensor.shape [m,p,qq,rr]
  12838. // src0.shape [n,m,q1,r1]
  12839. // src1.shape [n,p,qq,rr]
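// note: the shapes above use ggml order, i.e. [ne0, ne1, ne2, ne3] with ne0 being the contiguous
// (row) dimension; ggml_mul_mat contracts over ne0 of both operands, so src0 [n,m] x src1 [n,p] -> [m,p]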
  12840. // necessary for llama
  12841. if (src0->grad) {
  12842. struct ggml_tensor * s1_tg =
  12843. ggml_out_prod(ctx, // [n,m,qq,rr]
  12844. src1, // [n,p,qq,rr]
  12845. tensor->grad); // [m,p,qq,rr]
  12846. const int64_t qq = s1_tg->ne[2];
  12847. const int64_t rr = s1_tg->ne[3];
  12848. const int64_t q1 = src0->ne[2];
  12849. const int64_t r1 = src0->ne[3];
  12850. const bool ne2_broadcasted = qq > q1;
  12851. const bool ne3_broadcasted = rr > r1;
  12852. if (ne2_broadcasted || ne3_broadcasted) {
  12853. // sum broadcast repetitions of s1_tg into shape of src0
  12854. s1_tg = ggml_repeat_back(ctx, s1_tg, src0);
  12855. }
  12856. src0->grad =
  12857. ggml_add_or_set(ctx,
  12858. src0->grad, // [n,m,q1,r1]
  12859. s1_tg, // [n,m,q1,r1]
  12860. zero_table);
  12861. }
  12862. if (src1->grad) {
  12863. src1->grad =
  12864. ggml_add_or_set(ctx,
  12865. src1->grad, // [n,p,qq,rr]
  12866. // ggml_mul_mat(ctx, // [n,p,qq,rr]
  12867. // ggml_cont(ctx, // [m,n,q1,r1]
  12868. // ggml_transpose(ctx, src0)), // [m,n,q1,r1]
  12869. // tensor->grad), // [m,p,qq,rr]
  12870. // // when src0 is bigger than tensor->grad (this is mostly the case in llama),
  12871. // // avoid transpose of src0, rather transpose smaller tensor->grad
  12872. // // and then use ggml_out_prod
  12873. ggml_out_prod(ctx, // [n,p,qq,rr]
  12874. src0, // [n,m,q1,r1]
  12875. ggml_transpose(ctx, // [p,m,qq,rr]
  12876. tensor->grad)), // [m,p,qq,rr]
  12877. zero_table);
  12878. }
  12879. } break;
  12880. case GGML_OP_MUL_MAT_ID:
  12881. {
  12882. GGML_ASSERT(false); // TODO: not implemented
  12883. } break;
  12884. case GGML_OP_OUT_PROD:
  12885. {
  12886. GGML_ASSERT(false); // TODO: not implemented
  12887. } break;
  12888. case GGML_OP_SCALE:
  12889. {
  12890. // necessary for llama
  12891. if (src0->grad) {
  12892. float s;
  12893. memcpy(&s, tensor->op_params, sizeof(float));
  12894. src0->grad =
  12895. ggml_add_or_set(ctx,
  12896. src0->grad,
  12897. ggml_scale_impl(ctx, tensor->grad, s, false),
  12898. zero_table);
  12899. }
  12900. } break;
  12901. case GGML_OP_SET:
  12902. {
  12903. const size_t nb1 = ((int32_t *) tensor->op_params)[0];
  12904. const size_t nb2 = ((int32_t *) tensor->op_params)[1];
  12905. const size_t nb3 = ((int32_t *) tensor->op_params)[2];
  12906. const size_t offset = ((int32_t *) tensor->op_params)[3];
  12907. struct ggml_tensor * tensor_grad_view = NULL;
  12908. if (src0->grad || src1->grad) {
  12909. GGML_ASSERT(src0->type == tensor->type);
  12910. GGML_ASSERT(tensor->grad->type == tensor->type);
  12911. GGML_ASSERT(tensor->grad->type == src1->grad->type);
  12912. tensor_grad_view = ggml_view_4d(ctx,
  12913. tensor->grad,
  12914. src1->grad->ne[0],
  12915. src1->grad->ne[1],
  12916. src1->grad->ne[2],
  12917. src1->grad->ne[3],
  12918. nb1, nb2, nb3, offset);
  12919. }
  12920. if (src0->grad) {
  12921. src0->grad = ggml_add_or_set(ctx,
  12922. src0->grad,
  12923. ggml_acc_impl(ctx,
  12924. tensor->grad,
  12925. ggml_neg(ctx, tensor_grad_view),
  12926. nb1, nb2, nb3, offset, false),
  12927. zero_table);
  12928. }
  12929. if (src1->grad) {
  12930. src1->grad =
  12931. ggml_add_or_set(ctx,
  12932. src1->grad,
  12933. ggml_reshape(ctx,
  12934. ggml_cont(ctx, tensor_grad_view),
  12935. src1->grad),
  12936. zero_table);
  12937. }
  12938. } break;
  12939. case GGML_OP_CPY:
  12940. {
  12941. // necessary for llama
12942. // cpy overwrites the value of src1 with src0 and returns view(src1)
  12943. // the overwriting is mathematically equivalent to:
  12944. // tensor = src0 * 1 + src1 * 0
  12945. if (src0->grad) {
  12946. // dsrc0 = dtensor * 1
  12947. src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
  12948. }
  12949. if (src1->grad) {
  12950. // dsrc1 = dtensor * 0 -> noop
  12951. }
  12952. } break;
  12953. case GGML_OP_CONT:
  12954. {
  12955. // same as cpy
  12956. if (src0->grad) {
  12957. GGML_ASSERT(ggml_is_contiguous(src0->grad));
  12958. GGML_ASSERT(ggml_is_contiguous(tensor->grad));
  12959. src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
  12960. }
  12961. } break;
  12962. case GGML_OP_RESHAPE:
  12963. {
  12964. // necessary for llama
  12965. if (src0->grad) {
  12966. src0->grad =
  12967. ggml_add_or_set(ctx, src0->grad,
  12968. ggml_reshape(ctx,
  12969. ggml_is_contiguous(tensor->grad)
  12970. ? tensor->grad
  12971. : ggml_cont(ctx, tensor->grad),
  12972. src0->grad),
  12973. zero_table);
  12974. }
  12975. } break;
  12976. case GGML_OP_VIEW:
  12977. {
  12978. // necessary for llama
  12979. if (src0->grad) {
  12980. size_t offset;
  12981. memcpy(&offset, tensor->op_params, sizeof(offset));
  12982. size_t nb1 = tensor->nb[1];
  12983. size_t nb2 = tensor->nb[2];
  12984. size_t nb3 = tensor->nb[3];
  12985. if (src0->type != src0->grad->type) {
12986. // the gradient is typically F32, but src0 could be of a different type
  12987. size_t ng = ggml_element_size(src0->grad);
  12988. size_t n0 = ggml_element_size(src0);
  12989. GGML_ASSERT(offset % n0 == 0);
  12990. GGML_ASSERT(nb1 % n0 == 0);
  12991. GGML_ASSERT(nb2 % n0 == 0);
  12992. GGML_ASSERT(nb3 % n0 == 0);
  12993. offset = (offset / n0) * ng;
  12994. nb1 = (nb1 / n0) * ng;
  12995. nb2 = (nb2 / n0) * ng;
  12996. nb3 = (nb3 / n0) * ng;
  12997. }
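// worked example for the rescaling above: if src0 is F16 (n0 = 2 bytes) and src0->grad is F32
// (ng = 4 bytes), then a byte offset of 128 into src0 corresponds to 256 into the gradient,
// and nb1/nb2/nb3 are scaled by the same factor of 2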
  12998. src0->grad = ggml_acc_or_set(ctx, src0->grad, tensor->grad, nb1, nb2, nb3, offset, zero_table);
  12999. }
  13000. } break;
  13001. case GGML_OP_PERMUTE:
  13002. {
  13003. // necessary for llama
  13004. if (src0->grad) {
  13005. int32_t * axes = (int32_t *) tensor->op_params;
  13006. int axis0 = axes[0] & 0x3;
  13007. int axis1 = axes[1] & 0x3;
  13008. int axis2 = axes[2] & 0x3;
  13009. int axis3 = axes[3] & 0x3;
  13010. int axes_backward[4] = {0,0,0,0};
  13011. axes_backward[axis0] = 0;
  13012. axes_backward[axis1] = 1;
  13013. axes_backward[axis2] = 2;
  13014. axes_backward[axis3] = 3;
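// axes_backward is the inverse permutation, e.g. forward axes (1,2,0,3) -> backward axes (2,0,1,3)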
  13015. src0->grad =
  13016. ggml_add_or_set(ctx, src0->grad,
  13017. ggml_permute(ctx,
  13018. tensor->grad,
  13019. axes_backward[0],
  13020. axes_backward[1],
  13021. axes_backward[2],
  13022. axes_backward[3]),
  13023. zero_table);
  13024. }
  13025. } break;
  13026. case GGML_OP_TRANSPOSE:
  13027. {
  13028. // necessary for llama
  13029. if (src0->grad) {
  13030. src0->grad =
  13031. ggml_add_or_set(ctx, src0->grad,
  13032. ggml_transpose(ctx, tensor->grad),
  13033. zero_table);
  13034. }
  13035. } break;
  13036. case GGML_OP_GET_ROWS:
  13037. {
  13038. // necessary for llama (only for tokenizer)
  13039. if (src0->grad) {
  13040. src0->grad =
  13041. ggml_add_or_set(ctx, src0->grad,
13042. // the last ggml_get_rows_back argument, src0->grad, is only
13043. // needed to set up the correct output shape
  13044. ggml_get_rows_back(ctx, tensor->grad, src1, src0->grad),
  13045. zero_table);
  13046. }
  13047. if (src1->grad) {
  13048. // noop
  13049. }
  13050. } break;
  13051. case GGML_OP_GET_ROWS_BACK:
  13052. {
  13053. GGML_ASSERT(false); // TODO: not implemented
  13054. } break;
  13055. case GGML_OP_DIAG:
  13056. {
  13057. GGML_ASSERT(false); // TODO: not implemented
  13058. } break;
  13059. case GGML_OP_DIAG_MASK_INF:
  13060. {
  13061. // necessary for llama
  13062. if (src0->grad) {
  13063. const int n_past = ((int32_t *) tensor->op_params)[0];
  13064. src0->grad =
  13065. ggml_add_or_set(ctx, src0->grad,
  13066. /* ggml_diag_mask_inf_impl() shouldn't be here */
  13067. /* ref: https://github.com/ggerganov/llama.cpp/pull/4203#discussion_r1412377992 */
  13068. ggml_diag_mask_zero_impl(ctx, tensor->grad, n_past, false),
  13069. zero_table);
  13070. }
  13071. } break;
  13072. case GGML_OP_DIAG_MASK_ZERO:
  13073. {
  13074. // necessary for llama
  13075. if (src0->grad) {
  13076. const int n_past = ((int32_t *) tensor->op_params)[0];
  13077. src0->grad =
  13078. ggml_add_or_set(ctx, src0->grad,
  13079. ggml_diag_mask_zero_impl(ctx, tensor->grad, n_past, false),
  13080. zero_table);
  13081. }
  13082. } break;
  13083. case GGML_OP_SOFT_MAX:
  13084. {
  13085. // necessary for llama
  13086. if (src0->grad) {
  13087. src0->grad =
  13088. ggml_add_or_set(ctx, src0->grad,
  13089. ggml_soft_max_back(ctx, tensor->grad, tensor),
  13090. zero_table);
  13091. }
  13092. } break;
  13093. case GGML_OP_SOFT_MAX_BACK:
  13094. {
  13095. GGML_ASSERT(false); // TODO: not implemented
  13096. } break;
  13097. case GGML_OP_ROPE:
  13098. {
  13099. // necessary for llama
  13100. if (src0->grad) {
  13101. //const int n_past = ((int32_t *) tensor->op_params)[0];
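// op_params layout unpacked below: [0] n_past (unused here), [1] n_dims, [2] mode, [3] n_ctx,
// [4] n_orig_ctx, then floats at int32 slots 5..11 (freq_base, freq_scale, ext_factor, attn_factor,
// beta_fast, beta_slow, xpos_base) and the bool xpos_down at slot 12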
  13102. const int n_dims = ((int32_t *) tensor->op_params)[1];
  13103. const int mode = ((int32_t *) tensor->op_params)[2];
  13104. const int n_ctx = ((int32_t *) tensor->op_params)[3];
  13105. const int n_orig_ctx = ((int32_t *) tensor->op_params)[4];
  13106. float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow, xpos_base, xpos_down;
  13107. memcpy(&freq_base, (int32_t *) tensor->op_params + 5, sizeof(float));
  13108. memcpy(&freq_scale, (int32_t *) tensor->op_params + 6, sizeof(float));
  13109. memcpy(&ext_factor, (int32_t *) tensor->op_params + 7, sizeof(float));
  13110. memcpy(&attn_factor, (int32_t *) tensor->op_params + 8, sizeof(float));
  13111. memcpy(&beta_fast, (int32_t *) tensor->op_params + 9, sizeof(float));
  13112. memcpy(&beta_slow, (int32_t *) tensor->op_params + 10, sizeof(float));
  13113. memcpy(&xpos_base, (int32_t *) tensor->op_params + 11, sizeof(float));
  13114. memcpy(&xpos_down, (int32_t *) tensor->op_params + 12, sizeof(bool));
  13115. src0->grad = ggml_add_or_set(ctx,
  13116. src0->grad,
  13117. ggml_rope_back(ctx,
  13118. tensor->grad,
  13119. src1,
  13120. n_dims,
  13121. mode,
  13122. n_ctx,
  13123. n_orig_ctx,
  13124. freq_base,
  13125. freq_scale,
  13126. ext_factor,
  13127. attn_factor,
  13128. beta_fast,
  13129. beta_slow,
  13130. xpos_base,
  13131. xpos_down),
  13132. zero_table);
  13133. }
  13134. } break;
  13135. case GGML_OP_ROPE_BACK:
  13136. {
  13137. if (src0->grad) {
  13138. //const int n_past = ((int32_t *) tensor->op_params)[0];
  13139. const int n_dims = ((int32_t *) tensor->op_params)[1];
  13140. const int mode = ((int32_t *) tensor->op_params)[2];
  13141. const int n_ctx = ((int32_t *) tensor->op_params)[3];
  13142. const int n_orig_ctx = ((int32_t *) tensor->op_params)[4];
  13143. float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow, xpos_base, xpos_down;
  13144. memcpy(&freq_base, (int32_t *) tensor->op_params + 5, sizeof(float));
  13145. memcpy(&freq_scale, (int32_t *) tensor->op_params + 6, sizeof(float));
  13146. memcpy(&ext_factor, (int32_t *) tensor->op_params + 7, sizeof(float));
  13147. memcpy(&attn_factor, (int32_t *) tensor->op_params + 8, sizeof(float));
  13148. memcpy(&beta_fast, (int32_t *) tensor->op_params + 9, sizeof(float));
  13149. memcpy(&beta_slow, (int32_t *) tensor->op_params + 10, sizeof(float));
  13150. memcpy(&xpos_base, (int32_t *) tensor->op_params + 11, sizeof(float));
  13151. memcpy(&xpos_down, (int32_t *) tensor->op_params + 12, sizeof(bool));
  13152. src0->grad = ggml_add_or_set(ctx,
  13153. src0->grad,
  13154. ggml_rope_impl(ctx,
  13155. tensor->grad,
  13156. src1,
  13157. n_dims,
  13158. mode,
  13159. n_ctx,
  13160. n_orig_ctx,
  13161. freq_base,
  13162. freq_scale,
  13163. ext_factor,
  13164. attn_factor,
  13165. beta_fast,
  13166. beta_slow,
  13167. xpos_base,
  13168. xpos_down,
  13169. false),
  13170. zero_table);
  13171. }
  13172. } break;
  13173. case GGML_OP_ALIBI:
  13174. {
  13175. GGML_ASSERT(false); // TODO: not implemented
  13176. } break;
  13177. case GGML_OP_CLAMP:
  13178. {
  13179. GGML_ASSERT(false); // TODO: not implemented
  13180. } break;
  13181. case GGML_OP_CONV_TRANSPOSE_1D:
  13182. {
  13183. GGML_ASSERT(false); // TODO: not implemented
  13184. } break;
  13185. case GGML_OP_IM2COL:
  13186. {
  13187. GGML_ASSERT(false); // TODO: not implemented
  13188. } break;
  13189. case GGML_OP_CONV_TRANSPOSE_2D:
  13190. {
  13191. GGML_ASSERT(false); // TODO: not implemented
  13192. } break;
  13193. case GGML_OP_POOL_1D:
  13194. {
  13195. GGML_ASSERT(false); // TODO: not implemented
  13196. } break;
  13197. case GGML_OP_POOL_2D:
  13198. {
  13199. GGML_ASSERT(false); // TODO: not implemented
  13200. } break;
  13201. case GGML_OP_UPSCALE:
  13202. {
  13203. GGML_ASSERT(false); // TODO: not implemented
  13204. } break;
  13205. case GGML_OP_PAD:
  13206. {
  13207. GGML_ASSERT(false); // TODO: not implemented
  13208. } break;
  13209. case GGML_OP_ARGSORT:
  13210. {
  13211. GGML_ASSERT(false); // TODO: not implemented
  13212. } break;
  13213. case GGML_OP_LEAKY_RELU:
  13214. {
  13215. GGML_ASSERT(false); // TODO: not implemented
  13216. } break;
  13217. case GGML_OP_FLASH_ATTN:
  13218. {
  13219. struct ggml_tensor * flash_grad = NULL;
  13220. if (src0->grad || src1->grad || tensor->src[2]->grad) {
  13221. int32_t t = ggml_get_op_params_i32(tensor, 0);
  13222. GGML_ASSERT(t == 0 || t == 1);
  13223. bool masked = t != 0;
  13224. flash_grad =
  13225. ggml_flash_attn_back(ctx,
  13226. src0,
  13227. src1,
  13228. tensor->src[2],
  13229. tensor->grad,
  13230. masked);
  13231. }
  13232. struct ggml_tensor * src2 = tensor->src[2];
  13233. const int64_t elem_q = ggml_nelements(src0);
  13234. const int64_t elem_k = ggml_nelements(src1);
  13235. const int64_t elem_v = ggml_nelements(src2);
  13236. enum ggml_type result_type = flash_grad->type;
  13237. GGML_ASSERT(ggml_blck_size(result_type) == 1);
  13238. const size_t tsize = ggml_type_size(result_type);
  13239. const size_t offs_q = 0;
  13240. const size_t offs_k = offs_q + GGML_PAD(elem_q * tsize, GGML_MEM_ALIGN);
  13241. const size_t offs_v = offs_k + GGML_PAD(elem_k * tsize, GGML_MEM_ALIGN);
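// flash_grad packs the gradients for q, k and v contiguously; the offsets above mirror that
// layout (each segment padded to GGML_MEM_ALIGN), and the views below slice out dq, dk and dv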
  13242. if (src0->grad) {
  13243. struct ggml_tensor * view_q = ggml_view_1d(ctx, flash_grad, elem_q, offs_q);
  13244. struct ggml_tensor * grad_q = ggml_reshape(ctx, view_q, src0);
  13245. src0->grad = ggml_add_or_set(ctx,
  13246. src0->grad,
  13247. grad_q,
  13248. zero_table);
  13249. }
  13250. if (src1->grad) {
  13251. struct ggml_tensor * view_k = ggml_view_1d(ctx, flash_grad, elem_k, offs_k);
  13252. struct ggml_tensor * grad_k = ggml_reshape(ctx, view_k, src1);
  13253. src1->grad = ggml_add_or_set(ctx,
  13254. src1->grad,
  13255. grad_k,
  13256. zero_table);
  13257. }
  13258. if (src2->grad) {
  13259. struct ggml_tensor * view_v = ggml_view_1d(ctx, flash_grad, elem_v, offs_v);
  13260. struct ggml_tensor * grad_v = ggml_reshape(ctx, view_v, src2);
  13261. src2->grad = ggml_add_or_set(ctx,
  13262. src2->grad,
  13263. grad_v,
  13264. zero_table);
  13265. }
  13266. } break;
  13267. case GGML_OP_FLASH_FF:
  13268. {
  13269. GGML_ASSERT(false); // not supported
  13270. } break;
  13271. case GGML_OP_FLASH_ATTN_BACK:
  13272. {
  13273. GGML_ASSERT(false); // not supported
  13274. } break;
  13275. case GGML_OP_WIN_PART:
  13276. case GGML_OP_WIN_UNPART:
  13277. case GGML_OP_UNARY:
  13278. {
  13279. switch (ggml_get_unary_op(tensor)) {
  13280. case GGML_UNARY_OP_ABS:
  13281. {
  13282. if (src0->grad) {
  13283. src0->grad =
  13284. ggml_add_or_set(ctx,
  13285. src0->grad,
  13286. ggml_mul(ctx,
  13287. ggml_sgn(ctx, src0),
  13288. tensor->grad),
  13289. zero_table);
  13290. }
  13291. } break;
  13292. case GGML_UNARY_OP_SGN:
  13293. {
  13294. if (src0->grad) {
  13295. // noop
  13296. }
  13297. } break;
  13298. case GGML_UNARY_OP_NEG:
  13299. {
  13300. if (src0->grad) {
  13301. src0->grad = ggml_sub_or_set(ctx, src0->grad, tensor->grad, zero_table);
  13302. }
  13303. } break;
  13304. case GGML_UNARY_OP_STEP:
  13305. {
  13306. if (src0->grad) {
  13307. // noop
  13308. }
  13309. } break;
  13310. case GGML_UNARY_OP_TANH:
  13311. {
  13312. GGML_ASSERT(false); // TODO: not implemented
  13313. } break;
  13314. case GGML_UNARY_OP_ELU:
  13315. {
  13316. GGML_ASSERT(false); // TODO: not implemented
  13317. } break;
  13318. case GGML_UNARY_OP_RELU:
  13319. {
  13320. if (src0->grad) {
  13321. src0->grad = ggml_add_or_set(ctx,
  13322. src0->grad,
  13323. ggml_mul(ctx,
  13324. ggml_step(ctx, src0),
  13325. tensor->grad),
  13326. zero_table);
  13327. }
  13328. } break;
  13329. case GGML_UNARY_OP_GELU:
  13330. {
  13331. GGML_ASSERT(false); // TODO: not implemented
  13332. } break;
  13333. case GGML_UNARY_OP_GELU_QUICK:
  13334. {
  13335. GGML_ASSERT(false); // TODO: not implemented
  13336. } break;
  13337. case GGML_UNARY_OP_SILU:
  13338. {
  13339. // necessary for llama
  13340. if (src0->grad) {
  13341. src0->grad = ggml_add_or_set(ctx,
  13342. src0->grad,
  13343. ggml_silu_back(ctx, src0, tensor->grad),
  13344. zero_table);
  13345. }
  13346. } break;
  13347. default:
  13348. GGML_ASSERT(false);
  13349. }
  13350. } break;
  13351. case GGML_OP_GET_REL_POS:
  13352. case GGML_OP_ADD_REL_POS:
  13353. case GGML_OP_MAP_UNARY:
  13354. case GGML_OP_MAP_BINARY:
  13355. case GGML_OP_MAP_CUSTOM1_F32:
  13356. case GGML_OP_MAP_CUSTOM2_F32:
  13357. case GGML_OP_MAP_CUSTOM3_F32:
  13358. case GGML_OP_MAP_CUSTOM1:
  13359. case GGML_OP_MAP_CUSTOM2:
  13360. case GGML_OP_MAP_CUSTOM3:
  13361. {
  13362. GGML_ASSERT(false); // not supported
  13363. } break;
  13364. case GGML_OP_CROSS_ENTROPY_LOSS:
  13365. {
  13366. if (src0->grad) {
  13367. src0->grad = ggml_add_or_set(ctx,
  13368. src0->grad,
  13369. ggml_cross_entropy_loss_back(ctx,
  13370. src0,
  13371. src1,
  13372. tensor->grad),
  13373. zero_table);
  13374. }
  13375. } break;
  13376. case GGML_OP_CROSS_ENTROPY_LOSS_BACK:
  13377. {
  13378. GGML_ASSERT(false); // not supported
  13379. } break;
  13380. case GGML_OP_NONE:
  13381. {
  13382. // nop
  13383. } break;
  13384. case GGML_OP_COUNT:
  13385. {
  13386. GGML_ASSERT(false);
  13387. } break;
  13388. }
  13389. for (int i = 0; i < GGML_MAX_SRC; ++i) {
  13390. if (tensor->src[i] && tensor->src[i]->grad) {
  13391. GGML_ASSERT(ggml_are_same_shape(tensor->src[i], tensor->src[i]->grad));
  13392. }
  13393. }
  13394. }
  13395. static void ggml_visit_parents(struct ggml_cgraph * cgraph, struct ggml_tensor * node) {
  13396. if (node->grad == NULL) {
  13397. // this usually happens when we generate intermediate nodes from constants in the backward pass
  13398. // it can also happen during forward pass, if the user performs computations with constants
  13399. if (node->op != GGML_OP_NONE) {
  13400. //GGML_PRINT_DEBUG("%s: warning: node %p has no grad, but op %d\n", __func__, (void *) node, node->op);
  13401. }
  13402. }
  13403. // check if already visited
  13404. if (ggml_hash_insert(cgraph->visited_hash_table, node) == GGML_HASHTABLE_ALREADY_EXISTS) {
  13405. return;
  13406. }
  13407. for (int i = 0; i < GGML_MAX_SRC; ++i) {
  13408. const int k =
  13409. (cgraph->order == GGML_CGRAPH_EVAL_ORDER_LEFT_TO_RIGHT) ? i :
  13410. (cgraph->order == GGML_CGRAPH_EVAL_ORDER_RIGHT_TO_LEFT) ? (GGML_MAX_SRC-1-i) :
13411. /* unknown order, just fall back to using i */ i;
  13412. if (node->src[k]) {
  13413. ggml_visit_parents(cgraph, node->src[k]);
  13414. }
  13415. }
  13416. if (node->op == GGML_OP_NONE && node->grad == NULL) {
  13417. // reached a leaf node, not part of the gradient graph (e.g. a constant)
  13418. GGML_ASSERT(cgraph->n_leafs < cgraph->size);
  13419. if (strlen(node->name) == 0) {
  13420. ggml_format_name(node, "leaf_%d", cgraph->n_leafs);
  13421. }
  13422. cgraph->leafs[cgraph->n_leafs] = node;
  13423. cgraph->n_leafs++;
  13424. } else {
  13425. GGML_ASSERT(cgraph->n_nodes < cgraph->size);
  13426. if (strlen(node->name) == 0) {
  13427. ggml_format_name(node, "node_%d", cgraph->n_nodes);
  13428. }
  13429. cgraph->nodes[cgraph->n_nodes] = node;
  13430. if (cgraph->grads) {
  13431. cgraph->grads[cgraph->n_nodes] = node->grad;
  13432. }
  13433. cgraph->n_nodes++;
  13434. }
  13435. }
  13436. static void ggml_build_forward_impl(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor, bool expand) {
  13437. if (!expand) {
  13438. // TODO: this branch isn't accessible anymore, maybe move this to ggml_build_forward_expand
  13439. ggml_graph_clear(cgraph);
  13440. }
  13441. const int n0 = cgraph->n_nodes;
  13442. UNUSED(n0);
  13443. ggml_visit_parents(cgraph, tensor);
  13444. const int n_new = cgraph->n_nodes - n0;
  13445. GGML_PRINT_DEBUG("%s: visited %d new nodes\n", __func__, n_new);
  13446. if (n_new > 0) {
13447. // the last added node should always be the starting point
  13448. GGML_ASSERT(cgraph->nodes[cgraph->n_nodes - 1] == tensor);
  13449. }
  13450. }
  13451. void ggml_build_forward_expand(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor) {
  13452. ggml_build_forward_impl(cgraph, tensor, true);
  13453. }
  13454. void ggml_build_backward_expand(struct ggml_context * ctx, struct ggml_cgraph * gf, struct ggml_cgraph * gb, bool keep) {
  13455. GGML_ASSERT(gf->n_nodes > 0);
  13456. // if we are keeping the gradient graph, we have to detach the gradient nodes from the original graph
  13457. if (keep) {
  13458. for (int i = 0; i < gf->n_nodes; i++) {
  13459. struct ggml_tensor * node = gf->nodes[i];
  13460. if (node->grad) {
  13461. node->grad = ggml_dup_tensor(ctx, node);
  13462. gf->grads[i] = node->grad;
  13463. }
  13464. }
  13465. }
  13466. // remember original gradients which start with zero values
  13467. struct ggml_hash_set zero_table = ggml_hash_set_new(gf->size);
  13468. for (int i = 0; i < gf->n_nodes; i++) {
  13469. if (gf->grads[i]) {
  13470. ggml_hash_insert(zero_table, gf->grads[i]);
  13471. }
  13472. }
  13473. for (int i = gf->n_nodes - 1; i >= 0; i--) {
  13474. struct ggml_tensor * node = gf->nodes[i];
  13475. // inplace operations to add gradients are not created by ggml_compute_backward
  13476. // use allocator to automatically make inplace operations
  13477. if (node->grad) {
  13478. ggml_compute_backward(ctx, node, zero_table);
  13479. }
  13480. }
  13481. for (int i = 0; i < gf->n_nodes; i++) {
  13482. struct ggml_tensor * node = gf->nodes[i];
  13483. if (node->is_param) {
  13484. GGML_PRINT_DEBUG("%s: found root node %p\n", __func__, (void *) node);
  13485. ggml_build_forward_expand(gb, node->grad);
  13486. }
  13487. }
  13488. ggml_hash_set_free(zero_table);
  13489. }
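// a rough usage sketch for building and evaluating the backward graph (simplified, assuming a
// scalar loss tensor and parameters marked with ggml_set_param; error handling omitted):
//
//   struct ggml_cgraph * gf = ggml_new_graph_custom(ctx, GGML_DEFAULT_GRAPH_SIZE, true);
//   ggml_build_forward_expand(gf, loss);
//   struct ggml_cgraph * gb = ggml_graph_dup(ctx, gf);
//   ggml_build_backward_expand(ctx, gf, gb, false);
//   ggml_graph_reset(gf);                 // zero the gradients
//   ggml_set_f32(loss->grad, 1.0f);       // seed d(loss)/d(loss)
//   // ... then evaluate gb, e.g. with ggml_graph_compute_with_ctx(ctx, gb, n_threads)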
  13490. static size_t ggml_graph_nbytes(size_t size, bool grads) {
  13491. size_t nbytes = sizeof(struct ggml_cgraph);
  13492. nbytes += size * sizeof(struct ggml_tensor *) * 2; // leafs + nodes
  13493. if (grads) {
  13494. nbytes += size * sizeof(struct ggml_tensor *); // grads
  13495. }
  13496. nbytes += ggml_hash_size(size * 2) * sizeof(struct ggml_tensor *); // hash set
  13497. return nbytes;
  13498. }
  13499. size_t ggml_graph_overhead_custom(size_t size, bool grads) {
  13500. return GGML_OBJECT_SIZE + GGML_PAD(ggml_graph_nbytes(size, grads), GGML_MEM_ALIGN);
  13501. }
  13502. size_t ggml_graph_overhead(void) {
  13503. return ggml_graph_overhead_custom(GGML_DEFAULT_GRAPH_SIZE, false);
  13504. }
  13505. struct ggml_cgraph * ggml_new_graph_custom(struct ggml_context * ctx, size_t size, bool grads) {
  13506. const size_t obj_size = ggml_graph_nbytes(size, grads);
  13507. struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_GRAPH, obj_size);
  13508. struct ggml_cgraph * cgraph = (struct ggml_cgraph *) ((char *) ctx->mem_buffer + obj->offs);
  13509. struct ggml_tensor ** data_start = (struct ggml_tensor **) (cgraph + 1);
  13510. size_t hash_size = ggml_hash_size(size * 2);
  13511. struct ggml_tensor ** nodes_ptr = data_start;
  13512. struct ggml_tensor ** leafs_ptr = nodes_ptr + size;
  13513. struct ggml_tensor ** hash_keys_ptr = leafs_ptr + size;
  13514. struct ggml_tensor ** grads_ptr = grads ? hash_keys_ptr + hash_size : NULL;
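// layout of the single allocation: the cgraph header, then nodes[size], leafs[size],
// hash_keys[hash_size] and, only when grads is requested, grads[size]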
  13515. // check that we allocated the correct amount of memory
  13516. assert(obj_size == (size_t) (
  13517. (grads ? (char *)(grads_ptr + size) : (char *)(hash_keys_ptr + hash_size)) - (char *)cgraph));
  13518. memset(hash_keys_ptr, 0, hash_size * sizeof(struct ggml_tensor *));
  13519. *cgraph = (struct ggml_cgraph) {
  13520. /*.size =*/ size,
  13521. /*.n_nodes =*/ 0,
  13522. /*.n_leafs =*/ 0,
  13523. /*.nodes =*/ nodes_ptr,
  13524. /*.grads =*/ grads_ptr,
  13525. /*.leafs =*/ leafs_ptr,
13526. /*.visited_hash_table =*/ { hash_size, hash_keys_ptr },
  13527. /*.order =*/ GGML_CGRAPH_EVAL_ORDER_LEFT_TO_RIGHT,
  13528. /*.perf_runs =*/ 0,
  13529. /*.perf_cycles =*/ 0,
  13530. /*.perf_time_us =*/ 0,
  13531. };
  13532. return cgraph;
  13533. }
  13534. struct ggml_cgraph * ggml_new_graph(struct ggml_context * ctx) {
  13535. return ggml_new_graph_custom(ctx, GGML_DEFAULT_GRAPH_SIZE, false);
  13536. }
  13537. struct ggml_cgraph ggml_graph_view(struct ggml_cgraph * cgraph0, int i0, int i1) {
  13538. struct ggml_cgraph cgraph = {
  13539. /*.size =*/ 0,
  13540. /*.n_nodes =*/ i1 - i0,
  13541. /*.n_leafs =*/ 0,
  13542. /*.nodes =*/ cgraph0->nodes + i0,
  13543. /*.grads =*/ cgraph0->grads ? cgraph0->grads + i0 : NULL,
  13544. /*.leafs =*/ NULL,
13545. /*.visited_hash_table =*/ { 0, NULL },
  13546. /*.order =*/ cgraph0->order,
  13547. /*.perf_runs =*/ 0,
  13548. /*.perf_cycles =*/ 0,
  13549. /*.perf_time_us =*/ 0,
  13550. };
  13551. return cgraph;
  13552. }
  13553. void ggml_graph_cpy(struct ggml_cgraph * src, struct ggml_cgraph * dst) {
  13554. GGML_ASSERT(dst->size >= src->n_leafs);
  13555. GGML_ASSERT(dst->size >= src->n_nodes);
  13556. GGML_ASSERT(dst->visited_hash_table.size >= src->visited_hash_table.size);
  13557. dst->n_leafs = src->n_leafs;
  13558. dst->n_nodes = src->n_nodes;
  13559. dst->order = src->order;
  13560. for (int i = 0; i < src->n_leafs; ++i) {
  13561. dst->leafs[i] = src->leafs[i];
  13562. }
  13563. for (int i = 0; i < src->n_nodes; ++i) {
  13564. dst->nodes[i] = src->nodes[i];
  13565. }
  13566. if (src->grads) {
  13567. GGML_ASSERT(dst->grads != NULL);
  13568. for (int i = 0; i < src->n_nodes; ++i) {
  13569. dst->grads[i] = src->grads[i];
  13570. }
  13571. }
  13572. for (size_t i = 0; i < src->visited_hash_table.size; ++i) {
  13573. if (src->visited_hash_table.keys[i]) {
  13574. ggml_hash_insert(dst->visited_hash_table, src->visited_hash_table.keys[i]);
  13575. }
  13576. }
  13577. }
  13578. struct ggml_cgraph * ggml_graph_dup(struct ggml_context * ctx, struct ggml_cgraph * cgraph) {
  13579. struct ggml_cgraph * result = ggml_new_graph_custom(ctx, cgraph->size, cgraph->grads != NULL);
  13580. ggml_graph_cpy(cgraph, result);
  13581. return result;
  13582. }
  13583. void ggml_graph_reset(struct ggml_cgraph * cgraph) {
  13584. GGML_ASSERT(cgraph->grads != NULL);
  13585. for (int i = 0; i < cgraph->n_nodes; i++) {
  13586. struct ggml_tensor * grad = cgraph->grads[i];
  13587. if (grad) {
  13588. ggml_set_zero(grad);
  13589. }
  13590. }
  13591. }
  13592. void ggml_graph_clear(struct ggml_cgraph * cgraph) {
  13593. cgraph->n_leafs = 0;
  13594. cgraph->n_nodes = 0;
  13595. memset(cgraph->visited_hash_table.keys, 0, cgraph->visited_hash_table.size * sizeof(struct ggml_tensor *));
  13596. }
  13597. //
  13598. // thread data
  13599. //
  13600. // synchronization is done via busy loops
  13601. // I tried using spin locks, but not sure how to use them correctly - the things I tried were slower than busy loops
  13602. //
  13603. #ifdef __APPLE__
  13604. //#include <os/lock.h>
  13605. //
  13606. //typedef os_unfair_lock ggml_lock_t;
  13607. //
  13608. //#define ggml_lock_init(x) UNUSED(x)
  13609. //#define ggml_lock_destroy(x) UNUSED(x)
  13610. //#define ggml_lock_lock os_unfair_lock_lock
  13611. //#define ggml_lock_unlock os_unfair_lock_unlock
  13612. //
  13613. //#define GGML_LOCK_INITIALIZER OS_UNFAIR_LOCK_INIT
  13614. typedef int ggml_lock_t;
  13615. #define ggml_lock_init(x) UNUSED(x)
  13616. #define ggml_lock_destroy(x) UNUSED(x)
  13617. #define ggml_lock_lock(x) UNUSED(x)
  13618. #define ggml_lock_unlock(x) UNUSED(x)
  13619. #define GGML_LOCK_INITIALIZER 0
  13620. typedef pthread_t ggml_thread_t;
  13621. #define ggml_thread_create pthread_create
  13622. #define ggml_thread_join pthread_join
  13623. #else
  13624. //typedef pthread_spinlock_t ggml_lock_t;
  13625. //#define ggml_lock_init(x) pthread_spin_init(x, PTHREAD_PROCESS_PRIVATE)
  13626. //#define ggml_lock_destroy pthread_spin_destroy
  13627. //#define ggml_lock_lock pthread_spin_lock
  13628. //#define ggml_lock_unlock pthread_spin_unlock
  13629. typedef int ggml_lock_t;
  13630. #define ggml_lock_init(x) UNUSED(x)
  13631. #define ggml_lock_destroy(x) UNUSED(x)
  13632. #if defined(__x86_64__) || (defined(_MSC_VER) && defined(_M_AMD64))
  13633. #define ggml_lock_lock(x) _mm_pause()
  13634. #else
  13635. #define ggml_lock_lock(x) UNUSED(x)
  13636. #endif
  13637. #define ggml_lock_unlock(x) UNUSED(x)
  13638. #define GGML_LOCK_INITIALIZER 0
  13639. typedef pthread_t ggml_thread_t;
  13640. #define ggml_thread_create pthread_create
  13641. #define ggml_thread_join pthread_join
  13642. #endif
  13643. // Android's libc implementation "bionic" does not support setting affinity
  13644. #if defined(__linux__) && !defined(__BIONIC__)
  13645. static void set_numa_thread_affinity(int thread_n, int n_threads) {
  13646. if (!ggml_is_numa()) {
  13647. return;
  13648. }
  13649. // run thread on node_num thread_n / (threads per node)
  13650. const int node_num = thread_n / ((n_threads + g_state.numa.n_nodes - 1) / g_state.numa.n_nodes);
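// e.g. with n_threads = 8 and 2 NUMA nodes: 4 threads per node, so threads 0-3 pin to node 0
// and threads 4-7 pin to node 1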
  13651. struct ggml_numa_node * node = &g_state.numa.nodes[node_num];
  13652. size_t setsize = CPU_ALLOC_SIZE(g_state.numa.total_cpus);
  13653. cpu_set_t * cpus = CPU_ALLOC(g_state.numa.total_cpus);
  13654. CPU_ZERO_S(setsize, cpus);
  13655. for (size_t i = 0; i < node->n_cpus; ++i) {
  13656. CPU_SET_S(node->cpus[i], setsize, cpus);
  13657. }
  13658. int rv = pthread_setaffinity_np(pthread_self(), setsize, cpus);
  13659. if (rv) {
  13660. fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n",
  13661. strerror(rv));
  13662. }
  13663. CPU_FREE(cpus);
  13664. }
  13665. static void clear_numa_thread_affinity(void) {
  13666. if (!ggml_is_numa()) {
  13667. return;
  13668. }
  13669. size_t setsize = CPU_ALLOC_SIZE(g_state.numa.total_cpus);
  13670. cpu_set_t * cpus = CPU_ALLOC(g_state.numa.total_cpus);
  13671. CPU_ZERO_S(setsize, cpus);
  13672. for (unsigned i = 0; i < g_state.numa.total_cpus; ++i) {
  13673. CPU_SET_S(i, setsize, cpus);
  13674. }
  13675. int rv = pthread_setaffinity_np(pthread_self(), setsize, cpus);
  13676. if (rv) {
  13677. fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n",
  13678. strerror(rv));
  13679. }
  13680. CPU_FREE(cpus);
  13681. }
  13682. #else
  13683. // TODO: Windows etc.
  13684. // (the linux implementation may also work on BSD, someone should test)
  13685. static void set_numa_thread_affinity(int thread_n, int n_threads) { UNUSED(thread_n); UNUSED(n_threads); }
  13686. static void clear_numa_thread_affinity(void) {}
  13687. #endif
  13688. struct ggml_compute_state_shared {
  13689. const struct ggml_cgraph * cgraph;
  13690. const struct ggml_cplan * cplan;
  13691. int64_t perf_node_start_cycles;
  13692. int64_t perf_node_start_time_us;
  13693. const int n_threads;
  13694. // synchronization primitives
  13695. atomic_int n_active; // num active threads
  13696. atomic_int node_n; // active graph node
  13697. atomic_int node_task; // active graph node task phase
  13698. bool (*abort_callback)(void * data); // abort ggml_graph_compute when true
  13699. void * abort_callback_data;
  13700. };
  13701. struct ggml_compute_state {
  13702. ggml_thread_t thrd;
  13703. int ith;
  13704. struct ggml_compute_state_shared * shared;
  13705. };
  13706. static void ggml_graph_compute_perf_stats_node(struct ggml_tensor * node, const struct ggml_compute_state_shared * st) {
  13707. int64_t cycles_cur = ggml_perf_cycles() - st->perf_node_start_cycles;
  13708. int64_t time_us_cur = ggml_perf_time_us() - st->perf_node_start_time_us;
  13709. node->perf_runs++;
  13710. node->perf_cycles += cycles_cur;
  13711. node->perf_time_us += time_us_cur;
  13712. }
  13713. static int ggml_get_n_tasks(struct ggml_tensor * node, int n_threads) {
  13714. int n_tasks = 0;
  13715. switch (node->op) {
  13716. case GGML_OP_CPY:
  13717. case GGML_OP_DUP:
  13718. case GGML_OP_ADD:
  13719. case GGML_OP_ADD1:
  13720. case GGML_OP_ACC:
  13721. {
  13722. n_tasks = n_threads;
  13723. } break;
  13724. case GGML_OP_SUB:
  13725. case GGML_OP_SQR:
  13726. case GGML_OP_SQRT:
  13727. case GGML_OP_LOG:
  13728. case GGML_OP_SUM:
  13729. case GGML_OP_SUM_ROWS:
  13730. case GGML_OP_MEAN:
  13731. case GGML_OP_ARGMAX:
  13732. case GGML_OP_REPEAT:
  13733. case GGML_OP_REPEAT_BACK:
  13734. case GGML_OP_LEAKY_RELU:
  13735. {
  13736. n_tasks = 1;
  13737. } break;
  13738. case GGML_OP_UNARY:
  13739. switch (ggml_get_unary_op(node)) {
  13740. case GGML_UNARY_OP_ABS:
  13741. case GGML_UNARY_OP_SGN:
  13742. case GGML_UNARY_OP_NEG:
  13743. case GGML_UNARY_OP_STEP:
  13744. case GGML_UNARY_OP_TANH:
  13745. case GGML_UNARY_OP_ELU:
  13746. case GGML_UNARY_OP_RELU:
  13747. case GGML_UNARY_OP_HARDSWISH: // to opt for multiple threads
  13748. case GGML_UNARY_OP_HARDSIGMOID: // to opt for multiple threads
  13749. {
  13750. n_tasks = 1;
  13751. } break;
  13752. case GGML_UNARY_OP_GELU:
  13753. case GGML_UNARY_OP_GELU_QUICK:
  13754. case GGML_UNARY_OP_SILU:
  13755. {
  13756. n_tasks = n_threads;
  13757. } break;
  13758. default:
  13759. GGML_ASSERT(false);
  13760. }
  13761. break;
  13762. case GGML_OP_SILU_BACK:
  13763. case GGML_OP_MUL:
  13764. case GGML_OP_DIV:
  13765. case GGML_OP_NORM:
  13766. case GGML_OP_RMS_NORM:
  13767. case GGML_OP_RMS_NORM_BACK:
  13768. case GGML_OP_GROUP_NORM:
  13769. case GGML_OP_CONCAT:
  13770. {
  13771. n_tasks = n_threads;
  13772. } break;
  13773. case GGML_OP_MUL_MAT:
  13774. {
  13775. n_tasks = n_threads;
  13776. // TODO: use different scheduling for different matrix sizes
  13777. //const int nr0 = ggml_nrows(node->src[0]);
  13778. //const int nr1 = ggml_nrows(node->src[1]);
  13779. //n_tasks = MIN(n_threads, MAX(1, nr0/128));
  13780. //printf("nr0 = %8d, nr1 = %8d, nr0*nr1 = %8d, n_tasks%d\n", nr0, nr1, nr0*nr1, n_tasks);
  13781. } break;
  13782. case GGML_OP_MUL_MAT_ID:
  13783. {
  13784. n_tasks = n_threads;
  13785. } break;
  13786. case GGML_OP_OUT_PROD:
  13787. {
  13788. n_tasks = n_threads;
  13789. } break;
  13790. case GGML_OP_SCALE:
  13791. case GGML_OP_SET:
  13792. case GGML_OP_CONT:
  13793. case GGML_OP_RESHAPE:
  13794. case GGML_OP_VIEW:
  13795. case GGML_OP_PERMUTE:
  13796. case GGML_OP_TRANSPOSE:
  13797. case GGML_OP_GET_ROWS:
  13798. case GGML_OP_GET_ROWS_BACK:
  13799. case GGML_OP_DIAG:
  13800. {
  13801. n_tasks = 1;
  13802. } break;
  13803. case GGML_OP_DIAG_MASK_ZERO:
  13804. case GGML_OP_DIAG_MASK_INF:
  13805. case GGML_OP_SOFT_MAX_BACK:
  13806. case GGML_OP_ROPE:
  13807. case GGML_OP_ROPE_BACK:
  13808. case GGML_OP_ADD_REL_POS:
  13809. {
  13810. n_tasks = n_threads;
  13811. } break;
  13812. case GGML_OP_ALIBI:
  13813. {
  13814. n_tasks = 1; //TODO
  13815. } break;
  13816. case GGML_OP_CLAMP:
  13817. {
  13818. n_tasks = 1; //TODO
  13819. } break;
  13820. case GGML_OP_SOFT_MAX:
  13821. {
  13822. n_tasks = MIN(n_threads, ggml_nrows(node->src[0]));
  13823. } break;
  13824. case GGML_OP_CONV_TRANSPOSE_1D:
  13825. {
  13826. n_tasks = n_threads;
  13827. } break;
  13828. case GGML_OP_IM2COL:
  13829. {
  13830. n_tasks = n_threads;
  13831. } break;
  13832. case GGML_OP_CONV_TRANSPOSE_2D:
  13833. {
  13834. n_tasks = n_threads;
  13835. } break;
  13836. case GGML_OP_POOL_1D:
  13837. case GGML_OP_POOL_2D:
  13838. {
  13839. n_tasks = 1;
  13840. } break;
  13841. case GGML_OP_UPSCALE:
  13842. {
  13843. n_tasks = n_threads;
  13844. } break;
  13845. case GGML_OP_PAD:
  13846. {
  13847. n_tasks = n_threads;
  13848. } break;
  13849. case GGML_OP_ARGSORT:
  13850. {
  13851. n_tasks = n_threads;
  13852. } break;
  13853. case GGML_OP_FLASH_ATTN:
  13854. {
  13855. n_tasks = n_threads;
  13856. } break;
  13857. case GGML_OP_FLASH_FF:
  13858. {
  13859. n_tasks = n_threads;
  13860. } break;
  13861. case GGML_OP_FLASH_ATTN_BACK:
  13862. {
  13863. n_tasks = n_threads;
  13864. } break;
  13865. case GGML_OP_WIN_PART:
  13866. case GGML_OP_WIN_UNPART:
  13867. case GGML_OP_GET_REL_POS:
  13868. case GGML_OP_MAP_UNARY:
  13869. case GGML_OP_MAP_BINARY:
  13870. case GGML_OP_MAP_CUSTOM1_F32:
  13871. case GGML_OP_MAP_CUSTOM2_F32:
  13872. case GGML_OP_MAP_CUSTOM3_F32:
  13873. {
  13874. n_tasks = 1;
  13875. } break;
  13876. case GGML_OP_MAP_CUSTOM1:
  13877. {
  13878. struct ggml_map_custom1_op_params * p = (struct ggml_map_custom1_op_params *) node->op_params;
  13879. if (p->n_tasks == GGML_N_TASKS_MAX) {
  13880. n_tasks = n_threads;
  13881. } else {
  13882. n_tasks = MIN(p->n_tasks, n_threads);
  13883. }
  13884. } break;
  13885. case GGML_OP_MAP_CUSTOM2:
  13886. {
  13887. struct ggml_map_custom2_op_params * p = (struct ggml_map_custom2_op_params *) node->op_params;
  13888. if (p->n_tasks == GGML_N_TASKS_MAX) {
  13889. n_tasks = n_threads;
  13890. } else {
  13891. n_tasks = MIN(p->n_tasks, n_threads);
  13892. }
  13893. } break;
  13894. case GGML_OP_MAP_CUSTOM3:
  13895. {
  13896. struct ggml_map_custom3_op_params * p = (struct ggml_map_custom3_op_params *) node->op_params;
  13897. if (p->n_tasks == GGML_N_TASKS_MAX) {
  13898. n_tasks = n_threads;
  13899. } else {
  13900. n_tasks = MIN(p->n_tasks, n_threads);
  13901. }
  13902. } break;
  13903. case GGML_OP_CROSS_ENTROPY_LOSS:
  13904. {
  13905. n_tasks = n_threads;
  13906. } break;
  13907. case GGML_OP_CROSS_ENTROPY_LOSS_BACK:
  13908. {
  13909. n_tasks = n_threads;
  13910. } break;
  13911. case GGML_OP_NONE:
  13912. {
  13913. n_tasks = 1;
  13914. } break;
  13915. case GGML_OP_COUNT:
  13916. {
  13917. GGML_ASSERT(false);
  13918. } break;
  13919. default:
  13920. {
  13921. fprintf(stderr, "%s: op not implemented: ", __func__);
  13922. if (node->op < GGML_OP_COUNT) {
  13923. fprintf(stderr, "%s\n", ggml_op_name(node->op));
  13924. } else {
  13925. fprintf(stderr, "%d\n", node->op);
  13926. }
  13927. GGML_ASSERT(false);
  13928. } break;
  13929. }
  13930. assert(n_tasks > 0);
  13931. return n_tasks;
  13932. }
  13933. static void ggml_graph_compute_thread_sync_node(int * node_n, struct ggml_compute_state * state, const bool do_yield) {
  13934. // wait for other threads to finish
  13935. const int last_node_n = * node_n;
  13936. while (true) {
  13937. if (do_yield) {
  13938. sched_yield();
  13939. }
  13940. * node_n = atomic_load(&state->shared->node_n);
  13941. if (* node_n != last_node_n) break;
  13942. }
  13943. }
  13944. static void ggml_graph_compute_thread_sync_task(int * task_phase, struct ggml_compute_state * state, const bool do_yield) {
  13945. // wait for other threads to finish
  13946. const int last_task_phase = * task_phase;
  13947. while (true) {
  13948. if (do_yield) {
  13949. sched_yield();
  13950. }
  13951. * task_phase = atomic_load(&state->shared->node_task);
  13952. if (* task_phase != last_task_phase) break;
  13953. }
  13954. }
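// each graph node is processed in up to three phases (INIT, COMPUTE, FINALIZE): every thread
// decrements n_active when it finishes its part; the last thread to arrive advances the shared
// node_n / node_task and resets n_active, while the other threads spin in the helpers above
// until they observe the change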
  13955. static thread_ret_t ggml_graph_compute_thread(void * data) {
  13956. struct ggml_compute_state * state = (struct ggml_compute_state *) data;
  13957. const struct ggml_cgraph * cgraph = state->shared->cgraph;
  13958. const struct ggml_cplan * cplan = state->shared->cplan;
  13959. const int n_threads = state->shared->n_threads;
  13960. set_numa_thread_affinity(state->ith, n_threads);
  13961. int node_n = -1;
  13962. int task_phase = GGML_TASK_FINALIZE;
  13963. while (true) {
  13964. if (cplan->abort_callback && cplan->abort_callback(cplan->abort_callback_data)) {
  13965. state->shared->node_n += 1;
  13966. return (thread_ret_t) GGML_EXIT_ABORTED;
  13967. }
  13968. if (atomic_fetch_sub(&state->shared->n_active, 1) == 1) {
  13969. // all other threads are finished and spinning
13970. // do finalize and init here so we don't have to synchronize again
  13971. struct ggml_compute_params params = {
  13972. /*.type =*/ GGML_TASK_FINALIZE,
  13973. /*.ith =*/ 0,
  13974. /*.nth =*/ 0,
  13975. /*.wsize =*/ cplan->work_size,
  13976. /*.wdata =*/ cplan->work_data,
  13977. };
  13978. if (node_n != -1) {
  13979. /* FINALIZE */
  13980. struct ggml_tensor * node = cgraph->nodes[node_n];
  13981. if (GGML_OP_HAS_FINALIZE[node->op]) {
  13982. params.nth = ggml_get_n_tasks(node, n_threads);
  13983. ggml_compute_forward(&params, node);
  13984. }
  13985. ggml_graph_compute_perf_stats_node(node, state->shared);
  13986. }
13987. // distribute new work, or execute it directly here if it only needs a single thread
  13988. while (++node_n < cgraph->n_nodes) {
  13989. GGML_PRINT_DEBUG_5("%s: %d/%d\n", __func__, node_n, cgraph->n_nodes);
  13990. struct ggml_tensor * node = cgraph->nodes[node_n];
  13991. const int n_tasks = ggml_get_n_tasks(node, n_threads);
  13992. state->shared->perf_node_start_cycles = ggml_perf_cycles();
  13993. state->shared->perf_node_start_time_us = ggml_perf_time_us();
  13994. params.nth = n_tasks;
  13995. if (n_tasks == 1) {
  13996. /* INIT */
  13997. if (GGML_OP_HAS_INIT[node->op]) {
  13998. params.type = GGML_TASK_INIT;
  13999. ggml_compute_forward(&params, node);
  14000. }
14001. // TODO: maybe push node_n to the atomic, but only if other threads that see n_tasks == 1
14002. // can do something more efficient than spinning (?)
  14003. params.type = GGML_TASK_COMPUTE;
  14004. ggml_compute_forward(&params, node);
  14005. if (GGML_OP_HAS_FINALIZE[node->op]) {
  14006. params.type = GGML_TASK_FINALIZE;
  14007. ggml_compute_forward(&params, node);
  14008. }
  14009. ggml_graph_compute_perf_stats_node(node, state->shared);
  14010. } else {
  14011. break;
  14012. }
  14013. if (cplan->abort_callback && cplan->abort_callback(cplan->abort_callback_data)) {
  14014. break;
  14015. }
  14016. }
  14017. task_phase = GGML_TASK_INIT;
  14018. atomic_store(&state->shared->n_active, n_threads);
  14019. atomic_store(&state->shared->node_n, node_n);
  14020. atomic_store(&state->shared->node_task, task_phase);
  14021. } else {
  14022. ggml_graph_compute_thread_sync_node(&node_n, state, false);
  14023. ggml_graph_compute_thread_sync_task(&task_phase, state, false);
  14024. }
  14025. // check if we should stop
  14026. if (node_n >= cgraph->n_nodes) break;
  14027. /* INIT & COMPUTE */
  14028. struct ggml_tensor * node = cgraph->nodes[node_n];
  14029. const int n_tasks = ggml_get_n_tasks(node, n_threads);
  14030. struct ggml_compute_params params = {
  14031. /*.type =*/ GGML_TASK_INIT,
  14032. /*.ith =*/ state->ith,
  14033. /*.nth =*/ n_tasks,
  14034. /*.wsize =*/ cplan->work_size,
  14035. /*.wdata =*/ cplan->work_data,
  14036. };
  14037. if (state->ith < n_tasks) {
  14038. if (GGML_OP_HAS_INIT[node->op]) {
  14039. ggml_compute_forward(&params, node);
  14040. }
  14041. }
  14042. if (atomic_fetch_sub(&state->shared->n_active, 1) == 1) {
  14043. task_phase = GGML_TASK_COMPUTE;
  14044. atomic_store(&state->shared->n_active, n_threads);
  14045. atomic_store(&state->shared->node_task, task_phase);
  14046. }
  14047. else {
  14048. // TODO: this sched_yield can have significant impact on the performance - either positive or negative
  14049. // depending on the workload and the operating system.
  14050. // since it is not clear what is the best approach, it should potentially become user-configurable
  14051. // ref: https://github.com/ggerganov/ggml/issues/291
  14052. // UPD: adding the do_yield flag seems to resolve the issue universally
  14053. const bool do_yield = node_n < 0 || cgraph->nodes[node_n]->op == GGML_OP_MUL_MAT;
  14054. ggml_graph_compute_thread_sync_task(&task_phase, state, do_yield);
  14055. }
  14056. if (state->ith < n_tasks) {
  14057. params.type = GGML_TASK_COMPUTE;
  14058. ggml_compute_forward(&params, node);
  14059. }
  14060. if (atomic_fetch_sub(&state->shared->n_active, 1) == 1) {
  14061. task_phase = GGML_TASK_FINALIZE;
  14062. atomic_store(&state->shared->n_active, n_threads);
  14063. atomic_store(&state->shared->node_task, task_phase);
  14064. }
  14065. else {
  14066. ggml_graph_compute_thread_sync_task(&task_phase, state, false);
  14067. }
  14068. }
  14069. return GGML_EXIT_SUCCESS;
  14070. }
  14071. struct ggml_cplan ggml_graph_plan(const struct ggml_cgraph * cgraph, int n_threads) {
  14072. if (n_threads <= 0) {
  14073. n_threads = GGML_DEFAULT_N_THREADS;
  14074. }
  14075. size_t work_size = 0;
  14076. struct ggml_cplan cplan;
  14077. memset(&cplan, 0, sizeof(struct ggml_cplan));
  14078. // thread scheduling for the different operations + work buffer size estimation
  14079. for (int i = 0; i < cgraph->n_nodes; i++) {
  14080. struct ggml_tensor * node = cgraph->nodes[i];
  14081. const int n_tasks = ggml_get_n_tasks(node, n_threads);
  14082. size_t cur = 0;
  14083. switch (node->op) {
  14084. case GGML_OP_CPY:
  14085. case GGML_OP_DUP:
  14086. {
  14087. if (ggml_is_quantized(node->type)) {
  14088. cur = ggml_type_size(GGML_TYPE_F32) * node->ne[0] * n_tasks;
  14089. }
  14090. } break;
  14091. case GGML_OP_ADD:
  14092. case GGML_OP_ADD1:
  14093. {
  14094. if (ggml_is_quantized(node->src[0]->type)) {
  14095. cur = ggml_type_size(GGML_TYPE_F32) * node->src[0]->ne[0] * n_tasks;
  14096. }
  14097. } break;
  14098. case GGML_OP_ACC:
  14099. {
  14100. if (ggml_is_quantized(node->src[0]->type)) {
  14101. cur = ggml_type_size(GGML_TYPE_F32) * node->src[1]->ne[0] * n_tasks;
  14102. }
  14103. } break;
  14104. case GGML_OP_MUL_MAT:
  14105. {
  14106. const enum ggml_type vec_dot_type = type_traits[node->src[0]->type].vec_dot_type;
  14107. #if defined(GGML_USE_CLBLAST)
  14108. if (ggml_cl_can_mul_mat(node->src[0], node->src[1], node)) {
  14109. cur = ggml_cl_mul_mat_get_wsize(node->src[0], node->src[1], node);
  14110. } else
  14111. #endif
  14112. #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
  14113. if (ggml_compute_forward_mul_mat_use_blas(node)) {
  14114. if (node->src[0]->type != GGML_TYPE_F32) {
14115. // here we need memory for the fully dequantized matrix from src0,
14116. // taking into account that src0 can be broadcast into src1's dims [2,3]
  14117. cur = ggml_type_size(GGML_TYPE_F32)
  14118. * node->src[0]->ne[0]*node->src[0]->ne[1]
  14119. * node->src[1]->ne[2]*node->src[1]->ne[3];
  14120. }
  14121. } else
  14122. #endif
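// generic path: if src1 is not already in the type expected by the vec_dot kernel of src0
// (e.g. F32 activations with quantized weights), reserve space to convert each row of src1
// to vec_dot_type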
  14123. if (node->src[1]->type != vec_dot_type) {
  14124. cur = ggml_row_size(vec_dot_type, ggml_nelements(node->src[1]));
  14125. }
  14126. } break;
  14127. case GGML_OP_MUL_MAT_ID:
  14128. {
  14129. cur = 0;
  14130. const struct ggml_tensor * src0 = node->src[2];
  14131. const struct ggml_tensor * src1 = node->src[1];
  14132. const enum ggml_type vec_dot_type = type_traits[src0->type].vec_dot_type;
  14133. if (src1->type != vec_dot_type) {
  14134. cur += ggml_row_size(vec_dot_type, ggml_nelements(src1));
  14135. }
  14136. const int n_as = ggml_get_op_params_i32(node, 1);
  14137. cur += GGML_PAD(cur, sizeof(int64_t)); // align
  14138. cur += n_as * sizeof(int64_t); // matrix_row_counts
  14139. cur += n_as * src1->ne[1] * sizeof(int64_t); // matrix_rows
  14140. } break;
  14141. case GGML_OP_OUT_PROD:
  14142. {
  14143. if (ggml_is_quantized(node->src[0]->type)) {
  14144. cur = ggml_type_size(GGML_TYPE_F32) * node->src[0]->ne[0] * n_tasks;
  14145. }
  14146. } break;
  14147. case GGML_OP_SOFT_MAX:
  14148. case GGML_OP_ROPE:
  14149. {
  14150. cur = ggml_type_size(GGML_TYPE_F32) * node->ne[0] * n_tasks;
  14151. } break;
  14152. case GGML_OP_CONV_TRANSPOSE_1D:
  14153. {
  14154. GGML_ASSERT(node->src[0]->ne[3] == 1);
  14155. GGML_ASSERT(node->src[1]->ne[2] == 1);
  14156. GGML_ASSERT(node->src[1]->ne[3] == 1);
  14157. const int64_t ne00 = node->src[0]->ne[0]; // K
  14158. const int64_t ne01 = node->src[0]->ne[1]; // Cout
  14159. const int64_t ne02 = node->src[0]->ne[2]; // Cin
  14160. const int64_t ne10 = node->src[1]->ne[0]; // L
  14161. const int64_t ne11 = node->src[1]->ne[1]; // Cin
  14162. if (node->src[0]->type == GGML_TYPE_F16 &&
  14163. node->src[1]->type == GGML_TYPE_F32) {
  14164. cur += sizeof(ggml_fp16_t)*ne00*ne01*ne02;
  14165. cur += sizeof(ggml_fp16_t)*ne10*ne11;
  14166. } else if (node->src[0]->type == GGML_TYPE_F32 &&
  14167. node->src[1]->type == GGML_TYPE_F32) {
  14168. cur += sizeof(float)*ne00*ne01*ne02;
  14169. cur += sizeof(float)*ne10*ne11;
  14170. } else {
  14171. GGML_ASSERT(false);
  14172. }
  14173. } break;
  14174. case GGML_OP_CONV_TRANSPOSE_2D:
  14175. {
  14176. const int64_t ne00 = node->src[0]->ne[0]; // W
  14177. const int64_t ne01 = node->src[0]->ne[1]; // H
  14178. const int64_t ne02 = node->src[0]->ne[2]; // Channels Out
  14179. const int64_t ne03 = node->src[0]->ne[3]; // Channels In
  14180. const int64_t ne10 = node->src[1]->ne[0]; // W
  14181. const int64_t ne11 = node->src[1]->ne[1]; // H
  14182. const int64_t ne12 = node->src[1]->ne[2]; // Channels In
  14183. cur += sizeof(ggml_fp16_t)*ne00*ne01*ne02*ne03;
  14184. cur += sizeof(ggml_fp16_t)*ne10*ne11*ne12;
  14185. } break;
  14186. case GGML_OP_FLASH_ATTN:
  14187. {
  14188. const int64_t ne11 = ggml_up(node->src[1]->ne[1], GGML_SOFT_MAX_UNROLL);
  14189. if (node->src[1]->type == GGML_TYPE_F32) {
  14190. cur = sizeof(float)*ne11*n_tasks; // TODO: this can become (n_tasks-1)
  14191. cur += sizeof(float)*ne11*n_tasks; // this is overestimated by x2
  14192. } else if (node->src[1]->type == GGML_TYPE_F16) {
  14193. cur = sizeof(float)*ne11*n_tasks; // TODO: this can become (n_tasks-1)
  14194. cur += sizeof(float)*ne11*n_tasks; // this is overestimated by x2
  14195. }
  14196. } break;
  14197. case GGML_OP_FLASH_FF:
  14198. {
  14199. if (node->src[1]->type == GGML_TYPE_F32) {
  14200. cur = sizeof(float)*node->src[1]->ne[1]*n_tasks; // TODO: this can become (n_tasks-1)
  14201. cur += sizeof(float)*node->src[1]->ne[1]*n_tasks; // this is overestimated by x2
  14202. } else if (node->src[1]->type == GGML_TYPE_F16) {
  14203. cur = sizeof(float)*node->src[1]->ne[1]*n_tasks; // TODO: this can become (n_tasks-1)
  14204. cur += sizeof(float)*node->src[1]->ne[1]*n_tasks; // this is overestimated by x2
  14205. }
  14206. } break;
  14207. case GGML_OP_FLASH_ATTN_BACK:
  14208. {
  14209. const int64_t D = node->src[0]->ne[0];
  14210. const int64_t ne11 = ggml_up(node->src[1]->ne[1], GGML_SOFT_MAX_UNROLL);
  14211. const int64_t mxDn = MAX(D, ne11) * 2; // *2 because of S and SM in ggml_compute_forward_flash_attn_back
  14212. if (node->src[1]->type == GGML_TYPE_F32) {
  14213. cur = sizeof(float)*mxDn*n_tasks; // TODO: this can become (n_tasks-1)
  14214. cur += sizeof(float)*mxDn*n_tasks; // this is overestimated by x2
  14215. } else if (node->src[1]->type == GGML_TYPE_F16) {
  14216. cur = sizeof(float)*mxDn*n_tasks; // TODO: this can become (n_tasks-1)
  14217. cur += sizeof(float)*mxDn*n_tasks; // this is overestimated by x2
  14218. }
  14219. } break;
  14220. case GGML_OP_CROSS_ENTROPY_LOSS:
  14221. {
  14222. cur = ggml_type_size(node->type)*(n_tasks + node->src[0]->ne[0]*n_tasks);
  14223. } break;
  14224. case GGML_OP_COUNT:
  14225. {
  14226. GGML_ASSERT(false);
  14227. } break;
  14228. default:
  14229. break;
  14230. }
  14231. work_size = MAX(work_size, cur);
  14232. }
  14233. if (work_size > 0) {
  14234. work_size += CACHE_LINE_SIZE*(n_threads - 1);
  14235. }
  14236. cplan.n_threads = n_threads;
  14237. cplan.work_size = work_size;
  14238. cplan.work_data = NULL;
  14239. return cplan;
  14240. }
  14241. int ggml_graph_compute(struct ggml_cgraph * cgraph, struct ggml_cplan * cplan) {
  14242. {
  14243. GGML_ASSERT(cplan);
  14244. GGML_ASSERT(cplan->n_threads > 0);
  14245. if (cplan->work_size > 0) {
  14246. GGML_ASSERT(cplan->work_data);
  14247. }
  14248. }
  14249. const int n_threads = cplan->n_threads;
  14250. struct ggml_compute_state_shared state_shared = {
  14251. /*.cgraph =*/ cgraph,
  14252. /*.cgraph_plan =*/ cplan,
  14253. /*.perf_node_start_cycles =*/ 0,
  14254. /*.perf_node_start_time_us =*/ 0,
  14255. /*.n_threads =*/ n_threads,
  14256. /*.n_active =*/ n_threads,
  14257. /*.node_n =*/ -1,
  14258. /*.node_task =*/ GGML_TASK_FINALIZE,
  14259. /*.abort_callback =*/ NULL,
  14260. /*.abort_callback_data =*/ NULL,
  14261. };
  14262. struct ggml_compute_state * workers = alloca(sizeof(struct ggml_compute_state)*n_threads);
  14263. // create thread pool
  14264. if (n_threads > 1) {
  14265. for (int j = 1; j < n_threads; ++j) {
  14266. workers[j] = (struct ggml_compute_state) {
  14267. .thrd = 0,
  14268. .ith = j,
  14269. .shared = &state_shared,
  14270. };
  14271. const int rc = ggml_thread_create(&workers[j].thrd, NULL, ggml_graph_compute_thread, &workers[j]);
  14272. GGML_ASSERT(rc == 0);
  14273. UNUSED(rc);
  14274. }
  14275. }
  14276. workers[0].ith = 0;
  14277. workers[0].shared = &state_shared;
  14278. const int64_t perf_start_cycles = ggml_perf_cycles();
  14279. const int64_t perf_start_time_us = ggml_perf_time_us();
  14280. // this is a work thread too
  14281. int compute_status = (size_t) ggml_graph_compute_thread(&workers[0]);
  14282. // don't leave affinity set on the main thread
  14283. clear_numa_thread_affinity();
  14284. // join or kill thread pool
  14285. if (n_threads > 1) {
  14286. for (int j = 1; j < n_threads; j++) {
  14287. const int rc = ggml_thread_join(workers[j].thrd, NULL);
  14288. GGML_ASSERT(rc == 0);
  14289. }
  14290. }
  14291. // performance stats (graph)
  14292. {
  14293. int64_t perf_cycles_cur = ggml_perf_cycles() - perf_start_cycles;
  14294. int64_t perf_time_us_cur = ggml_perf_time_us() - perf_start_time_us;
  14295. cgraph->perf_runs++;
  14296. cgraph->perf_cycles += perf_cycles_cur;
  14297. cgraph->perf_time_us += perf_time_us_cur;
  14298. GGML_PRINT_DEBUG("%s: perf (%d) - cpu = %.3f / %.3f ms, wall = %.3f / %.3f ms\n",
  14299. __func__, cgraph->perf_runs,
  14300. (double) perf_cycles_cur / (double) ggml_cycles_per_ms(),
  14301. (double) cgraph->perf_cycles / (double) ggml_cycles_per_ms() / (double) cgraph->perf_runs,
  14302. (double) perf_time_us_cur / 1000.0,
  14303. (double) cgraph->perf_time_us / 1000.0 / cgraph->perf_runs);
  14304. }
  14305. return compute_status;
  14306. }
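// minimal usage sketch of ggml_graph_plan() / ggml_graph_compute() above (illustrative only;
// the graph `gf` and the thread count are placeholders, not part of this file):
//
//     struct ggml_cplan cplan = ggml_graph_plan(gf, /*n_threads =*/ 4);
//     uint8_t * work = NULL;
//     if (cplan.work_size > 0) {
//         work = malloc(cplan.work_size); // the caller owns the work buffer
//         cplan.work_data = work;
//     }
//     const int status = ggml_graph_compute(gf, &cplan);
//     free(work);
//
// note: work_size already includes CACHE_LINE_SIZE*(n_threads - 1) bytes of padding so that
// each thread's slice can start on its own cache line; ggml_graph_compute_with_ctx() below
// wraps the same pattern, taking the work buffer from the ggml context instead of the heap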
  14307. void ggml_graph_compute_with_ctx(struct ggml_context * ctx, struct ggml_cgraph * cgraph, int n_threads) {
  14308. struct ggml_cplan cplan = ggml_graph_plan(cgraph, n_threads);
  14309. struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_WORK_BUFFER, cplan.work_size);
  14310. cplan.work_data = (uint8_t *)ctx->mem_buffer + obj->offs;
  14311. ggml_graph_compute(cgraph, &cplan);
  14312. }
  14313. struct ggml_tensor * ggml_graph_get_tensor(struct ggml_cgraph * cgraph, const char * name) {
  14314. for (int i = 0; i < cgraph->n_leafs; i++) {
  14315. struct ggml_tensor * leaf = cgraph->leafs[i];
  14316. if (strcmp(leaf->name, name) == 0) {
  14317. return leaf;
  14318. }
  14319. }
  14320. for (int i = 0; i < cgraph->n_nodes; i++) {
  14321. struct ggml_tensor * node = cgraph->nodes[i];
  14322. if (strcmp(node->name, name) == 0) {
  14323. return node;
  14324. }
  14325. }
  14326. return NULL;
  14327. }
  14328. static void ggml_graph_export_leaf(const struct ggml_tensor * tensor, FILE * fout) {
  14329. const int64_t * ne = tensor->ne;
  14330. const size_t * nb = tensor->nb;
  14331. fprintf(fout, "%-6s %-12s %8d %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %16zu %16zu %16zu %16zu %16p %32s\n",
  14332. ggml_type_name(tensor->type),
  14333. ggml_op_name (tensor->op),
  14334. ggml_n_dims(tensor),
  14335. ne[0], ne[1], ne[2], ne[3],
  14336. nb[0], nb[1], nb[2], nb[3],
  14337. tensor->data,
  14338. tensor->name);
  14339. }
  14340. static void ggml_graph_export_node(const struct ggml_tensor * tensor, const char * arg, FILE * fout) {
  14341. const int64_t * ne = tensor->ne;
  14342. const size_t * nb = tensor->nb;
  14343. fprintf(fout, "%-6s %-6s %-12s %8d %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %16zu %16zu %16zu %16zu %16p %32s\n",
  14344. arg,
  14345. ggml_type_name(tensor->type),
  14346. ggml_op_name (tensor->op),
  14347. ggml_n_dims(tensor),
  14348. ne[0], ne[1], ne[2], ne[3],
  14349. nb[0], nb[1], nb[2], nb[3],
  14350. tensor->data,
  14351. tensor->name);
  14352. }
  14353. void ggml_graph_export(const struct ggml_cgraph * cgraph, const char * fname) {
  14354. uint64_t size_eval = 0;
  14355. // compute size of intermediate results
  14356. // TODO: does not take into account scratch buffers !!!!
  14357. for (int i = 0; i < cgraph->n_nodes; ++i) {
  14358. size_eval += ggml_nbytes_pad(cgraph->nodes[i]);
  14359. }
  14360. // print
  14361. {
  14362. FILE * fout = stdout;
  14363. fprintf(fout, "\n");
  14364. fprintf(fout, "%-16s %8x\n", "magic", GGML_FILE_MAGIC);
  14365. fprintf(fout, "%-16s %8d\n", "version", GGML_FILE_VERSION);
  14366. fprintf(fout, "%-16s %8d\n", "leafs", cgraph->n_leafs);
  14367. fprintf(fout, "%-16s %8d\n", "nodes", cgraph->n_nodes);
  14368. fprintf(fout, "%-16s %" PRIu64 "\n", "eval", size_eval);
  14369. // header
  14370. fprintf(fout, "\n");
  14371. fprintf(fout, "%-6s %-12s %8s %8s %8s %8s %8s %16s %16s %16s %16s %16s %16s\n",
  14372. "TYPE", "OP", "NDIMS", "NE0", "NE1", "NE2", "NE3", "NB0", "NB1", "NB2", "NB3", "DATA", "NAME");
  14373. for (int i = 0; i < cgraph->n_leafs; ++i) {
  14374. ggml_graph_export_leaf(cgraph->leafs[i], fout);
  14375. GGML_ASSERT(cgraph->leafs[i]->op == GGML_OP_NONE);
  14376. GGML_ASSERT(cgraph->leafs[i]->src[0] == NULL);
  14377. GGML_ASSERT(cgraph->leafs[i]->src[1] == NULL);
  14378. }
  14379. // header
  14380. fprintf(fout, "\n");
  14381. fprintf(fout, "%-6s %-6s %-12s %8s %8s %8s %8s %8s %16s %16s %16s %16s %8s %16s %16s\n",
  14382. "ARG", "TYPE", "OP", "NDIMS", "NE0", "NE1", "NE2", "NE3", "NB0", "NB1", "NB2", "NB3", "NTASKS", "DATA", "NAME");
  14383. for (int i = 0; i < cgraph->n_nodes; ++i) {
  14384. ggml_graph_export_node(cgraph->nodes[i], "DST", fout);
  14385. for (int j = 0; j < GGML_MAX_SRC; ++j) {
  14386. if (cgraph->nodes[i]->src[j]) {
  14387. ggml_graph_export_node(cgraph->nodes[i]->src[j], "SRC", fout);
  14388. }
  14389. }
  14390. fprintf(fout, "\n");
  14391. }
  14392. fprintf(fout, "\n");
  14393. }
  14394. // write binary data
  14395. {
  14396. FILE * fout = fopen(fname, "wb");
  14397. if (!fout) {
  14398. fprintf(stderr, "%s: failed to open %s\n", __func__, fname);
  14399. return;
  14400. }
  14401. // header
  14402. {
  14403. const uint32_t magic = GGML_FILE_MAGIC;
  14404. const uint32_t version = GGML_FILE_VERSION;
  14405. const uint32_t n_leafs = cgraph->n_leafs;
  14406. const uint32_t n_nodes = cgraph->n_nodes;
  14407. fwrite(&magic, sizeof(uint32_t), 1, fout);
  14408. fwrite(&version, sizeof(uint32_t), 1, fout);
  14409. fwrite(&n_leafs, sizeof(uint32_t), 1, fout);
  14410. fwrite(&n_nodes, sizeof(uint32_t), 1, fout);
  14411. fwrite(&size_eval, sizeof(uint64_t), 1, fout);
  14412. }
  14413. // leafs
  14414. {
  14415. for (int i = 0; i < cgraph->n_leafs; ++i) {
  14416. const struct ggml_tensor * tensor = cgraph->leafs[i];
  14417. const uint32_t type = tensor->type;
  14418. const uint32_t op = tensor->op;
  14419. fwrite(&type, sizeof(uint32_t), 1, fout);
  14420. fwrite(&op, sizeof(uint32_t), 1, fout);
  14421. for (int j = 0; j < GGML_MAX_DIMS; ++j) {
  14422. const uint64_t ne = tensor->ne[j];
  14423. const uint64_t nb = tensor->nb[j];
  14424. fwrite(&ne, sizeof(uint64_t), 1, fout);
  14425. fwrite(&nb, sizeof(uint64_t), 1, fout);
  14426. }
  14427. fwrite(tensor->name, sizeof(char), GGML_MAX_NAME, fout);
  14428. fwrite(tensor->op_params, sizeof(char), GGML_MAX_OP_PARAMS, fout);
  14429. // dump the data
  14430. // TODO: pad this to 32 byte boundary
  14431. {
  14432. const size_t size = ggml_nbytes(tensor);
  14433. fwrite(tensor->data, sizeof(char), size, fout);
  14434. }
  14435. }
  14436. }
  14437. // nodes
  14438. {
  14439. for (int i = 0; i < cgraph->n_nodes; ++i) {
  14440. const struct ggml_tensor * tensor = cgraph->nodes[i];
  14441. const uint32_t type = tensor->type;
  14442. const uint32_t op = tensor->op;
  14443. fwrite(&type, sizeof(uint32_t), 1, fout);
  14444. fwrite(&op, sizeof(uint32_t), 1, fout);
  14445. for (int j = 0; j < GGML_MAX_DIMS; ++j) {
  14446. const uint64_t ne = tensor->ne[j];
  14447. const uint64_t nb = tensor->nb[j];
  14448. fwrite(&ne, sizeof(uint64_t), 1, fout);
  14449. fwrite(&nb, sizeof(uint64_t), 1, fout);
  14450. }
  14451. fwrite(tensor->name, sizeof(char), GGML_MAX_NAME, fout);
  14452. fwrite(tensor->op_params, sizeof(char), GGML_MAX_OP_PARAMS, fout);
  14453. // output the op arguments
  14454. {
  14455. struct ggml_tensor * args[GGML_MAX_SRC] = { NULL };
  14456. for (int j = 0; j < GGML_MAX_SRC; ++j) {
  14457. args[j] = tensor->src[j];
  14458. }
  14459. for (int j = 0; j < GGML_MAX_SRC; ++j) {
  14460. if (args[j]) {
  14461. int32_t idx = -1;
  14462. // check if leaf
  14463. {
  14464. for (int k = 0; k < cgraph->n_leafs; ++k) {
  14465. if (args[j] == cgraph->leafs[k]) {
  14466. idx = k;
  14467. break;
  14468. }
  14469. }
  14470. }
  14471. // check if node
  14472. if (idx == -1) {
  14473. for (int k = 0; k < cgraph->n_nodes; ++k) {
  14474. if (args[j] == cgraph->nodes[k]) {
  14475. idx = cgraph->n_leafs + k;
  14476. break;
  14477. }
  14478. }
  14479. }
  14480. if (idx == -1) {
  14481. fprintf(stderr, "%s: failed to find tensor, arg = %d, node = %d\n", __func__, j, i);
  14482. fclose(fout);
  14483. return;
  14484. }
  14485. fwrite(&idx, sizeof(int32_t), 1, fout);
  14486. } else {
  14487. const int32_t nul = -1;
  14488. fwrite(&nul, sizeof(int32_t), 1, fout);
  14489. }
  14490. }
  14491. }
  14492. }
  14493. }
  14494. fclose(fout);
  14495. }
  14496. }
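// layout of the binary file written above (fields in host byte order, as emitted by fwrite):
//
//   header : uint32 magic, uint32 version, uint32 n_leafs, uint32 n_nodes, uint64 size_eval
//   leafs  : per leaf -> uint32 type, uint32 op,
//                        GGML_MAX_DIMS x { uint64 ne, uint64 nb },
//                        char name[GGML_MAX_NAME], char op_params[GGML_MAX_OP_PARAMS],
//                        ggml_nbytes(tensor) bytes of raw tensor data
//   nodes  : per node -> uint32 type, uint32 op,
//                        GGML_MAX_DIMS x { uint64 ne, uint64 nb },
//                        char name[GGML_MAX_NAME], char op_params[GGML_MAX_OP_PARAMS],
//                        GGML_MAX_SRC x int32 source index (-1 = unused; leafs are indexed
//                        first, followed by nodes), no tensor data
//
// ggml_graph_import() below parses exactly this layout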
  14497. struct ggml_cgraph * ggml_graph_import(const char * fname, struct ggml_context ** ctx_data, struct ggml_context ** ctx_eval) {
  14498. assert(*ctx_data == NULL);
  14499. assert(*ctx_eval == NULL);
  14500. struct ggml_cgraph * result = NULL;
  14501. struct ggml_tensor * data = NULL;
  14502. // read file into data
  14503. {
  14504. FILE * fin = fopen(fname, "rb");
  14505. if (!fin) {
  14506. fprintf(stderr, "%s: failed to open %s\n", __func__, fname);
  14507. return result;
  14508. }
  14509. size_t fsize = 0;
  14510. fseek(fin, 0, SEEK_END);
  14511. fsize = ftell(fin);
  14512. fseek(fin, 0, SEEK_SET);
  14513. // create the data context
  14514. {
  14515. const size_t overhead = 1*ggml_tensor_overhead();
  14516. struct ggml_init_params params = {
  14517. .mem_size = fsize + overhead,
  14518. .mem_buffer = NULL,
  14519. .no_alloc = false,
  14520. };
  14521. *ctx_data = ggml_init(params);
  14522. if (!*ctx_data) {
  14523. fprintf(stderr, "%s: failed to create ggml context\n", __func__);
  14524. fclose(fin);
  14525. return result;
  14526. }
  14527. }
  14528. data = ggml_new_tensor_1d(*ctx_data, GGML_TYPE_I8, fsize);
  14529. {
  14530. const size_t ret = fread(data->data, sizeof(char), fsize, fin);
  14531. if (ret != fsize) {
  14532. fprintf(stderr, "%s: failed to read %s\n", __func__, fname);
  14533. fclose(fin);
  14534. return result;
  14535. }
  14536. }
  14537. fclose(fin);
  14538. }
  14539. // populate result
  14540. {
  14541. char * ptr = (char *) data->data;
  14542. const uint32_t magic = *(const uint32_t *) ptr; ptr += sizeof(magic);
  14543. if (magic != GGML_FILE_MAGIC) {
  14544. fprintf(stderr, "%s: invalid magic number, got %08x\n", __func__, magic);
  14545. return result;
  14546. }
  14547. const uint32_t version = *(const uint32_t *) ptr; ptr += sizeof(version);
  14548. if (version != GGML_FILE_VERSION) {
  14549. fprintf(stderr, "%s: invalid version number\n", __func__);
  14550. return result;
  14551. }
  14552. const uint32_t n_leafs = *(const uint32_t *) ptr; ptr += sizeof(n_leafs);
  14553. const uint32_t n_nodes = *(const uint32_t *) ptr; ptr += sizeof(n_nodes);
  14554. const uint64_t size_eval = *(const uint64_t *) ptr; ptr += sizeof(size_eval);
  14555. const int graph_size = MAX(n_leafs, n_nodes);
  14556. // create the data context
  14557. {
  14558. const size_t overhead = (n_leafs + n_nodes)*ggml_tensor_overhead() + ggml_graph_overhead_custom(graph_size, false);
  14559. struct ggml_init_params params = {
  14560. .mem_size = size_eval + overhead,
  14561. .mem_buffer = NULL,
  14562. .no_alloc = true,
  14563. };
  14564. *ctx_eval = ggml_init(params);
  14565. if (!*ctx_eval) {
  14566. fprintf(stderr, "%s: failed to create ggml context\n", __func__);
  14567. return result;
  14568. }
  14569. }
  14570. result = ggml_new_graph_custom(*ctx_eval, graph_size, false);
  14571. result->n_leafs = n_leafs;
  14572. result->n_nodes = n_nodes;
  14573. // leafs
  14574. {
  14575. uint32_t type;
  14576. uint32_t op;
  14577. for (uint32_t i = 0; i < n_leafs; ++i) {
  14578. type = *(const uint32_t *) ptr; ptr += sizeof(type);
  14579. op = *(const uint32_t *) ptr; ptr += sizeof(op);
  14580. int64_t ne[GGML_MAX_DIMS];
  14581. size_t nb[GGML_MAX_DIMS];
  14582. for (int j = 0; j < GGML_MAX_DIMS; ++j) {
  14583. uint64_t ne_cur;
  14584. uint64_t nb_cur;
  14585. ne_cur = *(const uint64_t *) ptr; ptr += sizeof(ne_cur);
  14586. nb_cur = *(const uint64_t *) ptr; ptr += sizeof(nb_cur);
  14587. ne[j] = ne_cur;
  14588. nb[j] = nb_cur;
  14589. }
  14590. struct ggml_tensor * tensor = ggml_new_tensor(*ctx_eval, (enum ggml_type) type, GGML_MAX_DIMS, ne);
  14591. tensor->op = (enum ggml_op) op;
  14592. memcpy(tensor->name, ptr, GGML_MAX_NAME); ptr += GGML_MAX_NAME;
  14593. memcpy(tensor->op_params, ptr, GGML_MAX_OP_PARAMS); ptr += GGML_MAX_OP_PARAMS;
  14594. tensor->data = (void *) ptr;
  14595. for (int j = 0; j < GGML_MAX_DIMS; ++j) {
  14596. tensor->nb[j] = nb[j];
  14597. }
  14598. result->leafs[i] = tensor;
  14599. ptr += ggml_nbytes(tensor);
  14600. fprintf(stderr, "%s: loaded leaf %d: '%16s', %9zu bytes\n", __func__, i, tensor->name, ggml_nbytes(tensor));
  14601. }
  14602. }
  14603. ggml_set_no_alloc(*ctx_eval, false);
  14604. // nodes
  14605. {
  14606. uint32_t type;
  14607. uint32_t op;
  14608. for (uint32_t i = 0; i < n_nodes; ++i) {
  14609. type = *(const uint32_t *) ptr; ptr += sizeof(type);
  14610. op = *(const uint32_t *) ptr; ptr += sizeof(op);
  14611. enum ggml_op eop = (enum ggml_op) op;
  14612. int64_t ne[GGML_MAX_DIMS];
  14613. size_t nb[GGML_MAX_DIMS];
  14614. for (int j = 0; j < GGML_MAX_DIMS; ++j) {
  14615. uint64_t ne_cur;
  14616. uint64_t nb_cur;
  14617. ne_cur = *(const uint64_t *) ptr; ptr += sizeof(ne_cur);
  14618. nb_cur = *(const uint64_t *) ptr; ptr += sizeof(nb_cur);
  14619. ne[j] = ne_cur;
  14620. nb[j] = nb_cur;
  14621. }
  14622. const char * ptr_name = ptr; ptr += GGML_MAX_NAME;
  14623. const char * ptr_op_params = ptr; ptr += GGML_MAX_OP_PARAMS;
  14624. const int32_t * ptr_arg_idx = (const int32_t *) ptr; ptr += GGML_MAX_SRC*sizeof(int32_t);
  14625. struct ggml_tensor * args[GGML_MAX_SRC] = { NULL };
  14626. // parse args
  14627. for (int j = 0; j < GGML_MAX_SRC; ++j) {
  14628. const int32_t arg_idx = ptr_arg_idx[j];
  14629. if (arg_idx == -1) {
  14630. continue;
  14631. }
  14632. if (arg_idx < result->n_leafs) {
  14633. args[j] = result->leafs[arg_idx];
  14634. } else {
  14635. args[j] = result->nodes[arg_idx - result->n_leafs];
  14636. }
  14637. }
  14638. // create the tensor
  14639. // "view" operations are handled differently
  14640. // TODO: handle inplace ops - currently a copy is always made
  14641. struct ggml_tensor * tensor = NULL;
  14642. switch (eop) {
  14643. // TODO: implement other view ops
  14644. case GGML_OP_RESHAPE:
  14645. {
  14646. tensor = ggml_reshape_4d(*ctx_eval, args[0], ne[0], ne[1], ne[2], ne[3]);
  14647. } break;
  14648. case GGML_OP_VIEW:
  14649. {
  14650. tensor = ggml_view_4d(*ctx_eval, args[0], ne[0], ne[1], ne[2], ne[3], 0, 0, 0, 0);
  14651. size_t offs;
  14652. memcpy(&offs, ptr_op_params, sizeof(offs));
  14653. tensor->data = ((char *) tensor->data) + offs;
  14654. } break;
  14655. case GGML_OP_TRANSPOSE:
  14656. {
  14657. tensor = ggml_transpose(*ctx_eval, args[0]);
  14658. } break;
  14659. case GGML_OP_PERMUTE:
  14660. {
  14661. tensor = ggml_view_4d(*ctx_eval, args[0], ne[0], ne[1], ne[2], ne[3], 0, 0, 0, 0);
  14662. } break;
  14663. default:
  14664. {
  14665. tensor = ggml_new_tensor(*ctx_eval, (enum ggml_type) type, GGML_MAX_DIMS, ne);
  14666. tensor->op = eop;
  14667. } break;
  14668. }
  14669. memcpy(tensor->name, ptr_name, GGML_MAX_NAME);
  14670. memcpy(tensor->op_params, ptr_op_params, GGML_MAX_OP_PARAMS);
  14671. for (int j = 0; j < GGML_MAX_DIMS; ++j) {
  14672. tensor->nb[j] = nb[j];
  14673. }
  14674. for (int j = 0; j < GGML_MAX_SRC; ++j) {
  14675. tensor->src[j] = args[j];
  14676. }
  14677. result->nodes[i] = tensor;
  14678. fprintf(stderr, "%s: loaded node %d: '%16s', %9zu bytes\n", __func__, i, tensor->name, ggml_nbytes(tensor));
  14679. }
  14680. }
  14681. }
  14682. return result;
  14683. }
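// illustrative export/import round trip (sketch only; the file name and thread count are assumptions):
//
//     ggml_graph_export(gf, "graph.ggml");
//
//     struct ggml_context * ctx_data = NULL;
//     struct ggml_context * ctx_eval = NULL;
//     struct ggml_cgraph  * gf2 = ggml_graph_import("graph.ggml", &ctx_data, &ctx_eval);
//     if (gf2) {
//         struct ggml_cplan cplan = ggml_graph_plan(gf2, /*n_threads =*/ 4);
//         // allocate cplan.work_data as shown after ggml_graph_compute() above, then:
//         ggml_graph_compute(gf2, &cplan);
//     }
//     if (ctx_eval) { ggml_free(ctx_eval); }
//     if (ctx_data) { ggml_free(ctx_data); }
//
// note: imported leaf data points into ctx_data, so ctx_data must outlive any use of the graph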
  14684. void ggml_graph_print(const struct ggml_cgraph * cgraph) {
  14685. int64_t perf_total_per_op_us[GGML_OP_COUNT] = {0};
  14686. GGML_PRINT("=== GRAPH ===\n");
  14687. GGML_PRINT("n_nodes = %d\n", cgraph->n_nodes);
  14688. for (int i = 0; i < cgraph->n_nodes; i++) {
  14689. struct ggml_tensor * node = cgraph->nodes[i];
  14690. perf_total_per_op_us[node->op] += MAX(1, node->perf_time_us);
  14691. GGML_PRINT(" - %3d: [ %5" PRId64 ", %5" PRId64 ", %5" PRId64 "] %16s %s (%3d) cpu = %7.3f / %7.3f ms, wall = %7.3f / %7.3f ms\n",
  14692. i,
  14693. node->ne[0], node->ne[1], node->ne[2],
  14694. ggml_op_name(node->op), node->is_param ? "x" : node->grad ? "g" : " ", node->perf_runs,
  14695. (double) node->perf_cycles / (double) ggml_cycles_per_ms(),
  14696. (double) node->perf_cycles / (double) ggml_cycles_per_ms() / (double) node->perf_runs,
  14697. (double) node->perf_time_us / 1000.0,
  14698. (double) node->perf_time_us / 1000.0 / node->perf_runs);
  14699. }
  14700. GGML_PRINT("n_leafs = %d\n", cgraph->n_leafs);
  14701. for (int i = 0; i < cgraph->n_leafs; i++) {
  14702. struct ggml_tensor * node = cgraph->leafs[i];
  14703. GGML_PRINT(" - %3d: [ %5" PRId64 ", %5" PRId64 "] %8s %16s\n",
  14704. i,
  14705. node->ne[0], node->ne[1],
  14706. ggml_op_name(node->op),
  14707. ggml_get_name(node));
  14708. }
  14709. for (int i = 0; i < GGML_OP_COUNT; i++) {
  14710. if (perf_total_per_op_us[i] == 0) {
  14711. continue;
  14712. }
  14713. GGML_PRINT("perf_total_per_op_us[%16s] = %7.3f ms\n", ggml_op_name(i), (double) perf_total_per_op_us[i] / 1000.0);
  14714. }
  14715. GGML_PRINT("========================================\n");
  14716. }
  14717. // check if node is part of the graph
  14718. static bool ggml_graph_find(const struct ggml_cgraph * cgraph, const struct ggml_tensor * node) {
  14719. if (cgraph == NULL) {
  14720. return true;
  14721. }
  14722. for (int i = 0; i < cgraph->n_nodes; i++) {
  14723. if (cgraph->nodes[i] == node) {
  14724. return true;
  14725. }
  14726. }
  14727. return false;
  14728. }
  14729. static struct ggml_tensor * ggml_graph_get_parent(const struct ggml_cgraph * cgraph, const struct ggml_tensor * node) {
  14730. for (int i = 0; i < cgraph->n_nodes; i++) {
  14731. struct ggml_tensor * parent = cgraph->nodes[i];
  14732. if (parent->grad == node) {
  14733. return parent;
  14734. }
  14735. }
  14736. return NULL;
  14737. }
  14738. static void ggml_graph_dump_dot_node_edge(FILE * fp, const struct ggml_cgraph * gb, struct ggml_tensor * node, struct ggml_tensor * parent, const char * label) {
  14739. struct ggml_tensor * gparent = ggml_graph_get_parent(gb, node);
  14740. struct ggml_tensor * gparent0 = ggml_graph_get_parent(gb, parent);
  14741. fprintf(fp, " \"%p\":%s -> \"%p\":%s [ arrowhead = %s; style = %s; label = \"%s\"; ]\n",
  14742. gparent0 ? (void *) gparent0 : (void *) parent,
  14743. gparent0 ? "g" : "x",
  14744. gparent ? (void *) gparent : (void *) node,
  14745. gparent ? "g" : "x",
  14746. gparent ? "empty" : "vee",
  14747. gparent ? "dashed" : "solid",
  14748. label);
  14749. }
  14750. static void ggml_graph_dump_dot_leaf_edge(FILE * fp, struct ggml_tensor * node, struct ggml_tensor * parent, const char * label) {
  14751. fprintf(fp, " \"%p\":%s -> \"%p\":%s [ label = \"%s\"; ]\n",
  14752. (void *) parent, "x",
  14753. (void *) node, "x",
  14754. label);
  14755. }
  14756. void ggml_graph_dump_dot(const struct ggml_cgraph * gb, const struct ggml_cgraph * gf, const char * filename) {
  14757. char color[16];
  14758. FILE * fp = fopen(filename, "w");
  14759. GGML_ASSERT(fp);
  14760. fprintf(fp, "digraph G {\n");
  14761. fprintf(fp, " newrank = true;\n");
  14762. fprintf(fp, " rankdir = LR;\n");
  14763. for (int i = 0; i < gb->n_nodes; i++) {
  14764. struct ggml_tensor * node = gb->nodes[i];
  14765. if (ggml_graph_get_parent(gb, node) != NULL) {
  14766. continue;
  14767. }
  14768. if (node->is_param) {
  14769. snprintf(color, sizeof(color), "yellow");
  14770. } else if (node->grad) {
  14771. if (ggml_graph_find(gf, node)) {
  14772. snprintf(color, sizeof(color), "green");
  14773. } else {
  14774. snprintf(color, sizeof(color), "lightblue");
  14775. }
  14776. } else {
  14777. snprintf(color, sizeof(color), "white");
  14778. }
  14779. fprintf(fp, " \"%p\" [ "
  14780. "style = filled; fillcolor = %s; shape = record; "
  14781. "label=\"",
  14782. (void *) node, color);
  14783. if (strlen(node->name) > 0) {
  14784. fprintf(fp, "%s (%s)|", node->name, ggml_type_name(node->type));
  14785. } else {
  14786. fprintf(fp, "(%s)|", ggml_type_name(node->type));
  14787. }
  14788. if (ggml_is_matrix(node)) {
  14789. fprintf(fp, "%d [%" PRId64 ", %" PRId64 "] | <x>%s", i, node->ne[0], node->ne[1], ggml_op_symbol(node->op));
  14790. } else {
  14791. fprintf(fp, "%d [%" PRId64 ", %" PRId64 ", %" PRId64 "] | <x>%s", i, node->ne[0], node->ne[1], node->ne[2], ggml_op_symbol(node->op));
  14792. }
  14793. if (node->grad) {
  14794. fprintf(fp, " | <g>%s\"; ]\n", ggml_op_symbol(node->grad->op));
  14795. } else {
  14796. fprintf(fp, "\"; ]\n");
  14797. }
  14798. }
  14799. for (int i = 0; i < gb->n_leafs; i++) {
  14800. struct ggml_tensor * node = gb->leafs[i];
  14801. snprintf(color, sizeof(color), "pink");
  14802. fprintf(fp, " \"%p\" [ "
  14803. "style = filled; fillcolor = %s; shape = record; "
  14804. "label=\"<x>",
  14805. (void *) node, color);
  14806. if (strlen(node->name) > 0) {
  14807. fprintf(fp, "%s (%s)|", node->name, ggml_type_name(node->type));
  14808. } else {
  14809. fprintf(fp, "(%s)|", ggml_type_name(node->type));
  14810. }
  14811. fprintf(fp, "CONST %d [%" PRId64 ", %" PRId64 "]", i, node->ne[0], node->ne[1]);
  14812. if (ggml_nelements(node) < 5) {
  14813. fprintf(fp, " | (");
  14814. for (int j = 0; j < ggml_nelements(node); j++) {
  14815. if (node->type == GGML_TYPE_I8 || node->type == GGML_TYPE_I16 || node->type == GGML_TYPE_I32) {
  14816. fprintf(fp, "%d", ggml_get_i32_1d(node, j));
  14817. }
  14818. else if (node->type == GGML_TYPE_F32 || node->type == GGML_TYPE_F16) {
  14819. fprintf(fp, "%.1e", (double)ggml_get_f32_1d(node, j));
  14820. }
  14821. else {
  14822. fprintf(fp, "#");
  14823. }
  14824. if (j < ggml_nelements(node) - 1) {
  14825. fprintf(fp, ", ");
  14826. }
  14827. }
  14828. fprintf(fp, ")");
  14829. }
  14830. fprintf(fp, "\"; ]\n");
  14831. }
  14832. for (int i = 0; i < gb->n_nodes; i++) {
  14833. struct ggml_tensor * node = gb->nodes[i];
  14834. for (int j = 0; j < GGML_MAX_SRC; j++) {
  14835. if (node->src[j]) {
  14836. char label[16];
  14837. snprintf(label, sizeof(label), "src %d", j);
  14838. ggml_graph_dump_dot_node_edge(fp, gb, node, node->src[j], label);
  14839. }
  14840. }
  14841. }
  14842. for (int i = 0; i < gb->n_leafs; i++) {
  14843. struct ggml_tensor * node = gb->leafs[i];
  14844. for (int j = 0; j < GGML_MAX_SRC; j++) {
  14845. if (node->src[j]) {
  14846. char label[16];
  14847. snprintf(label, sizeof(label), "src %d", j);
  14848. ggml_graph_dump_dot_leaf_edge(fp, node, node->src[j], label);
  14849. }
  14850. }
  14851. }
  14852. fprintf(fp, "}\n");
  14853. fclose(fp);
  14854. GGML_PRINT("%s: dot -Tpng %s -o %s.png && open %s.png\n", __func__, filename, filename, filename);
  14855. }
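// typical call (illustrative): dump the backward graph, passing the forward graph so nodes can
// be colored by role (params = yellow, gradient nodes also present in gf = green, other gradient
// nodes = lightblue, leafs = pink), then render with graphviz:
//
//     ggml_graph_dump_dot(gb, gf, "opt.dot");   // gf may be NULL if there is no forward graph
//     // shell: dot -Tpng opt.dot -o opt.png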
  14856. ////////////////////////////////////////////////////////////////////////////////
  14857. static void ggml_opt_set_params(int np, struct ggml_tensor * const ps[], const float * x) {
  14858. int i = 0;
  14859. for (int p = 0; p < np; ++p) {
14860. const int64_t ne = ggml_nelements(ps[p]);
  14861. // TODO: add function to set tensor from array
  14862. for (int64_t j = 0; j < ne; ++j) {
  14863. ggml_set_f32_1d(ps[p], j, x[i++]);
  14864. }
  14865. }
  14866. }
  14867. static void ggml_opt_get_params(int np, struct ggml_tensor * const ps[], float * x) {
  14868. int i = 0;
  14869. for (int p = 0; p < np; ++p) {
14870. const int64_t ne = ggml_nelements(ps[p]);
  14871. // TODO: add function to get all elements at once
  14872. for (int64_t j = 0; j < ne; ++j) {
  14873. x[i++] = ggml_get_f32_1d(ps[p], j);
  14874. }
  14875. }
  14876. }
  14877. static void ggml_opt_get_grad(int np, struct ggml_tensor * const ps[], float * g) {
  14878. int64_t i = 0;
  14879. for (int p = 0; p < np; ++p) {
14880. const int64_t ne = ggml_nelements(ps[p]);
  14881. // TODO: add function to get all elements at once
  14882. for (int64_t j = 0; j < ne; ++j) {
  14883. g[i++] = ggml_get_f32_1d(ps[p]->grad, j);
  14884. }
  14885. }
  14886. }
  14887. static void ggml_opt_acc_grad(int np, struct ggml_tensor * const ps[], float * g, float scale) {
  14888. int64_t i = 0;
  14889. for (int p = 0; p < np; ++p) {
14890. const int64_t ne = ggml_nelements(ps[p]);
  14891. // TODO: add function to get all elements at once
  14892. for (int64_t j = 0; j < ne; ++j) {
  14893. g[i++] += ggml_get_f32_1d(ps[p]->grad, j) * scale;
  14894. }
  14895. }
  14896. }
  14897. //
  14898. // Using AdamW - ref: https://arxiv.org/pdf/1711.05101v3.pdf
  14899. //
  14900. // (Original Adam - ref: https://arxiv.org/pdf/1412.6980.pdf)
  14901. //
  14902. static enum ggml_opt_result ggml_opt_adam(
  14903. struct ggml_context * ctx,
  14904. struct ggml_opt_context * opt,
  14905. struct ggml_opt_params params,
  14906. struct ggml_tensor * f,
  14907. struct ggml_cgraph * gf,
  14908. struct ggml_cgraph * gb,
  14909. ggml_opt_callback callback,
  14910. void * callback_data) {
  14911. GGML_ASSERT(ggml_is_scalar(f));
  14912. // these will store the parameters we want to optimize
  14913. struct ggml_tensor * ps[GGML_MAX_PARAMS];
  14914. int np = 0;
  14915. int64_t nx = 0;
  14916. for (int i = 0; i < gf->n_nodes; ++i) {
  14917. if (gf->nodes[i]->is_param) {
  14918. GGML_PRINT_DEBUG("found param %d: grad->op = %d\n", np, gf->nodes[i]->grad->op);
  14919. GGML_ASSERT(np < GGML_MAX_PARAMS);
  14920. ps[np++] = gf->nodes[i];
  14921. nx += ggml_nelements(gf->nodes[i]);
  14922. }
  14923. }
  14924. if ((opt->params.type != params.type) || (opt->nx != nx) || (opt->params.past != params.past)) {
  14925. int iter = opt->iter;
  14926. ggml_opt_init(opt->ctx, opt, params, nx);
  14927. opt->iter = iter;
  14928. }
  14929. // constants
  14930. float sched = params.adam.sched;
  14931. const float alpha = params.adam.alpha;
  14932. const float decay = params.adam.decay * alpha;
  14933. const float beta1 = params.adam.beta1;
  14934. const float beta2 = params.adam.beta2;
  14935. const float eps = params.adam.eps;
  14936. const float gclip = params.adam.gclip;
  14937. const int decay_min_ndim = params.adam.decay_min_ndim;
  14938. const int n_accum = MAX(1, params.n_gradient_accumulation);
  14939. const float accum_norm = 1.0f / (float) n_accum;
  14940. float * g = opt->adam.g->data; // gradients
  14941. float * m = opt->adam.m->data; // first moment
  14942. float * v = opt->adam.v->data; // second moment
  14943. float * pf = params.past > 0 ? opt->adam.pf->data : NULL; // past function values
  14944. struct ggml_cplan cplan = ggml_graph_plan(gb, params.n_threads);
  14945. struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_WORK_BUFFER, cplan.work_size);
  14946. cplan.work_data = (uint8_t *)ctx->mem_buffer + obj->offs;
  14947. bool cancel = false;
  14948. // compute the function value
  14949. float fx = 0;
  14950. ggml_set_zero(opt->adam.g);
  14951. for (int accum_step = 0; accum_step < n_accum; ++accum_step) {
  14952. if (callback) {
  14953. callback(callback_data, accum_step, &sched, &cancel);
  14954. if (cancel) {
  14955. return GGML_OPT_CANCEL;
  14956. }
  14957. }
  14958. // ggml_graph_reset (gf);
  14959. ggml_set_f32 (f->grad, 1.0f);
  14960. ggml_graph_compute(gb, &cplan);
  14961. ggml_opt_acc_grad(np, ps, g, accum_norm);
  14962. fx += ggml_get_f32_1d(f, 0);
  14963. }
  14964. fx *= accum_norm;
  14965. opt->adam.fx_prev = fx;
  14966. opt->adam.fx_best = opt->adam.fx_prev;
  14967. if (pf) {
  14968. pf[opt->iter % params.past] = opt->adam.fx_prev;
  14969. }
  14970. opt->loss_before = opt->adam.fx_prev;
  14971. opt->loss_after = opt->adam.fx_prev;
  14972. // initialize
  14973. if (opt->just_initialized) {
  14974. opt->adam.n_no_improvement = 0;
  14975. opt->just_initialized = false;
  14976. }
  14977. float * fx_best = &opt->adam.fx_best;
  14978. float * fx_prev = &opt->adam.fx_prev;
  14979. int * n_no_improvement = &opt->adam.n_no_improvement;
  14980. int iter0 = opt->iter;
  14981. // run the optimizer
  14982. for (int t = 0; t < params.adam.n_iter; ++t) {
  14983. opt->iter = iter0 + t + 1;
  14984. GGML_PRINT_DEBUG ("=== iter %d ===\n", t);
  14985. GGML_PRINT_DEBUG ("f = %10.6f\n", ggml_get_f32_1d(f, 0));
  14986. GGML_PRINT_DEBUG_5("df/dx0 = %10.6f\n", ggml_get_f32_1d(ps[0]->grad, 0));
  14987. GGML_PRINT_DEBUG_5("df/dx1 = %10.6f\n", ggml_get_f32_1d(ps[1]->grad, 0));
  14988. for (int i = 0; i < np; ++i) {
  14989. GGML_PRINT_DEBUG("param %d: %10.6f, g = %10.6f\n", i,
  14990. ggml_get_f32_1d(ps[i], 0), ggml_get_f32_1d(ps[i]->grad, 0));
  14991. }
  14992. const int64_t t_start_wall = ggml_time_us();
  14993. const int64_t t_start_cpu = ggml_cycles();
  14994. UNUSED(t_start_wall);
  14995. UNUSED(t_start_cpu);
  14996. {
  14997. float gnorm = 1.0f;
  14998. if (gclip > 0.0f) {
  14999. // gradient clipping
  15000. ggml_float sum = 0.0;
  15001. for (int64_t i = 0; i < nx; ++i) {
  15002. sum += (ggml_float)(g[i]*g[i]);
  15003. }
  15004. ggml_float norm = sqrt(sum);
  15005. if (norm > (ggml_float) gclip) {
  15006. gnorm = (float) ((ggml_float) gclip / norm);
  15007. }
  15008. }
  15009. const float beta1h = alpha*sched/(1.0f - powf(beta1, opt->iter));
  15010. const float beta2h = 1.0f/(1.0f - powf(beta2, opt->iter));
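// the loop below applies one AdamW step per scalar parameter:
//   m = beta1*m + (1 - beta1)*g        (first moment, g already scaled by gnorm)
//   v = beta2*v + (1 - beta2)*g*g      (second moment)
//   x = x*(1 - p_decay) - sched*alpha * m_hat / (sqrt(v_hat) + eps)
// the bias corrections m_hat = m/(1 - beta1^t) and v_hat = v/(1 - beta2^t) are folded into
// beta1h and beta2h above, and p_decay applies decoupled weight decay only to tensors with
// at least decay_min_ndim dimensions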
  15011. int64_t i = 0;
  15012. for (int p = 0; p < np; ++p) {
  15013. const int64_t ne = ggml_nelements(ps[p]);
  15014. const float p_decay = ((ggml_n_dims(ps[p]) >= decay_min_ndim) ? decay : 0.0f) * sched;
  15015. for (int64_t j = 0; j < ne; ++j) {
  15016. float x = ggml_get_f32_1d(ps[p], j);
  15017. float g_ = g[i]*gnorm;
  15018. m[i] = m[i]*beta1 + g_*(1.0f - beta1);
  15019. v[i] = v[i]*beta2 + g_*g_*(1.0f - beta2);
  15020. float mh = m[i]*beta1h;
  15021. float vh = v[i]*beta2h;
  15022. vh = sqrtf(vh) + eps;
  15023. x = x*(1.0f - p_decay) - mh/vh;
  15024. ggml_set_f32_1d(ps[p], j, x);
  15025. ++i;
  15026. }
  15027. }
  15028. }
  15029. fx = 0;
  15030. ggml_set_zero(opt->adam.g);
  15031. for (int accum_step = 0; accum_step < n_accum; ++accum_step) {
  15032. if (callback) {
  15033. callback(callback_data, accum_step, &sched, &cancel);
  15034. if (cancel) {
15035. return GGML_OPT_CANCEL;
  15036. }
  15037. }
  15038. // ggml_graph_reset (gf);
  15039. ggml_set_f32 (f->grad, 1.0f);
  15040. ggml_graph_compute(gb, &cplan);
  15041. ggml_opt_acc_grad(np, ps, g, accum_norm);
  15042. fx += ggml_get_f32_1d(f, 0);
  15043. }
  15044. fx *= accum_norm;
  15045. opt->loss_after = fx;
  15046. // check convergence
  15047. if (fabsf(fx - fx_prev[0])/fx < params.adam.eps_f) {
  15048. GGML_PRINT_DEBUG("converged\n");
  15049. return GGML_OPT_OK;
  15050. }
  15051. // delta-based convergence test
  15052. if (pf != NULL) {
  15053. // need at least params.past iterations to start checking for convergence
  15054. if (params.past <= iter0 + t) {
  15055. const float rate = (pf[(iter0 + t)%params.past] - fx)/fx;
  15056. if (fabsf(rate) < params.delta) {
  15057. return GGML_OPT_OK;
  15058. }
  15059. }
  15060. pf[(iter0 + t)%params.past] = fx;
  15061. }
  15062. // check for improvement
  15063. if (params.max_no_improvement > 0) {
  15064. if (fx_best[0] > fx) {
  15065. fx_best[0] = fx;
  15066. n_no_improvement[0] = 0;
  15067. } else {
  15068. ++n_no_improvement[0];
  15069. if (n_no_improvement[0] >= params.max_no_improvement) {
  15070. return GGML_OPT_OK;
  15071. }
  15072. }
  15073. }
  15074. fx_prev[0] = fx;
  15075. {
  15076. const int64_t t_end_cpu = ggml_cycles();
  15077. GGML_PRINT_DEBUG("time iter: %5.3f s\n", ((float)(t_end_cpu - t_start_cpu))/CLOCKS_PER_SEC);
  15078. UNUSED(t_end_cpu);
  15079. const int64_t t_end_wall = ggml_time_us();
  15080. GGML_PRINT_DEBUG("wall time iter: %5.3f s\n", (t_end_wall - t_start_wall)/1e6);
  15081. UNUSED(t_end_wall);
  15082. }
  15083. }
  15084. return GGML_OPT_DID_NOT_CONVERGE;
  15085. }
  15086. //
  15087. // L-BFGS
  15088. //
  15089. // the L-BFGS implementation below is based on the following implementation:
  15090. //
  15091. // https://github.com/chokkan/liblbfgs
  15092. //
  15093. struct ggml_lbfgs_iteration_data {
  15094. float alpha;
  15095. float ys;
  15096. float * s;
  15097. float * y;
  15098. };
  15099. static enum ggml_opt_result linesearch_backtracking(
  15100. const struct ggml_opt_params * params,
  15101. int nx,
  15102. float * x,
  15103. float * fx,
  15104. float * g,
  15105. float * d,
  15106. float * step,
  15107. const float * xp,
  15108. struct ggml_tensor * f,
  15109. struct ggml_cgraph * gb,
  15110. struct ggml_cplan * cplan,
  15111. const int np,
  15112. struct ggml_tensor * ps[],
  15113. bool * cancel,
  15114. ggml_opt_callback callback,
  15115. void * callback_data) {
  15116. int count = 0;
  15117. float width = 0.0f;
  15118. float dg = 0.0f;
  15119. float finit = 0.0f;
  15120. float dginit = 0.0f;
  15121. float dgtest = 0.0f;
  15122. const float dec = 0.5f;
  15123. const float inc = 2.1f;
  15124. const int n_accum = MAX(1, params->n_gradient_accumulation);
  15125. const float accum_norm = 1.0f / (float) n_accum;
  15126. if (*step <= 0.f) {
  15127. return GGML_LINESEARCH_INVALID_PARAMETERS;
  15128. }
  15129. // compute the initial gradient in the search direction
  15130. ggml_vec_dot_f32(nx, &dginit, g, d);
  15131. // make sure that d points to a descent direction
  15132. if (0 < dginit) {
  15133. return GGML_LINESEARCH_FAIL;
  15134. }
  15135. // initialize local variables
  15136. finit = *fx;
  15137. dgtest = params->lbfgs.ftol*dginit;
  15138. while (true) {
  15139. ggml_vec_cpy_f32(nx, x, xp);
  15140. ggml_vec_mad_f32(nx, x, d, *step);
  15141. // evaluate the function and gradient values
  15142. {
  15143. ggml_opt_set_params(np, ps, x);
  15144. *fx = 0;
  15145. memset(g, 0, sizeof(float)*nx);
  15146. for (int accum_step = 0; accum_step < n_accum; ++accum_step) {
  15147. if (callback) {
15148. // L-BFGS does not support a learning rate -> ignore the learning schedule
  15149. float sched = 0;
  15150. callback(callback_data, accum_step, &sched, cancel);
  15151. if (*cancel) {
  15152. return GGML_OPT_CANCEL;
  15153. }
  15154. }
  15155. // ggml_graph_reset (gf);
  15156. ggml_set_f32 (f->grad, 1.0f);
  15157. ggml_graph_compute(gb, cplan);
  15158. ggml_opt_acc_grad(np, ps, g, accum_norm);
  15159. *fx += ggml_get_f32_1d(f, 0);
  15160. }
  15161. *fx *= accum_norm;
  15162. }
  15163. ++count;
  15164. if (*fx > finit + (*step)*dgtest) {
  15165. width = dec;
  15166. } else {
  15167. // Armijo condition is satisfied
  15168. if (params->lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_ARMIJO) {
  15169. return count;
  15170. }
  15171. ggml_vec_dot_f32(nx, &dg, g, d);
  15172. // check the Wolfe condition
  15173. if (dg < params->lbfgs.wolfe * dginit) {
  15174. width = inc;
  15175. } else {
15176. if (params->lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_WOLFE) {
  15177. // regular Wolfe conditions
  15178. return count;
  15179. }
15180. if (dg > -params->lbfgs.wolfe*dginit) {
  15181. width = dec;
  15182. } else {
  15183. // strong Wolfe condition (GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE)
  15184. return count;
  15185. }
  15186. }
  15187. }
  15188. if (*step < params->lbfgs.min_step) {
  15189. return GGML_LINESEARCH_MINIMUM_STEP;
  15190. }
  15191. if (*step > params->lbfgs.max_step) {
  15192. return GGML_LINESEARCH_MAXIMUM_STEP;
  15193. }
  15194. if (params->lbfgs.max_linesearch <= count) {
  15195. return GGML_LINESEARCH_MAXIMUM_ITERATIONS;
  15196. }
  15197. (*step) *= width;
  15198. }
  15199. GGML_UNREACHABLE();
  15200. }
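// return convention for linesearch_backtracking(): on success the (positive) number of line
// search iterations is returned once the Armijo condition, and depending on
// params->lbfgs.linesearch the regular or strong Wolfe condition, is satisfied;
// GGML_LINESEARCH_* codes signal failure and GGML_OPT_CANCEL signals cancellation by the
// callback; ggml_opt_lbfgs() below checks the cancel flag first and treats ls < 0 as failure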
  15201. static enum ggml_opt_result ggml_opt_lbfgs(
  15202. struct ggml_context * ctx,
  15203. struct ggml_opt_context * opt,
  15204. struct ggml_opt_params params,
  15205. struct ggml_tensor * f,
  15206. struct ggml_cgraph * gf,
  15207. struct ggml_cgraph * gb,
  15208. ggml_opt_callback callback,
  15209. void * callback_data) {
  15210. if (params.lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_WOLFE ||
  15211. params.lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE) {
  15212. if (params.lbfgs.wolfe <= params.lbfgs.ftol || 1.f <= params.lbfgs.wolfe) {
  15213. return GGML_OPT_INVALID_WOLFE;
  15214. }
  15215. }
  15216. const int m = params.lbfgs.m;
  15217. // these will store the parameters we want to optimize
  15218. struct ggml_tensor * ps[GGML_MAX_PARAMS];
  15219. int np = 0;
  15220. int nx = 0;
  15221. for (int i = 0; i < gf->n_nodes; ++i) {
  15222. if (gf->nodes[i]->is_param) {
  15223. GGML_PRINT_DEBUG("found param %d: grad->op = %d\n", np, gf->nodes[i]->grad->op);
  15224. GGML_ASSERT(np < GGML_MAX_PARAMS);
  15225. ps[np++] = gf->nodes[i];
  15226. nx += ggml_nelements(gf->nodes[i]);
  15227. }
  15228. }
  15229. if ((opt->params.type != params.type) || (opt->nx != nx) || (opt->params.past != params.past) || (opt->params.lbfgs.m != params.lbfgs.m)) {
  15230. int iter = opt->iter;
  15231. ggml_opt_init(ctx, opt, params, nx);
  15232. opt->iter = iter;
  15233. }
  15234. struct ggml_cplan cplan = ggml_graph_plan(gb, params.n_threads);
  15235. struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_WORK_BUFFER, cplan.work_size);
  15236. cplan.work_data = (uint8_t *)ctx->mem_buffer + obj->offs;
  15237. float * x = opt->lbfgs.x->data; // current parameters
  15238. float * xp = opt->lbfgs.xp->data; // previous parameters
  15239. float * g = opt->lbfgs.g->data; // current gradient
  15240. float * gp = opt->lbfgs.gp->data; // previous gradient
  15241. float * d = opt->lbfgs.d->data; // search direction
  15242. float * pf = params.past > 0 ? opt->lbfgs.pf->data : NULL; // past function values
  15243. const int n_accum = MAX(1, params.n_gradient_accumulation);
  15244. const float accum_norm = 1.0f / (float) n_accum;
  15245. float fx = 0.0f; // cost function value
  15246. float xnorm = 0.0f; // ||x||
  15247. float gnorm = 0.0f; // ||g||
  15248. // initialize x from the graph nodes
  15249. ggml_opt_get_params(np, ps, x);
  15250. // the L-BFGS memory
  15251. float * lm_alpha = opt->lbfgs.lmal->data;
  15252. float * lm_ys = opt->lbfgs.lmys->data;
  15253. float * lm_s = opt->lbfgs.lms->data;
  15254. float * lm_y = opt->lbfgs.lmy->data;
  15255. bool cancel = false;
  15256. // evaluate the function value and its gradient
  15257. {
  15258. ggml_opt_set_params(np, ps, x);
  15259. fx = 0;
  15260. memset(g, 0, sizeof(float)*nx);
  15261. for (int accum_step = 0; accum_step < n_accum; ++accum_step) {
  15262. if (callback) {
15263. // L-BFGS does not support a learning rate -> ignore the learning schedule
  15264. float sched = 0;
  15265. callback(callback_data, accum_step, &sched, &cancel);
  15266. if (cancel) {
  15267. return GGML_OPT_CANCEL;
  15268. }
  15269. }
  15270. // ggml_graph_reset (gf);
  15271. ggml_set_f32 (f->grad, 1.0f);
  15272. ggml_graph_compute(gb, &cplan);
  15273. ggml_opt_acc_grad(np, ps, g, accum_norm);
  15274. fx += ggml_get_f32_1d(f, 0);
  15275. }
  15276. fx *= accum_norm;
  15277. opt->loss_before = fx;
  15278. opt->loss_after = fx;
  15279. }
  15280. // search direction = -gradient
  15281. ggml_vec_neg_f32(nx, d, g);
  15282. // ||x||, ||g||
  15283. ggml_vec_norm_f32(nx, &xnorm, x);
  15284. ggml_vec_norm_f32(nx, &gnorm, g);
  15285. if (xnorm < 1.0f) {
  15286. xnorm = 1.0f;
  15287. }
  15288. // already optimized
  15289. if (gnorm/xnorm <= params.lbfgs.eps) {
  15290. return GGML_OPT_OK;
  15291. }
  15292. if (opt->just_initialized) {
  15293. if (pf) {
  15294. pf[0] = fx;
  15295. }
  15296. opt->lbfgs.fx_best = fx;
  15297. // initial step
  15298. ggml_vec_norm_inv_f32(nx, &opt->lbfgs.step, d);
  15299. opt->lbfgs.j = 0;
  15300. opt->lbfgs.k = 1;
  15301. opt->lbfgs.end = 0;
  15302. opt->lbfgs.n_no_improvement = 0;
  15303. opt->just_initialized = false;
  15304. }
  15305. float * fx_best = &opt->lbfgs.fx_best;
  15306. float * step = &opt->lbfgs.step;
  15307. int * j = &opt->lbfgs.j;
  15308. int * k = &opt->lbfgs.k;
  15309. int * end = &opt->lbfgs.end;
  15310. int * n_no_improvement = &opt->lbfgs.n_no_improvement;
  15311. int ls = 0;
  15312. int bound = 0;
  15313. float ys = 0.0f;
  15314. float yy = 0.0f;
  15315. float beta = 0.0f;
  15316. int it = 0;
  15317. while (true) {
  15318. // store the current position and gradient vectors
  15319. ggml_vec_cpy_f32(nx, xp, x);
  15320. ggml_vec_cpy_f32(nx, gp, g);
  15321. // TODO: instead of passing &cancel here, use the return code of the linesearch
  15322. // to determine if the optimization should be cancelled
  15323. // this is a simple change, but not doing this atm, since I don't have a nice
  15324. // way to test and don't want to break something with so many changes lined up
  15325. ls = linesearch_backtracking(&params, nx, x, &fx, g, d, step, xp, f, gb, &cplan, np, ps, &cancel, callback, callback_data);
  15326. if (cancel) {
  15327. return GGML_OPT_CANCEL;
  15328. }
  15329. if (ls < 0) {
  15330. // linesearch failed - go back to the previous point and return
  15331. ggml_vec_cpy_f32(nx, x, xp);
  15332. ggml_vec_cpy_f32(nx, g, gp);
  15333. return ls;
  15334. }
  15335. opt->loss_after = fx;
  15336. ggml_vec_norm_f32(nx, &xnorm, x);
  15337. ggml_vec_norm_f32(nx, &gnorm, g);
  15338. GGML_PRINT_DEBUG("f = %10.6f\n", ggml_get_f32_1d(f, 0));
  15339. if (xnorm < 1.0f) {
  15340. xnorm = 1.0f;
  15341. }
  15342. if (gnorm/xnorm <= params.lbfgs.eps) {
  15343. // converged
  15344. return GGML_OPT_OK;
  15345. }
  15346. // delta-based convergence test
  15347. if (pf != NULL) {
  15348. // need at least params.past iterations to start checking for convergence
  15349. if (params.past <= k[0]) {
  15350. const float rate = (pf[k[0]%params.past] - fx)/fx;
  15351. if (fabsf(rate) < params.delta) {
  15352. return GGML_OPT_OK;
  15353. }
  15354. }
  15355. pf[k[0]%params.past] = fx;
  15356. }
  15357. // check for improvement
  15358. if (params.max_no_improvement > 0) {
  15359. if (fx < fx_best[0]) {
  15360. fx_best[0] = fx;
  15361. n_no_improvement[0] = 0;
  15362. } else {
  15363. n_no_improvement[0]++;
  15364. if (n_no_improvement[0] >= params.max_no_improvement) {
  15365. return GGML_OPT_OK;
  15366. }
  15367. }
  15368. }
  15369. if (params.lbfgs.n_iter != 0 && params.lbfgs.n_iter < it + 1) {
  15370. // reached the maximum number of iterations
  15371. return GGML_OPT_DID_NOT_CONVERGE;
  15372. }
  15373. // update vectors s and y:
  15374. // s_{k+1} = x_{k+1} - x_{k} = \step * d_{k}.
  15375. // y_{k+1} = g_{k+1} - g_{k}.
  15376. //
  15377. ggml_vec_sub_f32(nx, &lm_s[end[0]*nx], x, xp);
  15378. ggml_vec_sub_f32(nx, &lm_y[end[0]*nx], g, gp);
  15379. // compute scalars ys and yy:
  15380. // ys = y^t \cdot s -> 1 / \rho.
  15381. // yy = y^t \cdot y.
  15382. //
  15383. ggml_vec_dot_f32(nx, &ys, &lm_y[end[0]*nx], &lm_s[end[0]*nx]);
  15384. ggml_vec_dot_f32(nx, &yy, &lm_y[end[0]*nx], &lm_y[end[0]*nx]);
  15385. lm_ys[end[0]] = ys;
  15386. // find new search direction
  15387. // ref: https://en.wikipedia.org/wiki/Limited-memory_BFGS
  15388. bound = (m <= k[0]) ? m : k[0];
  15389. k[0]++;
  15390. it++;
  15391. end[0] = (end[0] + 1)%m;
  15392. // initialize search direction with -g
  15393. ggml_vec_neg_f32(nx, d, g);
  15394. j[0] = end[0];
  15395. for (int i = 0; i < bound; ++i) {
  15396. j[0] = (j[0] + m - 1) % m;
  15397. // \alpha_{j} = \rho_{j} s^{t}_{j} \cdot q_{k+1}
  15398. ggml_vec_dot_f32(nx, &lm_alpha[j[0]], &lm_s[j[0]*nx], d);
  15399. lm_alpha[j[0]] /= lm_ys[j[0]];
  15400. // q_{i} = q_{i+1} - \alpha_{i} y_{i}
  15401. ggml_vec_mad_f32(nx, d, &lm_y[j[0]*nx], -lm_alpha[j[0]]);
  15402. }
  15403. ggml_vec_scale_f32(nx, d, ys/yy);
  15404. for (int i = 0; i < bound; ++i) {
  15405. // \beta_{j} = \rho_{j} y^t_{j} \cdot \gamma_{i}
  15406. ggml_vec_dot_f32(nx, &beta, &lm_y[j[0]*nx], d);
  15407. beta /= lm_ys[j[0]];
  15408. // \gamma_{i+1} = \gamma_{i} + (\alpha_{j} - \beta_{j}) s_{j}
  15409. ggml_vec_mad_f32(nx, d, &lm_s[j[0]*nx], lm_alpha[j[0]] - beta);
  15410. j[0] = (j[0] + 1)%m;
  15411. }
  15412. step[0] = 1.0;
  15413. }
  15414. GGML_UNREACHABLE();
  15415. }
  15416. struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type) {
  15417. struct ggml_opt_params result;
  15418. switch (type) {
  15419. case GGML_OPT_ADAM:
  15420. {
  15421. result = (struct ggml_opt_params) {
  15422. .type = GGML_OPT_ADAM,
  15423. .graph_size = GGML_DEFAULT_GRAPH_SIZE,
  15424. .n_threads = 1, // FIXME: GGML_DEFAULT_N_THREADS ?
  15425. .past = 0,
  15426. .delta = 1e-5f,
  15427. .max_no_improvement = 100,
  15428. .print_forward_graph = true,
  15429. .print_backward_graph = true,
  15430. .n_gradient_accumulation = 1,
  15431. .adam = {
  15432. .n_iter = 10000,
  15433. .sched = 1.000f,
  15434. .decay = 0.0f,
  15435. .decay_min_ndim = 2,
  15436. .alpha = 0.001f,
  15437. .beta1 = 0.9f,
  15438. .beta2 = 0.999f,
  15439. .eps = 1e-8f,
  15440. .eps_f = 1e-5f,
  15441. .eps_g = 1e-3f,
  15442. .gclip = 0.0f,
  15443. },
  15444. };
  15445. } break;
  15446. case GGML_OPT_LBFGS:
  15447. {
  15448. result = (struct ggml_opt_params) {
  15449. .type = GGML_OPT_LBFGS,
  15450. .graph_size = GGML_DEFAULT_GRAPH_SIZE,
  15451. .n_threads = 1,
  15452. .past = 0,
  15453. .delta = 1e-5f,
  15454. .max_no_improvement = 0,
  15455. .print_forward_graph = true,
  15456. .print_backward_graph = true,
  15457. .n_gradient_accumulation = 1,
  15458. .lbfgs = {
  15459. .m = 6,
  15460. .n_iter = 100,
  15461. .max_linesearch = 20,
  15462. .eps = 1e-5f,
  15463. .ftol = 1e-4f,
  15464. .wolfe = 0.9f,
  15465. .min_step = 1e-20f,
  15466. .max_step = 1e+20f,
  15467. .linesearch = GGML_LINESEARCH_DEFAULT,
  15468. },
  15469. };
  15470. } break;
  15471. }
  15472. return result;
  15473. }
  15474. GGML_API void ggml_opt_init(
  15475. struct ggml_context * ctx,
  15476. struct ggml_opt_context * opt,
  15477. struct ggml_opt_params params,
  15478. int64_t nx) {
  15479. opt->ctx = ctx;
  15480. opt->params = params;
  15481. opt->iter = 0;
  15482. opt->nx = nx;
  15483. opt->just_initialized = true;
  15484. if (opt->ctx == NULL) {
  15485. struct ggml_init_params ctx_opt_params;
  15486. if (opt->params.type == GGML_OPT_ADAM) {
  15487. ctx_opt_params.mem_size = GGML_MEM_ALIGN*3 + ggml_tensor_overhead()*3 + ggml_type_size(GGML_TYPE_F32)*nx*3;
  15488. if (opt->params.past > 0) {
  15489. ctx_opt_params.mem_size += GGML_MEM_ALIGN + ggml_tensor_overhead() + ggml_type_size(GGML_TYPE_F32)*opt->params.past;
  15490. }
  15491. } else if (opt->params.type == GGML_OPT_LBFGS) {
  15492. ctx_opt_params.mem_size = GGML_MEM_ALIGN*9 + ggml_tensor_overhead()*9 + ggml_type_size(GGML_TYPE_F32)*(nx*5 + opt->params.lbfgs.m*2 + nx*opt->params.lbfgs.m*2);
  15493. if (opt->params.past > 0) {
  15494. ctx_opt_params.mem_size += GGML_MEM_ALIGN + ggml_tensor_overhead() + ggml_type_size(GGML_TYPE_F32)*opt->params.past;
  15495. }
  15496. }
  15497. ctx_opt_params.mem_buffer = NULL;
  15498. ctx_opt_params.no_alloc = false;
  15499. opt->ctx = ggml_init(ctx_opt_params);
  15500. }
  15501. switch (opt->params.type) {
  15502. case GGML_OPT_ADAM:
  15503. {
  15504. opt->adam.g = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
  15505. opt->adam.m = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
  15506. opt->adam.v = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
  15507. opt->adam.pf = params.past > 0
  15508. ? ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, params.past)
  15509. : NULL;
  15510. ggml_set_zero(opt->adam.m);
  15511. ggml_set_zero(opt->adam.v);
  15512. if (opt->adam.pf) {
  15513. ggml_set_zero(opt->adam.pf);
  15514. }
  15515. } break;
  15516. case GGML_OPT_LBFGS:
  15517. {
  15518. opt->lbfgs.x = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
  15519. opt->lbfgs.xp = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
  15520. opt->lbfgs.g = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
  15521. opt->lbfgs.gp = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
  15522. opt->lbfgs.d = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
  15523. opt->lbfgs.pf = params.past > 0
  15524. ? ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, params.past)
  15525. : NULL;
  15526. opt->lbfgs.lmal = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, params.lbfgs.m);
  15527. opt->lbfgs.lmys = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, params.lbfgs.m);
  15528. opt->lbfgs.lms = ggml_new_tensor_2d(opt->ctx, GGML_TYPE_F32, nx, params.lbfgs.m);
  15529. opt->lbfgs.lmy = ggml_new_tensor_2d(opt->ctx, GGML_TYPE_F32, nx, params.lbfgs.m);
  15530. ggml_set_zero(opt->lbfgs.x);
  15531. ggml_set_zero(opt->lbfgs.xp);
  15532. ggml_set_zero(opt->lbfgs.g);
  15533. ggml_set_zero(opt->lbfgs.gp);
  15534. ggml_set_zero(opt->lbfgs.d);
  15535. if (opt->lbfgs.pf) {
  15536. ggml_set_zero(opt->lbfgs.pf);
  15537. }
  15538. ggml_set_zero(opt->lbfgs.lmal);
  15539. ggml_set_zero(opt->lbfgs.lmys);
  15540. ggml_set_zero(opt->lbfgs.lms);
  15541. ggml_set_zero(opt->lbfgs.lmy);
  15542. } break;
  15543. }
  15544. }
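// note: when opt->ctx is NULL, ggml_opt_init() creates a private context sized only for the
// optimizer state: g/m/v (plus optional past values) for ADAM, or x/xp/g/gp/d, the optional
// past values, and the L-BFGS history of size m for LBFGS; the parameter tensors themselves
// stay in the caller's context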
  15545. enum ggml_opt_result ggml_opt(
  15546. struct ggml_context * ctx,
  15547. struct ggml_opt_params params,
  15548. struct ggml_tensor * f) {
  15549. bool free_ctx = false;
  15550. if (ctx == NULL) {
  15551. struct ggml_init_params params_ctx = {
  15552. .mem_size = 16*1024*1024,
  15553. .mem_buffer = NULL,
  15554. .no_alloc = false,
  15555. };
  15556. ctx = ggml_init(params_ctx);
  15557. if (ctx == NULL) {
  15558. return GGML_OPT_NO_CONTEXT;
  15559. }
  15560. free_ctx = true;
  15561. }
  15562. enum ggml_opt_result result = GGML_OPT_OK;
  15563. struct ggml_opt_context * opt = (struct ggml_opt_context *) alloca(sizeof(struct ggml_opt_context));
  15564. ggml_opt_init(ctx, opt, params, 0);
  15565. result = ggml_opt_resume(ctx, opt, f);
  15566. if (free_ctx) {
  15567. ggml_free(ctx);
  15568. }
  15569. return result;
  15570. }
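// minimal end-to-end sketch (illustrative only; the objective is an arbitrary example and the
// buffer size is an assumption):
//
//     struct ggml_init_params ip = {
//         .mem_size   = 16*1024*1024,
//         .mem_buffer = NULL,
//         .no_alloc   = false,
//     };
//     struct ggml_context * ctx0 = ggml_init(ip);
//
//     struct ggml_tensor * x = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 2);
//     ggml_set_param(ctx0, x);                                    // mark x as optimizable
//     ggml_set_f32(x, 5.0f);
//
//     struct ggml_tensor * f = ggml_sum(ctx0, ggml_sqr(ctx0, x)); // f(x) = sum(x_i^2), scalar
//
//     enum ggml_opt_result res = ggml_opt(ctx0, ggml_opt_default_params(GGML_OPT_ADAM), f);
//     // res should be GGML_OPT_OK and x should end up close to 0
//
//     ggml_free(ctx0);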
  15571. enum ggml_opt_result ggml_opt_resume(
  15572. struct ggml_context * ctx,
  15573. struct ggml_opt_context * opt,
  15574. struct ggml_tensor * f) {
  15575. // build forward + backward compute graphs
  15576. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx, opt->params.graph_size, true);
  15577. ggml_build_forward_expand(gf, f);
  15578. struct ggml_cgraph * gb = ggml_graph_dup(ctx, gf);
  15579. ggml_build_backward_expand(ctx, gf, gb, true);
  15580. return ggml_opt_resume_g(ctx, opt, f, gf, gb, NULL, NULL);
  15581. }
  15582. enum ggml_opt_result ggml_opt_resume_g(
  15583. struct ggml_context * ctx,
  15584. struct ggml_opt_context * opt,
  15585. struct ggml_tensor * f,
  15586. struct ggml_cgraph * gf,
  15587. struct ggml_cgraph * gb,
  15588. ggml_opt_callback callback,
  15589. void * callback_data) {
  15590. // build forward + backward compute graphs
  15591. enum ggml_opt_result result = GGML_OPT_OK;
  15592. switch (opt->params.type) {
  15593. case GGML_OPT_ADAM:
  15594. {
  15595. result = ggml_opt_adam(ctx, opt, opt->params, f, gf, gb, callback, callback_data);
  15596. } break;
  15597. case GGML_OPT_LBFGS:
  15598. {
  15599. result = ggml_opt_lbfgs(ctx, opt, opt->params, f, gf, gb, callback, callback_data);
  15600. } break;
  15601. }
  15602. if (opt->params.print_forward_graph) {
  15603. ggml_graph_print (gf);
  15604. ggml_graph_dump_dot(gf, NULL, "opt-forward.dot");
  15605. }
  15606. if (opt->params.print_backward_graph) {
  15607. ggml_graph_print (gb);
  15608. ggml_graph_dump_dot(gb, gf, "opt-backward.dot");
  15609. }
  15610. return result;
  15611. }
  15612. ////////////////////////////////////////////////////////////////////////////////
  15613. void ggml_quantize_init(enum ggml_type type) {
  15614. ggml_critical_section_start();
  15615. switch (type) {
  15616. case GGML_TYPE_IQ2_XXS: iq2xs_init_impl(256); break;
  15617. case GGML_TYPE_IQ2_XS: iq2xs_init_impl(512); break;
  15618. default: // nothing
  15619. break;
  15620. }
  15621. ggml_critical_section_end();
  15622. }
  15623. void ggml_quantize_free(void) {
  15624. ggml_critical_section_start();
  15625. iq2xs_free_impl(256);
  15626. iq2xs_free_impl(512);
  15627. ggml_critical_section_end();
  15628. }
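// usage sketch (editorial note): only the IQ2 types need their lookup tables
// built before quantization and released afterwards; for all other types the
// init call is a no-op.
//
//   ggml_quantize_init(GGML_TYPE_IQ2_XS);
//   // ... quantize IQ2_XS tensors (an importance matrix is also required,
//   //     see ggml_quantize_requires_imatrix() below) ...
//   ggml_quantize_free();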
  15629. size_t ggml_quantize_q4_0(const float * src, void * dst, int n, int k, int64_t * hist) {
  15630. assert(k % QK4_0 == 0);
  15631. const int nb = k / QK4_0;
  15632. for (int b = 0; b < n; b += k) {
  15633. block_q4_0 * restrict y = (block_q4_0 *) dst + b/QK4_0;
  15634. quantize_row_q4_0_reference(src + b, y, k);
  15635. for (int i = 0; i < nb; i++) {
  15636. for (int j = 0; j < QK4_0; j += 2) {
  15637. const uint8_t vi0 = y[i].qs[j/2] & 0x0F;
  15638. const uint8_t vi1 = y[i].qs[j/2] >> 4;
  15639. hist[vi0]++;
  15640. hist[vi1]++;
  15641. }
  15642. }
  15643. }
  15644. return (n/QK4_0*sizeof(block_q4_0));
  15645. }
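// usage sketch (editorial example): quantize a flat float buffer to Q4_0 and
// collect the 16-bin nibble histogram. assumptions: n is a multiple of k and
// k is a multiple of QK4_0; hist has room for 16 counters; the function name
// is illustrative.
static size_t ggml_example_quantize_q4_0(const float * data, void * out, int n, int k) {
    int64_t hist[16] = {0};
    const size_t bytes = ggml_quantize_q4_0(data, out, n, k, hist);
    // hist[0..15] now holds how often each 4-bit quant value occurred
    return bytes;
}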
  15646. size_t ggml_quantize_q4_1(const float * src, void * dst, int n, int k, int64_t * hist) {
  15647. assert(k % QK4_1 == 0);
  15648. const int nb = k / QK4_1;
  15649. for (int b = 0; b < n; b += k) {
  15650. block_q4_1 * restrict y = (block_q4_1 *) dst + b/QK4_1;
  15651. quantize_row_q4_1_reference(src + b, y, k);
  15652. for (int i = 0; i < nb; i++) {
  15653. for (int j = 0; j < QK4_1; j += 2) {
  15654. const uint8_t vi0 = y[i].qs[j/2] & 0x0F;
  15655. const uint8_t vi1 = y[i].qs[j/2] >> 4;
  15656. hist[vi0]++;
  15657. hist[vi1]++;
  15658. }
  15659. }
  15660. }
  15661. return (n/QK4_1*sizeof(block_q4_1));
  15662. }
  15663. size_t ggml_quantize_q5_0(const float * src, void * dst, int n, int k, int64_t * hist) {
  15664. assert(k % QK5_0 == 0);
  15665. const int nb = k / QK5_0;
  15666. for (int b = 0; b < n; b += k) {
  15667. block_q5_0 * restrict y = (block_q5_0 *)dst + b/QK5_0;
  15668. quantize_row_q5_0_reference(src + b, y, k);
  15669. for (int i = 0; i < nb; i++) {
  15670. uint32_t qh;
  15671. memcpy(&qh, &y[i].qh, sizeof(qh));
  15672. for (int j = 0; j < QK5_0; j += 2) {
  15673. const uint8_t vh0 = ((qh & (1u << (j/2 + 0 ))) >> (j/2 + 0 )) << 4;
  15674. const uint8_t vh1 = ((qh & (1u << (j/2 + 16))) >> (j/2 + 12));
15675. // map the 5-bit values to 16 histogram bins
  15676. const uint8_t vi0 = ((y[i].qs[j/2] & 0x0F) | vh0) / 2;
  15677. const uint8_t vi1 = ((y[i].qs[j/2] >> 4) | vh1) / 2;
  15678. hist[vi0]++;
  15679. hist[vi1]++;
  15680. }
  15681. }
  15682. }
  15683. return (n/QK5_0*sizeof(block_q5_0));
  15684. }
  15685. size_t ggml_quantize_q5_1(const float * src, void * dst, int n, int k, int64_t * hist) {
  15686. assert(k % QK5_1 == 0);
  15687. const int nb = k / QK5_1;
  15688. for (int b = 0; b < n; b += k) {
  15689. block_q5_1 * restrict y = (block_q5_1 *)dst + b/QK5_1;
  15690. quantize_row_q5_1_reference(src + b, y, k);
  15691. for (int i = 0; i < nb; i++) {
  15692. uint32_t qh;
  15693. memcpy(&qh, &y[i].qh, sizeof(qh));
  15694. for (int j = 0; j < QK5_1; j += 2) {
  15695. const uint8_t vh0 = ((qh & (1u << (j/2 + 0 ))) >> (j/2 + 0 )) << 4;
  15696. const uint8_t vh1 = ((qh & (1u << (j/2 + 16))) >> (j/2 + 12));
15697. // map the 5-bit values to 16 histogram bins
  15698. const uint8_t vi0 = ((y[i].qs[j/2] & 0x0F) | vh0) / 2;
  15699. const uint8_t vi1 = ((y[i].qs[j/2] >> 4) | vh1) / 2;
  15700. hist[vi0]++;
  15701. hist[vi1]++;
  15702. }
  15703. }
  15704. }
  15705. return (n/QK5_1*sizeof(block_q5_1));
  15706. }
  15707. size_t ggml_quantize_q8_0(const float * src, void * dst, int n, int k, int64_t * hist) {
  15708. assert(k % QK8_0 == 0);
  15709. const int nb = k / QK8_0;
  15710. for (int b = 0; b < n; b += k) {
  15711. block_q8_0 * restrict y = (block_q8_0 *)dst + b/QK8_0;
  15712. quantize_row_q8_0_reference(src + b, y, k);
  15713. for (int i = 0; i < nb; i++) {
  15714. for (int j = 0; j < QK8_0; ++j) {
  15715. const int8_t vi = y[i].qs[j];
  15716. hist[vi/16 + 8]++;
  15717. }
  15718. }
  15719. }
  15720. return (n/QK8_0*sizeof(block_q8_0));
  15721. }
  15722. bool ggml_quantize_requires_imatrix(enum ggml_type type) {
  15723. return
  15724. type == GGML_TYPE_IQ2_XXS ||
  15725. type == GGML_TYPE_IQ2_XS;
  15726. }
  15727. size_t ggml_quantize_chunk(enum ggml_type type, const float * src, void * dst, int start,
  15728. int nrows, int n_per_row, int64_t * hist, const float * imatrix) {
  15729. ggml_quantize_init(type); // this is noop if already initialized
  15730. size_t result = 0;
  15731. int n = nrows * n_per_row;
  15732. switch (type) {
  15733. case GGML_TYPE_Q4_0:
  15734. {
  15735. GGML_ASSERT(start % QK4_0 == 0);
  15736. GGML_ASSERT(start % n_per_row == 0);
  15737. size_t start_row = start / n_per_row;
  15738. size_t row_size = ggml_row_size(type, n_per_row);
  15739. result = quantize_q4_0(src + start, (char *)dst + start_row * row_size, nrows, n_per_row, hist, imatrix);
  15740. GGML_ASSERT(result == row_size * nrows);
  15741. } break;
  15742. case GGML_TYPE_Q4_1:
  15743. {
  15744. GGML_ASSERT(start % QK4_1 == 0);
  15745. GGML_ASSERT(start % n_per_row == 0);
  15746. size_t start_row = start / n_per_row;
  15747. size_t row_size = ggml_row_size(type, n_per_row);
  15748. result = quantize_q4_1(src + start, (char *)dst + start_row * row_size, nrows, n_per_row, hist, imatrix);
  15749. GGML_ASSERT(result == row_size * nrows);
  15750. } break;
  15751. case GGML_TYPE_Q5_0:
  15752. {
  15753. GGML_ASSERT(start % QK5_0 == 0);
  15754. GGML_ASSERT(start % n_per_row == 0);
  15755. size_t start_row = start / n_per_row;
  15756. size_t row_size = ggml_row_size(type, n_per_row);
  15757. result = quantize_q5_0(src + start, (char *)dst + start_row * row_size, nrows, n_per_row, hist, imatrix);
  15758. GGML_ASSERT(result == row_size * nrows);
  15759. } break;
  15760. case GGML_TYPE_Q5_1:
  15761. {
  15762. GGML_ASSERT(start % QK5_1 == 0);
  15763. GGML_ASSERT(start % n_per_row == 0);
  15764. size_t start_row = start / n_per_row;
  15765. size_t row_size = ggml_row_size(type, n_per_row);
  15766. result = quantize_q5_1(src + start, (char *)dst + start_row * row_size, nrows, n_per_row, hist, imatrix);
  15767. GGML_ASSERT(result == row_size * nrows);
  15768. } break;
  15769. case GGML_TYPE_Q8_0:
  15770. {
  15771. GGML_ASSERT(start % QK8_0 == 0);
  15772. block_q8_0 * block = (block_q8_0*)dst + start / QK8_0;
  15773. result = ggml_quantize_q8_0(src + start, block, n, n, hist);
  15774. } break;
  15775. case GGML_TYPE_Q2_K:
  15776. {
  15777. GGML_ASSERT(start % QK_K == 0);
  15778. GGML_ASSERT(start % n_per_row == 0);
  15779. size_t start_row = start / n_per_row;
  15780. size_t row_size = ggml_row_size(type, n_per_row);
  15781. result = quantize_q2_K(src + start, (char *)dst + start_row * row_size, nrows, n_per_row, hist, imatrix);
  15782. GGML_ASSERT(result == row_size * nrows);
  15783. } break;
  15784. case GGML_TYPE_Q3_K:
  15785. {
  15786. GGML_ASSERT(start % QK_K == 0);
  15787. GGML_ASSERT(start % n_per_row == 0);
  15788. size_t start_row = start / n_per_row;
  15789. size_t row_size = ggml_row_size(type, n_per_row);
  15790. result = quantize_q3_K(src + start, (char *)dst + start_row * row_size, nrows, n_per_row, hist, imatrix);
  15791. GGML_ASSERT(result == row_size * nrows);
  15792. } break;
  15793. case GGML_TYPE_Q4_K:
  15794. {
  15795. GGML_ASSERT(start % QK_K == 0);
  15796. GGML_ASSERT(start % n_per_row == 0);
  15797. size_t start_row = start / n_per_row;
  15798. size_t row_size = ggml_row_size(type, n_per_row);
  15799. result = quantize_q4_K(src + start, (char *)dst + start_row * row_size, nrows, n_per_row, hist, imatrix);
  15800. GGML_ASSERT(result == row_size * nrows);
  15801. } break;
  15802. case GGML_TYPE_Q5_K:
  15803. {
  15804. GGML_ASSERT(start % QK_K == 0);
  15805. GGML_ASSERT(start % n_per_row == 0);
  15806. size_t start_row = start / n_per_row;
  15807. size_t row_size = ggml_row_size(type, n_per_row);
  15808. result = quantize_q5_K(src + start, (char *)dst + start_row * row_size, nrows, n_per_row, hist, imatrix);
  15809. GGML_ASSERT(result == row_size * nrows);
  15810. } break;
  15811. case GGML_TYPE_Q6_K:
  15812. {
  15813. GGML_ASSERT(start % QK_K == 0);
  15814. GGML_ASSERT(start % n_per_row == 0);
  15815. size_t start_row = start / n_per_row;
  15816. size_t row_size = ggml_row_size(type, n_per_row);
  15817. result = quantize_q6_K(src + start, (char *)dst + start_row * row_size, nrows, n_per_row, hist, imatrix);
  15818. GGML_ASSERT(result == row_size * nrows);
  15819. } break;
  15820. case GGML_TYPE_IQ2_XXS:
  15821. {
  15822. GGML_ASSERT(start % QK_K == 0);
  15823. GGML_ASSERT(start % n_per_row == 0);
  15824. GGML_ASSERT(imatrix);
  15825. size_t start_row = start / n_per_row;
  15826. size_t row_size = ggml_row_size(type, n_per_row);
  15827. result = quantize_iq2_xxs(src + start, (char *)dst + start_row * row_size, nrows, n_per_row, hist, imatrix);
  15828. GGML_ASSERT(result == row_size * nrows);
  15829. } break;
  15830. case GGML_TYPE_IQ2_XS:
  15831. {
  15832. GGML_ASSERT(start % QK_K == 0);
  15833. GGML_ASSERT(start % n_per_row == 0);
  15834. GGML_ASSERT(imatrix);
  15835. size_t start_row = start / n_per_row;
  15836. size_t row_size = ggml_row_size(type, n_per_row);
  15837. result = quantize_iq2_xs(src + start, (char *)dst + start_row * row_size, nrows, n_per_row, hist, imatrix);
  15838. GGML_ASSERT(result == row_size * nrows);
  15839. } break;
  15840. case GGML_TYPE_F16:
  15841. {
  15842. size_t elemsize = sizeof(ggml_fp16_t);
  15843. ggml_fp32_to_fp16_row(src + start, (ggml_fp16_t *)dst + start, n);
  15844. result = n * elemsize;
  15845. } break;
  15846. case GGML_TYPE_F32:
  15847. {
  15848. size_t elemsize = sizeof(float);
  15849. result = n * elemsize;
  15850. memcpy((uint8_t *)dst + start * elemsize, src + start, result);
  15851. } break;
  15852. default:
  15853. assert(false);
  15854. }
  15855. return result;
  15856. }
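// usage sketch (editorial example): quantize a whole row-major matrix
// (nrows x n_per_row) to Q4_K in one call. Q4_K does not require an
// importance matrix, so imatrix may be NULL; for IQ2_XXS/IQ2_XS it must be
// provided (see ggml_quantize_requires_imatrix() above). assumption:
// n_per_row is a multiple of QK_K; the function name is illustrative.
static size_t ggml_example_quantize_matrix_q4_k(const float * src, void * dst, int nrows, int n_per_row) {
    int64_t hist[16] = {0};
    return ggml_quantize_chunk(GGML_TYPE_Q4_K, src, dst, /*start =*/ 0, nrows, n_per_row, hist, /*imatrix =*/ NULL);
}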
  15857. ////////////////////////////////////////////////////////////////////////////////
  15858. struct gguf_str {
  15859. uint64_t n; // GGUFv2
  15860. char * data;
  15861. };
  15862. static const size_t GGUF_TYPE_SIZE[GGUF_TYPE_COUNT] = {
  15863. [GGUF_TYPE_UINT8] = sizeof(uint8_t),
  15864. [GGUF_TYPE_INT8] = sizeof(int8_t),
  15865. [GGUF_TYPE_UINT16] = sizeof(uint16_t),
  15866. [GGUF_TYPE_INT16] = sizeof(int16_t),
  15867. [GGUF_TYPE_UINT32] = sizeof(uint32_t),
  15868. [GGUF_TYPE_INT32] = sizeof(int32_t),
  15869. [GGUF_TYPE_FLOAT32] = sizeof(float),
  15870. [GGUF_TYPE_BOOL] = sizeof(bool),
  15871. [GGUF_TYPE_STRING] = sizeof(struct gguf_str),
  15872. [GGUF_TYPE_UINT64] = sizeof(uint64_t),
  15873. [GGUF_TYPE_INT64] = sizeof(int64_t),
  15874. [GGUF_TYPE_FLOAT64] = sizeof(double),
  15875. [GGUF_TYPE_ARRAY] = 0, // undefined
  15876. };
  15877. static_assert(GGUF_TYPE_COUNT == 13, "GGUF_TYPE_COUNT != 13");
  15878. static const char * GGUF_TYPE_NAME[GGUF_TYPE_COUNT] = {
  15879. [GGUF_TYPE_UINT8] = "u8",
  15880. [GGUF_TYPE_INT8] = "i8",
  15881. [GGUF_TYPE_UINT16] = "u16",
  15882. [GGUF_TYPE_INT16] = "i16",
  15883. [GGUF_TYPE_UINT32] = "u32",
  15884. [GGUF_TYPE_INT32] = "i32",
  15885. [GGUF_TYPE_FLOAT32] = "f32",
  15886. [GGUF_TYPE_BOOL] = "bool",
  15887. [GGUF_TYPE_STRING] = "str",
  15888. [GGUF_TYPE_ARRAY] = "arr",
  15889. [GGUF_TYPE_UINT64] = "u64",
  15890. [GGUF_TYPE_INT64] = "i64",
  15891. [GGUF_TYPE_FLOAT64] = "f64",
  15892. };
  15893. static_assert(GGUF_TYPE_COUNT == 13, "GGUF_TYPE_COUNT != 13");
  15894. union gguf_value {
  15895. uint8_t uint8;
  15896. int8_t int8;
  15897. uint16_t uint16;
  15898. int16_t int16;
  15899. uint32_t uint32;
  15900. int32_t int32;
  15901. float float32;
  15902. uint64_t uint64;
  15903. int64_t int64;
  15904. double float64;
  15905. bool bool_;
  15906. struct gguf_str str;
  15907. struct {
  15908. enum gguf_type type;
  15909. uint64_t n; // GGUFv2
  15910. void * data;
  15911. } arr;
  15912. };
  15913. struct gguf_kv {
  15914. struct gguf_str key;
  15915. enum gguf_type type;
  15916. union gguf_value value;
  15917. };
  15918. struct gguf_header {
  15919. char magic[4];
  15920. uint32_t version;
  15921. uint64_t n_tensors; // GGUFv2
  15922. uint64_t n_kv; // GGUFv2
  15923. };
  15924. struct gguf_tensor_info {
  15925. struct gguf_str name;
  15926. uint32_t n_dims;
  15927. uint64_t ne[GGML_MAX_DIMS];
  15928. enum ggml_type type;
  15929. uint64_t offset; // offset from start of `data`, must be a multiple of `ALIGNMENT`
  15930. // for writing API
  15931. const void * data;
  15932. size_t size;
  15933. };
  15934. struct gguf_context {
  15935. struct gguf_header header;
  15936. struct gguf_kv * kv;
  15937. struct gguf_tensor_info * infos;
  15938. size_t alignment;
  15939. size_t offset; // offset of `data` from beginning of file
  15940. size_t size; // size of `data` in bytes
  15941. //uint8_t * padding;
  15942. void * data;
  15943. };
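// editorial note: on disk a GGUF file is laid out as
//   gguf_header | n_kv x (key, type, value) | n_tensors x tensor_info | padding to `alignment` | tensor data
// which is the order produced by gguf_write_to_buf() below and consumed by
// gguf_init_from_file().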
  15944. static bool gguf_fread_el(FILE * file, void * dst, size_t size, size_t * offset) {
  15945. const size_t n = fread(dst, 1, size, file);
  15946. *offset += n;
  15947. return n == size;
  15948. }
  15949. static bool gguf_fread_str(FILE * file, struct gguf_str * p, size_t * offset) {
  15950. p->n = 0;
  15951. p->data = NULL;
  15952. bool ok = true;
  15953. ok = ok && gguf_fread_el(file, &p->n, sizeof(p->n), offset); p->data = calloc(p->n + 1, 1);
  15954. ok = ok && gguf_fread_el(file, p->data, p->n, offset);
  15955. return ok;
  15956. }
  15957. struct gguf_context * gguf_init_empty(void) {
  15958. struct gguf_context * ctx = GGML_ALIGNED_MALLOC(sizeof(struct gguf_context));
  15959. memcpy(ctx->header.magic, GGUF_MAGIC, sizeof(ctx->header.magic));
  15960. ctx->header.version = GGUF_VERSION;
  15961. ctx->header.n_tensors = 0;
  15962. ctx->header.n_kv = 0;
  15963. ctx->kv = NULL;
  15964. ctx->infos = NULL;
  15965. ctx->alignment = GGUF_DEFAULT_ALIGNMENT;
  15966. ctx->offset = 0;
  15967. ctx->size = 0;
  15968. ctx->data = NULL;
  15969. return ctx;
  15970. }
  15971. struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_params params) {
  15972. FILE * file = fopen(fname, "rb");
  15973. if (!file) {
  15974. return NULL;
  15975. }
  15976. // offset from start of file
  15977. size_t offset = 0;
  15978. char magic[4];
  15979. // check the magic before making allocations
  15980. {
  15981. gguf_fread_el(file, &magic, sizeof(magic), &offset);
  15982. for (uint32_t i = 0; i < sizeof(magic); i++) {
  15983. if (magic[i] != GGUF_MAGIC[i]) {
  15984. fprintf(stderr, "%s: invalid magic characters '%c%c%c%c'\n", __func__, magic[0], magic[1], magic[2], magic[3]);
  15985. fclose(file);
  15986. return NULL;
  15987. }
  15988. }
  15989. }
  15990. bool ok = true;
  15991. struct gguf_context * ctx = GGML_ALIGNED_MALLOC(sizeof(struct gguf_context));
  15992. // read the header
  15993. {
  15994. strncpy(ctx->header.magic, magic, 4);
  15995. ctx->kv = NULL;
  15996. ctx->infos = NULL;
  15997. ctx->data = NULL;
  15998. ok = ok && gguf_fread_el(file, &ctx->header.version, sizeof(ctx->header.version), &offset);
  15999. ok = ok && gguf_fread_el(file, &ctx->header.n_tensors, sizeof(ctx->header.n_tensors), &offset);
  16000. ok = ok && gguf_fread_el(file, &ctx->header.n_kv, sizeof(ctx->header.n_kv), &offset);
  16001. if (ctx->header.version == 1) {
  16002. fprintf(stderr, "%s: GGUFv1 is no longer supported. please use a more up-to-date version\n", __func__);
  16003. fclose(file);
  16004. gguf_free(ctx);
  16005. return NULL;
  16006. }
  16007. if (!ok) {
  16008. fprintf(stderr, "%s: failed to read header\n", __func__);
  16009. fclose(file);
  16010. gguf_free(ctx);
  16011. return NULL;
  16012. }
  16013. }
  16014. // read the kv pairs
  16015. {
  16016. ctx->kv = malloc(ctx->header.n_kv * sizeof(struct gguf_kv));
  16017. for (uint64_t i = 0; i < ctx->header.n_kv; ++i) {
  16018. struct gguf_kv * kv = &ctx->kv[i];
  16019. //fprintf(stderr, "%s: reading kv %d\n", __func__, i);
  16020. ok = ok && gguf_fread_str(file, &kv->key, &offset);
  16021. ok = ok && gguf_fread_el (file, &kv->type, sizeof(kv->type), &offset);
  16022. //fprintf(stderr, "%s: reading kv with key %s\n", __func__, kv->key.data);
  16023. switch (kv->type) {
  16024. case GGUF_TYPE_UINT8: ok = ok && gguf_fread_el (file, &kv->value.uint8, sizeof(kv->value.uint8), &offset); break;
  16025. case GGUF_TYPE_INT8: ok = ok && gguf_fread_el (file, &kv->value.int8, sizeof(kv->value.int8), &offset); break;
  16026. case GGUF_TYPE_UINT16: ok = ok && gguf_fread_el (file, &kv->value.uint16, sizeof(kv->value.uint16), &offset); break;
  16027. case GGUF_TYPE_INT16: ok = ok && gguf_fread_el (file, &kv->value.int16, sizeof(kv->value.int16), &offset); break;
  16028. case GGUF_TYPE_UINT32: ok = ok && gguf_fread_el (file, &kv->value.uint32, sizeof(kv->value.uint32), &offset); break;
  16029. case GGUF_TYPE_INT32: ok = ok && gguf_fread_el (file, &kv->value.int32, sizeof(kv->value.int32), &offset); break;
  16030. case GGUF_TYPE_FLOAT32: ok = ok && gguf_fread_el (file, &kv->value.float32, sizeof(kv->value.float32), &offset); break;
  16031. case GGUF_TYPE_UINT64: ok = ok && gguf_fread_el (file, &kv->value.uint64, sizeof(kv->value.uint64), &offset); break;
  16032. case GGUF_TYPE_INT64: ok = ok && gguf_fread_el (file, &kv->value.int64, sizeof(kv->value.int64), &offset); break;
  16033. case GGUF_TYPE_FLOAT64: ok = ok && gguf_fread_el (file, &kv->value.float64, sizeof(kv->value.float64), &offset); break;
  16034. case GGUF_TYPE_BOOL: ok = ok && gguf_fread_el (file, &kv->value.bool_, sizeof(kv->value.bool_), &offset); break;
  16035. case GGUF_TYPE_STRING: ok = ok && gguf_fread_str(file, &kv->value.str, &offset); break;
  16036. case GGUF_TYPE_ARRAY:
  16037. {
  16038. ok = ok && gguf_fread_el(file, &kv->value.arr.type, sizeof(kv->value.arr.type), &offset);
  16039. ok = ok && gguf_fread_el(file, &kv->value.arr.n, sizeof(kv->value.arr.n), &offset);
  16040. switch (kv->value.arr.type) {
  16041. case GGUF_TYPE_UINT8:
  16042. case GGUF_TYPE_INT8:
  16043. case GGUF_TYPE_UINT16:
  16044. case GGUF_TYPE_INT16:
  16045. case GGUF_TYPE_UINT32:
  16046. case GGUF_TYPE_INT32:
  16047. case GGUF_TYPE_FLOAT32:
  16048. case GGUF_TYPE_UINT64:
  16049. case GGUF_TYPE_INT64:
  16050. case GGUF_TYPE_FLOAT64:
  16051. case GGUF_TYPE_BOOL:
  16052. {
  16053. kv->value.arr.data = malloc(kv->value.arr.n * GGUF_TYPE_SIZE[kv->value.arr.type]);
  16054. ok = ok && gguf_fread_el(file, kv->value.arr.data, kv->value.arr.n * GGUF_TYPE_SIZE[kv->value.arr.type], &offset);
  16055. } break;
  16056. case GGUF_TYPE_STRING:
  16057. {
  16058. kv->value.arr.data = malloc(kv->value.arr.n * sizeof(struct gguf_str));
  16059. for (uint64_t j = 0; j < kv->value.arr.n; ++j) {
  16060. ok = ok && gguf_fread_str(file, &((struct gguf_str *) kv->value.arr.data)[j], &offset);
  16061. }
  16062. } break;
  16063. case GGUF_TYPE_ARRAY:
  16064. case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type"); break;
  16065. }
  16066. } break;
  16067. case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type");
  16068. }
  16069. if (!ok) {
  16070. break;
  16071. }
  16072. }
  16073. if (!ok) {
  16074. fprintf(stderr, "%s: failed to read key-value pairs\n", __func__);
  16075. fclose(file);
  16076. gguf_free(ctx);
  16077. return NULL;
  16078. }
  16079. }
  16080. // read the tensor infos
  16081. {
  16082. ctx->infos = malloc(ctx->header.n_tensors * sizeof(struct gguf_tensor_info));
  16083. for (uint64_t i = 0; i < ctx->header.n_tensors; ++i) {
  16084. struct gguf_tensor_info * info = &ctx->infos[i];
  16085. for (int j = 0; j < GGML_MAX_DIMS; ++j) {
  16086. info->ne[j] = 1;
  16087. }
  16088. ok = ok && gguf_fread_str(file, &info->name, &offset);
  16089. ok = ok && gguf_fread_el (file, &info->n_dims, sizeof(info->n_dims), &offset);
  16090. for (uint32_t j = 0; j < info->n_dims; ++j) {
  16091. ok = ok && gguf_fread_el(file, &info->ne[j], sizeof(info->ne[j]), &offset);
  16092. }
  16093. ok = ok && gguf_fread_el (file, &info->type, sizeof(info->type), &offset);
  16094. ok = ok && gguf_fread_el (file, &info->offset, sizeof(info->offset), &offset);
  16095. if (!ok) {
  16096. fprintf(stderr, "%s: failed to read tensor info\n", __func__);
  16097. fclose(file);
  16098. gguf_free(ctx);
  16099. return NULL;
  16100. }
  16101. }
  16102. }
  16103. ctx->alignment = GGUF_DEFAULT_ALIGNMENT;
  16104. int alignment_idx = gguf_find_key(ctx, "general.alignment");
  16105. if (alignment_idx != -1) {
  16106. ctx->alignment = gguf_get_val_u32(ctx, alignment_idx);
  16107. }
  16108. // we require the data section to be aligned, so take into account any padding
  16109. {
  16110. const size_t offset_pad = offset % ctx->alignment;
  16111. if (offset_pad != 0) {
  16112. offset += ctx->alignment - offset_pad;
  16113. fseek(file, offset, SEEK_SET);
  16114. }
  16115. }
  16116. // store the current file offset - this is where the data section starts
  16117. ctx->offset = offset;
  16118. // compute the total size of the data section, taking into account the alignment
  16119. {
  16120. ctx->size = 0;
  16121. for (uint64_t i = 0; i < ctx->header.n_tensors; ++i) {
  16122. struct gguf_tensor_info * info = &ctx->infos[i];
  16123. const int64_t ne =
  16124. (int64_t) info->ne[0] *
  16125. (int64_t) info->ne[1] *
  16126. (int64_t) info->ne[2] *
  16127. (int64_t) info->ne[3];
  16128. if (ne % ggml_blck_size(info->type) != 0) {
  16129. fprintf(stderr, "%s: tensor '%s' of type %d (%s) number of elements (%" PRId64 ") is not a multiple of block size (%d)\n",
  16130. __func__, info->name.data, (int)info->type, ggml_type_name(info->type), ne, ggml_blck_size(info->type));
  16131. fclose(file);
  16132. gguf_free(ctx);
  16133. return NULL;
  16134. }
  16135. const size_t size_cur = ggml_row_size(info->type, ne);
  16136. ctx->size += GGML_PAD(size_cur, ctx->alignment);
  16137. }
  16138. }
  16139. // load the tensor data only if requested
  16140. if (params.ctx != NULL) {
16141. // if params.no_alloc is set, we create "empty" tensors and do not read the binary blob
16142. // otherwise, we load the binary blob into the created ggml_context as well, and point the "data" members of
16143. // the ggml_tensor structs to the appropriate locations in the binary blob
  16144. // compute the exact size needed for the new ggml_context
  16145. const size_t mem_size =
  16146. params.no_alloc ?
  16147. (ctx->header.n_tensors )*ggml_tensor_overhead() :
  16148. (ctx->header.n_tensors + 1)*ggml_tensor_overhead() + ctx->size;
  16149. struct ggml_init_params pdata = {
  16150. .mem_size = mem_size,
  16151. .mem_buffer = NULL,
  16152. .no_alloc = params.no_alloc,
  16153. };
  16154. *params.ctx = ggml_init(pdata);
  16155. struct ggml_context * ctx_data = *params.ctx;
  16156. struct ggml_tensor * data = NULL;
  16157. if (!params.no_alloc) {
  16158. data = ggml_new_tensor_1d(ctx_data, GGML_TYPE_I8, ctx->size);
  16159. ok = ok && data != NULL;
  16160. // read the binary blob with the tensor data
  16161. ok = ok && gguf_fread_el(file, data->data, ctx->size, &offset);
  16162. if (!ok) {
  16163. fprintf(stderr, "%s: failed to read tensor data\n", __func__);
  16164. fclose(file);
  16165. ggml_free(ctx_data);
  16166. gguf_free(ctx);
  16167. return NULL;
  16168. }
  16169. ctx->data = data->data;
  16170. }
  16171. ggml_set_no_alloc(ctx_data, true);
  16172. // create the tensors
  16173. for (uint64_t i = 0; i < ctx->header.n_tensors; ++i) {
  16174. const int64_t ne[GGML_MAX_DIMS] = {
  16175. ctx->infos[i].ne[0],
  16176. ctx->infos[i].ne[1],
  16177. ctx->infos[i].ne[2],
  16178. ctx->infos[i].ne[3],
  16179. };
  16180. struct ggml_tensor * cur = ggml_new_tensor(ctx_data, ctx->infos[i].type, ctx->infos[i].n_dims, ne);
  16181. ok = ok && cur != NULL;
  16182. ggml_set_name(cur, ctx->infos[i].name.data);
  16183. if (!ok) {
  16184. break;
  16185. }
  16186. // point the data member to the appropriate location in the binary blob using the tensor infos
  16187. if (!params.no_alloc) {
  16188. //cur->data = (char *) data->data + ctx->infos[i].offset - ctx->offset; // offset from start of file
  16189. cur->data = (char *) data->data + ctx->infos[i].offset; // offset from data
  16190. }
  16191. }
  16192. if (!ok) {
  16193. fprintf(stderr, "%s: failed to read the tensor data\n", __func__);
  16194. fclose(file);
  16195. ggml_free(ctx_data);
  16196. gguf_free(ctx);
  16197. return NULL;
  16198. }
  16199. ggml_set_no_alloc(ctx_data, params.no_alloc);
  16200. }
  16201. fclose(file);
  16202. return ctx;
  16203. }
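// usage sketch (editorial example): load a GGUF file together with its tensor
// data into a fresh ggml_context and enumerate the metadata and tensors. the
// function name and file name are illustrative.
static void ggml_example_gguf_dump(const char * fname) {
    struct ggml_context * ctx_data = NULL;

    struct gguf_init_params params = {
        /*.no_alloc =*/ false,     // also read the tensor data blob
        /*.ctx      =*/ &ctx_data, // receives the ggml_context holding the tensors
    };

    struct gguf_context * ctx = gguf_init_from_file(fname, params);
    if (ctx == NULL) {
        fprintf(stderr, "failed to load %s\n", fname);
        return;
    }

    for (int i = 0; i < gguf_get_n_kv(ctx); ++i) {
        printf("kv %d: %s (%s)\n", i, gguf_get_key(ctx, i), gguf_type_name(gguf_get_kv_type(ctx, i)));
    }
    for (int i = 0; i < gguf_get_n_tensors(ctx); ++i) {
        printf("tensor %d: %s\n", i, gguf_get_tensor_name(ctx, i));
    }

    gguf_free(ctx);
    ggml_free(ctx_data);
}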
  16204. void gguf_free(struct gguf_context * ctx) {
  16205. if (ctx == NULL) {
  16206. return;
  16207. }
  16208. if (ctx->kv) {
  16209. // free string memory - not great..
  16210. for (uint64_t i = 0; i < ctx->header.n_kv; ++i) {
  16211. struct gguf_kv * kv = &ctx->kv[i];
  16212. if (kv->key.data) {
  16213. free(kv->key.data);
  16214. }
  16215. if (kv->type == GGUF_TYPE_STRING) {
  16216. if (kv->value.str.data) {
  16217. free(kv->value.str.data);
  16218. }
  16219. }
  16220. if (kv->type == GGUF_TYPE_ARRAY) {
  16221. if (kv->value.arr.data) {
  16222. if (kv->value.arr.type == GGUF_TYPE_STRING) {
  16223. for (uint64_t j = 0; j < kv->value.arr.n; ++j) {
  16224. struct gguf_str * str = &((struct gguf_str *) kv->value.arr.data)[j];
  16225. if (str->data) {
  16226. free(str->data);
  16227. }
  16228. }
  16229. }
  16230. free(kv->value.arr.data);
  16231. }
  16232. }
  16233. }
  16234. free(ctx->kv);
  16235. }
  16236. if (ctx->infos) {
  16237. for (uint64_t i = 0; i < ctx->header.n_tensors; ++i) {
  16238. struct gguf_tensor_info * info = &ctx->infos[i];
  16239. if (info->name.data) {
  16240. free(info->name.data);
  16241. }
  16242. }
  16243. free(ctx->infos);
  16244. }
  16245. GGML_ALIGNED_FREE(ctx);
  16246. }
  16247. const char * gguf_type_name(enum gguf_type type) {
  16248. return GGUF_TYPE_NAME[type];
  16249. }
  16250. int gguf_get_version(const struct gguf_context * ctx) {
  16251. return ctx->header.version;
  16252. }
  16253. size_t gguf_get_alignment(const struct gguf_context * ctx) {
  16254. return ctx->alignment;
  16255. }
  16256. size_t gguf_get_data_offset(const struct gguf_context * ctx) {
  16257. return ctx->offset;
  16258. }
  16259. void * gguf_get_data(const struct gguf_context * ctx) {
  16260. return ctx->data;
  16261. }
  16262. int gguf_get_n_kv(const struct gguf_context * ctx) {
  16263. return ctx->header.n_kv;
  16264. }
  16265. int gguf_find_key(const struct gguf_context * ctx, const char * key) {
  16266. // return -1 if key not found
  16267. int keyfound = -1;
  16268. const int n_kv = gguf_get_n_kv(ctx);
  16269. for (int i = 0; i < n_kv; ++i) {
  16270. if (strcmp(key, gguf_get_key(ctx, i)) == 0) {
  16271. keyfound = i;
  16272. break;
  16273. }
  16274. }
  16275. return keyfound;
  16276. }
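// usage sketch (editorial example): read an optional u32 key with a default,
// checking the stored type before the typed getter asserts. the helper name
// is illustrative.
static uint32_t ggml_example_gguf_get_u32_or(const struct gguf_context * ctx, const char * key, uint32_t def) {
    const int idx = gguf_find_key(ctx, key);
    if (idx == -1 || gguf_get_kv_type(ctx, idx) != GGUF_TYPE_UINT32) {
        return def;
    }
    return gguf_get_val_u32(ctx, idx);
}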
  16277. const char * gguf_get_key(const struct gguf_context * ctx, int key_id) {
  16278. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  16279. return ctx->kv[key_id].key.data;
  16280. }
  16281. enum gguf_type gguf_get_kv_type(const struct gguf_context * ctx, int key_id) {
  16282. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  16283. return ctx->kv[key_id].type;
  16284. }
  16285. enum gguf_type gguf_get_arr_type(const struct gguf_context * ctx, int key_id) {
  16286. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  16287. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_ARRAY);
  16288. return ctx->kv[key_id].value.arr.type;
  16289. }
  16290. const void * gguf_get_arr_data(const struct gguf_context * ctx, int key_id) {
  16291. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  16292. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_ARRAY);
  16293. return ctx->kv[key_id].value.arr.data;
  16294. }
  16295. const char * gguf_get_arr_str(const struct gguf_context * ctx, int key_id, int i) {
  16296. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  16297. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_ARRAY);
  16298. struct gguf_kv * kv = &ctx->kv[key_id];
  16299. struct gguf_str * str = &((struct gguf_str *) kv->value.arr.data)[i];
  16300. return str->data;
  16301. }
  16302. int gguf_get_arr_n(const struct gguf_context * ctx, int key_id) {
  16303. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  16304. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_ARRAY);
  16305. return ctx->kv[key_id].value.arr.n;
  16306. }
  16307. uint8_t gguf_get_val_u8(const struct gguf_context * ctx, int key_id) {
  16308. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  16309. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_UINT8);
  16310. return ctx->kv[key_id].value.uint8;
  16311. }
  16312. int8_t gguf_get_val_i8(const struct gguf_context * ctx, int key_id) {
  16313. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  16314. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_INT8);
  16315. return ctx->kv[key_id].value.int8;
  16316. }
  16317. uint16_t gguf_get_val_u16(const struct gguf_context * ctx, int key_id) {
  16318. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  16319. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_UINT16);
  16320. return ctx->kv[key_id].value.uint16;
  16321. }
  16322. int16_t gguf_get_val_i16(const struct gguf_context * ctx, int key_id) {
  16323. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  16324. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_INT16);
  16325. return ctx->kv[key_id].value.int16;
  16326. }
  16327. uint32_t gguf_get_val_u32(const struct gguf_context * ctx, int key_id) {
  16328. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  16329. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_UINT32);
  16330. return ctx->kv[key_id].value.uint32;
  16331. }
  16332. int32_t gguf_get_val_i32(const struct gguf_context * ctx, int key_id) {
  16333. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  16334. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_INT32);
  16335. return ctx->kv[key_id].value.int32;
  16336. }
  16337. float gguf_get_val_f32(const struct gguf_context * ctx, int key_id) {
  16338. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  16339. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_FLOAT32);
  16340. return ctx->kv[key_id].value.float32;
  16341. }
  16342. uint64_t gguf_get_val_u64(const struct gguf_context * ctx, int key_id) {
  16343. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  16344. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_UINT64);
  16345. return ctx->kv[key_id].value.uint64;
  16346. }
  16347. int64_t gguf_get_val_i64(const struct gguf_context * ctx, int key_id) {
  16348. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  16349. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_INT64);
  16350. return ctx->kv[key_id].value.int64;
  16351. }
  16352. double gguf_get_val_f64(const struct gguf_context * ctx, int key_id) {
  16353. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  16354. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_FLOAT64);
  16355. return ctx->kv[key_id].value.float64;
  16356. }
  16357. bool gguf_get_val_bool(const struct gguf_context * ctx, int key_id) {
  16358. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  16359. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_BOOL);
  16360. return ctx->kv[key_id].value.bool_;
  16361. }
  16362. const char * gguf_get_val_str(const struct gguf_context * ctx, int key_id) {
  16363. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  16364. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_STRING);
  16365. return ctx->kv[key_id].value.str.data;
  16366. }
  16367. const void * gguf_get_val_data(const struct gguf_context * ctx, int key_id) {
  16368. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  16369. GGML_ASSERT(ctx->kv[key_id].type != GGUF_TYPE_ARRAY);
  16370. GGML_ASSERT(ctx->kv[key_id].type != GGUF_TYPE_STRING);
  16371. return &ctx->kv[key_id].value;
  16372. }
  16373. int gguf_get_n_tensors(const struct gguf_context * ctx) {
  16374. return ctx->header.n_tensors;
  16375. }
  16376. int gguf_find_tensor(const struct gguf_context * ctx, const char * name) {
  16377. // return -1 if tensor not found
  16378. int tensorfound = -1;
  16379. const int n_tensors = gguf_get_n_tensors(ctx);
  16380. for (int i = 0; i < n_tensors; ++i) {
  16381. if (strcmp(name, gguf_get_tensor_name(ctx, i)) == 0) {
  16382. tensorfound = i;
  16383. break;
  16384. }
  16385. }
  16386. return tensorfound;
  16387. }
  16388. size_t gguf_get_tensor_offset(const struct gguf_context * ctx, int i) {
  16389. return ctx->infos[i].offset;
  16390. }
  16391. char * gguf_get_tensor_name(const struct gguf_context * ctx, int i) {
  16392. return ctx->infos[i].name.data;
  16393. }
  16394. enum ggml_type gguf_get_tensor_type(const struct gguf_context * ctx, int i) {
  16395. return ctx->infos[i].type;
  16396. }
  16397. // returns the index
  16398. static int gguf_get_or_add_key(struct gguf_context * ctx, const char * key) {
  16399. const int idx = gguf_find_key(ctx, key);
  16400. if (idx >= 0) {
  16401. return idx;
  16402. }
  16403. const int n_kv = gguf_get_n_kv(ctx);
  16404. ctx->kv = realloc(ctx->kv, (n_kv + 1) * sizeof(struct gguf_kv));
  16405. ctx->kv[n_kv].key.n = strlen(key);
  16406. ctx->kv[n_kv].key.data = strdup(key);
  16407. ctx->header.n_kv++;
  16408. return n_kv;
  16409. }
  16410. void gguf_set_val_u8(struct gguf_context * ctx, const char * key, uint8_t val) {
  16411. const int idx = gguf_get_or_add_key(ctx, key);
  16412. ctx->kv[idx].type = GGUF_TYPE_UINT8;
  16413. ctx->kv[idx].value.uint8 = val;
  16414. }
  16415. void gguf_set_val_i8(struct gguf_context * ctx, const char * key, int8_t val) {
  16416. const int idx = gguf_get_or_add_key(ctx, key);
  16417. ctx->kv[idx].type = GGUF_TYPE_INT8;
  16418. ctx->kv[idx].value.int8 = val;
  16419. }
  16420. void gguf_set_val_u16(struct gguf_context * ctx, const char * key, uint16_t val) {
  16421. const int idx = gguf_get_or_add_key(ctx, key);
  16422. ctx->kv[idx].type = GGUF_TYPE_UINT16;
  16423. ctx->kv[idx].value.uint16 = val;
  16424. }
  16425. void gguf_set_val_i16(struct gguf_context * ctx, const char * key, int16_t val) {
  16426. const int idx = gguf_get_or_add_key(ctx, key);
  16427. ctx->kv[idx].type = GGUF_TYPE_INT16;
  16428. ctx->kv[idx].value.int16 = val;
  16429. }
  16430. void gguf_set_val_u32(struct gguf_context * ctx, const char * key, uint32_t val) {
  16431. const int idx = gguf_get_or_add_key(ctx, key);
  16432. ctx->kv[idx].type = GGUF_TYPE_UINT32;
  16433. ctx->kv[idx].value.uint32 = val;
  16434. }
  16435. void gguf_set_val_i32(struct gguf_context * ctx, const char * key, int32_t val) {
  16436. const int idx = gguf_get_or_add_key(ctx, key);
  16437. ctx->kv[idx].type = GGUF_TYPE_INT32;
  16438. ctx->kv[idx].value.int32 = val;
  16439. }
  16440. void gguf_set_val_f32(struct gguf_context * ctx, const char * key, float val) {
  16441. const int idx = gguf_get_or_add_key(ctx, key);
  16442. ctx->kv[idx].type = GGUF_TYPE_FLOAT32;
  16443. ctx->kv[idx].value.float32 = val;
  16444. }
  16445. void gguf_set_val_u64(struct gguf_context * ctx, const char * key, uint64_t val) {
  16446. const int idx = gguf_get_or_add_key(ctx, key);
  16447. ctx->kv[idx].type = GGUF_TYPE_UINT64;
  16448. ctx->kv[idx].value.uint64 = val;
  16449. }
  16450. void gguf_set_val_i64(struct gguf_context * ctx, const char * key, int64_t val) {
  16451. const int idx = gguf_get_or_add_key(ctx, key);
  16452. ctx->kv[idx].type = GGUF_TYPE_INT64;
  16453. ctx->kv[idx].value.int64 = val;
  16454. }
  16455. void gguf_set_val_f64(struct gguf_context * ctx, const char * key, double val) {
  16456. const int idx = gguf_get_or_add_key(ctx, key);
  16457. ctx->kv[idx].type = GGUF_TYPE_FLOAT64;
  16458. ctx->kv[idx].value.float64 = val;
  16459. }
  16460. void gguf_set_val_bool(struct gguf_context * ctx, const char * key, bool val) {
  16461. const int idx = gguf_get_or_add_key(ctx, key);
  16462. ctx->kv[idx].type = GGUF_TYPE_BOOL;
  16463. ctx->kv[idx].value.bool_ = val;
  16464. }
  16465. void gguf_set_val_str(struct gguf_context * ctx, const char * key, const char * val) {
  16466. const int idx = gguf_get_or_add_key(ctx, key);
  16467. ctx->kv[idx].type = GGUF_TYPE_STRING;
  16468. ctx->kv[idx].value.str.n = strlen(val);
  16469. ctx->kv[idx].value.str.data = strdup(val);
  16470. }
  16471. void gguf_set_arr_data(struct gguf_context * ctx, const char * key, enum gguf_type type, const void * data, int n) {
  16472. const int idx = gguf_get_or_add_key(ctx, key);
  16473. ctx->kv[idx].type = GGUF_TYPE_ARRAY;
  16474. ctx->kv[idx].value.arr.type = type;
  16475. ctx->kv[idx].value.arr.n = n;
  16476. ctx->kv[idx].value.arr.data = malloc(n*GGUF_TYPE_SIZE[type]);
  16477. memcpy(ctx->kv[idx].value.arr.data, data, n*GGUF_TYPE_SIZE[type]);
  16478. }
  16479. void gguf_set_arr_str(struct gguf_context * ctx, const char * key, const char ** data, int n) {
  16480. const int idx = gguf_get_or_add_key(ctx, key);
  16481. ctx->kv[idx].type = GGUF_TYPE_ARRAY;
  16482. ctx->kv[idx].value.arr.type = GGUF_TYPE_STRING;
  16483. ctx->kv[idx].value.arr.n = n;
  16484. ctx->kv[idx].value.arr.data = malloc(n*sizeof(struct gguf_str));
  16485. for (int i = 0; i < n; i++) {
  16486. struct gguf_str * str = &((struct gguf_str *)ctx->kv[idx].value.arr.data)[i];
  16487. str->n = strlen(data[i]);
  16488. str->data = strdup(data[i]);
  16489. }
  16490. }
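// usage sketch (editorial example): attach array-valued metadata; the key
// names and values are illustrative.
//
//   const int32_t sizes[4] = { 32, 64, 128, 256 };
//   gguf_set_arr_data(ctx, "example.block_sizes", GGUF_TYPE_INT32, sizes, 4);
//
//   const char * langs[2] = { "en", "de" };
//   gguf_set_arr_str(ctx, "example.languages", langs, 2);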
  16491. // set or add KV pairs from another context
  16492. void gguf_set_kv(struct gguf_context * ctx, struct gguf_context * src) {
  16493. for (uint32_t i = 0; i < src->header.n_kv; i++) {
  16494. switch (src->kv[i].type) {
  16495. case GGUF_TYPE_UINT8: gguf_set_val_u8 (ctx, src->kv[i].key.data, src->kv[i].value.uint8); break;
  16496. case GGUF_TYPE_INT8: gguf_set_val_i8 (ctx, src->kv[i].key.data, src->kv[i].value.int8); break;
  16497. case GGUF_TYPE_UINT16: gguf_set_val_u16 (ctx, src->kv[i].key.data, src->kv[i].value.uint16); break;
  16498. case GGUF_TYPE_INT16: gguf_set_val_i16 (ctx, src->kv[i].key.data, src->kv[i].value.int16); break;
  16499. case GGUF_TYPE_UINT32: gguf_set_val_u32 (ctx, src->kv[i].key.data, src->kv[i].value.uint32); break;
  16500. case GGUF_TYPE_INT32: gguf_set_val_i32 (ctx, src->kv[i].key.data, src->kv[i].value.int32); break;
  16501. case GGUF_TYPE_FLOAT32: gguf_set_val_f32 (ctx, src->kv[i].key.data, src->kv[i].value.float32); break;
  16502. case GGUF_TYPE_UINT64: gguf_set_val_u64 (ctx, src->kv[i].key.data, src->kv[i].value.uint64); break;
  16503. case GGUF_TYPE_INT64: gguf_set_val_i64 (ctx, src->kv[i].key.data, src->kv[i].value.int64); break;
  16504. case GGUF_TYPE_FLOAT64: gguf_set_val_f64 (ctx, src->kv[i].key.data, src->kv[i].value.float64); break;
  16505. case GGUF_TYPE_BOOL: gguf_set_val_bool(ctx, src->kv[i].key.data, src->kv[i].value.bool_); break;
  16506. case GGUF_TYPE_STRING: gguf_set_val_str (ctx, src->kv[i].key.data, src->kv[i].value.str.data); break;
  16507. case GGUF_TYPE_ARRAY:
  16508. {
  16509. if (src->kv[i].value.arr.type == GGUF_TYPE_STRING) {
  16510. const char ** data = malloc(src->kv[i].value.arr.n*sizeof(char *));
  16511. for (uint32_t j = 0; j < src->kv[i].value.arr.n; j++) {
  16512. data[j] = ((struct gguf_str *)src->kv[i].value.arr.data)[j].data;
  16513. }
  16514. gguf_set_arr_str(ctx, src->kv[i].key.data, data, src->kv[i].value.arr.n);
  16515. free((void *)data);
  16516. } else if (src->kv[i].value.arr.type == GGUF_TYPE_ARRAY) {
  16517. GGML_ASSERT(false && "nested arrays not supported");
  16518. } else {
  16519. gguf_set_arr_data(ctx, src->kv[i].key.data, src->kv[i].value.arr.type, src->kv[i].value.arr.data, src->kv[i].value.arr.n);
  16520. }
  16521. } break;
  16522. case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type"); break;
  16523. }
  16524. }
  16525. }
  16526. void gguf_add_tensor(
  16527. struct gguf_context * ctx,
  16528. const struct ggml_tensor * tensor) {
  16529. const int idx = ctx->header.n_tensors;
  16530. ctx->infos = realloc(ctx->infos, (idx + 1)*sizeof(struct gguf_tensor_info));
  16531. ctx->infos[idx].name.n = strlen(tensor->name);
  16532. ctx->infos[idx].name.data = strdup(tensor->name);
  16533. for (int i = 0; i < GGML_MAX_DIMS; ++i) {
  16534. ctx->infos[idx].ne[i] = 1;
  16535. }
  16536. ctx->infos[idx].n_dims = ggml_n_dims(tensor);
  16537. for (uint32_t i = 0; i < ctx->infos[idx].n_dims; i++) {
  16538. ctx->infos[idx].ne[i] = tensor->ne[i];
  16539. }
  16540. ctx->infos[idx].type = tensor->type;
  16541. ctx->infos[idx].offset = 0;
  16542. ctx->infos[idx].data = tensor->data;
  16543. ctx->infos[idx].size = ggml_nbytes(tensor);
  16544. if (ctx->header.n_tensors > 0) {
  16545. ctx->infos[idx].offset = ctx->infos[idx - 1].offset + GGML_PAD(ctx->infos[idx - 1].size, ctx->alignment);
  16546. }
  16547. ctx->header.n_tensors++;
  16548. }
  16549. void gguf_set_tensor_type(struct gguf_context * ctx, const char * name, enum ggml_type type) {
  16550. const int idx = gguf_find_tensor(ctx, name);
  16551. if (idx < 0) {
  16552. GGML_ASSERT(false && "tensor not found");
  16553. }
  16554. ctx->infos[idx].type = type;
  16555. }
  16556. void gguf_set_tensor_data(struct gguf_context * ctx, const char * name, const void * data, size_t size) {
  16557. const int idx = gguf_find_tensor(ctx, name);
  16558. if (idx < 0) {
  16559. GGML_ASSERT(false && "tensor not found");
  16560. }
  16561. ctx->infos[idx].data = data;
  16562. ctx->infos[idx].size = size;
  16563. // update offsets
  16564. for (uint32_t i = idx + 1; i < ctx->header.n_tensors; ++i) {
  16565. ctx->infos[i].offset = ctx->infos[i - 1].offset + GGML_PAD(ctx->infos[i - 1].size, ctx->alignment);
  16566. }
  16567. }
  16568. //static void gguf_fwrite_str(FILE * file, const struct gguf_str * val) {
  16569. // fwrite(&val->n, sizeof(val->n), 1, file);
  16570. // fwrite(val->data, sizeof(char), val->n, file);
  16571. //}
  16572. //
  16573. //static void gguf_fwrite_el(FILE * file, const void * val, size_t size) {
  16574. // fwrite(val, sizeof(char), size, file);
  16575. //}
  16576. struct gguf_buf {
  16577. void * data;
  16578. size_t size;
  16579. size_t offset;
  16580. };
  16581. static struct gguf_buf gguf_buf_init(size_t size) {
  16582. struct gguf_buf buf = {
  16583. /*buf.data =*/ size == 0 ? NULL : malloc(size),
  16584. /*buf.size =*/ size,
  16585. /*buf.offset =*/ 0,
  16586. };
  16587. return buf;
  16588. }
  16589. static void gguf_buf_free(struct gguf_buf buf) {
  16590. if (buf.data) {
  16591. free(buf.data);
  16592. }
  16593. }
  16594. static void gguf_buf_grow(struct gguf_buf * buf, size_t size) {
  16595. if (buf->offset + size > buf->size) {
  16596. buf->size = 1.5*(buf->offset + size);
  16597. if (buf->data) {
  16598. buf->data = realloc(buf->data, buf->size);
  16599. }
  16600. }
  16601. }
  16602. static void gguf_bwrite_str(struct gguf_buf * buf, const struct gguf_str * val) {
  16603. gguf_buf_grow(buf, sizeof(val->n) + val->n);
  16604. if (buf->data) {
  16605. memcpy((char *) buf->data + buf->offset, &val->n, sizeof(val->n));
  16606. }
  16607. buf->offset += sizeof(val->n);
  16608. if (buf->data) {
  16609. memcpy((char *) buf->data + buf->offset, val->data, val->n);
  16610. }
  16611. buf->offset += val->n;
  16612. }
  16613. static void gguf_bwrite_el(struct gguf_buf * buf, const void * val, size_t el_size) {
  16614. gguf_buf_grow(buf, el_size);
  16615. if (buf->data) {
  16616. memcpy((char *) buf->data + buf->offset, val, el_size);
  16617. }
  16618. buf->offset += el_size;
  16619. }
  16620. static void gguf_write_to_buf(const struct gguf_context * ctx, struct gguf_buf * buf, bool only_meta) {
  16621. // write header
  16622. gguf_bwrite_el(buf, &ctx->header.magic, sizeof(ctx->header.magic));
  16623. gguf_bwrite_el(buf, &ctx->header.version, sizeof(ctx->header.version));
  16624. gguf_bwrite_el(buf, &ctx->header.n_tensors, sizeof(ctx->header.n_tensors));
  16625. gguf_bwrite_el(buf, &ctx->header.n_kv, sizeof(ctx->header.n_kv));
  16626. // write key-value pairs
  16627. for (uint32_t i = 0; i < ctx->header.n_kv; ++i) {
  16628. struct gguf_kv * kv = &ctx->kv[i];
  16629. gguf_bwrite_str(buf, &kv->key);
  16630. gguf_bwrite_el (buf, &kv->type, sizeof(kv->type));
  16631. switch (kv->type) {
16632. case GGUF_TYPE_UINT8: gguf_bwrite_el (buf, &kv->value.uint8, sizeof(kv->value.uint8) ); break;
  16633. case GGUF_TYPE_INT8: gguf_bwrite_el (buf, &kv->value.int8, sizeof(kv->value.int8) ); break;
  16634. case GGUF_TYPE_UINT16: gguf_bwrite_el (buf, &kv->value.uint16, sizeof(kv->value.uint16) ); break;
  16635. case GGUF_TYPE_INT16: gguf_bwrite_el (buf, &kv->value.int16, sizeof(kv->value.int16) ); break;
  16636. case GGUF_TYPE_UINT32: gguf_bwrite_el (buf, &kv->value.uint32, sizeof(kv->value.uint32) ); break;
  16637. case GGUF_TYPE_INT32: gguf_bwrite_el (buf, &kv->value.int32, sizeof(kv->value.int32) ); break;
  16638. case GGUF_TYPE_FLOAT32: gguf_bwrite_el (buf, &kv->value.float32, sizeof(kv->value.float32)); break;
  16639. case GGUF_TYPE_UINT64: gguf_bwrite_el (buf, &kv->value.uint64, sizeof(kv->value.uint64) ); break;
  16640. case GGUF_TYPE_INT64: gguf_bwrite_el (buf, &kv->value.int64, sizeof(kv->value.int64) ); break;
  16641. case GGUF_TYPE_FLOAT64: gguf_bwrite_el (buf, &kv->value.float64, sizeof(kv->value.float64)); break;
  16642. case GGUF_TYPE_BOOL: gguf_bwrite_el (buf, &kv->value.bool_, sizeof(kv->value.bool_) ); break;
  16643. case GGUF_TYPE_STRING: gguf_bwrite_str(buf, &kv->value.str ); break;
  16644. case GGUF_TYPE_ARRAY:
  16645. {
  16646. gguf_bwrite_el(buf, &kv->value.arr.type, sizeof(kv->value.arr.type));
  16647. gguf_bwrite_el(buf, &kv->value.arr.n, sizeof(kv->value.arr.n) );
  16648. switch (kv->value.arr.type) {
  16649. case GGUF_TYPE_UINT8:
  16650. case GGUF_TYPE_INT8:
  16651. case GGUF_TYPE_UINT16:
  16652. case GGUF_TYPE_INT16:
  16653. case GGUF_TYPE_UINT32:
  16654. case GGUF_TYPE_INT32:
  16655. case GGUF_TYPE_FLOAT32:
  16656. case GGUF_TYPE_UINT64:
  16657. case GGUF_TYPE_INT64:
  16658. case GGUF_TYPE_FLOAT64:
  16659. case GGUF_TYPE_BOOL:
  16660. {
  16661. gguf_bwrite_el(buf, kv->value.arr.data, kv->value.arr.n * GGUF_TYPE_SIZE[kv->value.arr.type]);
  16662. } break;
  16663. case GGUF_TYPE_STRING:
  16664. {
  16665. for (uint32_t j = 0; j < kv->value.arr.n; ++j) {
  16666. gguf_bwrite_str(buf, &((struct gguf_str *) kv->value.arr.data)[j]);
  16667. }
  16668. } break;
  16669. case GGUF_TYPE_ARRAY:
  16670. case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type"); break;
  16671. }
  16672. } break;
  16673. case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type");
  16674. }
  16675. }
  16676. // write tensor infos
  16677. for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) {
  16678. struct gguf_tensor_info * info = &ctx->infos[i];
  16679. gguf_bwrite_str(buf, &info->name);
  16680. gguf_bwrite_el (buf, &info->n_dims, sizeof(info->n_dims));
  16681. for (uint32_t j = 0; j < info->n_dims; ++j) {
  16682. gguf_bwrite_el(buf, &info->ne[j], sizeof(info->ne[j]));
  16683. }
  16684. gguf_bwrite_el(buf, &info->type, sizeof(info->type));
  16685. gguf_bwrite_el(buf, &info->offset, sizeof(info->offset));
  16686. }
  16687. // we require the data section to be aligned, so take into account any padding
  16688. {
  16689. const size_t offset = buf->offset;
  16690. const size_t offset_pad = GGML_PAD(offset, ctx->alignment);
  16691. if (offset_pad != offset) {
  16692. uint8_t pad = 0;
  16693. for (size_t i = 0; i < offset_pad - offset; ++i) {
  16694. gguf_bwrite_el(buf, &pad, sizeof(pad));
  16695. }
  16696. }
  16697. }
  16698. if (only_meta) {
  16699. return;
  16700. }
  16701. size_t offset = 0;
  16702. // write tensor data
  16703. for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) {
  16704. struct gguf_tensor_info * info = &ctx->infos[i];
  16705. const size_t size = info->size;
  16706. const size_t size_pad = GGML_PAD(size, ctx->alignment);
  16707. gguf_bwrite_el(buf, info->data, size);
  16708. if (size_pad != size) {
  16709. uint8_t pad = 0;
  16710. for (size_t j = 0; j < size_pad - size; ++j) {
  16711. gguf_bwrite_el(buf, &pad, sizeof(pad));
  16712. }
  16713. }
  16714. GGML_ASSERT(offset == info->offset);
  16715. offset += size_pad;
  16716. }
  16717. }
  16718. void gguf_write_to_file(const struct gguf_context * ctx, const char * fname, bool only_meta) {
  16719. FILE * file = fopen(fname, "wb");
  16720. if (!file) {
  16721. GGML_ASSERT(false && "failed to open file for writing");
  16722. }
  16723. struct gguf_buf buf = gguf_buf_init(16*1024);
  16724. gguf_write_to_buf(ctx, &buf, only_meta);
  16725. fwrite(buf.data, 1, buf.offset, file);
  16726. gguf_buf_free(buf);
  16727. fclose(file);
  16728. }
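// usage sketch (editorial example): build a GGUF file from scratch. the key
// names, file name and function name are illustrative; `t` is any ggml tensor
// whose data is resident in memory.
static void ggml_example_gguf_write(const struct ggml_tensor * t) {
    struct gguf_context * ctx = gguf_init_empty();

    gguf_set_val_str(ctx, "general.name", "example");
    gguf_set_val_u32(ctx, "example.version", 1);

    gguf_add_tensor(ctx, t); // records name/shape/type and computes the padded offset

    gguf_write_to_file(ctx, "example.gguf", /*only_meta =*/ false);

    gguf_free(ctx);
}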
  16729. size_t gguf_get_meta_size(const struct gguf_context * ctx) {
  16730. // no allocs - only compute size
  16731. struct gguf_buf buf = gguf_buf_init(0);
  16732. gguf_write_to_buf(ctx, &buf, true);
  16733. return buf.offset;
  16734. }
  16735. void gguf_get_meta_data(const struct gguf_context * ctx, void * data) {
  16736. struct gguf_buf buf = gguf_buf_init(16*1024);
  16737. gguf_write_to_buf(ctx, &buf, true);
  16738. memcpy(data, buf.data, buf.offset);
  16739. gguf_buf_free(buf);
  16740. }
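// usage sketch (editorial example): serialize only the metadata into a caller
// provided buffer (two-pass: size query, then copy).
//
//   const size_t meta_size = gguf_get_meta_size(ctx);
//   void * meta = malloc(meta_size);
//   gguf_get_meta_data(ctx, meta);
//   // ... hand `meta` to a consumer that maps the tensor data separately ...
//   free(meta);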
  16741. ////////////////////////////////////////////////////////////////////////////////
  16742. int ggml_cpu_has_avx(void) {
  16743. #if defined(__AVX__)
  16744. return 1;
  16745. #else
  16746. return 0;
  16747. #endif
  16748. }
  16749. int ggml_cpu_has_avx_vnni(void) {
  16750. #if defined(__AVXVNNI__)
  16751. return 1;
  16752. #else
  16753. return 0;
  16754. #endif
  16755. }
  16756. int ggml_cpu_has_avx2(void) {
  16757. #if defined(__AVX2__)
  16758. return 1;
  16759. #else
  16760. return 0;
  16761. #endif
  16762. }
  16763. int ggml_cpu_has_avx512(void) {
  16764. #if defined(__AVX512F__)
  16765. return 1;
  16766. #else
  16767. return 0;
  16768. #endif
  16769. }
  16770. int ggml_cpu_has_avx512_vbmi(void) {
  16771. #if defined(__AVX512VBMI__)
  16772. return 1;
  16773. #else
  16774. return 0;
  16775. #endif
  16776. }
  16777. int ggml_cpu_has_avx512_vnni(void) {
  16778. #if defined(__AVX512VNNI__)
  16779. return 1;
  16780. #else
  16781. return 0;
  16782. #endif
  16783. }
  16784. int ggml_cpu_has_fma(void) {
  16785. #if defined(__FMA__)
  16786. return 1;
  16787. #else
  16788. return 0;
  16789. #endif
  16790. }
  16791. int ggml_cpu_has_neon(void) {
  16792. #if defined(__ARM_NEON)
  16793. return 1;
  16794. #else
  16795. return 0;
  16796. #endif
  16797. }
  16798. int ggml_cpu_has_arm_fma(void) {
  16799. #if defined(__ARM_FEATURE_FMA)
  16800. return 1;
  16801. #else
  16802. return 0;
  16803. #endif
  16804. }
  16805. int ggml_cpu_has_metal(void) {
  16806. #if defined(GGML_USE_METAL)
  16807. return 1;
  16808. #else
  16809. return 0;
  16810. #endif
  16811. }
  16812. int ggml_cpu_has_f16c(void) {
  16813. #if defined(__F16C__)
  16814. return 1;
  16815. #else
  16816. return 0;
  16817. #endif
  16818. }
  16819. int ggml_cpu_has_fp16_va(void) {
  16820. #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
  16821. return 1;
  16822. #else
  16823. return 0;
  16824. #endif
  16825. }
  16826. int ggml_cpu_has_wasm_simd(void) {
  16827. #if defined(__wasm_simd128__)
  16828. return 1;
  16829. #else
  16830. return 0;
  16831. #endif
  16832. }
  16833. int ggml_cpu_has_blas(void) {
  16834. #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) || defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST)
  16835. return 1;
  16836. #else
  16837. return 0;
  16838. #endif
  16839. }
  16840. int ggml_cpu_has_cublas(void) {
  16841. #if defined(GGML_USE_CUBLAS)
  16842. return 1;
  16843. #else
  16844. return 0;
  16845. #endif
  16846. }
  16847. int ggml_cpu_has_clblast(void) {
  16848. #if defined(GGML_USE_CLBLAST)
  16849. return 1;
  16850. #else
  16851. return 0;
  16852. #endif
  16853. }
  16854. int ggml_cpu_has_gpublas(void) {
  16855. return ggml_cpu_has_cublas() || ggml_cpu_has_clblast();
  16856. }
  16857. int ggml_cpu_has_sse3(void) {
  16858. #if defined(__SSE3__)
  16859. return 1;
  16860. #else
  16861. return 0;
  16862. #endif
  16863. }
  16864. int ggml_cpu_has_ssse3(void) {
  16865. #if defined(__SSSE3__)
  16866. return 1;
  16867. #else
  16868. return 0;
  16869. #endif
  16870. }
  16871. int ggml_cpu_has_vsx(void) {
  16872. #if defined(__POWER9_VECTOR__)
  16873. return 1;
  16874. #else
  16875. return 0;
  16876. #endif
  16877. }
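// usage sketch (editorial example): print a small compile-time feature report.
// each ggml_cpu_has_* function returns 1 when the corresponding instruction
// set or backend was enabled at build time, 0 otherwise. the function name is
// illustrative.
static void ggml_example_print_features(void) {
    printf("AVX = %d | AVX2 = %d | AVX512 = %d | FMA = %d | NEON = %d | F16C = %d | BLAS = %d\n",
            ggml_cpu_has_avx(), ggml_cpu_has_avx2(), ggml_cpu_has_avx512(),
            ggml_cpu_has_fma(), ggml_cpu_has_neon(), ggml_cpu_has_f16c(),
            ggml_cpu_has_blas());
}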
  16878. ////////////////////////////////////////////////////////////////////////////////