ggml.c 741 KB

(file contents not captured; only the viewer's line-number gutter was extracted — the file spans roughly 14,308 lines)
430914310143111431214313143141431514316143171431814319143201432114322143231432414325143261432714328143291433014331143321433314334143351433614337143381433914340143411434214343143441434514346143471434814349143501435114352143531435414355143561435714358143591436014361143621436314364143651436614367143681436914370143711437214373143741437514376143771437814379143801438114382143831438414385143861438714388143891439014391143921439314394143951439614397143981439914400144011440214403144041440514406144071440814409144101441114412144131441414415144161441714418144191442014421144221442314424144251442614427144281442914430144311443214433144341443514436144371443814439144401444114442144431444414445144461444714448144491445014451144521445314454144551445614457144581445914460144611446214463144641446514466144671446814469144701447114472144731447414475144761447714478144791448014481144821448314484144851448614487144881448914490144911449214493144941449514496144971449814499145001450114502145031450414505145061450714508145091451014511145121451314514145151451614517145181451914520145211452214523145241452514526145271452814529145301453114532145331453414535145361453714538145391454014541145421454314544145451454614547145481454914550145511455214553145541455514556145571455814559145601456114562145631456414565145661456714568145691457014571145721457314574145751457614577145781457914580145811458214583145841458514586145871458814589145901459114592145931459414595145961459714598145991460014601146021460314604146051460614607146081460914610146111461214613146141461514616146171461814619146201462114622146231462414625146261462714628146291463014631146321463314634146351463614637146381463914640146411464214643146441464514646146471464814649146501465114652146531465414655146561465714658146591466014661146621466314664146651466614667146681466914670146711467214673146741467514676146771467814679146801468114682146831468414685146861468714688146891469014691146921469314694146951469614697146981469914700147011470214703147041470514706147071470814709147101471114712147131471414715147161471714718147191472014721147221472314724147251472614727147281472914730147311473214733147341473514736147371473814739147401474114742147431474414745147461474714748147491475014751147521475314754147551475614757147581475914760147611476214763147641476514766147671476814769147701477114772147731477414775147761477714778147791478014781147821478314784147851478614787147881478914790147911479214793147941479514796147971479814799148001480114802148031480414805148061480714808148091481014811148121481314814148151481614817148181481914820148211482214823148241482514826148271482814829148301483114832148331483414835148361483714838148391484014841148421484314844148451484614847148481484914850148511485214853148541485514856148571485814859148601486114862148631486414865148661486714868148691487014871148721487314874148751487614877148781487914880148811488214883148841488514886148871488814889148901489114892148931489414895148961489714898148991490014901149021490314904149051490614907149081490914910149111491214913149141491514916149171491814919149201492114922149231492414925149261492714928149291493014931149321493314934149351493614937149381493914940149411494214943149441494514946149471494814949149501495114952149531495414955149561495714958149591496014961149621496314964149651496614967149681496914970149711497214973149741497514976149771497814979149801498114982149831498414985149861498714988149891499014991149921499314994149951499614997149981499915000150011500215003150041500515006150071500815009150101501115012150131501415015150161501715018150191
502015021150221502315024150251502615027150281502915030150311503215033150341503515036150371503815039150401504115042150431504415045150461504715048150491505015051150521505315054150551505615057150581505915060150611506215063150641506515066150671506815069150701507115072150731507415075150761507715078150791508015081150821508315084150851508615087150881508915090150911509215093150941509515096150971509815099151001510115102151031510415105151061510715108151091511015111151121511315114151151511615117151181511915120151211512215123151241512515126151271512815129151301513115132151331513415135151361513715138151391514015141151421514315144151451514615147151481514915150151511515215153151541515515156151571515815159151601516115162151631516415165151661516715168151691517015171151721517315174151751517615177151781517915180151811518215183151841518515186151871518815189151901519115192151931519415195151961519715198151991520015201152021520315204152051520615207152081520915210152111521215213152141521515216152171521815219152201522115222152231522415225152261522715228152291523015231152321523315234152351523615237152381523915240152411524215243152441524515246152471524815249152501525115252152531525415255152561525715258152591526015261152621526315264152651526615267152681526915270152711527215273152741527515276152771527815279152801528115282152831528415285152861528715288152891529015291152921529315294152951529615297152981529915300153011530215303153041530515306153071530815309153101531115312153131531415315153161531715318153191532015321153221532315324153251532615327153281532915330153311533215333153341533515336153371533815339153401534115342153431534415345153461534715348153491535015351153521535315354153551535615357153581535915360153611536215363153641536515366153671536815369153701537115372153731537415375153761537715378153791538015381153821538315384153851538615387153881538915390153911539215393153941539515396153971539815399154001540115402154031540415405154061540715408154091541015411154121541315414154151541615417154181541915420154211542215423154241542515426154271542815429154301543115432154331543415435154361543715438154391544015441154421544315444154451544615447154481544915450154511545215453154541545515456154571545815459154601546115462154631546415465154661546715468154691547015471154721547315474154751547615477154781547915480154811548215483154841548515486154871548815489154901549115492154931549415495154961549715498154991550015501155021550315504155051550615507155081550915510155111551215513155141551515516155171551815519155201552115522155231552415525155261552715528155291553015531155321553315534155351553615537155381553915540155411554215543155441554515546155471554815549155501555115552155531555415555155561555715558155591556015561155621556315564155651556615567155681556915570155711557215573155741557515576155771557815579155801558115582155831558415585155861558715588155891559015591155921559315594155951559615597155981559915600156011560215603156041560515606156071560815609156101561115612156131561415615156161561715618156191562015621156221562315624156251562615627156281562915630156311563215633156341563515636156371563815639156401564115642156431564415645156461564715648156491565015651156521565315654156551565615657156581565915660156611566215663156641566515666156671566815669156701567115672156731567415675156761567715678156791568015681156821568315684156851568615687156881568915690156911569215693156941569515696156971569815699157001570115702157031570415705157061570715708157091571015711157121571315714157151571615717157181571915720157211572215723157241572515726157271572815729157301
573115732157331573415735157361573715738157391574015741157421574315744157451574615747157481574915750157511575215753157541575515756157571575815759157601576115762157631576415765157661576715768157691577015771157721577315774157751577615777157781577915780157811578215783157841578515786157871578815789157901579115792157931579415795157961579715798157991580015801158021580315804158051580615807158081580915810158111581215813158141581515816158171581815819158201582115822158231582415825158261582715828158291583015831158321583315834158351583615837158381583915840158411584215843158441584515846158471584815849158501585115852158531585415855158561585715858158591586015861158621586315864158651586615867158681586915870158711587215873158741587515876158771587815879158801588115882158831588415885158861588715888158891589015891158921589315894158951589615897158981589915900159011590215903159041590515906159071590815909159101591115912159131591415915159161591715918159191592015921159221592315924159251592615927159281592915930159311593215933159341593515936159371593815939159401594115942159431594415945159461594715948159491595015951159521595315954159551595615957159581595915960159611596215963159641596515966159671596815969159701597115972159731597415975159761597715978159791598015981159821598315984159851598615987159881598915990159911599215993159941599515996159971599815999160001600116002160031600416005160061600716008160091601016011160121601316014160151601616017160181601916020160211602216023160241602516026160271602816029160301603116032160331603416035160361603716038160391604016041160421604316044160451604616047160481604916050160511605216053160541605516056160571605816059160601606116062160631606416065160661606716068160691607016071160721607316074160751607616077160781607916080160811608216083160841608516086160871608816089160901609116092160931609416095160961609716098160991610016101161021610316104161051610616107161081610916110161111611216113161141611516116161171611816119161201612116122161231612416125161261612716128161291613016131161321613316134161351613616137161381613916140161411614216143161441614516146161471614816149161501615116152161531615416155161561615716158161591616016161161621616316164161651616616167161681616916170161711617216173161741617516176161771617816179161801618116182161831618416185161861618716188161891619016191161921619316194161951619616197161981619916200162011620216203162041620516206162071620816209162101621116212162131621416215162161621716218162191622016221162221622316224162251622616227162281622916230162311623216233162341623516236162371623816239162401624116242162431624416245162461624716248162491625016251162521625316254162551625616257162581625916260162611626216263162641626516266162671626816269162701627116272162731627416275162761627716278162791628016281162821628316284162851628616287162881628916290162911629216293162941629516296162971629816299163001630116302163031630416305163061630716308163091631016311163121631316314163151631616317163181631916320163211632216323163241632516326163271632816329163301633116332163331633416335163361633716338163391634016341163421634316344163451634616347163481634916350163511635216353163541635516356163571635816359163601636116362163631636416365163661636716368163691637016371163721637316374163751637616377163781637916380163811638216383163841638516386163871638816389163901639116392163931639416395163961639716398163991640016401164021640316404164051640616407164081640916410164111641216413164141641516416164171641816419164201642116422164231642416425164261642716428164291643016431164321643316434164351643616437164381643916440164411
644216443164441644516446164471644816449164501645116452164531645416455164561645716458164591646016461164621646316464164651646616467164681646916470164711647216473164741647516476164771647816479164801648116482164831648416485164861648716488164891649016491164921649316494164951649616497164981649916500165011650216503165041650516506165071650816509165101651116512165131651416515165161651716518165191652016521165221652316524165251652616527165281652916530165311653216533165341653516536165371653816539165401654116542165431654416545165461654716548165491655016551165521655316554165551655616557165581655916560165611656216563165641656516566165671656816569165701657116572165731657416575165761657716578165791658016581165821658316584165851658616587165881658916590165911659216593165941659516596165971659816599166001660116602166031660416605166061660716608166091661016611166121661316614166151661616617166181661916620166211662216623166241662516626166271662816629166301663116632166331663416635166361663716638166391664016641166421664316644166451664616647166481664916650166511665216653166541665516656166571665816659166601666116662166631666416665166661666716668166691667016671166721667316674166751667616677166781667916680166811668216683166841668516686166871668816689166901669116692166931669416695166961669716698166991670016701167021670316704167051670616707167081670916710167111671216713167141671516716167171671816719167201672116722167231672416725167261672716728167291673016731167321673316734167351673616737167381673916740167411674216743167441674516746167471674816749167501675116752167531675416755167561675716758167591676016761167621676316764167651676616767167681676916770167711677216773167741677516776167771677816779167801678116782167831678416785167861678716788167891679016791167921679316794167951679616797167981679916800168011680216803168041680516806168071680816809168101681116812168131681416815168161681716818168191682016821168221682316824168251682616827168281682916830168311683216833168341683516836168371683816839168401684116842168431684416845168461684716848168491685016851168521685316854168551685616857168581685916860168611686216863168641686516866168671686816869168701687116872168731687416875168761687716878168791688016881168821688316884168851688616887168881688916890168911689216893168941689516896168971689816899169001690116902169031690416905169061690716908169091691016911169121691316914169151691616917169181691916920169211692216923169241692516926169271692816929169301693116932169331693416935169361693716938169391694016941169421694316944169451694616947169481694916950169511695216953169541695516956169571695816959169601696116962169631696416965169661696716968169691697016971169721697316974169751697616977169781697916980169811698216983169841698516986169871698816989169901699116992169931699416995169961699716998169991700017001170021700317004170051700617007170081700917010170111701217013170141701517016170171701817019170201702117022170231702417025170261702717028170291703017031170321703317034170351703617037170381703917040170411704217043170441704517046170471704817049170501705117052170531705417055170561705717058170591706017061170621706317064170651706617067170681706917070170711707217073170741707517076170771707817079170801708117082170831708417085170861708717088170891709017091170921709317094170951709617097170981709917100171011710217103171041710517106171071710817109171101711117112171131711417115171161711717118171191712017121171221712317124171251712617127171281712917130171311713217133171341713517136171371713817139171401714117142171431714417145171461714717148171491715017151171521
715317154171551715617157171581715917160171611716217163171641716517166171671716817169171701717117172171731717417175171761717717178171791718017181171821718317184171851718617187171881718917190171911719217193171941719517196171971719817199172001720117202172031720417205172061720717208172091721017211172121721317214172151721617217172181721917220172211722217223172241722517226172271722817229172301723117232172331723417235172361723717238172391724017241172421724317244172451724617247172481724917250172511725217253172541725517256172571725817259172601726117262172631726417265172661726717268172691727017271172721727317274172751727617277172781727917280172811728217283172841728517286172871728817289172901729117292172931729417295172961729717298172991730017301173021730317304173051730617307173081730917310173111731217313173141731517316173171731817319173201732117322173231732417325173261732717328173291733017331173321733317334173351733617337173381733917340173411734217343173441734517346173471734817349173501735117352173531735417355173561735717358173591736017361173621736317364173651736617367173681736917370173711737217373173741737517376173771737817379173801738117382173831738417385173861738717388173891739017391173921739317394173951739617397173981739917400174011740217403174041740517406174071740817409174101741117412174131741417415174161741717418174191742017421174221742317424174251742617427174281742917430174311743217433174341743517436174371743817439174401744117442174431744417445174461744717448174491745017451174521745317454174551745617457174581745917460174611746217463174641746517466174671746817469174701747117472174731747417475174761747717478174791748017481174821748317484174851748617487174881748917490174911749217493174941749517496174971749817499175001750117502175031750417505175061750717508175091751017511175121751317514175151751617517175181751917520175211752217523175241752517526175271752817529175301753117532175331753417535175361753717538175391754017541175421754317544175451754617547175481754917550175511755217553175541755517556175571755817559175601756117562175631756417565175661756717568175691757017571175721757317574175751757617577175781757917580175811758217583175841758517586175871758817589175901759117592175931759417595175961759717598175991760017601176021760317604176051760617607176081760917610176111761217613176141761517616176171761817619176201762117622176231762417625176261762717628176291763017631176321763317634176351763617637176381763917640176411764217643176441764517646176471764817649176501765117652176531765417655176561765717658176591766017661176621766317664176651766617667176681766917670176711767217673176741767517676176771767817679176801768117682176831768417685176861768717688176891769017691176921769317694176951769617697176981769917700177011770217703177041770517706177071770817709177101771117712177131771417715177161771717718177191772017721177221772317724177251772617727177281772917730177311773217733177341773517736177371773817739177401774117742177431774417745177461774717748177491775017751177521775317754177551775617757177581775917760177611776217763177641776517766177671776817769177701777117772177731777417775177761777717778177791778017781177821778317784177851778617787177881778917790177911779217793177941779517796177971779817799178001780117802178031780417805178061780717808178091781017811178121781317814178151781617817178181781917820178211782217823178241782517826178271782817829178301783117832178331783417835178361783717838178391784017841178421784317844178451784617847178481784917850178511785217853178541785517856178571785817859178601786117862178631
786417865178661786717868178691787017871178721787317874178751787617877178781787917880178811788217883178841788517886178871788817889178901789117892178931789417895178961789717898178991790017901179021790317904179051790617907179081790917910179111791217913179141791517916179171791817919179201792117922179231792417925179261792717928179291793017931179321793317934179351793617937179381793917940179411794217943179441794517946179471794817949179501795117952179531795417955179561795717958179591796017961179621796317964179651796617967179681796917970179711797217973179741797517976179771797817979179801798117982179831798417985179861798717988179891799017991179921799317994179951799617997179981799918000180011800218003180041800518006180071800818009180101801118012180131801418015180161801718018180191802018021180221802318024180251802618027180281802918030180311803218033180341803518036180371803818039180401804118042180431804418045180461804718048180491805018051180521805318054180551805618057180581805918060180611806218063180641806518066180671806818069180701807118072180731807418075180761807718078180791808018081180821808318084180851808618087180881808918090180911809218093180941809518096180971809818099181001810118102181031810418105181061810718108181091811018111181121811318114181151811618117181181811918120181211812218123181241812518126181271812818129181301813118132181331813418135181361813718138181391814018141181421814318144181451814618147181481814918150181511815218153181541815518156181571815818159181601816118162181631816418165181661816718168181691817018171181721817318174181751817618177181781817918180181811818218183181841818518186181871818818189181901819118192181931819418195181961819718198181991820018201182021820318204182051820618207182081820918210182111821218213182141821518216182171821818219182201822118222182231822418225182261822718228182291823018231182321823318234182351823618237182381823918240182411824218243182441824518246182471824818249182501825118252182531825418255182561825718258182591826018261182621826318264182651826618267182681826918270182711827218273182741827518276182771827818279182801828118282182831828418285182861828718288182891829018291182921829318294182951829618297182981829918300183011830218303183041830518306183071830818309183101831118312183131831418315183161831718318183191832018321183221832318324183251832618327183281832918330183311833218333183341833518336183371833818339183401834118342183431834418345183461834718348183491835018351183521835318354183551835618357183581835918360183611836218363183641836518366183671836818369183701837118372183731837418375183761837718378183791838018381183821838318384183851838618387183881838918390183911839218393183941839518396183971839818399184001840118402184031840418405184061840718408184091841018411184121841318414184151841618417184181841918420184211842218423184241842518426184271842818429184301843118432184331843418435184361843718438184391844018441184421844318444184451844618447184481844918450184511845218453184541845518456184571845818459184601846118462184631846418465184661846718468184691847018471184721847318474184751847618477184781847918480184811848218483184841848518486184871848818489184901849118492184931849418495184961849718498184991850018501185021850318504185051850618507185081850918510185111851218513185141851518516185171851818519185201852118522185231852418525185261852718528185291853018531185321853318534185351853618537185381853918540185411854218543185441854518546185471854818549185501855118552185531855418555185561855718558185591856018561185621856318564185651856618567185681856918570185711857218573185741
857518576185771857818579185801858118582185831858418585185861858718588185891859018591185921859318594185951859618597185981859918600186011860218603186041860518606186071860818609186101861118612186131861418615186161861718618186191862018621186221862318624186251862618627186281862918630186311863218633186341863518636186371863818639186401864118642186431864418645186461864718648186491865018651186521865318654186551865618657186581865918660186611866218663186641866518666186671866818669186701867118672186731867418675186761867718678186791868018681186821868318684186851868618687186881868918690186911869218693186941869518696186971869818699187001870118702187031870418705187061870718708187091871018711187121871318714187151871618717187181871918720187211872218723187241872518726187271872818729187301873118732187331873418735187361873718738187391874018741187421874318744187451874618747187481874918750187511875218753187541875518756187571875818759187601876118762187631876418765187661876718768187691877018771187721877318774187751877618777187781877918780187811878218783187841878518786187871878818789187901879118792187931879418795187961879718798187991880018801188021880318804188051880618807188081880918810188111881218813188141881518816188171881818819188201882118822188231882418825188261882718828188291883018831188321883318834188351883618837188381883918840188411884218843188441884518846188471884818849188501885118852188531885418855188561885718858188591886018861188621886318864188651886618867188681886918870188711887218873188741887518876188771887818879188801888118882188831888418885188861888718888188891889018891188921889318894188951889618897188981889918900189011890218903189041890518906189071890818909189101891118912189131891418915189161891718918189191892018921189221892318924189251892618927189281892918930189311893218933189341893518936189371893818939189401894118942189431894418945189461894718948189491895018951189521895318954189551895618957189581895918960189611896218963189641896518966189671896818969189701897118972189731897418975189761897718978189791898018981189821898318984189851898618987189881898918990189911899218993189941899518996189971899818999190001900119002190031900419005190061900719008190091901019011190121901319014190151901619017190181901919020190211902219023190241902519026190271902819029190301903119032190331903419035190361903719038190391904019041190421904319044190451904619047190481904919050190511905219053190541905519056190571905819059190601906119062190631906419065190661906719068190691907019071190721907319074190751907619077190781907919080190811908219083190841908519086190871908819089190901909119092190931909419095190961909719098190991910019101191021910319104191051910619107191081910919110191111911219113191141911519116191171911819119191201912119122191231912419125191261912719128191291913019131191321913319134191351913619137191381913919140191411914219143191441914519146191471914819149191501915119152191531915419155191561915719158191591916019161191621916319164191651916619167191681916919170191711917219173191741917519176191771917819179191801918119182191831918419185191861918719188191891919019191191921919319194191951919619197191981919919200192011920219203192041920519206192071920819209192101921119212192131921419215192161921719218192191922019221192221922319224192251922619227192281922919230192311923219233192341923519236192371923819239192401924119242192431924419245192461924719248192491925019251192521925319254192551925619257192581925919260192611926219263192641926519266192671926819269192701927119272192731927419275192761927719278192791928019281192821928319284192851
928619287192881928919290192911929219293192941929519296192971929819299193001930119302193031930419305193061930719308193091931019311193121931319314193151931619317193181931919320193211932219323193241932519326193271932819329193301933119332193331933419335193361933719338193391934019341193421934319344193451934619347193481934919350193511935219353193541935519356193571935819359193601936119362193631936419365193661936719368193691937019371193721937319374193751937619377193781937919380193811938219383193841938519386193871938819389193901939119392193931939419395193961939719398193991940019401194021940319404194051940619407194081940919410194111941219413194141941519416194171941819419194201942119422194231942419425194261942719428194291943019431194321943319434194351943619437194381943919440194411944219443194441944519446194471944819449194501945119452194531945419455194561945719458194591946019461194621946319464194651946619467194681946919470194711947219473194741947519476194771947819479194801948119482194831948419485194861948719488194891949019491194921949319494194951949619497194981949919500195011950219503195041950519506195071950819509195101951119512195131951419515195161951719518195191952019521195221952319524195251952619527195281952919530195311953219533195341953519536195371953819539195401954119542195431954419545195461954719548195491955019551195521955319554195551955619557195581955919560195611956219563195641956519566195671956819569195701957119572195731957419575195761957719578195791958019581195821958319584195851958619587195881958919590195911959219593195941959519596195971959819599196001960119602196031960419605196061960719608196091961019611196121961319614196151961619617196181961919620196211962219623196241962519626196271962819629196301963119632196331963419635196361963719638196391964019641196421964319644196451964619647196481964919650196511965219653196541965519656196571965819659196601966119662196631966419665196661966719668196691967019671196721967319674196751967619677196781967919680196811968219683196841968519686196871968819689196901969119692196931969419695196961969719698196991970019701197021970319704197051970619707197081970919710197111971219713197141971519716197171971819719197201972119722197231972419725197261972719728197291973019731197321973319734197351973619737197381973919740197411974219743197441974519746197471974819749197501975119752197531975419755197561975719758197591976019761197621976319764197651976619767197681976919770197711977219773197741977519776197771977819779197801978119782197831978419785197861978719788197891979019791197921979319794197951979619797197981979919800198011980219803198041980519806198071980819809198101981119812198131981419815198161981719818198191982019821198221982319824198251982619827198281982919830198311983219833198341983519836198371983819839198401984119842198431984419845198461984719848198491985019851198521985319854198551985619857198581985919860198611986219863198641986519866198671986819869198701987119872198731987419875198761987719878198791988019881198821988319884198851988619887198881988919890198911989219893198941989519896198971989819899199001990119902199031990419905199061990719908199091991019911199121991319914199151991619917199181991919920199211992219923199241992519926199271992819929199301993119932199331993419935199361993719938199391994019941199421994319944199451994619947199481994919950199511995219953199541995519956199571995819959199601996119962199631996419965199661996719968199691997019971199721997319974199751997619977199781997919980199811998219983199841998519986199871998819989199901999119992199931999419995199961
999719998199992000020001200022000320004200052000620007200082000920010200112001220013200142001520016200172001820019200202002120022200232002420025200262002720028200292003020031200322003320034200352003620037200382003920040200412004220043200442004520046200472004820049200502005120052200532005420055200562005720058200592006020061200622006320064200652006620067200682006920070200712007220073200742007520076200772007820079200802008120082200832008420085200862008720088200892009020091200922009320094200952009620097200982009920100201012010220103201042010520106201072010820109201102011120112201132011420115201162011720118201192012020121201222012320124201252012620127201282012920130201312013220133201342013520136201372013820139201402014120142201432014420145201462014720148201492015020151201522015320154201552015620157201582015920160201612016220163201642016520166201672016820169201702017120172201732017420175201762017720178201792018020181201822018320184201852018620187201882018920190201912019220193201942019520196201972019820199202002020120202202032020420205202062020720208202092021020211202122021320214202152021620217202182021920220202212022220223202242022520226202272022820229202302023120232202332023420235202362023720238202392024020241202422024320244202452024620247202482024920250202512025220253202542025520256202572025820259202602026120262202632026420265202662026720268202692027020271202722027320274202752027620277202782027920280202812028220283202842028520286202872028820289202902029120292202932029420295202962029720298202992030020301203022030320304203052030620307203082030920310203112031220313203142031520316203172031820319203202032120322203232032420325203262032720328203292033020331203322033320334203352033620337203382033920340203412034220343203442034520346203472034820349203502035120352203532035420355203562035720358203592036020361203622036320364203652036620367203682036920370203712037220373203742037520376203772037820379203802038120382203832038420385203862038720388203892039020391203922039320394203952039620397203982039920400204012040220403204042040520406204072040820409204102041120412204132041420415204162041720418204192042020421204222042320424204252042620427204282042920430204312043220433204342043520436204372043820439204402044120442204432044420445204462044720448204492045020451204522045320454204552045620457204582045920460204612046220463204642046520466204672046820469204702047120472204732047420475204762047720478204792048020481204822048320484204852048620487204882048920490204912049220493204942049520496204972049820499205002050120502205032050420505205062050720508205092051020511205122051320514205152051620517205182051920520205212052220523205242052520526205272052820529205302053120532205332053420535205362053720538205392054020541205422054320544205452054620547205482054920550205512055220553205542055520556205572055820559205602056120562205632056420565205662056720568205692057020571205722057320574205752057620577205782057920580205812058220583205842058520586205872058820589205902059120592205932059420595205962059720598205992060020601206022060320604206052060620607206082060920610206112061220613206142061520616206172061820619206202062120622206232062420625206262062720628206292063020631206322063320634206352063620637206382063920640206412064220643206442064520646206472064820649206502065120652206532065420655206562065720658206592066020661206622066320664206652066620667206682066920670206712067220673206742067520676206772067820679206802068120682206832068420685206862068720688206892069020691206922069320694206952069620697206982069920700207012070220703207042070520706207072
070820709207102071120712207132071420715207162071720718207192072020721207222072320724207252072620727207282072920730207312073220733207342073520736207372073820739207402074120742207432074420745207462074720748207492075020751207522075320754207552075620757207582075920760207612076220763207642076520766207672076820769207702077120772207732077420775207762077720778207792078020781207822078320784207852078620787207882078920790207912079220793207942079520796207972079820799208002080120802208032080420805208062080720808208092081020811208122081320814208152081620817208182081920820208212082220823208242082520826208272082820829208302083120832208332083420835208362083720838208392084020841208422084320844208452084620847208482084920850208512085220853208542085520856208572085820859208602086120862208632086420865208662086720868208692087020871208722087320874208752087620877208782087920880208812088220883208842088520886208872088820889208902089120892208932089420895208962089720898208992090020901209022090320904209052090620907209082090920910209112091220913209142091520916209172091820919209202092120922209232092420925209262092720928209292093020931209322093320934209352093620937209382093920940209412094220943209442094520946209472094820949209502095120952209532095420955209562095720958209592096020961209622096320964209652096620967209682096920970209712097220973209742097520976209772097820979209802098120982209832098420985209862098720988209892099020991209922099320994209952099620997209982099921000210012100221003210042100521006210072100821009210102101121012210132101421015210162101721018210192102021021210222102321024210252102621027210282102921030210312103221033210342103521036210372103821039210402104121042210432104421045210462104721048210492105021051210522105321054210552105621057210582105921060210612106221063210642106521066210672106821069210702107121072210732107421075210762107721078210792108021081210822108321084210852108621087210882108921090210912109221093210942109521096210972109821099211002110121102211032110421105211062110721108211092111021111211122111321114211152111621117211182111921120211212112221123211242112521126211272112821129211302113121132211332113421135211362113721138211392114021141211422114321144211452114621147211482114921150211512115221153211542115521156211572115821159211602116121162211632116421165211662116721168211692117021171211722117321174211752117621177211782117921180211812118221183211842118521186211872118821189211902119121192211932119421195211962119721198211992120021201212022120321204212052120621207212082120921210212112121221213212142121521216212172121821219212202122121222212232122421225212262122721228212292123021231212322123321234212352123621237212382123921240212412124221243212442124521246212472124821249212502125121252212532125421255212562125721258212592126021261212622126321264212652126621267212682126921270212712127221273212742127521276212772127821279212802128121282212832128421285212862128721288212892129021291212922129321294212952129621297212982129921300213012130221303213042130521306213072130821309213102131121312213132131421315213162131721318213192132021321213222132321324213252132621327213282132921330213312133221333213342133521336213372133821339213402134121342213432134421345213462134721348213492135021351213522135321354213552135621357213582135921360213612136221363213642136521366213672136821369213702137121372213732137421375213762137721378213792138021381213822138321384213852138621387213882138921390213912139221393213942139521396213972139821399214002140121402214032140421405214062140721408214092141021411214122141321414214152141621417214182
141921420214212142221423214242142521426214272142821429214302143121432214332143421435214362143721438214392144021441214422144321444214452144621447214482144921450214512145221453214542145521456214572145821459214602146121462214632146421465214662146721468214692147021471214722147321474214752147621477214782147921480214812148221483214842148521486214872148821489214902149121492214932149421495214962149721498214992150021501215022150321504215052150621507215082150921510215112151221513215142151521516215172151821519215202152121522215232152421525215262152721528215292153021531215322153321534215352153621537215382153921540215412154221543215442154521546215472154821549215502155121552215532155421555215562155721558215592156021561215622156321564215652156621567215682156921570215712157221573215742157521576215772157821579215802158121582215832158421585215862158721588215892159021591215922159321594215952159621597215982159921600216012160221603216042160521606216072160821609216102161121612216132161421615216162161721618216192162021621216222162321624216252162621627216282162921630216312163221633216342163521636216372163821639216402164121642216432164421645216462164721648216492165021651216522165321654216552165621657216582165921660216612166221663216642166521666216672166821669216702167121672216732167421675216762167721678216792168021681216822168321684216852168621687216882168921690216912169221693216942169521696216972169821699217002170121702217032170421705217062170721708217092171021711217122171321714217152171621717217182171921720217212172221723217242172521726217272172821729217302173121732217332173421735217362173721738217392174021741217422174321744217452174621747217482174921750217512175221753217542175521756217572175821759217602176121762217632176421765217662176721768217692177021771217722177321774217752177621777217782177921780217812178221783217842178521786217872178821789217902179121792217932179421795217962179721798217992180021801218022180321804218052180621807218082180921810218112181221813218142181521816218172181821819218202182121822218232182421825218262182721828218292183021831218322183321834218352183621837218382183921840218412184221843218442184521846218472184821849218502185121852218532185421855218562185721858218592186021861218622186321864218652186621867218682186921870218712187221873218742187521876218772187821879218802188121882218832188421885218862188721888218892189021891218922189321894218952189621897218982189921900219012190221903219042190521906219072190821909219102191121912219132191421915219162191721918219192192021921219222192321924219252192621927219282192921930219312193221933219342193521936219372193821939219402194121942219432194421945219462194721948219492195021951219522195321954219552195621957219582195921960219612196221963219642196521966219672196821969219702197121972219732197421975219762197721978219792198021981219822198321984219852198621987219882198921990219912199221993219942199521996219972199821999220002200122002220032200422005220062200722008220092201022011220122201322014220152201622017220182201922020220212202222023220242202522026220272202822029220302203122032220332203422035220362203722038220392204022041220422204322044220452204622047220482204922050220512205222053220542205522056220572205822059220602206122062220632206422065220662206722068220692207022071220722207322074220752207622077220782207922080220812208222083220842208522086220872208822089220902209122092220932209422095220962209722098220992210022101221022210322104221052210622107221082210922110221112211222113221142211522116221172211822119221202212122122221232212422125221262212722128221292
213022131221322213322134221352213622137221382213922140221412214222143221442214522146221472214822149221502215122152221532215422155221562215722158221592216022161221622216322164221652216622167221682216922170221712217222173221742217522176221772217822179221802218122182221832218422185221862218722188221892219022191221922219322194221952219622197221982219922200222012220222203222042220522206222072220822209222102221122212222132221422215222162221722218222192222022221222222222322224222252222622227222282222922230222312223222233222342223522236222372223822239222402224122242222432224422245222462224722248222492225022251222522225322254222552225622257222582225922260222612226222263222642226522266222672226822269222702227122272222732227422275222762227722278222792228022281222822228322284222852228622287222882228922290222912229222293222942229522296222972229822299223002230122302223032230422305223062230722308223092231022311223122231322314223152231622317223182231922320223212232222323223242232522326223272232822329223302233122332223332233422335223362233722338223392234022341223422234322344223452234622347223482234922350223512235222353223542235522356223572235822359223602236122362223632236422365223662236722368223692237022371223722237322374223752237622377223782237922380223812238222383223842238522386223872238822389223902239122392223932239422395223962239722398223992240022401224022240322404224052240622407224082240922410224112241222413224142241522416224172241822419224202242122422224232242422425224262242722428224292243022431224322243322434224352243622437224382243922440224412244222443224442244522446224472244822449224502245122452224532245422455224562245722458224592246022461224622246322464224652246622467224682246922470224712247222473224742247522476224772247822479224802248122482224832248422485224862248722488224892249022491224922249322494224952249622497224982249922500225012250222503225042250522506225072250822509225102251122512225132251422515225162251722518225192252022521225222252322524225252252622527225282252922530225312253222533225342253522536225372253822539225402254122542225432254422545225462254722548225492255022551225522255322554225552255622557225582255922560225612256222563225642256522566225672256822569225702257122572225732257422575225762257722578225792258022581225822258322584225852258622587225882258922590225912259222593225942259522596225972259822599226002260122602226032260422605226062260722608226092261022611226122261322614226152261622617226182261922620226212262222623226242262522626226272262822629226302263122632226332263422635226362263722638226392264022641226422264322644226452264622647226482264922650226512265222653226542265522656226572265822659226602266122662226632266422665226662266722668226692267022671226722267322674226752267622677226782267922680226812268222683226842268522686226872268822689226902269122692226932269422695226962269722698226992270022701227022270322704227052270622707227082270922710227112271222713227142271522716227172271822719227202272122722227232272422725227262272722728227292273022731227322273322734227352273622737227382273922740227412274222743227442274522746227472274822749227502275122752227532275422755227562275722758227592276022761227622276322764227652276622767227682276922770227712277222773227742277522776227772277822779227802278122782227832278422785227862278722788227892279022791227922279322794227952279622797227982279922800228012280222803228042280522806228072280822809228102281122812228132281422815228162281722818228192282022821228222282322824228252282622827228282282922830228312283222833228342283522836228372283822839228402
28412284222843228442284522846228472284822849228502285122852228532285422855228562285722858228592286022861228622286322864228652286622867228682286922870228712287222873228742287522876228772287822879228802288122882228832288422885228862288722888228892289022891228922289322894228952289622897228982289922900229012290222903229042290522906229072290822909229102291122912229132291422915229162291722918229192292022921229222292322924229252292622927229282292922930229312293222933229342293522936229372293822939229402294122942229432294422945229462294722948229492295022951229522295322954229552295622957229582295922960229612296222963229642296522966229672296822969229702297122972229732297422975229762297722978229792298022981229822298322984229852298622987229882298922990229912299222993229942299522996229972299822999230002300123002230032300423005230062300723008230092301023011230122301323014230152301623017230182301923020230212302223023230242302523026230272302823029230302303123032230332303423035230362303723038230392304023041230422304323044230452304623047230482304923050230512305223053230542305523056230572305823059
#define _CRT_SECURE_NO_DEPRECATE // Disables ridiculous "unsafe" warnings on Windows
#define _USE_MATH_DEFINES // For M_PI on MSVC

#include "ggml-impl.h"
#include "ggml-quants.h"
#include "ggml.h"

#if defined(_MSC_VER) || defined(__MINGW32__)
#include <malloc.h> // using malloc.h with MSC/MINGW
#elif !defined(__FreeBSD__) && !defined(__NetBSD__) && !defined(__OpenBSD__)
#include <alloca.h>
#endif

#include <assert.h>
#include <errno.h>
#include <time.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>
#include <float.h>
#include <limits.h>
#include <stdarg.h>
#include <signal.h>
#if defined(__gnu_linux__)
#include <syscall.h>
#endif

#ifdef GGML_USE_METAL
#include <unistd.h>
#endif

#ifdef __ARM_FEATURE_MATMUL_INT8
#undef GGML_USE_LLAMAFILE
#endif

#ifdef GGML_USE_LLAMAFILE
#include "sgemm.h"
#endif

#if defined(_MSC_VER)
// disable "possible loss of data" to avoid hundreds of casts
// we should just be careful :)
#pragma warning(disable: 4244 4267)

// disable POSIX deprecation warnings
// these functions are never going away, anyway
#pragma warning(disable: 4996)
#endif
#if defined(_WIN32)

#define WIN32_LEAN_AND_MEAN
#ifndef NOMINMAX
    #define NOMINMAX
#endif
#include <windows.h>

typedef volatile LONG atomic_int;
typedef atomic_int atomic_bool;

static void atomic_store(atomic_int * ptr, LONG val) {
    InterlockedExchange(ptr, val);
}
static LONG atomic_load(atomic_int * ptr) {
    return InterlockedCompareExchange(ptr, 0, 0);
}
static LONG atomic_fetch_add(atomic_int * ptr, LONG inc) {
    return InterlockedExchangeAdd(ptr, inc);
}
static LONG atomic_fetch_sub(atomic_int * ptr, LONG dec) {
    return atomic_fetch_add(ptr, -(dec));
}

typedef HANDLE pthread_t;
typedef DWORD  thread_ret_t;

static int pthread_create(pthread_t * out, void * unused, thread_ret_t(*func)(void *), void * arg) {
    (void) unused;
    HANDLE handle = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE) func, arg, 0, NULL);
    if (handle == NULL)
    {
        return EAGAIN;
    }

    *out = handle;
    return 0;
}

static int pthread_join(pthread_t thread, void * unused) {
    (void) unused;
    int ret = (int) WaitForSingleObject(thread, INFINITE);
    CloseHandle(thread);
    return ret;
}

static int sched_yield (void) {
    Sleep (0);
    return 0;
}
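
// Editor's note (descriptive comment, not in upstream ggml.c): the shims above
// emulate just enough of the pthread / <stdatomic.h> surface (atomic_store,
// atomic_load, atomic_fetch_add/sub, pthread_create/join, sched_yield) on top of
// Win32 Interlocked* intrinsics, CreateThread/WaitForSingleObject and Sleep(0),
// so the threading code later in this file can target the POSIX-style API on
// every platform.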
#else

#include <pthread.h>
#include <stdatomic.h>

typedef void * thread_ret_t;

#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

#endif

#ifdef GGML_USE_CPU_HBM
#include <hbwmalloc.h>
#endif

#if defined(__APPLE__)
#include <TargetConditionals.h>
#endif

#if (defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)) && \
    (!defined(TARGET_OS_TV) && !defined(TARGET_OS_WATCH))

#include <sys/wait.h>
void ggml_print_backtrace(void) {
    /*
    #include <execinfo.h>
    #include <dlfcn.h>

    void * trace[100];

    int nptrs = backtrace(trace, sizeof(trace)/sizeof(trace[0]));

    backtrace_symbols_fd(trace, nptrs, STDERR_FILENO);
    */

    // backtrace_symbols does not show line numbers, use gdb instead
    char attach[32];
    snprintf(attach, sizeof(attach), "attach %d", getpid());
    int pid = fork();
    if (pid == 0) {
        execlp("gdb", "gdb", "--batch",
            "-ex", "set style enabled on",
            "-ex", attach,
            "-ex", "bt -frame-info source-and-location",
            "-ex", "detach",
            "-ex", "quit",
            (char *) NULL);
    } else {
        waitpid(pid, NULL, 0);
    }
}
#else
void ggml_print_backtrace(void) {
    // platform not supported
}
#endif
/*#define GGML_PERF*/
#define GGML_DEBUG 0
#define GGML_GELU_FP16
#define GGML_GELU_QUICK_FP16
#define GGML_SILU_FP16
// #define GGML_CROSS_ENTROPY_EXP_FP16
// #define GGML_FLASH_ATTN_EXP_FP16

#define GGML_SOFT_MAX_UNROLL 4
#define GGML_VEC_DOT_UNROLL  2
#define GGML_VEC_MAD_UNROLL  32

//
// logging
//

#if (GGML_DEBUG >= 1)
#define GGML_PRINT_DEBUG(...) printf(__VA_ARGS__)
#else
#define GGML_PRINT_DEBUG(...)
#endif

#if (GGML_DEBUG >= 5)
#define GGML_PRINT_DEBUG_5(...) printf(__VA_ARGS__)
#else
#define GGML_PRINT_DEBUG_5(...)
#endif

#if (GGML_DEBUG >= 10)
#define GGML_PRINT_DEBUG_10(...) printf(__VA_ARGS__)
#else
#define GGML_PRINT_DEBUG_10(...)
#endif

#define GGML_PRINT(...) printf(__VA_ARGS__)

//
// end of logging block
//
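
// Editor's usage sketch (added comment, hypothetical call site): the debug macros
// compile to nothing unless GGML_DEBUG is raised to the matching level, so a call
// such as
//
//   GGML_PRINT_DEBUG("%s: processing node %d\n", __func__, i);
//
// costs nothing in a default build, while GGML_PRINT is always emitted.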
#ifdef GGML_USE_ACCELERATE
// uncomment to use vDSP for soft max computation
// note: not sure if it is actually faster
//#define GGML_SOFT_MAX_ACCELERATE
#endif

#if defined(_MSC_VER) || defined(__MINGW32__)
#define GGML_ALIGNED_MALLOC(size) _aligned_malloc(size, GGML_MEM_ALIGN)
#define GGML_ALIGNED_FREE(ptr)    _aligned_free(ptr)
#else
inline static void * ggml_aligned_malloc(size_t size) {
    if (size == 0) {
        GGML_PRINT("WARNING: Behavior may be unexpected when allocating 0 bytes for ggml_aligned_malloc!\n");
        return NULL;
    }
    void * aligned_memory = NULL;
#ifdef GGML_USE_CPU_HBM
    int result = hbw_posix_memalign(&aligned_memory, 16, size);
#elif GGML_USE_METAL
    int result = posix_memalign(&aligned_memory, sysconf(_SC_PAGESIZE), size);
#else
    int result = posix_memalign(&aligned_memory, GGML_MEM_ALIGN, size);
#endif
    if (result != 0) {
        // Handle allocation failure
        const char *error_desc = "unknown allocation error";
        switch (result) {
            case EINVAL:
                error_desc = "invalid alignment value";
                break;
            case ENOMEM:
                error_desc = "insufficient memory";
                break;
        }
        GGML_PRINT("%s: %s (attempted to allocate %6.2f MB)\n", __func__, error_desc, size/(1024.0*1024.0));
        GGML_ASSERT(false);
        return NULL;
    }
    return aligned_memory;
}
#define GGML_ALIGNED_MALLOC(size) ggml_aligned_malloc(size)
#ifdef GGML_USE_CPU_HBM
#define GGML_ALIGNED_FREE(ptr)    if(NULL != ptr) hbw_free(ptr)
#else
#define GGML_ALIGNED_FREE(ptr)    free(ptr)
#endif
#endif
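
// Editor's usage sketch (added comment, hypothetical call site): callers are
// expected to pair the two macros regardless of which branch above was compiled,
// e.g.
//
//   void * buf = GGML_ALIGNED_MALLOC(nbytes);
//   // ... use buf to back tensor data so SIMD loads/stores see aligned addresses ...
//   GGML_ALIGNED_FREE(buf);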
inline static void * ggml_malloc(size_t size) {
    if (size == 0) {
        GGML_PRINT("WARNING: Behavior may be unexpected when allocating 0 bytes for ggml_malloc!\n");
        return NULL;
    }
    void * result = malloc(size);
    if (result == NULL) {
        GGML_PRINT("%s: failed to allocate %6.2f MB\n", __func__, size/(1024.0*1024.0));
        GGML_ASSERT(false);
    }
    return result;
}
// calloc
inline static void * ggml_calloc(size_t num, size_t size) {
    if (num == 0 || size == 0) {
        GGML_PRINT("WARNING: Behavior may be unexpected when allocating 0 bytes for ggml_calloc!\n");
        return NULL;
    }
    void * result = calloc(num, size);
    if (result == NULL) {
        // report the total requested size (num*size), not just the element size
        GGML_PRINT("%s: failed to allocate %6.2f MB\n", __func__, (num*size)/(1024.0*1024.0));
        GGML_ASSERT(false);
    }
    return result;
}
#define GGML_MALLOC(size)      ggml_malloc(size)
#define GGML_CALLOC(num, size) ggml_calloc(num, size)

#define GGML_FREE(ptr) free(ptr)

#define UNUSED GGML_UNUSED

#define SWAP(x, y, T) do { T SWAP = x; x = y; y = SWAP; } while (0)
#if defined(GGML_USE_ACCELERATE)
#include <Accelerate/Accelerate.h>
#if defined(GGML_USE_CLBLAST) // allow usage of CLBlast alongside Accelerate functions
#include "ggml-opencl.h"
#endif
#elif defined(GGML_USE_OPENBLAS)
#if defined(GGML_BLAS_USE_MKL)
#include <mkl.h>
#else
#include <cblas.h>
#endif
#elif defined(GGML_USE_CLBLAST)
#include "ggml-opencl.h"
#endif

// floating point type used to accumulate sums
typedef double ggml_float;

#undef MIN
#undef MAX

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

//
// global data
//

// precomputed gelu table for f16 (128 KB)
static ggml_fp16_t ggml_table_gelu_f16[1 << 16];

// precomputed quick gelu table for f16 (128 KB)
static ggml_fp16_t ggml_table_gelu_quick_f16[1 << 16];

// precomputed silu table for f16 (128 KB)
static ggml_fp16_t ggml_table_silu_f16[1 << 16];

// precomputed exp table for f16 (128 KB)
static ggml_fp16_t ggml_table_exp_f16[1 << 16];

// precomputed f32 table for f16 (256 KB) (ggml-impl.h)
float ggml_table_f32_f16[1 << 16];
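
// Editor's note (illustrative sketch, not part of upstream ggml.c): each table has
// one entry per possible 16-bit pattern, so a lookup replaces the transcendental
// call at runtime. Initialization (done elsewhere, during context setup)
// conceptually walks all 65536 patterns; the helper names gelu_f32/silu_f32 below
// are assumptions of this sketch, not guaranteed symbols:
//
//   for (int i = 0; i < (1 << 16); ++i) {
//       union { uint16_t u; ggml_fp16_t h; } c = { (uint16_t) i };   // reinterpret the bit pattern
//       float f = GGML_FP16_TO_FP32(c.h);
//       ggml_table_f32_f16[i]  = f;                                  // f16 -> f32 lookup
//       ggml_table_gelu_f16[i] = GGML_FP32_TO_FP16(gelu_f32(f));     // activation lookups
//       ggml_table_silu_f16[i] = GGML_FP32_TO_FP16(silu_f32(f));
//   }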
  273. GGML_CALL const char * ggml_status_to_string(enum ggml_status status) {
  274. switch (status) {
  275. case GGML_STATUS_ALLOC_FAILED: return "GGML status: error (failed to allocate memory)";
  276. case GGML_STATUS_FAILED: return "GGML status: error (operation failed)";
  277. case GGML_STATUS_SUCCESS: return "GGML status: success";
  278. case GGML_STATUS_ABORTED: return "GGML status: warning (operation aborted)";
  279. }
  280. return "GGML status: unknown";
  281. }
  282. float ggml_fp16_to_fp32(ggml_fp16_t x) {
  283. #define ggml_fp16_to_fp32 do_not_use__ggml_fp16_to_fp32__in_ggml
  284. return GGML_FP16_TO_FP32(x);
  285. }
  286. ggml_fp16_t ggml_fp32_to_fp16(float x) {
  287. #define ggml_fp32_to_fp16 do_not_use__ggml_fp32_to_fp16__in_ggml
  288. return GGML_FP32_TO_FP16(x);
  289. }
  290. float ggml_bf16_to_fp32(ggml_bf16_t x) {
  291. #define ggml_bf16_to_fp32 do_not_use__ggml_bf16_to_fp32__in_ggml
292. return GGML_BF16_TO_FP32(x); // bf16 is the upper half of an fp32, so this is just a left shift by 16 bits
  293. }
  294. ggml_bf16_t ggml_fp32_to_bf16(float x) {
  295. #define ggml_fp32_to_bf16 do_not_use__ggml_fp32_to_bf16__in_ggml
  296. return GGML_FP32_TO_BF16(x);
  297. }
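// NOTE: explanatory comment added for exposition; not part of the original sources.
// The #define placed immediately after each definition above deliberately "poisons" the
// function name for the rest of this translation unit: code inside ggml.c is forced to use
// the GGML_FP16_TO_FP32 / GGML_FP32_TO_FP16 (and BF16) macros, which can expand to hardware
// instructions or a table lookup, while external callers still link against the exported
// functions. The bf16 conversions themselves are pure bit manipulation; ignoring rounding
// and NaN handling (which GGML_FP32_TO_BF16 takes care of), they amount to:
//
//     uint32_t bits;                            // bit pattern of a float
//     uint16_t bf16 = (uint16_t)(bits >> 16);   // fp32 -> bf16: keep sign, exponent, top mantissa bits
//     uint32_t back = (uint32_t) bf16 << 16;    // bf16 -> fp32: shift back into the high half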
  298. void ggml_fp16_to_fp32_row(const ggml_fp16_t * x, float * y, int64_t n) {
  299. for (int64_t i = 0; i < n; i++) {
  300. y[i] = GGML_FP16_TO_FP32(x[i]);
  301. }
  302. }
  303. void ggml_fp32_to_fp16_row(const float * x, ggml_fp16_t * y, int64_t n) {
  304. int64_t i = 0;
  305. #if defined(__F16C__)
  306. for (; i + 7 < n; i += 8) {
  307. __m256 x_vec = _mm256_loadu_ps(x + i);
  308. __m128i y_vec = _mm256_cvtps_ph(x_vec, _MM_FROUND_TO_NEAREST_INT);
  309. _mm_storeu_si128((__m128i *)(y + i), y_vec);
  310. }
  311. for(; i + 3 < n; i += 4) {
  312. __m128 x_vec = _mm_loadu_ps(x + i);
  313. __m128i y_vec = _mm_cvtps_ph(x_vec, _MM_FROUND_TO_NEAREST_INT);
  314. _mm_storel_epi64((__m128i *)(y + i), y_vec);
  315. }
  316. #endif
  317. for (; i < n; i++) {
  318. y[i] = GGML_FP32_TO_FP16(x[i]);
  319. }
  320. }
  321. void ggml_bf16_to_fp32_row(const ggml_bf16_t * x, float * y, int64_t n) {
  322. int64_t i = 0;
  323. #if defined(__AVX512F__)
  324. for (; i + 16 <= n; i += 16) {
  325. _mm512_storeu_ps(y + i,
  326. _mm512_castsi512_ps(
  327. _mm512_slli_epi32(
  328. _mm512_cvtepu16_epi32(
  329. _mm256_loadu_si256(
  330. (const __m256i *)(x + i))),
  331. 16)));
  332. }
  333. #elif defined(__AVX2__)
  334. for (; i + 8 <= n; i += 8) {
  335. _mm256_storeu_ps(y + i,
  336. _mm256_castsi256_ps(
  337. _mm256_slli_epi32(
  338. _mm256_cvtepu16_epi32(
  339. _mm_loadu_si128(
  340. (const __m128i *)(x + i))),
  341. 16)));
  342. }
  343. #endif
  344. for (; i < n; i++) {
  345. y[i] = GGML_BF16_TO_FP32(x[i]);
  346. }
  347. }
  348. void ggml_fp32_to_bf16_row(const float * x, ggml_bf16_t * y, int64_t n) {
349. int64_t i = 0; // int64_t to match n, like the other row-conversion helpers
  350. #if defined(__AVX512BF16__)
  351. for (; i + 32 <= n; i += 32) {
  352. _mm512_storeu_ps(
  353. (__m512 *)(y + i),
  354. (__m512)_mm512_cvtne2ps_pbh(_mm512_loadu_ps(x + i + 16),
  355. _mm512_loadu_ps(x + i)));
  356. }
  357. #endif
  358. for (; i < n; i++) {
  359. y[i] = GGML_FP32_TO_BF16(x[i]);
  360. }
  361. }
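// NOTE: illustrative usage sketch added for exposition; not part of the original sources and
// the function name is hypothetical. The *_row helpers above convert whole rows at once; they
// are what the type_traits table below plugs into .to_float / .from_float for F16 and BF16.
inline static void ggml_example_roundtrip_f16_row(const float * src, ggml_fp16_t * tmp, float * dst, int64_t n) {
    ggml_fp32_to_fp16_row(src, tmp, n); // narrow a row of f32 to f16 (vectorized when __F16C__ is available)
    ggml_fp16_to_fp32_row(tmp, dst, n); // and widen it back to f32
}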
  362. bool ggml_guid_matches(ggml_guid_t guid_a, ggml_guid_t guid_b) {
  363. return memcmp(guid_a, guid_b, sizeof(ggml_guid)) == 0;
  364. }
  365. //
  366. // timing
  367. //
  368. #if defined(_MSC_VER) || defined(__MINGW32__)
  369. static int64_t timer_freq, timer_start;
  370. void ggml_time_init(void) {
  371. LARGE_INTEGER t;
  372. QueryPerformanceFrequency(&t);
  373. timer_freq = t.QuadPart;
  374. // The multiplication by 1000 or 1000000 below can cause an overflow if timer_freq
375. // and the uptime are high enough.
  376. // We subtract the program start time to reduce the likelihood of that happening.
  377. QueryPerformanceCounter(&t);
  378. timer_start = t.QuadPart;
  379. }
  380. int64_t ggml_time_ms(void) {
  381. LARGE_INTEGER t;
  382. QueryPerformanceCounter(&t);
  383. return ((t.QuadPart-timer_start) * 1000) / timer_freq;
  384. }
  385. int64_t ggml_time_us(void) {
  386. LARGE_INTEGER t;
  387. QueryPerformanceCounter(&t);
  388. return ((t.QuadPart-timer_start) * 1000000) / timer_freq;
  389. }
  390. #else
  391. void ggml_time_init(void) {}
  392. int64_t ggml_time_ms(void) {
  393. struct timespec ts;
  394. clock_gettime(CLOCK_MONOTONIC, &ts);
  395. return (int64_t)ts.tv_sec*1000 + (int64_t)ts.tv_nsec/1000000;
  396. }
  397. int64_t ggml_time_us(void) {
  398. struct timespec ts;
  399. clock_gettime(CLOCK_MONOTONIC, &ts);
  400. return (int64_t)ts.tv_sec*1000000 + (int64_t)ts.tv_nsec/1000;
  401. }
  402. #endif
  403. int64_t ggml_cycles(void) {
  404. return clock();
  405. }
  406. int64_t ggml_cycles_per_ms(void) {
  407. return CLOCKS_PER_SEC/1000;
  408. }
  409. #ifdef GGML_PERF
  410. #define ggml_perf_time_ms() ggml_time_ms()
  411. #define ggml_perf_time_us() ggml_time_us()
  412. #define ggml_perf_cycles() ggml_cycles()
  413. #define ggml_perf_cycles_per_ms() ggml_cycles_per_ms()
  414. #else
  415. #define ggml_perf_time_ms() 0
  416. #define ggml_perf_time_us() 0
  417. #define ggml_perf_cycles() 0
  418. #define ggml_perf_cycles_per_ms() 0
  419. #endif
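// NOTE: illustrative usage sketch added for exposition; not part of the original sources and
// the function name is hypothetical. ggml_time_init() is expected to have been called once
// (ggml_init() does this) before the _ms/_us getters are read; the ggml_perf_* macros above
// compile to constant zeros unless GGML_PERF is defined.
inline static int64_t ggml_example_elapsed_us(void (*fn)(void)) {
    const int64_t t0 = ggml_time_us();
    fn();
    return ggml_time_us() - t0; // elapsed wall-clock time in microseconds
}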
  420. //
  421. // cross-platform UTF-8 file paths
  422. //
  423. #ifdef _WIN32
  424. static wchar_t * ggml_mbstowcs(const char * mbs) {
  425. int wlen = MultiByteToWideChar(CP_UTF8, 0, mbs, -1, NULL, 0);
  426. if (!wlen) {
  427. errno = EINVAL;
  428. return NULL;
  429. }
  430. wchar_t * wbuf = GGML_MALLOC(wlen * sizeof(wchar_t));
  431. wlen = MultiByteToWideChar(CP_UTF8, 0, mbs, -1, wbuf, wlen);
  432. if (!wlen) {
  433. GGML_FREE(wbuf);
  434. errno = EINVAL;
  435. return NULL;
  436. }
  437. return wbuf;
  438. }
  439. #endif
  440. FILE * ggml_fopen(const char * fname, const char * mode) {
  441. #ifdef _WIN32
  442. FILE * file = NULL;
  443. // convert fname (UTF-8)
  444. wchar_t * wfname = ggml_mbstowcs(fname);
  445. if (wfname) {
  446. // convert mode (ANSI)
  447. wchar_t * wmode = GGML_MALLOC((strlen(mode) + 1) * sizeof(wchar_t));
  448. wchar_t * wmode_p = wmode;
  449. do {
  450. *wmode_p++ = (wchar_t)*mode;
  451. } while (*mode++);
  452. // open file
  453. file = _wfopen(wfname, wmode);
  454. GGML_FREE(wfname);
  455. GGML_FREE(wmode);
  456. }
  457. return file;
  458. #else
  459. return fopen(fname, mode);
  460. #endif
  461. }
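// NOTE: illustrative usage sketch added for exposition; not part of the original sources and
// the function name is hypothetical. ggml_fopen() exists so that callers can always pass
// UTF-8 paths: on Windows the path is widened and _wfopen() is used, elsewhere it falls
// through to a plain fopen(). Usage is otherwise identical to fopen():
inline static int ggml_example_file_exists(const char * utf8_path) {
    FILE * f = ggml_fopen(utf8_path, "rb");
    if (f == NULL) {
        return 0;
    }
    fclose(f);
    return 1;
}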
  462. //
  463. // cache line
  464. //
  465. #if defined(__cpp_lib_hardware_interference_size)
  466. #define CACHE_LINE_SIZE hardware_destructive_interference_size
  467. #else
  468. #if defined(__POWER9_VECTOR__)
  469. #define CACHE_LINE_SIZE 128
  470. #else
  471. #define CACHE_LINE_SIZE 64
  472. #endif
  473. #endif
  474. static const size_t CACHE_LINE_SIZE_F32 = CACHE_LINE_SIZE/sizeof(float);
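// NOTE: explanatory comment added for exposition; not part of the original sources.
// CACHE_LINE_SIZE_F32 is simply the cache line expressed in floats (64 / sizeof(float) = 16
// elements on most targets, 32 on POWER9); it is used further down when laying out per-thread
// scratch data so that threads are less likely to write to the same cache line (false sharing).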
  475. static void ggml_vec_dot_f32(int n, float * restrict s, size_t bs, const float * restrict x, size_t bx, const float * restrict y, size_t by, int nrc);
  476. static void ggml_vec_dot_f16(int n, float * restrict s, size_t bs, ggml_fp16_t * restrict x, size_t bx, ggml_fp16_t * restrict y, size_t by, int nrc);
  477. static void ggml_vec_dot_bf16(int n, float * restrict s, size_t bs, ggml_bf16_t * restrict x, size_t bx, ggml_bf16_t * restrict y, size_t by, int nrc);
  478. static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = {
  479. [GGML_TYPE_I8] = {
  480. .type_name = "i8",
  481. .blck_size = 1,
  482. .type_size = sizeof(int8_t),
  483. .is_quantized = false,
  484. },
  485. [GGML_TYPE_I16] = {
  486. .type_name = "i16",
  487. .blck_size = 1,
  488. .type_size = sizeof(int16_t),
  489. .is_quantized = false,
  490. },
  491. [GGML_TYPE_I32] = {
  492. .type_name = "i32",
  493. .blck_size = 1,
  494. .type_size = sizeof(int32_t),
  495. .is_quantized = false,
  496. },
  497. [GGML_TYPE_I64] = {
  498. .type_name = "i64",
  499. .blck_size = 1,
  500. .type_size = sizeof(int64_t),
  501. .is_quantized = false,
  502. },
  503. [GGML_TYPE_F64] = {
  504. .type_name = "f64",
  505. .blck_size = 1,
  506. .type_size = sizeof(double),
  507. .is_quantized = false,
  508. .nrows = 1,
  509. },
  510. [GGML_TYPE_F32] = {
  511. .type_name = "f32",
  512. .blck_size = 1,
  513. .type_size = sizeof(float),
  514. .is_quantized = false,
  515. .vec_dot = (ggml_vec_dot_t) ggml_vec_dot_f32,
  516. .vec_dot_type = GGML_TYPE_F32,
  517. .nrows = 1,
  518. },
  519. [GGML_TYPE_F16] = {
  520. .type_name = "f16",
  521. .blck_size = 1,
  522. .type_size = sizeof(ggml_fp16_t),
  523. .is_quantized = false,
  524. .to_float = (ggml_to_float_t) ggml_fp16_to_fp32_row,
  525. .from_float = (ggml_from_float_t) ggml_fp32_to_fp16_row,
  526. .from_float_reference = (ggml_from_float_t) ggml_fp32_to_fp16_row,
  527. .vec_dot = (ggml_vec_dot_t) ggml_vec_dot_f16,
  528. .vec_dot_type = GGML_TYPE_F16,
  529. .nrows = 1,
  530. },
  531. [GGML_TYPE_Q4_0] = {
  532. .type_name = "q4_0",
  533. .blck_size = QK4_0,
  534. .type_size = sizeof(block_q4_0),
  535. .is_quantized = true,
  536. .to_float = (ggml_to_float_t) dequantize_row_q4_0,
  537. .from_float = quantize_row_q4_0,
  538. .from_float_reference = (ggml_from_float_t) quantize_row_q4_0_reference,
  539. .vec_dot = ggml_vec_dot_q4_0_q8_0,
  540. .vec_dot_type = GGML_TYPE_Q8_0,
  541. #if defined (__ARM_FEATURE_MATMUL_INT8)
  542. .nrows = 2,
  543. #else
  544. .nrows = 1,
  545. #endif
  546. },
  547. [GGML_TYPE_Q4_1] = {
  548. .type_name = "q4_1",
  549. .blck_size = QK4_1,
  550. .type_size = sizeof(block_q4_1),
  551. .is_quantized = true,
  552. .to_float = (ggml_to_float_t) dequantize_row_q4_1,
  553. .from_float = quantize_row_q4_1,
  554. .from_float_reference = (ggml_from_float_t) quantize_row_q4_1_reference,
  555. .vec_dot = ggml_vec_dot_q4_1_q8_1,
  556. .vec_dot_type = GGML_TYPE_Q8_1,
  557. #if defined (__ARM_FEATURE_MATMUL_INT8)
  558. .nrows = 2,
  559. #else
  560. .nrows = 1,
  561. #endif
  562. },
  563. [4] = { // GGML_TYPE_Q4_2
  564. .type_name = "DEPRECATED",
  565. .blck_size = 0,
  566. .type_size = 0,
  567. .is_quantized = false,
  568. .to_float = NULL,
  569. .from_float = NULL,
  570. .from_float_reference = NULL,
  571. .vec_dot = NULL,
  572. .vec_dot_type = GGML_TYPE_COUNT,
  573. .nrows = 1,
  574. },
  575. [5] = { // GGML_TYPE_Q4_3
  576. .type_name = "DEPRECATED",
  577. .blck_size = 0,
  578. .type_size = 0,
  579. .is_quantized = false,
  580. .to_float = NULL,
  581. .from_float = NULL,
  582. .from_float_reference = NULL,
  583. .vec_dot = NULL,
  584. .vec_dot_type = GGML_TYPE_COUNT,
  585. .nrows = 1,
  586. },
  587. [GGML_TYPE_Q5_0] = {
  588. .type_name = "q5_0",
  589. .blck_size = QK5_0,
  590. .type_size = sizeof(block_q5_0),
  591. .is_quantized = true,
  592. .to_float = (ggml_to_float_t) dequantize_row_q5_0,
  593. .from_float = quantize_row_q5_0,
  594. .from_float_reference = (ggml_from_float_t) quantize_row_q5_0_reference,
  595. .vec_dot = ggml_vec_dot_q5_0_q8_0,
  596. .vec_dot_type = GGML_TYPE_Q8_0,
  597. .nrows = 1,
  598. },
  599. [GGML_TYPE_Q5_1] = {
  600. .type_name = "q5_1",
  601. .blck_size = QK5_1,
  602. .type_size = sizeof(block_q5_1),
  603. .is_quantized = true,
  604. .to_float = (ggml_to_float_t) dequantize_row_q5_1,
  605. .from_float = quantize_row_q5_1,
  606. .from_float_reference = (ggml_from_float_t) quantize_row_q5_1_reference,
  607. .vec_dot = ggml_vec_dot_q5_1_q8_1,
  608. .vec_dot_type = GGML_TYPE_Q8_1,
  609. .nrows = 1,
  610. },
  611. [GGML_TYPE_Q8_0] = {
  612. .type_name = "q8_0",
  613. .blck_size = QK8_0,
  614. .type_size = sizeof(block_q8_0),
  615. .is_quantized = true,
  616. .to_float = (ggml_to_float_t) dequantize_row_q8_0,
  617. .from_float = quantize_row_q8_0,
  618. .from_float_reference = (ggml_from_float_t) quantize_row_q8_0_reference,
  619. .vec_dot = ggml_vec_dot_q8_0_q8_0,
  620. .vec_dot_type = GGML_TYPE_Q8_0,
  621. #if defined (__ARM_FEATURE_MATMUL_INT8)
  622. .nrows = 2,
  623. #else
  624. .nrows = 1,
  625. #endif
  626. },
  627. [GGML_TYPE_Q8_1] = {
  628. .type_name = "q8_1",
  629. .blck_size = QK8_1,
  630. .type_size = sizeof(block_q8_1),
  631. .is_quantized = true,
  632. .from_float = quantize_row_q8_1,
  633. .from_float_reference = (ggml_from_float_t) quantize_row_q8_1_reference,
  634. .vec_dot_type = GGML_TYPE_Q8_1,
  635. .nrows = 1,
  636. },
  637. [GGML_TYPE_Q2_K] = {
  638. .type_name = "q2_K",
  639. .blck_size = QK_K,
  640. .type_size = sizeof(block_q2_K),
  641. .is_quantized = true,
  642. .to_float = (ggml_to_float_t) dequantize_row_q2_K,
  643. .from_float = quantize_row_q2_K,
  644. .from_float_reference = (ggml_from_float_t) quantize_row_q2_K_reference,
  645. .vec_dot = ggml_vec_dot_q2_K_q8_K,
  646. .vec_dot_type = GGML_TYPE_Q8_K,
  647. .nrows = 1,
  648. },
  649. [GGML_TYPE_Q3_K] = {
  650. .type_name = "q3_K",
  651. .blck_size = QK_K,
  652. .type_size = sizeof(block_q3_K),
  653. .is_quantized = true,
  654. .to_float = (ggml_to_float_t) dequantize_row_q3_K,
  655. .from_float = quantize_row_q3_K,
  656. .from_float_reference = (ggml_from_float_t) quantize_row_q3_K_reference,
  657. .vec_dot = ggml_vec_dot_q3_K_q8_K,
  658. .vec_dot_type = GGML_TYPE_Q8_K,
  659. .nrows = 1,
  660. },
  661. [GGML_TYPE_Q4_K] = {
  662. .type_name = "q4_K",
  663. .blck_size = QK_K,
  664. .type_size = sizeof(block_q4_K),
  665. .is_quantized = true,
  666. .to_float = (ggml_to_float_t) dequantize_row_q4_K,
  667. .from_float = quantize_row_q4_K,
  668. .from_float_reference = (ggml_from_float_t) quantize_row_q4_K_reference,
  669. .vec_dot = ggml_vec_dot_q4_K_q8_K,
  670. .vec_dot_type = GGML_TYPE_Q8_K,
  671. .nrows = 1,
  672. },
  673. [GGML_TYPE_Q5_K] = {
  674. .type_name = "q5_K",
  675. .blck_size = QK_K,
  676. .type_size = sizeof(block_q5_K),
  677. .is_quantized = true,
  678. .to_float = (ggml_to_float_t) dequantize_row_q5_K,
  679. .from_float = quantize_row_q5_K,
  680. .from_float_reference = (ggml_from_float_t) quantize_row_q5_K_reference,
  681. .vec_dot = ggml_vec_dot_q5_K_q8_K,
  682. .vec_dot_type = GGML_TYPE_Q8_K,
  683. .nrows = 1,
  684. },
  685. [GGML_TYPE_Q6_K] = {
  686. .type_name = "q6_K",
  687. .blck_size = QK_K,
  688. .type_size = sizeof(block_q6_K),
  689. .is_quantized = true,
  690. .to_float = (ggml_to_float_t) dequantize_row_q6_K,
  691. .from_float = quantize_row_q6_K,
  692. .from_float_reference = (ggml_from_float_t) quantize_row_q6_K_reference,
  693. .vec_dot = ggml_vec_dot_q6_K_q8_K,
  694. .vec_dot_type = GGML_TYPE_Q8_K,
  695. .nrows = 1,
  696. },
  697. [GGML_TYPE_IQ2_XXS] = {
  698. .type_name = "iq2_xxs",
  699. .blck_size = QK_K,
  700. .type_size = sizeof(block_iq2_xxs),
  701. .is_quantized = true,
  702. .to_float = (ggml_to_float_t) dequantize_row_iq2_xxs,
  703. .from_float = NULL,
  704. .from_float_reference = NULL,
  705. .vec_dot = ggml_vec_dot_iq2_xxs_q8_K,
  706. .vec_dot_type = GGML_TYPE_Q8_K,
  707. .nrows = 1,
  708. },
  709. [GGML_TYPE_IQ2_XS] = {
  710. .type_name = "iq2_xs",
  711. .blck_size = QK_K,
  712. .type_size = sizeof(block_iq2_xs),
  713. .is_quantized = true,
  714. .to_float = (ggml_to_float_t) dequantize_row_iq2_xs,
  715. .from_float = NULL,
  716. .from_float_reference = NULL,
  717. .vec_dot = ggml_vec_dot_iq2_xs_q8_K,
  718. .vec_dot_type = GGML_TYPE_Q8_K,
  719. .nrows = 1,
  720. },
  721. [GGML_TYPE_IQ3_XXS] = {
  722. .type_name = "iq3_xxs",
  723. .blck_size = QK_K,
  724. .type_size = sizeof(block_iq3_xxs),
  725. .is_quantized = true,
  726. .to_float = (ggml_to_float_t) dequantize_row_iq3_xxs,
  727. .from_float = quantize_row_iq3_xxs,
  728. .from_float_reference = (ggml_from_float_t)quantize_row_iq3_xxs_reference,
  729. .vec_dot = ggml_vec_dot_iq3_xxs_q8_K,
  730. .vec_dot_type = GGML_TYPE_Q8_K,
  731. .nrows = 1,
  732. },
  733. [GGML_TYPE_IQ3_S] = {
  734. .type_name = "iq3_s",
  735. .blck_size = QK_K,
  736. .type_size = sizeof(block_iq3_s),
  737. .is_quantized = true,
  738. .to_float = (ggml_to_float_t) dequantize_row_iq3_s,
  739. .from_float = quantize_row_iq3_s,
  740. .from_float_reference = (ggml_from_float_t)quantize_row_iq3_s_reference,
  741. .vec_dot = ggml_vec_dot_iq3_s_q8_K,
  742. .vec_dot_type = GGML_TYPE_Q8_K,
  743. .nrows = 1,
  744. },
  745. [GGML_TYPE_IQ2_S] = {
  746. .type_name = "iq2_s",
  747. .blck_size = QK_K,
  748. .type_size = sizeof(block_iq2_s),
  749. .is_quantized = true,
  750. .to_float = (ggml_to_float_t) dequantize_row_iq2_s,
  751. .from_float = quantize_row_iq2_s,
  752. .from_float_reference = (ggml_from_float_t)quantize_row_iq2_s_reference,
  753. .vec_dot = ggml_vec_dot_iq2_s_q8_K,
  754. .vec_dot_type = GGML_TYPE_Q8_K,
  755. .nrows = 1,
  756. },
  757. [GGML_TYPE_IQ1_S] = {
  758. .type_name = "iq1_s",
  759. .blck_size = QK_K,
  760. .type_size = sizeof(block_iq1_s),
  761. .is_quantized = true,
  762. .to_float = (ggml_to_float_t) dequantize_row_iq1_s,
  763. .from_float = NULL,
  764. .from_float_reference = NULL,
  765. .vec_dot = ggml_vec_dot_iq1_s_q8_K,
  766. .vec_dot_type = GGML_TYPE_Q8_K,
  767. .nrows = 1,
  768. },
  769. [GGML_TYPE_IQ1_M] = {
  770. .type_name = "iq1_m",
  771. .blck_size = QK_K,
  772. .type_size = sizeof(block_iq1_m),
  773. .is_quantized = true,
  774. .to_float = (ggml_to_float_t) dequantize_row_iq1_m,
  775. .from_float = NULL,
  776. .from_float_reference = NULL,
  777. .vec_dot = ggml_vec_dot_iq1_m_q8_K,
  778. .vec_dot_type = GGML_TYPE_Q8_K,
  779. .nrows = 1,
  780. },
  781. [GGML_TYPE_IQ4_NL] = {
  782. .type_name = "iq4_nl",
  783. .blck_size = QK4_NL,
  784. .type_size = sizeof(block_iq4_nl),
  785. .is_quantized = true,
  786. .to_float = (ggml_to_float_t) dequantize_row_iq4_nl,
  787. .from_float = quantize_row_iq4_nl,
  788. .from_float_reference = (ggml_from_float_t)quantize_row_iq4_nl_reference,
  789. .vec_dot = ggml_vec_dot_iq4_nl_q8_0,
  790. .vec_dot_type = GGML_TYPE_Q8_0,
  791. .nrows = 1,
  792. },
  793. [GGML_TYPE_IQ4_XS] = {
  794. .type_name = "iq4_xs",
  795. #if QK_K == 64
  796. .blck_size = QK4_NL,
  797. #else
  798. .blck_size = QK_K,
  799. #endif
  800. .type_size = sizeof(block_iq4_xs),
  801. .is_quantized = true,
  802. .to_float = (ggml_to_float_t) dequantize_row_iq4_xs,
  803. .from_float = quantize_row_iq4_xs,
  804. .from_float_reference = (ggml_from_float_t)quantize_row_iq4_xs_reference,
  805. .vec_dot = ggml_vec_dot_iq4_xs_q8_K,
  806. #if QK_K == 64
  807. .vec_dot_type = GGML_TYPE_Q8_0,
  808. #else
  809. .vec_dot_type = GGML_TYPE_Q8_K,
  810. #endif
  811. .nrows = 1,
  812. },
  813. [GGML_TYPE_Q8_K] = {
  814. .type_name = "q8_K",
  815. .blck_size = QK_K,
  816. .type_size = sizeof(block_q8_K),
  817. .is_quantized = true,
  818. .from_float = quantize_row_q8_K,
  819. },
  820. [GGML_TYPE_BF16] = {
  821. .type_name = "bf16",
  822. .blck_size = 1,
  823. .type_size = sizeof(ggml_bf16_t),
  824. .is_quantized = false,
  825. .to_float = (ggml_to_float_t) ggml_bf16_to_fp32_row,
  826. .from_float = (ggml_from_float_t) ggml_fp32_to_bf16_row,
  827. .from_float_reference = (ggml_from_float_t) ggml_fp32_to_bf16_row,
  828. .vec_dot = (ggml_vec_dot_t) ggml_vec_dot_bf16,
  829. .vec_dot_type = GGML_TYPE_BF16,
  830. .nrows = 1,
  831. }
  832. };
  833. // For internal test use
  834. ggml_type_traits_t ggml_internal_get_type_traits(enum ggml_type type) {
  835. GGML_ASSERT(type < GGML_TYPE_COUNT);
  836. return type_traits[type];
  837. }
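// NOTE: illustrative usage sketch added for exposition; not part of the original sources and
// the function name is hypothetical. The traits table above is how generic code dispatches
// per-type kernels: a row of any type that provides .to_float can be expanded to f32 without
// knowing the concrete storage format.
inline static void ggml_example_dequantize_row(enum ggml_type type, const void * src, float * dst, int64_t n) {
    const ggml_type_traits_t traits = ggml_internal_get_type_traits(type);
    GGML_ASSERT(traits.to_float != NULL); // the integer types and Q8_1/Q8_K above define no to_float
    traits.to_float(src, dst, n);         // n is a number of elements, not a number of blocks
}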
  838. //
  839. // simd mappings
  840. //
  841. // we define a common set of C macros which map to specific intrinsics based on the current architecture
  842. // we then implement the fundamental computation operations below using only these macros
843. // adding support for a new architecture only requires defining the corresponding SIMD macros
  844. //
  845. // GGML_F32_STEP / GGML_F16_STEP
  846. // number of elements to process in a single step
  847. //
  848. // GGML_F32_EPR / GGML_F16_EPR
  849. // number of elements to fit in a single register
  850. //
  851. #if defined(__ARM_NEON) && defined(__ARM_FEATURE_FMA)
  852. #define GGML_SIMD
  853. // F32 NEON
  854. #define GGML_F32_STEP 16
  855. #define GGML_F32_EPR 4
  856. #define GGML_F32x4 float32x4_t
  857. #define GGML_F32x4_ZERO vdupq_n_f32(0.0f)
  858. #define GGML_F32x4_SET1(x) vdupq_n_f32(x)
  859. #define GGML_F32x4_LOAD vld1q_f32
  860. #define GGML_F32x4_STORE vst1q_f32
  861. #define GGML_F32x4_FMA(a, b, c) vfmaq_f32(a, b, c)
  862. #define GGML_F32x4_ADD vaddq_f32
  863. #define GGML_F32x4_MUL vmulq_f32
  864. #define GGML_F32x4_REDUCE_ONE(x) vaddvq_f32(x)
  865. #define GGML_F32x4_REDUCE(res, x) \
  866. { \
  867. int offset = GGML_F32_ARR >> 1; \
  868. for (int i = 0; i < offset; ++i) { \
  869. x[i] = vaddq_f32(x[i], x[offset+i]); \
  870. } \
  871. offset >>= 1; \
  872. for (int i = 0; i < offset; ++i) { \
  873. x[i] = vaddq_f32(x[i], x[offset+i]); \
  874. } \
  875. offset >>= 1; \
  876. for (int i = 0; i < offset; ++i) { \
  877. x[i] = vaddq_f32(x[i], x[offset+i]); \
  878. } \
  879. res = GGML_F32x4_REDUCE_ONE(x[0]); \
  880. }
  881. #define GGML_F32_VEC GGML_F32x4
  882. #define GGML_F32_VEC_ZERO GGML_F32x4_ZERO
  883. #define GGML_F32_VEC_SET1 GGML_F32x4_SET1
  884. #define GGML_F32_VEC_LOAD GGML_F32x4_LOAD
  885. #define GGML_F32_VEC_STORE GGML_F32x4_STORE
  886. #define GGML_F32_VEC_FMA GGML_F32x4_FMA
  887. #define GGML_F32_VEC_ADD GGML_F32x4_ADD
  888. #define GGML_F32_VEC_MUL GGML_F32x4_MUL
  889. #define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE
  890. // F16 NEON
  891. #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
  892. #define GGML_F16_STEP 32
  893. #define GGML_F16_EPR 8
  894. #define GGML_F16x8 float16x8_t
  895. #define GGML_F16x8_ZERO vdupq_n_f16(0.0f)
  896. #define GGML_F16x8_SET1(x) vdupq_n_f16(x)
  897. #define GGML_F16x8_LOAD(x) vld1q_f16((const ggml_fp16_internal_t *)(x))
  898. #define GGML_F16x8_STORE vst1q_f16
  899. #define GGML_F16x8_FMA(a, b, c) vfmaq_f16(a, b, c)
  900. #define GGML_F16x8_ADD vaddq_f16
  901. #define GGML_F16x8_MUL vmulq_f16
  902. #define GGML_F16x8_REDUCE(res, x) \
  903. do { \
  904. int offset = GGML_F16_ARR >> 1; \
  905. for (int i = 0; i < offset; ++i) { \
  906. x[i] = vaddq_f16(x[i], x[offset+i]); \
  907. } \
  908. offset >>= 1; \
  909. for (int i = 0; i < offset; ++i) { \
  910. x[i] = vaddq_f16(x[i], x[offset+i]); \
  911. } \
  912. offset >>= 1; \
  913. for (int i = 0; i < offset; ++i) { \
  914. x[i] = vaddq_f16(x[i], x[offset+i]); \
  915. } \
  916. const float32x4_t t0 = vcvt_f32_f16(vget_low_f16 (x[0])); \
  917. const float32x4_t t1 = vcvt_f32_f16(vget_high_f16(x[0])); \
  918. res = (ggml_float) vaddvq_f32(vaddq_f32(t0, t1)); \
  919. } while (0)
  920. #define GGML_F16_VEC GGML_F16x8
  921. #define GGML_F16_VEC_ZERO GGML_F16x8_ZERO
  922. #define GGML_F16_VEC_SET1 GGML_F16x8_SET1
  923. #define GGML_F16_VEC_LOAD(p, i) GGML_F16x8_LOAD(p)
  924. #define GGML_F16_VEC_STORE(p, r, i) GGML_F16x8_STORE((ggml_fp16_internal_t *)(p), r[i])
  925. #define GGML_F16_VEC_FMA GGML_F16x8_FMA
  926. #define GGML_F16_VEC_ADD GGML_F16x8_ADD
  927. #define GGML_F16_VEC_MUL GGML_F16x8_MUL
  928. #define GGML_F16_VEC_REDUCE GGML_F16x8_REDUCE
  929. #else
  930. // if FP16 vector arithmetic is not supported, we use FP32 instead
  931. // and take advantage of the vcvt_ functions to convert to/from FP16
  932. #define GGML_F16_STEP 16
  933. #define GGML_F16_EPR 4
  934. #define GGML_F32Cx4 float32x4_t
  935. #define GGML_F32Cx4_ZERO vdupq_n_f32(0.0f)
  936. #define GGML_F32Cx4_SET1(x) vdupq_n_f32(x)
  937. #define GGML_F32Cx4_LOAD(x) vcvt_f32_f16(vld1_f16((const ggml_fp16_internal_t *)(x)))
  938. #define GGML_F32Cx4_STORE(x, y) vst1_f16(x, vcvt_f16_f32(y))
  939. #define GGML_F32Cx4_FMA(a, b, c) vfmaq_f32(a, b, c)
  940. #define GGML_F32Cx4_ADD vaddq_f32
  941. #define GGML_F32Cx4_MUL vmulq_f32
  942. #define GGML_F32Cx4_REDUCE GGML_F32x4_REDUCE
  943. #define GGML_F16_VEC GGML_F32Cx4
  944. #define GGML_F16_VEC_ZERO GGML_F32Cx4_ZERO
  945. #define GGML_F16_VEC_SET1 GGML_F32Cx4_SET1
  946. #define GGML_F16_VEC_LOAD(p, i) GGML_F32Cx4_LOAD(p)
  947. #define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx4_STORE((ggml_fp16_internal_t *)(p), r[i])
  948. #define GGML_F16_VEC_FMA GGML_F32Cx4_FMA
  949. #define GGML_F16_VEC_ADD GGML_F32Cx4_ADD
  950. #define GGML_F16_VEC_MUL GGML_F32Cx4_MUL
  951. #define GGML_F16_VEC_REDUCE GGML_F32Cx4_REDUCE
  952. #endif
  953. #elif defined(__AVX512F__)
  954. #define GGML_SIMD
  955. // F32 AVX512
  956. #define GGML_F32_STEP 64
  957. #define GGML_F32_EPR 16
  958. #define GGML_F32x16 __m512
  959. #define GGML_F32x16_ZERO _mm512_setzero_ps()
  960. #define GGML_F32x16_SET1(x) _mm512_set1_ps(x)
  961. #define GGML_F32x16_LOAD _mm512_loadu_ps
  962. #define GGML_F32x16_STORE _mm512_storeu_ps
  963. // _mm512_fmadd_ps is defined in AVX512F so no guard is required
  964. #define GGML_F32x16_FMA(a, b, c) _mm512_fmadd_ps(b, c, a)
  965. #define GGML_F32x16_ADD _mm512_add_ps
  966. #define GGML_F32x16_MUL _mm512_mul_ps
  967. #define GGML_F32x16_REDUCE(res, x) \
  968. do { \
  969. int offset = GGML_F32_ARR >> 1; \
  970. for (int i = 0; i < offset; ++i) { \
  971. x[i] = _mm512_add_ps(x[i], x[offset+i]); \
  972. } \
  973. offset >>= 1; \
  974. for (int i = 0; i < offset; ++i) { \
  975. x[i] = _mm512_add_ps(x[i], x[offset+i]); \
  976. } \
  977. offset >>= 1; \
  978. for (int i = 0; i < offset; ++i) { \
  979. x[i] = _mm512_add_ps(x[i], x[offset+i]); \
  980. } \
  981. res = _mm512_reduce_add_ps(x[0]); \
  982. } while (0)
  983. // TODO: is this optimal ?
  984. #define GGML_F32_VEC GGML_F32x16
  985. #define GGML_F32_VEC_ZERO GGML_F32x16_ZERO
  986. #define GGML_F32_VEC_SET1 GGML_F32x16_SET1
  987. #define GGML_F32_VEC_LOAD GGML_F32x16_LOAD
  988. #define GGML_F32_VEC_STORE GGML_F32x16_STORE
  989. #define GGML_F32_VEC_FMA GGML_F32x16_FMA
  990. #define GGML_F32_VEC_ADD GGML_F32x16_ADD
  991. #define GGML_F32_VEC_MUL GGML_F32x16_MUL
  992. #define GGML_F32_VEC_REDUCE GGML_F32x16_REDUCE
993. // F16 AVX512 (implemented with 512-bit F32 registers, mirroring the F16 AVX path below)
  995. #define GGML_F16_STEP 64
  996. #define GGML_F16_EPR 16
997. // AVX512 has an FP16 extension (AVX512_FP16), but it is not assumed to be available here, so FP32 is used instead
  998. #define GGML_F32Cx16 __m512
  999. #define GGML_F32Cx16_ZERO _mm512_setzero_ps()
  1000. #define GGML_F32Cx16_SET1(x) _mm512_set1_ps(x)
  1001. // unlike _mm256_cvt intrinsics that require F16C, _mm512_cvt is defined in AVX512F
  1002. // so F16C guard isn't required
  1003. #define GGML_F32Cx16_LOAD(x) _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)(x)))
  1004. #define GGML_F32Cx16_STORE(x, y) _mm256_storeu_si256((__m256i *)(x), _mm512_cvtps_ph(y, 0))
  1005. #define GGML_F32Cx16_FMA(a, b, c) _mm512_fmadd_ps(b, c, a)
  1006. #define GGML_F32Cx16_ADD _mm512_add_ps
  1007. #define GGML_F32Cx16_MUL _mm512_mul_ps
  1008. #define GGML_F32Cx16_REDUCE(res, x) \
  1009. do { \
  1010. int offset = GGML_F32_ARR >> 1; \
  1011. for (int i = 0; i < offset; ++i) { \
  1012. x[i] = _mm512_add_ps(x[i], x[offset+i]); \
  1013. } \
  1014. offset >>= 1; \
  1015. for (int i = 0; i < offset; ++i) { \
  1016. x[i] = _mm512_add_ps(x[i], x[offset+i]); \
  1017. } \
  1018. offset >>= 1; \
  1019. for (int i = 0; i < offset; ++i) { \
  1020. x[i] = _mm512_add_ps(x[i], x[offset+i]); \
  1021. } \
  1022. res = _mm512_reduce_add_ps(x[0]); \
  1023. } while (0)
  1024. #define GGML_F16_VEC GGML_F32Cx16
  1025. #define GGML_F16_VEC_ZERO GGML_F32Cx16_ZERO
  1026. #define GGML_F16_VEC_SET1 GGML_F32Cx16_SET1
  1027. #define GGML_F16_VEC_LOAD(p, i) GGML_F32Cx16_LOAD(p)
  1028. #define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx16_STORE(p, r[i])
  1029. #define GGML_F16_VEC_FMA GGML_F32Cx16_FMA
  1030. #define GGML_F16_VEC_ADD GGML_F32Cx16_ADD
  1031. #define GGML_F16_VEC_MUL GGML_F32Cx16_MUL
  1032. #define GGML_F16_VEC_REDUCE GGML_F32Cx16_REDUCE
  1033. #elif defined(__AVX__)
  1034. #define GGML_SIMD
  1035. // F32 AVX
  1036. #define GGML_F32_STEP 32
  1037. #define GGML_F32_EPR 8
  1038. #define GGML_F32x8 __m256
  1039. #define GGML_F32x8_ZERO _mm256_setzero_ps()
  1040. #define GGML_F32x8_SET1(x) _mm256_set1_ps(x)
  1041. #define GGML_F32x8_LOAD _mm256_loadu_ps
  1042. #define GGML_F32x8_STORE _mm256_storeu_ps
  1043. #if defined(__FMA__)
  1044. #define GGML_F32x8_FMA(a, b, c) _mm256_fmadd_ps(b, c, a)
  1045. #else
  1046. #define GGML_F32x8_FMA(a, b, c) _mm256_add_ps(_mm256_mul_ps(b, c), a)
  1047. #endif
  1048. #define GGML_F32x8_ADD _mm256_add_ps
  1049. #define GGML_F32x8_MUL _mm256_mul_ps
  1050. #define GGML_F32x8_REDUCE(res, x) \
  1051. do { \
  1052. int offset = GGML_F32_ARR >> 1; \
  1053. for (int i = 0; i < offset; ++i) { \
  1054. x[i] = _mm256_add_ps(x[i], x[offset+i]); \
  1055. } \
  1056. offset >>= 1; \
  1057. for (int i = 0; i < offset; ++i) { \
  1058. x[i] = _mm256_add_ps(x[i], x[offset+i]); \
  1059. } \
  1060. offset >>= 1; \
  1061. for (int i = 0; i < offset; ++i) { \
  1062. x[i] = _mm256_add_ps(x[i], x[offset+i]); \
  1063. } \
  1064. const __m128 t0 = _mm_add_ps(_mm256_castps256_ps128(x[0]), \
  1065. _mm256_extractf128_ps(x[0], 1)); \
  1066. const __m128 t1 = _mm_hadd_ps(t0, t0); \
  1067. res = (ggml_float) _mm_cvtss_f32(_mm_hadd_ps(t1, t1)); \
  1068. } while (0)
  1069. // TODO: is this optimal ?
  1070. #define GGML_F32_VEC GGML_F32x8
  1071. #define GGML_F32_VEC_ZERO GGML_F32x8_ZERO
  1072. #define GGML_F32_VEC_SET1 GGML_F32x8_SET1
  1073. #define GGML_F32_VEC_LOAD GGML_F32x8_LOAD
  1074. #define GGML_F32_VEC_STORE GGML_F32x8_STORE
  1075. #define GGML_F32_VEC_FMA GGML_F32x8_FMA
  1076. #define GGML_F32_VEC_ADD GGML_F32x8_ADD
  1077. #define GGML_F32_VEC_MUL GGML_F32x8_MUL
  1078. #define GGML_F32_VEC_REDUCE GGML_F32x8_REDUCE
  1079. // F16 AVX
  1080. #define GGML_F16_STEP 32
  1081. #define GGML_F16_EPR 8
  1082. // F16 arithmetic is not supported by AVX, so we use F32 instead
  1083. #define GGML_F32Cx8 __m256
  1084. #define GGML_F32Cx8_ZERO _mm256_setzero_ps()
  1085. #define GGML_F32Cx8_SET1(x) _mm256_set1_ps(x)
  1086. #if defined(__F16C__)
  1087. // the _mm256_cvt intrinsics require F16C
  1088. #define GGML_F32Cx8_LOAD(x) _mm256_cvtph_ps(_mm_loadu_si128((const __m128i *)(x)))
  1089. #define GGML_F32Cx8_STORE(x, y) _mm_storeu_si128((__m128i *)(x), _mm256_cvtps_ph(y, 0))
  1090. #else
  1091. static inline __m256 __avx_f32cx8_load(ggml_fp16_t *x) {
  1092. float tmp[8];
  1093. for (int i = 0; i < 8; i++) {
  1094. tmp[i] = GGML_FP16_TO_FP32(x[i]);
  1095. }
  1096. return _mm256_loadu_ps(tmp);
  1097. }
  1098. static inline void __avx_f32cx8_store(ggml_fp16_t *x, __m256 y) {
  1099. float arr[8];
  1100. _mm256_storeu_ps(arr, y);
  1101. for (int i = 0; i < 8; i++)
  1102. x[i] = GGML_FP32_TO_FP16(arr[i]);
  1103. }
  1104. #define GGML_F32Cx8_LOAD(x) __avx_f32cx8_load(x)
  1105. #define GGML_F32Cx8_STORE(x, y) __avx_f32cx8_store(x, y)
  1106. #endif
  1107. #define GGML_F32Cx8_FMA GGML_F32x8_FMA
  1108. #define GGML_F32Cx8_ADD _mm256_add_ps
  1109. #define GGML_F32Cx8_MUL _mm256_mul_ps
  1110. #define GGML_F32Cx8_REDUCE GGML_F32x8_REDUCE
  1111. #define GGML_F16_VEC GGML_F32Cx8
  1112. #define GGML_F16_VEC_ZERO GGML_F32Cx8_ZERO
  1113. #define GGML_F16_VEC_SET1 GGML_F32Cx8_SET1
  1114. #define GGML_F16_VEC_LOAD(p, i) GGML_F32Cx8_LOAD(p)
  1115. #define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx8_STORE(p, r[i])
  1116. #define GGML_F16_VEC_FMA GGML_F32Cx8_FMA
  1117. #define GGML_F16_VEC_ADD GGML_F32Cx8_ADD
  1118. #define GGML_F16_VEC_MUL GGML_F32Cx8_MUL
  1119. #define GGML_F16_VEC_REDUCE GGML_F32Cx8_REDUCE
  1120. #elif defined(__POWER9_VECTOR__)
  1121. #define GGML_SIMD
  1122. // F32 POWER9
  1123. #define GGML_F32_STEP 32
  1124. #define GGML_F32_EPR 4
  1125. #define GGML_F32x4 vector float
  1126. #define GGML_F32x4_ZERO 0.0f
  1127. #define GGML_F32x4_SET1 vec_splats
  1128. #define GGML_F32x4_LOAD(p) vec_xl(0, p)
  1129. #define GGML_F32x4_STORE(p, r) vec_xst(r, 0, p)
  1130. #define GGML_F32x4_FMA(a, b, c) vec_madd(b, c, a)
  1131. #define GGML_F32x4_ADD vec_add
  1132. #define GGML_F32x4_MUL vec_mul
  1133. #define GGML_F32x4_REDUCE(res, x) \
  1134. { \
  1135. int offset = GGML_F32_ARR >> 1; \
  1136. for (int i = 0; i < offset; ++i) { \
  1137. x[i] = vec_add(x[i], x[offset+i]); \
  1138. } \
  1139. offset >>= 1; \
  1140. for (int i = 0; i < offset; ++i) { \
  1141. x[i] = vec_add(x[i], x[offset+i]); \
  1142. } \
  1143. offset >>= 1; \
  1144. for (int i = 0; i < offset; ++i) { \
  1145. x[i] = vec_add(x[i], x[offset+i]); \
  1146. } \
  1147. res = vec_extract(x[0], 0) + \
  1148. vec_extract(x[0], 1) + \
  1149. vec_extract(x[0], 2) + \
  1150. vec_extract(x[0], 3); \
  1151. }
  1152. #define GGML_F32_VEC GGML_F32x4
  1153. #define GGML_F32_VEC_ZERO GGML_F32x4_ZERO
  1154. #define GGML_F32_VEC_SET1 GGML_F32x4_SET1
  1155. #define GGML_F32_VEC_LOAD GGML_F32x4_LOAD
  1156. #define GGML_F32_VEC_STORE GGML_F32x4_STORE
  1157. #define GGML_F32_VEC_FMA GGML_F32x4_FMA
  1158. #define GGML_F32_VEC_ADD GGML_F32x4_ADD
  1159. #define GGML_F32_VEC_MUL GGML_F32x4_MUL
  1160. #define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE
  1161. // F16 POWER9
  1162. #define GGML_F16_STEP GGML_F32_STEP
  1163. #define GGML_F16_EPR GGML_F32_EPR
  1164. #define GGML_F16_VEC GGML_F32x4
  1165. #define GGML_F16_VEC_ZERO GGML_F32x4_ZERO
  1166. #define GGML_F16_VEC_SET1 GGML_F32x4_SET1
  1167. #define GGML_F16_VEC_FMA GGML_F32x4_FMA
  1168. #define GGML_F16_VEC_ADD GGML_F32x4_ADD
  1169. #define GGML_F16_VEC_MUL GGML_F32x4_MUL
  1170. #define GGML_F16_VEC_REDUCE GGML_F32x4_REDUCE
  1171. // Use vec_xl, not vec_ld, in case the load address is not aligned.
  1172. #define GGML_F16_VEC_LOAD(p, i) (i & 0x1) ? \
  1173. vec_extract_fp32_from_shorth(vec_xl(0, p - GGML_F16_EPR)) : \
  1174. vec_extract_fp32_from_shortl(vec_xl(0, p))
  1175. #define GGML_ENDIAN_BYTE(i) ((unsigned char *)&(uint16_t){1})[i]
  1176. #define GGML_F16_VEC_STORE(p, r, i) \
  1177. if (i & 0x1) \
  1178. vec_xst(vec_pack_to_short_fp32(r[i - GGML_ENDIAN_BYTE(1)], \
  1179. r[i - GGML_ENDIAN_BYTE(0)]), \
  1180. 0, p - GGML_F16_EPR)
  1181. #elif defined(__wasm_simd128__)
  1182. #define GGML_SIMD
  1183. // F32 WASM
  1184. #define GGML_F32_STEP 16
  1185. #define GGML_F32_EPR 4
  1186. #define GGML_F32x4 v128_t
  1187. #define GGML_F32x4_ZERO wasm_f32x4_splat(0.0f)
  1188. #define GGML_F32x4_SET1(x) wasm_f32x4_splat(x)
  1189. #define GGML_F32x4_LOAD wasm_v128_load
  1190. #define GGML_F32x4_STORE wasm_v128_store
  1191. #define GGML_F32x4_FMA(a, b, c) wasm_f32x4_add(wasm_f32x4_mul(b, c), a)
  1192. #define GGML_F32x4_ADD wasm_f32x4_add
  1193. #define GGML_F32x4_MUL wasm_f32x4_mul
  1194. #define GGML_F32x4_REDUCE(res, x) \
  1195. { \
  1196. int offset = GGML_F32_ARR >> 1; \
  1197. for (int i = 0; i < offset; ++i) { \
  1198. x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
  1199. } \
  1200. offset >>= 1; \
  1201. for (int i = 0; i < offset; ++i) { \
  1202. x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
  1203. } \
  1204. offset >>= 1; \
  1205. for (int i = 0; i < offset; ++i) { \
  1206. x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
  1207. } \
  1208. res = wasm_f32x4_extract_lane(x[0], 0) + \
  1209. wasm_f32x4_extract_lane(x[0], 1) + \
  1210. wasm_f32x4_extract_lane(x[0], 2) + \
  1211. wasm_f32x4_extract_lane(x[0], 3); \
  1212. }
  1213. #define GGML_F32_VEC GGML_F32x4
  1214. #define GGML_F32_VEC_ZERO GGML_F32x4_ZERO
  1215. #define GGML_F32_VEC_SET1 GGML_F32x4_SET1
  1216. #define GGML_F32_VEC_LOAD GGML_F32x4_LOAD
  1217. #define GGML_F32_VEC_STORE GGML_F32x4_STORE
  1218. #define GGML_F32_VEC_FMA GGML_F32x4_FMA
  1219. #define GGML_F32_VEC_ADD GGML_F32x4_ADD
  1220. #define GGML_F32_VEC_MUL GGML_F32x4_MUL
  1221. #define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE
  1222. // F16 WASM
  1223. #define GGML_F16_STEP 16
  1224. #define GGML_F16_EPR 4
  1225. inline static v128_t __wasm_f16x4_load(const ggml_fp16_t * p) {
  1226. float tmp[4];
  1227. tmp[0] = GGML_FP16_TO_FP32(p[0]);
  1228. tmp[1] = GGML_FP16_TO_FP32(p[1]);
  1229. tmp[2] = GGML_FP16_TO_FP32(p[2]);
  1230. tmp[3] = GGML_FP16_TO_FP32(p[3]);
  1231. return wasm_v128_load(tmp);
  1232. }
  1233. inline static void __wasm_f16x4_store(ggml_fp16_t * p, v128_t x) {
  1234. float tmp[4];
  1235. wasm_v128_store(tmp, x);
  1236. p[0] = GGML_FP32_TO_FP16(tmp[0]);
  1237. p[1] = GGML_FP32_TO_FP16(tmp[1]);
  1238. p[2] = GGML_FP32_TO_FP16(tmp[2]);
  1239. p[3] = GGML_FP32_TO_FP16(tmp[3]);
  1240. }
  1241. #define GGML_F16x4 v128_t
  1242. #define GGML_F16x4_ZERO wasm_f32x4_splat(0.0f)
  1243. #define GGML_F16x4_SET1(x) wasm_f32x4_splat(x)
  1244. #define GGML_F16x4_LOAD(x) __wasm_f16x4_load(x)
  1245. #define GGML_F16x4_STORE(x, y) __wasm_f16x4_store(x, y)
  1246. #define GGML_F16x4_FMA GGML_F32x4_FMA
  1247. #define GGML_F16x4_ADD wasm_f32x4_add
  1248. #define GGML_F16x4_MUL wasm_f32x4_mul
  1249. #define GGML_F16x4_REDUCE(res, x) \
  1250. { \
  1251. int offset = GGML_F16_ARR >> 1; \
  1252. for (int i = 0; i < offset; ++i) { \
  1253. x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
  1254. } \
  1255. offset >>= 1; \
  1256. for (int i = 0; i < offset; ++i) { \
  1257. x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
  1258. } \
  1259. offset >>= 1; \
  1260. for (int i = 0; i < offset; ++i) { \
  1261. x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
  1262. } \
  1263. res = wasm_f32x4_extract_lane(x[0], 0) + \
  1264. wasm_f32x4_extract_lane(x[0], 1) + \
  1265. wasm_f32x4_extract_lane(x[0], 2) + \
  1266. wasm_f32x4_extract_lane(x[0], 3); \
  1267. }
  1268. #define GGML_F16_VEC GGML_F16x4
  1269. #define GGML_F16_VEC_ZERO GGML_F16x4_ZERO
  1270. #define GGML_F16_VEC_SET1 GGML_F16x4_SET1
  1271. #define GGML_F16_VEC_LOAD(p, i) GGML_F16x4_LOAD(p)
  1272. #define GGML_F16_VEC_STORE(p, r, i) GGML_F16x4_STORE(p, r[i])
  1273. #define GGML_F16_VEC_FMA GGML_F16x4_FMA
  1274. #define GGML_F16_VEC_ADD GGML_F16x4_ADD
  1275. #define GGML_F16_VEC_MUL GGML_F16x4_MUL
  1276. #define GGML_F16_VEC_REDUCE GGML_F16x4_REDUCE
  1277. #elif defined(__SSE3__)
  1278. #define GGML_SIMD
  1279. // F32 SSE
  1280. #define GGML_F32_STEP 32
  1281. #define GGML_F32_EPR 4
  1282. #define GGML_F32x4 __m128
  1283. #define GGML_F32x4_ZERO _mm_setzero_ps()
  1284. #define GGML_F32x4_SET1(x) _mm_set1_ps(x)
  1285. #define GGML_F32x4_LOAD _mm_loadu_ps
  1286. #define GGML_F32x4_STORE _mm_storeu_ps
  1287. #if defined(__FMA__)
  1288. // TODO: Does this work?
  1289. #define GGML_F32x4_FMA(a, b, c) _mm_fmadd_ps(b, c, a)
  1290. #else
  1291. #define GGML_F32x4_FMA(a, b, c) _mm_add_ps(_mm_mul_ps(b, c), a)
  1292. #endif
  1293. #define GGML_F32x4_ADD _mm_add_ps
  1294. #define GGML_F32x4_MUL _mm_mul_ps
  1295. #define GGML_F32x4_REDUCE(res, x) \
  1296. { \
  1297. int offset = GGML_F32_ARR >> 1; \
  1298. for (int i = 0; i < offset; ++i) { \
  1299. x[i] = _mm_add_ps(x[i], x[offset+i]); \
  1300. } \
  1301. offset >>= 1; \
  1302. for (int i = 0; i < offset; ++i) { \
  1303. x[i] = _mm_add_ps(x[i], x[offset+i]); \
  1304. } \
  1305. offset >>= 1; \
  1306. for (int i = 0; i < offset; ++i) { \
  1307. x[i] = _mm_add_ps(x[i], x[offset+i]); \
  1308. } \
  1309. const __m128 t0 = _mm_hadd_ps(x[0], x[0]); \
  1310. res = (ggml_float) _mm_cvtss_f32(_mm_hadd_ps(t0, t0)); \
  1311. }
  1312. // TODO: is this optimal ?
  1313. #define GGML_F32_VEC GGML_F32x4
  1314. #define GGML_F32_VEC_ZERO GGML_F32x4_ZERO
  1315. #define GGML_F32_VEC_SET1 GGML_F32x4_SET1
  1316. #define GGML_F32_VEC_LOAD GGML_F32x4_LOAD
  1317. #define GGML_F32_VEC_STORE GGML_F32x4_STORE
  1318. #define GGML_F32_VEC_FMA GGML_F32x4_FMA
  1319. #define GGML_F32_VEC_ADD GGML_F32x4_ADD
  1320. #define GGML_F32_VEC_MUL GGML_F32x4_MUL
  1321. #define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE
  1322. // F16 SSE
  1323. #define GGML_F16_STEP 32
  1324. #define GGML_F16_EPR 4
  1325. static inline __m128 __sse_f16x4_load(ggml_fp16_t *x) {
  1326. float tmp[4];
  1327. tmp[0] = GGML_FP16_TO_FP32(x[0]);
  1328. tmp[1] = GGML_FP16_TO_FP32(x[1]);
  1329. tmp[2] = GGML_FP16_TO_FP32(x[2]);
  1330. tmp[3] = GGML_FP16_TO_FP32(x[3]);
  1331. return _mm_loadu_ps(tmp);
  1332. }
  1333. static inline void __sse_f16x4_store(ggml_fp16_t *x, __m128 y) {
  1334. float arr[4];
  1335. _mm_storeu_ps(arr, y);
  1336. x[0] = GGML_FP32_TO_FP16(arr[0]);
  1337. x[1] = GGML_FP32_TO_FP16(arr[1]);
  1338. x[2] = GGML_FP32_TO_FP16(arr[2]);
  1339. x[3] = GGML_FP32_TO_FP16(arr[3]);
  1340. }
  1341. #define GGML_F32Cx4 __m128
  1342. #define GGML_F32Cx4_ZERO _mm_setzero_ps()
  1343. #define GGML_F32Cx4_SET1(x) _mm_set1_ps(x)
  1344. #define GGML_F32Cx4_LOAD(x) __sse_f16x4_load(x)
  1345. #define GGML_F32Cx4_STORE(x, y) __sse_f16x4_store(x, y)
  1346. #define GGML_F32Cx4_FMA GGML_F32x4_FMA
  1347. #define GGML_F32Cx4_ADD _mm_add_ps
  1348. #define GGML_F32Cx4_MUL _mm_mul_ps
  1349. #define GGML_F32Cx4_REDUCE GGML_F32x4_REDUCE
  1350. #define GGML_F16_VEC GGML_F32Cx4
  1351. #define GGML_F16_VEC_ZERO GGML_F32Cx4_ZERO
  1352. #define GGML_F16_VEC_SET1 GGML_F32Cx4_SET1
  1353. #define GGML_F16_VEC_LOAD(p, i) GGML_F32Cx4_LOAD(p)
  1354. #define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx4_STORE(p, r[i])
  1355. #define GGML_F16_VEC_FMA GGML_F32Cx4_FMA
  1356. #define GGML_F16_VEC_ADD GGML_F32Cx4_ADD
  1357. #define GGML_F16_VEC_MUL GGML_F32Cx4_MUL
  1358. #define GGML_F16_VEC_REDUCE GGML_F32Cx4_REDUCE
  1359. #endif
  1360. // GGML_F32_ARR / GGML_F16_ARR
  1361. // number of registers to use per step
  1362. #ifdef GGML_SIMD
  1363. #define GGML_F32_ARR (GGML_F32_STEP/GGML_F32_EPR)
  1364. #define GGML_F16_ARR (GGML_F16_STEP/GGML_F16_EPR)
  1365. #endif
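// NOTE: explanatory comment added for exposition; not part of the original sources.
// As a worked example, on AVX GGML_F32_EPR = 8 floats fit in one register and GGML_F32_STEP = 32
// floats are consumed per step, so GGML_F32_ARR = 32/8 = 4 independent accumulators are kept live
// per iteration. Every vector kernel below follows the same shape:
//
//     const int np = (n & ~(GGML_F32_STEP - 1));            // n rounded down to a whole step
//     for (int i = 0; i < np; i += GGML_F32_STEP) {
//         for (int j = 0; j < GGML_F32_ARR; j++) {          // one register's worth per j
//             ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR);
//             // ... FMA/MUL/ADD on ax[j] ...
//         }
//     }
//     // scalar loop over the remaining n - np elements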
  1366. //
  1367. // fundamental operations
  1368. //
  1369. inline static void ggml_vec_set_i8(const int n, int8_t * x, const int8_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
  1370. inline static void ggml_vec_set_i16(const int n, int16_t * x, const int16_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
  1371. inline static void ggml_vec_set_i32(const int n, int32_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
  1372. inline static void ggml_vec_set_f16(const int n, ggml_fp16_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
  1373. inline static void ggml_vec_set_bf16(const int n, ggml_bf16_t * x, const ggml_bf16_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
  1374. inline static void ggml_vec_add_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i] + y[i]; }
  1375. inline static void ggml_vec_add1_f32(const int n, float * z, const float * x, const float v) { for (int i = 0; i < n; ++i) z[i] = x[i] + v; }
  1376. inline static void ggml_vec_acc_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] += x[i]; }
  1377. inline static void ggml_vec_acc1_f32(const int n, float * y, const float v) { for (int i = 0; i < n; ++i) y[i] += v; }
  1378. inline static void ggml_vec_sub_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i] - y[i]; }
  1379. inline static void ggml_vec_set_f32 (const int n, float * x, const float v) { for (int i = 0; i < n; ++i) x[i] = v; }
  1380. inline static void ggml_vec_cpy_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i]; }
  1381. inline static void ggml_vec_neg_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = -x[i]; }
  1382. inline static void ggml_vec_mul_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]*y[i]; }
  1383. inline static void ggml_vec_div_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]/y[i]; }
  1384. static void ggml_vec_dot_f32(int n, float * restrict s, size_t bs, const float * restrict x, size_t bx, const float * restrict y, size_t by, int nrc) {
  1385. assert(nrc == 1);
  1386. UNUSED(nrc);
  1387. UNUSED(bx);
  1388. UNUSED(by);
  1389. UNUSED(bs);
  1390. #if defined(GGML_SIMD)
  1391. float sumf = 0.0f;
  1392. const int np = (n & ~(GGML_F32_STEP - 1));
  1393. GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO };
  1394. GGML_F32_VEC ax[GGML_F32_ARR];
  1395. GGML_F32_VEC ay[GGML_F32_ARR];
  1396. for (int i = 0; i < np; i += GGML_F32_STEP) {
  1397. for (int j = 0; j < GGML_F32_ARR; j++) {
  1398. ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR);
  1399. ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
  1400. sum[j] = GGML_F32_VEC_FMA(sum[j], ax[j], ay[j]);
  1401. }
  1402. }
  1403. // reduce sum0..sum3 to sum0
  1404. GGML_F32_VEC_REDUCE(sumf, sum);
  1405. // leftovers
  1406. for (int i = np; i < n; ++i) {
  1407. sumf += x[i]*y[i];
  1408. }
  1409. #else
  1410. // scalar
  1411. ggml_float sumf = 0.0;
  1412. for (int i = 0; i < n; ++i) {
  1413. sumf += (ggml_float)(x[i]*y[i]);
  1414. }
  1415. #endif
  1416. *s = sumf;
  1417. }
  1418. static void ggml_vec_dot_bf16(int n, float * restrict s, size_t bs, ggml_bf16_t * restrict x, size_t bx, ggml_bf16_t * restrict y, size_t by, int nrc) {
  1419. assert(nrc == 1);
  1420. UNUSED(nrc);
  1421. UNUSED(bx);
  1422. UNUSED(by);
  1423. UNUSED(bs);
  1424. int i = 0;
  1425. ggml_float sumf = 0;
  1426. #if defined(__AVX512BF16__)
  1427. __m512 c1 = _mm512_setzero_ps();
  1428. __m512 c2 = _mm512_setzero_ps();
  1429. for (; i + 64 <= n; i += 64) {
  1430. c1 = _mm512_dpbf16_ps(c1, (__m512bh)_mm512_loadu_ps((const float *)(x + i)),
  1431. (__m512bh)_mm512_loadu_ps((const float *)(y + i)));
  1432. c2 = _mm512_dpbf16_ps(c2, (__m512bh)_mm512_loadu_ps((const float *)(x + i + 32)),
  1433. (__m512bh)_mm512_loadu_ps((const float *)(y + i + 32)));
  1434. }
  1435. sumf += (ggml_float)_mm512_reduce_add_ps(c1);
  1436. sumf += (ggml_float)_mm512_reduce_add_ps(c2);
  1437. #elif defined(__AVX512F__)
  1438. #define LOAD(p) _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_cvtepu16_epi32(_mm256_loadu_si256((const __m256i *)(p))), 16))
  1439. __m512 c1 = _mm512_setzero_ps();
  1440. __m512 c2 = _mm512_setzero_ps();
  1441. for (; i + 32 <= n; i += 32) {
  1442. c1 = _mm512_add_ps(_mm512_mul_ps(LOAD(x + i), LOAD(y + i)), c1);
  1443. c2 = _mm512_add_ps(_mm512_mul_ps(LOAD(x + i + 16), LOAD(y + i + 16)), c2);
  1444. }
  1445. sumf += (ggml_float)_mm512_reduce_add_ps(c1);
  1446. sumf += (ggml_float)_mm512_reduce_add_ps(c2);
  1447. #undef LOAD
  1448. #elif defined(__AVX2__)
  1449. #define LOAD(p) _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_cvtepu16_epi32(_mm_loadu_si128((const __m128i *)(p))), 16))
  1450. __m256 c1 = _mm256_setzero_ps();
  1451. __m256 c2 = _mm256_setzero_ps();
  1452. __m256 c3 = _mm256_setzero_ps();
  1453. __m256 c4 = _mm256_setzero_ps();
  1454. for (; i + 32 <= n; i += 32) {
  1455. c1 = _mm256_add_ps(_mm256_mul_ps(LOAD(x + i), LOAD(y + i)), c1);
  1456. c2 = _mm256_add_ps(_mm256_mul_ps(LOAD(x + i + 8), LOAD(y + i + 8)), c2);
  1457. c3 = _mm256_add_ps(_mm256_mul_ps(LOAD(x + i + 16), LOAD(y + i + 16)), c3);
  1458. c4 = _mm256_add_ps(_mm256_mul_ps(LOAD(x + i + 24), LOAD(y + i + 24)), c4);
  1459. }
  1460. __m128 g;
  1461. c1 = _mm256_add_ps(_mm256_add_ps(c1, c3),
  1462. _mm256_add_ps(c2, c4));
  1463. g = _mm_add_ps(_mm256_extractf128_ps(c1, 1),
  1464. _mm256_castps256_ps128(c1));
  1465. g = _mm_add_ps(g, _mm_movehl_ps(g, g));
  1466. g = _mm_add_ss(g, _mm_movehdup_ps(g));
  1467. sumf += (ggml_float)_mm_cvtss_f32(g);
  1468. #undef LOAD
  1469. #endif
  1470. for (; i < n; ++i) {
  1471. sumf += (ggml_float)(GGML_BF16_TO_FP32(x[i]) *
  1472. GGML_BF16_TO_FP32(y[i]));
  1473. }
  1474. *s = sumf;
  1475. }
  1476. static void ggml_vec_dot_f16(int n, float * restrict s, size_t bs, ggml_fp16_t * restrict x, size_t bx, ggml_fp16_t * restrict y, size_t by, int nrc) {
  1477. assert(nrc == 1);
  1478. UNUSED(nrc);
  1479. UNUSED(bx);
  1480. UNUSED(by);
  1481. UNUSED(bs);
  1482. ggml_float sumf = 0.0;
  1483. #if defined(GGML_SIMD)
  1484. const int np = (n & ~(GGML_F16_STEP - 1));
  1485. GGML_F16_VEC sum[GGML_F16_ARR] = { GGML_F16_VEC_ZERO };
  1486. GGML_F16_VEC ax[GGML_F16_ARR];
  1487. GGML_F16_VEC ay[GGML_F16_ARR];
  1488. for (int i = 0; i < np; i += GGML_F16_STEP) {
  1489. for (int j = 0; j < GGML_F16_ARR; j++) {
  1490. ax[j] = GGML_F16_VEC_LOAD(x + i + j*GGML_F16_EPR, j);
  1491. ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j);
  1492. sum[j] = GGML_F16_VEC_FMA(sum[j], ax[j], ay[j]);
  1493. }
  1494. }
  1495. // reduce sum0..sum3 to sum0
  1496. GGML_F16_VEC_REDUCE(sumf, sum);
  1497. // leftovers
  1498. for (int i = np; i < n; ++i) {
  1499. sumf += (ggml_float)(GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i]));
  1500. }
  1501. #else
  1502. for (int i = 0; i < n; ++i) {
  1503. sumf += (ggml_float)(GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i]));
  1504. }
  1505. #endif
  1506. *s = sumf;
  1507. }
  1508. // compute GGML_VEC_DOT_UNROLL dot products at once
  1509. // xs - x row stride in bytes
  1510. inline static void ggml_vec_dot_f16_unroll(const int n, const int xs, float * restrict s, void * restrict xv, ggml_fp16_t * restrict y) {
  1511. ggml_float sumf[GGML_VEC_DOT_UNROLL] = { 0.0 };
  1512. ggml_fp16_t * restrict x[GGML_VEC_DOT_UNROLL];
  1513. for (int i = 0; i < GGML_VEC_DOT_UNROLL; ++i) {
  1514. x[i] = (ggml_fp16_t *) ((char *) xv + i*xs);
  1515. }
  1516. #if defined(GGML_SIMD)
  1517. const int np = (n & ~(GGML_F16_STEP - 1));
  1518. GGML_F16_VEC sum[GGML_VEC_DOT_UNROLL][GGML_F16_ARR] = { { GGML_F16_VEC_ZERO } };
  1519. GGML_F16_VEC ax[GGML_F16_ARR];
  1520. GGML_F16_VEC ay[GGML_F16_ARR];
  1521. for (int i = 0; i < np; i += GGML_F16_STEP) {
  1522. for (int j = 0; j < GGML_F16_ARR; j++) {
  1523. ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j);
  1524. for (int k = 0; k < GGML_VEC_DOT_UNROLL; ++k) {
  1525. ax[j] = GGML_F16_VEC_LOAD(x[k] + i + j*GGML_F16_EPR, j);
  1526. sum[k][j] = GGML_F16_VEC_FMA(sum[k][j], ax[j], ay[j]);
  1527. }
  1528. }
  1529. }
  1530. // reduce sum0..sum3 to sum0
  1531. for (int k = 0; k < GGML_VEC_DOT_UNROLL; ++k) {
  1532. GGML_F16_VEC_REDUCE(sumf[k], sum[k]);
  1533. }
  1534. // leftovers
  1535. for (int i = np; i < n; ++i) {
  1536. for (int j = 0; j < GGML_VEC_DOT_UNROLL; ++j) {
  1537. sumf[j] += (ggml_float)(GGML_FP16_TO_FP32(x[j][i])*GGML_FP16_TO_FP32(y[i]));
  1538. }
  1539. }
  1540. #else
  1541. for (int i = 0; i < n; ++i) {
  1542. for (int j = 0; j < GGML_VEC_DOT_UNROLL; ++j) {
  1543. sumf[j] += (ggml_float)(GGML_FP16_TO_FP32(x[j][i])*GGML_FP16_TO_FP32(y[i]));
  1544. }
  1545. }
  1546. #endif
  1547. for (int i = 0; i < GGML_VEC_DOT_UNROLL; ++i) {
  1548. s[i] = sumf[i];
  1549. }
  1550. }
  1551. inline static void ggml_vec_mad_f32(const int n, float * restrict y, const float * restrict x, const float v) {
  1552. #if defined(GGML_SIMD)
  1553. const int np = (n & ~(GGML_F32_STEP - 1));
  1554. GGML_F32_VEC vx = GGML_F32_VEC_SET1(v);
  1555. GGML_F32_VEC ax[GGML_F32_ARR];
  1556. GGML_F32_VEC ay[GGML_F32_ARR];
  1557. for (int i = 0; i < np; i += GGML_F32_STEP) {
  1558. for (int j = 0; j < GGML_F32_ARR; j++) {
  1559. ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR);
  1560. ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
  1561. ay[j] = GGML_F32_VEC_FMA(ay[j], ax[j], vx);
  1562. GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]);
  1563. }
  1564. }
  1565. // leftovers
  1566. for (int i = np; i < n; ++i) {
  1567. y[i] += x[i]*v;
  1568. }
  1569. #else
  1570. // scalar
  1571. for (int i = 0; i < n; ++i) {
  1572. y[i] += x[i]*v;
  1573. }
  1574. #endif
  1575. }
  1576. inline static void ggml_vec_mad_f16(const int n, ggml_fp16_t * restrict y, const ggml_fp16_t * restrict x, const float v) {
  1577. #if defined(GGML_SIMD)
  1578. const int np = (n & ~(GGML_F16_STEP - 1));
  1579. GGML_F16_VEC vx = GGML_F16_VEC_SET1(v);
  1580. GGML_F16_VEC ax[GGML_F16_ARR];
  1581. GGML_F16_VEC ay[GGML_F16_ARR];
  1582. for (int i = 0; i < np; i += GGML_F16_STEP) {
  1583. for (int j = 0; j < GGML_F16_ARR; j++) {
  1584. ax[j] = GGML_F16_VEC_LOAD(x + i + j*GGML_F16_EPR, j);
  1585. ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j);
  1586. ay[j] = GGML_F16_VEC_FMA(ay[j], ax[j], vx);
  1587. GGML_F16_VEC_STORE(y + i + j*GGML_F16_EPR, ay, j);
  1588. }
  1589. }
  1590. // leftovers
  1591. for (int i = np; i < n; ++i) {
  1592. y[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(y[i]) + GGML_FP16_TO_FP32(x[i])*v);
  1593. }
  1594. #else
  1595. // scalar
  1596. for (int i = 0; i < n; ++i) {
  1597. y[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(y[i]) + GGML_FP16_TO_FP32(x[i])*v);
  1598. }
  1599. #endif
  1600. }
  1601. // xs and vs are byte strides of x and v
  1602. inline static void ggml_vec_mad_f32_unroll(const int n, const int xs, const int vs, float * restrict y, const float * restrict xv, const float * restrict vv) {
  1603. const float * restrict x[GGML_VEC_MAD_UNROLL];
  1604. const float * restrict v[GGML_VEC_MAD_UNROLL];
  1605. for (int i = 0; i < GGML_VEC_MAD_UNROLL; ++i) {
  1606. x[i] = (const float *) ((const char *) xv + i*xs);
  1607. v[i] = (const float *) ((const char *) vv + i*vs);
  1608. }
  1609. #if defined(GGML_SIMD)
  1610. const int np = (n & ~(GGML_F32_STEP - 1));
  1611. GGML_F32_VEC vx[GGML_VEC_MAD_UNROLL];
  1612. for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) {
  1613. vx[k] = GGML_F32_VEC_SET1(v[k][0]);
  1614. }
  1615. GGML_F32_VEC ax[GGML_VEC_MAD_UNROLL][GGML_F32_ARR];
  1616. GGML_F32_VEC ay[GGML_F32_ARR];
  1617. for (int i = 0; i < np; i += GGML_F32_STEP) {
  1618. for (int j = 0; j < GGML_F32_ARR; j++) {
  1619. ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
  1620. for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) {
  1621. ax[k][j] = GGML_F32_VEC_LOAD(x[k] + i + j*GGML_F32_EPR);
  1622. ay[j] = GGML_F32_VEC_FMA(ay[j], ax[k][j], vx[k]);
  1623. }
  1624. GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]);
  1625. }
  1626. }
  1627. // leftovers
  1628. for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) {
  1629. for (int i = np; i < n; ++i) {
  1630. y[i] += x[k][i]*v[k][0];
  1631. }
  1632. }
  1633. #else
  1634. // scalar
  1635. for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) {
  1636. for (int i = 0; i < n; ++i) {
  1637. y[i] += x[k][i]*v[k][0];
  1638. }
  1639. }
  1640. #endif
  1641. }
  1642. //inline static void ggml_vec_scale_f32(const int n, float * y, const float v) { for (int i = 0; i < n; ++i) y[i] *= v; }
  1643. inline static void ggml_vec_scale_f32(const int n, float * y, const float v) {
  1644. #if defined(GGML_USE_ACCELERATE)
  1645. vDSP_vsmul(y, 1, &v, y, 1, n);
  1646. #elif defined(GGML_SIMD)
  1647. const int np = (n & ~(GGML_F32_STEP - 1));
  1648. GGML_F32_VEC vx = GGML_F32_VEC_SET1(v);
  1649. GGML_F32_VEC ay[GGML_F32_ARR];
  1650. for (int i = 0; i < np; i += GGML_F32_STEP) {
  1651. for (int j = 0; j < GGML_F32_ARR; j++) {
  1652. ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
  1653. ay[j] = GGML_F32_VEC_MUL(ay[j], vx);
  1654. GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]);
  1655. }
  1656. }
  1657. // leftovers
  1658. for (int i = np; i < n; ++i) {
  1659. y[i] *= v;
  1660. }
  1661. #else
  1662. // scalar
  1663. for (int i = 0; i < n; ++i) {
  1664. y[i] *= v;
  1665. }
  1666. #endif
  1667. }
  1668. inline static void ggml_vec_scale_f16(const int n, ggml_fp16_t * y, const float v) {
  1669. #if defined(GGML_SIMD)
  1670. const int np = (n & ~(GGML_F16_STEP - 1));
  1671. GGML_F16_VEC vx = GGML_F16_VEC_SET1(v);
  1672. GGML_F16_VEC ay[GGML_F16_ARR];
  1673. for (int i = 0; i < np; i += GGML_F16_STEP) {
  1674. for (int j = 0; j < GGML_F16_ARR; j++) {
  1675. ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j);
  1676. ay[j] = GGML_F16_VEC_MUL(ay[j], vx);
  1677. GGML_F16_VEC_STORE(y + i + j*GGML_F16_EPR, ay, j);
  1678. }
  1679. }
  1680. // leftovers
  1681. for (int i = np; i < n; ++i) {
  1682. y[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(y[i])*v);
  1683. }
  1684. #else
  1685. // scalar
  1686. for (int i = 0; i < n; ++i) {
  1687. y[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(y[i])*v);
  1688. }
  1689. #endif
  1690. }
  1691. inline static void ggml_vec_norm_f32 (const int n, float * s, const float * x) { ggml_vec_dot_f32(n, s, 0, x, 0, x, 0, 1); *s = sqrtf(*s); }
  1692. inline static void ggml_vec_sqr_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i]*x[i]; }
  1693. inline static void ggml_vec_sqrt_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = sqrtf(x[i]); }
  1694. inline static void ggml_vec_log_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = logf(x[i]); }
  1695. inline static void ggml_vec_abs_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = fabsf(x[i]); }
  1696. inline static void ggml_vec_sgn_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 1.f : ((x[i] < 0.f) ? -1.f : 0.f); }
  1697. inline static void ggml_vec_step_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 1.f : 0.f; }
  1698. inline static void ggml_vec_tanh_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = tanhf(x[i]); }
  1699. inline static void ggml_vec_elu_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? x[i] : expf(x[i])-1; }
  1700. inline static void ggml_vec_relu_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? x[i] : 0.f; }
  1701. inline static void ggml_vec_leaky_relu_f32 (const int n, float * y, const float * x, const float ns) { for (int i = 0; i < n; ++i) y[i] = ((x[i] > 0.f) ? x[i] : 0.f) + ns * ((x[i] < 0.0f) ? x[i] : 0.f); }
  1702. inline static void ggml_vec_sigmoid_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = 1.f / (1.f + expf(-x[i])); }
  1703. // TODO: optimize performance
  1704. inline static void ggml_vec_hardswish_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i] * fminf(1.0f, fmaxf(0.0f, (x[i] + 3.0f) / 6.0f)); }
  1705. inline static void ggml_vec_hardsigmoid_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = fminf(1.0f, fmaxf(0.0f, (x[i] + 3.0f) / 6.0f)); }
  1706. static const float GELU_COEF_A = 0.044715f;
  1707. static const float GELU_QUICK_COEF = -1.702f;
  1708. static const float SQRT_2_OVER_PI = 0.79788456080286535587989211986876f;
  1709. inline static float ggml_gelu_f32(float x) {
  1710. return 0.5f*x*(1.0f + tanhf(SQRT_2_OVER_PI*x*(1.0f + GELU_COEF_A*x*x)));
  1711. }
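// The expression above is the usual tanh approximation of GELU:
//
//     GELU(x) ~= 0.5 * x * (1 + tanh( sqrt(2/pi) * (x + 0.044715 * x^3) ))
//
// written with the cubic factored as x*(1 + GELU_COEF_A*x*x), which is the same
// polynomial: sqrt(2/pi) * x * (1 + A*x^2) == sqrt(2/pi) * (x + A*x^3).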
  1712. inline static void ggml_vec_gelu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
  1713. const uint16_t * i16 = (const uint16_t *) x;
  1714. for (int i = 0; i < n; ++i) {
  1715. y[i] = ggml_table_gelu_f16[i16[i]];
  1716. }
  1717. }
  1718. #ifdef GGML_GELU_FP16
  1719. inline static void ggml_vec_gelu_f32(const int n, float * y, const float * x) {
  1720. uint16_t t;
  1721. for (int i = 0; i < n; ++i) {
  1722. if (x[i] <= -10.0f) {
  1723. y[i] = 0.0f;
  1724. } else if (x[i] >= 10.0f) {
  1725. y[i] = x[i];
  1726. } else {
  1727. ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
  1728. memcpy(&t, &fp16, sizeof(uint16_t));
  1729. y[i] = GGML_FP16_TO_FP32(ggml_table_gelu_f16[t]);
  1730. }
  1731. }
  1732. }
  1733. #else
  1734. inline static void ggml_vec_gelu_f32(const int n, float * y, const float * x) {
  1735. for (int i = 0; i < n; ++i) {
  1736. y[i] = ggml_gelu_f32(x[i]);
  1737. }
  1738. }
  1739. #endif
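// Why the GGML_GELU_FP16 path works: a ggml_fp16_t has only 2^16 distinct bit
// patterns, so ggml_table_gelu_f16 (filled once in ggml_init) can hold GELU of
// every representable f16 value, and the bit pattern of the rounded input is
// used directly as the table index. The explicit |x| >= 10 short-circuit keeps
// the saturated tails exact (GELU(x) ~= 0 for x <= -10 and ~= x for x >= 10)
// instead of going through the f16 rounding and the table lookup.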
  1740. inline static float ggml_gelu_quick_f32(float x) {
  1741. return x*(1.0f/(1.0f+expf(GELU_QUICK_COEF*x)));
  1742. }
  1743. //inline static void ggml_vec_gelu_quick_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
  1744. // const uint16_t * i16 = (const uint16_t *) x;
  1745. // for (int i = 0; i < n; ++i) {
  1746. // y[i] = ggml_table_gelu_quick_f16[i16[i]];
  1747. // }
  1748. //}
  1749. #ifdef GGML_GELU_QUICK_FP16
  1750. inline static void ggml_vec_gelu_quick_f32(const int n, float * y, const float * x) {
  1751. uint16_t t;
  1752. for (int i = 0; i < n; ++i) {
  1753. ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
  1754. memcpy(&t, &fp16, sizeof(uint16_t));
  1755. y[i] = GGML_FP16_TO_FP32(ggml_table_gelu_quick_f16[t]);
  1756. }
  1757. }
  1758. #else
  1759. inline static void ggml_vec_gelu_quick_f32(const int n, float * y, const float * x) {
  1760. for (int i = 0; i < n; ++i) {
  1761. y[i] = ggml_gelu_quick_f32(x[i]);
  1762. }
  1763. }
  1764. #endif
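// Since GELU_QUICK_COEF == -1.702, the expression above is the sigmoid form
//
//     gelu_quick(x) = x * sigmoid(1.702 * x) = x / (1 + exp(-1.702 * x))
//
// i.e. the cheap logistic approximation of GELU.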
  1765. // Sigmoid Linear Unit (SiLU) function
  1766. inline static float ggml_silu_f32(float x) {
  1767. return x/(1.0f + expf(-x));
  1768. }
  1769. //inline static void ggml_vec_silu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
  1770. // const uint16_t * i16 = (const uint16_t *) x;
  1771. // for (int i = 0; i < n; ++i) {
  1772. // y[i] = ggml_table_silu_f16[i16[i]];
  1773. // }
  1774. //}
  1775. #ifdef GGML_SILU_FP16
  1776. inline static void ggml_vec_silu_f32(const int n, float * y, const float * x) {
  1777. uint16_t t;
  1778. for (int i = 0; i < n; ++i) {
  1779. ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
  1780. memcpy(&t, &fp16, sizeof(uint16_t));
  1781. y[i] = GGML_FP16_TO_FP32(ggml_table_silu_f16[t]);
  1782. }
  1783. }
  1784. #else
  1785. inline static void ggml_vec_silu_f32(const int n, float * y, const float * x) {
  1786. for (int i = 0; i < n; ++i) {
  1787. y[i] = ggml_silu_f32(x[i]);
  1788. }
  1789. }
  1790. #endif
  1791. inline static float ggml_silu_backward_f32(float x, float dy) {
  1792. const float s = 1.0f/(1.0f + expf(-x));
  1793. return dy*s*(1.0f + x*(1.0f - s));
  1794. }
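// Derivation for ggml_silu_backward_f32: with s = sigmoid(x) = 1/(1 + exp(-x)),
//
//     silu(x)  = x * s
//     silu'(x) = s + x * s * (1 - s) = s * (1 + x * (1 - s))
//
// so the gradient of the loss w.r.t. x is dy * s * (1 + x * (1 - s)), which is
// exactly what the function returns.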
  1795. #ifdef GGML_SILU_FP16
  1796. inline static void ggml_vec_silu_backward_f32(const int n, float * dx, const float * x, const float * dy) {
  1797. for (int i = 0; i < n; ++i) {
1798. // the forward silu was computed from the f16 rounding of x[i], not from x[i] itself,
1799. // so take the derivative at the f16 value of x[i]:
  1800. ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
  1801. float usedx = GGML_FP16_TO_FP32(fp16);
  1802. dx[i] = ggml_silu_backward_f32(usedx, dy[i]);
  1803. }
  1804. }
  1805. #else
  1806. inline static void ggml_vec_silu_backward_f32(const int n, float * dx, const float * x, const float * dy) {
  1807. for (int i = 0; i < n; ++i) {
  1808. dx[i] = ggml_silu_backward_f32(x[i], dy[i]);
  1809. }
  1810. }
  1811. #endif
  1812. inline static void ggml_vec_sum_f32(const int n, float * s, const float * x) {
  1813. #ifndef GGML_USE_ACCELERATE
  1814. ggml_float sum = 0.0;
  1815. for (int i = 0; i < n; ++i) {
  1816. sum += (ggml_float)x[i];
  1817. }
  1818. *s = sum;
  1819. #else
  1820. vDSP_sve(x, 1, s, n);
  1821. #endif
  1822. }
  1823. inline static void ggml_vec_sum_f32_ggf(const int n, ggml_float * s, const float * x) {
  1824. ggml_float sum = 0.0;
  1825. for (int i = 0; i < n; ++i) {
  1826. sum += (ggml_float)x[i];
  1827. }
  1828. *s = sum;
  1829. }
  1830. inline static void ggml_vec_sum_f16_ggf(const int n, float * s, const ggml_fp16_t * x) {
  1831. float sum = 0.0f;
  1832. for (int i = 0; i < n; ++i) {
  1833. sum += GGML_FP16_TO_FP32(x[i]);
  1834. }
  1835. *s = sum;
  1836. }
  1837. inline static void ggml_vec_sum_bf16_ggf(const int n, float * s, const ggml_bf16_t * x) {
  1838. float sum = 0.0f;
  1839. for (int i = 0; i < n; ++i) {
  1840. sum += GGML_BF16_TO_FP32(x[i]);
  1841. }
  1842. *s = sum;
  1843. }
  1844. inline static void ggml_vec_max_f32(const int n, float * s, const float * x) {
  1845. #ifndef GGML_USE_ACCELERATE
  1846. float max = -INFINITY;
  1847. for (int i = 0; i < n; ++i) {
  1848. max = MAX(max, x[i]);
  1849. }
  1850. *s = max;
  1851. #else
  1852. vDSP_maxv(x, 1, s, n);
  1853. #endif
  1854. }
  1855. inline static void ggml_vec_norm_inv_f32(const int n, float * s, const float * x) {
  1856. ggml_vec_norm_f32(n, s, x);
  1857. *s = 1.f/(*s);
  1858. }
  1859. inline static void ggml_vec_argmax_f32(const int n, int * s, const float * x) {
  1860. float max = -INFINITY;
  1861. int idx = 0;
  1862. for (int i = 0; i < n; ++i) {
  1863. max = MAX(max, x[i]);
  1864. if (max == x[i]) { idx = i; }
  1865. }
  1866. *s = idx;
  1867. }
  1868. //
  1869. // data types
  1870. //
  1871. static const char * GGML_OP_NAME[GGML_OP_COUNT] = {
  1872. "NONE",
  1873. "DUP",
  1874. "ADD",
  1875. "ADD1",
  1876. "ACC",
  1877. "SUB",
  1878. "MUL",
  1879. "DIV",
  1880. "SQR",
  1881. "SQRT",
  1882. "LOG",
  1883. "SUM",
  1884. "SUM_ROWS",
  1885. "MEAN",
  1886. "ARGMAX",
  1887. "REPEAT",
  1888. "REPEAT_BACK",
  1889. "CONCAT",
  1890. "SILU_BACK",
  1891. "NORM",
  1892. "RMS_NORM",
  1893. "RMS_NORM_BACK",
  1894. "GROUP_NORM",
  1895. "MUL_MAT",
  1896. "MUL_MAT_ID",
  1897. "OUT_PROD",
  1898. "SCALE",
  1899. "SET",
  1900. "CPY",
  1901. "CONT",
  1902. "RESHAPE",
  1903. "VIEW",
  1904. "PERMUTE",
  1905. "TRANSPOSE",
  1906. "GET_ROWS",
  1907. "GET_ROWS_BACK",
  1908. "DIAG",
  1909. "DIAG_MASK_INF",
  1910. "DIAG_MASK_ZERO",
  1911. "SOFT_MAX",
  1912. "SOFT_MAX_BACK",
  1913. "ROPE",
  1914. "ROPE_BACK",
  1915. "CLAMP",
  1916. "CONV_TRANSPOSE_1D",
  1917. "IM2COL",
  1918. "CONV_TRANSPOSE_2D",
  1919. "POOL_1D",
  1920. "POOL_2D",
  1921. "UPSCALE",
  1922. "PAD",
  1923. "ARANGE",
  1924. "TIMESTEP_EMBEDDING",
  1925. "ARGSORT",
  1926. "LEAKY_RELU",
  1927. "FLASH_ATTN",
  1928. "FLASH_ATTN_EXT",
  1929. "FLASH_FF",
  1930. "FLASH_ATTN_BACK",
  1931. "SSM_CONV",
  1932. "SSM_SCAN",
  1933. "WIN_PART",
  1934. "WIN_UNPART",
  1935. "GET_REL_POS",
  1936. "ADD_REL_POS",
  1937. "UNARY",
  1938. "MAP_UNARY",
  1939. "MAP_BINARY",
  1940. "MAP_CUSTOM1_F32",
  1941. "MAP_CUSTOM2_F32",
  1942. "MAP_CUSTOM3_F32",
  1943. "MAP_CUSTOM1",
  1944. "MAP_CUSTOM2",
  1945. "MAP_CUSTOM3",
  1946. "CROSS_ENTROPY_LOSS",
  1947. "CROSS_ENTROPY_LOSS_BACK",
  1948. };
  1949. static_assert(GGML_OP_COUNT == 76, "GGML_OP_COUNT != 76");
  1950. static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = {
  1951. "none",
  1952. "x",
  1953. "x+y",
  1954. "x+y",
  1955. "view(x,nb,offset)+=y->x",
  1956. "x-y",
  1957. "x*y",
  1958. "x/y",
  1959. "x^2",
  1960. "√x",
  1961. "log(x)",
  1962. "Σx",
  1963. "Σx_k",
  1964. "Σx/n",
  1965. "argmax(x)",
  1966. "repeat(x)",
  1967. "repeat_back(x)",
  1968. "concat(x, y)",
  1969. "silu_back(x)",
  1970. "norm(x)",
  1971. "rms_norm(x)",
  1972. "rms_norm_back(x)",
  1973. "group_norm(x)",
  1974. "X*Y",
  1975. "X[i]*Y",
  1976. "X*Y",
  1977. "x*v",
  1978. "y-\\>view(x)",
  1979. "x-\\>y",
  1980. "cont(x)",
  1981. "reshape(x)",
  1982. "view(x)",
  1983. "permute(x)",
  1984. "transpose(x)",
  1985. "get_rows(x)",
  1986. "get_rows_back(x)",
  1987. "diag(x)",
  1988. "diag_mask_inf(x)",
  1989. "diag_mask_zero(x)",
  1990. "soft_max(x)",
  1991. "soft_max_back(x)",
  1992. "rope(x)",
  1993. "rope_back(x)",
  1994. "clamp(x)",
  1995. "conv_transpose_1d(x)",
  1996. "im2col(x)",
  1997. "conv_transpose_2d(x)",
  1998. "pool_1d(x)",
  1999. "pool_2d(x)",
  2000. "upscale(x)",
  2001. "pad(x)",
  2002. "arange(start, stop, step)",
  2003. "timestep_embedding(timesteps, dim, max_period)",
  2004. "argsort(x)",
  2005. "leaky_relu(x)",
  2006. "flash_attn(x)",
  2007. "flash_attn_ext(x)",
  2008. "flash_ff(x)",
  2009. "flash_attn_back(x)",
  2010. "ssm_conv(x)",
  2011. "ssm_scan(x)",
  2012. "win_part(x)",
  2013. "win_unpart(x)",
  2014. "get_rel_pos(x)",
  2015. "add_rel_pos(x)",
  2016. "unary(x)",
  2017. "f(x)",
  2018. "f(x,y)",
  2019. "custom_f32(x)",
  2020. "custom_f32(x,y)",
  2021. "custom_f32(x,y,z)",
  2022. "custom(x)",
  2023. "custom(x,y)",
  2024. "custom(x,y,z)",
  2025. "cross_entropy_loss(x,y)",
  2026. "cross_entropy_loss_back(x,y)",
  2027. };
  2028. static_assert(GGML_OP_COUNT == 76, "GGML_OP_COUNT != 76");
  2029. static_assert(GGML_OP_POOL_COUNT == 2, "GGML_OP_POOL_COUNT != 2");
  2030. static const char * GGML_UNARY_OP_NAME[GGML_UNARY_OP_COUNT] = {
  2031. "ABS",
  2032. "SGN",
  2033. "NEG",
  2034. "STEP",
  2035. "TANH",
  2036. "ELU",
  2037. "RELU",
  2038. "SIGMOID",
  2039. "GELU",
  2040. "GELU_QUICK",
  2041. "SILU",
  2042. "HARDSWISH",
  2043. "HARDSIGMOID",
  2044. };
  2045. static_assert(GGML_UNARY_OP_COUNT == 13, "GGML_UNARY_OP_COUNT != 13");
  2046. static_assert(sizeof(struct ggml_object)%GGML_MEM_ALIGN == 0, "ggml_object size must be a multiple of GGML_MEM_ALIGN");
  2047. static_assert(sizeof(struct ggml_tensor)%GGML_MEM_ALIGN == 0, "ggml_tensor size must be a multiple of GGML_MEM_ALIGN");
  2048. // WARN:
2049. // Misconfiguration can lead to problems that are hard to reason about:
2050. // * At best it crashes or talks nonsense.
2051. // * At worst it produces subtly different results that are hard to perceive.
2052. //
2053. // An op has to enable INIT or FINALIZE when any of its branches needs that pass.
2054. // Take care with compile options (e.g., GGML_USE_xxx).
  2055. static bool GGML_OP_HAS_INIT [GGML_OP_COUNT] = { 0 };
  2056. static bool GGML_OP_HAS_FINALIZE[GGML_OP_COUNT] = { 0 };
  2057. static void ggml_setup_op_has_task_pass(void) {
  2058. { // INIT
  2059. bool * p = GGML_OP_HAS_INIT;
  2060. p[GGML_OP_ACC ] = true;
  2061. p[GGML_OP_MUL_MAT ] = true;
  2062. p[GGML_OP_MUL_MAT_ID ] = true;
  2063. p[GGML_OP_OUT_PROD ] = true;
  2064. p[GGML_OP_SET ] = true;
  2065. p[GGML_OP_GET_ROWS_BACK ] = true;
  2066. p[GGML_OP_DIAG_MASK_INF ] = true;
  2067. p[GGML_OP_DIAG_MASK_ZERO ] = true;
  2068. p[GGML_OP_CONV_TRANSPOSE_1D ] = true;
  2069. p[GGML_OP_CONV_TRANSPOSE_2D ] = true;
  2070. p[GGML_OP_FLASH_ATTN_BACK ] = true;
  2071. p[GGML_OP_CROSS_ENTROPY_LOSS ] = true;
  2072. p[GGML_OP_ADD_REL_POS ] = true;
  2073. }
  2074. { // FINALIZE
  2075. bool * p = GGML_OP_HAS_FINALIZE;
  2076. p[GGML_OP_CROSS_ENTROPY_LOSS ] = true;
  2077. }
  2078. }
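// The graph compute code consults these tables to decide whether an op gets the
// extra INIT and/or FINALIZE task passes in addition to the main compute pass
// (see the warning above: an op must be listed here if any of its code paths,
// including backend-specific ones, relies on that pass).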
  2079. //
  2080. // ggml context
  2081. //
  2082. struct ggml_context {
  2083. size_t mem_size;
  2084. void * mem_buffer;
  2085. bool mem_buffer_owned;
  2086. bool no_alloc;
  2087. bool no_alloc_save; // this is used to save the no_alloc state when using scratch buffers
  2088. int n_objects;
  2089. struct ggml_object * objects_begin;
  2090. struct ggml_object * objects_end;
  2091. struct ggml_scratch scratch;
  2092. struct ggml_scratch scratch_save;
  2093. };
  2094. struct ggml_context_container {
  2095. bool used;
  2096. struct ggml_context context;
  2097. };
  2098. //
  2099. // NUMA support
  2100. //
  2101. #define GGML_NUMA_MAX_NODES 8
  2102. #define GGML_NUMA_MAX_CPUS 512
  2103. struct ggml_numa_node {
  2104. uint32_t cpus[GGML_NUMA_MAX_CPUS]; // hardware threads on this node
  2105. uint32_t n_cpus;
  2106. };
  2107. struct ggml_numa_nodes {
  2108. enum ggml_numa_strategy numa_strategy;
  2109. struct ggml_numa_node nodes[GGML_NUMA_MAX_NODES];
  2110. uint32_t n_nodes;
  2111. uint32_t total_cpus; // hardware threads on system
2112. uint32_t current_node; // node on which the main process is executing
  2113. #if defined(__gnu_linux__)
  2114. cpu_set_t cpuset; // cpuset from numactl
  2115. #else
  2116. uint32_t cpuset; // no NUMA support outside of Linux at this time. Use a portable datatype
  2117. #endif
  2118. };
  2119. //
  2120. // ggml state
  2121. //
  2122. struct ggml_state {
  2123. struct ggml_context_container contexts[GGML_MAX_CONTEXTS];
  2124. struct ggml_numa_nodes numa;
  2125. };
  2126. // global state
  2127. static struct ggml_state g_state;
  2128. static atomic_int g_state_barrier = 0;
  2129. // barrier via spin lock
  2130. inline static void ggml_critical_section_start(void) {
  2131. int processing = atomic_fetch_add(&g_state_barrier, 1);
  2132. while (processing > 0) {
  2133. // wait for other threads to finish
  2134. atomic_fetch_sub(&g_state_barrier, 1);
  2135. sched_yield(); // TODO: reconsider this
  2136. processing = atomic_fetch_add(&g_state_barrier, 1);
  2137. }
  2138. }
  2139. // TODO: make this somehow automatically executed
  2140. // some sort of "sentry" mechanism
  2141. inline static void ggml_critical_section_end(void) {
  2142. atomic_fetch_sub(&g_state_barrier, 1);
  2143. }
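// How the spin barrier above works: atomic_fetch_add returns the value of
// g_state_barrier *before* the increment, so the thread that observes 0 is the
// only one inside the critical section. Any other thread sees a positive value,
// undoes its own increment, yields the CPU and retries; ggml_critical_section_end
// releases the lock by removing the winning thread's increment. This protects
// the global g_state (contexts table, NUMA info) during ggml_init/ggml_free.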
  2144. #if defined(__gnu_linux__)
  2145. static cpu_set_t ggml_get_numa_affinity(void) {
  2146. cpu_set_t cpuset;
  2147. pthread_t thread;
  2148. thread = pthread_self();
  2149. CPU_ZERO(&cpuset);
  2150. pthread_getaffinity_np(thread, sizeof(cpu_set_t), &cpuset);
  2151. return cpuset;
  2152. }
  2153. #else
  2154. static uint32_t ggml_get_numa_affinity(void) {
  2155. return 0; // no NUMA support
  2156. }
  2157. #endif
  2158. void ggml_numa_init(enum ggml_numa_strategy numa_flag) {
  2159. if (g_state.numa.n_nodes > 0) {
  2160. fprintf(stderr, "ggml_numa_init: NUMA already initialized\n");
  2161. return;
  2162. }
  2163. #if defined(__gnu_linux__)
  2164. struct stat st;
  2165. char path[256];
  2166. int rv;
  2167. // set numa scheme
  2168. g_state.numa.numa_strategy = numa_flag;
  2169. GGML_PRINT_DEBUG("numa strategy %u\n",g_state.numa.numa_strategy);
  2170. g_state.numa.cpuset = ggml_get_numa_affinity();
  2171. // enumerate nodes
  2172. while (g_state.numa.n_nodes < GGML_NUMA_MAX_NODES) {
  2173. rv = snprintf(path, sizeof(path), "/sys/devices/system/node/node%u", g_state.numa.n_nodes);
  2174. GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path));
  2175. if (stat(path, &st) != 0) { break; }
  2176. ++g_state.numa.n_nodes;
  2177. }
  2178. // enumerate CPUs
  2179. while (g_state.numa.total_cpus < GGML_NUMA_MAX_CPUS) {
  2180. rv = snprintf(path, sizeof(path), "/sys/devices/system/cpu/cpu%u", g_state.numa.total_cpus);
  2181. GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path));
  2182. if (stat(path, &st) != 0) { break; }
  2183. ++g_state.numa.total_cpus;
  2184. }
  2185. GGML_PRINT_DEBUG("found %u numa nodes, %u CPUs\n", g_state.numa.n_nodes, g_state.numa.total_cpus);
  2186. // figure out which node we're on
  2187. uint current_cpu;
  2188. int getcpu_ret = 0;
  2189. #if __GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ > 28) || defined(__COSMOPOLITAN__)
  2190. getcpu_ret = getcpu(&current_cpu, &g_state.numa.current_node);
  2191. #else
  2192. // old glibc doesn't have a wrapper for this call. Fall back on direct syscall
  2193. # if !defined(SYS_getcpu) && defined(SYS_get_cpu)
  2194. # define SYS_getcpu SYS_get_cpu // some older glibc versions use this name
  2195. # endif
  2196. getcpu_ret = syscall(SYS_getcpu, &current_cpu, &g_state.numa.current_node);
  2197. #endif
  2198. if (g_state.numa.n_nodes < 1 || g_state.numa.total_cpus < 1 || getcpu_ret != 0) {
  2199. g_state.numa.n_nodes = 0;
  2200. return;
  2201. }
  2202. GGML_PRINT_DEBUG("found our process on numa node %u, CPU %u\n", g_state.numa.current_node, current_cpu);
  2203. for (uint32_t n = 0; n < g_state.numa.n_nodes; ++n) {
  2204. struct ggml_numa_node * node = &g_state.numa.nodes[n];
  2205. GGML_PRINT_DEBUG("CPUs on node %u:", n);
  2206. node->n_cpus = 0;
  2207. for (uint32_t c = 0; c < g_state.numa.total_cpus; ++c) {
  2208. rv = snprintf(path, sizeof(path), "/sys/devices/system/node/node%u/cpu%u", n, c);
  2209. GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path));
  2210. if (stat(path, &st) == 0) {
  2211. node->cpus[node->n_cpus++] = c;
  2212. GGML_PRINT_DEBUG(" %u", c);
  2213. }
  2214. }
  2215. GGML_PRINT_DEBUG("\n");
  2216. }
  2217. if (ggml_is_numa()) {
  2218. FILE *fptr = fopen("/proc/sys/kernel/numa_balancing", "r");
  2219. if (fptr != NULL) {
  2220. char buf[42];
  2221. if (fgets(buf, sizeof(buf), fptr) && strncmp(buf, "0\n", sizeof(buf)) != 0) {
  2222. GGML_PRINT("WARNING: /proc/sys/kernel/numa_balancing is enabled, this has been observed to impair performance\n");
  2223. }
  2224. fclose(fptr);
  2225. }
  2226. }
  2227. #else
  2228. GGML_UNUSED(numa_flag);
  2229. // TODO
  2230. #endif
  2231. }
  2232. bool ggml_is_numa(void) {
  2233. return g_state.numa.n_nodes > 1;
  2234. }
  2235. ////////////////////////////////////////////////////////////////////////////////
  2236. void ggml_print_object(const struct ggml_object * obj) {
  2237. GGML_PRINT(" - ggml_object: type = %d, offset = %zu, size = %zu, next = %p\n",
  2238. obj->type, obj->offs, obj->size, (const void *) obj->next);
  2239. }
  2240. void ggml_print_objects(const struct ggml_context * ctx) {
  2241. struct ggml_object * obj = ctx->objects_begin;
  2242. GGML_PRINT("%s: objects in context %p:\n", __func__, (const void *) ctx);
  2243. while (obj != NULL) {
  2244. ggml_print_object(obj);
  2245. obj = obj->next;
  2246. }
  2247. GGML_PRINT("%s: --- end ---\n", __func__);
  2248. }
  2249. GGML_CALL int64_t ggml_nelements(const struct ggml_tensor * tensor) {
  2250. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  2251. return tensor->ne[0]*tensor->ne[1]*tensor->ne[2]*tensor->ne[3];
  2252. }
  2253. GGML_CALL int64_t ggml_nrows(const struct ggml_tensor * tensor) {
  2254. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  2255. return tensor->ne[1]*tensor->ne[2]*tensor->ne[3];
  2256. }
  2257. GGML_CALL size_t ggml_nbytes(const struct ggml_tensor * tensor) {
  2258. size_t nbytes;
  2259. size_t blck_size = ggml_blck_size(tensor->type);
  2260. if (blck_size == 1) {
  2261. nbytes = ggml_type_size(tensor->type);
  2262. for (int i = 0; i < GGML_MAX_DIMS; ++i) {
  2263. nbytes += (tensor->ne[i] - 1)*tensor->nb[i];
  2264. }
  2265. }
  2266. else {
  2267. nbytes = tensor->ne[0]*tensor->nb[0]/blck_size;
  2268. for (int i = 1; i < GGML_MAX_DIMS; ++i) {
  2269. nbytes += (tensor->ne[i] - 1)*tensor->nb[i];
  2270. }
  2271. }
  2272. return nbytes;
  2273. }
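// Illustrative example for ggml_nbytes (not part of the library): a contiguous
// F32 tensor with ne = {4, 3, 2, 1} gets nb = {4, 16, 48, 96} from
// ggml_new_tensor_impl, so
//
//     nbytes = 4 + (4-1)*4 + (3-1)*16 + (2-1)*48 + (1-1)*96 = 96 bytes
//
// which matches 4*3*2*1 elements * 4 bytes. The general form also covers
// non-contiguous views (via nb) and block-quantized types (via blck_size).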
  2274. size_t ggml_nbytes_pad(const struct ggml_tensor * tensor) {
  2275. return GGML_PAD(ggml_nbytes(tensor), GGML_MEM_ALIGN);
  2276. }
  2277. GGML_CALL int ggml_blck_size(enum ggml_type type) {
  2278. return type_traits[type].blck_size;
  2279. }
  2280. GGML_CALL size_t ggml_type_size(enum ggml_type type) {
  2281. return type_traits[type].type_size;
  2282. }
  2283. GGML_CALL size_t ggml_row_size(enum ggml_type type, int64_t ne) {
  2284. assert(ne % ggml_blck_size(type) == 0);
  2285. return ggml_type_size(type)*ne/ggml_blck_size(type);
  2286. }
  2287. double ggml_type_sizef(enum ggml_type type) {
  2288. return ((double)(type_traits[type].type_size))/type_traits[type].blck_size;
  2289. }
  2290. GGML_CALL const char * ggml_type_name(enum ggml_type type) {
  2291. return type_traits[type].type_name;
  2292. }
  2293. GGML_CALL bool ggml_is_quantized(enum ggml_type type) {
  2294. return type_traits[type].is_quantized;
  2295. }
  2296. GGML_CALL const char * ggml_op_name(enum ggml_op op) {
  2297. return GGML_OP_NAME[op];
  2298. }
  2299. const char * ggml_op_symbol(enum ggml_op op) {
  2300. return GGML_OP_SYMBOL[op];
  2301. }
  2302. const char * ggml_unary_op_name(enum ggml_unary_op op) {
  2303. return GGML_UNARY_OP_NAME[op];
  2304. }
  2305. GGML_CALL const char * ggml_op_desc(const struct ggml_tensor * t) {
  2306. if (t->op == GGML_OP_UNARY) {
  2307. enum ggml_unary_op uop = ggml_get_unary_op(t);
  2308. return ggml_unary_op_name(uop);
  2309. }
  2310. else {
  2311. return ggml_op_name(t->op);
  2312. }
  2313. }
  2314. GGML_CALL size_t ggml_element_size(const struct ggml_tensor * tensor) {
  2315. return ggml_type_size(tensor->type);
  2316. }
  2317. bool ggml_is_scalar(const struct ggml_tensor * tensor) {
  2318. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  2319. return tensor->ne[0] == 1 && tensor->ne[1] == 1 && tensor->ne[2] == 1 && tensor->ne[3] == 1;
  2320. }
  2321. bool ggml_is_vector(const struct ggml_tensor * tensor) {
  2322. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  2323. return tensor->ne[1] == 1 && tensor->ne[2] == 1 && tensor->ne[3] == 1;
  2324. }
  2325. bool ggml_is_matrix(const struct ggml_tensor * tensor) {
  2326. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  2327. return tensor->ne[2] == 1 && tensor->ne[3] == 1;
  2328. }
  2329. bool ggml_is_3d(const struct ggml_tensor * tensor) {
  2330. return tensor->ne[3] == 1;
  2331. }
  2332. int ggml_n_dims(const struct ggml_tensor * tensor) {
  2333. for (int i = GGML_MAX_DIMS - 1; i >= 1; --i) {
  2334. if (tensor->ne[i] > 1) {
  2335. return i + 1;
  2336. }
  2337. }
  2338. return 1;
  2339. }
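// Note: ggml_n_dims ignores trailing dimensions of size 1, so ne = {3, 4, 1, 1}
// reports 2, ne = {3, 1, 1, 1} reports 1, and a scalar ne = {1, 1, 1, 1} also
// reports 1 (never 0).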
  2340. static inline bool ggml_can_mul_mat(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
  2341. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  2342. return (t0->ne[0] == t1->ne[0]) &&
  2343. (t1->ne[2]%t0->ne[2] == 0) && // verify t0 is broadcastable
  2344. (t1->ne[3]%t0->ne[3] == 0);
  2345. }
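// In other words: the two operands must share the inner dimension ne[0], and
// dimensions 2 and 3 of t0 must divide those of t1 so that t0 can be broadcast
// across t1's batches (e.g. t0 with ne[2] == ne[3] == 1 can be multiplied
// against any batch shape of t1).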
  2346. static inline bool ggml_can_out_prod(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
  2347. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  2348. return (t0->ne[1] == t1->ne[1]) &&
  2349. (t1->ne[2]%t0->ne[2] == 0) && // verify t0 is broadcastable
  2350. (t1->ne[3]%t0->ne[3] == 0);
  2351. }
  2352. enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype) {
  2353. enum ggml_type wtype = GGML_TYPE_COUNT;
  2354. switch (ftype) {
  2355. case GGML_FTYPE_ALL_F32: wtype = GGML_TYPE_F32; break;
  2356. case GGML_FTYPE_MOSTLY_F16: wtype = GGML_TYPE_F16; break;
  2357. case GGML_FTYPE_MOSTLY_BF16: wtype = GGML_TYPE_BF16; break;
  2358. case GGML_FTYPE_MOSTLY_Q4_0: wtype = GGML_TYPE_Q4_0; break;
  2359. case GGML_FTYPE_MOSTLY_Q4_1: wtype = GGML_TYPE_Q4_1; break;
  2360. case GGML_FTYPE_MOSTLY_Q5_0: wtype = GGML_TYPE_Q5_0; break;
  2361. case GGML_FTYPE_MOSTLY_Q5_1: wtype = GGML_TYPE_Q5_1; break;
  2362. case GGML_FTYPE_MOSTLY_Q8_0: wtype = GGML_TYPE_Q8_0; break;
  2363. case GGML_FTYPE_MOSTLY_Q2_K: wtype = GGML_TYPE_Q2_K; break;
  2364. case GGML_FTYPE_MOSTLY_Q3_K: wtype = GGML_TYPE_Q3_K; break;
  2365. case GGML_FTYPE_MOSTLY_Q4_K: wtype = GGML_TYPE_Q4_K; break;
  2366. case GGML_FTYPE_MOSTLY_Q5_K: wtype = GGML_TYPE_Q5_K; break;
  2367. case GGML_FTYPE_MOSTLY_Q6_K: wtype = GGML_TYPE_Q6_K; break;
  2368. case GGML_FTYPE_MOSTLY_IQ2_XXS: wtype = GGML_TYPE_IQ2_XXS; break;
  2369. case GGML_FTYPE_MOSTLY_IQ2_XS: wtype = GGML_TYPE_IQ2_XS; break;
  2370. case GGML_FTYPE_MOSTLY_IQ3_XXS: wtype = GGML_TYPE_IQ3_XXS; break;
  2371. case GGML_FTYPE_MOSTLY_IQ1_S: wtype = GGML_TYPE_IQ1_S; break;
  2372. case GGML_FTYPE_MOSTLY_IQ1_M: wtype = GGML_TYPE_IQ1_M; break;
  2373. case GGML_FTYPE_MOSTLY_IQ4_NL: wtype = GGML_TYPE_IQ4_NL; break;
  2374. case GGML_FTYPE_MOSTLY_IQ4_XS: wtype = GGML_TYPE_IQ4_XS; break;
  2375. case GGML_FTYPE_MOSTLY_IQ3_S: wtype = GGML_TYPE_IQ3_S; break;
  2376. case GGML_FTYPE_MOSTLY_IQ2_S: wtype = GGML_TYPE_IQ2_S; break;
  2377. case GGML_FTYPE_UNKNOWN: wtype = GGML_TYPE_COUNT; break;
  2378. case GGML_FTYPE_MOSTLY_Q4_1_SOME_F16: wtype = GGML_TYPE_COUNT; break;
  2379. }
  2380. GGML_ASSERT(wtype != GGML_TYPE_COUNT);
  2381. return wtype;
  2382. }
  2383. size_t ggml_tensor_overhead(void) {
  2384. return GGML_OBJECT_SIZE + GGML_TENSOR_SIZE;
  2385. }
  2386. GGML_CALL bool ggml_is_transposed(const struct ggml_tensor * tensor) {
  2387. return tensor->nb[0] > tensor->nb[1];
  2388. }
  2389. GGML_CALL bool ggml_is_contiguous(const struct ggml_tensor * tensor) {
  2390. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  2391. return
  2392. tensor->nb[0] == ggml_type_size(tensor->type) &&
  2393. tensor->nb[1] == (tensor->nb[0]*tensor->ne[0])/ggml_blck_size(tensor->type) &&
  2394. tensor->nb[2] == tensor->nb[1]*tensor->ne[1] &&
  2395. tensor->nb[3] == tensor->nb[2]*tensor->ne[2];
  2396. }
  2397. static inline bool ggml_is_contiguous_except_dim_1(const struct ggml_tensor * tensor) {
  2398. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  2399. return
  2400. tensor->nb[0] == ggml_type_size(tensor->type) &&
  2401. tensor->nb[2] == tensor->nb[1]*tensor->ne[1] &&
  2402. tensor->nb[3] == tensor->nb[2]*tensor->ne[2];
  2403. }
  2404. GGML_CALL bool ggml_is_permuted(const struct ggml_tensor * tensor) {
  2405. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  2406. return tensor->nb[0] > tensor->nb[1] || tensor->nb[1] > tensor->nb[2] || tensor->nb[2] > tensor->nb[3];
  2407. }
  2408. static inline bool ggml_is_padded_1d(const struct ggml_tensor * tensor) {
  2409. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  2410. return
  2411. tensor->nb[0] == ggml_type_size(tensor->type) &&
  2412. tensor->nb[2] == tensor->nb[1]*tensor->ne[1] &&
  2413. tensor->nb[3] == tensor->nb[2]*tensor->ne[2];
  2414. }
  2415. GGML_CALL bool ggml_is_empty(const struct ggml_tensor * tensor) {
  2416. for (int i = 0; i < GGML_MAX_DIMS; ++i) {
  2417. if (tensor->ne[i] == 0) {
  2418. // empty if any dimension has no elements
  2419. return true;
  2420. }
  2421. }
  2422. return false;
  2423. }
  2424. bool ggml_are_same_shape(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
  2425. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  2426. return
  2427. (t0->ne[0] == t1->ne[0] ) &&
  2428. (t0->ne[1] == t1->ne[1] ) &&
  2429. (t0->ne[2] == t1->ne[2] ) &&
  2430. (t0->ne[3] == t1->ne[3] );
  2431. }
  2432. bool ggml_are_same_stride(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
  2433. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  2434. return
  2435. (t0->nb[0] == t1->nb[0] ) &&
  2436. (t0->nb[1] == t1->nb[1] ) &&
  2437. (t0->nb[2] == t1->nb[2] ) &&
  2438. (t0->nb[3] == t1->nb[3] );
  2439. }
2440. // check if t1 can be represented as a repetition of t0
  2441. static inline bool ggml_can_repeat(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
  2442. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  2443. return ggml_is_empty(t0) ? ggml_is_empty(t1) :
  2444. (t1->ne[0]%t0->ne[0] == 0) &&
  2445. (t1->ne[1]%t0->ne[1] == 0) &&
  2446. (t1->ne[2]%t0->ne[2] == 0) &&
  2447. (t1->ne[3]%t0->ne[3] == 0);
  2448. }
  2449. static inline bool ggml_can_repeat_rows(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
  2450. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  2451. return (t0->ne[0] == t1->ne[0]) && ggml_can_repeat(t0, t1);
  2452. }
  2453. static inline int ggml_up32(int n) {
  2454. return (n + 31) & ~31;
  2455. }
  2456. //static inline int ggml_up64(int n) {
  2457. // return (n + 63) & ~63;
  2458. //}
  2459. static inline int ggml_up(int n, int m) {
  2460. // assert m is a power of 2
  2461. GGML_ASSERT((m & (m - 1)) == 0);
  2462. return (n + m - 1) & ~(m - 1);
  2463. }
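// Example (illustrative): ggml_up rounds n up to the next multiple of m for a
// power-of-two m, so ggml_up(10, 8) == 16 and ggml_up(16, 8) == 16, while
// ggml_up32(n) is the special case ggml_up(n, 32).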
  2464. // assert that pointer is aligned to GGML_MEM_ALIGN
  2465. #define ggml_assert_aligned(ptr) \
  2466. GGML_ASSERT(((uintptr_t) (ptr))%GGML_MEM_ALIGN == 0)
  2467. ////////////////////////////////////////////////////////////////////////////////
  2468. struct ggml_context * ggml_init(struct ggml_init_params params) {
  2469. // make this function thread safe
  2470. ggml_critical_section_start();
  2471. static bool is_first_call = true;
  2472. if (is_first_call) {
  2473. // initialize time system (required on Windows)
  2474. ggml_time_init();
  2475. // initialize GELU, Quick GELU, SILU and EXP F32 tables
  2476. {
  2477. const uint64_t t_start = ggml_time_us(); UNUSED(t_start);
  2478. for (int i = 0; i < (1 << 16); ++i) {
  2479. union {
  2480. uint16_t u16;
  2481. ggml_fp16_t fp16;
  2482. } u = {i};
  2483. float f = ggml_table_f32_f16[i] = GGML_COMPUTE_FP16_TO_FP32(u.fp16);
  2484. ggml_table_gelu_f16[i] = GGML_FP32_TO_FP16(ggml_gelu_f32(f));
  2485. ggml_table_gelu_quick_f16[i] = GGML_FP32_TO_FP16(ggml_gelu_quick_f32(f));
  2486. ggml_table_silu_f16[i] = GGML_FP32_TO_FP16(ggml_silu_f32(f));
  2487. ggml_table_exp_f16[i] = GGML_FP32_TO_FP16(expf(f));
  2488. }
  2489. const uint64_t t_end = ggml_time_us(); UNUSED(t_end);
  2490. GGML_PRINT_DEBUG("%s: GELU, Quick GELU, SILU and EXP tables initialized in %f ms\n", __func__, (t_end - t_start)/1000.0f);
  2491. }
  2492. // initialize g_state
  2493. {
  2494. const uint64_t t_start = ggml_time_us(); UNUSED(t_start);
  2495. g_state = (struct ggml_state) {
  2496. /*.contexts =*/ { { 0 } },
  2497. /*.numa =*/ {
  2498. .n_nodes = 0,
  2499. .total_cpus = 0,
  2500. },
  2501. };
  2502. for (int i = 0; i < GGML_MAX_CONTEXTS; ++i) {
  2503. g_state.contexts[i].used = false;
  2504. }
  2505. const uint64_t t_end = ggml_time_us(); UNUSED(t_end);
  2506. GGML_PRINT_DEBUG("%s: g_state initialized in %f ms\n", __func__, (t_end - t_start)/1000.0f);
  2507. }
  2508. #if defined(GGML_USE_CLBLAST)
  2509. ggml_cl_init();
  2510. #endif
  2511. ggml_setup_op_has_task_pass();
  2512. is_first_call = false;
  2513. }
  2514. // find non-used context in g_state
  2515. struct ggml_context * ctx = NULL;
  2516. for (int i = 0; i < GGML_MAX_CONTEXTS; i++) {
  2517. if (!g_state.contexts[i].used) {
  2518. g_state.contexts[i].used = true;
  2519. ctx = &g_state.contexts[i].context;
  2520. GGML_PRINT_DEBUG("%s: found unused context %d\n", __func__, i);
  2521. break;
  2522. }
  2523. }
  2524. if (ctx == NULL) {
  2525. GGML_PRINT_DEBUG("%s: no unused context found\n", __func__);
  2526. ggml_critical_section_end();
  2527. return NULL;
  2528. }
2529. // allow calling ggml_init with a size of 0
  2530. if (params.mem_size == 0) {
  2531. params.mem_size = GGML_MEM_ALIGN;
  2532. }
  2533. const size_t mem_size = params.mem_buffer ? params.mem_size : GGML_PAD(params.mem_size, GGML_MEM_ALIGN);
  2534. *ctx = (struct ggml_context) {
  2535. /*.mem_size =*/ mem_size,
  2536. /*.mem_buffer =*/ params.mem_buffer ? params.mem_buffer : GGML_ALIGNED_MALLOC(mem_size),
  2537. /*.mem_buffer_owned =*/ params.mem_buffer ? false : true,
  2538. /*.no_alloc =*/ params.no_alloc,
  2539. /*.no_alloc_save =*/ params.no_alloc,
  2540. /*.n_objects =*/ 0,
  2541. /*.objects_begin =*/ NULL,
  2542. /*.objects_end =*/ NULL,
  2543. /*.scratch =*/ { 0, 0, NULL, },
  2544. /*.scratch_save =*/ { 0, 0, NULL, },
  2545. };
  2546. GGML_ASSERT(ctx->mem_buffer != NULL);
  2547. ggml_assert_aligned(ctx->mem_buffer);
  2548. GGML_PRINT_DEBUG("%s: context initialized\n", __func__);
  2549. ggml_critical_section_end();
  2550. return ctx;
  2551. }
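// Minimal usage sketch for the context API (illustrative only; sizes are made up):
//
//     struct ggml_init_params params = {
//         /*.mem_size   =*/ 16*1024*1024,  // size of the memory pool in bytes
//         /*.mem_buffer =*/ NULL,          // NULL -> ggml allocates the pool
//         /*.no_alloc   =*/ false,         // allocate tensor data in the pool
//     };
//     struct ggml_context * ctx = ggml_init(params);
//     struct ggml_tensor  * a   = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 4, 3);
//     ggml_set_f32(a, 1.0f);
//     // ... build and compute a graph ...
//     ggml_free(ctx);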
  2552. void ggml_free(struct ggml_context * ctx) {
  2553. if (ctx == NULL) {
  2554. return;
  2555. }
  2556. // make this function thread safe
  2557. ggml_critical_section_start();
  2558. bool found = false;
  2559. for (int i = 0; i < GGML_MAX_CONTEXTS; i++) {
  2560. if (&g_state.contexts[i].context == ctx) {
  2561. g_state.contexts[i].used = false;
  2562. GGML_PRINT_DEBUG("%s: context %d has been freed. memory used = %zu\n",
  2563. __func__, i, ggml_used_mem(ctx));
  2564. if (ctx->mem_buffer_owned) {
  2565. GGML_ALIGNED_FREE(ctx->mem_buffer);
  2566. }
  2567. found = true;
  2568. break;
  2569. }
  2570. }
  2571. if (!found) {
  2572. GGML_PRINT_DEBUG("%s: context not found\n", __func__);
  2573. }
  2574. ggml_critical_section_end();
  2575. }
  2576. size_t ggml_used_mem(const struct ggml_context * ctx) {
  2577. return ctx->objects_end == NULL ? 0 : ctx->objects_end->offs + ctx->objects_end->size;
  2578. }
  2579. size_t ggml_set_scratch(struct ggml_context * ctx, struct ggml_scratch scratch) {
  2580. const size_t result = ctx->scratch.data ? ctx->scratch.offs : 0;
  2581. ctx->scratch = scratch;
  2582. return result;
  2583. }
  2584. bool ggml_get_no_alloc(struct ggml_context * ctx) {
  2585. return ctx->no_alloc;
  2586. }
  2587. void ggml_set_no_alloc(struct ggml_context * ctx, bool no_alloc) {
  2588. ctx->no_alloc = no_alloc;
  2589. }
  2590. void * ggml_get_mem_buffer(const struct ggml_context * ctx) {
  2591. return ctx->mem_buffer;
  2592. }
  2593. size_t ggml_get_mem_size(const struct ggml_context * ctx) {
  2594. return ctx->mem_size;
  2595. }
  2596. size_t ggml_get_max_tensor_size(const struct ggml_context * ctx) {
  2597. size_t max_size = 0;
  2598. for (struct ggml_tensor * tensor = ggml_get_first_tensor(ctx); tensor != NULL; tensor = ggml_get_next_tensor(ctx, tensor)) {
  2599. size_t bytes = ggml_nbytes(tensor);
  2600. max_size = MAX(max_size, bytes);
  2601. }
  2602. return max_size;
  2603. }
  2604. // IMPORTANT:
  2605. // when creating "opt" tensors, always save and load the scratch buffer
2606. // this is an error-prone process, but it is necessary to support in-place
  2607. // operators when using scratch buffers
  2608. // TODO: implement a better way
  2609. static void ggml_scratch_save(struct ggml_context * ctx) {
  2610. // this is needed to allow opt tensors to store their data
  2611. // TODO: again, need to find a better way
  2612. ctx->no_alloc_save = ctx->no_alloc;
  2613. ctx->no_alloc = false;
  2614. ctx->scratch_save = ctx->scratch;
  2615. ctx->scratch.data = NULL;
  2616. }
  2617. static void ggml_scratch_load(struct ggml_context * ctx) {
  2618. ctx->no_alloc = ctx->no_alloc_save;
  2619. ctx->scratch = ctx->scratch_save;
  2620. }
  2621. ////////////////////////////////////////////////////////////////////////////////
  2622. static struct ggml_object * ggml_new_object(struct ggml_context * ctx, enum ggml_object_type type, size_t size) {
  2623. // always insert objects at the end of the context's memory pool
  2624. struct ggml_object * obj_cur = ctx->objects_end;
  2625. const size_t cur_offs = obj_cur == NULL ? 0 : obj_cur->offs;
  2626. const size_t cur_size = obj_cur == NULL ? 0 : obj_cur->size;
  2627. const size_t cur_end = cur_offs + cur_size;
  2628. // align to GGML_MEM_ALIGN
  2629. size_t size_needed = GGML_PAD(size, GGML_MEM_ALIGN);
  2630. char * const mem_buffer = ctx->mem_buffer;
  2631. struct ggml_object * const obj_new = (struct ggml_object *)(mem_buffer + cur_end);
  2632. if (cur_end + size_needed + GGML_OBJECT_SIZE > ctx->mem_size) {
  2633. GGML_PRINT("%s: not enough space in the context's memory pool (needed %zu, available %zu)\n",
  2634. __func__, cur_end + size_needed, ctx->mem_size);
  2635. assert(false);
  2636. return NULL;
  2637. }
  2638. *obj_new = (struct ggml_object) {
  2639. .offs = cur_end + GGML_OBJECT_SIZE,
  2640. .size = size_needed,
  2641. .next = NULL,
  2642. .type = type,
  2643. };
  2644. ggml_assert_aligned(mem_buffer + obj_new->offs);
  2645. if (obj_cur != NULL) {
  2646. obj_cur->next = obj_new;
  2647. } else {
  2648. // this is the first object in this context
  2649. ctx->objects_begin = obj_new;
  2650. }
  2651. ctx->objects_end = obj_new;
  2652. //printf("%s: inserted new object at %zu, size = %zu\n", __func__, cur_end, obj_new->size);
  2653. return obj_new;
  2654. }
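// Layout produced by ggml_new_object: the new ggml_object header is written at
// the current end of the pool (mem_buffer + cur_end), its payload of size_needed
// bytes (size padded to GGML_MEM_ALIGN) starts at offs = cur_end + GGML_OBJECT_SIZE,
// and objects are chained through ->next so the context can be walked from
// objects_begin to objects_end.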
  2655. static struct ggml_tensor * ggml_new_tensor_impl(
  2656. struct ggml_context * ctx,
  2657. enum ggml_type type,
  2658. int n_dims,
  2659. const int64_t * ne,
  2660. struct ggml_tensor * view_src,
  2661. size_t view_offs) {
  2662. assert(n_dims >= 1 && n_dims <= GGML_MAX_DIMS);
  2663. // find the base tensor and absolute offset
  2664. if (view_src != NULL && view_src->view_src != NULL) {
  2665. view_offs += view_src->view_offs;
  2666. view_src = view_src->view_src;
  2667. }
  2668. size_t data_size = ggml_row_size(type, ne[0]);
  2669. for (int i = 1; i < n_dims; i++) {
  2670. data_size *= ne[i];
  2671. }
  2672. GGML_ASSERT(view_src == NULL || data_size == 0 || data_size + view_offs <= ggml_nbytes(view_src));
  2673. void * data = view_src != NULL ? view_src->data : NULL;
  2674. if (data != NULL) {
  2675. data = (char *) data + view_offs;
  2676. }
  2677. size_t obj_alloc_size = 0;
  2678. if (view_src == NULL && !ctx->no_alloc) {
  2679. if (ctx->scratch.data != NULL) {
  2680. // allocate tensor data in the scratch buffer
  2681. if (ctx->scratch.offs + data_size > ctx->scratch.size) {
  2682. GGML_PRINT("%s: not enough space in the scratch memory pool (needed %zu, available %zu)\n",
  2683. __func__, ctx->scratch.offs + data_size, ctx->scratch.size);
  2684. assert(false);
  2685. return NULL;
  2686. }
  2687. data = (char * const) ctx->scratch.data + ctx->scratch.offs;
  2688. ctx->scratch.offs += data_size;
  2689. } else {
  2690. // allocate tensor data in the context's memory pool
  2691. obj_alloc_size = data_size;
  2692. }
  2693. }
  2694. struct ggml_object * const obj_new = ggml_new_object(ctx, GGML_OBJECT_TYPE_TENSOR, GGML_TENSOR_SIZE + obj_alloc_size);
  2695. // TODO: for recoverable errors, we would need to free the data allocated from the scratch buffer here
  2696. struct ggml_tensor * const result = (struct ggml_tensor *)((char *)ctx->mem_buffer + obj_new->offs);
  2697. *result = (struct ggml_tensor) {
  2698. /*.type =*/ type,
  2699. /*.backend =*/ GGML_BACKEND_TYPE_CPU,
  2700. /*.buffer =*/ NULL,
  2701. /*.ne =*/ { 1, 1, 1, 1 },
  2702. /*.nb =*/ { 0, 0, 0, 0 },
  2703. /*.op =*/ GGML_OP_NONE,
  2704. /*.op_params =*/ { 0 },
  2705. /*.flags =*/ 0,
  2706. /*.grad =*/ NULL,
  2707. /*.src =*/ { NULL },
  2708. /*.perf_runs =*/ 0,
  2709. /*.perf_cycles =*/ 0,
  2710. /*.perf_time_us =*/ 0,
  2711. /*.view_src =*/ view_src,
  2712. /*.view_offs =*/ view_offs,
  2713. /*.data =*/ obj_alloc_size > 0 ? (void *)(result + 1) : data,
  2714. /*.name =*/ { 0 },
  2715. /*.extra =*/ NULL,
  2716. /*.padding =*/ { 0 },
  2717. };
  2718. // TODO: this should not be needed as long as we don't rely on aligned SIMD loads
  2719. //ggml_assert_aligned(result->data);
  2720. for (int i = 0; i < n_dims; i++) {
  2721. result->ne[i] = ne[i];
  2722. }
  2723. result->nb[0] = ggml_type_size(type);
  2724. result->nb[1] = result->nb[0]*(result->ne[0]/ggml_blck_size(type));
  2725. for (int i = 2; i < GGML_MAX_DIMS; i++) {
  2726. result->nb[i] = result->nb[i - 1]*result->ne[i - 1];
  2727. }
  2728. ctx->n_objects++;
  2729. return result;
  2730. }
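// Notes on ggml_new_tensor_impl: strides are set up for a packed row-major layout,
// nb[0] = type size, nb[1] = bytes per row (accounting for block-quantized types
// via ggml_blck_size), and nb[i] = nb[i-1]*ne[i-1] for i >= 2. View tensors
// (view_src != NULL) reuse the base tensor's data at data + view_offs and get no
// payload of their own; with no_alloc set, data stays NULL and no payload is
// reserved in the pool.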
  2731. struct ggml_tensor * ggml_new_tensor(
  2732. struct ggml_context * ctx,
  2733. enum ggml_type type,
  2734. int n_dims,
  2735. const int64_t * ne) {
  2736. return ggml_new_tensor_impl(ctx, type, n_dims, ne, NULL, 0);
  2737. }
  2738. struct ggml_tensor * ggml_new_tensor_1d(
  2739. struct ggml_context * ctx,
  2740. enum ggml_type type,
  2741. int64_t ne0) {
  2742. return ggml_new_tensor(ctx, type, 1, &ne0);
  2743. }
  2744. struct ggml_tensor * ggml_new_tensor_2d(
  2745. struct ggml_context * ctx,
  2746. enum ggml_type type,
  2747. int64_t ne0,
  2748. int64_t ne1) {
  2749. const int64_t ne[2] = { ne0, ne1 };
  2750. return ggml_new_tensor(ctx, type, 2, ne);
  2751. }
  2752. struct ggml_tensor * ggml_new_tensor_3d(
  2753. struct ggml_context * ctx,
  2754. enum ggml_type type,
  2755. int64_t ne0,
  2756. int64_t ne1,
  2757. int64_t ne2) {
  2758. const int64_t ne[3] = { ne0, ne1, ne2 };
  2759. return ggml_new_tensor(ctx, type, 3, ne);
  2760. }
  2761. struct ggml_tensor * ggml_new_tensor_4d(
  2762. struct ggml_context * ctx,
  2763. enum ggml_type type,
  2764. int64_t ne0,
  2765. int64_t ne1,
  2766. int64_t ne2,
  2767. int64_t ne3) {
  2768. const int64_t ne[4] = { ne0, ne1, ne2, ne3 };
  2769. return ggml_new_tensor(ctx, type, 4, ne);
  2770. }
  2771. struct ggml_tensor * ggml_new_i32(struct ggml_context * ctx, int32_t value) {
  2772. ggml_scratch_save(ctx);
  2773. struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 1);
  2774. ggml_scratch_load(ctx);
  2775. ggml_set_i32(result, value);
  2776. return result;
  2777. }
  2778. struct ggml_tensor * ggml_new_f32(struct ggml_context * ctx, float value) {
  2779. ggml_scratch_save(ctx);
  2780. struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
  2781. ggml_scratch_load(ctx);
  2782. ggml_set_f32(result, value);
  2783. return result;
  2784. }
  2785. struct ggml_tensor * ggml_dup_tensor(struct ggml_context * ctx, const struct ggml_tensor * src) {
  2786. return ggml_new_tensor(ctx, src->type, GGML_MAX_DIMS, src->ne);
  2787. }
  2788. static void ggml_set_op_params(struct ggml_tensor * tensor, const void * params, size_t params_size) {
  2789. GGML_ASSERT(tensor != NULL); // silence -Warray-bounds warnings
  2790. assert(params_size <= GGML_MAX_OP_PARAMS);
  2791. memcpy(tensor->op_params, params, params_size);
  2792. }
  2793. static int32_t ggml_get_op_params_i32(const struct ggml_tensor * tensor, uint32_t i) {
  2794. assert(i < GGML_MAX_OP_PARAMS / sizeof(int32_t));
  2795. return ((const int32_t *)(tensor->op_params))[i];
  2796. }
  2797. static float ggml_get_op_params_f32(const struct ggml_tensor * tensor, uint32_t i) {
  2798. assert(i < GGML_MAX_OP_PARAMS / sizeof(float));
  2799. return ((const float *)(tensor->op_params))[i];
  2800. }
  2801. static void ggml_set_op_params_i32(struct ggml_tensor * tensor, uint32_t i, int32_t value) {
  2802. assert(i < GGML_MAX_OP_PARAMS / sizeof(int32_t));
  2803. ((int32_t *)(tensor->op_params))[i] = value;
  2804. }
  2805. static void ggml_set_op_params_f32(struct ggml_tensor * tensor, uint32_t i, float value) {
  2806. assert(i < GGML_MAX_OP_PARAMS / sizeof(float));
  2807. ((float *)(tensor->op_params))[i] = value;
  2808. }
  2809. struct ggml_tensor * ggml_set_zero(struct ggml_tensor * tensor) {
  2810. memset(tensor->data, 0, ggml_nbytes(tensor));
  2811. return tensor;
  2812. }
  2813. struct ggml_tensor * ggml_set_i32 (struct ggml_tensor * tensor, int32_t value) {
  2814. const int n = ggml_nrows(tensor);
  2815. const int nc = tensor->ne[0];
  2816. const size_t n1 = tensor->nb[1];
  2817. char * const data = tensor->data;
  2818. switch (tensor->type) {
  2819. case GGML_TYPE_I8:
  2820. {
  2821. assert(tensor->nb[0] == sizeof(int8_t));
  2822. for (int i = 0; i < n; i++) {
  2823. ggml_vec_set_i8(nc, (int8_t *)(data + i*n1), value);
  2824. }
  2825. } break;
  2826. case GGML_TYPE_I16:
  2827. {
  2828. assert(tensor->nb[0] == sizeof(int16_t));
  2829. for (int i = 0; i < n; i++) {
  2830. ggml_vec_set_i16(nc, (int16_t *)(data + i*n1), value);
  2831. }
  2832. } break;
  2833. case GGML_TYPE_I32:
  2834. {
  2835. assert(tensor->nb[0] == sizeof(int32_t));
  2836. for (int i = 0; i < n; i++) {
  2837. ggml_vec_set_i32(nc, (int32_t *)(data + i*n1), value);
  2838. }
  2839. } break;
  2840. case GGML_TYPE_F16:
  2841. {
  2842. assert(tensor->nb[0] == sizeof(ggml_fp16_t));
  2843. for (int i = 0; i < n; i++) {
  2844. ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), GGML_FP32_TO_FP16(value));
  2845. }
  2846. } break;
  2847. case GGML_TYPE_BF16:
  2848. {
  2849. assert(tensor->nb[0] == sizeof(ggml_fp16_t));
  2850. for (int i = 0; i < n; i++) {
  2851. ggml_vec_set_bf16(nc, (ggml_bf16_t *)(data + i*n1), GGML_FP32_TO_BF16(value));
  2852. }
  2853. } break;
  2854. case GGML_TYPE_F32:
  2855. {
  2856. assert(tensor->nb[0] == sizeof(float));
  2857. for (int i = 0; i < n; i++) {
  2858. ggml_vec_set_f32(nc, (float *)(data + i*n1), value);
  2859. }
  2860. } break;
  2861. default:
  2862. {
  2863. GGML_ASSERT(false);
  2864. } break;
  2865. }
  2866. return tensor;
  2867. }
  2868. struct ggml_tensor * ggml_set_f32(struct ggml_tensor * tensor, float value) {
  2869. const int n = ggml_nrows(tensor);
  2870. const int nc = tensor->ne[0];
  2871. const size_t n1 = tensor->nb[1];
  2872. char * const data = tensor->data;
  2873. switch (tensor->type) {
  2874. case GGML_TYPE_I8:
  2875. {
  2876. assert(tensor->nb[0] == sizeof(int8_t));
  2877. for (int i = 0; i < n; i++) {
  2878. ggml_vec_set_i8(nc, (int8_t *)(data + i*n1), value);
  2879. }
  2880. } break;
  2881. case GGML_TYPE_I16:
  2882. {
  2883. assert(tensor->nb[0] == sizeof(int16_t));
  2884. for (int i = 0; i < n; i++) {
  2885. ggml_vec_set_i16(nc, (int16_t *)(data + i*n1), value);
  2886. }
  2887. } break;
  2888. case GGML_TYPE_I32:
  2889. {
  2890. assert(tensor->nb[0] == sizeof(int32_t));
  2891. for (int i = 0; i < n; i++) {
  2892. ggml_vec_set_i32(nc, (int32_t *)(data + i*n1), value);
  2893. }
  2894. } break;
  2895. case GGML_TYPE_F16:
  2896. {
  2897. assert(tensor->nb[0] == sizeof(ggml_fp16_t));
  2898. for (int i = 0; i < n; i++) {
  2899. ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), GGML_FP32_TO_FP16(value));
  2900. }
  2901. } break;
  2902. case GGML_TYPE_BF16:
  2903. {
  2904. assert(tensor->nb[0] == sizeof(ggml_bf16_t));
  2905. for (int i = 0; i < n; i++) {
  2906. ggml_vec_set_bf16(nc, (ggml_bf16_t *)(data + i*n1), GGML_FP32_TO_BF16(value));
  2907. }
  2908. } break;
  2909. case GGML_TYPE_F32:
  2910. {
  2911. assert(tensor->nb[0] == sizeof(float));
  2912. for (int i = 0; i < n; i++) {
  2913. ggml_vec_set_f32(nc, (float *)(data + i*n1), value);
  2914. }
  2915. } break;
  2916. default:
  2917. {
  2918. GGML_ASSERT(false);
  2919. } break;
  2920. }
  2921. return tensor;
  2922. }
  2923. void ggml_unravel_index(const struct ggml_tensor * tensor, int64_t i, int64_t * i0, int64_t * i1, int64_t * i2, int64_t * i3) {
  2924. const int64_t ne2 = tensor->ne[2];
  2925. const int64_t ne1 = tensor->ne[1];
  2926. const int64_t ne0 = tensor->ne[0];
  2927. const int64_t i3_ = (i/(ne2*ne1*ne0));
  2928. const int64_t i2_ = (i - i3_*ne2*ne1*ne0)/(ne1*ne0);
  2929. const int64_t i1_ = (i - i3_*ne2*ne1*ne0 - i2_*ne1*ne0)/ne0;
  2930. const int64_t i0_ = (i - i3_*ne2*ne1*ne0 - i2_*ne1*ne0 - i1_*ne0);
  2931. if (i0) {
  2932. * i0 = i0_;
  2933. }
  2934. if (i1) {
  2935. * i1 = i1_;
  2936. }
  2937. if (i2) {
  2938. * i2 = i2_;
  2939. }
  2940. if (i3) {
  2941. * i3 = i3_;
  2942. }
  2943. }
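// ggml_unravel_index inverts the row-major flattening of the element index:
// for a flat index i,
//
//     i = ((i3*ne2 + i2)*ne1 + i1)*ne0 + i0
//
// so i3 = i / (ne2*ne1*ne0), i2 = (i mod ne2*ne1*ne0) / (ne1*ne0), and so on,
// which is what the divisions above compute.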
  2944. int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i) {
  2945. if (!ggml_is_contiguous(tensor)) {
  2946. int64_t id[4] = { 0, 0, 0, 0 };
  2947. ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]);
  2948. return ggml_get_i32_nd(tensor, id[0], id[1], id[2], id[3]);
  2949. }
  2950. switch (tensor->type) {
  2951. case GGML_TYPE_I8:
  2952. {
  2953. GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
  2954. return ((int8_t *)(tensor->data))[i];
  2955. }
  2956. case GGML_TYPE_I16:
  2957. {
  2958. GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
  2959. return ((int16_t *)(tensor->data))[i];
  2960. }
  2961. case GGML_TYPE_I32:
  2962. {
  2963. GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
  2964. return ((int32_t *)(tensor->data))[i];
  2965. }
  2966. case GGML_TYPE_F16:
  2967. {
  2968. GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
  2969. return GGML_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]);
  2970. }
  2971. case GGML_TYPE_BF16:
  2972. {
  2973. GGML_ASSERT(tensor->nb[0] == sizeof(ggml_bf16_t));
  2974. return GGML_BF16_TO_FP32(((ggml_bf16_t *)(tensor->data))[i]);
  2975. }
  2976. case GGML_TYPE_F32:
  2977. {
  2978. GGML_ASSERT(tensor->nb[0] == sizeof(float));
  2979. return ((float *)(tensor->data))[i];
  2980. }
  2981. default:
  2982. {
  2983. GGML_ASSERT(false);
  2984. }
  2985. }
  2986. return 0.0f;
  2987. }
  2988. void ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value) {
  2989. if (!ggml_is_contiguous(tensor)) {
  2990. int64_t id[4] = { 0, 0, 0, 0 };
  2991. ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]);
  2992. ggml_set_i32_nd(tensor, id[0], id[1], id[2], id[3], value);
  2993. return;
  2994. }
  2995. switch (tensor->type) {
  2996. case GGML_TYPE_I8:
  2997. {
  2998. GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
  2999. ((int8_t *)(tensor->data))[i] = value;
  3000. } break;
  3001. case GGML_TYPE_I16:
  3002. {
  3003. GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
  3004. ((int16_t *)(tensor->data))[i] = value;
  3005. } break;
  3006. case GGML_TYPE_I32:
  3007. {
  3008. GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
  3009. ((int32_t *)(tensor->data))[i] = value;
  3010. } break;
  3011. case GGML_TYPE_F16:
  3012. {
  3013. GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
  3014. ((ggml_fp16_t *)(tensor->data))[i] = GGML_FP32_TO_FP16(value);
  3015. } break;
  3016. case GGML_TYPE_BF16:
  3017. {
  3018. GGML_ASSERT(tensor->nb[0] == sizeof(ggml_bf16_t));
  3019. ((ggml_bf16_t *)(tensor->data))[i] = GGML_FP32_TO_BF16(value);
  3020. } break;
  3021. case GGML_TYPE_F32:
  3022. {
  3023. GGML_ASSERT(tensor->nb[0] == sizeof(float));
  3024. ((float *)(tensor->data))[i] = value;
  3025. } break;
  3026. default:
  3027. {
  3028. GGML_ASSERT(false);
  3029. } break;
  3030. }
  3031. }
  3032. int32_t ggml_get_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3) {
  3033. void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3];
  3034. switch (tensor->type) {
  3035. case GGML_TYPE_I8:
  3036. return ((int8_t *) data)[0];
  3037. case GGML_TYPE_I16:
  3038. return ((int16_t *) data)[0];
  3039. case GGML_TYPE_I32:
  3040. return ((int32_t *) data)[0];
  3041. case GGML_TYPE_F16:
  3042. return GGML_FP16_TO_FP32(((ggml_fp16_t *) data)[0]);
  3043. case GGML_TYPE_BF16:
  3044. return GGML_BF16_TO_FP32(((ggml_bf16_t *) data)[0]);
  3045. case GGML_TYPE_F32:
  3046. return ((float *) data)[0];
  3047. default:
  3048. GGML_ASSERT(false);
  3049. }
  3050. return 0.0f;
  3051. }
  3052. void ggml_set_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, int32_t value) {
  3053. void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3];
  3054. switch (tensor->type) {
  3055. case GGML_TYPE_I8:
  3056. {
  3057. ((int8_t *)(data))[0] = value;
  3058. } break;
  3059. case GGML_TYPE_I16:
  3060. {
  3061. ((int16_t *)(data))[0] = value;
  3062. } break;
  3063. case GGML_TYPE_I32:
  3064. {
  3065. ((int32_t *)(data))[0] = value;
  3066. } break;
  3067. case GGML_TYPE_F16:
  3068. {
  3069. ((ggml_fp16_t *)(data))[0] = GGML_FP32_TO_FP16(value);
  3070. } break;
  3071. case GGML_TYPE_BF16:
  3072. {
  3073. ((ggml_bf16_t *)(data))[0] = GGML_FP32_TO_BF16(value);
  3074. } break;
  3075. case GGML_TYPE_F32:
  3076. {
  3077. ((float *)(data))[0] = value;
  3078. } break;
  3079. default:
  3080. {
  3081. GGML_ASSERT(false);
  3082. } break;
  3083. }
  3084. }
  3085. float ggml_get_f32_1d(const struct ggml_tensor * tensor, int i) {
  3086. if (!ggml_is_contiguous(tensor)) {
  3087. int64_t id[4] = { 0, 0, 0, 0 };
  3088. ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]);
  3089. return ggml_get_f32_nd(tensor, id[0], id[1], id[2], id[3]);
  3090. }
  3091. switch (tensor->type) {
  3092. case GGML_TYPE_I8:
  3093. {
  3094. GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
  3095. return ((int8_t *)(tensor->data))[i];
  3096. }
  3097. case GGML_TYPE_I16:
  3098. {
  3099. GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
  3100. return ((int16_t *)(tensor->data))[i];
  3101. }
  3102. case GGML_TYPE_I32:
  3103. {
  3104. GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
  3105. return ((int32_t *)(tensor->data))[i];
  3106. }
  3107. case GGML_TYPE_F16:
  3108. {
  3109. GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
  3110. return GGML_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]);
  3111. }
  3112. case GGML_TYPE_BF16:
  3113. {
  3114. GGML_ASSERT(tensor->nb[0] == sizeof(ggml_bf16_t));
  3115. return GGML_BF16_TO_FP32(((ggml_bf16_t *)(tensor->data))[i]);
  3116. }
  3117. case GGML_TYPE_F32:
  3118. {
  3119. GGML_ASSERT(tensor->nb[0] == sizeof(float));
  3120. return ((float *)(tensor->data))[i];
  3121. }
  3122. default:
  3123. {
  3124. GGML_ASSERT(false);
  3125. }
  3126. }
  3127. return 0.0f;
  3128. }
  3129. void ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value) {
  3130. if (!ggml_is_contiguous(tensor)) {
  3131. int64_t id[4] = { 0, 0, 0, 0 };
  3132. ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]);
  3133. ggml_set_f32_nd(tensor, id[0], id[1], id[2], id[3], value);
  3134. return;
  3135. }
  3136. switch (tensor->type) {
  3137. case GGML_TYPE_I8:
  3138. {
  3139. GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
  3140. ((int8_t *)(tensor->data))[i] = value;
  3141. } break;
  3142. case GGML_TYPE_I16:
  3143. {
  3144. GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
  3145. ((int16_t *)(tensor->data))[i] = value;
  3146. } break;
  3147. case GGML_TYPE_I32:
  3148. {
  3149. GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
  3150. ((int32_t *)(tensor->data))[i] = value;
  3151. } break;
  3152. case GGML_TYPE_F16:
  3153. {
  3154. GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
  3155. ((ggml_fp16_t *)(tensor->data))[i] = GGML_FP32_TO_FP16(value);
  3156. } break;
  3157. case GGML_TYPE_BF16:
  3158. {
  3159. GGML_ASSERT(tensor->nb[0] == sizeof(ggml_bf16_t));
  3160. ((ggml_bf16_t *)(tensor->data))[i] = GGML_FP32_TO_BF16(value);
  3161. } break;
  3162. case GGML_TYPE_F32:
  3163. {
  3164. GGML_ASSERT(tensor->nb[0] == sizeof(float));
  3165. ((float *)(tensor->data))[i] = value;
  3166. } break;
  3167. default:
  3168. {
  3169. GGML_ASSERT(false);
  3170. } break;
  3171. }
  3172. }
  3173. float ggml_get_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3) {
  3174. void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3];
  3175. switch (tensor->type) {
  3176. case GGML_TYPE_I8:
  3177. return ((int8_t *) data)[0];
  3178. case GGML_TYPE_I16:
  3179. return ((int16_t *) data)[0];
  3180. case GGML_TYPE_I32:
  3181. return ((int32_t *) data)[0];
  3182. case GGML_TYPE_F16:
  3183. return GGML_FP16_TO_FP32(((ggml_fp16_t *) data)[0]);
  3184. case GGML_TYPE_BF16:
  3185. return GGML_BF16_TO_FP32(((ggml_bf16_t *) data)[0]);
  3186. case GGML_TYPE_F32:
  3187. return ((float *) data)[0];
  3188. default:
  3189. GGML_ASSERT(false);
  3190. }
  3191. return 0.0f;
  3192. }
  3193. void ggml_set_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, float value) {
  3194. void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3];
  3195. switch (tensor->type) {
  3196. case GGML_TYPE_I8:
  3197. {
  3198. ((int8_t *)(data))[0] = value;
  3199. } break;
  3200. case GGML_TYPE_I16:
  3201. {
  3202. ((int16_t *)(data))[0] = value;
  3203. } break;
  3204. case GGML_TYPE_I32:
  3205. {
  3206. ((int32_t *)(data))[0] = value;
  3207. } break;
  3208. case GGML_TYPE_F16:
  3209. {
  3210. ((ggml_fp16_t *)(data))[0] = GGML_FP32_TO_FP16(value);
  3211. } break;
  3212. case GGML_TYPE_BF16:
  3213. {
  3214. ((ggml_bf16_t *)(data))[0] = GGML_FP32_TO_BF16(value);
  3215. } break;
  3216. case GGML_TYPE_F32:
  3217. {
  3218. ((float *)(data))[0] = value;
  3219. } break;
  3220. default:
  3221. {
  3222. GGML_ASSERT(false);
  3223. } break;
  3224. }
  3225. }
  3226. void * ggml_get_data(const struct ggml_tensor * tensor) {
  3227. return tensor->data;
  3228. }
  3229. float * ggml_get_data_f32(const struct ggml_tensor * tensor) {
  3230. assert(tensor->type == GGML_TYPE_F32);
  3231. return (float *)(tensor->data);
  3232. }
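//
// usage sketch (illustrative, not part of the library): reading and writing single
// elements through the f32 accessors above. the buffer size below is an arbitrary
// choice for a small CPU-only context.
//
//   struct ggml_init_params params = {
//       /*.mem_size   =*/ 16*1024*1024,
//       /*.mem_buffer =*/ NULL,
//       /*.no_alloc   =*/ false,
//   };
//   struct ggml_context * ctx = ggml_init(params);
//
//   struct ggml_tensor * t = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 4, 3); // 4 columns, 3 rows
//
//   // write through the flat (1d) and the indexed (nd) setters
//   for (int i = 0; i < (int) ggml_nelements(t); ++i) {
//       ggml_set_f32_1d(t, i, (float) i);
//   }
//   ggml_set_f32_nd(t, /*i0*/ 0, /*i1*/ 2, /*i2*/ 0, /*i3*/ 0, 42.0f);
//
//   // read back through the accessors or through the raw f32 pointer
//   const float   v = ggml_get_f32_nd(t, 0, 2, 0, 0); // 42.0f
//   const float * p = ggml_get_data_f32(t);           // p[0 + 2*4] == 42.0f for this contiguous tensor
//
//   ggml_free(ctx);
//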
  3233. GGML_CALL enum ggml_unary_op ggml_get_unary_op(const struct ggml_tensor * tensor) {
  3234. GGML_ASSERT(tensor->op == GGML_OP_UNARY);
  3235. return (enum ggml_unary_op) ggml_get_op_params_i32(tensor, 0);
  3236. }
  3237. const char * ggml_get_name(const struct ggml_tensor * tensor) {
  3238. return tensor->name;
  3239. }
  3240. struct ggml_tensor * ggml_set_name(struct ggml_tensor * tensor, const char * name) {
  3241. strncpy(tensor->name, name, sizeof(tensor->name) - 1);
  3242. tensor->name[sizeof(tensor->name) - 1] = '\0';
  3243. return tensor;
  3244. }
  3245. struct ggml_tensor * ggml_format_name(struct ggml_tensor * tensor, const char * fmt, ...) {
  3246. va_list args;
  3247. va_start(args, fmt);
  3248. vsnprintf(tensor->name, sizeof(tensor->name), fmt, args);
  3249. va_end(args);
  3250. return tensor;
  3251. }
  3252. struct ggml_tensor * ggml_view_tensor(
  3253. struct ggml_context * ctx,
  3254. struct ggml_tensor * src) {
  3255. struct ggml_tensor * result = ggml_new_tensor_impl(ctx, src->type, GGML_MAX_DIMS, src->ne, src, 0);
  3256. ggml_format_name(result, "%s (view)", src->name);
  3257. for (int i = 0; i < GGML_MAX_DIMS; i++) {
  3258. result->nb[i] = src->nb[i];
  3259. }
  3260. return result;
  3261. }
  3262. struct ggml_tensor * ggml_get_first_tensor(const struct ggml_context * ctx) {
  3263. struct ggml_object * obj = ctx->objects_begin;
  3264. char * const mem_buffer = ctx->mem_buffer;
  3265. while (obj != NULL) {
  3266. if (obj->type == GGML_OBJECT_TYPE_TENSOR) {
  3267. return (struct ggml_tensor *)(mem_buffer + obj->offs);
  3268. }
  3269. obj = obj->next;
  3270. }
  3271. return NULL;
  3272. }
  3273. struct ggml_tensor * ggml_get_next_tensor(const struct ggml_context * ctx, struct ggml_tensor * tensor) {
  3274. struct ggml_object * obj = (struct ggml_object *) ((char *)tensor - GGML_OBJECT_SIZE);
  3275. obj = obj->next;
  3276. char * const mem_buffer = ctx->mem_buffer;
  3277. while (obj != NULL) {
  3278. if (obj->type == GGML_OBJECT_TYPE_TENSOR) {
  3279. return (struct ggml_tensor *)(mem_buffer + obj->offs);
  3280. }
  3281. obj = obj->next;
  3282. }
  3283. return NULL;
  3284. }
  3285. struct ggml_tensor * ggml_get_tensor(struct ggml_context * ctx, const char * name) {
  3286. struct ggml_object * obj = ctx->objects_begin;
  3287. char * const mem_buffer = ctx->mem_buffer;
  3288. while (obj != NULL) {
  3289. if (obj->type == GGML_OBJECT_TYPE_TENSOR) {
  3290. struct ggml_tensor * cur = (struct ggml_tensor *)(mem_buffer + obj->offs);
  3291. if (strcmp(cur->name, name) == 0) {
  3292. return cur;
  3293. }
  3294. }
  3295. obj = obj->next;
  3296. }
  3297. return NULL;
  3298. }
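//
// usage sketch (illustrative, not part of the library): naming tensors and finding
// them again, assuming ctx was created with ggml_init as in the sketch after
// ggml_get_data_f32 above.
//
//   struct ggml_tensor * w = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 8, 8);
//   struct ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 8);
//   ggml_set_name   (w, "proj.weight");
//   ggml_format_name(b, "%s.bias", "proj");
//
//   // lookup by name
//   struct ggml_tensor * found = ggml_get_tensor(ctx, "proj.weight"); // == w
//
//   // enumerate every tensor allocated in the context
//   for (struct ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
//       printf("%s\n", ggml_get_name(t));
//   }
//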
  3299. ////////////////////////////////////////////////////////////////////////////////
  3300. // ggml_dup
  3301. static struct ggml_tensor * ggml_dup_impl(
  3302. struct ggml_context * ctx,
  3303. struct ggml_tensor * a,
  3304. bool inplace) {
  3305. bool is_node = false;
  3306. if (!inplace && (a->grad)) {
  3307. is_node = true;
  3308. }
  3309. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3310. result->op = GGML_OP_DUP;
  3311. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3312. result->src[0] = a;
  3313. return result;
  3314. }
  3315. struct ggml_tensor * ggml_dup(
  3316. struct ggml_context * ctx,
  3317. struct ggml_tensor * a) {
  3318. return ggml_dup_impl(ctx, a, false);
  3319. }
  3320. struct ggml_tensor * ggml_dup_inplace(
  3321. struct ggml_context * ctx,
  3322. struct ggml_tensor * a) {
  3323. return ggml_dup_impl(ctx, a, true);
  3324. }
  3325. // ggml_add
  3326. static struct ggml_tensor * ggml_add_impl(
  3327. struct ggml_context * ctx,
  3328. struct ggml_tensor * a,
  3329. struct ggml_tensor * b,
  3330. bool inplace) {
  3331. GGML_ASSERT(ggml_can_repeat(b, a));
  3332. bool is_node = false;
  3333. if (!inplace && (a->grad || b->grad)) {
  3334. // TODO: support backward pass for broadcasting
  3335. GGML_ASSERT(ggml_are_same_shape(a, b));
  3336. is_node = true;
  3337. }
  3338. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3339. result->op = GGML_OP_ADD;
  3340. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3341. result->src[0] = a;
  3342. result->src[1] = b;
  3343. return result;
  3344. }
  3345. struct ggml_tensor * ggml_add(
  3346. struct ggml_context * ctx,
  3347. struct ggml_tensor * a,
  3348. struct ggml_tensor * b) {
  3349. return ggml_add_impl(ctx, a, b, false);
  3350. }
  3351. struct ggml_tensor * ggml_add_inplace(
  3352. struct ggml_context * ctx,
  3353. struct ggml_tensor * a,
  3354. struct ggml_tensor * b) {
  3355. return ggml_add_impl(ctx, a, b, true);
  3356. }
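//
// usage sketch (illustrative, not part of the library): ggml_add only records a node
// in the graph; the actual computation happens when the graph is evaluated. b may be
// broadcast (repeated) into the shape of a, per ggml_can_repeat. assumes ctx as in
// the earlier sketches.
//
//   struct ggml_tensor * a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 4, 3); // [4, 3]
//   struct ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4);    // broadcast across the 3 rows
//   struct ggml_tensor * c = ggml_add(ctx, a, b);                          // records GGML_OP_ADD, no compute yet
//
//   // (fill a and b with data, e.g. via ggml_set_f32_1d, before computing)
//   struct ggml_cgraph * gf = ggml_new_graph(ctx);
//   ggml_build_forward_expand(gf, c);
//   ggml_graph_compute_with_ctx(ctx, gf, /*n_threads*/ 1);
//   // c now holds a + b with b repeated along dim 1
//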
  3357. // ggml_add_cast
  3358. static struct ggml_tensor * ggml_add_cast_impl(
  3359. struct ggml_context * ctx,
  3360. struct ggml_tensor * a,
  3361. struct ggml_tensor * b,
  3362. enum ggml_type type) {
  3363. // TODO: support less-strict constraint
  3364. // GGML_ASSERT(ggml_can_repeat(b, a));
  3365. GGML_ASSERT(ggml_can_repeat_rows(b, a));
3366. // currently only supported for quantized input and f16/bf16
  3367. GGML_ASSERT(ggml_is_quantized(a->type) ||
  3368. a->type == GGML_TYPE_F16 ||
  3369. a->type == GGML_TYPE_BF16);
  3370. bool is_node = false;
  3371. if (a->grad || b->grad) {
  3372. // TODO: support backward pass for broadcasting
  3373. GGML_ASSERT(ggml_are_same_shape(a, b));
  3374. is_node = true;
  3375. }
  3376. struct ggml_tensor * result = ggml_new_tensor(ctx, type, GGML_MAX_DIMS, a->ne);
  3377. result->op = GGML_OP_ADD;
  3378. result->grad = is_node ? ggml_new_tensor(ctx, GGML_TYPE_F32, GGML_MAX_DIMS, a->ne) : NULL;
  3379. result->src[0] = a;
  3380. result->src[1] = b;
  3381. return result;
  3382. }
  3383. struct ggml_tensor * ggml_add_cast(
  3384. struct ggml_context * ctx,
  3385. struct ggml_tensor * a,
  3386. struct ggml_tensor * b,
  3387. enum ggml_type type) {
  3388. return ggml_add_cast_impl(ctx, a, b, type);
  3389. }
  3390. // ggml_add1
  3391. static struct ggml_tensor * ggml_add1_impl(
  3392. struct ggml_context * ctx,
  3393. struct ggml_tensor * a,
  3394. struct ggml_tensor * b,
  3395. bool inplace) {
  3396. GGML_ASSERT(ggml_is_scalar(b));
  3397. GGML_ASSERT(ggml_is_padded_1d(a));
  3398. bool is_node = false;
  3399. if (a->grad || b->grad) {
  3400. is_node = true;
  3401. }
  3402. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3403. result->op = GGML_OP_ADD1;
  3404. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3405. result->src[0] = a;
  3406. result->src[1] = b;
  3407. return result;
  3408. }
  3409. struct ggml_tensor * ggml_add1(
  3410. struct ggml_context * ctx,
  3411. struct ggml_tensor * a,
  3412. struct ggml_tensor * b) {
  3413. return ggml_add1_impl(ctx, a, b, false);
  3414. }
  3415. struct ggml_tensor * ggml_add1_inplace(
  3416. struct ggml_context * ctx,
  3417. struct ggml_tensor * a,
  3418. struct ggml_tensor * b) {
  3419. return ggml_add1_impl(ctx, a, b, true);
  3420. }
  3421. // ggml_acc
  3422. static struct ggml_tensor * ggml_acc_impl(
  3423. struct ggml_context * ctx,
  3424. struct ggml_tensor * a,
  3425. struct ggml_tensor * b,
  3426. size_t nb1,
  3427. size_t nb2,
  3428. size_t nb3,
  3429. size_t offset,
  3430. bool inplace) {
  3431. GGML_ASSERT(ggml_nelements(b) <= ggml_nelements(a));
  3432. GGML_ASSERT(ggml_is_contiguous(a));
  3433. GGML_ASSERT(a->type == GGML_TYPE_F32);
  3434. GGML_ASSERT(b->type == GGML_TYPE_F32);
  3435. bool is_node = false;
  3436. if (!inplace && (a->grad || b->grad)) {
  3437. is_node = true;
  3438. }
  3439. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3440. int32_t params[] = { nb1, nb2, nb3, offset, inplace ? 1 : 0 };
  3441. ggml_set_op_params(result, params, sizeof(params));
  3442. result->op = GGML_OP_ACC;
  3443. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3444. result->src[0] = a;
  3445. result->src[1] = b;
  3446. return result;
  3447. }
  3448. struct ggml_tensor * ggml_acc(
  3449. struct ggml_context * ctx,
  3450. struct ggml_tensor * a,
  3451. struct ggml_tensor * b,
  3452. size_t nb1,
  3453. size_t nb2,
  3454. size_t nb3,
  3455. size_t offset) {
  3456. return ggml_acc_impl(ctx, a, b, nb1, nb2, nb3, offset, false);
  3457. }
  3458. struct ggml_tensor * ggml_acc_inplace(
  3459. struct ggml_context * ctx,
  3460. struct ggml_tensor * a,
  3461. struct ggml_tensor * b,
  3462. size_t nb1,
  3463. size_t nb2,
  3464. size_t nb3,
  3465. size_t offset) {
  3466. return ggml_acc_impl(ctx, a, b, nb1, nb2, nb3, offset, true);
  3467. }
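//
// usage sketch (illustrative, not part of the library): accumulating a small tensor
// into a region of a larger one. nb1/nb2/nb3 and offset are in bytes and describe how
// b is laid over a. assumes ctx as in the earlier sketches.
//
//   struct ggml_tensor * big   = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 8, 8);
//   struct ggml_tensor * small = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 4, 4);
//
//   // add `small` into `big`, starting at row 2, column 2
//   const size_t offset = 2*big->nb[1] + 2*big->nb[0];
//   struct ggml_tensor * out = ggml_acc(ctx, big, small, big->nb[1], big->nb[2], big->nb[3], offset);
//   // `out` is a new tensor; ggml_acc_inplace would update a view of `big` instead
//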
  3468. // ggml_sub
  3469. static struct ggml_tensor * ggml_sub_impl(
  3470. struct ggml_context * ctx,
  3471. struct ggml_tensor * a,
  3472. struct ggml_tensor * b,
  3473. bool inplace) {
  3474. GGML_ASSERT(ggml_are_same_shape(a, b));
  3475. bool is_node = false;
  3476. if (!inplace && (a->grad || b->grad)) {
  3477. is_node = true;
  3478. }
  3479. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3480. result->op = GGML_OP_SUB;
  3481. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3482. result->src[0] = a;
  3483. result->src[1] = b;
  3484. return result;
  3485. }
  3486. struct ggml_tensor * ggml_sub(
  3487. struct ggml_context * ctx,
  3488. struct ggml_tensor * a,
  3489. struct ggml_tensor * b) {
  3490. return ggml_sub_impl(ctx, a, b, false);
  3491. }
  3492. struct ggml_tensor * ggml_sub_inplace(
  3493. struct ggml_context * ctx,
  3494. struct ggml_tensor * a,
  3495. struct ggml_tensor * b) {
  3496. return ggml_sub_impl(ctx, a, b, true);
  3497. }
  3498. // ggml_mul
  3499. static struct ggml_tensor * ggml_mul_impl(
  3500. struct ggml_context * ctx,
  3501. struct ggml_tensor * a,
  3502. struct ggml_tensor * b,
  3503. bool inplace) {
  3504. GGML_ASSERT(ggml_can_repeat(b, a));
  3505. bool is_node = false;
  3506. if (!inplace && (a->grad || b->grad)) {
  3507. // TODO: support backward pass for broadcasting
  3508. GGML_ASSERT(ggml_are_same_shape(a, b));
  3509. is_node = true;
  3510. }
  3511. if (inplace) {
  3512. GGML_ASSERT(!is_node);
  3513. }
  3514. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3515. result->op = GGML_OP_MUL;
  3516. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3517. result->src[0] = a;
  3518. result->src[1] = b;
  3519. return result;
  3520. }
  3521. struct ggml_tensor * ggml_mul(
  3522. struct ggml_context * ctx,
  3523. struct ggml_tensor * a,
  3524. struct ggml_tensor * b) {
  3525. return ggml_mul_impl(ctx, a, b, false);
  3526. }
  3527. struct ggml_tensor * ggml_mul_inplace(
  3528. struct ggml_context * ctx,
  3529. struct ggml_tensor * a,
  3530. struct ggml_tensor * b) {
  3531. return ggml_mul_impl(ctx, a, b, true);
  3532. }
  3533. // ggml_div
  3534. static struct ggml_tensor * ggml_div_impl(
  3535. struct ggml_context * ctx,
  3536. struct ggml_tensor * a,
  3537. struct ggml_tensor * b,
  3538. bool inplace) {
  3539. GGML_ASSERT(ggml_can_repeat(b, a));
  3540. bool is_node = false;
  3541. if (!inplace && (a->grad || b->grad)) {
  3542. is_node = true;
  3543. }
  3544. if (inplace) {
  3545. GGML_ASSERT(!is_node);
  3546. }
  3547. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3548. result->op = GGML_OP_DIV;
  3549. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3550. result->src[0] = a;
  3551. result->src[1] = b;
  3552. return result;
  3553. }
  3554. struct ggml_tensor * ggml_div(
  3555. struct ggml_context * ctx,
  3556. struct ggml_tensor * a,
  3557. struct ggml_tensor * b) {
  3558. return ggml_div_impl(ctx, a, b, false);
  3559. }
  3560. struct ggml_tensor * ggml_div_inplace(
  3561. struct ggml_context * ctx,
  3562. struct ggml_tensor * a,
  3563. struct ggml_tensor * b) {
  3564. return ggml_div_impl(ctx, a, b, true);
  3565. }
  3566. // ggml_sqr
  3567. static struct ggml_tensor * ggml_sqr_impl(
  3568. struct ggml_context * ctx,
  3569. struct ggml_tensor * a,
  3570. bool inplace) {
  3571. bool is_node = false;
  3572. if (!inplace && (a->grad)) {
  3573. is_node = true;
  3574. }
  3575. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3576. result->op = GGML_OP_SQR;
  3577. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3578. result->src[0] = a;
  3579. return result;
  3580. }
  3581. struct ggml_tensor * ggml_sqr(
  3582. struct ggml_context * ctx,
  3583. struct ggml_tensor * a) {
  3584. return ggml_sqr_impl(ctx, a, false);
  3585. }
  3586. struct ggml_tensor * ggml_sqr_inplace(
  3587. struct ggml_context * ctx,
  3588. struct ggml_tensor * a) {
  3589. return ggml_sqr_impl(ctx, a, true);
  3590. }
  3591. // ggml_sqrt
  3592. static struct ggml_tensor * ggml_sqrt_impl(
  3593. struct ggml_context * ctx,
  3594. struct ggml_tensor * a,
  3595. bool inplace) {
  3596. bool is_node = false;
  3597. if (!inplace && (a->grad)) {
  3598. is_node = true;
  3599. }
  3600. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3601. result->op = GGML_OP_SQRT;
  3602. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3603. result->src[0] = a;
  3604. return result;
  3605. }
  3606. struct ggml_tensor * ggml_sqrt(
  3607. struct ggml_context * ctx,
  3608. struct ggml_tensor * a) {
  3609. return ggml_sqrt_impl(ctx, a, false);
  3610. }
  3611. struct ggml_tensor * ggml_sqrt_inplace(
  3612. struct ggml_context * ctx,
  3613. struct ggml_tensor * a) {
  3614. return ggml_sqrt_impl(ctx, a, true);
  3615. }
  3616. // ggml_log
  3617. static struct ggml_tensor * ggml_log_impl(
  3618. struct ggml_context * ctx,
  3619. struct ggml_tensor * a,
  3620. bool inplace) {
  3621. bool is_node = false;
  3622. if (!inplace && (a->grad)) {
  3623. is_node = true;
  3624. }
  3625. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3626. result->op = GGML_OP_LOG;
  3627. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3628. result->src[0] = a;
  3629. return result;
  3630. }
  3631. struct ggml_tensor * ggml_log(
  3632. struct ggml_context * ctx,
  3633. struct ggml_tensor * a) {
  3634. return ggml_log_impl(ctx, a, false);
  3635. }
  3636. struct ggml_tensor * ggml_log_inplace(
  3637. struct ggml_context * ctx,
  3638. struct ggml_tensor * a) {
  3639. return ggml_log_impl(ctx, a, true);
  3640. }
  3641. // ggml_sum
  3642. struct ggml_tensor * ggml_sum(
  3643. struct ggml_context * ctx,
  3644. struct ggml_tensor * a) {
  3645. bool is_node = false;
  3646. if (a->grad) {
  3647. is_node = true;
  3648. }
  3649. struct ggml_tensor * result = ggml_new_tensor_1d(ctx, a->type, 1);
  3650. result->op = GGML_OP_SUM;
  3651. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3652. result->src[0] = a;
  3653. return result;
  3654. }
  3655. // ggml_sum_rows
  3656. struct ggml_tensor * ggml_sum_rows(
  3657. struct ggml_context * ctx,
  3658. struct ggml_tensor * a) {
  3659. bool is_node = false;
  3660. if (a->grad) {
  3661. is_node = true;
  3662. }
  3663. int64_t ne[GGML_MAX_DIMS] = { 1 };
  3664. for (int i = 1; i < GGML_MAX_DIMS; ++i) {
  3665. ne[i] = a->ne[i];
  3666. }
  3667. struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, GGML_MAX_DIMS, ne);
  3668. result->op = GGML_OP_SUM_ROWS;
  3669. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3670. result->src[0] = a;
  3671. return result;
  3672. }
  3673. // ggml_mean
  3674. struct ggml_tensor * ggml_mean(
  3675. struct ggml_context * ctx,
  3676. struct ggml_tensor * a) {
  3677. bool is_node = false;
  3678. if (a->grad) {
  3679. GGML_ASSERT(false); // TODO: implement
  3680. is_node = true;
  3681. }
  3682. int64_t ne[4] = { 1, a->ne[1], a->ne[2], a->ne[3] };
  3683. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);
  3684. result->op = GGML_OP_MEAN;
  3685. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3686. result->src[0] = a;
  3687. return result;
  3688. }
  3689. // ggml_argmax
  3690. struct ggml_tensor * ggml_argmax(
  3691. struct ggml_context * ctx,
  3692. struct ggml_tensor * a) {
  3693. GGML_ASSERT(ggml_is_matrix(a));
  3694. bool is_node = false;
  3695. if (a->grad) {
  3696. GGML_ASSERT(false);
  3697. is_node = true;
  3698. }
  3699. struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, a->ne[1]);
  3700. result->op = GGML_OP_ARGMAX;
  3701. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3702. result->src[0] = a;
  3703. return result;
  3704. }
  3705. // ggml_repeat
  3706. struct ggml_tensor * ggml_repeat(
  3707. struct ggml_context * ctx,
  3708. struct ggml_tensor * a,
  3709. struct ggml_tensor * b) {
  3710. GGML_ASSERT(ggml_can_repeat(a, b));
  3711. bool is_node = false;
  3712. if (a->grad) {
  3713. is_node = true;
  3714. }
  3715. struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, GGML_MAX_DIMS, b->ne);
  3716. result->op = GGML_OP_REPEAT;
  3717. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3718. result->src[0] = a;
  3719. return result;
  3720. }
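//
// usage sketch (illustrative, not part of the library): tiling a onto the shape of b;
// only b's shape is used, not its data. assumes ctx as in the earlier sketches.
//
//   struct ggml_tensor * row = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4);    // [4]
//   struct ggml_tensor * ref = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 4, 6); // provides the target shape
//   struct ggml_tensor * rep = ggml_repeat(ctx, row, ref);                   // [4, 6]: row tiled 6 times
//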
  3721. // ggml_repeat_back
  3722. struct ggml_tensor * ggml_repeat_back(
  3723. struct ggml_context * ctx,
  3724. struct ggml_tensor * a,
  3725. struct ggml_tensor * b) {
  3726. GGML_ASSERT(ggml_can_repeat(b, a));
  3727. bool is_node = false;
  3728. if (a->grad) {
  3729. is_node = true;
  3730. }
  3731. if (ggml_are_same_shape(a, b) && !is_node) {
  3732. return a;
  3733. }
  3734. struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, GGML_MAX_DIMS, b->ne);
  3735. result->op = GGML_OP_REPEAT_BACK;
  3736. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3737. result->src[0] = a;
  3738. return result;
  3739. }
  3740. // ggml_concat
  3741. struct ggml_tensor * ggml_concat(
  3742. struct ggml_context* ctx,
  3743. struct ggml_tensor* a,
  3744. struct ggml_tensor* b) {
  3745. GGML_ASSERT(a->ne[0] == b->ne[0] && a->ne[1] == b->ne[1] && a->ne[3] == b->ne[3]);
  3746. bool is_node = false;
  3747. if (a->grad || b->grad) {
  3748. is_node = true;
  3749. }
  3750. struct ggml_tensor * result = ggml_new_tensor_4d(ctx, a->type, a->ne[0], a->ne[1], a->ne[2] + b->ne[2], a->ne[3]);
  3751. result->op = GGML_OP_CONCAT;
  3752. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3753. result->src[0] = a;
  3754. result->src[1] = b;
  3755. return result;
  3756. }
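//
// usage sketch (illustrative, not part of the library): this version of ggml_concat
// joins along dim 2 and requires the remaining dims to match. assumes ctx as in the
// earlier sketches.
//
//   struct ggml_tensor * x = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, 16, 8, 2);
//   struct ggml_tensor * y = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, 16, 8, 5);
//   struct ggml_tensor * z = ggml_concat(ctx, x, y); // [16, 8, 7, 1]
//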
  3757. // ggml_abs
  3758. struct ggml_tensor * ggml_abs(
  3759. struct ggml_context * ctx,
  3760. struct ggml_tensor * a) {
  3761. return ggml_unary(ctx, a, GGML_UNARY_OP_ABS);
  3762. }
  3763. struct ggml_tensor * ggml_abs_inplace(
  3764. struct ggml_context * ctx,
  3765. struct ggml_tensor * a) {
  3766. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_ABS);
  3767. }
  3768. // ggml_sgn
  3769. struct ggml_tensor * ggml_sgn(
  3770. struct ggml_context * ctx,
  3771. struct ggml_tensor * a) {
  3772. return ggml_unary(ctx, a, GGML_UNARY_OP_SGN);
  3773. }
  3774. struct ggml_tensor * ggml_sgn_inplace(
  3775. struct ggml_context * ctx,
  3776. struct ggml_tensor * a) {
  3777. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_SGN);
  3778. }
  3779. // ggml_neg
  3780. struct ggml_tensor * ggml_neg(
  3781. struct ggml_context * ctx,
  3782. struct ggml_tensor * a) {
  3783. return ggml_unary(ctx, a, GGML_UNARY_OP_NEG);
  3784. }
  3785. struct ggml_tensor * ggml_neg_inplace(
  3786. struct ggml_context * ctx,
  3787. struct ggml_tensor * a) {
  3788. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_NEG);
  3789. }
  3790. // ggml_step
  3791. struct ggml_tensor * ggml_step(
  3792. struct ggml_context * ctx,
  3793. struct ggml_tensor * a) {
  3794. return ggml_unary(ctx, a, GGML_UNARY_OP_STEP);
  3795. }
  3796. struct ggml_tensor * ggml_step_inplace(
  3797. struct ggml_context * ctx,
  3798. struct ggml_tensor * a) {
  3799. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_STEP);
  3800. }
  3801. // ggml_tanh
  3802. struct ggml_tensor * ggml_tanh(
  3803. struct ggml_context * ctx,
  3804. struct ggml_tensor * a) {
  3805. return ggml_unary(ctx, a, GGML_UNARY_OP_TANH);
  3806. }
  3807. struct ggml_tensor * ggml_tanh_inplace(
  3808. struct ggml_context * ctx,
  3809. struct ggml_tensor * a) {
  3810. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_TANH);
  3811. }
  3812. // ggml_elu
  3813. struct ggml_tensor * ggml_elu(
  3814. struct ggml_context * ctx,
  3815. struct ggml_tensor * a) {
  3816. return ggml_unary(ctx, a, GGML_UNARY_OP_ELU);
  3817. }
  3818. struct ggml_tensor * ggml_elu_inplace(
  3819. struct ggml_context * ctx,
  3820. struct ggml_tensor * a) {
  3821. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_ELU);
  3822. }
  3823. // ggml_relu
  3824. struct ggml_tensor * ggml_relu(
  3825. struct ggml_context * ctx,
  3826. struct ggml_tensor * a) {
  3827. return ggml_unary(ctx, a, GGML_UNARY_OP_RELU);
  3828. }
  3829. struct ggml_tensor * ggml_relu_inplace(
  3830. struct ggml_context * ctx,
  3831. struct ggml_tensor * a) {
  3832. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_RELU);
  3833. }
  3834. // ggml_leaky_relu
  3835. struct ggml_tensor * ggml_leaky_relu(
  3836. struct ggml_context * ctx,
  3837. struct ggml_tensor * a, float negative_slope, bool inplace) {
  3838. bool is_node = false;
  3839. if (!inplace && (a->grad)) {
  3840. is_node = true;
  3841. }
  3842. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3843. ggml_set_op_params(result, &negative_slope, sizeof(negative_slope));
  3844. result->op = GGML_OP_LEAKY_RELU;
  3845. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3846. result->src[0] = a;
  3847. return result;
  3848. }
  3849. // ggml_sigmoid
  3850. struct ggml_tensor * ggml_sigmoid(
  3851. struct ggml_context * ctx,
  3852. struct ggml_tensor * a) {
  3853. return ggml_unary(ctx, a, GGML_UNARY_OP_SIGMOID);
  3854. }
  3855. struct ggml_tensor * ggml_sigmoid_inplace(
  3856. struct ggml_context * ctx,
  3857. struct ggml_tensor * a) {
  3858. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_SIGMOID);
  3859. }
  3860. // ggml_gelu
  3861. struct ggml_tensor * ggml_gelu(
  3862. struct ggml_context * ctx,
  3863. struct ggml_tensor * a) {
  3864. return ggml_unary(ctx, a, GGML_UNARY_OP_GELU);
  3865. }
  3866. struct ggml_tensor * ggml_gelu_inplace(
  3867. struct ggml_context * ctx,
  3868. struct ggml_tensor * a) {
  3869. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_GELU);
  3870. }
  3871. // ggml_gelu_quick
  3872. struct ggml_tensor * ggml_gelu_quick(
  3873. struct ggml_context * ctx,
  3874. struct ggml_tensor * a) {
  3875. return ggml_unary(ctx, a, GGML_UNARY_OP_GELU_QUICK);
  3876. }
  3877. struct ggml_tensor * ggml_gelu_quick_inplace(
  3878. struct ggml_context * ctx,
  3879. struct ggml_tensor * a) {
  3880. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_GELU_QUICK);
  3881. }
  3882. // ggml_silu
  3883. struct ggml_tensor * ggml_silu(
  3884. struct ggml_context * ctx,
  3885. struct ggml_tensor * a) {
  3886. return ggml_unary(ctx, a, GGML_UNARY_OP_SILU);
  3887. }
  3888. struct ggml_tensor * ggml_silu_inplace(
  3889. struct ggml_context * ctx,
  3890. struct ggml_tensor * a) {
  3891. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_SILU);
  3892. }
  3893. // ggml_silu_back
  3894. struct ggml_tensor * ggml_silu_back(
  3895. struct ggml_context * ctx,
  3896. struct ggml_tensor * a,
  3897. struct ggml_tensor * b) {
  3898. bool is_node = false;
  3899. if (a->grad || b->grad) {
  3900. // TODO: implement backward
  3901. is_node = true;
  3902. }
  3903. struct ggml_tensor * result = ggml_dup_tensor(ctx, a);
  3904. result->op = GGML_OP_SILU_BACK;
  3905. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3906. result->src[0] = a;
  3907. result->src[1] = b;
  3908. return result;
  3909. }
3910. // ggml_hardswish
  3911. struct ggml_tensor * ggml_hardswish(
  3912. struct ggml_context * ctx,
  3913. struct ggml_tensor * a) {
  3914. return ggml_unary(ctx, a, GGML_UNARY_OP_HARDSWISH);
  3915. }
3916. // ggml_hardsigmoid
  3917. struct ggml_tensor * ggml_hardsigmoid(
  3918. struct ggml_context * ctx,
  3919. struct ggml_tensor * a) {
  3920. return ggml_unary(ctx, a, GGML_UNARY_OP_HARDSIGMOID);
  3921. }
  3922. // ggml_norm
  3923. static struct ggml_tensor * ggml_norm_impl(
  3924. struct ggml_context * ctx,
  3925. struct ggml_tensor * a,
  3926. float eps,
  3927. bool inplace) {
  3928. bool is_node = false;
  3929. if (!inplace && (a->grad)) {
  3930. GGML_ASSERT(false); // TODO: implement backward
  3931. is_node = true;
  3932. }
  3933. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3934. ggml_set_op_params(result, &eps, sizeof(eps));
  3935. result->op = GGML_OP_NORM;
  3936. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3937. result->src[0] = a;
  3938. return result;
  3939. }
  3940. struct ggml_tensor * ggml_norm(
  3941. struct ggml_context * ctx,
  3942. struct ggml_tensor * a,
  3943. float eps) {
  3944. return ggml_norm_impl(ctx, a, eps, false);
  3945. }
  3946. struct ggml_tensor * ggml_norm_inplace(
  3947. struct ggml_context * ctx,
  3948. struct ggml_tensor * a,
  3949. float eps) {
  3950. return ggml_norm_impl(ctx, a, eps, true);
  3951. }
  3952. // ggml_rms_norm
  3953. static struct ggml_tensor * ggml_rms_norm_impl(
  3954. struct ggml_context * ctx,
  3955. struct ggml_tensor * a,
  3956. float eps,
  3957. bool inplace) {
  3958. bool is_node = false;
  3959. if (!inplace && (a->grad)) {
  3960. is_node = true;
  3961. }
  3962. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3963. ggml_set_op_params(result, &eps, sizeof(eps));
  3964. result->op = GGML_OP_RMS_NORM;
  3965. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3966. result->src[0] = a;
  3967. return result;
  3968. }
  3969. struct ggml_tensor * ggml_rms_norm(
  3970. struct ggml_context * ctx,
  3971. struct ggml_tensor * a,
  3972. float eps) {
  3973. return ggml_rms_norm_impl(ctx, a, eps, false);
  3974. }
  3975. struct ggml_tensor * ggml_rms_norm_inplace(
  3976. struct ggml_context * ctx,
  3977. struct ggml_tensor * a,
  3978. float eps) {
  3979. return ggml_rms_norm_impl(ctx, a, eps, true);
  3980. }
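//
// usage sketch (illustrative, not part of the library): the common RMSNorm-then-scale
// pattern; the eps value is just an illustration. assumes ctx as in the earlier sketches.
//
//   const int64_t n_embd = 32, n_tokens = 4;
//   struct ggml_tensor * x = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_tokens);
//   struct ggml_tensor * w = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); // learned norm weight
//
//   struct ggml_tensor * cur = ggml_rms_norm(ctx, x, 1e-5f);
//   cur = ggml_mul(ctx, cur, w); // w is broadcast across the token dimension
//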
  3981. // ggml_rms_norm_back
  3982. struct ggml_tensor * ggml_rms_norm_back(
  3983. struct ggml_context * ctx,
  3984. struct ggml_tensor * a,
  3985. struct ggml_tensor * b,
  3986. float eps) {
  3987. bool is_node = false;
  3988. if (a->grad) {
  3989. // TODO: implement backward
  3990. is_node = true;
  3991. }
  3992. struct ggml_tensor * result = ggml_dup_tensor(ctx, a);
  3993. ggml_set_op_params(result, &eps, sizeof(eps));
  3994. result->op = GGML_OP_RMS_NORM_BACK;
  3995. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3996. result->src[0] = a;
  3997. result->src[1] = b;
  3998. return result;
  3999. }
  4000. // ggml_group_norm
  4001. static struct ggml_tensor * ggml_group_norm_impl(
  4002. struct ggml_context * ctx,
  4003. struct ggml_tensor * a,
  4004. int n_groups,
  4005. bool inplace) {
  4006. bool is_node = false;
  4007. if (!inplace && (a->grad)) {
  4008. GGML_ASSERT(false); // TODO: implement backward
  4009. is_node = true;
  4010. }
  4011. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4012. result->op_params[0] = n_groups;
  4013. result->op = GGML_OP_GROUP_NORM;
  4014. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4015. result->src[0] = a;
  4016. return result;
  4017. }
  4018. struct ggml_tensor * ggml_group_norm(
  4019. struct ggml_context * ctx,
  4020. struct ggml_tensor * a,
  4021. int n_groups) {
  4022. return ggml_group_norm_impl(ctx, a, n_groups, false);
  4023. }
  4024. struct ggml_tensor * ggml_group_norm_inplace(
  4025. struct ggml_context * ctx,
  4026. struct ggml_tensor * a,
  4027. int n_groups) {
  4028. return ggml_group_norm_impl(ctx, a, n_groups, true);
  4029. }
  4030. // ggml_mul_mat
  4031. struct ggml_tensor * ggml_mul_mat(
  4032. struct ggml_context * ctx,
  4033. struct ggml_tensor * a,
  4034. struct ggml_tensor * b) {
  4035. GGML_ASSERT(ggml_can_mul_mat(a, b));
  4036. GGML_ASSERT(!ggml_is_transposed(a));
  4037. bool is_node = false;
  4038. if (a->grad || b->grad) {
  4039. is_node = true;
  4040. }
  4041. const int64_t ne[4] = { a->ne[1], b->ne[1], b->ne[2], b->ne[3] };
  4042. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);
  4043. result->op = GGML_OP_MUL_MAT;
  4044. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4045. result->src[0] = a;
  4046. result->src[1] = b;
  4047. return result;
  4048. }
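//
// usage sketch (illustrative, not part of the library): ggml_mul_mat keeps the shared
// (reduction) dimension in ne[0] of both operands:
//   a: [K, M], b: [K, N]  ->  c: [M, N] with c[m, n] = sum_k a[k, m] * b[k, n]
// assumes ctx as in the earlier sketches.
//
//   const int64_t K = 64, M = 16, N = 4;
//   struct ggml_tensor * a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, K, M);
//   struct ggml_tensor * b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, K, N);
//   struct ggml_tensor * c = ggml_mul_mat(ctx, a, b); // [M, N], always GGML_TYPE_F32
//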
  4049. void ggml_mul_mat_set_prec(
  4050. struct ggml_tensor * a,
  4051. enum ggml_prec prec) {
  4052. GGML_ASSERT(a->op == GGML_OP_MUL_MAT);
  4053. const int32_t prec_i32 = (int32_t) prec;
  4054. ggml_set_op_params_i32(a, 0, prec_i32);
  4055. }
  4056. // ggml_mul_mat_id
  4057. /*
  4058. c = ggml_mul_mat_id(ctx, as, b, ids);
  4059. as -> [cols, rows, n_expert]
4060. ids -> [n_expert_used, n_tokens] (i32)
4061. b -> [cols, n_expert_used, n_tokens]
4062. c -> [rows, n_expert_used, n_tokens]
4063. in b, n_expert_used can be broadcast to match the n_expert_used of ids
  4064. c ~= as[:,:,i] @ b[:,i%r,t], i = ids[e,t] for all e,t in ids
  4065. */
  4066. struct ggml_tensor * ggml_mul_mat_id(
  4067. struct ggml_context * ctx,
  4068. struct ggml_tensor * as,
  4069. struct ggml_tensor * b,
  4070. struct ggml_tensor * ids) {
  4071. GGML_ASSERT(!ggml_is_transposed(as));
  4072. GGML_ASSERT(ids->type == GGML_TYPE_I32);
  4073. GGML_ASSERT(as->ne[3] == 1); // as is 3d (one matrix per expert)
  4074. GGML_ASSERT(b->ne[3] == 1); // b is 3d
  4075. GGML_ASSERT(ids->ne[2] == 1 && ids->ne[3] == 1); // ids is 2d
  4076. GGML_ASSERT(ids->ne[1] == b->ne[2]); // must have an expert list per b row
  4077. GGML_ASSERT(as->ne[0] == b->ne[0]); // can_mul_mat
  4078. GGML_ASSERT(ids->ne[0] % b->ne[1] == 0); // can broadcast
  4079. bool is_node = false;
  4080. if (as->grad || b->grad) {
  4081. is_node = true;
  4082. }
  4083. const int64_t ne[4] = { as->ne[1], ids->ne[0], b->ne[2], 1 };
  4084. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);
  4085. result->op = GGML_OP_MUL_MAT_ID;
  4086. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4087. result->src[0] = as;
  4088. result->src[1] = b;
  4089. result->src[2] = ids;
  4090. return result;
  4091. }
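//
// usage sketch (illustrative, not part of the library): MoE-style expert selection
// matching the shape comment above; the sizes are arbitrary. assumes ctx as in the
// earlier sketches.
//
//   const int64_t n_embd = 32, n_ff = 64, n_expert = 8, n_expert_used = 2, n_tokens = 4;
//   struct ggml_tensor * as  = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, n_embd, n_ff, n_expert);
//   struct ggml_tensor * b   = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, n_embd, n_expert_used, n_tokens);
//   struct ggml_tensor * ids = ggml_new_tensor_2d(ctx, GGML_TYPE_I32,  n_expert_used, n_tokens); // expert index per (slot, token)
//   struct ggml_tensor * c   = ggml_mul_mat_id(ctx, as, b, ids); // [n_ff, n_expert_used, n_tokens]
//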
  4092. // ggml_out_prod
  4093. struct ggml_tensor * ggml_out_prod(
  4094. struct ggml_context * ctx,
  4095. struct ggml_tensor * a,
  4096. struct ggml_tensor * b) {
  4097. GGML_ASSERT(ggml_can_out_prod(a, b));
  4098. GGML_ASSERT(!ggml_is_transposed(a));
  4099. bool is_node = false;
  4100. if (a->grad || b->grad) {
  4101. is_node = true;
  4102. }
  4103. // a is broadcastable to b for ne[2] and ne[3] -> use b->ne[2] and b->ne[3]
  4104. const int64_t ne[4] = { a->ne[0], b->ne[0], b->ne[2], b->ne[3] };
  4105. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);
  4106. result->op = GGML_OP_OUT_PROD;
  4107. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4108. result->src[0] = a;
  4109. result->src[1] = b;
  4110. return result;
  4111. }
  4112. // ggml_scale
  4113. static struct ggml_tensor * ggml_scale_impl(
  4114. struct ggml_context * ctx,
  4115. struct ggml_tensor * a,
  4116. float s,
  4117. bool inplace) {
  4118. GGML_ASSERT(ggml_is_padded_1d(a));
  4119. bool is_node = false;
  4120. if (a->grad) {
  4121. is_node = true;
  4122. }
  4123. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4124. ggml_set_op_params(result, &s, sizeof(s));
  4125. result->op = GGML_OP_SCALE;
  4126. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4127. result->src[0] = a;
  4128. return result;
  4129. }
  4130. struct ggml_tensor * ggml_scale(
  4131. struct ggml_context * ctx,
  4132. struct ggml_tensor * a,
  4133. float s) {
  4134. return ggml_scale_impl(ctx, a, s, false);
  4135. }
  4136. struct ggml_tensor * ggml_scale_inplace(
  4137. struct ggml_context * ctx,
  4138. struct ggml_tensor * a,
  4139. float s) {
  4140. return ggml_scale_impl(ctx, a, s, true);
  4141. }
  4142. // ggml_set
  4143. static struct ggml_tensor * ggml_set_impl(
  4144. struct ggml_context * ctx,
  4145. struct ggml_tensor * a,
  4146. struct ggml_tensor * b,
  4147. size_t nb1,
  4148. size_t nb2,
  4149. size_t nb3,
  4150. size_t offset,
  4151. bool inplace) {
  4152. GGML_ASSERT(ggml_nelements(a) >= ggml_nelements(b));
  4153. bool is_node = false;
  4154. if (a->grad || b->grad) {
  4155. is_node = true;
  4156. }
  4157. // make a view of the destination
  4158. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4159. int32_t params[] = { nb1, nb2, nb3, offset, inplace ? 1 : 0 };
  4160. ggml_set_op_params(result, params, sizeof(params));
  4161. result->op = GGML_OP_SET;
  4162. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4163. result->src[0] = a;
  4164. result->src[1] = b;
  4165. return result;
  4166. }
  4167. struct ggml_tensor * ggml_set(
  4168. struct ggml_context * ctx,
  4169. struct ggml_tensor * a,
  4170. struct ggml_tensor * b,
  4171. size_t nb1,
  4172. size_t nb2,
  4173. size_t nb3,
  4174. size_t offset) {
  4175. return ggml_set_impl(ctx, a, b, nb1, nb2, nb3, offset, false);
  4176. }
  4177. struct ggml_tensor * ggml_set_inplace(
  4178. struct ggml_context * ctx,
  4179. struct ggml_tensor * a,
  4180. struct ggml_tensor * b,
  4181. size_t nb1,
  4182. size_t nb2,
  4183. size_t nb3,
  4184. size_t offset) {
  4185. return ggml_set_impl(ctx, a, b, nb1, nb2, nb3, offset, true);
  4186. }
  4187. struct ggml_tensor * ggml_set_1d(
  4188. struct ggml_context * ctx,
  4189. struct ggml_tensor * a,
  4190. struct ggml_tensor * b,
  4191. size_t offset) {
  4192. return ggml_set_impl(ctx, a, b, a->nb[1], a->nb[2], a->nb[3], offset, false);
  4193. }
  4194. struct ggml_tensor * ggml_set_1d_inplace(
  4195. struct ggml_context * ctx,
  4196. struct ggml_tensor * a,
  4197. struct ggml_tensor * b,
  4198. size_t offset) {
  4199. return ggml_set_impl(ctx, a, b, a->nb[1], a->nb[2], a->nb[3], offset, true);
  4200. }
  4201. struct ggml_tensor * ggml_set_2d(
  4202. struct ggml_context * ctx,
  4203. struct ggml_tensor * a,
  4204. struct ggml_tensor * b,
  4205. size_t nb1,
  4206. size_t offset) {
  4207. return ggml_set_impl(ctx, a, b, nb1, a->nb[2], a->nb[3], offset, false);
  4208. }
  4209. struct ggml_tensor * ggml_set_2d_inplace(
  4210. struct ggml_context * ctx,
  4211. struct ggml_tensor * a,
  4212. struct ggml_tensor * b,
  4213. size_t nb1,
  4214. size_t offset) {
  4215. return ggml_set_impl(ctx, a, b, nb1, a->nb[2], a->nb[3], offset, true);
  4216. }
  4217. // ggml_cpy
  4218. static struct ggml_tensor * ggml_cpy_impl(
  4219. struct ggml_context * ctx,
  4220. struct ggml_tensor * a,
  4221. struct ggml_tensor * b) {
  4222. GGML_ASSERT(ggml_nelements(a) == ggml_nelements(b));
  4223. bool is_node = false;
  4224. if (a->grad || b->grad) {
4225. // inplace is false and either one has a grad
  4226. is_node = true;
  4227. }
  4228. // make a view of the destination
  4229. struct ggml_tensor * result = ggml_view_tensor(ctx, b);
  4230. if (strlen(b->name) > 0) {
  4231. ggml_format_name(result, "%s (copy of %s)", b->name, a->name);
  4232. } else {
  4233. ggml_format_name(result, "%s (copy)", a->name);
  4234. }
  4235. result->op = GGML_OP_CPY;
  4236. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4237. result->src[0] = a;
  4238. result->src[1] = b;
  4239. return result;
  4240. }
  4241. struct ggml_tensor * ggml_cpy(
  4242. struct ggml_context * ctx,
  4243. struct ggml_tensor * a,
  4244. struct ggml_tensor * b) {
  4245. return ggml_cpy_impl(ctx, a, b);
  4246. }
  4247. struct ggml_tensor * ggml_cast(
  4248. struct ggml_context * ctx,
  4249. struct ggml_tensor * a,
  4250. enum ggml_type type) {
  4251. bool is_node = false;
  4252. struct ggml_tensor * result = ggml_new_tensor(ctx, type, GGML_MAX_DIMS, a->ne);
  4253. ggml_format_name(result, "%s (copy)", a->name);
  4254. result->op = GGML_OP_CPY;
  4255. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4256. result->src[0] = a;
  4257. result->src[1] = result;
  4258. return result;
  4259. }
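//
// usage sketch (illustrative, not part of the library): ggml_cpy evaluates into an
// existing destination (which may have a different type and/or layout), while
// ggml_cast allocates the destination for you. assumes ctx as in the earlier sketches.
//
//   struct ggml_tensor * src = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 8, 8);
//
//   struct ggml_tensor * dst  = ggml_new_tensor_2d(ctx, GGML_TYPE_F16, 8, 8);
//   struct ggml_tensor * copy = ggml_cpy(ctx, src, dst); // converts f32 -> f16 into dst's buffer
//
//   struct ggml_tensor * half = ggml_cast(ctx, src, GGML_TYPE_F16); // same conversion, new tensor
//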
  4260. // ggml_cont
  4261. static struct ggml_tensor * ggml_cont_impl(
  4262. struct ggml_context * ctx,
  4263. struct ggml_tensor * a) {
  4264. bool is_node = false;
  4265. if (a->grad) {
  4266. is_node = true;
  4267. }
  4268. struct ggml_tensor * result = ggml_dup_tensor(ctx, a);
  4269. ggml_format_name(result, "%s (cont)", a->name);
  4270. result->op = GGML_OP_CONT;
  4271. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4272. result->src[0] = a;
  4273. return result;
  4274. }
  4275. struct ggml_tensor * ggml_cont(
  4276. struct ggml_context * ctx,
  4277. struct ggml_tensor * a) {
  4278. return ggml_cont_impl(ctx, a);
  4279. }
  4280. // make contiguous, with new shape
  4281. GGML_API struct ggml_tensor * ggml_cont_1d(
  4282. struct ggml_context * ctx,
  4283. struct ggml_tensor * a,
  4284. int64_t ne0) {
  4285. return ggml_cont_4d(ctx, a, ne0, 1, 1, 1);
  4286. }
  4287. GGML_API struct ggml_tensor * ggml_cont_2d(
  4288. struct ggml_context * ctx,
  4289. struct ggml_tensor * a,
  4290. int64_t ne0,
  4291. int64_t ne1) {
  4292. return ggml_cont_4d(ctx, a, ne0, ne1, 1, 1);
  4293. }
  4294. GGML_API struct ggml_tensor * ggml_cont_3d(
  4295. struct ggml_context * ctx,
  4296. struct ggml_tensor * a,
  4297. int64_t ne0,
  4298. int64_t ne1,
  4299. int64_t ne2) {
  4300. return ggml_cont_4d(ctx, a, ne0, ne1, ne2, 1);
  4301. }
  4302. struct ggml_tensor * ggml_cont_4d(
  4303. struct ggml_context * ctx,
  4304. struct ggml_tensor * a,
  4305. int64_t ne0,
  4306. int64_t ne1,
  4307. int64_t ne2,
  4308. int64_t ne3) {
  4309. GGML_ASSERT(ggml_nelements(a) == (ne0*ne1*ne2*ne3));
  4310. bool is_node = false;
  4311. struct ggml_tensor * result = ggml_new_tensor_4d(ctx, a->type, ne0, ne1, ne2, ne3);
  4312. ggml_format_name(result, "%s (cont)", a->name);
  4313. result->op = GGML_OP_CONT;
  4314. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4315. result->src[0] = a;
  4316. return result;
  4317. }
  4318. // ggml_reshape
  4319. struct ggml_tensor * ggml_reshape(
  4320. struct ggml_context * ctx,
  4321. struct ggml_tensor * a,
  4322. struct ggml_tensor * b) {
  4323. GGML_ASSERT(ggml_is_contiguous(a));
4324. // as only the shape of b is relevant, and not its memory layout, b is allowed to be non-contiguous.
  4325. GGML_ASSERT(ggml_nelements(a) == ggml_nelements(b));
  4326. bool is_node = false;
  4327. if (a->grad) {
  4328. is_node = true;
  4329. }
  4330. if (b->grad) {
  4331. // gradient propagation is not supported
  4332. //GGML_ASSERT(false);
  4333. }
  4334. struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, GGML_MAX_DIMS, b->ne, a, 0);
  4335. ggml_format_name(result, "%s (reshaped)", a->name);
  4336. result->op = GGML_OP_RESHAPE;
  4337. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4338. result->src[0] = a;
  4339. return result;
  4340. }
  4341. struct ggml_tensor * ggml_reshape_1d(
  4342. struct ggml_context * ctx,
  4343. struct ggml_tensor * a,
  4344. int64_t ne0) {
  4345. GGML_ASSERT(ggml_is_contiguous(a));
  4346. GGML_ASSERT(ggml_nelements(a) == ne0);
  4347. bool is_node = false;
  4348. if (a->grad) {
  4349. is_node = true;
  4350. }
  4351. const int64_t ne[1] = { ne0 };
  4352. struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 1, ne, a, 0);
  4353. ggml_format_name(result, "%s (reshaped)", a->name);
  4354. result->op = GGML_OP_RESHAPE;
  4355. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4356. result->src[0] = a;
  4357. return result;
  4358. }
  4359. struct ggml_tensor * ggml_reshape_2d(
  4360. struct ggml_context * ctx,
  4361. struct ggml_tensor * a,
  4362. int64_t ne0,
  4363. int64_t ne1) {
  4364. GGML_ASSERT(ggml_is_contiguous(a));
  4365. GGML_ASSERT(ggml_nelements(a) == ne0*ne1);
  4366. bool is_node = false;
  4367. if (a->grad) {
  4368. is_node = true;
  4369. }
  4370. const int64_t ne[2] = { ne0, ne1 };
  4371. struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 2, ne, a, 0);
  4372. ggml_format_name(result, "%s (reshaped)", a->name);
  4373. result->op = GGML_OP_RESHAPE;
  4374. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4375. result->src[0] = a;
  4376. return result;
  4377. }
  4378. struct ggml_tensor * ggml_reshape_3d(
  4379. struct ggml_context * ctx,
  4380. struct ggml_tensor * a,
  4381. int64_t ne0,
  4382. int64_t ne1,
  4383. int64_t ne2) {
  4384. GGML_ASSERT(ggml_is_contiguous(a));
  4385. GGML_ASSERT(ggml_nelements(a) == ne0*ne1*ne2);
  4386. bool is_node = false;
  4387. if (a->grad) {
  4388. is_node = true;
  4389. }
  4390. const int64_t ne[3] = { ne0, ne1, ne2 };
  4391. struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 3, ne, a, 0);
  4392. ggml_format_name(result, "%s (reshaped)", a->name);
  4393. result->op = GGML_OP_RESHAPE;
  4394. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4395. result->src[0] = a;
  4396. return result;
  4397. }
  4398. struct ggml_tensor * ggml_reshape_4d(
  4399. struct ggml_context * ctx,
  4400. struct ggml_tensor * a,
  4401. int64_t ne0,
  4402. int64_t ne1,
  4403. int64_t ne2,
  4404. int64_t ne3) {
  4405. GGML_ASSERT(ggml_is_contiguous(a));
  4406. GGML_ASSERT(ggml_nelements(a) == ne0*ne1*ne2*ne3);
  4407. bool is_node = false;
  4408. if (a->grad) {
  4409. is_node = true;
  4410. }
  4411. const int64_t ne[4] = { ne0, ne1, ne2, ne3 };
  4412. struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 4, ne, a, 0);
  4413. ggml_format_name(result, "%s (reshaped)", a->name);
  4414. result->op = GGML_OP_RESHAPE;
  4415. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4416. result->src[0] = a;
  4417. return result;
  4418. }
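//
// usage sketch (illustrative, not part of the library): reshape requires a contiguous
// source and reinterprets the same data with a new shape (a view, no copy). assumes
// ctx as in the earlier sketches.
//
//   struct ggml_tensor * x  = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 32, 6);   // 192 elements
//   struct ggml_tensor * x3 = ggml_reshape_3d(ctx, x, 8, 4, 6);                // same data, viewed as [8, 4, 6]
//   struct ggml_tensor * x1 = ggml_reshape_1d(ctx, x3, ggml_nelements(x3));    // flattened back to [192]
//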
  4419. static struct ggml_tensor * ggml_view_impl(
  4420. struct ggml_context * ctx,
  4421. struct ggml_tensor * a,
  4422. int n_dims,
  4423. const int64_t * ne,
  4424. size_t offset) {
  4425. bool is_node = false;
  4426. if (a->grad) {
  4427. is_node = true;
  4428. }
  4429. struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, n_dims, ne, a, offset);
  4430. ggml_format_name(result, "%s (view)", a->name);
  4431. ggml_set_op_params(result, &offset, sizeof(offset));
  4432. result->op = GGML_OP_VIEW;
  4433. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4434. result->src[0] = a;
  4435. return result;
  4436. }
  4437. // ggml_view_1d
  4438. struct ggml_tensor * ggml_view_1d(
  4439. struct ggml_context * ctx,
  4440. struct ggml_tensor * a,
  4441. int64_t ne0,
  4442. size_t offset) {
  4443. struct ggml_tensor * result = ggml_view_impl(ctx, a, 1, &ne0, offset);
  4444. return result;
  4445. }
  4446. // ggml_view_2d
  4447. struct ggml_tensor * ggml_view_2d(
  4448. struct ggml_context * ctx,
  4449. struct ggml_tensor * a,
  4450. int64_t ne0,
  4451. int64_t ne1,
  4452. size_t nb1,
  4453. size_t offset) {
  4454. const int64_t ne[2] = { ne0, ne1 };
  4455. struct ggml_tensor * result = ggml_view_impl(ctx, a, 2, ne, offset);
  4456. result->nb[1] = nb1;
  4457. result->nb[2] = result->nb[1]*ne1;
  4458. result->nb[3] = result->nb[2];
  4459. return result;
  4460. }
  4461. // ggml_view_3d
  4462. struct ggml_tensor * ggml_view_3d(
  4463. struct ggml_context * ctx,
  4464. struct ggml_tensor * a,
  4465. int64_t ne0,
  4466. int64_t ne1,
  4467. int64_t ne2,
  4468. size_t nb1,
  4469. size_t nb2,
  4470. size_t offset) {
  4471. const int64_t ne[3] = { ne0, ne1, ne2 };
  4472. struct ggml_tensor * result = ggml_view_impl(ctx, a, 3, ne, offset);
  4473. result->nb[1] = nb1;
  4474. result->nb[2] = nb2;
  4475. result->nb[3] = result->nb[2]*ne2;
  4476. return result;
  4477. }
  4478. // ggml_view_4d
  4479. struct ggml_tensor * ggml_view_4d(
  4480. struct ggml_context * ctx,
  4481. struct ggml_tensor * a,
  4482. int64_t ne0,
  4483. int64_t ne1,
  4484. int64_t ne2,
  4485. int64_t ne3,
  4486. size_t nb1,
  4487. size_t nb2,
  4488. size_t nb3,
  4489. size_t offset) {
  4490. const int64_t ne[4] = { ne0, ne1, ne2, ne3 };
  4491. struct ggml_tensor * result = ggml_view_impl(ctx, a, 4, ne, offset);
  4492. result->nb[1] = nb1;
  4493. result->nb[2] = nb2;
  4494. result->nb[3] = nb3;
  4495. return result;
  4496. }
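//
// usage sketch (illustrative, not part of the library): extracting a sub-block with
// ggml_view_2d; ne are element counts, nb1 is the parent's row stride in bytes and
// offset is in bytes. assumes ctx as in the earlier sketches.
//
//   struct ggml_tensor * m = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 8, 8);
//
//   // 4x4 window of m starting at row 2, column 2
//   struct ggml_tensor * v = ggml_view_2d(ctx, m, 4, 4,
//           m->nb[1],                 // keep the parent's row stride
//           2*m->nb[1] + 2*m->nb[0]); // skip 2 rows and 2 columns
//   // v shares m's data and is generally not contiguous; use ggml_cont(ctx, v) where needed
//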
  4497. // ggml_permute
  4498. struct ggml_tensor * ggml_permute(
  4499. struct ggml_context * ctx,
  4500. struct ggml_tensor * a,
  4501. int axis0,
  4502. int axis1,
  4503. int axis2,
  4504. int axis3) {
  4505. GGML_ASSERT(axis0 >= 0 && axis0 < GGML_MAX_DIMS);
  4506. GGML_ASSERT(axis1 >= 0 && axis1 < GGML_MAX_DIMS);
  4507. GGML_ASSERT(axis2 >= 0 && axis2 < GGML_MAX_DIMS);
  4508. GGML_ASSERT(axis3 >= 0 && axis3 < GGML_MAX_DIMS);
  4509. GGML_ASSERT(axis0 != axis1);
  4510. GGML_ASSERT(axis0 != axis2);
  4511. GGML_ASSERT(axis0 != axis3);
  4512. GGML_ASSERT(axis1 != axis2);
  4513. GGML_ASSERT(axis1 != axis3);
  4514. GGML_ASSERT(axis2 != axis3);
  4515. bool is_node = false;
  4516. if (a->grad) {
  4517. is_node = true;
  4518. }
  4519. struct ggml_tensor * result = ggml_view_tensor(ctx, a);
  4520. ggml_format_name(result, "%s (permuted)", a->name);
  4521. int ne[GGML_MAX_DIMS];
  4522. int nb[GGML_MAX_DIMS];
  4523. ne[axis0] = a->ne[0];
  4524. ne[axis1] = a->ne[1];
  4525. ne[axis2] = a->ne[2];
  4526. ne[axis3] = a->ne[3];
  4527. nb[axis0] = a->nb[0];
  4528. nb[axis1] = a->nb[1];
  4529. nb[axis2] = a->nb[2];
  4530. nb[axis3] = a->nb[3];
  4531. result->ne[0] = ne[0];
  4532. result->ne[1] = ne[1];
  4533. result->ne[2] = ne[2];
  4534. result->ne[3] = ne[3];
  4535. result->nb[0] = nb[0];
  4536. result->nb[1] = nb[1];
  4537. result->nb[2] = nb[2];
  4538. result->nb[3] = nb[3];
  4539. result->op = GGML_OP_PERMUTE;
  4540. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4541. result->src[0] = a;
  4542. int32_t params[] = { axis0, axis1, axis2, axis3 };
  4543. ggml_set_op_params(result, params, sizeof(params));
  4544. return result;
  4545. }
  4546. // ggml_transpose
  4547. struct ggml_tensor * ggml_transpose(
  4548. struct ggml_context * ctx,
  4549. struct ggml_tensor * a) {
  4550. bool is_node = false;
  4551. if (a->grad) {
  4552. is_node = true;
  4553. }
  4554. struct ggml_tensor * result = ggml_view_tensor(ctx, a);
  4555. ggml_format_name(result, "%s (transposed)", a->name);
  4556. result->ne[0] = a->ne[1];
  4557. result->ne[1] = a->ne[0];
  4558. result->nb[0] = a->nb[1];
  4559. result->nb[1] = a->nb[0];
  4560. result->op = GGML_OP_TRANSPOSE;
  4561. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4562. result->src[0] = a;
  4563. return result;
  4564. }
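//
// usage sketch (illustrative, not part of the library): permute/transpose only shuffle
// ne/nb (no data movement); follow with ggml_cont when a later op needs contiguous
// input. assumes ctx as in the earlier sketches.
//
//   struct ggml_tensor * t  = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, 64, 8, 4); // [64, 8, 4]
//   struct ggml_tensor * tp = ggml_permute(ctx, t, 0, 2, 1, 3);                 // [64, 4, 8], strides swapped, no copy
//   struct ggml_tensor * tc = ggml_cont(ctx, tp);                               // materialize a contiguous copy
//
//   struct ggml_tensor * tt = ggml_transpose(ctx, t);                           // swaps dims 0 and 1 only
//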
  4565. // ggml_get_rows
  4566. struct ggml_tensor * ggml_get_rows(
  4567. struct ggml_context * ctx,
  4568. struct ggml_tensor * a,
  4569. struct ggml_tensor * b) {
  4570. GGML_ASSERT(a->ne[2] == b->ne[1]);
  4571. GGML_ASSERT(b->ne[3] == 1);
  4572. GGML_ASSERT(b->type == GGML_TYPE_I32);
  4573. bool is_node = false;
  4574. if (a->grad || b->grad) {
  4575. is_node = true;
  4576. }
  4577. // TODO: implement non F32 return
  4578. enum ggml_type type = GGML_TYPE_F32;
  4579. if (a->type == GGML_TYPE_I32) {
  4580. type = a->type;
  4581. }
  4582. struct ggml_tensor * result = ggml_new_tensor_4d(ctx, type, a->ne[0], b->ne[0], b->ne[1], b->ne[2]);
  4583. result->op = GGML_OP_GET_ROWS;
  4584. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4585. result->src[0] = a;
  4586. result->src[1] = b;
  4587. return result;
  4588. }
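//
// usage sketch (illustrative, not part of the library): an embedding lookup; a holds
// the table (one row per id along ne[1]) and b holds i32 indices. assumes ctx as in
// the earlier sketches.
//
//   const int64_t n_embd = 32, n_vocab = 100, n_tokens = 5;
//   struct ggml_tensor * table  = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_vocab);
//   struct ggml_tensor * tokens = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, n_tokens); // fill with ids in [0, n_vocab)
//   struct ggml_tensor * embd   = ggml_get_rows(ctx, table, tokens);                // [n_embd, n_tokens], f32
//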
  4589. // ggml_get_rows_back
  4590. struct ggml_tensor * ggml_get_rows_back(
  4591. struct ggml_context * ctx,
  4592. struct ggml_tensor * a,
  4593. struct ggml_tensor * b,
  4594. struct ggml_tensor * c) {
  4595. GGML_ASSERT(ggml_is_matrix(a) && ggml_is_vector(b) && b->type == GGML_TYPE_I32);
  4596. GGML_ASSERT(ggml_is_matrix(c) && (a->ne[0] == c->ne[0]));
  4597. bool is_node = false;
  4598. if (a->grad || b->grad) {
  4599. is_node = true;
  4600. }
  4601. // TODO: implement non F32 return
  4602. //struct ggml_tensor * result = ggml_new_tensor_2d(ctx, a->type, a->ne[0], b->ne[0]);
  4603. struct ggml_tensor * result = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, c->ne[0], c->ne[1]);
  4604. result->op = GGML_OP_GET_ROWS_BACK;
  4605. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4606. result->src[0] = a;
  4607. result->src[1] = b;
  4608. return result;
  4609. }
  4610. // ggml_diag
  4611. struct ggml_tensor * ggml_diag(
  4612. struct ggml_context * ctx,
  4613. struct ggml_tensor * a) {
  4614. GGML_ASSERT(a->ne[1] == 1);
  4615. bool is_node = false;
  4616. if (a->grad) {
  4617. is_node = true;
  4618. }
  4619. const int64_t ne[4] = { a->ne[0], a->ne[0], a->ne[2], a->ne[3] };
  4620. struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, 4, ne);
  4621. result->op = GGML_OP_DIAG;
  4622. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4623. result->src[0] = a;
  4624. return result;
  4625. }
  4626. // ggml_diag_mask_inf
  4627. static struct ggml_tensor * ggml_diag_mask_inf_impl(
  4628. struct ggml_context * ctx,
  4629. struct ggml_tensor * a,
  4630. int n_past,
  4631. bool inplace) {
  4632. bool is_node = false;
  4633. if (a->grad) {
  4634. is_node = true;
  4635. }
  4636. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4637. int32_t params[] = { n_past };
  4638. ggml_set_op_params(result, params, sizeof(params));
  4639. result->op = GGML_OP_DIAG_MASK_INF;
  4640. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4641. result->src[0] = a;
  4642. return result;
  4643. }
  4644. struct ggml_tensor * ggml_diag_mask_inf(
  4645. struct ggml_context * ctx,
  4646. struct ggml_tensor * a,
  4647. int n_past) {
  4648. return ggml_diag_mask_inf_impl(ctx, a, n_past, false);
  4649. }
  4650. struct ggml_tensor * ggml_diag_mask_inf_inplace(
  4651. struct ggml_context * ctx,
  4652. struct ggml_tensor * a,
  4653. int n_past) {
  4654. return ggml_diag_mask_inf_impl(ctx, a, n_past, true);
  4655. }
  4656. // ggml_diag_mask_zero
  4657. static struct ggml_tensor * ggml_diag_mask_zero_impl(
  4658. struct ggml_context * ctx,
  4659. struct ggml_tensor * a,
  4660. int n_past,
  4661. bool inplace) {
  4662. bool is_node = false;
  4663. if (a->grad) {
  4664. is_node = true;
  4665. }
  4666. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4667. int32_t params[] = { n_past };
  4668. ggml_set_op_params(result, params, sizeof(params));
  4669. result->op = GGML_OP_DIAG_MASK_ZERO;
  4670. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4671. result->src[0] = a;
  4672. return result;
  4673. }
  4674. struct ggml_tensor * ggml_diag_mask_zero(
  4675. struct ggml_context * ctx,
  4676. struct ggml_tensor * a,
  4677. int n_past) {
  4678. return ggml_diag_mask_zero_impl(ctx, a, n_past, false);
  4679. }
  4680. struct ggml_tensor * ggml_diag_mask_zero_inplace(
  4681. struct ggml_context * ctx,
  4682. struct ggml_tensor * a,
  4683. int n_past) {
  4684. return ggml_diag_mask_zero_impl(ctx, a, n_past, true);
  4685. }
  4686. // ggml_soft_max
  4687. static struct ggml_tensor * ggml_soft_max_impl(
  4688. struct ggml_context * ctx,
  4689. struct ggml_tensor * a,
  4690. struct ggml_tensor * mask,
  4691. float scale,
  4692. float max_bias,
  4693. bool inplace) {
  4694. GGML_ASSERT(ggml_is_contiguous(a));
  4695. if (mask) {
  4696. GGML_ASSERT(mask->type == GGML_TYPE_F16 || mask->type == GGML_TYPE_F32);
  4697. GGML_ASSERT(ggml_is_contiguous(mask));
  4698. GGML_ASSERT(ggml_is_matrix(mask));
  4699. GGML_ASSERT(mask->ne[0] == a->ne[0]);
  4700. GGML_ASSERT(mask->ne[1] >= a->ne[1]);
  4701. }
  4702. if (max_bias > 0.0f) {
  4703. GGML_ASSERT(mask);
  4704. }
  4705. bool is_node = false;
  4706. if (a->grad) {
  4707. is_node = true;
  4708. }
  4709. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4710. float params[] = { scale, max_bias };
  4711. ggml_set_op_params(result, params, sizeof(params));
  4712. result->op = GGML_OP_SOFT_MAX;
  4713. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4714. result->src[0] = a;
  4715. result->src[1] = mask;
  4716. return result;
  4717. }
  4718. struct ggml_tensor * ggml_soft_max(
  4719. struct ggml_context * ctx,
  4720. struct ggml_tensor * a) {
  4721. return ggml_soft_max_impl(ctx, a, NULL, 1.0f, 0.0f, false);
  4722. }
  4723. struct ggml_tensor * ggml_soft_max_inplace(
  4724. struct ggml_context * ctx,
  4725. struct ggml_tensor * a) {
  4726. return ggml_soft_max_impl(ctx, a, NULL, 1.0f, 0.0f, true);
  4727. }
  4728. struct ggml_tensor * ggml_soft_max_ext(
  4729. struct ggml_context * ctx,
  4730. struct ggml_tensor * a,
  4731. struct ggml_tensor * mask,
  4732. float scale,
  4733. float max_bias) {
  4734. return ggml_soft_max_impl(ctx, a, mask, scale, max_bias, false);
  4735. }
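//
// usage sketch (illustrative, not part of the library): the fused attention softmax;
// scale is applied to the logits before the softmax, the optional mask is added, and
// max_bias > 0.0f enables ALiBi slopes. the sizes and scale are arbitrary. assumes ctx
// as in the earlier sketches.
//
//   const int64_t n_kv = 16, n_tokens = 4, head_dim = 64;
//   struct ggml_tensor * scores = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_kv, n_tokens); // QK^T logits
//   struct ggml_tensor * mask   = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_kv, n_tokens); // e.g. -INFINITY for masked positions
//   struct ggml_tensor * probs  = ggml_soft_max_ext(ctx, scores, mask, 1.0f/sqrtf((float) head_dim), 0.0f);
//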
  4736. // ggml_soft_max_back
  4737. static struct ggml_tensor * ggml_soft_max_back_impl(
  4738. struct ggml_context * ctx,
  4739. struct ggml_tensor * a,
  4740. struct ggml_tensor * b,
  4741. bool inplace) {
  4742. bool is_node = false;
  4743. if (a->grad || b->grad) {
4744. is_node = true; // TODO: implement backward pass
  4745. }
  4746. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4747. result->op = GGML_OP_SOFT_MAX_BACK;
  4748. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4749. result->src[0] = a;
  4750. result->src[1] = b;
  4751. return result;
  4752. }
  4753. struct ggml_tensor * ggml_soft_max_back(
  4754. struct ggml_context * ctx,
  4755. struct ggml_tensor * a,
  4756. struct ggml_tensor * b) {
  4757. return ggml_soft_max_back_impl(ctx, a, b, false);
  4758. }
  4759. struct ggml_tensor * ggml_soft_max_back_inplace(
  4760. struct ggml_context * ctx,
  4761. struct ggml_tensor * a,
  4762. struct ggml_tensor * b) {
  4763. return ggml_soft_max_back_impl(ctx, a, b, true);
  4764. }
// ggml_rope

static struct ggml_tensor * ggml_rope_impl(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        int                   n_dims,
        int                   mode,
        int                   n_ctx,
        int                   n_orig_ctx,
        float                 freq_base,
        float                 freq_scale,
        float                 ext_factor,
        float                 attn_factor,
        float                 beta_fast,
        float                 beta_slow,
        float                 xpos_base,
        bool                  xpos_down,
        bool                  inplace) {
    GGML_ASSERT(ggml_is_vector(b));
    GGML_ASSERT(b->type == GGML_TYPE_I32);
    GGML_ASSERT(a->ne[2] == b->ne[0]);

    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    int32_t params[13] = { /*n_past*/ 0, n_dims, mode, n_ctx, n_orig_ctx };
    memcpy(params +  5, &freq_base,   sizeof(float));
    memcpy(params +  6, &freq_scale,  sizeof(float));
    memcpy(params +  7, &ext_factor,  sizeof(float));
    memcpy(params +  8, &attn_factor, sizeof(float));
    memcpy(params +  9, &beta_fast,   sizeof(float));
    memcpy(params + 10, &beta_slow,   sizeof(float));
    memcpy(params + 11, &xpos_base,   sizeof(float));
    memcpy(params + 12, &xpos_down,   sizeof(bool));
    ggml_set_op_params(result, params, sizeof(params));

    result->op   = GGML_OP_ROPE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

struct ggml_tensor * ggml_rope(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        int                   n_dims,
        int                   mode,
        int                   n_ctx) {
    return ggml_rope_impl(
        ctx, a, b, n_dims, mode, n_ctx, 0, 10000.0f, 1.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, false, false
    );
}

struct ggml_tensor * ggml_rope_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        int                   n_dims,
        int                   mode,
        int                   n_ctx) {
    return ggml_rope_impl(
        ctx, a, b, n_dims, mode, n_ctx, 0, 10000.0f, 1.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, false, true
    );
}

struct ggml_tensor * ggml_rope_custom(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        int                   n_dims,
        int                   mode,
        int                   n_ctx,
        int                   n_orig_ctx,
        float                 freq_base,
        float                 freq_scale,
        float                 ext_factor,
        float                 attn_factor,
        float                 beta_fast,
        float                 beta_slow) {
    return ggml_rope_impl(
        ctx, a, b, n_dims, mode, n_ctx, n_orig_ctx, freq_base, freq_scale,
        ext_factor, attn_factor, beta_fast, beta_slow, 0.0f, false, false
    );
}

struct ggml_tensor * ggml_rope_custom_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        int                   n_dims,
        int                   mode,
        int                   n_ctx,
        int                   n_orig_ctx,
        float                 freq_base,
        float                 freq_scale,
        float                 ext_factor,
        float                 attn_factor,
        float                 beta_fast,
        float                 beta_slow) {
    return ggml_rope_impl(
        ctx, a, b, n_dims, mode, n_ctx, n_orig_ctx, freq_base, freq_scale,
        ext_factor, attn_factor, beta_fast, beta_slow, 0.0f, false, true
    );
}
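
// Illustrative usage sketch (added for this edit, not part of ggml.c): a
// YaRN-style extended-context RoPE call. The positions tensor must be an I32
// vector with one entry per token (a->ne[2] == pos->ne[0]). All parameter
// values below are assumptions for the example, not recommendations.
static struct ggml_tensor * example_rope_yarn(
        struct ggml_context * ctx,
        struct ggml_tensor  * cur,   // [head_dim, n_head, n_tokens]
        struct ggml_tensor  * pos) { // I32 [n_tokens]
    return ggml_rope_custom(
        ctx, cur, pos,
        /*n_dims      =*/ 128,      // rotate the first 128 components
        /*mode        =*/ 0,        // "normal" RoPE
        /*n_ctx       =*/ 0,
        /*n_orig_ctx  =*/ 4096,     // original training context
        /*freq_base   =*/ 10000.0f,
        /*freq_scale  =*/ 1.0f,
        /*ext_factor  =*/ 1.0f,     // non-zero enables YaRN interpolation
        /*attn_factor =*/ 1.0f,
        /*beta_fast   =*/ 32.0f,
        /*beta_slow   =*/ 1.0f);
}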
struct ggml_tensor * ggml_rope_xpos_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        int                   n_dims,
        float                 base,
        bool                  down) {
    return ggml_rope_impl(ctx, a, b, n_dims, 0, 0, 0, 10000.0f, 1.0f, 0.0f, 1.0f, 0.0f, 0.0f, base, down, true);
}

// ggml_rope_back

struct ggml_tensor * ggml_rope_back(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        int                   n_dims,
        int                   mode,
        int                   n_ctx,
        int                   n_orig_ctx,
        float                 freq_base,
        float                 freq_scale,
        float                 ext_factor,
        float                 attn_factor,
        float                 beta_fast,
        float                 beta_slow,
        float                 xpos_base,
        bool                  xpos_down) {
    GGML_ASSERT(ggml_is_vector(b));
    GGML_ASSERT(b->type == GGML_TYPE_I32);
    GGML_ASSERT(a->ne[2] == b->ne[0]);

    GGML_ASSERT((mode & 4) == 0 && "ggml_rope_back() for ChatGLM not implemented yet");

    bool is_node = false;

    if (a->grad) {
        is_node = false; // TODO: implement backward
    }

    struct ggml_tensor * result = ggml_dup_tensor(ctx, a);

    int32_t params[13] = { /*n_past*/ 0, n_dims, mode, n_ctx, n_orig_ctx };
    memcpy(params +  5, &freq_base,   sizeof(float));
    memcpy(params +  6, &freq_scale,  sizeof(float));
    memcpy(params +  7, &ext_factor,  sizeof(float));
    memcpy(params +  8, &attn_factor, sizeof(float));
    memcpy(params +  9, &beta_fast,   sizeof(float));
    memcpy(params + 10, &beta_slow,   sizeof(float));
    memcpy(params + 11, &xpos_base,   sizeof(float));
    memcpy(params + 12, &xpos_down,   sizeof(bool));
    ggml_set_op_params(result, params, sizeof(params));

    result->op   = GGML_OP_ROPE_BACK;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}
// ggml_clamp

struct ggml_tensor * ggml_clamp(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        float                 min,
        float                 max) {
    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    // TODO: when implementing backward, fix this:
    struct ggml_tensor * result = ggml_view_tensor(ctx, a);

    float params[] = { min, max };
    ggml_set_op_params(result, params, sizeof(params));

    result->op   = GGML_OP_CLAMP;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_conv_1d

static int64_t ggml_calc_conv_output_size(int64_t ins, int64_t ks, int s, int p, int d) {
    return (ins + 2 * p - d * (ks - 1) - 1) / s + 1;
}
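
// Worked example (added for this edit): the helper above is the standard
// convolution output-size arithmetic OL = floor((IL + 2p - d*(k - 1) - 1)/s) + 1.
// With ins = 10, ks = 3, s = 1, p = 1, d = 1:
//   (10 + 2 - 2 - 1)/1 + 1 = 10   -> "same" padding preserves the length
// With ins = 10, ks = 3, s = 2, p = 0, d = 1:
//   (10 + 0 - 2 - 1)/2 + 1 = 4    (integer division: 7/2 = 3)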
GGML_API struct ggml_tensor * ggml_conv_1d(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        int                   s0,
        int                   p0,
        int                   d0) {
    struct ggml_tensor * im2col = ggml_im2col(ctx, a, b, s0, 0, p0, 0, d0, 0, false, GGML_TYPE_F16); // [N, OL, IC * K]

    struct ggml_tensor * result =
        ggml_mul_mat(ctx,
                ggml_reshape_2d(ctx, im2col, im2col->ne[0], (im2col->ne[2] * im2col->ne[1])), // [N, OL, IC * K] => [N*OL, IC * K]
                ggml_reshape_2d(ctx, a, (a->ne[0] * a->ne[1]), a->ne[2]));                    // [OC, IC, K] => [OC, IC * K]

    result = ggml_reshape_3d(ctx, result, im2col->ne[1], a->ne[2], im2col->ne[2]); // [N, OC, OL]

    return result;
}
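
// Illustrative shape walk-through (added for this edit, not part of ggml.c):
// ggml_conv_1d lowers the convolution to im2col + mat-mul. The tensor names
// and sizes below are assumptions for the example.
static struct ggml_tensor * example_conv_1d(struct ggml_context * ctx) {
    // kernel: K = 3 taps, IC = 16 input channels, OC = 32 output channels
    struct ggml_tensor * w = ggml_new_tensor_3d(ctx, GGML_TYPE_F16, 3, 16, 32);
    // input: IL = 100 samples, IC = 16 channels (single batch)
    struct ggml_tensor * x = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 100, 16);
    // stride 1, "same" padding 1, dilation 1 -> output [100, 32]
    return ggml_conv_1d(ctx, w, x, 1, 1, 1);
}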
// ggml_conv_1d_ph

struct ggml_tensor * ggml_conv_1d_ph(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        int                   s,
        int                   d) {
    return ggml_conv_1d(ctx, a, b, s, a->ne[0] / 2, d);
}

// ggml_conv_transpose_1d

static int64_t ggml_calc_conv_transpose_1d_output_size(int64_t ins, int64_t ks, int s, int p, int d) {
    return (ins - 1) * s - 2 * p + d * (ks - 1) + 1;
}

GGML_API struct ggml_tensor * ggml_conv_transpose_1d(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        int                   s0,
        int                   p0,
        int                   d0) {
    GGML_ASSERT(ggml_is_matrix(b));
    GGML_ASSERT(a->ne[2] == b->ne[1]);
    GGML_ASSERT(a->ne[3] == 1);

    GGML_ASSERT(p0 == 0);
    GGML_ASSERT(d0 == 1);

    bool is_node = false;

    if (a->grad || b->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    const int64_t ne[4] = {
        ggml_calc_conv_transpose_1d_output_size(b->ne[0], a->ne[0], s0, 0 /*p0*/, 1 /*d0*/),
        a->ne[1], b->ne[2], 1,
    };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);

    int32_t params[] = { s0, p0, d0 };
    ggml_set_op_params(result, params, sizeof(params));

    result->op   = GGML_OP_CONV_TRANSPOSE_1D;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}
// ggml_conv_depthwise

struct ggml_tensor * ggml_conv_depthwise_2d(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        int                   s0,
        int                   s1,
        int                   p0,
        int                   p1,
        int                   d0,
        int                   d1) {
    struct ggml_tensor * new_a = ggml_reshape_4d(ctx, a, a->ne[0], a->ne[1], 1, a->ne[2] * a->ne[3]);
    struct ggml_tensor * im2col = ggml_im2col(ctx, new_a,
                                        ggml_reshape_4d(ctx, b, b->ne[0], b->ne[1], 1, b->ne[2] * b->ne[3]),
                                        s0, s1, p0, p1, d0, d1, true, GGML_TYPE_F16); // [N * IC, OH, OW, KH * KW]
    struct ggml_tensor * new_b = ggml_reshape_4d(ctx, im2col, im2col->ne[0], im2col->ne[2] * im2col->ne[1], b->ne[2], b->ne[3]); // [N * IC, OH, OW, KH * KW] => [N, IC, OH * OW, KH * KW]

    new_a = ggml_reshape_4d(ctx, new_a, (new_a->ne[0] * new_a->ne[1]), new_a->ne[2], new_a->ne[3], 1); // [OC, 1, KH, KW] => [1, OC, 1, KH * KW]
    struct ggml_tensor * result = ggml_mul_mat(ctx, new_a, new_b);
    result = ggml_reshape_4d(ctx, result, im2col->ne[1], im2col->ne[2], b->ne[2], b->ne[3]); // [N, OC, OH, OW]

    return result;
}

// ggml_conv_2d

// im2col: [N, IC, IH, IW] => [N, OH, OW, IC*KH*KW]
// a: [OC, IC, KH, KW]
// b: [N, IC, IH, IW]
// result: [N, OH, OW, IC*KH*KW]
struct ggml_tensor * ggml_im2col(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        int                   s0,
        int                   s1,
        int                   p0,
        int                   p1,
        int                   d0,
        int                   d1,
        bool                  is_2D,
        enum ggml_type        dst_type) {
    if (is_2D) {
        GGML_ASSERT(a->ne[2] == b->ne[2]);
    } else {
        GGML_ASSERT(a->ne[1] == b->ne[1]);
    }

    bool is_node = false;

    if (a->grad || b->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    const int64_t OH = is_2D ? ggml_calc_conv_output_size(b->ne[1], a->ne[1], s1, p1, d1) : 0;
    const int64_t OW =         ggml_calc_conv_output_size(b->ne[0], a->ne[0], s0, p0, d0);

    const int64_t ne[4] = {
        is_2D ? (a->ne[2] * a->ne[1] * a->ne[0]) : a->ne[1] * a->ne[0],
        OW,
        is_2D ? OH : b->ne[2],
        is_2D ? b->ne[3] : 1,
    };

    struct ggml_tensor * result = ggml_new_tensor(ctx, dst_type, 4, ne);
    int32_t params[] = { s0, s1, p0, p1, d0, d1, (is_2D ? 1 : 0) };
    ggml_set_op_params(result, params, sizeof(params));

    result->op   = GGML_OP_IM2COL;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}
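
// Illustrative note (added for this edit, not part of ggml.c): im2col turns
// every receptive field into one row of a matrix, so the convolution becomes
// a single mat-mul against the flattened kernel. For an assumed example with
// N = 1, IC = 3, IH = IW = 8, KH = KW = 3, stride 1, pad 1:
//   b: [1, 3, 8, 8]   --im2col-->  [1, 8, 8, 3*3*3] = [1, 8, 8, 27]
//   a: [OC, 3, 3, 3]  --reshape--> [OC, 27]
//   mat-mul over the 27-long rows yields [8*8, OC], reshaped to [1, OC, 8, 8]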
// a: [OC, IC, KH, KW]
// b: [N, IC, IH, IW]
// result: [N, OC, OH, OW]
struct ggml_tensor * ggml_conv_2d(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        int                   s0,
        int                   s1,
        int                   p0,
        int                   p1,
        int                   d0,
        int                   d1) {
    struct ggml_tensor * im2col = ggml_im2col(ctx, a, b, s0, s1, p0, p1, d0, d1, true, GGML_TYPE_F16); // [N, OH, OW, IC * KH * KW]

    struct ggml_tensor * result =
        ggml_mul_mat(ctx,
                ggml_reshape_2d(ctx, im2col, im2col->ne[0], im2col->ne[3] * im2col->ne[2] * im2col->ne[1]), // [N, OH, OW, IC * KH * KW] => [N*OH*OW, IC * KH * KW]
                ggml_reshape_2d(ctx, a, (a->ne[0] * a->ne[1] * a->ne[2]), a->ne[3]));                       // [OC, IC, KH, KW] => [OC, IC * KH * KW]

    result = ggml_reshape_4d(ctx, result, im2col->ne[1], im2col->ne[2], im2col->ne[3], a->ne[3]); // [OC, N, OH, OW]
    result = ggml_cont(ctx, ggml_permute(ctx, result, 0, 1, 3, 2)); // [N, OC, OH, OW]

    return result;
}
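
// Illustrative usage sketch (added for this edit, not part of ggml.c); the
// tensor names and sizes are assumptions. Remember that ggml stores dims
// innermost-first, so ne = [W, H, C, N].
static struct ggml_tensor * example_conv_2d(struct ggml_context * ctx) {
    // kernel: KW = KH = 3, IC = 3, OC = 64
    struct ggml_tensor * w = ggml_new_tensor_4d(ctx, GGML_TYPE_F16, 3, 3, 3, 64);
    // input: IW = IH = 224, IC = 3, N = 1
    struct ggml_tensor * x = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, 224, 224, 3, 1);
    // stride 1, pad 1, dilation 1 -> ne = [224, 224, 64, 1], i.e. logical [N, OC, OH, OW]
    return ggml_conv_2d(ctx, w, x, 1, 1, 1, 1, 1, 1);
}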
// ggml_conv_2d_sk_p0

struct ggml_tensor * ggml_conv_2d_sk_p0(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b) {
    return ggml_conv_2d(ctx, a, b, a->ne[0], a->ne[1], 0, 0, 1, 1);
}

// ggml_conv_2d_s1_ph

struct ggml_tensor * ggml_conv_2d_s1_ph(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b) {
    return ggml_conv_2d(ctx, a, b, 1, 1, a->ne[0] / 2, a->ne[1] / 2, 1, 1);
}

// ggml_conv_transpose_2d_p0

static int64_t ggml_calc_conv_transpose_output_size(int64_t ins, int64_t ks, int s, int p) {
    return (ins - 1) * s - 2 * p + ks;
}

struct ggml_tensor * ggml_conv_transpose_2d_p0(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        int                   stride) {
    GGML_ASSERT(a->ne[3] == b->ne[2]);

    bool is_node = false;

    if (a->grad || b->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    const int64_t ne[4] = {
        ggml_calc_conv_transpose_output_size(b->ne[0], a->ne[0], stride, 0 /*p0*/),
        ggml_calc_conv_transpose_output_size(b->ne[1], a->ne[1], stride, 0 /*p1*/),
        a->ne[2], b->ne[3],
    };

    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);

    ggml_set_op_params_i32(result, 0, stride);

    result->op   = GGML_OP_CONV_TRANSPOSE_2D;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

// ggml_pool_*

static int64_t ggml_calc_pool_output_size(int64_t ins, int ks, int s, float p) {
    return (ins + 2 * p - ks) / s + 1;
}
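
// Worked example (added for this edit): same arithmetic as the convolution
// case but without dilation. A 2x2 pool with stride 2 and no padding on a
// 224-wide input gives (224 + 0 - 2)/2 + 1 = 112 output columns, so pooling
// with k = s = 2 halves each spatial dimension.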
// ggml_pool_1d

struct ggml_tensor * ggml_pool_1d(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        enum ggml_op_pool     op,
        int                   k0,
        int                   s0,
        int                   p0) {
    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    const int64_t ne[4] = {
        ggml_calc_pool_output_size(a->ne[0], k0, s0, p0),
        a->ne[1],
        a->ne[2],
        a->ne[3],
    };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);

    int32_t params[] = { op, k0, s0, p0 };
    ggml_set_op_params(result, params, sizeof(params));

    result->op   = GGML_OP_POOL_1D;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_pool_2d

struct ggml_tensor * ggml_pool_2d(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        enum ggml_op_pool     op,
        int                   k0,
        int                   k1,
        int                   s0,
        int                   s1,
        float                 p0,
        float                 p1) {
    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    struct ggml_tensor * result;
    const int64_t ne[3] = {
        ggml_calc_pool_output_size(a->ne[0], k0, s0, p0),
        ggml_calc_pool_output_size(a->ne[1], k1, s1, p1),
        a->ne[2],
    };
    result = ggml_new_tensor(ctx, GGML_TYPE_F32, 3, ne);

    int32_t params[] = { op, k0, k1, s0, s1, p0, p1 };
    ggml_set_op_params(result, params, sizeof(params));

    result->op   = GGML_OP_POOL_2D;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_upscale

static struct ggml_tensor * ggml_upscale_impl(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        int                   ne0,
        int                   ne1,
        int                   ne2,
        int                   ne3) {
    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    GGML_ASSERT(a->ne[0] <= ne0);
    GGML_ASSERT(a->ne[1] <= ne1);
    GGML_ASSERT(a->ne[2] <= ne2);
    GGML_ASSERT(a->ne[3] <= ne3);

    struct ggml_tensor * result = ggml_new_tensor_4d(ctx, a->type,
            ne0,
            ne1,
            ne2,
            ne3
            );

    result->op   = GGML_OP_UPSCALE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_upscale(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        int                   scale_factor) {
    return ggml_upscale_impl(ctx, a, a->ne[0] * scale_factor, a->ne[1] * scale_factor, a->ne[2], a->ne[3]);
}

struct ggml_tensor * ggml_upscale_ext(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        int                   ne0,
        int                   ne1,
        int                   ne2,
        int                   ne3) {
    return ggml_upscale_impl(ctx, a, ne0, ne1, ne2, ne3);
}

// ggml_pad

struct ggml_tensor * ggml_pad(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        int p0, int p1, int p2, int p3) {
    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    struct ggml_tensor * result = ggml_new_tensor_4d(ctx, a->type,
            a->ne[0] + p0,
            a->ne[1] + p1,
            a->ne[2] + p2,
            a->ne[3] + p3);

    result->op   = GGML_OP_PAD;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_arange

struct ggml_tensor * ggml_arange(
        struct ggml_context * ctx,
        float                 start,
        float                 stop,
        float                 step) {
    GGML_ASSERT(stop > start);

    const int64_t steps = (int64_t) ceilf((stop - start) / step);

    struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, steps);

    result->op = GGML_OP_ARANGE;
    ggml_set_op_params_f32(result, 0, start);
    ggml_set_op_params_f32(result, 1, stop);
    ggml_set_op_params_f32(result, 2, step);

    return result;
}
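
// Illustrative note (added for this edit, not part of ggml.c): the element
// count follows numpy.arange semantics, e.g. ggml_arange(ctx, 0.0f, 10.0f, 2.0f)
// creates an F32 tensor of ceil((10 - 0)/2) = 5 elements: 0, 2, 4, 6, 8.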
// ggml_timestep_embedding

struct ggml_tensor * ggml_timestep_embedding(
        struct ggml_context * ctx,
        struct ggml_tensor  * timesteps,
        int                   dim,
        int                   max_period) {
    bool is_node = false;

    if (timesteps->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    int actual_dim = dim;
    if (dim % 2 != 0) {
        actual_dim = dim + 1;
    }

    struct ggml_tensor * result = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, actual_dim, timesteps->ne[0]);

    result->op = GGML_OP_TIMESTEP_EMBEDDING;
    ggml_set_op_params_i32(result, 0, dim);
    ggml_set_op_params_i32(result, 1, max_period);

    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = timesteps;

    return result;
}

// ggml_argsort

struct ggml_tensor * ggml_argsort(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        enum ggml_sort_order  order) {
    bool is_node = false;

    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_I32, GGML_MAX_DIMS, a->ne);

    ggml_set_op_params_i32(result, 0, (int32_t) order);

    result->op   = GGML_OP_ARGSORT;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_top_k

struct ggml_tensor * ggml_top_k(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        int                   k) {
    GGML_ASSERT(a->ne[0] >= k);

    struct ggml_tensor * result = ggml_argsort(ctx, a, GGML_SORT_ORDER_DESC);

    result = ggml_view_4d(ctx, result,
                k, result->ne[1], result->ne[2], result->ne[3],
                   result->nb[1], result->nb[2], result->nb[3],
                0);

    return result;
}
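
// Illustrative usage sketch (added for this edit, not part of ggml.c): because
// ggml_top_k is just a k-wide view of a descending argsort, it returns
// *indices* (I32), not values. The `logits` name is an assumption.
static struct ggml_tensor * example_top_k(
        struct ggml_context * ctx,
        struct ggml_tensor  * logits) { // [n_vocab, n_tokens]
    // indices of the 40 highest-scoring entries per row: [40, n_tokens]
    return ggml_top_k(ctx, logits, 40);
}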
// ggml_flash_attn

struct ggml_tensor * ggml_flash_attn(
        struct ggml_context * ctx,
        struct ggml_tensor  * q,
        struct ggml_tensor  * k,
        struct ggml_tensor  * v,
        bool                  masked) {
    GGML_ASSERT(ggml_can_mul_mat(k, q));
    // TODO: check if vT can be multiplied by (k*qT)

    bool is_node = false;

    if (q->grad || k->grad || v->grad) {
        is_node = true;
    }

    //struct ggml_tensor * result = ggml_dup_tensor(ctx, q);
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, GGML_MAX_DIMS, q->ne);

    int32_t t = masked ? 1 : 0;
    ggml_set_op_params(result, &t, sizeof(t));

    result->op   = GGML_OP_FLASH_ATTN;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = q;
    result->src[1] = k;
    result->src[2] = v;

    return result;
}

// ggml_flash_attn_ext

struct ggml_tensor * ggml_flash_attn_ext(
        struct ggml_context * ctx,
        struct ggml_tensor  * q,
        struct ggml_tensor  * k,
        struct ggml_tensor  * v,
        struct ggml_tensor  * mask,
        float                 scale,
        float                 max_bias) {
    GGML_ASSERT(ggml_can_mul_mat(k, q));
    // TODO: check if vT can be multiplied by (k*qT)

    if (mask) {
        GGML_ASSERT(ggml_is_contiguous(mask));
        GGML_ASSERT(mask->ne[2] == 1);
        GGML_ASSERT(mask->ne[3] == 1);
        GGML_ASSERT(mask->ne[1] >= GGML_PAD(q->ne[1], GGML_KQ_MASK_PAD) &&
                "the Flash-Attention kernel requires the mask to be padded to GGML_KQ_MASK_PAD and at least n_queries big");
        //GGML_ASSERT(ggml_can_repeat_rows(mask, qk));
    }

    if (max_bias > 0.0f) {
        GGML_ASSERT(mask);
    }

    bool is_node = false;

    if (q->grad || k->grad || v->grad) {
        is_node = true;
    }

    // permute(0, 2, 1, 3)
    int64_t ne[4] = { q->ne[0], q->ne[2], q->ne[1], q->ne[3] };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);

    float params[] = { scale, max_bias };
    ggml_set_op_params(result, params, sizeof(params));

    result->op   = GGML_OP_FLASH_ATTN_EXT;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = q;
    result->src[1] = k;
    result->src[2] = v;
    result->src[3] = mask;

    return result;
}

void ggml_flash_attn_ext_set_prec(
        struct ggml_tensor * a,
        enum ggml_prec       prec) {
    GGML_ASSERT(a->op == GGML_OP_FLASH_ATTN_EXT);

    const int32_t prec_i32 = (int32_t) prec;

    ggml_set_op_params_i32(a, 2, prec_i32); // scale is on first pos, max_bias on second
}
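
// Illustrative usage sketch (added for this edit, not part of ggml.c): per the
// assert above, the mask passed to ggml_flash_attn_ext must have ne[1] padded
// up to GGML_KQ_MASK_PAD. Tensor names and layouts are assumptions for this
// example.
static struct ggml_tensor * example_flash_attn(
        struct ggml_context * ctx,
        struct ggml_tensor  * q,       // [d_head, n_tokens, n_head, 1]
        struct ggml_tensor  * k,       // [d_head, n_kv, n_head_kv, 1]
        struct ggml_tensor  * v,       // [d_head, n_kv, n_head_kv, 1]
        struct ggml_tensor  * kq_mask, // [n_kv, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD)]
        int                   d_head) {
    // max_bias = 0.0f disables the ALiBi slopes
    return ggml_flash_attn_ext(ctx, q, k, v, kq_mask, 1.0f/sqrtf((float) d_head), 0.0f);
}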
// ggml_flash_ff

struct ggml_tensor * ggml_flash_ff(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b0,
        struct ggml_tensor  * b1,
        struct ggml_tensor  * c0,
        struct ggml_tensor  * c1) {
    GGML_ASSERT(ggml_can_mul_mat(b0, a));
    // TODO: more checks

    bool is_node = false;

    if (a->grad || b0->grad || b1->grad || c0->grad || c1->grad) {
        is_node = true;
    }

    //struct ggml_tensor * result = ggml_dup_tensor(ctx, a);
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, GGML_MAX_DIMS, a->ne);

    result->op   = GGML_OP_FLASH_FF;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b0;
    result->src[2] = b1;
    result->src[3] = c0;
    result->src[4] = c1;

    return result;
}

// ggml_flash_attn_back

struct ggml_tensor * ggml_flash_attn_back(
        struct ggml_context * ctx,
        struct ggml_tensor  * q,
        struct ggml_tensor  * k,
        struct ggml_tensor  * v,
        struct ggml_tensor  * d,
        bool                  masked) {
    GGML_ASSERT(ggml_can_mul_mat(k, q));
    // TODO: check if vT can be multiplied by (k*qT)

    // d shape [D,N,ne2,ne3]
    // q shape [D,N,ne2,ne3]
    // k shape [D,M,kvne2,ne3]
    // v shape [M,D,kvne2,ne3]

    const int64_t     D = q->ne[0];
    const int64_t     N = q->ne[1];
    const int64_t     M = k->ne[1];
    const int64_t   ne2 = q->ne[2];
    const int64_t   ne3 = q->ne[3];
    const int64_t kvne2 = k->ne[2];

    GGML_ASSERT(k->ne[0] == D);
    GGML_ASSERT(v->ne[0] == M);
    GGML_ASSERT(v->ne[1] == D);
    GGML_ASSERT(d->ne[0] == D);
    GGML_ASSERT(d->ne[1] == N);
    GGML_ASSERT(k->ne[2] == kvne2);
    GGML_ASSERT(k->ne[3] == ne3);
    GGML_ASSERT(v->ne[2] == kvne2);
    GGML_ASSERT(v->ne[3] == ne3);
    GGML_ASSERT(d->ne[2] == ne2);
    GGML_ASSERT(d->ne[3] == ne3);

    GGML_ASSERT(ne2 % kvne2 == 0);

    bool is_node = false;

    if (q->grad || k->grad || v->grad) {
        // when using this operation (in backwards pass) these grads are set.
        // we don't want to create (big) grad of our result, so is_node is false.
        is_node = false;
    }

    // store gradients of q, k and v as continuous tensors concatenated in result.
    // note: v and gradv are actually transposed, i.e. v->ne[0] != D.
    const int64_t elem_q = ggml_nelements(q);
    const int64_t elem_k = ggml_nelements(k);
    const int64_t elem_v = ggml_nelements(v);

    enum ggml_type result_type = GGML_TYPE_F32;
    GGML_ASSERT(ggml_blck_size(result_type) == 1);
    const size_t tsize = ggml_type_size(result_type);

    const size_t offs_q = 0;
    const size_t offs_k = offs_q + GGML_PAD(elem_q * tsize, GGML_MEM_ALIGN);
    const size_t offs_v = offs_k + GGML_PAD(elem_k * tsize, GGML_MEM_ALIGN);
    const size_t end    = offs_v + GGML_PAD(elem_v * tsize, GGML_MEM_ALIGN);

    const size_t nelements = (end + tsize - 1)/tsize;

    struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nelements);

    int32_t masked_i = masked ? 1 : 0;
    ggml_set_op_params(result, &masked_i, sizeof(masked_i));

    result->op   = GGML_OP_FLASH_ATTN_BACK;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = q;
    result->src[1] = k;
    result->src[2] = v;
    result->src[3] = d;

    return result;
}
// ggml_ssm_conv

struct ggml_tensor * ggml_ssm_conv(
        struct ggml_context * ctx,
        struct ggml_tensor  * s,
        struct ggml_tensor  * x,
        struct ggml_tensor  * c,
        struct ggml_tensor  * sq) {
    GGML_ASSERT(ggml_is_3d(s));
    GGML_ASSERT(ggml_is_matrix(x));
    GGML_ASSERT(ggml_is_matrix(c));
    GGML_ASSERT(ggml_is_matrix(sq));
    GGML_ASSERT(sq->type == GGML_TYPE_I32);

    const int64_t d_conv   = c->ne[0];
    const int64_t d_inner  = c->ne[1];
    const int64_t n_tokens = x->ne[1];
    const int64_t n_kv     = s->ne[2];

    GGML_ASSERT( s->ne[0] == d_conv - 1);
    GGML_ASSERT( s->ne[1] == d_inner);
    GGML_ASSERT( x->ne[0] == d_inner);
    GGML_ASSERT(sq->ne[0] == n_kv);
    GGML_ASSERT(sq->ne[1] == n_tokens);

    bool is_node = false;

    if (s->grad || x->grad || c->grad || sq->grad) {
        GGML_ASSERT(false); // TODO: implement
        is_node = true;
    }

    // 2-in-1 concatenated x and conv_states, {d_inner, n_tokens} with {d_conv, d_inner, n_kv}
    struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, (d_inner*n_tokens) + (d_conv*d_inner*n_kv));

    result->op   = GGML_OP_SSM_CONV;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = s;
    result->src[1] = x;
    result->src[2] = c;
    result->src[3] = sq;

    return result;
}

// ggml_ssm_scan

struct ggml_tensor * ggml_ssm_scan(
        struct ggml_context * ctx,
        struct ggml_tensor  * s,
        struct ggml_tensor  * x,
        struct ggml_tensor  * dt,
        struct ggml_tensor  * A,
        struct ggml_tensor  * B,
        struct ggml_tensor  * C,
        struct ggml_tensor  * sq) {
    GGML_ASSERT(ggml_is_contiguous(s));
    GGML_ASSERT(ggml_is_contiguous(x));
    GGML_ASSERT(ggml_is_contiguous(dt));
    GGML_ASSERT(ggml_is_contiguous(A));
    GGML_ASSERT(sq->type == GGML_TYPE_I32);
    GGML_ASSERT(B->nb[0] == ggml_type_size(B->type));
    GGML_ASSERT(C->nb[0] == ggml_type_size(C->type));
    GGML_ASSERT(ggml_are_same_shape(x, dt));

    {
        const int64_t d_state  = s->ne[0];
        const int64_t d_inner  = s->ne[1];
        const int64_t n_tokens = x->ne[1];

        GGML_ASSERT(x->ne[0] == d_inner);
        GGML_ASSERT(A->ne[0] == d_state);
        GGML_ASSERT(A->ne[1] == d_inner);
        GGML_ASSERT(B->ne[0] == d_state);
        GGML_ASSERT(B->ne[1] == n_tokens);
        GGML_ASSERT(C->ne[0] == d_state);
        GGML_ASSERT(C->ne[1] == n_tokens);
    }

    bool is_node = false;

    if (s->grad || x->grad || dt->grad || A->grad || B->grad || C->grad || sq->grad) {
        GGML_ASSERT(false); // TODO: implement
        is_node = true;
    }

    // 2-in-1 concatenated y and ssm_states, {d_inner, n_tokens} with {d_state, d_inner, n_kv}
    struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, ggml_nelements(x) + ggml_nelements(s));

    result->op   = GGML_OP_SSM_SCAN;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = s;
    result->src[1] = x;
    result->src[2] = dt;
    result->src[3] = A;
    result->src[4] = B;
    result->src[5] = C;
    result->src[6] = sq;

    return result;
}
// ggml_win_part

struct ggml_tensor * ggml_win_part(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        int                   w) {
    GGML_ASSERT(a->ne[3] == 1);
    GGML_ASSERT(a->type == GGML_TYPE_F32);

    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    // padding
    const int px = (w - a->ne[1]%w)%w;
    const int py = (w - a->ne[2]%w)%w;

    const int npx = (px + a->ne[1])/w;
    const int npy = (py + a->ne[2])/w;
    const int np  = npx*npy;

    const int64_t ne[4] = { a->ne[0], w, w, np, };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);

    int32_t params[] = { npx, npy, w };
    ggml_set_op_params(result, params, sizeof(params));

    result->op   = GGML_OP_WIN_PART;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_win_unpart

struct ggml_tensor * ggml_win_unpart(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        int                   w0,
        int                   h0,
        int                   w) {
    GGML_ASSERT(a->type == GGML_TYPE_F32);

    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    const int64_t ne[4] = { a->ne[0], w0, h0, 1, };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 3, ne);

    int32_t params[] = { w };
    ggml_set_op_params(result, params, sizeof(params));

    result->op   = GGML_OP_WIN_UNPART;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_get_rel_pos

struct ggml_tensor * ggml_get_rel_pos(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        int                   qh,
        int                   kh) {
    GGML_ASSERT(qh == kh);
    GGML_ASSERT(2*MAX(qh, kh) - 1 == a->ne[1]);

    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    const int64_t ne[4] = { a->ne[0], kh, qh, 1, };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F16, 3, ne);

    result->op   = GGML_OP_GET_REL_POS;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}
// ggml_add_rel_pos

static struct ggml_tensor * ggml_add_rel_pos_impl(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * pw,
        struct ggml_tensor  * ph,
        bool                  inplace) {
    GGML_ASSERT(ggml_are_same_shape(pw, ph));
    GGML_ASSERT(ggml_is_contiguous(a));
    GGML_ASSERT(ggml_is_contiguous(pw));
    GGML_ASSERT(ggml_is_contiguous(ph));
    GGML_ASSERT(ph->type == GGML_TYPE_F32);
    GGML_ASSERT(pw->type == GGML_TYPE_F32);
    GGML_ASSERT(pw->ne[3] == a->ne[2]);
    GGML_ASSERT(pw->ne[0]*pw->ne[0] == a->ne[0]);
    GGML_ASSERT(pw->ne[1]*pw->ne[2] == a->ne[1]);

    bool is_node = false;

    if (!inplace && (a->grad || pw->grad || ph->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
    ggml_set_op_params_i32(result, 0, inplace ? 1 : 0);

    result->op   = GGML_OP_ADD_REL_POS;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = pw;
    result->src[2] = ph;

    return result;
}

struct ggml_tensor * ggml_add_rel_pos(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * pw,
        struct ggml_tensor  * ph) {
    return ggml_add_rel_pos_impl(ctx, a, pw, ph, false);
}

struct ggml_tensor * ggml_add_rel_pos_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * pw,
        struct ggml_tensor  * ph) {
    return ggml_add_rel_pos_impl(ctx, a, pw, ph, true);
}

// ggml_unary

static struct ggml_tensor * ggml_unary_impl(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        enum ggml_unary_op    op,
        bool                  inplace) {
    bool is_node = false;

    if (!inplace && (a->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    ggml_set_op_params_i32(result, 0, (int32_t) op);

    result->op   = GGML_OP_UNARY;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_unary(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        enum ggml_unary_op    op) {
    return ggml_unary_impl(ctx, a, op, false);
}

struct ggml_tensor * ggml_unary_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        enum ggml_unary_op    op) {
    return ggml_unary_impl(ctx, a, op, true);
}
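
// Illustrative usage sketch (added for this edit, not part of ggml.c): all
// element-wise unary ops share one GGML_OP_UNARY node; the specific sub-op is
// stored in op_params.
static struct ggml_tensor * example_gelu(
        struct ggml_context * ctx,
        struct ggml_tensor  * a) {
    return ggml_unary(ctx, a, GGML_UNARY_OP_GELU);
}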
// ggml_map_unary

static struct ggml_tensor * ggml_map_unary_impl_f32(
        struct ggml_context        * ctx,
        struct ggml_tensor         * a,
        const  ggml_unary_op_f32_t   fun,
        bool                         inplace) {
    bool is_node = false;

    if (!inplace && a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    ggml_set_op_params(result, (const void *) &fun, sizeof(fun));

    result->op   = GGML_OP_MAP_UNARY;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_map_unary_f32(
        struct ggml_context        * ctx,
        struct ggml_tensor         * a,
        const  ggml_unary_op_f32_t   fun) {
    return ggml_map_unary_impl_f32(ctx, a, fun, false);
}

struct ggml_tensor * ggml_map_unary_inplace_f32(
        struct ggml_context        * ctx,
        struct ggml_tensor         * a,
        const  ggml_unary_op_f32_t   fun) {
    return ggml_map_unary_impl_f32(ctx, a, fun, true);
}

// ggml_map_binary

static struct ggml_tensor * ggml_map_binary_impl_f32(
        struct ggml_context         * ctx,
        struct ggml_tensor          * a,
        struct ggml_tensor          * b,
        const  ggml_binary_op_f32_t   fun,
        bool                          inplace) {
    GGML_ASSERT(ggml_are_same_shape(a, b));

    bool is_node = false;

    if (!inplace && (a->grad || b->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    ggml_set_op_params(result, (const void *) &fun, sizeof(fun));

    result->op   = GGML_OP_MAP_BINARY;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

struct ggml_tensor * ggml_map_binary_f32(
        struct ggml_context         * ctx,
        struct ggml_tensor          * a,
        struct ggml_tensor          * b,
        const  ggml_binary_op_f32_t   fun) {
    return ggml_map_binary_impl_f32(ctx, a, b, fun, false);
}

struct ggml_tensor * ggml_map_binary_inplace_f32(
        struct ggml_context         * ctx,
        struct ggml_tensor          * a,
        struct ggml_tensor          * b,
        const  ggml_binary_op_f32_t   fun) {
    return ggml_map_binary_impl_f32(ctx, a, b, fun, true);
}

// ggml_map_custom1_f32

static struct ggml_tensor * ggml_map_custom1_impl_f32(
        struct ggml_context          * ctx,
        struct ggml_tensor           * a,
        const  ggml_custom1_op_f32_t   fun,
        bool                           inplace) {
    bool is_node = false;

    if (!inplace && a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    ggml_set_op_params(result, (const void *) &fun, sizeof(fun));

    result->op   = GGML_OP_MAP_CUSTOM1_F32;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_map_custom1_f32(
        struct ggml_context          * ctx,
        struct ggml_tensor           * a,
        const  ggml_custom1_op_f32_t   fun) {
    return ggml_map_custom1_impl_f32(ctx, a, fun, false);
}

struct ggml_tensor * ggml_map_custom1_inplace_f32(
        struct ggml_context          * ctx,
        struct ggml_tensor           * a,
        const  ggml_custom1_op_f32_t   fun) {
    return ggml_map_custom1_impl_f32(ctx, a, fun, true);
}

// ggml_map_custom2_f32

static struct ggml_tensor * ggml_map_custom2_impl_f32(
        struct ggml_context          * ctx,
        struct ggml_tensor           * a,
        struct ggml_tensor           * b,
        const  ggml_custom2_op_f32_t   fun,
        bool                           inplace) {
    bool is_node = false;

    if (!inplace && (a->grad || b->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    ggml_set_op_params(result, (const void *) &fun, sizeof(fun));

    result->op   = GGML_OP_MAP_CUSTOM2_F32;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

struct ggml_tensor * ggml_map_custom2_f32(
        struct ggml_context          * ctx,
        struct ggml_tensor           * a,
        struct ggml_tensor           * b,
        const  ggml_custom2_op_f32_t   fun) {
    return ggml_map_custom2_impl_f32(ctx, a, b, fun, false);
}

struct ggml_tensor * ggml_map_custom2_inplace_f32(
        struct ggml_context          * ctx,
        struct ggml_tensor           * a,
        struct ggml_tensor           * b,
        const  ggml_custom2_op_f32_t   fun) {
    return ggml_map_custom2_impl_f32(ctx, a, b, fun, true);
}

// ggml_map_custom3_f32

static struct ggml_tensor * ggml_map_custom3_impl_f32(
        struct ggml_context          * ctx,
        struct ggml_tensor           * a,
        struct ggml_tensor           * b,
        struct ggml_tensor           * c,
        const  ggml_custom3_op_f32_t   fun,
        bool                           inplace) {
    bool is_node = false;

    if (!inplace && (a->grad || b->grad || c->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    ggml_set_op_params(result, (const void *) &fun, sizeof(fun));

    result->op   = GGML_OP_MAP_CUSTOM3_F32;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;
    result->src[2] = c;

    return result;
}

struct ggml_tensor * ggml_map_custom3_f32(
        struct ggml_context          * ctx,
        struct ggml_tensor           * a,
        struct ggml_tensor           * b,
        struct ggml_tensor           * c,
        const  ggml_custom3_op_f32_t   fun) {
    return ggml_map_custom3_impl_f32(ctx, a, b, c, fun, false);
}

struct ggml_tensor * ggml_map_custom3_inplace_f32(
        struct ggml_context          * ctx,
        struct ggml_tensor           * a,
        struct ggml_tensor           * b,
        struct ggml_tensor           * c,
        const  ggml_custom3_op_f32_t   fun) {
    return ggml_map_custom3_impl_f32(ctx, a, b, c, fun, true);
}
// ggml_map_custom1

struct ggml_map_custom1_op_params {
    ggml_custom1_op_t  fun;
    int                n_tasks;
    void             * userdata;
};

static struct ggml_tensor * ggml_map_custom1_impl(
        struct ggml_context      * ctx,
        struct ggml_tensor       * a,
        const  ggml_custom1_op_t   fun,
        int                        n_tasks,
        void                     * userdata,
        bool                       inplace) {
    GGML_ASSERT(n_tasks == GGML_N_TASKS_MAX || n_tasks > 0);

    bool is_node = false;

    if (!inplace && a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    struct ggml_map_custom1_op_params params = {
        /*.fun      =*/ fun,
        /*.n_tasks  =*/ n_tasks,
        /*.userdata =*/ userdata
    };
    ggml_set_op_params(result, (const void *) &params, sizeof(params));

    result->op   = GGML_OP_MAP_CUSTOM1;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_map_custom1(
        struct ggml_context      * ctx,
        struct ggml_tensor       * a,
        const  ggml_custom1_op_t   fun,
        int                        n_tasks,
        void                     * userdata) {
    return ggml_map_custom1_impl(ctx, a, fun, n_tasks, userdata, false);
}

struct ggml_tensor * ggml_map_custom1_inplace(
        struct ggml_context      * ctx,
        struct ggml_tensor       * a,
        const  ggml_custom1_op_t   fun,
        int                        n_tasks,
        void                     * userdata) {
    return ggml_map_custom1_impl(ctx, a, fun, n_tasks, userdata, true);
}
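
// Illustrative usage sketch (added for this edit, not part of ggml.c): a
// custom op receives the thread index/count so it can partition work itself.
// The callback `example_scale_op` is an assumption for this example; its
// signature follows ggml_custom1_op_t from ggml.h.
static void example_scale_op(struct ggml_tensor * dst, const struct ggml_tensor * a,
                             int ith, int nth, void * userdata) {
    const float factor = *(const float *) userdata;

    // partition the elements across the nth worker threads
    const int64_t n  = ggml_nelements(dst);
    const int64_t dr = (n + nth - 1)/nth;
    const int64_t i0 = dr*ith;
    const int64_t i1 = MIN(i0 + dr, n);

    const float * src = (const float *) a->data;
    float       * out = (float       *) dst->data;

    for (int64_t i = i0; i < i1; i++) {
        out[i] = factor*src[i];
    }
}
// graph construction would then look like:
//   ggml_map_custom1(ctx, a, example_scale_op, GGML_N_TASKS_MAX, &factor);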
// ggml_map_custom2

struct ggml_map_custom2_op_params {
    ggml_custom2_op_t  fun;
    int                n_tasks;
    void             * userdata;
};

static struct ggml_tensor * ggml_map_custom2_impl(
        struct ggml_context      * ctx,
        struct ggml_tensor       * a,
        struct ggml_tensor       * b,
        const  ggml_custom2_op_t   fun,
        int                        n_tasks,
        void                     * userdata,
        bool                       inplace) {
    GGML_ASSERT(n_tasks == GGML_N_TASKS_MAX || n_tasks > 0);

    bool is_node = false;

    if (!inplace && (a->grad || b->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    struct ggml_map_custom2_op_params params = {
        /*.fun      =*/ fun,
        /*.n_tasks  =*/ n_tasks,
        /*.userdata =*/ userdata
    };
    ggml_set_op_params(result, (const void *) &params, sizeof(params));

    result->op   = GGML_OP_MAP_CUSTOM2;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

struct ggml_tensor * ggml_map_custom2(
        struct ggml_context      * ctx,
        struct ggml_tensor       * a,
        struct ggml_tensor       * b,
        const  ggml_custom2_op_t   fun,
        int                        n_tasks,
        void                     * userdata) {
    return ggml_map_custom2_impl(ctx, a, b, fun, n_tasks, userdata, false);
}

struct ggml_tensor * ggml_map_custom2_inplace(
        struct ggml_context      * ctx,
        struct ggml_tensor       * a,
        struct ggml_tensor       * b,
        const  ggml_custom2_op_t   fun,
        int                        n_tasks,
        void                     * userdata) {
    return ggml_map_custom2_impl(ctx, a, b, fun, n_tasks, userdata, true);
}

// ggml_map_custom3

struct ggml_map_custom3_op_params {
    ggml_custom3_op_t  fun;
    int                n_tasks;
    void             * userdata;
};

static struct ggml_tensor * ggml_map_custom3_impl(
        struct ggml_context      * ctx,
        struct ggml_tensor       * a,
        struct ggml_tensor       * b,
        struct ggml_tensor       * c,
        const  ggml_custom3_op_t   fun,
        int                        n_tasks,
        void                     * userdata,
        bool                       inplace) {
    GGML_ASSERT(n_tasks == GGML_N_TASKS_MAX || n_tasks > 0);

    bool is_node = false;

    if (!inplace && (a->grad || b->grad || c->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    struct ggml_map_custom3_op_params params = {
        /*.fun      =*/ fun,
        /*.n_tasks  =*/ n_tasks,
        /*.userdata =*/ userdata
    };
    ggml_set_op_params(result, (const void *) &params, sizeof(params));

    result->op   = GGML_OP_MAP_CUSTOM3;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;
    result->src[2] = c;

    return result;
}

struct ggml_tensor * ggml_map_custom3(
        struct ggml_context      * ctx,
        struct ggml_tensor       * a,
        struct ggml_tensor       * b,
        struct ggml_tensor       * c,
        const  ggml_custom3_op_t   fun,
        int                        n_tasks,
        void                     * userdata) {
    return ggml_map_custom3_impl(ctx, a, b, c, fun, n_tasks, userdata, false);
}

struct ggml_tensor * ggml_map_custom3_inplace(
        struct ggml_context      * ctx,
        struct ggml_tensor       * a,
        struct ggml_tensor       * b,
        struct ggml_tensor       * c,
        const  ggml_custom3_op_t   fun,
        int                        n_tasks,
        void                     * userdata) {
    return ggml_map_custom3_impl(ctx, a, b, c, fun, n_tasks, userdata, true);
}
// ggml_cross_entropy_loss

struct ggml_tensor * ggml_cross_entropy_loss(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b) {
    GGML_ASSERT(ggml_are_same_shape(a, b));

    bool is_node = false;

    if (a->grad || b->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = ggml_new_tensor_1d(ctx, a->type, 1);

    result->op   = GGML_OP_CROSS_ENTROPY_LOSS;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

// ggml_cross_entropy_loss_back

struct ggml_tensor * ggml_cross_entropy_loss_back(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        struct ggml_tensor  * c) {
    GGML_ASSERT(ggml_are_same_shape(a, b));
    GGML_ASSERT(ggml_is_scalar(c));

    struct ggml_tensor * result = ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_CROSS_ENTROPY_LOSS_BACK;
    result->grad = NULL;
    result->src[0] = a;
    result->src[1] = b;
    result->src[2] = c;

    return result;
}

////////////////////////////////////////////////////////////////////////////////

void ggml_set_param(
        struct ggml_context * ctx,
        struct ggml_tensor  * tensor) {
    tensor->flags |= GGML_TENSOR_FLAG_PARAM;

    GGML_ASSERT(tensor->grad == NULL);
    tensor->grad = ggml_dup_tensor(ctx, tensor);
    ggml_format_name(tensor->grad, "%s (grad)", tensor->name);
}
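
// Illustrative usage sketch (added for this edit, not part of ggml.c): marking
// a tensor as a parameter allocates its gradient tensor so that the backward
// graph construction will propagate into it. Names are assumptions.
static struct ggml_tensor * example_trainable_weight(struct ggml_context * ctx) {
    struct ggml_tensor * w = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 768, 768);
    ggml_set_param(ctx, w); // w->grad is now a same-shaped F32 tensor
    return w;
}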
// ggml_compute_forward_dup

static void ggml_compute_forward_dup_same_cont(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));
    GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));
    GGML_ASSERT(src0->type == dst->type);

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    const size_t nb00 = src0->nb[0];
    const size_t nb0  = dst->nb[0];

    const int ith = params->ith; // thread index
    const int nth = params->nth; // number of threads

    // parallelize by elements
    const int ne  = ggml_nelements(dst);
    const int dr  = (ne + nth - 1) / nth;
    const int ie0 = dr * ith;
    const int ie1 = MIN(ie0 + dr, ne);

    if (ie0 < ie1) {
        memcpy(
            ((char *)  dst->data + ie0*nb0),
            ((char *) src0->data + ie0*nb00),
            (ie1 - ie0) * ggml_type_size(src0->type));
    }
}
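
// Worked example (added for this edit) of the split above: with ne = 10
// elements and nth = 4 threads, dr = (10 + 3)/4 = 3, so the threads copy the
// element ranges [0,3), [3,6), [6,9) and [9,10) — the last thread gets the
// remainder, and the `ie0 < ie1` guard skips any thread left with no work.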
static void ggml_compute_forward_dup_f16(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    GGML_TENSOR_UNARY_OP_LOCALS

    const int ith = params->ith; // thread index
    const int nth = params->nth; // number of threads

    if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst) && src0->type == dst->type) {
        ggml_compute_forward_dup_same_cont(params, dst);
        return;
    }

    // parallelize by rows
    const int nr = ne01;
    // number of rows per thread
    const int dr = (nr + nth - 1) / nth;
    // row range for this thread
    const int ir0 = dr * ith;
    const int ir1 = MIN(ir0 + dr, nr);

    if (src0->type == dst->type &&
        ne00 == ne0 &&
        nb00 == ggml_type_size(src0->type) && nb0 == ggml_type_size(dst->type)) {
        // copy by rows
        const size_t rs = ne00*nb00;
        for (int64_t i03 = 0; i03 < ne03; i03++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
                for (int64_t i01 = ir0; i01 < ir1; i01++) {
                    memcpy(
                        ((char *)  dst->data + i01*nb1  + i02*nb2  + i03*nb3),
                        ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03),
                        rs);
                }
            }
        }
        return;
    }

    // TODO: add more special-case implementations for tensor shapes/strides that can benefit from memcpy

    if (ggml_is_contiguous(dst)) {
        if (nb00 == sizeof(ggml_fp16_t)) {
            if (dst->type == GGML_TYPE_F16) {
                size_t id = 0;
                const size_t rs = ne00 * nb00;
                char * dst_ptr = (char *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += rs * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            const char * src0_ptr = (char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03;
                            memcpy(dst_ptr + id, src0_ptr, rs);
                            id += rs;
                        }
                        id += rs * (ne01 - ir1);
                    }
                }
            } else if (dst->type == GGML_TYPE_F32) {
                size_t id = 0;
                float * dst_ptr = (float *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += ne00 * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
                            for (int i00 = 0; i00 < ne00; i00++) {
                                dst_ptr[id] = GGML_FP16_TO_FP32(src0_ptr[i00]);
                                id++;
                            }
                        }
                        id += ne00 * (ne01 - ir1);
                    }
                }
            } else if (type_traits[dst->type].from_float) {
                ggml_from_float_t const quantize_row_q = type_traits[dst->type].from_float;
                float * src0_f32 = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith;

                size_t id = 0;
                size_t rs = nb0 * (ne00 / ggml_blck_size(dst->type));
                char * dst_ptr = (char *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += rs * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);

                            for (int i00 = 0; i00 < ne00; i00++) {
                                src0_f32[i00] = GGML_FP16_TO_FP32(src0_ptr[i00]);
                            }

                            quantize_row_q(src0_f32, dst_ptr + id, ne00);
                            id += rs;
                        }
                        id += rs * (ne01 - ir1);
                    }
                }
            } else {
                GGML_ASSERT(false); // TODO: implement
            }
        } else {
            //printf("%s: this is not optimal - fix me\n", __func__);

            if (dst->type == GGML_TYPE_F32) {
                size_t id = 0;
                float * dst_ptr = (float *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += ne00 * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            for (int i00 = 0; i00 < ne00; i00++) {
                                const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);

                                dst_ptr[id] = GGML_FP16_TO_FP32(*src0_ptr);
                                id++;
                            }
                        }
                        id += ne00 * (ne01 - ir1);
                    }
                }
            } else if (dst->type == GGML_TYPE_F16) {
                size_t id = 0;
                ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += ne00 * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            for (int i00 = 0; i00 < ne00; i00++) {
                                const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);

                                dst_ptr[id] = *src0_ptr;
                                id++;
                            }
                        }
                        id += ne00 * (ne01 - ir1);
                    }
                }
            } else {
                GGML_ASSERT(false); // TODO: implement
            }
        }
        return;
    }

    // dst counters
    int64_t i10 = 0;
    int64_t i11 = 0;
    int64_t i12 = 0;
    int64_t i13 = 0;

    if (dst->type == GGML_TYPE_F16) {
        for (int64_t i03 = 0; i03 < ne03; i03++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
                i10 += ne00 * ir0;
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
                for (int64_t i01 = ir0; i01 < ir1; i01++) {
                    for (int64_t i00 = 0; i00 < ne00; i00++) {
                        const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
                              char * dst_ptr  = ((char *)  dst->data + i10*nb0  + i11*nb1  + i12*nb2  + i13*nb3);

                        memcpy(dst_ptr, src0_ptr, sizeof(ggml_fp16_t));

                        if (++i10 == ne00) {
                            i10 = 0;
                            if (++i11 == ne01) {
                                i11 = 0;
                                if (++i12 == ne02) {
                                    i12 = 0;
                                    if (++i13 == ne03) {
                                        i13 = 0;
                                    }
                                }
                            }
                        }
                    }
                }
                i10 += ne00 * (ne01 - ir1);
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
            }
        }
    } else if (dst->type == GGML_TYPE_F32) {
        for (int64_t i03 = 0; i03 < ne03; i03++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
                i10 += ne00 * ir0;
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
                for (int64_t i01 = ir0; i01 < ir1; i01++) {
                    for (int64_t i00 = 0; i00 < ne00; i00++) {
                        const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
                              char * dst_ptr  = ((char *)  dst->data + i10*nb0  + i11*nb1  + i12*nb2  + i13*nb3);

                        *(float *) dst_ptr = GGML_FP16_TO_FP32(*(const ggml_fp16_t *) src0_ptr);

                        if (++i10 == ne0) {
                            i10 = 0;
                            if (++i11 == ne1) {
                                i11 = 0;
                                if (++i12 == ne2) {
                                    i12 = 0;
                                    if (++i13 == ne3) {
                                        i13 = 0;
                                    }
                                }
                            }
                        }
                    }
                }
                i10 += ne00 * (ne01 - ir1);
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
            }
        }
    } else {
        GGML_ASSERT(false); // TODO: implement
    }
}
  6325. static void ggml_compute_forward_dup_bf16(
  6326. const struct ggml_compute_params * params,
  6327. struct ggml_tensor * dst) {
  6328. const struct ggml_tensor * src0 = dst->src[0];
  6329. GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));
  6330. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  6331. return;
  6332. }
  6333. GGML_TENSOR_UNARY_OP_LOCALS
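// GGML_TENSOR_UNARY_OP_LOCALS declares ne00..ne03/nb00..nb03 for src0 and ne0..ne3/nb0..nb3 for dst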
  6334. const int ith = params->ith; // thread index
  6335. const int nth = params->nth; // number of threads
  6336. if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst) && src0->type == dst->type) {
  6337. ggml_compute_forward_dup_same_cont(params, dst);
  6338. return;
  6339. }
  6340. // parallelize by rows
  6341. const int nr = ne01;
  6342. // number of rows per thread
  6343. const int dr = (nr + nth - 1) / nth;
  6344. // row range for this thread
  6345. const int ir0 = dr * ith;
  6346. const int ir1 = MIN(ir0 + dr, nr);
  6347. if (src0->type == dst->type &&
  6348. ne00 == ne0 &&
  6349. nb00 == ggml_type_size(src0->type) && nb0 == ggml_type_size(dst->type)) {
  6350. // copy by rows
  6351. const size_t rs = ne00*nb00;
  6352. for (int64_t i03 = 0; i03 < ne03; i03++) {
  6353. for (int64_t i02 = 0; i02 < ne02; i02++) {
  6354. for (int64_t i01 = ir0; i01 < ir1; i01++) {
  6355. memcpy(
  6356. ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3),
  6357. ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03),
  6358. rs);
  6359. }
  6360. }
  6361. }
  6362. return;
  6363. }
  6364. // TODO: add more special-case implementations for tensor shapes/strides that can benefit from memcpy
  6365. if (ggml_is_contiguous(dst)) {
  6366. if (nb00 == sizeof(ggml_bf16_t)) {
  6367. if (dst->type == GGML_TYPE_BF16) {
  6368. size_t id = 0;
  6369. const size_t rs = ne00 * nb00;
  6370. char * dst_ptr = (char *) dst->data;
  6371. for (int i03 = 0; i03 < ne03; i03++) {
  6372. for (int i02 = 0; i02 < ne02; i02++) {
  6373. id += rs * ir0;
  6374. for (int i01 = ir0; i01 < ir1; i01++) {
  6375. const char * src0_ptr = (char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03;
  6376. memcpy(dst_ptr + id, src0_ptr, rs);
  6377. id += rs;
  6378. }
  6379. id += rs * (ne01 - ir1);
  6380. }
  6381. }
  6382. } else if (dst->type == GGML_TYPE_F16) {
  6383. size_t id = 0;
  6384. ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data;
  6385. for (int i03 = 0; i03 < ne03; i03++) {
  6386. for (int i02 = 0; i02 < ne02; i02++) {
  6387. id += ne00 * ir0;
  6388. for (int i01 = ir0; i01 < ir1; i01++) {
  6389. const ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
  6390. for (int i00 = 0; i00 < ne00; i00++) {
  6391. dst_ptr[id] = GGML_FP32_TO_FP16(GGML_BF16_TO_FP32(src0_ptr[i00]));
  6392. id++;
  6393. }
  6394. }
  6395. id += ne00 * (ne01 - ir1);
  6396. }
  6397. }
  6398. } else if (dst->type == GGML_TYPE_F32) {
  6399. size_t id = 0;
  6400. float * dst_ptr = (float *) dst->data;
  6401. for (int i03 = 0; i03 < ne03; i03++) {
  6402. for (int i02 = 0; i02 < ne02; i02++) {
  6403. id += ne00 * ir0;
  6404. for (int i01 = ir0; i01 < ir1; i01++) {
  6405. const ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
  6406. for (int i00 = 0; i00 < ne00; i00++) {
  6407. dst_ptr[id] = GGML_BF16_TO_FP32(src0_ptr[i00]);
  6408. id++;
  6409. }
  6410. }
  6411. id += ne00 * (ne01 - ir1);
  6412. }
  6413. }
  6414. } else if (type_traits[dst->type].from_float) {
  6415. ggml_from_float_t const quantize_row_q = type_traits[dst->type].from_float;
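// per-thread f32 scratch row in params->wdata; the CACHE_LINE_SIZE_F32 padding keeps the buffers
// of different threads on separate cache lines; the bf16 row is converted to f32 here before
// being quantized into dst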
  6416. float * src0_f32 = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith;
  6417. size_t id = 0;
  6418. size_t rs = nb0 * (ne00 / ggml_blck_size(dst->type));
  6419. char * dst_ptr = (char *) dst->data;
  6420. for (int i03 = 0; i03 < ne03; i03++) {
  6421. for (int i02 = 0; i02 < ne02; i02++) {
  6422. id += rs * ir0;
  6423. for (int i01 = ir0; i01 < ir1; i01++) {
  6424. const ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
  6425. for (int i00 = 0; i00 < ne00; i00++) {
  6426. src0_f32[i00] = GGML_BF16_TO_FP32(src0_ptr[i00]);
  6427. }
  6428. quantize_row_q(src0_f32, dst_ptr + id, ne00);
  6429. id += rs;
  6430. }
  6431. id += rs * (ne01 - ir1);
  6432. }
  6433. }
  6434. } else {
  6435. GGML_ASSERT(false); // TODO: implement
  6436. }
  6437. } else {
  6438. //printf("%s: this is not optimal - fix me\n", __func__);
  6439. if (dst->type == GGML_TYPE_F32) {
  6440. size_t id = 0;
  6441. float * dst_ptr = (float *) dst->data;
  6442. for (int i03 = 0; i03 < ne03; i03++) {
  6443. for (int i02 = 0; i02 < ne02; i02++) {
  6444. id += ne00 * ir0;
  6445. for (int i01 = ir0; i01 < ir1; i01++) {
  6446. for (int i00 = 0; i00 < ne00; i00++) {
  6447. const ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  6448. dst_ptr[id] = GGML_BF16_TO_FP32(*src0_ptr);
  6449. id++;
  6450. }
  6451. }
  6452. id += ne00 * (ne01 - ir1);
  6453. }
  6454. }
  6455. } else if (dst->type == GGML_TYPE_BF16) {
  6456. size_t id = 0;
  6457. ggml_bf16_t * dst_ptr = (ggml_bf16_t *) dst->data;
  6458. for (int i03 = 0; i03 < ne03; i03++) {
  6459. for (int i02 = 0; i02 < ne02; i02++) {
  6460. id += ne00 * ir0;
  6461. for (int i01 = ir0; i01 < ir1; i01++) {
  6462. for (int i00 = 0; i00 < ne00; i00++) {
  6463. const ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  6464. dst_ptr[id] = *src0_ptr;
  6465. id++;
  6466. }
  6467. }
  6468. id += ne00 * (ne01 - ir1);
  6469. }
  6470. }
  6471. } else if (dst->type == GGML_TYPE_F16) {
  6472. size_t id = 0;
  6473. ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data;
  6474. for (int i03 = 0; i03 < ne03; i03++) {
  6475. for (int i02 = 0; i02 < ne02; i02++) {
  6476. id += ne00 * ir0;
  6477. for (int i01 = ir0; i01 < ir1; i01++) {
  6478. for (int i00 = 0; i00 < ne00; i00++) {
  6479. const ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  6480. dst_ptr[id] = GGML_FP32_TO_FP16(GGML_BF16_TO_FP32(*src0_ptr));
  6481. id++;
  6482. }
  6483. }
  6484. id += ne00 * (ne01 - ir1);
  6485. }
  6486. }
  6487. } else {
  6488. GGML_ASSERT(false); // TODO: implement
  6489. }
  6490. }
  6491. return;
  6492. }
  6493. // dst counters
  6494. int64_t i10 = 0;
  6495. int64_t i11 = 0;
  6496. int64_t i12 = 0;
  6497. int64_t i13 = 0;
  6498. if (dst->type == GGML_TYPE_BF16) {
  6499. for (int64_t i03 = 0; i03 < ne03; i03++) {
  6500. for (int64_t i02 = 0; i02 < ne02; i02++) {
  6501. i10 += ne00 * ir0;
  6502. while (i10 >= ne0) {
  6503. i10 -= ne0;
  6504. if (++i11 == ne1) {
  6505. i11 = 0;
  6506. if (++i12 == ne2) {
  6507. i12 = 0;
  6508. if (++i13 == ne3) {
  6509. i13 = 0;
  6510. }
  6511. }
  6512. }
  6513. }
  6514. for (int64_t i01 = ir0; i01 < ir1; i01++) {
  6515. for (int64_t i00 = 0; i00 < ne00; i00++) {
  6516. const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  6517. char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3);
  6518. memcpy(dst_ptr, src0_ptr, sizeof(ggml_bf16_t));
  6519. if (++i10 == ne00) {
  6520. i10 = 0;
  6521. if (++i11 == ne01) {
  6522. i11 = 0;
  6523. if (++i12 == ne02) {
  6524. i12 = 0;
  6525. if (++i13 == ne03) {
  6526. i13 = 0;
  6527. }
  6528. }
  6529. }
  6530. }
  6531. }
  6532. }
  6533. i10 += ne00 * (ne01 - ir1);
  6534. while (i10 >= ne0) {
  6535. i10 -= ne0;
  6536. if (++i11 == ne1) {
  6537. i11 = 0;
  6538. if (++i12 == ne2) {
  6539. i12 = 0;
  6540. if (++i13 == ne3) {
  6541. i13 = 0;
  6542. }
  6543. }
  6544. }
  6545. }
  6546. }
  6547. }
  6548. } else if (dst->type == GGML_TYPE_F16) {
  6549. for (int64_t i03 = 0; i03 < ne03; i03++) {
  6550. for (int64_t i02 = 0; i02 < ne02; i02++) {
  6551. i10 += ne00 * ir0;
  6552. while (i10 >= ne0) {
  6553. i10 -= ne0;
  6554. if (++i11 == ne1) {
  6555. i11 = 0;
  6556. if (++i12 == ne2) {
  6557. i12 = 0;
  6558. if (++i13 == ne3) {
  6559. i13 = 0;
  6560. }
  6561. }
  6562. }
  6563. }
  6564. for (int64_t i01 = ir0; i01 < ir1; i01++) {
  6565. for (int64_t i00 = 0; i00 < ne00; i00++) {
  6566. const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  6567. char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3);
  6568. *(ggml_fp16_t *) dst_ptr = GGML_FP32_TO_FP16(GGML_BF16_TO_FP32(*(const ggml_bf16_t *) src0_ptr));
  6569. if (++i10 == ne0) {
  6570. i10 = 0;
  6571. if (++i11 == ne1) {
  6572. i11 = 0;
  6573. if (++i12 == ne2) {
  6574. i12 = 0;
  6575. if (++i13 == ne3) {
  6576. i13 = 0;
  6577. }
  6578. }
  6579. }
  6580. }
  6581. }
  6582. }
  6583. i10 += ne00 * (ne01 - ir1);
  6584. while (i10 >= ne0) {
  6585. i10 -= ne0;
  6586. if (++i11 == ne1) {
  6587. i11 = 0;
  6588. if (++i12 == ne2) {
  6589. i12 = 0;
  6590. if (++i13 == ne3) {
  6591. i13 = 0;
  6592. }
  6593. }
  6594. }
  6595. }
  6596. }
  6597. }
  6598. } else if (dst->type == GGML_TYPE_F32) {
  6599. for (int64_t i03 = 0; i03 < ne03; i03++) {
  6600. for (int64_t i02 = 0; i02 < ne02; i02++) {
  6601. i10 += ne00 * ir0;
  6602. while (i10 >= ne0) {
  6603. i10 -= ne0;
  6604. if (++i11 == ne1) {
  6605. i11 = 0;
  6606. if (++i12 == ne2) {
  6607. i12 = 0;
  6608. if (++i13 == ne3) {
  6609. i13 = 0;
  6610. }
  6611. }
  6612. }
  6613. }
  6614. for (int64_t i01 = ir0; i01 < ir1; i01++) {
  6615. for (int64_t i00 = 0; i00 < ne00; i00++) {
  6616. const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  6617. char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3);
  6618. *(float *) dst_ptr = GGML_BF16_TO_FP32(*(const ggml_bf16_t *) src0_ptr);
  6619. if (++i10 == ne0) {
  6620. i10 = 0;
  6621. if (++i11 == ne1) {
  6622. i11 = 0;
  6623. if (++i12 == ne2) {
  6624. i12 = 0;
  6625. if (++i13 == ne3) {
  6626. i13 = 0;
  6627. }
  6628. }
  6629. }
  6630. }
  6631. }
  6632. }
  6633. i10 += ne00 * (ne01 - ir1);
  6634. while (i10 >= ne0) {
  6635. i10 -= ne0;
  6636. if (++i11 == ne1) {
  6637. i11 = 0;
  6638. if (++i12 == ne2) {
  6639. i12 = 0;
  6640. if (++i13 == ne3) {
  6641. i13 = 0;
  6642. }
  6643. }
  6644. }
  6645. }
  6646. }
  6647. }
  6648. } else {
  6649. GGML_ASSERT(false); // TODO: implement
  6650. }
  6651. }
  6652. static void ggml_compute_forward_dup_f32(
  6653. const struct ggml_compute_params * params,
  6654. struct ggml_tensor * dst) {
  6655. const struct ggml_tensor * src0 = dst->src[0];
  6656. GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));
  6657. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  6658. return;
  6659. }
  6660. GGML_TENSOR_UNARY_OP_LOCALS
  6661. const int ith = params->ith; // thread index
  6662. const int nth = params->nth; // number of threads
  6663. if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst) && src0->type == dst->type) {
  6664. ggml_compute_forward_dup_same_cont(params, dst);
  6665. return;
  6666. }
  6667. // parallelize by rows
  6668. const int nr = ne01;
  6669. // number of rows per thread
  6670. const int dr = (nr + nth - 1) / nth;
  6671. // row range for this thread
  6672. const int ir0 = dr * ith;
  6673. const int ir1 = MIN(ir0 + dr, nr);
  6674. if (src0->type == dst->type &&
  6675. ne00 == ne0 &&
  6676. nb00 == ggml_type_size(src0->type) && nb0 == ggml_type_size(dst->type)) {
  6677. // copy by rows
  6678. const size_t rs = ne00*nb00;
  6679. for (int64_t i03 = 0; i03 < ne03; i03++) {
  6680. for (int64_t i02 = 0; i02 < ne02; i02++) {
  6681. for (int64_t i01 = ir0; i01 < ir1; i01++) {
  6682. memcpy(
  6683. ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3),
  6684. ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03),
  6685. rs);
  6686. }
  6687. }
  6688. }
  6689. return;
  6690. }
  6691. if (ggml_is_contiguous(dst)) {
  6692. // TODO: simplify
  6693. if (nb00 == sizeof(float)) {
  6694. if (dst->type == GGML_TYPE_F32) {
  6695. size_t id = 0;
  6696. const size_t rs = ne00 * nb00;
  6697. char * dst_ptr = (char *) dst->data;
  6698. for (int i03 = 0; i03 < ne03; i03++) {
  6699. for (int i02 = 0; i02 < ne02; i02++) {
  6700. id += rs * ir0;
  6701. for (int i01 = ir0; i01 < ir1; i01++) {
  6702. const char * src0_ptr = (char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03;
  6703. memcpy(dst_ptr + id, src0_ptr, rs);
  6704. id += rs;
  6705. }
  6706. id += rs * (ne01 - ir1);
  6707. }
  6708. }
  6709. } else if (type_traits[dst->type].from_float) {
  6710. ggml_from_float_t const quantize_row_q = type_traits[dst->type].from_float;
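// f32 rows can be fed straight to the row quantizer, so unlike the f16/bf16 paths no
// intermediate f32 scratch buffer is needed here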
  6711. size_t id = 0;
  6712. size_t rs = nb0 * (ne00 / ggml_blck_size(dst->type));
  6713. char * dst_ptr = (char *) dst->data;
  6714. for (int i03 = 0; i03 < ne03; i03++) {
  6715. for (int i02 = 0; i02 < ne02; i02++) {
  6716. id += rs * ir0;
  6717. for (int i01 = ir0; i01 < ir1; i01++) {
  6718. const float * src0_ptr = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
  6719. quantize_row_q(src0_ptr, dst_ptr + id, ne00);
  6720. id += rs;
  6721. }
  6722. id += rs * (ne01 - ir1);
  6723. }
  6724. }
  6725. } else {
  6726. GGML_ASSERT(false); // TODO: implement
  6727. }
  6728. } else {
  6729. //printf("%s: this is not optimal - fix me\n", __func__);
  6730. if (dst->type == GGML_TYPE_F32) {
  6731. size_t id = 0;
  6732. float * dst_ptr = (float *) dst->data;
  6733. for (int i03 = 0; i03 < ne03; i03++) {
  6734. for (int i02 = 0; i02 < ne02; i02++) {
  6735. id += ne00 * ir0;
  6736. for (int i01 = ir0; i01 < ir1; i01++) {
  6737. for (int i00 = 0; i00 < ne00; i00++) {
  6738. const float * src0_ptr = (float *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  6739. dst_ptr[id] = *src0_ptr;
  6740. id++;
  6741. }
  6742. }
  6743. id += ne00 * (ne01 - ir1);
  6744. }
  6745. }
  6746. } else if (dst->type == GGML_TYPE_F16) {
  6747. size_t id = 0;
  6748. ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data;
  6749. for (int i03 = 0; i03 < ne03; i03++) {
  6750. for (int i02 = 0; i02 < ne02; i02++) {
  6751. id += ne00 * ir0;
  6752. for (int i01 = ir0; i01 < ir1; i01++) {
  6753. for (int i00 = 0; i00 < ne00; i00++) {
  6754. const float * src0_ptr = (float *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  6755. dst_ptr[id] = GGML_FP32_TO_FP16(*src0_ptr);
  6756. id++;
  6757. }
  6758. }
  6759. id += ne00 * (ne01 - ir1);
  6760. }
  6761. }
  6762. } else if (dst->type == GGML_TYPE_BF16) {
  6763. size_t id = 0;
  6764. ggml_bf16_t * dst_ptr = (ggml_bf16_t *) dst->data;
  6765. for (int i03 = 0; i03 < ne03; i03++) {
  6766. for (int i02 = 0; i02 < ne02; i02++) {
  6767. id += ne00 * ir0;
  6768. for (int i01 = ir0; i01 < ir1; i01++) {
  6769. for (int i00 = 0; i00 < ne00; i00++) {
  6770. const float * src0_ptr = (float *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  6771. dst_ptr[id] = GGML_FP32_TO_BF16(*src0_ptr);
  6772. id++;
  6773. }
  6774. }
  6775. id += ne00 * (ne01 - ir1);
  6776. }
  6777. }
  6778. } else {
  6779. GGML_ASSERT(false); // TODO: implement
  6780. }
  6781. }
  6782. return;
  6783. }
  6784. // dst counters
  6785. int64_t i10 = 0;
  6786. int64_t i11 = 0;
  6787. int64_t i12 = 0;
  6788. int64_t i13 = 0;
  6789. if (dst->type == GGML_TYPE_F32) {
  6790. for (int64_t i03 = 0; i03 < ne03; i03++) {
  6791. for (int64_t i02 = 0; i02 < ne02; i02++) {
  6792. i10 += ne00 * ir0;
  6793. while (i10 >= ne0) {
  6794. i10 -= ne0;
  6795. if (++i11 == ne1) {
  6796. i11 = 0;
  6797. if (++i12 == ne2) {
  6798. i12 = 0;
  6799. if (++i13 == ne3) {
  6800. i13 = 0;
  6801. }
  6802. }
  6803. }
  6804. }
  6805. for (int64_t i01 = ir0; i01 < ir1; i01++) {
  6806. for (int64_t i00 = 0; i00 < ne00; i00++) {
  6807. const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  6808. char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3);
  6809. memcpy(dst_ptr, src0_ptr, sizeof(float));
  6810. if (++i10 == ne0) {
  6811. i10 = 0;
  6812. if (++i11 == ne1) {
  6813. i11 = 0;
  6814. if (++i12 == ne2) {
  6815. i12 = 0;
  6816. if (++i13 == ne3) {
  6817. i13 = 0;
  6818. }
  6819. }
  6820. }
  6821. }
  6822. }
  6823. }
  6824. i10 += ne00 * (ne01 - ir1);
  6825. while (i10 >= ne0) {
  6826. i10 -= ne0;
  6827. if (++i11 == ne1) {
  6828. i11 = 0;
  6829. if (++i12 == ne2) {
  6830. i12 = 0;
  6831. if (++i13 == ne3) {
  6832. i13 = 0;
  6833. }
  6834. }
  6835. }
  6836. }
  6837. }
  6838. }
  6839. } else if (dst->type == GGML_TYPE_F16) {
  6840. for (int64_t i03 = 0; i03 < ne03; i03++) {
  6841. for (int64_t i02 = 0; i02 < ne02; i02++) {
  6842. i10 += ne00 * ir0;
  6843. while (i10 >= ne0) {
  6844. i10 -= ne0;
  6845. if (++i11 == ne1) {
  6846. i11 = 0;
  6847. if (++i12 == ne2) {
  6848. i12 = 0;
  6849. if (++i13 == ne3) {
  6850. i13 = 0;
  6851. }
  6852. }
  6853. }
  6854. }
  6855. for (int64_t i01 = ir0; i01 < ir1; i01++) {
  6856. for (int64_t i00 = 0; i00 < ne00; i00++) {
  6857. const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  6858. char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3);
  6859. *(ggml_fp16_t *) dst_ptr = GGML_FP32_TO_FP16(*(const float *) src0_ptr);
  6860. if (++i10 == ne0) {
  6861. i10 = 0;
  6862. if (++i11 == ne1) {
  6863. i11 = 0;
  6864. if (++i12 == ne2) {
  6865. i12 = 0;
  6866. if (++i13 == ne3) {
  6867. i13 = 0;
  6868. }
  6869. }
  6870. }
  6871. }
  6872. }
  6873. }
  6874. i10 += ne00 * (ne01 - ir1);
  6875. while (i10 >= ne0) {
  6876. i10 -= ne0;
  6877. if (++i11 == ne1) {
  6878. i11 = 0;
  6879. if (++i12 == ne2) {
  6880. i12 = 0;
  6881. if (++i13 == ne3) {
  6882. i13 = 0;
  6883. }
  6884. }
  6885. }
  6886. }
  6887. }
  6888. }
  6889. } else if (dst->type == GGML_TYPE_BF16) {
  6890. for (int64_t i03 = 0; i03 < ne03; i03++) {
  6891. for (int64_t i02 = 0; i02 < ne02; i02++) {
  6892. i10 += ne00 * ir0;
  6893. while (i10 >= ne0) {
  6894. i10 -= ne0;
  6895. if (++i11 == ne1) {
  6896. i11 = 0;
  6897. if (++i12 == ne2) {
  6898. i12 = 0;
  6899. if (++i13 == ne3) {
  6900. i13 = 0;
  6901. }
  6902. }
  6903. }
  6904. }
  6905. for (int64_t i01 = ir0; i01 < ir1; i01++) {
  6906. for (int64_t i00 = 0; i00 < ne00; i00++) {
  6907. const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  6908. char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3);
  6909. *(ggml_bf16_t *) dst_ptr = GGML_FP32_TO_BF16(*(const float *) src0_ptr);
  6910. if (++i10 == ne0) {
  6911. i10 = 0;
  6912. if (++i11 == ne1) {
  6913. i11 = 0;
  6914. if (++i12 == ne2) {
  6915. i12 = 0;
  6916. if (++i13 == ne3) {
  6917. i13 = 0;
  6918. }
  6919. }
  6920. }
  6921. }
  6922. }
  6923. }
  6924. i10 += ne00 * (ne01 - ir1);
  6925. while (i10 >= ne0) {
  6926. i10 -= ne0;
  6927. if (++i11 == ne1) {
  6928. i11 = 0;
  6929. if (++i12 == ne2) {
  6930. i12 = 0;
  6931. if (++i13 == ne3) {
  6932. i13 = 0;
  6933. }
  6934. }
  6935. }
  6936. }
  6937. }
  6938. }
  6939. } else {
  6940. GGML_ASSERT(false); // TODO: implement
  6941. }
  6942. }
6943. // A simplified version of ggml_compute_forward_dup that doesn't do float upcasting, just a plain old memcpy.
  6944. static void ggml_compute_forward_dup_bytes(
  6945. const struct ggml_compute_params * params,
  6946. struct ggml_tensor * dst) {
  6947. const struct ggml_tensor * src0 = dst->src[0];
  6948. GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));
  6949. GGML_ASSERT(src0->type == dst->type);
  6950. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  6951. return;
  6952. }
  6953. if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst)) {
  6954. ggml_compute_forward_dup_same_cont(params, dst);
  6955. return;
  6956. }
  6957. GGML_TENSOR_UNARY_OP_LOCALS;
  6958. const size_t type_size = ggml_type_size(src0->type);
  6959. const int ith = params->ith; // thread index
  6960. const int nth = params->nth; // number of threads
  6961. // parallelize by rows
  6962. const int nr = ne01;
  6963. // number of rows per thread
  6964. const int dr = (nr + nth - 1) / nth;
  6965. // row range for this thread
  6966. const int ir0 = dr * ith;
  6967. const int ir1 = MIN(ir0 + dr, nr);
  6968. if (src0->type == dst->type &&
  6969. ne00 == ne0 &&
  6970. nb00 == type_size && nb0 == type_size) {
  6971. // copy by rows
  6972. const size_t rs = ne00 * type_size;
  6973. for (int64_t i03 = 0; i03 < ne03; i03++) {
  6974. for (int64_t i02 = 0; i02 < ne02; i02++) {
  6975. for (int64_t i01 = ir0; i01 < ir1; i01++) {
  6976. memcpy(
  6977. ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3),
  6978. ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03),
  6979. rs);
  6980. }
  6981. }
  6982. }
  6983. return;
  6984. }
  6985. if (ggml_is_contiguous(dst)) {
  6986. size_t id = 0;
  6987. char * dst_ptr = (char *) dst->data;
  6988. const size_t rs = ne00 * type_size;
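// when src0 rows are densely packed (nb00 == type_size) copy whole rows at once,
// otherwise fall back to copying one element at a time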
  6989. if (nb00 == type_size) {
6990. // src0 is contiguous on first dimension, copy by rows
  6991. for (int64_t i03 = 0; i03 < ne03; i03++) {
  6992. for (int64_t i02 = 0; i02 < ne02; i02++) {
  6993. id += rs * ir0;
  6994. for (int64_t i01 = ir0; i01 < ir1; i01++) {
  6995. const char * src0_ptr = (char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03;
  6996. memcpy(dst_ptr + id, src0_ptr, rs);
  6997. id += rs;
  6998. }
  6999. id += rs * (ne01 - ir1);
  7000. }
  7001. }
  7002. } else {
  7003. //printf("%s: this is not optimal - fix me\n", __func__);
  7004. for (int64_t i03 = 0; i03 < ne03; i03++) {
  7005. for (int64_t i02 = 0; i02 < ne02; i02++) {
  7006. id += rs * ir0;
  7007. for (int64_t i01 = ir0; i01 < ir1; i01++) {
  7008. for (int64_t i00 = 0; i00 < ne00; i00++) {
  7009. const char * src0_ptr = (char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03;
  7010. memcpy(dst_ptr + id, src0_ptr, type_size);
  7011. id += type_size;
  7012. }
  7013. }
  7014. id += rs * (ne01 - ir1);
  7015. }
  7016. }
  7017. }
  7018. return;
  7019. }
  7020. // dst counters
  7021. int64_t i10 = 0;
  7022. int64_t i11 = 0;
  7023. int64_t i12 = 0;
  7024. int64_t i13 = 0;
  7025. for (int64_t i03 = 0; i03 < ne03; i03++) {
  7026. for (int64_t i02 = 0; i02 < ne02; i02++) {
  7027. i10 += ne00 * ir0;
  7028. while (i10 >= ne0) {
  7029. i10 -= ne0;
  7030. if (++i11 == ne1) {
  7031. i11 = 0;
  7032. if (++i12 == ne2) {
  7033. i12 = 0;
  7034. if (++i13 == ne3) {
  7035. i13 = 0;
  7036. }
  7037. }
  7038. }
  7039. }
  7040. for (int64_t i01 = ir0; i01 < ir1; i01++) {
  7041. for (int64_t i00 = 0; i00 < ne00; i00++) {
  7042. const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  7043. char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3);
  7044. memcpy(dst_ptr, src0_ptr, type_size);
  7045. if (++i10 == ne0) {
  7046. i10 = 0;
  7047. if (++i11 == ne1) {
  7048. i11 = 0;
  7049. if (++i12 == ne2) {
  7050. i12 = 0;
  7051. if (++i13 == ne3) {
  7052. i13 = 0;
  7053. }
  7054. }
  7055. }
  7056. }
  7057. }
  7058. }
  7059. i10 += ne00 * (ne01 - ir1);
  7060. while (i10 >= ne0) {
  7061. i10 -= ne0;
  7062. if (++i11 == ne1) {
  7063. i11 = 0;
  7064. if (++i12 == ne2) {
  7065. i12 = 0;
  7066. if (++i13 == ne3) {
  7067. i13 = 0;
  7068. }
  7069. }
  7070. }
  7071. }
  7072. }
  7073. }
  7074. }
  7075. static void ggml_compute_forward_dup(
  7076. const struct ggml_compute_params * params,
  7077. struct ggml_tensor * dst) {
  7078. const struct ggml_tensor * src0 = dst->src[0];
  7079. if (src0->type == dst->type) {
  7080. ggml_compute_forward_dup_bytes(params, dst);
  7081. return;
  7082. }
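// same-type duplication (including quantized types) is fully handled by the byte copy above;
// only type conversions reach the per-type kernels below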
  7083. switch (src0->type) {
  7084. case GGML_TYPE_F16:
  7085. {
  7086. ggml_compute_forward_dup_f16(params, dst);
  7087. } break;
  7088. case GGML_TYPE_BF16:
  7089. {
  7090. ggml_compute_forward_dup_bf16(params, dst);
  7091. } break;
  7092. case GGML_TYPE_F32:
  7093. {
  7094. ggml_compute_forward_dup_f32(params, dst);
  7095. } break;
  7096. default:
  7097. {
  7098. GGML_ASSERT(false);
  7099. } break;
  7100. }
  7101. }
  7102. // ggml_compute_forward_add
  7103. static void ggml_compute_forward_add_f32(
  7104. const struct ggml_compute_params * params,
  7105. struct ggml_tensor * dst) {
  7106. const struct ggml_tensor * src0 = dst->src[0];
  7107. const struct ggml_tensor * src1 = dst->src[1];
  7108. GGML_ASSERT(ggml_can_repeat(src1, src0) && ggml_are_same_shape(src0, dst));
  7109. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  7110. return;
  7111. }
  7112. const int ith = params->ith;
  7113. const int nth = params->nth;
  7114. #ifdef GGML_USE_CLBLAST
  7115. if (src1->backend == GGML_BACKEND_TYPE_GPU) {
7116. // TODO: make the OpenCL kernel support full broadcast
  7117. GGML_ASSERT(ggml_can_repeat_rows(src1, src0));
  7118. if (ith == 0) {
  7119. ggml_cl_add(src0, src1, dst);
  7120. }
  7121. return;
  7122. }
  7123. #endif
  7124. const int nr = ggml_nrows(src0);
  7125. GGML_TENSOR_BINARY_OP_LOCALS
  7126. GGML_ASSERT( nb0 == sizeof(float));
  7127. GGML_ASSERT(nb00 == sizeof(float));
  7128. // rows per thread
  7129. const int dr = (nr + nth - 1)/nth;
  7130. // row range for this thread
  7131. const int ir0 = dr*ith;
  7132. const int ir1 = MIN(ir0 + dr, nr);
  7133. if (nb10 == sizeof(float)) {
  7134. for (int ir = ir0; ir < ir1; ++ir) {
  7135. // src1 is broadcastable across src0 and dst in i1, i2, i3
  7136. const int64_t i03 = ir/(ne02*ne01);
  7137. const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
  7138. const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);
  7139. const int64_t i13 = i03 % ne13;
  7140. const int64_t i12 = i02 % ne12;
  7141. const int64_t i11 = i01 % ne11;
  7142. const int64_t nr0 = ne00 / ne10;
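// nr0 is how many times the src1 row tiles along dim 0 when ne10 < ne00; together with the
// modulo on i11/i12/i13 above this implements the broadcast of src1 over src0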
  7143. float * dst_ptr = (float *) ((char *) dst->data + i03*nb3 + i02*nb2 + i01*nb1 );
  7144. float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);
  7145. float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11);
  7146. for (int64_t r = 0; r < nr0; ++r) {
  7147. #ifdef GGML_USE_ACCELERATE
  7148. vDSP_vadd(src0_ptr + r*ne10, 1, src1_ptr, 1, dst_ptr + r*ne10, 1, ne10);
  7149. #else
  7150. ggml_vec_add_f32(ne10, dst_ptr + r*ne10, src0_ptr + r*ne10, src1_ptr);
  7151. #endif
  7152. }
  7153. }
  7154. } else {
  7155. // src1 is not contiguous
  7156. for (int ir = ir0; ir < ir1; ++ir) {
  7157. // src1 is broadcastable across src0 and dst in i1, i2, i3
  7158. const int64_t i03 = ir/(ne02*ne01);
  7159. const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
  7160. const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);
  7161. const int64_t i13 = i03 % ne13;
  7162. const int64_t i12 = i02 % ne12;
  7163. const int64_t i11 = i01 % ne11;
  7164. float * dst_ptr = (float *) ((char *) dst->data + i03*nb3 + i02*nb2 + i01*nb1 );
  7165. float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);
  7166. for (int64_t i0 = 0; i0 < ne0; ++i0) {
  7167. const int64_t i10 = i0 % ne10;
  7168. float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + i10*nb10);
  7169. dst_ptr[i0] = src0_ptr[i0] + *src1_ptr;
  7170. }
  7171. }
  7172. }
  7173. }
  7174. static void ggml_compute_forward_add_f16_f32(
  7175. const struct ggml_compute_params * params,
  7176. struct ggml_tensor * dst) {
  7177. const struct ggml_tensor * src0 = dst->src[0];
  7178. const struct ggml_tensor * src1 = dst->src[1];
  7179. GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
  7180. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  7181. return;
  7182. }
  7183. const int ith = params->ith;
  7184. const int nth = params->nth;
  7185. const int nr = ggml_nrows(src0);
  7186. GGML_TENSOR_BINARY_OP_LOCALS
  7187. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  7188. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  7189. if (dst->type == GGML_TYPE_F32) {
  7190. GGML_ASSERT( nb0 == sizeof(float));
  7191. }
  7192. else {
  7193. GGML_ASSERT(dst->type == GGML_TYPE_F16);
  7194. GGML_ASSERT( nb0 == sizeof(ggml_fp16_t));
  7195. }
  7196. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  7197. // rows per thread
  7198. const int dr = (nr + nth - 1)/nth;
  7199. // row range for this thread
  7200. const int ir0 = dr*ith;
  7201. const int ir1 = MIN(ir0 + dr, nr);
  7202. if (nb10 == sizeof(float)) {
  7203. if (dst->type == GGML_TYPE_F16) {
  7204. for (int ir = ir0; ir < ir1; ++ir) {
  7205. // src0, src1 and dst are same shape => same indices
  7206. const int i3 = ir/(ne2*ne1);
  7207. const int i2 = (ir - i3*ne2*ne1)/ne1;
  7208. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  7209. ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1);
  7210. ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
  7211. float * src1_ptr = (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11);
  7212. for (int i = 0; i < ne0; i++) {
  7213. dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + src1_ptr[i]);
  7214. }
  7215. }
  7216. } else {
  7217. for (int ir = ir0; ir < ir1; ++ir) {
  7218. // src0, src1 and dst are same shape => same indices
  7219. const int i3 = ir/(ne2*ne1);
  7220. const int i2 = (ir - i3*ne2*ne1)/ne1;
  7221. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  7222. float * dst_ptr = (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1);
  7223. ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
  7224. float * src1_ptr = (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11);
  7225. for (int i = 0; i < ne0; i++) {
  7226. dst_ptr[i] = GGML_FP16_TO_FP32(src0_ptr[i]) + src1_ptr[i];
  7227. }
  7228. }
  7229. }
  7230. }
  7231. else {
  7232. // src1 is not contiguous
  7233. GGML_ASSERT(false);
  7234. }
  7235. }
  7236. static void ggml_compute_forward_add_bf16_f32(
  7237. const struct ggml_compute_params * params,
  7238. struct ggml_tensor * dst) {
  7239. const struct ggml_tensor * src0 = dst->src[0];
  7240. const struct ggml_tensor * src1 = dst->src[1];
  7241. GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
  7242. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  7243. return;
  7244. }
  7245. const int ith = params->ith;
  7246. const int nth = params->nth;
  7247. const int nr = ggml_nrows(src0);
  7248. GGML_TENSOR_BINARY_OP_LOCALS
  7249. GGML_ASSERT(src0->type == GGML_TYPE_BF16);
  7250. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  7251. if (dst->type == GGML_TYPE_F32) {
  7252. GGML_ASSERT( nb0 == sizeof(float));
  7253. }
  7254. else {
  7255. GGML_ASSERT(dst->type == GGML_TYPE_BF16);
  7256. GGML_ASSERT( nb0 == sizeof(ggml_bf16_t));
  7257. }
  7258. GGML_ASSERT(nb00 == sizeof(ggml_bf16_t));
  7259. // rows per thread
  7260. const int dr = (nr + nth - 1)/nth;
  7261. // row range for this thread
  7262. const int ir0 = dr*ith;
  7263. const int ir1 = MIN(ir0 + dr, nr);
  7264. if (nb10 == sizeof(float)) {
  7265. if (dst->type == GGML_TYPE_BF16) {
  7266. for (int ir = ir0; ir < ir1; ++ir) {
  7267. // src0, src1 and dst are same shape => same indices
  7268. const int i3 = ir/(ne2*ne1);
  7269. const int i2 = (ir - i3*ne2*ne1)/ne1;
  7270. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  7271. ggml_bf16_t * dst_ptr = (ggml_bf16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1);
  7272. ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
  7273. float * src1_ptr = (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11);
  7274. for (int i = 0; i < ne0; i++) {
  7275. dst_ptr[i] = GGML_FP32_TO_BF16(GGML_BF16_TO_FP32(src0_ptr[i]) + src1_ptr[i]);
  7276. }
  7277. }
  7278. } else {
  7279. for (int ir = ir0; ir < ir1; ++ir) {
  7280. // src0, src1 and dst are same shape => same indices
  7281. const int i3 = ir/(ne2*ne1);
  7282. const int i2 = (ir - i3*ne2*ne1)/ne1;
  7283. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  7284. float * dst_ptr = (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1);
  7285. ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
  7286. float * src1_ptr = (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11);
  7287. for (int i = 0; i < ne0; i++) {
  7288. dst_ptr[i] = GGML_BF16_TO_FP32(src0_ptr[i]) + src1_ptr[i];
  7289. }
  7290. }
  7291. }
  7292. }
  7293. else {
  7294. // src1 is not contiguous
  7295. GGML_ASSERT(false);
  7296. }
  7297. }
  7298. static void ggml_compute_forward_add_f16_f16(
  7299. const struct ggml_compute_params * params,
  7300. struct ggml_tensor * dst) {
  7301. const struct ggml_tensor * src0 = dst->src[0];
  7302. const struct ggml_tensor * src1 = dst->src[1];
  7303. GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
  7304. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  7305. return;
  7306. }
  7307. const int ith = params->ith;
  7308. const int nth = params->nth;
  7309. const int nr = ggml_nrows(src0);
  7310. GGML_TENSOR_BINARY_OP_LOCALS
  7311. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  7312. GGML_ASSERT(src1->type == GGML_TYPE_F16);
  7313. GGML_ASSERT(dst->type == GGML_TYPE_F16);
  7314. GGML_ASSERT( nb0 == sizeof(ggml_fp16_t));
  7315. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  7316. // rows per thread
  7317. const int dr = (nr + nth - 1)/nth;
  7318. // row range for this thread
  7319. const int ir0 = dr*ith;
  7320. const int ir1 = MIN(ir0 + dr, nr);
  7321. if (nb10 == sizeof(ggml_fp16_t)) {
  7322. for (int ir = ir0; ir < ir1; ++ir) {
  7323. // src0, src1 and dst are same shape => same indices
  7324. const int i3 = ir/(ne2*ne1);
  7325. const int i2 = (ir - i3*ne2*ne1)/ne1;
  7326. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  7327. ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1);
  7328. ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
  7329. ggml_fp16_t * src1_ptr = (ggml_fp16_t *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11);
  7330. for (int i = 0; i < ne0; i++) {
  7331. dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + GGML_FP16_TO_FP32(src1_ptr[i]));
  7332. }
  7333. }
  7334. }
  7335. else {
  7336. // src1 is not contiguous
  7337. GGML_ASSERT(false);
  7338. }
  7339. }
  7340. static void ggml_compute_forward_add_bf16_bf16(
  7341. const struct ggml_compute_params * params,
  7342. struct ggml_tensor * dst) {
  7343. const struct ggml_tensor * src0 = dst->src[0];
  7344. const struct ggml_tensor * src1 = dst->src[1];
  7345. GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
  7346. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  7347. return;
  7348. }
  7349. const int ith = params->ith;
  7350. const int nth = params->nth;
  7351. const int nr = ggml_nrows(src0);
  7352. GGML_TENSOR_BINARY_OP_LOCALS
  7353. GGML_ASSERT(src0->type == GGML_TYPE_BF16);
  7354. GGML_ASSERT(src1->type == GGML_TYPE_BF16);
  7355. GGML_ASSERT(dst->type == GGML_TYPE_BF16);
  7356. GGML_ASSERT( nb0 == sizeof(ggml_bf16_t));
  7357. GGML_ASSERT(nb00 == sizeof(ggml_bf16_t));
  7358. // rows per thread
  7359. const int dr = (nr + nth - 1)/nth;
  7360. // row range for this thread
  7361. const int ir0 = dr*ith;
  7362. const int ir1 = MIN(ir0 + dr, nr);
  7363. if (nb10 == sizeof(ggml_bf16_t)) {
  7364. for (int ir = ir0; ir < ir1; ++ir) {
  7365. // src0, src1 and dst are same shape => same indices
  7366. const int i3 = ir/(ne2*ne1);
  7367. const int i2 = (ir - i3*ne2*ne1)/ne1;
  7368. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  7369. ggml_bf16_t * dst_ptr = (ggml_bf16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1);
  7370. ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
  7371. ggml_bf16_t * src1_ptr = (ggml_bf16_t *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11);
  7372. for (int i = 0; i < ne0; i++) {
  7373. dst_ptr[i] = GGML_FP32_TO_BF16(GGML_BF16_TO_FP32(src0_ptr[i]) + GGML_BF16_TO_FP32(src1_ptr[i]));
  7374. }
  7375. }
  7376. }
  7377. else {
  7378. // src1 is not contiguous
  7379. GGML_ASSERT(false);
  7380. }
  7381. }
  7382. static void ggml_compute_forward_add_q_f32(
  7383. const struct ggml_compute_params * params,
  7384. struct ggml_tensor * dst) {
  7385. const struct ggml_tensor * src0 = dst->src[0];
  7386. const struct ggml_tensor * src1 = dst->src[1];
  7387. GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
  7388. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  7389. return;
  7390. }
  7391. const int nr = ggml_nrows(src0);
  7392. GGML_TENSOR_BINARY_OP_LOCALS
  7393. const int ith = params->ith;
  7394. const int nth = params->nth;
  7395. const enum ggml_type type = src0->type;
  7396. const enum ggml_type dtype = dst->type;
  7397. ggml_to_float_t const dequantize_row_q = type_traits[type].to_float;
  7398. ggml_from_float_t const quantize_row_q = type_traits[dtype].from_float;
  7399. // we don't support permuted src0 or src1
  7400. GGML_ASSERT(nb00 == ggml_type_size(type));
  7401. GGML_ASSERT(nb10 == sizeof(float));
  7402. // dst cannot be transposed or permuted
  7403. GGML_ASSERT(nb0 <= nb1);
  7404. GGML_ASSERT(nb1 <= nb2);
  7405. GGML_ASSERT(nb2 <= nb3);
  7406. GGML_ASSERT(ggml_is_quantized(src0->type));
  7407. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  7408. // rows per thread
  7409. const int dr = (nr + nth - 1)/nth;
  7410. // row range for this thread
  7411. const int ir0 = dr*ith;
  7412. const int ir1 = MIN(ir0 + dr, nr);
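// per-thread f32 scratch row: src0 is dequantized into it, src1 is accumulated on top, and the
// result is re-quantized (or copied) into dst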
  7413. float * wdata = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith;
  7414. for (int ir = ir0; ir < ir1; ++ir) {
  7415. // src0 indices
  7416. const int i03 = ir/(ne02*ne01);
  7417. const int i02 = (ir - i03*ne02*ne01)/ne01;
  7418. const int i01 = (ir - i03*ne02*ne01 - i02*ne01);
  7419. // src1 and dst are same shape as src0 => same indices
  7420. const int i13 = i03;
  7421. const int i12 = i02;
  7422. const int i11 = i01;
  7423. const int i3 = i03;
  7424. const int i2 = i02;
  7425. const int i1 = i01;
  7426. void * src0_row = (void *) ((char *) src0->data + (i01*nb01 + i02*nb02 + i03*nb03));
  7427. float * src1_row = (float *)((char *) src1->data + (i11*nb11 + i12*nb12 + i13*nb13));
  7428. void * dst_row = (void *) ((char *) dst->data + ( i1*nb1 + i2*nb2 + i3*nb3));
  7429. assert(ne00 % 32 == 0);
7430. // dequantize row from src0 to temp buffer
  7431. dequantize_row_q(src0_row, wdata, ne00);
  7432. // add src1
  7433. ggml_vec_acc_f32(ne00, wdata, src1_row);
  7434. // quantize row to dst
  7435. if (quantize_row_q != NULL) {
  7436. quantize_row_q(wdata, dst_row, ne00);
  7437. } else {
  7438. memcpy(dst_row, wdata, ne0*nb0);
  7439. }
  7440. }
  7441. }
  7442. static void ggml_compute_forward_add(
  7443. const struct ggml_compute_params * params,
  7444. struct ggml_tensor * dst) {
  7445. const struct ggml_tensor * src0 = dst->src[0];
  7446. const struct ggml_tensor * src1 = dst->src[1];
  7447. switch (src0->type) {
  7448. case GGML_TYPE_F32:
  7449. {
  7450. if (src1->type == GGML_TYPE_F32) {
  7451. ggml_compute_forward_add_f32(params, dst);
  7452. }
  7453. else {
  7454. GGML_ASSERT(false);
  7455. }
  7456. } break;
  7457. case GGML_TYPE_F16:
  7458. {
  7459. if (src1->type == GGML_TYPE_F16) {
  7460. ggml_compute_forward_add_f16_f16(params, dst);
  7461. }
  7462. else if (src1->type == GGML_TYPE_F32) {
  7463. ggml_compute_forward_add_f16_f32(params, dst);
  7464. }
  7465. else {
  7466. GGML_ASSERT(false);
  7467. }
  7468. } break;
  7469. case GGML_TYPE_BF16:
  7470. {
  7471. if (src1->type == GGML_TYPE_BF16) {
  7472. ggml_compute_forward_add_bf16_bf16(params, dst);
  7473. }
  7474. else if (src1->type == GGML_TYPE_F32) {
  7475. ggml_compute_forward_add_bf16_f32(params, dst);
  7476. }
  7477. else {
  7478. GGML_ASSERT(false);
  7479. }
  7480. } break;
  7481. case GGML_TYPE_Q4_0:
  7482. case GGML_TYPE_Q4_1:
  7483. case GGML_TYPE_Q5_0:
  7484. case GGML_TYPE_Q5_1:
  7485. case GGML_TYPE_Q8_0:
  7486. case GGML_TYPE_Q2_K:
  7487. case GGML_TYPE_Q3_K:
  7488. case GGML_TYPE_Q4_K:
  7489. case GGML_TYPE_Q5_K:
  7490. case GGML_TYPE_Q6_K:
  7491. case GGML_TYPE_IQ2_XXS:
  7492. case GGML_TYPE_IQ2_XS:
  7493. case GGML_TYPE_IQ3_XXS:
  7494. case GGML_TYPE_IQ1_S:
  7495. case GGML_TYPE_IQ1_M:
  7496. case GGML_TYPE_IQ4_NL:
  7497. case GGML_TYPE_IQ4_XS:
  7498. case GGML_TYPE_IQ3_S:
  7499. case GGML_TYPE_IQ2_S:
  7500. {
  7501. ggml_compute_forward_add_q_f32(params, dst);
  7502. } break;
  7503. default:
  7504. {
  7505. GGML_ASSERT(false);
  7506. } break;
  7507. }
  7508. }
  7509. // ggml_compute_forward_add1
  7510. static void ggml_compute_forward_add1_f32(
  7511. const struct ggml_compute_params * params,
  7512. struct ggml_tensor * dst) {
  7513. const struct ggml_tensor * src0 = dst->src[0];
  7514. const struct ggml_tensor * src1 = dst->src[1];
  7515. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  7516. GGML_ASSERT(ggml_is_scalar(src1));
  7517. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  7518. return;
  7519. }
  7520. const int ith = params->ith;
  7521. const int nth = params->nth;
  7522. const int nr = ggml_nrows(src0);
  7523. GGML_TENSOR_UNARY_OP_LOCALS
  7524. GGML_ASSERT( nb0 == sizeof(float));
  7525. GGML_ASSERT(nb00 == sizeof(float));
  7526. // rows per thread
  7527. const int dr = (nr + nth - 1)/nth;
  7528. // row range for this thread
  7529. const int ir0 = dr*ith;
  7530. const int ir1 = MIN(ir0 + dr, nr);
  7531. for (int ir = ir0; ir < ir1; ++ir) {
  7532. // src0 and dst are same shape => same indices
  7533. const int i3 = ir/(ne2*ne1);
  7534. const int i2 = (ir - i3*ne2*ne1)/ne1;
  7535. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  7536. #ifdef GGML_USE_ACCELERATE
  7537. UNUSED(ggml_vec_add1_f32);
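// the zero stride on the second operand makes vDSP_vadd broadcast the scalar from src1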
  7538. vDSP_vadd(
  7539. (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01), 1,
  7540. (float *) ((char *) src1->data), 0,
  7541. (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ), 1,
  7542. ne0);
  7543. #else
  7544. ggml_vec_add1_f32(ne0,
  7545. (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ),
  7546. (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01),
  7547. *(float *) src1->data);
  7548. #endif
  7549. }
  7550. }
  7551. static void ggml_compute_forward_add1_f16_f32(
  7552. const struct ggml_compute_params * params,
  7553. struct ggml_tensor * dst) {
  7554. const struct ggml_tensor * src0 = dst->src[0];
  7555. const struct ggml_tensor * src1 = dst->src[1];
  7556. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  7557. GGML_ASSERT(ggml_is_scalar(src1));
  7558. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  7559. return;
  7560. }
  7561. // scalar to add
  7562. const float v = *(float *) src1->data;
  7563. const int ith = params->ith;
  7564. const int nth = params->nth;
  7565. const int nr = ggml_nrows(src0);
  7566. GGML_TENSOR_UNARY_OP_LOCALS
  7567. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  7568. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  7569. GGML_ASSERT(dst->type == GGML_TYPE_F16);
  7570. GGML_ASSERT( nb0 == sizeof(ggml_fp16_t));
  7571. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  7572. // rows per thread
  7573. const int dr = (nr + nth - 1)/nth;
  7574. // row range for this thread
  7575. const int ir0 = dr*ith;
  7576. const int ir1 = MIN(ir0 + dr, nr);
  7577. for (int ir = ir0; ir < ir1; ++ir) {
  7578. // src0 and dst are same shape => same indices
  7579. const int i3 = ir/(ne2*ne1);
  7580. const int i2 = (ir - i3*ne2*ne1)/ne1;
  7581. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  7582. ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 );
  7583. ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
  7584. for (int i = 0; i < ne0; i++) {
  7585. dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + v);
  7586. }
  7587. }
  7588. }
  7589. static void ggml_compute_forward_add1_f16_f16(
  7590. const struct ggml_compute_params * params,
  7591. struct ggml_tensor * dst) {
  7592. const struct ggml_tensor * src0 = dst->src[0];
  7593. const struct ggml_tensor * src1 = dst->src[1];
  7594. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  7595. GGML_ASSERT(ggml_is_scalar(src1));
  7596. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  7597. return;
  7598. }
  7599. // scalar to add
  7600. const float v = GGML_FP16_TO_FP32(*(ggml_fp16_t *) src1->data);
  7601. const int ith = params->ith;
  7602. const int nth = params->nth;
  7603. const int nr = ggml_nrows(src0);
  7604. GGML_TENSOR_UNARY_OP_LOCALS
  7605. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  7606. GGML_ASSERT(src1->type == GGML_TYPE_F16);
  7607. GGML_ASSERT(dst->type == GGML_TYPE_F16);
  7608. GGML_ASSERT( nb0 == sizeof(ggml_fp16_t));
  7609. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  7610. // rows per thread
  7611. const int dr = (nr + nth - 1)/nth;
  7612. // row range for this thread
  7613. const int ir0 = dr*ith;
  7614. const int ir1 = MIN(ir0 + dr, nr);
  7615. for (int ir = ir0; ir < ir1; ++ir) {
  7616. // src0 and dst are same shape => same indices
  7617. const int i3 = ir/(ne2*ne1);
  7618. const int i2 = (ir - i3*ne2*ne1)/ne1;
  7619. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  7620. ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 );
  7621. ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
  7622. for (int i = 0; i < ne0; i++) {
  7623. dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + v);
  7624. }
  7625. }
  7626. }
  7627. static void ggml_compute_forward_add1_q_f32(
  7628. const struct ggml_compute_params * params,
  7629. struct ggml_tensor * dst) {
  7630. const struct ggml_tensor * src0 = dst->src[0];
  7631. const struct ggml_tensor * src1 = dst->src[1];
  7632. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  7633. GGML_ASSERT(ggml_is_scalar(src1));
  7634. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  7635. return;
  7636. }
  7637. // scalar to add
  7638. const float v = *(float *) src1->data;
  7639. const int ith = params->ith;
  7640. const int nth = params->nth;
  7641. const int nr = ggml_nrows(src0);
  7642. GGML_TENSOR_UNARY_OP_LOCALS
  7643. const enum ggml_type type = src0->type;
  7644. ggml_to_float_t const dequantize_row_q = type_traits[type].to_float;
  7645. ggml_from_float_t const quantize_row_q = type_traits[type].from_float;
  7646. // we don't support permuted src0
  7647. GGML_ASSERT(nb00 == ggml_type_size(type));
  7648. // dst cannot be transposed or permuted
  7649. GGML_ASSERT(nb0 <= nb1);
  7650. GGML_ASSERT(nb1 <= nb2);
  7651. GGML_ASSERT(nb2 <= nb3);
  7652. GGML_ASSERT(ggml_is_quantized(src0->type));
  7653. GGML_ASSERT(dst->type == src0->type);
  7654. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  7655. // rows per thread
  7656. const int dr = (nr + nth - 1)/nth;
  7657. // row range for this thread
  7658. const int ir0 = dr*ith;
  7659. const int ir1 = MIN(ir0 + dr, nr);
  7660. float * wdata = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32) * ith;
  7661. for (int ir = ir0; ir < ir1; ++ir) {
  7662. // src0 and dst are same shape => same indices
  7663. const int i3 = ir/(ne2*ne1);
  7664. const int i2 = (ir - i3*ne2*ne1)/ne1;
  7665. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  7666. void * src0_row = (void *) ((char *) src0->data + (i1*nb01 + i2*nb02 + i3*nb03));
7667. void * dst_row = (void *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3 ));
  7668. assert(ne0 % 32 == 0);
7669. // dequantize row from src0 to temp buffer
  7670. dequantize_row_q(src0_row, wdata, ne0);
  7671. // add src1
  7672. ggml_vec_acc1_f32(ne0, wdata, v);
  7673. // quantize row to dst
  7674. quantize_row_q(wdata, dst_row, ne0);
  7675. }
  7676. }
  7677. static void ggml_compute_forward_add1_bf16_f32(
  7678. const struct ggml_compute_params * params,
  7679. struct ggml_tensor * dst) {
  7680. const struct ggml_tensor * src0 = dst->src[0];
  7681. const struct ggml_tensor * src1 = dst->src[1];
  7682. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  7683. GGML_ASSERT(ggml_is_scalar(src1));
  7684. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  7685. return;
  7686. }
  7687. // scalar to add
  7688. const float v = *(float *) src1->data;
  7689. const int ith = params->ith;
  7690. const int nth = params->nth;
  7691. const int nr = ggml_nrows(src0);
  7692. GGML_TENSOR_UNARY_OP_LOCALS
  7693. GGML_ASSERT(src0->type == GGML_TYPE_BF16);
  7694. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  7695. GGML_ASSERT(dst->type == GGML_TYPE_BF16);
  7696. GGML_ASSERT( nb0 == sizeof(ggml_bf16_t));
  7697. GGML_ASSERT(nb00 == sizeof(ggml_bf16_t));
  7698. // rows per thread
  7699. const int dr = (nr + nth - 1)/nth;
  7700. // row range for this thread
  7701. const int ir0 = dr*ith;
  7702. const int ir1 = MIN(ir0 + dr, nr);
  7703. for (int ir = ir0; ir < ir1; ++ir) {
  7704. // src0 and dst are same shape => same indices
  7705. const int i3 = ir/(ne2*ne1);
  7706. const int i2 = (ir - i3*ne2*ne1)/ne1;
  7707. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  7708. ggml_bf16_t * dst_ptr = (ggml_bf16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 );
  7709. ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
  7710. for (int i = 0; i < ne0; i++) {
  7711. dst_ptr[i] = GGML_FP32_TO_BF16(GGML_BF16_TO_FP32(src0_ptr[i]) + v);
  7712. }
  7713. }
  7714. }
  7715. static void ggml_compute_forward_add1_bf16_bf16(
  7716. const struct ggml_compute_params * params,
  7717. struct ggml_tensor * dst) {
  7718. const struct ggml_tensor * src0 = dst->src[0];
  7719. const struct ggml_tensor * src1 = dst->src[1];
  7720. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  7721. GGML_ASSERT(ggml_is_scalar(src1));
  7722. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  7723. return;
  7724. }
  7725. // scalar to add
  7726. const float v = GGML_BF16_TO_FP32(*(ggml_bf16_t *) src1->data);
  7727. const int ith = params->ith;
  7728. const int nth = params->nth;
  7729. const int nr = ggml_nrows(src0);
  7730. GGML_TENSOR_UNARY_OP_LOCALS
  7731. GGML_ASSERT(src0->type == GGML_TYPE_BF16);
  7732. GGML_ASSERT(src1->type == GGML_TYPE_BF16);
  7733. GGML_ASSERT(dst->type == GGML_TYPE_BF16);
  7734. GGML_ASSERT( nb0 == sizeof(ggml_bf16_t));
  7735. GGML_ASSERT(nb00 == sizeof(ggml_bf16_t));
  7736. // rows per thread
  7737. const int dr = (nr + nth - 1)/nth;
  7738. // row range for this thread
  7739. const int ir0 = dr*ith;
  7740. const int ir1 = MIN(ir0 + dr, nr);
  7741. for (int ir = ir0; ir < ir1; ++ir) {
  7742. // src0 and dst are same shape => same indices
  7743. const int i3 = ir/(ne2*ne1);
  7744. const int i2 = (ir - i3*ne2*ne1)/ne1;
  7745. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  7746. ggml_bf16_t * dst_ptr = (ggml_bf16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 );
  7747. ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
  7748. for (int i = 0; i < ne0; i++) {
  7749. dst_ptr[i] = GGML_FP32_TO_BF16(GGML_BF16_TO_FP32(src0_ptr[i]) + v);
  7750. }
  7751. }
  7752. }
  7753. static void ggml_compute_forward_add1(
  7754. const struct ggml_compute_params * params,
  7755. struct ggml_tensor * dst) {
  7756. const struct ggml_tensor * src0 = dst->src[0];
  7757. const struct ggml_tensor * src1 = dst->src[1];
  7758. switch (src0->type) {
  7759. case GGML_TYPE_F32:
  7760. {
  7761. ggml_compute_forward_add1_f32(params, dst);
  7762. } break;
  7763. case GGML_TYPE_F16:
  7764. {
  7765. if (src1->type == GGML_TYPE_F16) {
  7766. ggml_compute_forward_add1_f16_f16(params, dst);
  7767. }
  7768. else if (src1->type == GGML_TYPE_F32) {
  7769. ggml_compute_forward_add1_f16_f32(params, dst);
  7770. }
  7771. else {
  7772. GGML_ASSERT(false);
  7773. }
  7774. } break;
  7775. case GGML_TYPE_BF16:
  7776. {
  7777. if (src1->type == GGML_TYPE_BF16) {
  7778. ggml_compute_forward_add1_bf16_bf16(params, dst);
  7779. }
  7780. else if (src1->type == GGML_TYPE_F32) {
  7781. ggml_compute_forward_add1_bf16_f32(params, dst);
  7782. }
  7783. else {
  7784. GGML_ASSERT(false);
  7785. }
  7786. } break;
  7787. case GGML_TYPE_Q4_0:
  7788. case GGML_TYPE_Q4_1:
  7789. case GGML_TYPE_Q5_0:
  7790. case GGML_TYPE_Q5_1:
  7791. case GGML_TYPE_Q8_0:
  7792. case GGML_TYPE_Q8_1:
  7793. case GGML_TYPE_Q2_K:
  7794. case GGML_TYPE_Q3_K:
  7795. case GGML_TYPE_Q4_K:
  7796. case GGML_TYPE_Q5_K:
  7797. case GGML_TYPE_Q6_K:
  7798. case GGML_TYPE_IQ2_XXS:
  7799. case GGML_TYPE_IQ2_XS:
  7800. case GGML_TYPE_IQ3_XXS:
  7801. case GGML_TYPE_IQ1_S:
  7802. case GGML_TYPE_IQ1_M:
  7803. case GGML_TYPE_IQ4_NL:
  7804. case GGML_TYPE_IQ4_XS:
  7805. case GGML_TYPE_IQ3_S:
  7806. case GGML_TYPE_IQ2_S:
  7807. {
  7808. ggml_compute_forward_add1_q_f32(params, dst);
  7809. } break;
  7810. default:
  7811. {
  7812. GGML_ASSERT(false);
  7813. } break;
  7814. }
  7815. }
  7816. // ggml_compute_forward_acc
  7817. static void ggml_compute_forward_acc_f32(
  7818. const struct ggml_compute_params * params,
  7819. struct ggml_tensor * dst) {
  7820. const struct ggml_tensor * src0 = dst->src[0];
  7821. const struct ggml_tensor * src1 = dst->src[1];
  7822. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  7823. GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));
7824. // view src0 and dst with these strides and data offset in bytes during acc
  7825. // nb0 is implicitly element_size because src0 and dst are contiguous
  7826. size_t nb1 = ((int32_t *) dst->op_params)[0];
  7827. size_t nb2 = ((int32_t *) dst->op_params)[1];
  7828. size_t nb3 = ((int32_t *) dst->op_params)[2];
  7829. size_t offset = ((int32_t *) dst->op_params)[3];
  7830. bool inplace = (bool) ((int32_t *) dst->op_params)[4];
  7831. if (!inplace && (params->type == GGML_TASK_TYPE_INIT)) {
  7832. if (params->ith != 0) {
  7833. return;
  7834. }
  7835. // memcpy needs to be synchronized across threads to avoid race conditions.
  7836. // => do it in INIT phase
  7837. memcpy(
  7838. ((char *) dst->data),
  7839. ((char *) src0->data),
  7840. ggml_nbytes(dst));
  7841. }
  7842. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  7843. return;
  7844. }
  7845. const int ith = params->ith;
  7846. const int nth = params->nth;
  7847. const int nr = ggml_nrows(src1);
  7848. const int nc = src1->ne[0];
  7849. GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne)
  7850. GGML_TENSOR_LOCALS(size_t, nb1, src1, nb)
  7851. // src0 and dst as viewed during acc
  7852. const size_t nb0 = ggml_element_size(src0);
  7853. const size_t nb00 = nb0;
  7854. const size_t nb01 = nb1;
  7855. const size_t nb02 = nb2;
  7856. const size_t nb03 = nb3;
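// src0 and dst are both addressed through the view described by dst->op_params
// (strides nb1..nb3 plus the byte offset), i.e. with the shape of src1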
  7857. GGML_ASSERT(offset + (ne10 == 0 ? 0 : ne10-1)*nb0 + (ne11 == 0 ? 0 : ne11-1)*nb1 + (ne12 == 0 ? 0 : ne12-1)*nb2 + (ne13 == 0 ? 0 : ne13-1)*nb3 < ggml_nbytes(dst));
  7858. GGML_ASSERT(offset + (ne10 == 0 ? 0 : ne10-1)*nb00 + (ne11 == 0 ? 0 : ne11-1)*nb01 + (ne12 == 0 ? 0 : ne12-1)*nb02 + (ne13 == 0 ? 0 : ne13-1)*nb03 < ggml_nbytes(src0));
  7859. GGML_ASSERT(nb10 == sizeof(float));
  7860. // rows per thread
  7861. const int dr = (nr + nth - 1)/nth;
  7862. // row range for this thread
  7863. const int ir0 = dr*ith;
  7864. const int ir1 = MIN(ir0 + dr, nr);
  7865. for (int ir = ir0; ir < ir1; ++ir) {
  7866. // src0 and dst are viewed with shape of src1 and offset
  7867. // => same indices
  7868. const int i3 = ir/(ne12*ne11);
  7869. const int i2 = (ir - i3*ne12*ne11)/ne11;
  7870. const int i1 = (ir - i3*ne12*ne11 - i2*ne11);
  7871. #ifdef GGML_USE_ACCELERATE
  7872. vDSP_vadd(
  7873. (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + offset), 1,
  7874. (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11), 1,
  7875. (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + offset), 1, nc);
  7876. #else
  7877. ggml_vec_add_f32(nc,
  7878. (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + offset),
  7879. (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + offset),
  7880. (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11));
  7881. #endif
  7882. }
  7883. }
  7884. static void ggml_compute_forward_acc(
  7885. const struct ggml_compute_params * params,
  7886. struct ggml_tensor * dst) {
  7887. const struct ggml_tensor * src0 = dst->src[0];
  7888. switch (src0->type) {
  7889. case GGML_TYPE_F32:
  7890. {
  7891. ggml_compute_forward_acc_f32(params, dst);
  7892. } break;
  7893. case GGML_TYPE_F16:
  7894. case GGML_TYPE_BF16:
  7895. case GGML_TYPE_Q4_0:
  7896. case GGML_TYPE_Q4_1:
  7897. case GGML_TYPE_Q5_0:
  7898. case GGML_TYPE_Q5_1:
  7899. case GGML_TYPE_Q8_0:
  7900. case GGML_TYPE_Q8_1:
  7901. case GGML_TYPE_Q2_K:
  7902. case GGML_TYPE_Q3_K:
  7903. case GGML_TYPE_Q4_K:
  7904. case GGML_TYPE_Q5_K:
  7905. case GGML_TYPE_Q6_K:
  7906. case GGML_TYPE_IQ2_XXS:
  7907. case GGML_TYPE_IQ2_XS:
  7908. case GGML_TYPE_IQ3_XXS:
  7909. case GGML_TYPE_IQ1_S:
  7910. case GGML_TYPE_IQ1_M:
  7911. case GGML_TYPE_IQ4_NL:
  7912. case GGML_TYPE_IQ4_XS:
  7913. case GGML_TYPE_IQ3_S:
  7914. case GGML_TYPE_IQ2_S:
  7915. default:
  7916. {
  7917. GGML_ASSERT(false);
  7918. } break;
  7919. }
  7920. }
  7921. // ggml_compute_forward_sub
  7922. static void ggml_compute_forward_sub_f32(
  7923. const struct ggml_compute_params * params,
  7924. struct ggml_tensor * dst) {
  7925. const struct ggml_tensor * src0 = dst->src[0];
  7926. const struct ggml_tensor * src1 = dst->src[1];
  7927. assert(params->ith == 0);
  7928. assert(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
  7929. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  7930. return;
  7931. }
  7932. const int nr = ggml_nrows(src0);
  7933. GGML_TENSOR_BINARY_OP_LOCALS
  7934. GGML_ASSERT( nb0 == sizeof(float));
  7935. GGML_ASSERT(nb00 == sizeof(float));
  7936. if (nb10 == sizeof(float)) {
  7937. for (int ir = 0; ir < nr; ++ir) {
  7938. // src0, src1 and dst are same shape => same indices
  7939. const int i3 = ir/(ne2*ne1);
  7940. const int i2 = (ir - i3*ne2*ne1)/ne1;
  7941. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  7942. #ifdef GGML_USE_ACCELERATE
  7943. vDSP_vsub(
  7944. (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11), 1,
  7945. (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01), 1,
  7946. (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ), 1,
  7947. ne0);
  7948. #else
  7949. ggml_vec_sub_f32(ne0,
  7950. (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ),
  7951. (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01),
  7952. (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11));
  7953. #endif
  7956. }
  7957. } else {
  7958. // src1 is not contiguous
  7959. for (int ir = 0; ir < nr; ++ir) {
  7960. // src0, src1 and dst are same shape => same indices
  7961. const int i3 = ir/(ne2*ne1);
  7962. const int i2 = (ir - i3*ne2*ne1)/ne1;
  7963. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  7964. float * dst_ptr = (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 );
  7965. float * src0_ptr = (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
  7966. for (int i0 = 0; i0 < ne0; i0++) {
  7967. float * src1_ptr = (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11 + i0*nb10);
  7968. dst_ptr[i0] = src0_ptr[i0] - *src1_ptr;
  7969. }
  7970. }
  7971. }
  7972. }
  7973. static void ggml_compute_forward_sub(
  7974. const struct ggml_compute_params * params,
  7975. struct ggml_tensor * dst) {
  7976. const struct ggml_tensor * src0 = dst->src[0];
  7977. switch (src0->type) {
  7978. case GGML_TYPE_F32:
  7979. {
  7980. ggml_compute_forward_sub_f32(params, dst);
  7981. } break;
  7982. default:
  7983. {
  7984. GGML_ASSERT(false);
  7985. } break;
  7986. }
  7987. }
  7988. // ggml_compute_forward_mul
  7989. static void ggml_compute_forward_mul_f32(
  7990. const struct ggml_compute_params * params,
  7991. struct ggml_tensor * dst) {
  7992. const struct ggml_tensor * src0 = dst->src[0];
  7993. const struct ggml_tensor * src1 = dst->src[1];
  7994. GGML_ASSERT(ggml_can_repeat(src1, src0) && ggml_are_same_shape(src0, dst));
  7995. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  7996. return;
  7997. }
  7998. const int ith = params->ith;
  7999. const int nth = params->nth;
  8000. #if defined(GGML_USE_CLBLAST)
  8001. if (src1->backend == GGML_BACKEND_TYPE_GPU) {
  8002. // TODO: OpenCL kernel support full broadcast
  8003. GGML_ASSERT(ggml_can_repeat_rows(src1, src0));
  8004. if (ith == 0) {
  8005. ggml_cl_mul(src0, src1, dst);
  8006. }
  8007. return;
  8008. }
  8009. #endif
  8010. const int64_t nr = ggml_nrows(src0);
  8011. GGML_TENSOR_BINARY_OP_LOCALS
  8012. GGML_ASSERT( nb0 == sizeof(float));
  8013. GGML_ASSERT(nb00 == sizeof(float));
  8014. if (nb10 == sizeof(float)) {
  8015. for (int64_t ir = ith; ir < nr; ir += nth) {
  8016. // src0 and dst are same shape => same indices
  8017. const int64_t i03 = ir/(ne02*ne01);
  8018. const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
  8019. const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);
  8020. const int64_t i13 = i03 % ne13;
  8021. const int64_t i12 = i02 % ne12;
  8022. const int64_t i11 = i01 % ne11;
  8023. const int64_t nr0 = ne00 / ne10;
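// broadcasting: a singleton dimension in src1 (ne1x == 1) maps every src0 index to 0 via the modulo;
// nr0 counts how many src1 rows tile one src0 row along dim 0, e.g. ne00 = 8, ne10 = 4 => nr0 = 2,
// so the same 4 src1 values are applied to both halves of each src0 row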
  8024. float * dst_ptr = (float *) ((char *) dst->data + i03*nb3 + i02*nb2 + i01*nb1 );
  8025. float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);
  8026. float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11);
  8027. for (int64_t r = 0 ; r < nr0; ++r) {
  8028. #ifdef GGML_USE_ACCELERATE
  8029. UNUSED(ggml_vec_mul_f32);
  8030. vDSP_vmul(src0_ptr + r*ne10, 1, src1_ptr, 1, dst_ptr + r*ne10, 1, ne10);
  8031. #else
  8032. ggml_vec_mul_f32(ne10, dst_ptr + r*ne10, src0_ptr + r*ne10, src1_ptr);
  8033. #endif
  8034. }
  8035. }
  8036. } else {
  8037. // src1 is not contiguous
  8038. for (int64_t ir = ith; ir < nr; ir += nth) {
  8039. // src0 and dst are same shape => same indices
  8040. // src1 is broadcastable across src0 and dst in i1, i2, i3
  8041. const int64_t i03 = ir/(ne02*ne01);
  8042. const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
  8043. const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);
  8044. const int64_t i13 = i03 % ne13;
  8045. const int64_t i12 = i02 % ne12;
  8046. const int64_t i11 = i01 % ne11;
  8047. float * dst_ptr = (float *) ((char *) dst->data + i03*nb3 + i02*nb2 + i01*nb1 );
  8048. float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);
  8049. for (int64_t i0 = 0; i0 < ne00; ++i0) {
  8050. const int64_t i10 = i0 % ne10;
  8051. float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + i10*nb10);
  8052. dst_ptr[i0] = src0_ptr[i0] * (*src1_ptr);
  8053. }
  8054. }
  8055. }
  8056. }
  8057. static void ggml_compute_forward_mul(
  8058. const struct ggml_compute_params * params,
  8059. struct ggml_tensor * dst) {
  8060. const struct ggml_tensor * src0 = dst->src[0];
  8061. const struct ggml_tensor * src1 = dst->src[1];
  8062. GGML_ASSERT(src1->type == GGML_TYPE_F32 && "only f32 src1 supported for now");
  8063. switch (src0->type) {
  8064. case GGML_TYPE_F32:
  8065. {
  8066. ggml_compute_forward_mul_f32(params, dst);
  8067. } break;
  8068. default:
  8069. {
  8070. GGML_ASSERT(false);
  8071. } break;
  8072. }
  8073. }
  8074. // ggml_compute_forward_div
  8075. static void ggml_compute_forward_div_f32(
  8076. const struct ggml_compute_params * params,
  8077. struct ggml_tensor * dst) {
  8078. const struct ggml_tensor * src0 = dst->src[0];
  8079. const struct ggml_tensor * src1 = dst->src[1];
  8080. GGML_ASSERT(ggml_can_repeat(src1, src0) && ggml_are_same_shape(src0, dst));
  8081. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  8082. return;
  8083. }
  8084. const int ith = params->ith;
  8085. const int nth = params->nth;
  8086. const int64_t nr = ggml_nrows(src0);
  8087. GGML_TENSOR_BINARY_OP_LOCALS
  8088. GGML_ASSERT( nb0 == sizeof(float));
  8089. GGML_ASSERT(nb00 == sizeof(float));
  8090. if (nb10 == sizeof(float)) {
  8091. for (int64_t ir = ith; ir < nr; ir += nth) {
  8092. // src0 and dst are same shape => same indices
  8093. const int64_t i03 = ir/(ne02*ne01);
  8094. const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
  8095. const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);
  8096. const int64_t i13 = i03 % ne13;
  8097. const int64_t i12 = i02 % ne12;
  8098. const int64_t i11 = i01 % ne11;
  8099. const int64_t nr0 = ne00 / ne10;
  8100. float * dst_ptr = (float *) ((char *) dst->data + i03*nb3 + i02*nb2 + i01*nb1 );
  8101. float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);
  8102. float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11);
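// note: vDSP_vdiv takes the divisor as its first argument, so the call below
// computes dst = src0 / src1, matching the ggml_vec_div_f32 fallback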
  8103. for (int64_t r = 0; r < nr0; ++r) {
  8104. #ifdef GGML_USE_ACCELERATE
  8105. UNUSED(ggml_vec_div_f32);
  8106. vDSP_vdiv(src1_ptr, 1, src0_ptr + r*ne10, 1, dst_ptr + r*ne10, 1, ne10);
  8107. #else
  8108. ggml_vec_div_f32(ne10, dst_ptr + r*ne10, src0_ptr + r*ne10, src1_ptr);
  8109. #endif
  8110. }
  8111. }
  8112. } else {
  8113. // src1 is not contiguous
  8114. for (int64_t ir = ith; ir < nr; ir += nth) {
  8115. // src0 and dst are same shape => same indices
  8116. // src1 is broadcastable across src0 and dst in i1, i2, i3
  8117. const int64_t i03 = ir/(ne02*ne01);
  8118. const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
  8119. const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);
  8120. const int64_t i13 = i03 % ne13;
  8121. const int64_t i12 = i02 % ne12;
  8122. const int64_t i11 = i01 % ne11;
  8123. float * dst_ptr = (float *) ((char *) dst->data + i03*nb3 + i02*nb2 + i01*nb1 );
  8124. float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);
  8125. for (int64_t i0 = 0; i0 < ne00; ++i0) {
  8126. const int64_t i10 = i0 % ne10;
  8127. float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + i10*nb10);
  8128. dst_ptr[i0] = src0_ptr[i0] / (*src1_ptr);
  8129. }
  8130. }
  8131. }
  8132. }
  8133. static void ggml_compute_forward_div(
  8134. const struct ggml_compute_params * params,
  8135. struct ggml_tensor * dst) {
  8136. const struct ggml_tensor * src0 = dst->src[0];
  8137. switch (src0->type) {
  8138. case GGML_TYPE_F32:
  8139. {
  8140. ggml_compute_forward_div_f32(params, dst);
  8141. } break;
  8142. default:
  8143. {
  8144. GGML_ASSERT(false);
  8145. } break;
  8146. }
  8147. }
  8148. // ggml_compute_forward_sqr
  8149. static void ggml_compute_forward_sqr_f32(
  8150. const struct ggml_compute_params * params,
  8151. struct ggml_tensor * dst) {
  8152. const struct ggml_tensor * src0 = dst->src[0];
  8153. assert(params->ith == 0);
  8154. assert(ggml_are_same_shape(src0, dst));
  8155. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  8156. return;
  8157. }
  8158. const int n = ggml_nrows(src0);
  8159. const int nc = src0->ne[0];
  8160. assert( dst->nb[0] == sizeof(float));
  8161. assert(src0->nb[0] == sizeof(float));
  8162. for (int i = 0; i < n; i++) {
  8163. ggml_vec_sqr_f32(nc,
  8164. (float *) ((char *) dst->data + i*( dst->nb[1])),
  8165. (float *) ((char *) src0->data + i*(src0->nb[1])));
  8166. }
  8167. }
  8168. static void ggml_compute_forward_sqr(
  8169. const struct ggml_compute_params * params,
  8170. struct ggml_tensor * dst) {
  8171. const struct ggml_tensor * src0 = dst->src[0];
  8172. switch (src0->type) {
  8173. case GGML_TYPE_F32:
  8174. {
  8175. ggml_compute_forward_sqr_f32(params, dst);
  8176. } break;
  8177. default:
  8178. {
  8179. GGML_ASSERT(false);
  8180. } break;
  8181. }
  8182. }
  8183. // ggml_compute_forward_sqrt
  8184. static void ggml_compute_forward_sqrt_f32(
  8185. const struct ggml_compute_params * params,
  8186. struct ggml_tensor * dst) {
  8187. const struct ggml_tensor * src0 = dst->src[0];
  8188. assert(params->ith == 0);
  8189. assert(ggml_are_same_shape(src0, dst));
  8190. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  8191. return;
  8192. }
  8193. const int n = ggml_nrows(src0);
  8194. const int nc = src0->ne[0];
  8195. assert( dst->nb[0] == sizeof(float));
  8196. assert(src0->nb[0] == sizeof(float));
  8197. for (int i = 0; i < n; i++) {
  8198. ggml_vec_sqrt_f32(nc,
  8199. (float *) ((char *) dst->data + i*( dst->nb[1])),
  8200. (float *) ((char *) src0->data + i*(src0->nb[1])));
  8201. }
  8202. }
  8203. static void ggml_compute_forward_sqrt(
  8204. const struct ggml_compute_params * params,
  8205. struct ggml_tensor * dst) {
  8206. const struct ggml_tensor * src0 = dst->src[0];
  8207. switch (src0->type) {
  8208. case GGML_TYPE_F32:
  8209. {
  8210. ggml_compute_forward_sqrt_f32(params, dst);
  8211. } break;
  8212. default:
  8213. {
  8214. GGML_ASSERT(false);
  8215. } break;
  8216. }
  8217. }
  8218. // ggml_compute_forward_log
  8219. static void ggml_compute_forward_log_f32(
  8220. const struct ggml_compute_params * params,
  8221. struct ggml_tensor * dst) {
  8222. const struct ggml_tensor * src0 = dst->src[0];
  8223. GGML_ASSERT(params->ith == 0);
  8224. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  8225. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  8226. return;
  8227. }
  8228. const int n = ggml_nrows(src0);
  8229. const int nc = src0->ne[0];
  8230. GGML_ASSERT( dst->nb[0] == sizeof(float));
  8231. GGML_ASSERT(src0->nb[0] == sizeof(float));
  8232. for (int i = 0; i < n; i++) {
  8233. ggml_vec_log_f32(nc,
  8234. (float *) ((char *) dst->data + i*( dst->nb[1])),
  8235. (float *) ((char *) src0->data + i*(src0->nb[1])));
  8236. }
  8237. }
  8238. static void ggml_compute_forward_log(
  8239. const struct ggml_compute_params * params,
  8240. struct ggml_tensor * dst) {
  8241. const struct ggml_tensor * src0 = dst->src[0];
  8242. switch (src0->type) {
  8243. case GGML_TYPE_F32:
  8244. {
  8245. ggml_compute_forward_log_f32(params, dst);
  8246. } break;
  8247. default:
  8248. {
  8249. GGML_ASSERT(false);
  8250. } break;
  8251. }
  8252. }
  8253. // ggml_compute_forward_sum
  8254. static void ggml_compute_forward_sum_f32(
  8255. const struct ggml_compute_params * params,
  8256. struct ggml_tensor * dst) {
  8257. const struct ggml_tensor * src0 = dst->src[0];
  8258. assert(params->ith == 0);
  8259. assert(ggml_is_scalar(dst));
  8260. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  8261. return;
  8262. }
  8264. assert(src0->nb[0] == sizeof(float));
  8265. GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne)
  8266. GGML_TENSOR_LOCALS(size_t, nb0, src0, nb)
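// accumulate in ggml_float (double precision) to limit rounding error when summing long rows;
// the f16/bf16 variants below accumulate in float instead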
  8267. ggml_float sum = 0;
  8268. ggml_float row_sum = 0;
  8269. for (int64_t i03 = 0; i03 < ne03; i03++) {
  8270. for (int64_t i02 = 0; i02 < ne02; i02++) {
  8271. for (int64_t i01 = 0; i01 < ne01; i01++) {
  8272. ggml_vec_sum_f32_ggf(ne00,
  8273. &row_sum,
  8274. (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03));
  8275. sum += row_sum;
  8276. }
  8277. }
  8278. }
  8279. ((float *) dst->data)[0] = sum;
  8280. }
  8281. static void ggml_compute_forward_sum_f16(
  8282. const struct ggml_compute_params * params,
  8283. struct ggml_tensor * dst) {
  8284. const struct ggml_tensor * src0 = dst->src[0];
  8285. assert(params->ith == 0);
  8286. assert(ggml_is_scalar(dst));
  8287. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  8288. return;
  8289. }
  8290. assert(src0->nb[0] == sizeof(ggml_fp16_t));
  8291. GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne)
  8292. GGML_TENSOR_LOCALS(size_t, nb0, src0, nb)
  8293. float sum = 0;
  8294. float row_sum = 0;
  8295. for (int64_t i03 = 0; i03 < ne03; i03++) {
  8296. for (int64_t i02 = 0; i02 < ne02; i02++) {
  8297. for (int64_t i01 = 0; i01 < ne01; i01++) {
  8298. ggml_vec_sum_f16_ggf(ne00,
  8299. &row_sum,
  8300. (ggml_fp16_t *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03));
  8301. sum += row_sum;
  8302. }
  8303. }
  8304. }
  8305. ((ggml_fp16_t *) dst->data)[0] = GGML_FP32_TO_FP16(sum);
  8306. }
  8307. static void ggml_compute_forward_sum_bf16(
  8308. const struct ggml_compute_params * params,
  8309. struct ggml_tensor * dst) {
  8310. const struct ggml_tensor * src0 = dst->src[0];
  8311. assert(params->ith == 0);
  8312. assert(ggml_is_scalar(dst));
  8313. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  8314. return;
  8315. }
  8316. assert(src0->nb[0] == sizeof(ggml_bf16_t));
  8317. GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne)
  8318. GGML_TENSOR_LOCALS(size_t, nb0, src0, nb)
  8319. float sum = 0;
  8320. float row_sum = 0;
  8321. for (int64_t i03 = 0; i03 < ne03; i03++) {
  8322. for (int64_t i02 = 0; i02 < ne02; i02++) {
  8323. for (int64_t i01 = 0; i01 < ne01; i01++) {
  8324. ggml_vec_sum_bf16_ggf(ne00,
  8325. &row_sum,
  8326. (ggml_bf16_t *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03));
  8327. sum += row_sum;
  8328. }
  8329. }
  8330. }
  8331. ((ggml_bf16_t *) dst->data)[0] = GGML_FP32_TO_BF16(sum);
  8332. }
  8333. static void ggml_compute_forward_sum(
  8334. const struct ggml_compute_params * params,
  8335. struct ggml_tensor * dst) {
  8336. const struct ggml_tensor * src0 = dst->src[0];
  8337. switch (src0->type) {
  8338. case GGML_TYPE_F32:
  8339. {
  8340. ggml_compute_forward_sum_f32(params, dst);
  8341. } break;
  8342. case GGML_TYPE_F16:
  8343. {
  8344. ggml_compute_forward_sum_f16(params, dst);
  8345. } break;
  8346. case GGML_TYPE_BF16:
  8347. {
  8348. ggml_compute_forward_sum_bf16(params, dst);
  8349. } break;
  8350. default:
  8351. {
  8352. GGML_ASSERT(false);
  8353. } break;
  8354. }
  8355. }
  8356. // ggml_compute_forward_sum_rows
  8357. static void ggml_compute_forward_sum_rows_f32(
  8358. const struct ggml_compute_params * params,
  8359. struct ggml_tensor * dst) {
  8360. const struct ggml_tensor * src0 = dst->src[0];
  8361. GGML_ASSERT(params->ith == 0);
  8362. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  8363. return;
  8364. }
  8365. GGML_ASSERT(src0->nb[0] == sizeof(float));
  8366. GGML_ASSERT(dst->nb[0] == sizeof(float));
  8367. GGML_TENSOR_UNARY_OP_LOCALS
  8368. GGML_ASSERT(ne0 == 1);
  8369. GGML_ASSERT(ne1 == ne01);
  8370. GGML_ASSERT(ne2 == ne02);
  8371. GGML_ASSERT(ne3 == ne03);
  8372. for (int64_t i3 = 0; i3 < ne03; i3++) {
  8373. for (int64_t i2 = 0; i2 < ne02; i2++) {
  8374. for (int64_t i1 = 0; i1 < ne01; i1++) {
  8375. float * src_row = (float *) ((char *) src0->data + i1*nb01 + i2*nb02 + i3*nb03);
  8376. float * dst_row = (float *) ((char *) dst->data + i1*nb1 + i2*nb2 + i3*nb3);
  8377. float row_sum = 0;
  8378. ggml_vec_sum_f32(ne00, &row_sum, src_row);
  8379. dst_row[0] = row_sum;
  8380. }
  8381. }
  8382. }
  8383. }
  8384. static void ggml_compute_forward_sum_rows(
  8385. const struct ggml_compute_params * params,
  8386. struct ggml_tensor * dst) {
  8387. const struct ggml_tensor * src0 = dst->src[0];
  8388. switch (src0->type) {
  8389. case GGML_TYPE_F32:
  8390. {
  8391. ggml_compute_forward_sum_rows_f32(params, dst);
  8392. } break;
  8393. default:
  8394. {
  8395. GGML_ASSERT(false);
  8396. } break;
  8397. }
  8398. }
  8399. // ggml_compute_forward_mean
  8400. static void ggml_compute_forward_mean_f32(
  8401. const struct ggml_compute_params * params,
  8402. struct ggml_tensor * dst) {
  8403. const struct ggml_tensor * src0 = dst->src[0];
  8404. assert(params->ith == 0);
  8405. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  8406. return;
  8407. }
  8408. assert(src0->nb[0] == sizeof(float));
  8409. GGML_TENSOR_UNARY_OP_LOCALS
  8410. assert(ne0 == 1);
  8411. assert(ne1 == ne01);
  8412. assert(ne2 == ne02);
  8413. assert(ne3 == ne03);
  8414. UNUSED(ne0);
  8415. UNUSED(ne1);
  8416. UNUSED(ne2);
  8417. UNUSED(ne3);
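// mean is computed as a row sum followed by division by ne00; dst has shape {1, ne01, ne02, ne03}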
  8418. for (int64_t i03 = 0; i03 < ne03; i03++) {
  8419. for (int64_t i02 = 0; i02 < ne02; i02++) {
  8420. for (int64_t i01 = 0; i01 < ne01; i01++) {
  8421. ggml_vec_sum_f32(ne00,
  8422. (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3),
  8423. (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03));
  8424. *(float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3) /= (float) ne00;
  8425. }
  8426. }
  8427. }
  8428. }
  8429. static void ggml_compute_forward_mean(
  8430. const struct ggml_compute_params * params,
  8431. struct ggml_tensor * dst) {
  8432. const struct ggml_tensor * src0 = dst->src[0];
  8433. switch (src0->type) {
  8434. case GGML_TYPE_F32:
  8435. {
  8436. ggml_compute_forward_mean_f32(params, dst);
  8437. } break;
  8438. default:
  8439. {
  8440. GGML_ASSERT(false);
  8441. } break;
  8442. }
  8443. }
  8444. // ggml_compute_forward_argmax
  8445. static void ggml_compute_forward_argmax_f32(
  8446. const struct ggml_compute_params * params,
  8447. struct ggml_tensor * dst) {
  8448. const struct ggml_tensor * src0 = dst->src[0];
  8449. assert(params->ith == 0);
  8450. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  8451. return;
  8452. }
  8453. assert(src0->nb[0] == sizeof(float));
  8454. assert(dst->nb[0] == sizeof(float));
  8455. const int64_t ne00 = src0->ne[0];
  8456. const int64_t ne01 = src0->ne[1];
  8457. const size_t nb01 = src0->nb[1];
  8458. const size_t nb0 = dst->nb[0];
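// dst stores the index of the per-row maximum as an int32; nb0 is the dst element stride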
  8459. for (int64_t i1 = 0; i1 < ne01; i1++) {
  8460. float * src = (float *) ((char *) src0->data + i1*nb01);
  8461. int32_t * dst_ = (int32_t *) ((char *) dst->data + i1*nb0);
  8462. int v = 0;
  8463. ggml_vec_argmax_f32(ne00, &v, src);
  8464. dst_[0] = v;
  8465. }
  8466. }
  8467. static void ggml_compute_forward_argmax(
  8468. const struct ggml_compute_params * params,
  8469. struct ggml_tensor * dst) {
  8470. const struct ggml_tensor * src0 = dst->src[0];
  8471. switch (src0->type) {
  8472. case GGML_TYPE_F32:
  8473. {
  8474. ggml_compute_forward_argmax_f32(params, dst);
  8475. } break;
  8476. default:
  8477. {
  8478. GGML_ASSERT(false);
  8479. } break;
  8480. }
  8481. }
  8482. // ggml_compute_forward_repeat
  8483. static void ggml_compute_forward_repeat_f32(
  8484. const struct ggml_compute_params * params,
  8485. struct ggml_tensor * dst) {
  8486. const struct ggml_tensor * src0 = dst->src[0];
  8487. GGML_ASSERT(params->ith == 0);
  8488. GGML_ASSERT(ggml_can_repeat(src0, dst));
  8489. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  8490. return;
  8491. }
  8492. GGML_TENSOR_UNARY_OP_LOCALS
  8493. // guaranteed to be an integer due to the check in ggml_can_repeat
  8494. const int nr0 = (int)(ne0/ne00);
  8495. const int nr1 = (int)(ne1/ne01);
  8496. const int nr2 = (int)(ne2/ne02);
  8497. const int nr3 = (int)(ne3/ne03);
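// example: src0 ne = {2,3,1,1} repeated into dst ne = {4,3,2,1} gives nr0 = 2, nr1 = 1, nr2 = 2, nr3 = 1,
// i.e. each src0 row is copied twice along dim 0 and the whole tensor twice along dim 2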
  8498. // TODO: support for transposed / permuted tensors
  8499. GGML_ASSERT(nb0 == sizeof(float));
  8500. GGML_ASSERT(nb00 == sizeof(float));
  8501. // TODO: maybe this is not optimal?
  8502. for (int i3 = 0; i3 < nr3; i3++) {
  8503. for (int k3 = 0; k3 < ne03; k3++) {
  8504. for (int i2 = 0; i2 < nr2; i2++) {
  8505. for (int k2 = 0; k2 < ne02; k2++) {
  8506. for (int i1 = 0; i1 < nr1; i1++) {
  8507. for (int k1 = 0; k1 < ne01; k1++) {
  8508. for (int i0 = 0; i0 < nr0; i0++) {
  8509. ggml_vec_cpy_f32(ne00,
  8510. (float *) ((char *) dst->data + (i3*ne03 + k3)*nb3 + (i2*ne02 + k2)*nb2 + (i1*ne01 + k1)*nb1 + (i0*ne00)*nb0),
  8511. (float *) ((char *) src0->data + ( k3)*nb03 + ( k2)*nb02 + ( k1)*nb01));
  8512. }
  8513. }
  8514. }
  8515. }
  8516. }
  8517. }
  8518. }
  8519. }
  8520. static void ggml_compute_forward_repeat_f16(
  8521. const struct ggml_compute_params * params,
  8522. struct ggml_tensor * dst) {
  8523. const struct ggml_tensor * src0 = dst->src[0];
  8524. GGML_ASSERT(params->ith == 0);
  8525. GGML_ASSERT(ggml_can_repeat(src0, dst));
  8526. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  8527. return;
  8528. }
  8529. GGML_TENSOR_UNARY_OP_LOCALS
  8530. // guaranteed to be an integer due to the check in ggml_can_repeat
  8531. const int nr0 = (int)(ne0/ne00);
  8532. const int nr1 = (int)(ne1/ne01);
  8533. const int nr2 = (int)(ne2/ne02);
  8534. const int nr3 = (int)(ne3/ne03);
  8535. // TODO: support for transposed / permuted tensors
  8536. GGML_ASSERT(nb0 == sizeof(ggml_fp16_t));
  8537. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  8538. // TODO: maybe this is not optimal?
  8539. for (int i3 = 0; i3 < nr3; i3++) {
  8540. for (int k3 = 0; k3 < ne03; k3++) {
  8541. for (int i2 = 0; i2 < nr2; i2++) {
  8542. for (int k2 = 0; k2 < ne02; k2++) {
  8543. for (int i1 = 0; i1 < nr1; i1++) {
  8544. for (int k1 = 0; k1 < ne01; k1++) {
  8545. for (int i0 = 0; i0 < nr0; i0++) {
  8546. ggml_fp16_t * y = (ggml_fp16_t *) ((char *) dst->data + (i3*ne03 + k3)*nb3 + (i2*ne02 + k2)*nb2 + (i1*ne01 + k1)*nb1 + (i0*ne00)*nb0);
  8547. ggml_fp16_t * x = (ggml_fp16_t *) ((char *) src0->data + ( k3)*nb03 + ( k2)*nb02 + ( k1)*nb01);
  8548. // ggml_vec_cpy_f16(ne00, y, x)
  8549. for (int i = 0; i < ne00; ++i) {
  8550. y[i] = x[i];
  8551. }
  8552. }
  8553. }
  8554. }
  8555. }
  8556. }
  8557. }
  8558. }
  8559. }
  8560. static void ggml_compute_forward_repeat(
  8561. const struct ggml_compute_params * params,
  8562. struct ggml_tensor * dst) {
  8563. const struct ggml_tensor * src0 = dst->src[0];
  8564. switch (src0->type) {
  8565. case GGML_TYPE_F16:
  8566. case GGML_TYPE_BF16:
  8567. case GGML_TYPE_I16:
  8568. {
  8569. ggml_compute_forward_repeat_f16(params, dst);
  8570. } break;
  8571. case GGML_TYPE_F32:
  8572. case GGML_TYPE_I32:
  8573. {
  8574. ggml_compute_forward_repeat_f32(params, dst);
  8575. } break;
  8576. default:
  8577. {
  8578. GGML_ASSERT(false);
  8579. } break;
  8580. }
  8581. }
  8582. // ggml_compute_forward_repeat_back
  8583. static void ggml_compute_forward_repeat_back_f32(
  8584. const struct ggml_compute_params * params,
  8585. struct ggml_tensor * dst) {
  8586. const struct ggml_tensor * src0 = dst->src[0];
  8587. GGML_ASSERT(params->ith == 0);
  8588. GGML_ASSERT(ggml_can_repeat(dst, src0));
  8589. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  8590. return;
  8591. }
  8592. GGML_TENSOR_UNARY_OP_LOCALS
  8593. // guaranteed to be an integer due to the check in ggml_can_repeat
  8594. const int nr0 = (int)(ne00/ne0);
  8595. const int nr1 = (int)(ne01/ne1);
  8596. const int nr2 = (int)(ne02/ne2);
  8597. const int nr3 = (int)(ne03/ne3);
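// repeat_back is the adjoint of repeat: dst (the smaller tensor) is zeroed first and the
// contributions of all repeated copies in src0 are accumulated into it with ggml_vec_acc_f32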
  8598. // TODO: support for transposed / permuted tensors
  8599. GGML_ASSERT(nb0 == sizeof(float));
  8600. GGML_ASSERT(nb00 == sizeof(float));
  8601. if (ggml_is_contiguous(dst)) {
  8602. ggml_vec_set_f32(ne0*ne1*ne2*ne3, dst->data, 0);
  8603. } else {
  8604. for (int k3 = 0; k3 < ne3; k3++) {
  8605. for (int k2 = 0; k2 < ne2; k2++) {
  8606. for (int k1 = 0; k1 < ne1; k1++) {
  8607. ggml_vec_set_f32(ne0,
  8608. (float *) ((char *) dst->data + k1*nb1 + k2*nb2 + k3*nb3),
  8609. 0);
  8610. }
  8611. }
  8612. }
  8613. }
  8614. // TODO: maybe this is not optimal?
  8615. for (int i3 = 0; i3 < nr3; i3++) {
  8616. for (int k3 = 0; k3 < ne3; k3++) {
  8617. for (int i2 = 0; i2 < nr2; i2++) {
  8618. for (int k2 = 0; k2 < ne2; k2++) {
  8619. for (int i1 = 0; i1 < nr1; i1++) {
  8620. for (int k1 = 0; k1 < ne1; k1++) {
  8621. for (int i0 = 0; i0 < nr0; i0++) {
  8622. ggml_vec_acc_f32(ne0,
  8623. (float *) ((char *) dst->data + ( k3)*nb3 + ( k2)*nb2 + ( k1)*nb1),
  8624. (float *) ((char *) src0->data + (i3*ne3 + k3)*nb03 + (i2*ne2 + k2)*nb02 + (i1*ne1 + k1)*nb01 + (i0*ne0)*nb00));
  8625. }
  8626. }
  8627. }
  8628. }
  8629. }
  8630. }
  8631. }
  8632. }
  8633. static void ggml_compute_forward_repeat_back(
  8634. const struct ggml_compute_params * params,
  8635. struct ggml_tensor * dst) {
  8636. const struct ggml_tensor * src0 = dst->src[0];
  8637. switch (src0->type) {
  8638. case GGML_TYPE_F32:
  8639. {
  8640. ggml_compute_forward_repeat_back_f32(params, dst);
  8641. } break;
  8642. default:
  8643. {
  8644. GGML_ASSERT(false);
  8645. } break;
  8646. }
  8647. }
  8648. // ggml_compute_forward_concat
  8649. static void ggml_compute_forward_concat_f32(
  8650. const struct ggml_compute_params * params,
  8651. struct ggml_tensor * dst) {
  8652. const struct ggml_tensor * src0 = dst->src[0];
  8653. const struct ggml_tensor * src1 = dst->src[1];
  8654. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  8655. return;
  8656. }
  8657. GGML_ASSERT(src0->nb[0] == sizeof(float));
  8658. const int ith = params->ith;
  8659. const int nth = params->nth;
  8660. GGML_TENSOR_BINARY_OP_LOCALS
  8661. // TODO: support for transposed / permuted tensors
  8662. GGML_ASSERT(nb0 == sizeof(float));
  8663. GGML_ASSERT(nb00 == sizeof(float));
  8664. GGML_ASSERT(nb10 == sizeof(float));
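// concatenation along dim 2: planes with i2 < ne02 are copied from src0, the remaining planes
// come from src1 at index i2 - ne02; the work is split across threads over i2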
  8665. for (int i3 = 0; i3 < ne3; i3++) {
  8666. for (int i2 = ith; i2 < ne2; i2 += nth) {
  8667. if (i2 < ne02) { // src0
  8668. for (int i1 = 0; i1 < ne1; i1++) {
  8669. for (int i0 = 0; i0 < ne0; i0++) {
  8670. const float * x = (float *)((char *) src0->data + i0 * nb00 + i1 * nb01 + i2 * nb02 + i3 * nb03);
  8671. float * y = (float *)((char *)dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3);
  8672. *y = *x;
  8673. }
  8674. }
8675. }
8676. else { // src1
  8677. for (int i1 = 0; i1 < ne1; i1++) {
  8678. for (int i0 = 0; i0 < ne0; i0++) {
  8679. const float * x = (float *)((char *) src1->data + i0 * nb10 + i1 * nb11 + (i2 - ne02) * nb12 + i3 * nb13);
  8680. float * y = (float *)((char *)dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3);
  8681. *y = *x;
  8682. }
  8683. }
  8684. }
  8685. }
  8686. }
  8687. }
  8688. static void ggml_compute_forward_concat(
  8689. const struct ggml_compute_params* params,
  8690. struct ggml_tensor* dst) {
  8691. const struct ggml_tensor * src0 = dst->src[0];
  8692. switch (src0->type) {
  8693. case GGML_TYPE_F32:
  8694. case GGML_TYPE_I32:
  8695. {
  8696. ggml_compute_forward_concat_f32(params, dst);
  8697. } break;
  8698. default:
  8699. {
  8700. GGML_ASSERT(false);
  8701. } break;
  8702. }
  8703. }
  8704. // ggml_compute_forward_abs
  8705. static void ggml_compute_forward_abs_f32(
  8706. const struct ggml_compute_params * params,
  8707. struct ggml_tensor * dst) {
  8708. const struct ggml_tensor * src0 = dst->src[0];
  8709. assert(params->ith == 0);
  8710. assert(ggml_are_same_shape(src0, dst));
  8711. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  8712. return;
  8713. }
  8714. const int n = ggml_nrows(src0);
  8715. const int nc = src0->ne[0];
  8716. assert(dst->nb[0] == sizeof(float));
  8717. assert(src0->nb[0] == sizeof(float));
  8718. for (int i = 0; i < n; i++) {
  8719. ggml_vec_abs_f32(nc,
  8720. (float *) ((char *) dst->data + i*( dst->nb[1])),
  8721. (float *) ((char *) src0->data + i*(src0->nb[1])));
  8722. }
  8723. }
  8724. static void ggml_compute_forward_abs(
  8725. const struct ggml_compute_params * params,
  8726. struct ggml_tensor * dst) {
  8727. const struct ggml_tensor * src0 = dst->src[0];
  8728. switch (src0->type) {
  8729. case GGML_TYPE_F32:
  8730. {
  8731. ggml_compute_forward_abs_f32(params, dst);
  8732. } break;
  8733. default:
  8734. {
  8735. GGML_ASSERT(false);
  8736. } break;
  8737. }
  8738. }
  8739. // ggml_compute_forward_sgn
  8740. static void ggml_compute_forward_sgn_f32(
  8741. const struct ggml_compute_params * params,
  8742. struct ggml_tensor * dst) {
  8743. const struct ggml_tensor * src0 = dst->src[0];
  8744. assert(params->ith == 0);
  8745. assert(ggml_are_same_shape(src0, dst));
  8746. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  8747. return;
  8748. }
  8749. const int n = ggml_nrows(src0);
  8750. const int nc = src0->ne[0];
  8751. assert(dst->nb[0] == sizeof(float));
  8752. assert(src0->nb[0] == sizeof(float));
  8753. for (int i = 0; i < n; i++) {
  8754. ggml_vec_sgn_f32(nc,
  8755. (float *) ((char *) dst->data + i*( dst->nb[1])),
  8756. (float *) ((char *) src0->data + i*(src0->nb[1])));
  8757. }
  8758. }
  8759. static void ggml_compute_forward_sgn(
  8760. const struct ggml_compute_params * params,
  8761. struct ggml_tensor * dst) {
  8762. const struct ggml_tensor * src0 = dst->src[0];
  8763. switch (src0->type) {
  8764. case GGML_TYPE_F32:
  8765. {
  8766. ggml_compute_forward_sgn_f32(params, dst);
  8767. } break;
  8768. default:
  8769. {
  8770. GGML_ASSERT(false);
  8771. } break;
  8772. }
  8773. }
  8774. // ggml_compute_forward_neg
  8775. static void ggml_compute_forward_neg_f32(
  8776. const struct ggml_compute_params * params,
  8777. struct ggml_tensor * dst) {
  8778. const struct ggml_tensor * src0 = dst->src[0];
  8779. assert(params->ith == 0);
  8780. assert(ggml_are_same_shape(src0, dst));
  8781. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  8782. return;
  8783. }
  8784. const int n = ggml_nrows(src0);
  8785. const int nc = src0->ne[0];
  8786. assert(dst->nb[0] == sizeof(float));
  8787. assert(src0->nb[0] == sizeof(float));
  8788. for (int i = 0; i < n; i++) {
  8789. ggml_vec_neg_f32(nc,
  8790. (float *) ((char *) dst->data + i*( dst->nb[1])),
  8791. (float *) ((char *) src0->data + i*(src0->nb[1])));
  8792. }
  8793. }
  8794. static void ggml_compute_forward_neg(
  8795. const struct ggml_compute_params * params,
  8796. struct ggml_tensor * dst) {
  8797. const struct ggml_tensor * src0 = dst->src[0];
  8798. switch (src0->type) {
  8799. case GGML_TYPE_F32:
  8800. {
  8801. ggml_compute_forward_neg_f32(params, dst);
  8802. } break;
  8803. default:
  8804. {
  8805. GGML_ASSERT(false);
  8806. } break;
  8807. }
  8808. }
  8809. // ggml_compute_forward_step
  8810. static void ggml_compute_forward_step_f32(
  8811. const struct ggml_compute_params * params,
  8812. struct ggml_tensor * dst) {
  8813. const struct ggml_tensor * src0 = dst->src[0];
  8814. assert(params->ith == 0);
  8815. assert(ggml_are_same_shape(src0, dst));
  8816. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  8817. return;
  8818. }
  8819. const int n = ggml_nrows(src0);
  8820. const int nc = src0->ne[0];
  8821. assert(dst->nb[0] == sizeof(float));
  8822. assert(src0->nb[0] == sizeof(float));
  8823. for (int i = 0; i < n; i++) {
  8824. ggml_vec_step_f32(nc,
  8825. (float *) ((char *) dst->data + i*( dst->nb[1])),
  8826. (float *) ((char *) src0->data + i*(src0->nb[1])));
  8827. }
  8828. }
  8829. static void ggml_compute_forward_step(
  8830. const struct ggml_compute_params * params,
  8831. struct ggml_tensor * dst) {
  8832. const struct ggml_tensor * src0 = dst->src[0];
  8833. switch (src0->type) {
  8834. case GGML_TYPE_F32:
  8835. {
  8836. ggml_compute_forward_step_f32(params, dst);
  8837. } break;
  8838. default:
  8839. {
  8840. GGML_ASSERT(false);
  8841. } break;
  8842. }
  8843. }
  8844. // ggml_compute_forward_tanh
  8845. static void ggml_compute_forward_tanh_f32(
  8846. const struct ggml_compute_params * params,
  8847. struct ggml_tensor * dst) {
  8848. const struct ggml_tensor * src0 = dst->src[0];
  8849. assert(params->ith == 0);
  8850. assert(ggml_are_same_shape(src0, dst));
  8851. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  8852. return;
  8853. }
  8854. const int n = ggml_nrows(src0);
  8855. const int nc = src0->ne[0];
  8856. assert(dst->nb[0] == sizeof(float));
  8857. assert(src0->nb[0] == sizeof(float));
  8858. for (int i = 0; i < n; i++) {
  8859. ggml_vec_tanh_f32(nc,
  8860. (float *) ((char *) dst->data + i*( dst->nb[1])),
  8861. (float *) ((char *) src0->data + i*(src0->nb[1])));
  8862. }
  8863. }
  8864. static void ggml_compute_forward_tanh(
  8865. const struct ggml_compute_params * params,
  8866. struct ggml_tensor * dst) {
  8867. const struct ggml_tensor * src0 = dst->src[0];
  8868. switch (src0->type) {
  8869. case GGML_TYPE_F32:
  8870. {
  8871. ggml_compute_forward_tanh_f32(params, dst);
  8872. } break;
  8873. default:
  8874. {
  8875. GGML_ASSERT(false);
  8876. } break;
  8877. }
  8878. }
  8879. // ggml_compute_forward_elu
  8880. static void ggml_compute_forward_elu_f32(
  8881. const struct ggml_compute_params * params,
  8882. struct ggml_tensor * dst) {
  8883. const struct ggml_tensor * src0 = dst->src[0];
  8884. assert(params->ith == 0);
  8885. assert(ggml_are_same_shape(src0, dst));
  8886. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  8887. return;
  8888. }
  8889. const int n = ggml_nrows(src0);
  8890. const int nc = src0->ne[0];
  8891. assert(dst->nb[0] == sizeof(float));
  8892. assert(src0->nb[0] == sizeof(float));
  8893. for (int i = 0; i < n; i++) {
  8894. ggml_vec_elu_f32(nc,
  8895. (float *) ((char *) dst->data + i*( dst->nb[1])),
  8896. (float *) ((char *) src0->data + i*(src0->nb[1])));
  8897. }
  8898. }
  8899. static void ggml_compute_forward_elu(
  8900. const struct ggml_compute_params * params,
  8901. struct ggml_tensor * dst) {
  8902. const struct ggml_tensor * src0 = dst->src[0];
  8903. switch (src0->type) {
  8904. case GGML_TYPE_F32:
  8905. {
  8906. ggml_compute_forward_elu_f32(params, dst);
  8907. } break;
  8908. default:
  8909. {
  8910. GGML_ASSERT(false);
  8911. } break;
  8912. }
  8913. }
  8914. // ggml_compute_forward_relu
  8915. static void ggml_compute_forward_relu_f32(
  8916. const struct ggml_compute_params * params,
  8917. struct ggml_tensor * dst) {
  8918. const struct ggml_tensor * src0 = dst->src[0];
  8919. assert(params->ith == 0);
  8920. assert(ggml_are_same_shape(src0, dst));
  8921. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  8922. return;
  8923. }
  8924. const int n = ggml_nrows(src0);
  8925. const int nc = src0->ne[0];
  8926. assert(dst->nb[0] == sizeof(float));
  8927. assert(src0->nb[0] == sizeof(float));
  8928. for (int i = 0; i < n; i++) {
  8929. ggml_vec_relu_f32(nc,
  8930. (float *) ((char *) dst->data + i*( dst->nb[1])),
  8931. (float *) ((char *) src0->data + i*(src0->nb[1])));
  8932. }
  8933. }
  8934. static void ggml_compute_forward_relu(
  8935. const struct ggml_compute_params * params,
  8936. struct ggml_tensor * dst) {
  8937. const struct ggml_tensor * src0 = dst->src[0];
  8938. switch (src0->type) {
  8939. case GGML_TYPE_F32:
  8940. {
  8941. ggml_compute_forward_relu_f32(params, dst);
  8942. } break;
  8943. default:
  8944. {
  8945. GGML_ASSERT(false);
  8946. } break;
  8947. }
  8948. }
  8949. // ggml_compute_forward_sigmoid
  8950. static void ggml_compute_forward_sigmoid_f32(
  8951. const struct ggml_compute_params * params,
  8952. struct ggml_tensor * dst) {
  8953. const struct ggml_tensor * src0 = dst->src[0];
  8954. assert(params->ith == 0);
  8955. assert(ggml_are_same_shape(src0, dst));
  8956. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  8957. return;
  8958. }
  8959. const int n = ggml_nrows(src0);
  8960. const int nc = src0->ne[0];
  8961. assert(dst->nb[0] == sizeof(float));
  8962. assert(src0->nb[0] == sizeof(float));
  8963. for (int i = 0; i < n; i++) {
  8964. ggml_vec_sigmoid_f32(nc,
  8965. (float *) ((char *) dst->data + i*( dst->nb[1])),
  8966. (float *) ((char *) src0->data + i*(src0->nb[1])));
  8967. }
  8968. }
  8969. static void ggml_compute_forward_sigmoid(
  8970. const struct ggml_compute_params * params,
  8971. struct ggml_tensor * dst) {
  8972. const struct ggml_tensor * src0 = dst->src[0];
  8973. switch (src0->type) {
  8974. case GGML_TYPE_F32:
  8975. {
  8976. ggml_compute_forward_sigmoid_f32(params, dst);
  8977. } break;
  8978. default:
  8979. {
  8980. GGML_ASSERT(false);
  8981. } break;
  8982. }
  8983. }
  8984. // ggml_compute_forward_gelu
  8985. static void ggml_compute_forward_gelu_f32(
  8986. const struct ggml_compute_params * params,
  8987. struct ggml_tensor * dst) {
  8988. const struct ggml_tensor * src0 = dst->src[0];
  8989. GGML_ASSERT(ggml_is_contiguous_except_dim_1(src0));
  8990. GGML_ASSERT(ggml_is_contiguous_except_dim_1(dst));
  8991. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  8992. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  8993. return;
  8994. }
  8995. const int ith = params->ith;
  8996. const int nth = params->nth;
  8997. const int nc = src0->ne[0];
  8998. const int nr = ggml_nrows(src0);
  8999. // rows per thread
  9000. const int dr = (nr + nth - 1)/nth;
  9001. // row range for this thread
  9002. const int ir0 = dr*ith;
  9003. const int ir1 = MIN(ir0 + dr, nr);
  9004. for (int i1 = ir0; i1 < ir1; i1++) {
  9005. ggml_vec_gelu_f32(nc,
  9006. (float *) ((char *) dst->data + i1*( dst->nb[1])),
  9007. (float *) ((char *) src0->data + i1*(src0->nb[1])));
  9008. #ifndef NDEBUG
  9009. for (int k = 0; k < nc; k++) {
  9010. const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k];
  9011. UNUSED(x);
  9012. assert(!isnan(x));
  9013. assert(!isinf(x));
  9014. }
  9015. #endif
  9016. }
  9017. }
  9018. static void ggml_compute_forward_gelu(
  9019. const struct ggml_compute_params * params,
  9020. struct ggml_tensor * dst) {
  9021. const struct ggml_tensor * src0 = dst->src[0];
  9022. switch (src0->type) {
  9023. case GGML_TYPE_F32:
  9024. {
  9025. ggml_compute_forward_gelu_f32(params, dst);
  9026. } break;
  9027. default:
  9028. {
  9029. GGML_ASSERT(false);
  9030. } break;
  9031. }
  9032. }
  9033. // ggml_compute_forward_gelu_quick
  9034. static void ggml_compute_forward_gelu_quick_f32(
  9035. const struct ggml_compute_params * params,
  9036. struct ggml_tensor * dst) {
  9037. const struct ggml_tensor * src0 = dst->src[0];
  9038. GGML_ASSERT(ggml_is_contiguous_except_dim_1(src0));
  9039. GGML_ASSERT(ggml_is_contiguous_except_dim_1(dst));
  9040. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  9041. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  9042. return;
  9043. }
  9044. const int ith = params->ith;
  9045. const int nth = params->nth;
  9046. const int nc = src0->ne[0];
  9047. const int nr = ggml_nrows(src0);
  9048. // rows per thread
  9049. const int dr = (nr + nth - 1)/nth;
  9050. // row range for this thread
  9051. const int ir0 = dr*ith;
  9052. const int ir1 = MIN(ir0 + dr, nr);
  9053. for (int i1 = ir0; i1 < ir1; i1++) {
  9054. ggml_vec_gelu_quick_f32(nc,
  9055. (float *) ((char *) dst->data + i1*( dst->nb[1])),
  9056. (float *) ((char *) src0->data + i1*(src0->nb[1])));
  9057. #ifndef NDEBUG
  9058. for (int k = 0; k < nc; k++) {
  9059. const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k];
  9060. UNUSED(x);
  9061. assert(!isnan(x));
  9062. assert(!isinf(x));
  9063. }
  9064. #endif
  9065. }
  9066. }
  9067. static void ggml_compute_forward_gelu_quick(
  9068. const struct ggml_compute_params * params,
  9069. struct ggml_tensor * dst) {
  9070. const struct ggml_tensor * src0 = dst->src[0];
  9071. switch (src0->type) {
  9072. case GGML_TYPE_F32:
  9073. {
  9074. ggml_compute_forward_gelu_quick_f32(params, dst);
  9075. } break;
  9076. default:
  9077. {
  9078. GGML_ASSERT(false);
  9079. } break;
  9080. }
  9081. }
  9082. // ggml_compute_forward_silu
  9083. static void ggml_compute_forward_silu_f32(
  9084. const struct ggml_compute_params * params,
  9085. struct ggml_tensor * dst) {
  9086. const struct ggml_tensor * src0 = dst->src[0];
  9087. GGML_ASSERT(ggml_is_contiguous_except_dim_1(src0));
  9088. GGML_ASSERT(ggml_is_contiguous_except_dim_1(dst));
  9089. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  9090. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  9091. return;
  9092. }
  9093. const int ith = params->ith;
  9094. const int nth = params->nth;
  9095. const int nc = src0->ne[0];
  9096. const int nr = ggml_nrows(src0);
  9097. // rows per thread
  9098. const int dr = (nr + nth - 1)/nth;
  9099. // row range for this thread
  9100. const int ir0 = dr*ith;
  9101. const int ir1 = MIN(ir0 + dr, nr);
  9102. for (int i1 = ir0; i1 < ir1; i1++) {
  9103. ggml_vec_silu_f32(nc,
  9104. (float *) ((char *) dst->data + i1*( dst->nb[1])),
  9105. (float *) ((char *) src0->data + i1*(src0->nb[1])));
  9106. #ifndef NDEBUG
  9107. for (int k = 0; k < nc; k++) {
  9108. const float x = ((float *) ((char *) dst->data + i1*(dst->nb[1])))[k];
  9109. UNUSED(x);
  9110. assert(!isnan(x));
  9111. assert(!isinf(x));
  9112. }
  9113. #endif
  9114. }
  9115. }
  9116. static void ggml_compute_forward_silu(
  9117. const struct ggml_compute_params * params,
  9118. struct ggml_tensor * dst) {
  9119. const struct ggml_tensor * src0 = dst->src[0];
  9120. switch (src0->type) {
  9121. case GGML_TYPE_F32:
  9122. {
  9123. ggml_compute_forward_silu_f32(params, dst);
  9124. } break;
  9125. default:
  9126. {
  9127. GGML_ASSERT(false);
  9128. } break;
  9129. }
  9130. }
  9131. // ggml_compute_forward_leaky_relu
  9132. static void ggml_compute_forward_leaky_relu_f32(
  9133. const struct ggml_compute_params * params,
  9134. struct ggml_tensor * dst) {
  9135. const struct ggml_tensor * src0 = dst->src[0];
  9136. assert(params->ith == 0);
  9137. assert(ggml_are_same_shape(src0, dst));
  9138. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  9139. return;
  9140. }
  9141. const int n = ggml_nrows(src0);
  9142. const int nc = src0->ne[0];
  9143. float negative_slope;
  9144. memcpy(&negative_slope, dst->op_params, sizeof(float));
  9145. assert(dst->nb[0] == sizeof(float));
  9146. assert(src0->nb[0] == sizeof(float));
  9147. for (int i = 0; i < n; i++) {
  9148. ggml_vec_leaky_relu_f32(nc,
  9149. (float *) ((char *) dst->data + i*( dst->nb[1])),
  9150. (float *) ((char *) src0->data + i*(src0->nb[1])), negative_slope);
  9151. }
  9152. }
  9153. static void ggml_compute_forward_leaky_relu(
  9154. const struct ggml_compute_params * params,
  9155. struct ggml_tensor * dst) {
  9156. const struct ggml_tensor * src0 = dst->src[0];
  9157. switch (src0->type) {
  9158. case GGML_TYPE_F32:
  9159. {
  9160. ggml_compute_forward_leaky_relu_f32(params, dst);
  9161. } break;
  9162. default:
  9163. {
  9164. GGML_ASSERT(false);
  9165. } break;
  9166. }
  9167. }
  9168. // ggml_compute_forward_silu_back
  9169. static void ggml_compute_forward_silu_back_f32(
  9170. const struct ggml_compute_params * params,
  9171. struct ggml_tensor * dst) {
  9172. const struct ggml_tensor * src0 = dst->src[0];
  9173. const struct ggml_tensor * grad = dst->src[1];
  9174. GGML_ASSERT(ggml_is_contiguous_except_dim_1(grad));
  9175. GGML_ASSERT(ggml_is_contiguous_except_dim_1(src0));
  9176. GGML_ASSERT(ggml_is_contiguous_except_dim_1(dst));
  9177. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  9178. GGML_ASSERT(ggml_are_same_shape(src0, grad));
  9179. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  9180. return;
  9181. }
  9182. const int ith = params->ith;
  9183. const int nth = params->nth;
  9184. const int nc = src0->ne[0];
  9185. const int nr = ggml_nrows(src0);
  9186. // rows per thread
  9187. const int dr = (nr + nth - 1)/nth;
  9188. // row range for this thread
  9189. const int ir0 = dr*ith;
  9190. const int ir1 = MIN(ir0 + dr, nr);
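// computes dx = dy * silu'(x) with x = src0 and dy = grad (the upstream gradient);
// silu(x) = x*sigmoid(x), so silu'(x) = sigmoid(x) * (1 + x*(1 - sigmoid(x))),
// assuming ggml_vec_silu_backward_f32 implements this standard derivative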
  9191. for (int i1 = ir0; i1 < ir1; i1++) {
  9192. ggml_vec_silu_backward_f32(nc,
  9193. (float *) ((char *) dst->data + i1*( dst->nb[1])),
  9194. (float *) ((char *) src0->data + i1*(src0->nb[1])),
  9195. (float *) ((char *) grad->data + i1*(grad->nb[1])));
  9196. #ifndef NDEBUG
  9197. for (int k = 0; k < nc; k++) {
  9198. const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k];
  9199. UNUSED(x);
  9200. assert(!isnan(x));
  9201. assert(!isinf(x));
  9202. }
  9203. #endif
  9204. }
  9205. }
  9206. static void ggml_compute_forward_silu_back(
  9207. const struct ggml_compute_params * params,
  9208. struct ggml_tensor * dst) {
  9209. const struct ggml_tensor * src0 = dst->src[0];
  9210. switch (src0->type) {
  9211. case GGML_TYPE_F32:
  9212. {
  9213. ggml_compute_forward_silu_back_f32(params, dst);
  9214. } break;
  9215. default:
  9216. {
  9217. GGML_ASSERT(false);
  9218. } break;
  9219. }
  9220. }
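// ggml_compute_forward_hardswish
// hardswish(x) = x * min(max(x + 3, 0), 6) / 6 (standard definition, assuming
// ggml_vec_hardswish_f32 follows it)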
  9221. static void ggml_compute_forward_hardswish_f32(
  9222. const struct ggml_compute_params * params,
  9223. struct ggml_tensor * dst) {
  9224. const struct ggml_tensor * src0 = dst->src[0];
  9225. assert(params->ith == 0);
  9226. assert(ggml_are_same_shape(src0, dst));
  9227. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  9228. return;
  9229. }
  9230. const int n = ggml_nrows(src0);
  9231. const int nc = src0->ne[0];
  9232. assert(dst->nb[0] == sizeof(float));
  9233. assert(src0->nb[0] == sizeof(float));
  9234. for (int i = 0; i < n; i++) {
  9235. ggml_vec_hardswish_f32(nc,
  9236. (float *) ((char *) dst->data + i*( dst->nb[1])),
  9237. (float *) ((char *) src0->data + i*(src0->nb[1])));
  9238. }
  9239. }
  9240. static void ggml_compute_forward_hardswish(
  9241. const struct ggml_compute_params * params,
  9242. struct ggml_tensor * dst) {
  9243. const struct ggml_tensor * src0 = dst->src[0];
  9244. switch (src0->type) {
  9245. case GGML_TYPE_F32:
  9246. {
  9247. ggml_compute_forward_hardswish_f32(params, dst);
  9248. } break;
  9249. default:
  9250. {
  9251. GGML_ASSERT(false);
  9252. } break;
  9253. }
  9254. }
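// ggml_compute_forward_hardsigmoid
// hardsigmoid(x) = min(max((x + 3) / 6, 0), 1) (standard definition, assuming
// ggml_vec_hardsigmoid_f32 follows it)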
  9255. static void ggml_compute_forward_hardsigmoid_f32(
  9256. const struct ggml_compute_params * params,
  9257. struct ggml_tensor * dst) {
  9258. const struct ggml_tensor * src0 = dst->src[0];
  9259. assert(params->ith == 0);
  9260. assert(ggml_are_same_shape(src0, dst));
  9261. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  9262. return;
  9263. }
  9264. const int n = ggml_nrows(src0);
  9265. const int nc = src0->ne[0];
  9266. assert(dst->nb[0] == sizeof(float));
  9267. assert(src0->nb[0] == sizeof(float));
  9268. for (int i = 0; i < n; i++) {
  9269. ggml_vec_hardsigmoid_f32(nc,
  9270. (float *) ((char *) dst->data + i*( dst->nb[1])),
  9271. (float *) ((char *) src0->data + i*(src0->nb[1])));
  9272. }
  9273. }
  9274. static void ggml_compute_forward_hardsigmoid(
  9275. const struct ggml_compute_params * params,
  9276. struct ggml_tensor * dst) {
  9277. const struct ggml_tensor * src0 = dst->src[0];
  9278. switch (src0->type) {
  9279. case GGML_TYPE_F32:
  9280. {
  9281. ggml_compute_forward_hardsigmoid_f32(params, dst);
  9282. } break;
  9283. default:
  9284. {
  9285. GGML_ASSERT(false);
  9286. } break;
  9287. }
  9288. }
  9289. // ggml_compute_forward_norm
  9290. static void ggml_compute_forward_norm_f32(
  9291. const struct ggml_compute_params * params,
  9292. struct ggml_tensor * dst) {
  9293. const struct ggml_tensor * src0 = dst->src[0];
  9294. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  9295. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  9296. return;
  9297. }
  9298. GGML_ASSERT(src0->nb[0] == sizeof(float));
  9299. const int ith = params->ith;
  9300. const int nth = params->nth;
  9301. GGML_TENSOR_UNARY_OP_LOCALS
  9302. float eps;
  9303. memcpy(&eps, dst->op_params, sizeof(float));
  9304. GGML_ASSERT(eps > 0.0f);
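// layer normalization without affine parameters: for each row,
// y = (x - mean(x)) / sqrt(var(x) + eps)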
  9305. // TODO: optimize
  9306. for (int64_t i03 = 0; i03 < ne03; i03++) {
  9307. for (int64_t i02 = 0; i02 < ne02; i02++) {
  9308. for (int64_t i01 = ith; i01 < ne01; i01 += nth) {
  9309. const float * x = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
  9310. ggml_float sum = 0.0;
  9311. for (int64_t i00 = 0; i00 < ne00; i00++) {
  9312. sum += (ggml_float)x[i00];
  9313. }
  9314. float mean = sum/ne00;
  9315. float * y = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3);
  9316. ggml_float sum2 = 0.0;
  9317. for (int64_t i00 = 0; i00 < ne00; i00++) {
  9318. float v = x[i00] - mean;
  9319. y[i00] = v;
  9320. sum2 += (ggml_float)(v*v);
  9321. }
  9322. float variance = sum2/ne00;
  9323. const float scale = 1.0f/sqrtf(variance + eps);
  9324. ggml_vec_scale_f32(ne00, y, scale);
  9325. }
  9326. }
  9327. }
  9328. }
  9329. static void ggml_compute_forward_norm(
  9330. const struct ggml_compute_params * params,
  9331. struct ggml_tensor * dst) {
  9332. const struct ggml_tensor * src0 = dst->src[0];
  9333. switch (src0->type) {
  9334. case GGML_TYPE_F32:
  9335. {
  9336. ggml_compute_forward_norm_f32(params, dst);
  9337. } break;
  9338. default:
  9339. {
  9340. GGML_ASSERT(false);
  9341. } break;
  9342. }
  9343. }
9344. // ggml_compute_forward_rms_norm
  9345. static void ggml_compute_forward_rms_norm_f32(
  9346. const struct ggml_compute_params * params,
  9347. struct ggml_tensor * dst) {
  9348. const struct ggml_tensor * src0 = dst->src[0];
  9349. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  9350. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  9351. return;
  9352. }
  9353. GGML_ASSERT(src0->nb[0] == sizeof(float));
  9354. const int ith = params->ith;
  9355. const int nth = params->nth;
  9356. GGML_TENSOR_UNARY_OP_LOCALS
  9357. float eps;
  9358. memcpy(&eps, dst->op_params, sizeof(float));
  9359. GGML_ASSERT(eps > 0.0f);
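// RMS normalization: for each row, y = x / sqrt(mean(x^2) + eps)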
  9360. // TODO: optimize
  9361. for (int64_t i03 = 0; i03 < ne03; i03++) {
  9362. for (int64_t i02 = 0; i02 < ne02; i02++) {
  9363. for (int64_t i01 = ith; i01 < ne01; i01 += nth) {
  9364. const float * x = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
  9365. ggml_float sum = 0.0;
  9366. for (int64_t i00 = 0; i00 < ne00; i00++) {
  9367. sum += (ggml_float)(x[i00] * x[i00]);
  9368. }
  9369. const float mean = sum/ne00;
  9370. float * y = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3);
  9371. memcpy(y, x, ne00 * sizeof(float));
  9372. // for (int i00 = 0; i00 < ne00; i00++) {
  9373. // y[i00] = x[i00];
  9374. // }
  9375. const float scale = 1.0f/sqrtf(mean + eps);
  9376. ggml_vec_scale_f32(ne00, y, scale);
  9377. }
  9378. }
  9379. }
  9380. }
  9381. static void ggml_compute_forward_rms_norm(
  9382. const struct ggml_compute_params * params,
  9383. struct ggml_tensor * dst) {
  9384. const struct ggml_tensor * src0 = dst->src[0];
  9385. switch (src0->type) {
  9386. case GGML_TYPE_F32:
  9387. {
  9388. ggml_compute_forward_rms_norm_f32(params, dst);
  9389. } break;
  9390. default:
  9391. {
  9392. GGML_ASSERT(false);
  9393. } break;
  9394. }
  9395. }
  9396. static void ggml_compute_forward_rms_norm_back_f32(
  9397. const struct ggml_compute_params * params,
  9398. struct ggml_tensor * dst) {
  9399. const struct ggml_tensor * src0 = dst->src[0];
  9400. const struct ggml_tensor * src1 = dst->src[1];
  9401. GGML_ASSERT(ggml_are_same_shape(src0, dst) && ggml_are_same_shape(src0, src1));
  9402. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  9403. return;
  9404. }
  9405. GGML_ASSERT(src0->nb[0] == sizeof(float));
  9406. const int ith = params->ith;
  9407. const int nth = params->nth;
  9408. GGML_TENSOR_BINARY_OP_LOCALS
  9409. float eps;
  9410. memcpy(&eps, dst->op_params, sizeof(float));
  9411. // TODO: optimize
  9412. for (int64_t i03 = 0; i03 < ne03; i03++) {
  9413. for (int64_t i02 = 0; i02 < ne02; i02++) {
  9414. for (int64_t i01 = ith; i01 < ne01; i01 += nth) {
  9415. // src1 is same shape as src0 => same indices
  9416. const int64_t i11 = i01;
  9417. const int64_t i12 = i02;
  9418. const int64_t i13 = i03;
  9419. const float * x = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
  9420. const float * dz = (float *) ((char *) src1->data + i11*nb11 + i12*nb12 + i13*nb13);
  9421. ggml_float sum_xx = 0.0;
  9422. ggml_float sum_xdz = 0.0;
  9423. for (int64_t i00 = 0; i00 < ne00; i00++) {
  9424. sum_xx += (ggml_float)(x[i00] * x[i00]);
  9425. sum_xdz += (ggml_float)(x[i00] * dz[i00]);
  9426. }
  9427. //const float mean = (float)(sum_xx)/ne00;
  9428. const float mean_eps = (float)(sum_xx)/ne00 + eps;
  9429. const float sum_eps = (float)(sum_xx) + eps*ne00;
  9430. //const float mean_xdz = (float)(sum_xdz)/ne00;
  9431. // we could cache rms from forward pass to improve performance.
  9432. // to do this implement ggml_rms and compose ggml_rms_norm using ggml_rms.
  9433. //const float rms = sqrtf(mean_eps);
  9434. const float rrms = 1.0f / sqrtf(mean_eps);
  9435. //const float scale = -rrms/(ne00 * mean_eps); // -1/(n*rms**3)
  9436. {
  9437. // z = rms_norm(x)
  9438. //
  9439. // rms_norm(src0) =
  9440. // scale(
  9441. // src0,
  9442. // div(
  9443. // 1,
  9444. // sqrt(
  9445. // add(
  9446. // scale(
  9447. // sum(
  9448. // sqr(
  9449. // src0)),
  9450. // (1.0/N)),
  9451. // eps))));
  9452. // postorder:
  9453. // ## op args grad
  9454. // 00 param src0 grad[#00]
  9455. // 01 const 1
  9456. // 02 sqr (#00) grad[#02]
  9457. // 03 sum (#02) grad[#03]
  9458. // 04 const 1/N
  9459. // 05 scale (#03, #04) grad[#05]
  9460. // 06 const eps
  9461. // 07 add (#05, #06) grad[#07]
  9462. // 08 sqrt (#07) grad[#08]
  9463. // 09 div (#01,#08) grad[#09]
  9464. // 10 scale (#00,#09) grad[#10]
  9465. //
  9466. // backward pass, given grad[#10]
  9467. // #10: scale
  9468. // grad[#00] += scale(grad[#10],#09)
  9469. // grad[#09] += sum(mul(grad[#10],#00))
  9470. // #09: div
  9471. // grad[#08] += neg(mul(grad[#09], div(#09,#08)))
  9472. // #08: sqrt
  9473. // grad[#07] += mul(grad[#08], div(0.5, #08))
  9474. // #07: add
  9475. // grad[#05] += grad[#07]
  9476. // #05: scale
  9477. // grad[#03] += scale(grad[#05],#04)
  9478. // #03: sum
  9479. // grad[#02] += repeat(grad[#03], #02)
9480. // #02: sqr
  9481. // grad[#00] += scale(mul(#00, grad[#02]), 2.0)
  9482. //
  9483. // substitute and simplify:
  9484. // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, grad[#02]), 2.0)
  9485. // grad[#02] = repeat(grad[#03], #02)
  9486. // grad[#02] = repeat(scale(grad[#05],#04), #02)
  9487. // grad[#02] = repeat(scale(grad[#07],#04), #02)
  9488. // grad[#02] = repeat(scale(mul(grad[#08], div(0.5, #08)),#04), #02)
  9489. // grad[#02] = repeat(scale(mul(neg(mul(grad[#09], div(#09,#08))), div(0.5, #08)),#04), #02)
  9490. // grad[#02] = repeat(scale(mul(neg(mul(sum(mul(grad[#10],#00)), div(#09,#08))), div(0.5, #08)),#04), #02)
  9491. // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(#09,#08) * div(0.5, #08) * (1/N)), #02)
  9492. // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(div(#01,#08),#08) * div(0.5, #08) * (1/N)), #02)
  9493. // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(1,#08*#08) * div(0.5, #08) * (1/N)), #02)
  9494. // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N)), #02)
  9495. // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, grad[#02]), 2.0)
  9496. // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, repeat(-(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N)), #02)), 2.0)
  9497. // grad[#00] = scale(grad(#10), #09) + scale(scale(#00, -(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N))), 2.0)
  9498. // grad[#00] = scale(grad(#10), #09) + scale(#00, -(sum(mul(grad[#10],#00)) * div(1,#07) * div(1,#08) * (1/N)))
  9499. // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(1,#07*#08) * (-1/N))
  9501. // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(1,mean_eps*rms) * (-1/N))
  9502. // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*mean_eps))
  9503. // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*(sum_xx/N+eps)))
  9504. // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*sum_xx+rms*N*eps))
  9505. // grad[#00] = scale(dz, rrms) + scale(x, sum(mul(dz,x)) * div(-1,rms*N*mean_eps))
  9506. // grad[#00] = scale(dz, rrms) + scale(x, sum_xdz * div(-1,rms*N*mean_eps))
  9507. // a = b*c + d*e
  9508. // a = b*c*f/f + d*e*f/f
  9509. // a = (b*c*f + d*e*f)*(1/f)
  9510. // a = (b*c*(1/c) + d*e*(1/c))*(1/(1/c))
  9511. // a = (b + d*e/c)*c
  9512. // b = dz, c = rrms, d = x, e = sum_xdz * div(-1,rms*N*mean_eps)
  9513. // a = (dz + x*sum_xdz * div(-1,rms*N*mean_eps)/rrms)*rrms
  9514. // a = (dz + x*sum_xdz * div(-1,rms*N*mean_eps)*rms)*rrms
  9515. // a = (dz + x*sum_xdz * div(-rms,rms*N*mean_eps))*rrms
  9516. // a = (dz + x*sum_xdz * div(-1,N*mean_eps))*rrms
  9517. // a = (dz + x*div(-sum_xdz,N*mean_eps))*rrms
  9518. // a = (dz + x*div(-mean_xdz,mean_eps))*rrms
  9519. // grad[#00] = scale(dz + scale(x, div(-mean_xdz,mean_eps)),rrms)
  9520. // grad[#00] = scale(dz + scale(x, -mean_xdz/mean_eps),rrms)
  9521. // dx = scale(dz + scale(x, -mean_xdz/mean_eps),rrms)
  9522. }
  9523. // dx = scale(dz + scale(x, -mean_xdz/mean_eps),rrms)
  9524. // post-order:
  9525. // dx := x
  9526. // dx := scale(dx,-mean_xdz/mean_eps)
  9527. // dx := add(dx, dz)
  9528. // dx := scale(dx, rrms)
  9529. float * dx = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3);
  9530. ggml_vec_cpy_f32 (ne00, dx, x);
  9531. // ggml_vec_scale_f32(ne00, dx, -mean_xdz/mean_eps);
  9532. ggml_vec_scale_f32(ne00, dx, (float)(-sum_xdz)/sum_eps);
  9533. ggml_vec_acc_f32 (ne00, dx, dz);
  9534. ggml_vec_scale_f32(ne00, dx, rrms);
  9535. }
  9536. }
  9537. }
  9538. }
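// Net effect of the four vector ops above, per row (the same closed form the derivation arrives at):
//   dx_i = rrms * ( dz_i - x_i * sum_xdz / (sum_xx + ne00*eps) )
// where sum_xx = sum_j x_j^2, sum_xdz = sum_j x_j*dz_j and rrms = 1/sqrt(sum_xx/ne00 + eps).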
  9539. static void ggml_compute_forward_rms_norm_back(
  9540. const struct ggml_compute_params * params,
  9541. struct ggml_tensor * dst) {
  9542. const struct ggml_tensor * src0 = dst->src[0];
  9543. switch (src0->type) {
  9544. case GGML_TYPE_F32:
  9545. {
  9546. ggml_compute_forward_rms_norm_back_f32(params, dst);
  9547. } break;
  9548. default:
  9549. {
  9550. GGML_ASSERT(false);
  9551. } break;
  9552. }
  9553. }
  9554. // ggml_compute_forward_group_norm
  9555. static void ggml_compute_forward_group_norm_f32(
  9556. const struct ggml_compute_params * params,
  9557. struct ggml_tensor * dst) {
  9558. const struct ggml_tensor * src0 = dst->src[0];
  9559. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  9560. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  9561. return;
  9562. }
  9563. GGML_ASSERT(src0->nb[0] == sizeof(float));
  9564. const int ith = params->ith;
  9565. const int nth = params->nth;
  9566. GGML_TENSOR_UNARY_OP_LOCALS
  9567. const float eps = 1e-6f; // TODO: make this a parameter
  9568. // TODO: optimize
  9569. int n_channels = src0->ne[2];
  9570. int n_groups = dst->op_params[0];
  9571. int n_channels_per_group = (n_channels + n_groups - 1) / n_groups;
  9572. for (int i = ith; i < n_groups; i += nth) {
  9573. int start = i * n_channels_per_group;
  9574. int end = start + n_channels_per_group;
  9575. if (end > n_channels) {
  9576. end = n_channels;
  9577. }
  9578. int step = end - start;
  9579. for (int64_t i03 = 0; i03 < ne03; i03++) {
  9580. ggml_float sum = 0.0;
  9581. for (int64_t i02 = start; i02 < end; i02++) {
  9582. for (int64_t i01 = 0; i01 < ne01; i01++) {
  9583. const float * x = (float *)((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03);
  9584. ggml_float sumr = 0.0;
  9585. for (int64_t i00 = 0; i00 < ne00; i00++) {
  9586. sumr += (ggml_float)x[i00];
  9587. }
  9588. sum += sumr;
  9589. }
  9590. }
  9591. const float mean = sum / (ne00 * ne01 * step);
  9592. ggml_float sum2 = 0.0;
  9593. for (int64_t i02 = start; i02 < end; i02++) {
  9594. for (int64_t i01 = 0; i01 < ne01; i01++) {
  9595. const float * x = (float *)((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03);
  9596. float * y = (float *)((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3);
  9597. ggml_float sumr = 0.0;
  9598. for (int64_t i00 = 0; i00 < ne00; i00++) {
  9599. float v = x[i00] - mean;
  9600. y[i00] = v;
  9601. sumr += (ggml_float)(v * v);
  9602. }
  9603. sum2 += sumr;
  9604. }
  9605. }
  9606. const float variance = sum2 / (ne00 * ne01 * step);
  9607. const float scale = 1.0f / sqrtf(variance + eps);
  9608. for (int64_t i02 = start; i02 < end; i02++) {
  9609. for (int64_t i01 = 0; i01 < ne01; i01++) {
  9610. float * y = (float *)((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3);
  9611. ggml_vec_scale_f32(ne00, y, scale);
  9612. }
  9613. }
  9614. }
  9615. }
  9616. }
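// Summary of the pass above: the ne[2] channels are split into n_groups groups of up to
// n_channels_per_group channels; for each group the mean and variance are taken over all
// ne00*ne01*step elements, and the output is (x - mean) / sqrt(variance + eps), with eps
// currently hard-coded to 1e-6f.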
  9617. static void ggml_compute_forward_group_norm(
  9618. const struct ggml_compute_params * params,
  9619. struct ggml_tensor * dst) {
  9620. const struct ggml_tensor * src0 = dst->src[0];
  9621. switch (src0->type) {
  9622. case GGML_TYPE_F32:
  9623. {
  9624. ggml_compute_forward_group_norm_f32(params, dst);
  9625. } break;
  9626. default:
  9627. {
  9628. GGML_ASSERT(false);
  9629. } break;
  9630. }
  9631. }
  9632. // ggml_compute_forward_mul_mat
  9633. #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
  9634. // helper function to determine if it is better to use BLAS or not
  9635. // for large matrices, BLAS is faster
  9636. static bool ggml_compute_forward_mul_mat_use_blas(struct ggml_tensor * dst) {
  9637. const struct ggml_tensor * src0 = dst->src[0];
  9638. const struct ggml_tensor * src1 = dst->src[1];
  9639. //const int64_t ne00 = src0->ne[0];
  9640. //const int64_t ne01 = src0->ne[1];
  9641. const int64_t ne10 = src1->ne[0];
  9642. const int64_t ne0 = dst->ne[0];
  9643. const int64_t ne1 = dst->ne[1];
  9644. // NOTE: with GGML_OP_MUL_MAT_ID we don't want to go through the BLAS branch because it will dequantize (to_float)
  9645. // all the experts for each batch element and the processing would become incredibly slow
  9646. // TODO: find the optimal values for these
  9647. if (dst->op != GGML_OP_MUL_MAT_ID &&
  9648. ggml_is_contiguous(src0) &&
  9649. ggml_is_contiguous(src1) &&
  9650. //src0->type == GGML_TYPE_F32 &&
  9651. src1->type == GGML_TYPE_F32 &&
  9652. (ne0 >= 32 && ne1 >= 32 && ne10 >= 32)) {
  9653. /*printf("BLAS: %d %d %d %d %d\n", ne0, ne1, ne10, ne00, ne01);*/
  9654. return true;
  9655. }
  9656. return false;
  9657. }
  9658. #endif
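// Note on the heuristic above: BLAS is only taken when both operands are contiguous, src1 is F32 and
// all of ne0, ne1, ne10 are at least 32; for smaller shapes the dequantize + sgemm overhead is not
// expected to pay off and the internal vec_dot kernels are used instead.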
  9659. static void ggml_compute_forward_mul_mat(
  9660. const struct ggml_compute_params * params,
  9661. struct ggml_tensor * dst) {
  9662. const struct ggml_tensor * src0 = dst->src[0];
  9663. const struct ggml_tensor * src1 = dst->src[1];
  9664. int64_t t0 = ggml_perf_time_us();
  9665. UNUSED(t0);
  9666. GGML_TENSOR_BINARY_OP_LOCALS
  9667. const int ith = params->ith;
  9668. const int nth = params->nth;
  9669. const enum ggml_type type = src0->type;
  9670. const bool src1_cont = ggml_is_contiguous(src1);
  9671. ggml_vec_dot_t const vec_dot = type_traits[type].vec_dot;
  9672. enum ggml_type const vec_dot_type = type_traits[type].vec_dot_type;
  9673. ggml_from_float_t const from_float_to_vec_dot = type_traits[vec_dot_type].from_float;
  9674. int64_t const vec_dot_num_rows = type_traits[type].nrows;
  9675. GGML_ASSERT(ne0 == ne01);
  9676. GGML_ASSERT(ne1 == ne11);
  9677. GGML_ASSERT(ne2 == ne12);
  9678. GGML_ASSERT(ne3 == ne13);
  9679. // we don't support permuted src0 or src1
  9680. GGML_ASSERT(nb00 == ggml_type_size(type));
  9681. GGML_ASSERT(nb10 == ggml_type_size(src1->type));
  9682. // dst cannot be transposed or permuted
  9683. GGML_ASSERT(nb0 == sizeof(float));
  9684. GGML_ASSERT(nb0 <= nb1);
  9685. GGML_ASSERT(nb1 <= nb2);
  9686. GGML_ASSERT(nb2 <= nb3);
  9687. // broadcast factors
  9688. const int64_t r2 = ne12/ne02;
  9689. const int64_t r3 = ne13/ne03;
  9690. // nb01 >= nb00 - src0 is not transposed
  9691. // compute by src0 rows
  9692. #if defined(GGML_USE_CLBLAST)
  9693. if (ggml_cl_can_mul_mat(src0, src1, dst)) {
  9694. if (params->ith == 0 && params->type == GGML_TASK_TYPE_COMPUTE) {
  9695. ggml_cl_mul_mat(src0, src1, dst, params->wdata, params->wsize);
  9696. }
  9697. return;
  9698. }
  9699. #endif
  9700. #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
  9701. if (ggml_compute_forward_mul_mat_use_blas(dst)) {
  9702. const int64_t ne_plane = ne01*ne00;
  9703. const size_t desired_wsize = ne13*ne12*ne_plane*sizeof(float);
  9704. UNUSED(desired_wsize);
  9705. if (params->type == GGML_TASK_TYPE_INIT) {
  9706. if (type != GGML_TYPE_F32) {
  9707. assert(params->wsize >= desired_wsize);
  9708. // parallelize by src0 rows
  9709. for (int64_t i13 = 0; i13 < ne13; i13++) {
  9710. for (int64_t i12 = 0; i12 < ne12; i12++) {
  9711. // broadcast src0 into src1 across 2nd,3rd dimension
  9712. const int64_t i03 = i13/r3;
  9713. const int64_t i02 = i12/r2;
  9714. const void * x = (char *) src0->data + i02*nb02 + i03*nb03;
  9715. float * const wdata = (float *) params->wdata + i13*ne12*ne_plane + i12*ne_plane;
  9716. ggml_to_float_t const to_float = type_traits[type].to_float;
  9717. for (int64_t i01 = ith; i01 < ne01; i01 += nth) {
  9718. to_float((const char *) x + i01*nb01, wdata + i01*ne00, ne00);
  9719. }
  9720. }
  9721. }
  9722. }
  9723. return;
  9724. }
  9725. if (params->type == GGML_TASK_TYPE_FINALIZE) {
  9726. return;
  9727. }
  9728. // perform sgemm, parallelization controlled by blas lib
  9729. if (ith != 0) {
  9730. return;
  9731. }
  9732. //const int64_t tgemm0 = ggml_perf_time_us();
  9733. for (int64_t i13 = 0; i13 < ne13; i13++) {
  9734. for (int64_t i12 = 0; i12 < ne12; i12++) {
  9735. const int64_t i03 = i13/r3;
  9736. const int64_t i02 = i12/r2;
  9737. const void * x = (char *) src0->data + i02*nb02 + i03*nb03;
  9738. const float * y = (float *) ((char *) src1->data + i12*nb12 + i13*nb13);
  9739. float * d = (float *) ((char *) dst->data + i12*nb2 + i13*nb3);
  9740. if (type != GGML_TYPE_F32) {
  9741. x = (float *) params->wdata + i13*ne12*ne_plane + i12*ne_plane;
  9742. }
  9743. cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans,
  9744. ne1, ne01, ne10,
  9745. 1.0f, y, ne10,
  9746. x, ne00,
  9747. 0.0f, d, ne01);
  9748. }
  9749. }
  9750. //printf("cblas_sgemm = %.3f ms, %lld flops\n", (ggml_perf_time_us() - tgemm0)/1000.0, ne13*ne12*ne1*ne01*ne10*2);
  9751. //printf("CBLAS = %f ms, %d x %d x %d x %d\n", (ggml_perf_time_us() - t0)/1000.0, ne0, ne1, ne2, ne3);
  9752. return;
  9753. }
  9754. #endif
  9755. #if GGML_USE_LLAMAFILE
  9756. if (src1_cont) {
  9757. for (int64_t i13 = 0; i13 < ne13; i13++)
  9758. for (int64_t i12 = 0; i12 < ne12; i12++)
  9759. if (!llamafile_sgemm(ne01, ne11, ne00/ggml_blck_size(src0->type),
  9760. (const char *)src0->data + i12/r2*nb02 + i13/r3*nb03,
  9761. nb01/ggml_type_size(src0->type),
  9762. (const char *)src1->data + i12*nb12 + i13*nb13,
  9763. nb11/ggml_type_size(src1->type),
  9764. (char *)dst->data + i12*nb2 + i13*nb3,
  9765. nb1/ggml_type_size(dst->type),
  9766. ith, nth,
  9767. params->type,
  9768. src0->type,
  9769. src1->type,
  9770. dst->type))
  9771. goto UseGgmlGemm1;
  9772. return;
  9773. }
  9774. UseGgmlGemm1:;
  9775. #endif
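// (if llamafile_sgemm declines any (i12,i13) plane, the goto above skips the early return and the
//  generic quantize/vec_dot path below is used instead)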
  9776. if (params->type == GGML_TASK_TYPE_INIT) {
  9777. if (ith != 0) {
  9778. return;
  9779. }
  9780. if (src1->type != vec_dot_type) {
  9781. char * wdata = params->wdata;
  9782. const size_t row_size = ggml_row_size(vec_dot_type, ne10);
  9783. assert(params->wsize >= ne11*ne12*ne13*row_size);
  9784. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  9785. for (int64_t i13 = 0; i13 < ne13; ++i13) {
  9786. for (int64_t i12 = 0; i12 < ne12; ++i12) {
  9787. for (int64_t i11 = 0; i11 < ne11; ++i11) {
  9788. from_float_to_vec_dot((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11), (void *) wdata, ne10);
  9789. wdata += row_size;
  9790. }
  9791. }
  9792. }
  9793. }
  9794. return;
  9795. }
  9796. if (params->type == GGML_TASK_TYPE_FINALIZE) {
  9797. return;
  9798. }
  9799. const void * wdata = (src1->type == vec_dot_type) ? src1->data : params->wdata;
  9800. const size_t row_size = ggml_row_size(vec_dot_type, ne10);
  9801. #if GGML_USE_LLAMAFILE
  9802. if (src1->type != vec_dot_type) {
  9803. for (int64_t i13 = 0; i13 < ne13; i13++)
  9804. for (int64_t i12 = 0; i12 < ne12; i12++)
  9805. if (!llamafile_sgemm(ne01, ne11, ne00/ggml_blck_size(src0->type),
  9806. (const char *)src0->data + i12/r2*nb02 + i13/r3*nb03,
  9807. nb01/ggml_type_size(src0->type),
  9808. (const char *)wdata + (i12*ne11 + i13*ne12*ne11)*row_size,
  9809. row_size/ggml_type_size(vec_dot_type),
  9810. (char *)dst->data + i12*nb2 + i13*nb3,
  9811. nb1/ggml_type_size(dst->type),
  9812. ith, nth,
  9813. params->type,
  9814. src0->type,
  9815. vec_dot_type,
  9816. dst->type))
  9817. goto UseGgmlGemm2;
  9818. return;
  9819. }
  9820. UseGgmlGemm2:;
  9821. #endif
  9822. const int64_t nr0 = ne01; // src0 rows
  9823. const int64_t nr1 = ne1*ne12*ne13; // src1 rows
  9824. //printf("nr0 = %lld, nr1 = %lld\n", nr0, nr1);
  9825. // distribute the thread work across the inner or outer loop based on which one is larger
  9826. const int64_t nth0 = nr0 > nr1 ? nth : 1; // parallelize by src0 rows
  9827. const int64_t nth1 = nr0 > nr1 ? 1 : nth; // parallelize by src1 rows
  9828. const int64_t ith0 = ith % nth0;
  9829. const int64_t ith1 = ith / nth0;
  9830. const int64_t dr0 = (nr0 + nth0 - 1)/nth0;
  9831. const int64_t dr1 = (nr1 + nth1 - 1)/nth1;
  9832. const int64_t ir010 = dr0*ith0;
  9833. const int64_t ir011 = MIN(ir010 + dr0, nr0);
  9834. const int64_t ir110 = dr1*ith1;
  9835. const int64_t ir111 = MIN(ir110 + dr1, nr1);
  9836. //printf("ir010 = %6lld, ir011 = %6lld, ir110 = %6lld, ir111 = %6lld\n", ir010, ir011, ir110, ir111);
  9837. // threads with no work simply yield (not sure if it helps)
  9838. if (ir010 >= ir011 || ir110 >= ir111) {
  9839. sched_yield();
  9840. return;
  9841. }
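// Worked example of the split above (illustrative numbers): with nth = 4, nr0 = 4096 and nr1 = 8,
// nr0 > nr1 so nth0 = 4 and nth1 = 1; every thread covers the full src1 range [0, 8) and a distinct
// chunk of dr0 = 1024 consecutive src0 rows ([0,1024), [1024,2048), ...).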
  9842. assert(ne12 % ne02 == 0);
  9843. assert(ne13 % ne03 == 0);
  9844. // block-tiling attempt
  9845. const int64_t blck_0 = 16;
  9846. const int64_t blck_1 = 16;
  9847. // dot kernels can handle 1 row and col at a time, but mmla kernels can process 2 rows and cols
  9848. int64_t nrc = vec_dot_num_rows;
  9849. // TODO: currently the mmla kernels support only even numbered rows/cols.
  9850. // this check can be removed once they are extended to support odd numbered rows/cols too
  9851. if ((nr0 % 2 != 0) || (ne11 % 2 != 0)) {
  9852. nrc = 1;
  9853. }
  9854. const size_t src1_col_stride = src1_cont || src1->type != vec_dot_type ? row_size : nb11;
  9855. // attempt to reduce false-sharing (does not seem to make a difference)
  9856. // 16 * 2, accounting for mmla kernels
  9857. float tmp[32];
  9858. for (int64_t iir1 = ir110; iir1 < ir111; iir1 += blck_1) {
  9859. for (int64_t iir0 = ir010; iir0 < ir011; iir0 += blck_0) {
  9860. for (int64_t ir1 = iir1; ir1 < iir1 + blck_1 && ir1 < ir111; ir1 += nrc) {
  9861. const int64_t i13 = (ir1/(ne12*ne1));
  9862. const int64_t i12 = (ir1 - i13*ne12*ne1)/ne1;
  9863. const int64_t i11 = (ir1 - i13*ne12*ne1 - i12*ne1);
  9864. // broadcast src0 into src1
  9865. const int64_t i03 = i13/r3;
  9866. const int64_t i02 = i12/r2;
  9867. const int64_t i1 = i11;
  9868. const int64_t i2 = i12;
  9869. const int64_t i3 = i13;
  9870. const char * src0_row = (const char *) src0->data + (0 + i02*nb02 + i03*nb03);
  9871. // desc: when src1 is not a contiguous memory block we have to calculate the offset using the strides
  9872. // if it is, then we have either copied the data to params->wdata and made it contiguous or we are using
  9873. // the original src1 data pointer, so we should index using the indices directly
  9874. // TODO: this is a bit of a hack, we should probably have a better way to handle this
  9875. const char * src1_col = (const char *) wdata +
  9876. (src1_cont || src1->type != vec_dot_type
  9877. ? (i11 + i12*ne11 + i13*ne12*ne11)*row_size
  9878. : (i11*nb11 + i12*nb12 + i13*nb13));
  9879. float * dst_col = (float *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3));
  9880. //for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir011; ++ir0) {
  9881. // vec_dot(ne00, &dst_col[ir0], src0_row + ir0*nb01, src1_col);
  9882. //}
  9883. for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir011; ir0 += nrc) {
  9884. vec_dot(ne00, &tmp[ir0 - iir0], (nrc>1 ? 16 : 0), src0_row + ir0*nb01, (nrc>1 ? nb01 : 0), src1_col, (nrc>1 ? src1_col_stride : 0), nrc);
  9885. }
  9886. for (int cn = 0; cn < nrc; ++cn) {
  9887. memcpy(&dst_col[iir0 + cn*nb1/nb0], tmp + (cn*16), (MIN(iir0 + blck_0, ir011) - iir0)*sizeof(float));
  9888. }
  9889. }
  9890. }
  9891. }
  9892. }
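// Note: dot-product results for each 16-wide block are staged in the local tmp buffer and written to
// dst with one memcpy per block - the "attempt to reduce false-sharing" mentioned above - and nrc > 1
// lets mmla-style kernels produce two rows per call, which is why tmp holds 16 * 2 floats.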
  9893. // ggml_compute_forward_mul_mat_id
  9894. static void ggml_compute_forward_mul_mat_id(
  9895. const struct ggml_compute_params * params,
  9896. struct ggml_tensor * dst) {
  9897. const struct ggml_tensor * src0 = dst->src[0];
  9898. const struct ggml_tensor * src1 = dst->src[1];
  9899. const struct ggml_tensor * ids = dst->src[2];
  9900. GGML_TENSOR_BINARY_OP_LOCALS
  9901. const int ith = params->ith;
  9902. const int nth = params->nth;
  9903. const enum ggml_type type = src0->type;
  9904. const bool src1_cont = ggml_is_contiguous(src1);
  9905. ggml_vec_dot_t const vec_dot = type_traits[type].vec_dot;
  9906. enum ggml_type const vec_dot_type = type_traits[type].vec_dot_type;
  9907. ggml_from_float_t const from_float_to_vec_dot = type_traits[vec_dot_type].from_float;
  9908. // we don't support permuted src0 or src1
  9909. GGML_ASSERT(nb00 == ggml_type_size(type));
  9910. GGML_ASSERT(nb10 == ggml_type_size(src1->type));
  9911. // dst cannot be transposed or permuted
  9912. GGML_ASSERT(nb0 == sizeof(float));
  9913. GGML_ASSERT(nb0 <= nb1);
  9914. GGML_ASSERT(nb1 <= nb2);
  9915. GGML_ASSERT(nb2 <= nb3);
  9916. // row groups
  9917. const int n_ids = ids->ne[0]; // n_expert_used
  9918. const int n_as = ne02; // n_expert
  9919. char * wdata_src1_end = (src1->type == vec_dot_type) ?
  9920. (char *) params->wdata :
  9921. (char *) params->wdata + GGML_PAD(ggml_row_size(vec_dot_type, ggml_nelements(src1)), sizeof(int64_t));
  9922. struct mmid_row_mapping {
  9923. int32_t i1;
  9924. int32_t i2;
  9925. };
  9926. int64_t * matrix_row_counts = (int64_t *) (wdata_src1_end); // [n_as]
  9927. struct mmid_row_mapping * matrix_rows = (struct mmid_row_mapping *)(matrix_row_counts + n_as); // [n_as][ne11]
  9928. if (params->type == GGML_TASK_TYPE_INIT) {
  9929. if (ith != 0) {
  9930. return;
  9931. }
  9932. char * wdata = params->wdata;
  9933. if (src1->type != vec_dot_type) {
  9934. const size_t row_size = ggml_row_size(vec_dot_type, ne10);
  9935. assert(params->wsize >= ne11*ne12*ne13*row_size);
  9936. assert(src1->type == GGML_TYPE_F32);
  9937. for (int64_t i13 = 0; i13 < ne13; ++i13) {
  9938. for (int64_t i12 = 0; i12 < ne12; ++i12) {
  9939. for (int64_t i11 = 0; i11 < ne11; ++i11) {
  9940. from_float_to_vec_dot((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11), (void *) wdata, ne10);
  9941. wdata += row_size;
  9942. }
  9943. }
  9944. }
  9945. }
  9946. // initialize matrix_row_counts
  9947. memset(matrix_row_counts, 0, n_as*sizeof(int64_t));
  9948. #define MMID_MATRIX_ROW(row_id, i1) matrix_rows[(row_id)*ne12 + (i1)]
  9949. // group rows by src0 matrix
  9950. for (int64_t iid1 = 0; iid1 < ids->ne[1]; ++iid1) {
  9951. for (int id = 0; id < n_ids; ++id) {
  9952. const int32_t i02 = *(const int32_t *) ((const char *) ids->data + iid1*ids->nb[1] + id*ids->nb[0]);
  9953. assert(i02 >= 0 && i02 < n_as);
  9954. MMID_MATRIX_ROW(i02, matrix_row_counts[i02]) = (struct mmid_row_mapping) {id, iid1};
  9955. matrix_row_counts[i02] += 1;
  9956. }
  9957. }
  9958. return;
  9959. }
  9960. if (params->type == GGML_TASK_TYPE_FINALIZE) {
  9961. return;
  9962. }
  9963. // compute each matrix multiplication in sequence
  9964. for (int cur_a = 0; cur_a < n_as; ++cur_a) {
  9965. const int64_t cne1 = matrix_row_counts[cur_a];
  9966. if (cne1 == 0) {
  9967. continue;
  9968. }
  9969. const char * src0_cur = (const char *) src0->data + cur_a*nb02;
  9970. const void * wdata = (src1->type == vec_dot_type) ? src1->data : params->wdata;
  9971. const size_t row_size = ggml_row_size(vec_dot_type, ne10);
  9972. const int64_t nr0 = ne01; // src0 rows
  9973. const int64_t nr1 = cne1; // src1 rows
  9974. // distribute the thread work across the inner or outer loop based on which one is larger
  9975. const int64_t nth0 = nr0 > nr1 ? nth : 1; // parallelize by src0 rows
  9976. const int64_t nth1 = nr0 > nr1 ? 1 : nth; // parallelize by src1 rows
  9977. const int64_t ith0 = ith % nth0;
  9978. const int64_t ith1 = ith / nth0;
  9979. const int64_t dr0 = (nr0 + nth0 - 1)/nth0;
  9980. const int64_t dr1 = (nr1 + nth1 - 1)/nth1;
  9981. const int64_t ir010 = dr0*ith0;
  9982. const int64_t ir011 = MIN(ir010 + dr0, nr0);
  9983. const int64_t ir110 = dr1*ith1;
  9984. const int64_t ir111 = MIN(ir110 + dr1, nr1);
  9985. // threads with no work simply yield (not sure if it helps)
  9986. //if (ir010 >= ir011 || ir110 >= ir111) {
  9987. // sched_yield();
  9988. // continue;
  9989. //}
  9990. // block-tiling attempt
  9991. const int64_t blck_0 = 16;
  9992. const int64_t blck_1 = 16;
  9993. // attempt to reduce false-sharing (does not seem to make a difference)
  9994. float tmp[16];
  9995. for (int64_t iir1 = ir110; iir1 < ir111; iir1 += blck_1) {
  9996. for (int64_t iir0 = ir010; iir0 < ir011; iir0 += blck_0) {
  9997. for (int64_t ir1 = iir1; ir1 < iir1 + blck_1 && ir1 < ir111; ++ir1) {
  9998. const int64_t _i12 = ir1; // logical row index for this expert
  9999. struct mmid_row_mapping row_mapping = MMID_MATRIX_ROW(cur_a, _i12);
  10000. const int id = row_mapping.i1; // selected expert index
  10001. const int64_t i11 = id % ne11;
  10002. const int64_t i12 = row_mapping.i2; // row index in src1
  10003. const int64_t i1 = id; // selected expert index
  10004. const int64_t i2 = i12; // row
  10005. // desc: when src1 is not a contiguous memory block we have to calculate the offset using the strides
  10006. // if it is, then we have either copied the data to params->wdata and made it contiguous or we are using
  10007. // the original src1 data pointer, so we should index using the indices directly
  10008. // TODO: this is a bit of a hack, we should probably have a better way to handle this
  10009. const char * src1_col = (const char *) wdata +
  10010. (src1_cont || src1->type != vec_dot_type
  10011. ? (i11 + i12*ne11)*row_size
  10012. : (i11*nb11 + i12*nb12));
  10013. float * dst_col = (float *) ((char *) dst->data + (i1*nb1 + i2*nb2));
  10014. //for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir011; ++ir0) {
  10015. // vec_dot(ne00, &dst_col[ir0], src0_row + ir0*nb01, src1_col);
  10016. //}
  10017. for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir011; ++ir0) {
  10018. vec_dot(ne00, &tmp[ir0 - iir0], 0, src0_cur + ir0*nb01, 0, src1_col, 0, 1);
  10019. }
  10020. memcpy(&dst_col[iir0], tmp, (MIN(iir0 + blck_0, ir011) - iir0)*sizeof(float));
  10021. }
  10022. }
  10023. }
  10024. }
  10025. #undef MMID_MATRIX_ROW
  10026. }
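// Summary: INIT buckets every (token, used-expert) pair by expert via MMID_MATRIX_ROW; COMPUTE then
// runs one ordinary matrix multiplication per expert over just the rows routed to it, writing each
// result row to dst at the (used-expert slot, src1 row) coordinates stored in mmid_row_mapping.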
  10027. // ggml_compute_forward_out_prod
  10028. static void ggml_compute_forward_out_prod_f32(
  10029. const struct ggml_compute_params * params,
  10030. struct ggml_tensor * dst) {
  10031. const struct ggml_tensor * src0 = dst->src[0];
  10032. const struct ggml_tensor * src1 = dst->src[1];
  10033. // int64_t t0 = ggml_perf_time_us();
  10034. // UNUSED(t0);
  10035. GGML_TENSOR_BINARY_OP_LOCALS
  10036. const int ith = params->ith;
  10037. const int nth = params->nth;
  10038. GGML_ASSERT(ne0 == ne00);
  10039. GGML_ASSERT(ne1 == ne10);
  10040. GGML_ASSERT(ne2 == ne02);
  10041. GGML_ASSERT(ne02 == ne12);
  10042. GGML_ASSERT(ne3 == ne13);
  10043. GGML_ASSERT(ne03 == ne13);
  10044. // we don't support permuted src0 or src1
  10045. GGML_ASSERT(nb00 == sizeof(float));
  10046. // dst cannot be transposed or permuted
  10047. GGML_ASSERT(nb0 == sizeof(float));
  10048. // GGML_ASSERT(nb0 <= nb1);
  10049. // GGML_ASSERT(nb1 <= nb2);
  10050. // GGML_ASSERT(nb2 <= nb3);
  10051. // nb01 >= nb00 - src0 is not transposed
  10052. // compute by src0 rows
  10053. // TODO: #if defined(GGML_USE_CLBLAST)
  10054. #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
  10055. bool use_blas = ggml_is_matrix(src0) &&
  10056. ggml_is_matrix(src1) &&
  10057. ggml_is_contiguous(src0) &&
  10058. (ggml_is_contiguous(src1) || ggml_is_transposed(src1));
  10059. #endif
  10060. if (params->type == GGML_TASK_TYPE_INIT) {
  10061. #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) // gemm beta will zero dst
  10062. if (use_blas) {
  10063. return;
  10064. }
  10065. #endif
  10066. if (ith != 0) {
  10067. return;
  10068. }
  10069. ggml_vec_set_f32(ne0*ne1*ne2*ne3, dst->data, 0);
  10070. return;
  10071. }
  10072. if (params->type == GGML_TASK_TYPE_FINALIZE) {
  10073. return;
  10074. }
  10075. #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
  10076. if (use_blas) {
  10077. if (params->ith != 0) { // All threads other than the first do no work.
  10078. return;
  10079. }
  10080. // Arguments to ggml_compute_forward_out_prod (expressed as major,minor)
  10081. // src0: (k,n)
  10082. // src1: (k,m)
  10083. // dst: (m,n)
  10084. //
  10085. // Arguments to sgemm (see https://github.com/Reference-LAPACK/lapack/blob/master/BLAS/SRC/sgemm.f)
  10086. // Also expressed as (major,minor)
  10087. // a: (m,k): so src1 transposed
  10088. // b: (k,n): so src0
  10089. // c: (m,n)
  10090. //
  10091. // However, if ggml_is_transposed(src1) is true, then
  10092. // src1->data already contains a transposed version, so sgemm mustn't
  10093. // transpose it further.
  10094. int n = src0->ne[0];
  10095. int k = src0->ne[1];
  10096. int m = src1->ne[0];
  10097. int transposeA, lda;
  10098. if (!ggml_is_transposed(src1)) {
  10099. transposeA = CblasTrans;
  10100. lda = m;
  10101. } else {
  10102. transposeA = CblasNoTrans;
  10103. lda = k;
  10104. }
  10105. float * a = (float *) ((char *) src1->data);
  10106. float * b = (float *) ((char *) src0->data);
  10107. float * c = (float *) ((char *) dst->data);
  10108. cblas_sgemm(CblasRowMajor, transposeA, CblasNoTrans, m, n, k, 1.0, a, lda, b, n, 0.0, c, n);
  10109. return;
  10110. }
  10111. #endif
  10112. // dst[:,:,:,:] = 0
  10113. // for i2,i3:
  10114. // for i1:
  10115. // for i01:
  10116. // for i0:
  10117. // dst[i0,i1,i2,i3] += src0[i0,i01,i2,i3] * src1[i1,i01,i2,i3]
  10118. // parallelize by last three dimensions
  10119. // total rows in dst
  10120. const int64_t nr = ne1*ne2*ne3;
  10121. // rows per thread
  10122. const int64_t dr = (nr + nth - 1)/nth;
  10123. // row range for this thread
  10124. const int64_t ir0 = dr*ith;
  10125. const int64_t ir1 = MIN(ir0 + dr, nr);
  10126. // block-tiling attempt
  10127. const int64_t blck_0 = MAX(GGML_VEC_MAD_UNROLL, 32);
  10128. const int64_t blck_1 = 16;
  10129. for (int64_t bir = ir0; bir < ir1; bir += blck_1) {
  10130. const int64_t bir1 = MIN(bir + blck_1, ir1);
  10131. for (int64_t bi01 = 0; bi01 < ne01; bi01 += blck_0) {
  10132. const int64_t bne01 = MIN(bi01 + blck_0, ne01);
  10133. for (int64_t ir = bir; ir < bir1; ++ir) {
  10134. // dst indices
  10135. const int64_t i3 = ir/(ne2*ne1);
  10136. const int64_t i2 = (ir - i3*ne2*ne1)/ne1;
  10137. const int64_t i1 = (ir - i3*ne2*ne1 - i2*ne1);
  10138. const int64_t i02 = i2;
  10139. const int64_t i03 = i3;
  10140. //const int64_t i10 = i1;
  10141. const int64_t i12 = i2;
  10142. const int64_t i13 = i3;
  10143. #if GGML_VEC_MAD_UNROLL > 2
  10144. const int64_t bne01_unroll = bne01 - (bne01 % GGML_VEC_MAD_UNROLL);
  10145. for (int64_t i01 = bi01; i01 < bne01_unroll; i01 += GGML_VEC_MAD_UNROLL) {
  10146. const int64_t i11 = i01;
  10147. float * s0 = (float *) ((char *) src0->data + ( i01*nb01 + i02*nb02 + i03*nb03));
  10148. float * s1 = (float *) ((char *) src1->data + (i1*nb10 + i11*nb11 + i12*nb12 + i13*nb13));
  10149. float * d = (float *) ((char *) dst->data + ( i1*nb1 + i2*nb2 + i3*nb3));
  10150. ggml_vec_mad_f32_unroll(ne0, nb01, nb11, d, s0, s1);
  10151. }
  10152. for (int64_t i01 = bne01_unroll; i01 < bne01; ++i01) {
  10153. const int64_t i11 = i01;
  10154. float * s0 = (float *) ((char *) src0->data + ( i01*nb01 + i02*nb02 + i03*nb03));
  10155. float * s1 = (float *) ((char *) src1->data + (i1*nb10 + i11*nb11 + i12*nb12 + i13*nb13));
  10156. float * d = (float *) ((char *) dst->data + ( i1*nb1 + i2*nb2 + i3*nb3));
  10157. ggml_vec_mad_f32(ne0, d, s0, *s1);
  10158. }
  10159. #else
  10160. for (int64_t i01 = bi01; i01 < bne01; ++i01) {
  10161. const int64_t i11 = i01;
  10162. float * s0 = (float *) ((char *) src0->data + ( i01*nb01 + i02*nb02 + i03*nb03));
  10163. float * s1 = (float *) ((char *) src1->data + (i1*nb10 + i11*nb11 + i12*nb12 + i13*nb13));
  10164. float * d = (float *) ((char *) dst->data + ( i1*nb1 + i2*nb2 + i3*nb3));
  10165. ggml_vec_mad_f32(ne0, d, s0, *s1);
  10166. }
  10167. #endif
  10168. }
  10169. }
  10170. }
  10171. //int64_t t1 = ggml_perf_time_us();
  10172. //static int64_t acc = 0;
  10173. //acc += t1 - t0;
  10174. //if (t1 - t0 > 10) {
  10175. // printf("\n");
  10176. // printf("ne00 = %5d, ne01 = %5d, ne02 = %5d, ne03 = %5d\n", ne00, ne01, ne02, ne03);
  10177. // printf("nb00 = %5d, nb01 = %5d, nb02 = %5d, nb03 = %5d\n", nb00, nb01, nb02, nb03);
  10178. // printf("ne10 = %5d, ne11 = %5d, ne12 = %5d, ne13 = %5d\n", ne10, ne11, ne12, ne13);
  10179. // printf("nb10 = %5d, nb11 = %5d, nb12 = %5d, nb13 = %5d\n", nb10, nb11, nb12, nb13);
  10180. // printf("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX task %d/%d: %d us, acc = %d\n", ith, nth, (int) (t1 - t0), (int) acc);
  10181. //}
  10182. }
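// In short (matching the pseudo-code comment above): dst[i0,i1] += src0[i0,i01] * src1[i1,i01] summed
// over i01 for every (i2,i3) plane, i.e. an outer-product accumulation; the inner loop is expressed as
// repeated axpy calls (ggml_vec_mad_f32), optionally unrolled by GGML_VEC_MAD_UNROLL.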
  10183. static void ggml_compute_forward_out_prod_q_f32(
  10184. const struct ggml_compute_params * params,
  10185. struct ggml_tensor * dst) {
  10186. const struct ggml_tensor * src0 = dst->src[0];
  10187. const struct ggml_tensor * src1 = dst->src[1];
  10188. // int64_t t0 = ggml_perf_time_us();
  10189. // UNUSED(t0);
  10190. GGML_TENSOR_BINARY_OP_LOCALS;
  10191. const int ith = params->ith;
  10192. const int nth = params->nth;
  10193. const enum ggml_type type = src0->type;
  10194. ggml_to_float_t const dequantize_row_q = type_traits[type].to_float;
  10195. GGML_ASSERT(ne02 == ne12);
  10196. GGML_ASSERT(ne03 == ne13);
  10197. GGML_ASSERT(ne2 == ne12);
  10198. GGML_ASSERT(ne3 == ne13);
  10199. // we don't support permuted src0 dim0
  10200. GGML_ASSERT(nb00 == ggml_type_size(type));
  10201. // dst dim0 cannot be transposed or permuted
  10202. GGML_ASSERT(nb0 == sizeof(float));
  10203. // GGML_ASSERT(nb0 <= nb1);
  10204. // GGML_ASSERT(nb1 <= nb2);
  10205. // GGML_ASSERT(nb2 <= nb3);
  10206. GGML_ASSERT(ne0 == ne00);
  10207. GGML_ASSERT(ne1 == ne10);
  10208. GGML_ASSERT(ne2 == ne02);
  10209. GGML_ASSERT(ne3 == ne03);
  10210. // nb01 >= nb00 - src0 is not transposed
  10211. // compute by src0 rows
  10212. // TODO: #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) || defined(GGML_USE_CLBLAST)
  10213. if (params->type == GGML_TASK_TYPE_INIT) {
  10214. if (ith != 0) {
  10215. return;
  10216. }
  10217. ggml_vec_set_f32(ne0*ne1*ne2*ne3, dst->data, 0);
  10218. return;
  10219. }
  10220. if (params->type == GGML_TASK_TYPE_FINALIZE) {
  10221. return;
  10222. }
  10223. // parallelize by last three dimensions
  10224. // total rows in dst
  10225. const int64_t nr = ne1*ne2*ne3;
  10226. // rows per thread
  10227. const int64_t dr = (nr + nth - 1)/nth;
  10228. // row range for this thread
  10229. const int64_t ir0 = dr*ith;
  10230. const int64_t ir1 = MIN(ir0 + dr, nr);
  10231. // dst[:,:,:,:] = 0
  10232. // for i2,i3:
  10233. // for i1:
  10234. // for i01:
  10235. // for i0:
  10236. // dst[i0,i1,i2,i3] += src0[i0,i01,i2,i3] * src1[i1,i01,i2,i3]
  10237. float * wdata = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32) * ith;
  10238. for (int64_t ir = ir0; ir < ir1; ++ir) {
  10239. // dst indices
  10240. const int64_t i3 = ir/(ne2*ne1);
  10241. const int64_t i2 = (ir - i3*ne2*ne1)/ne1;
  10242. const int64_t i1 = (ir - i3*ne2*ne1 - i2*ne1);
  10243. const int64_t i02 = i2;
  10244. const int64_t i03 = i3;
  10245. //const int64_t i10 = i1;
  10246. const int64_t i12 = i2;
  10247. const int64_t i13 = i3;
  10248. for (int64_t i01 = 0; i01 < ne01; ++i01) {
  10249. const int64_t i11 = i01;
  10250. float * s0 = (float *) ((char *) src0->data + ( i01*nb01 + i02*nb02 + i03*nb03));
  10251. float * s1 = (float *) ((char *) src1->data + (i1*nb10 + i11*nb11 + i12*nb12 + i13*nb13));
  10252. float * d = (float *) ((char *) dst->data + ( i1*nb1 + i2*nb2 + i3*nb3));
  10253. dequantize_row_q(s0, wdata, ne0);
  10254. ggml_vec_mad_f32(ne0, d, wdata, *s1);
  10255. }
  10256. }
  10257. //int64_t t1 = ggml_perf_time_us();
  10258. //static int64_t acc = 0;
  10259. //acc += t1 - t0;
  10260. //if (t1 - t0 > 10) {
  10261. // printf("\n");
  10262. // printf("ne00 = %5d, ne01 = %5d, ne02 = %5d, ne03 = %5d\n", ne00, ne01, ne02, ne03);
  10263. // printf("nb00 = %5d, nb01 = %5d, nb02 = %5d, nb03 = %5d\n", nb00, nb01, nb02, nb03);
  10264. // printf("ne10 = %5d, ne11 = %5d, ne12 = %5d, ne13 = %5d\n", ne10, ne11, ne12, ne13);
  10265. // printf("nb10 = %5d, nb11 = %5d, nb12 = %5d, nb13 = %5d\n", nb10, nb11, nb12, nb13);
  10266. // printf("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX task %d/%d: %d us, acc = %d\n", ith, nth, (int) (t1 - t0), (int) acc);
  10267. //}
  10268. }
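// Note: each thread owns a (ne0 + CACHE_LINE_SIZE_F32)-float slice of params->wdata and dequantizes
// one src0 row at a time into it before the ggml_vec_mad_f32 accumulation, so the quantized matrix is
// never expanded to floats in full.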
  10269. static void ggml_compute_forward_out_prod(
  10270. const struct ggml_compute_params * params,
  10271. struct ggml_tensor * dst) {
  10272. const struct ggml_tensor * src0 = dst->src[0];
  10273. switch (src0->type) {
  10274. case GGML_TYPE_Q4_0:
  10275. case GGML_TYPE_Q4_1:
  10276. case GGML_TYPE_Q5_0:
  10277. case GGML_TYPE_Q5_1:
  10278. case GGML_TYPE_Q8_0:
  10279. case GGML_TYPE_Q2_K:
  10280. case GGML_TYPE_Q3_K:
  10281. case GGML_TYPE_Q4_K:
  10282. case GGML_TYPE_Q5_K:
  10283. case GGML_TYPE_Q6_K:
  10284. case GGML_TYPE_IQ2_XXS:
  10285. case GGML_TYPE_IQ2_XS:
  10286. case GGML_TYPE_IQ3_XXS:
  10287. case GGML_TYPE_IQ1_S:
  10288. case GGML_TYPE_IQ1_M:
  10289. case GGML_TYPE_IQ4_NL:
  10290. case GGML_TYPE_IQ4_XS:
  10291. case GGML_TYPE_IQ3_S:
  10292. case GGML_TYPE_IQ2_S:
  10293. {
  10294. ggml_compute_forward_out_prod_q_f32(params, dst);
  10295. } break;
  10296. case GGML_TYPE_F16:
  10297. {
  10298. GGML_ASSERT(false); // todo
  10299. // ggml_compute_forward_out_prod_f16_f32(params, dst);
  10300. } break;
  10301. case GGML_TYPE_F32:
  10302. {
  10303. ggml_compute_forward_out_prod_f32(params, dst);
  10304. } break;
  10305. default:
  10306. {
  10307. GGML_ASSERT(false);
  10308. } break;
  10309. }
  10310. }
  10311. // ggml_compute_forward_scale
  10312. static void ggml_compute_forward_scale_f32(
  10313. const struct ggml_compute_params * params,
  10314. struct ggml_tensor * dst) {
  10315. const struct ggml_tensor * src0 = dst->src[0];
  10316. GGML_ASSERT(ggml_is_contiguous(src0));
  10317. GGML_ASSERT(ggml_is_contiguous(dst));
  10318. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  10319. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  10320. return;
  10321. }
  10322. // scale factor
  10323. float v;
  10324. memcpy(&v, dst->op_params, sizeof(float));
  10325. const int ith = params->ith;
  10326. const int nth = params->nth;
  10327. const int nc = src0->ne[0];
  10328. const int nr = ggml_nrows(src0);
  10329. // rows per thread
  10330. const int dr = (nr + nth - 1)/nth;
  10331. // row range for this thread
  10332. const int ir0 = dr*ith;
  10333. const int ir1 = MIN(ir0 + dr, nr);
  10334. const size_t nb01 = src0->nb[1];
  10335. const size_t nb1 = dst->nb[1];
  10336. for (int i1 = ir0; i1 < ir1; i1++) {
  10337. if (dst->data != src0->data) {
  10338. // src0 is same shape as dst => same indices
  10339. memcpy((char *)dst->data + i1*nb1, (char *)src0->data + i1*nb01, nc * sizeof(float));
  10340. }
  10341. ggml_vec_scale_f32(nc, (float *) ((char *) dst->data + i1*nb1), v);
  10342. }
  10343. }
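// Note: the scale factor is stored bitwise in dst->op_params (hence the memcpy into a float above);
// when dst and src0 share the same buffer, the per-row memcpy is skipped and the rows are rescaled
// in place.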
  10344. static void ggml_compute_forward_scale(
  10345. const struct ggml_compute_params * params,
  10346. struct ggml_tensor * dst) {
  10347. const struct ggml_tensor * src0 = dst->src[0];
  10348. switch (src0->type) {
  10349. case GGML_TYPE_F32:
  10350. {
  10351. ggml_compute_forward_scale_f32(params, dst);
  10352. } break;
  10353. default:
  10354. {
  10355. GGML_ASSERT(false);
  10356. } break;
  10357. }
  10358. }
  10359. // ggml_compute_forward_set
  10360. static void ggml_compute_forward_set_f32(
  10361. const struct ggml_compute_params * params,
  10362. struct ggml_tensor * dst) {
  10363. const struct ggml_tensor * src0 = dst->src[0];
  10364. const struct ggml_tensor * src1 = dst->src[1];
  10365. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  10366. GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));
  10367. // view src0 and dst with these strides and data offset inbytes during set
  10368. // nb0 is implicitly element_size because src0 and dst are contiguous
  10369. size_t nb1 = ((int32_t *) dst->op_params)[0];
  10370. size_t nb2 = ((int32_t *) dst->op_params)[1];
  10371. size_t nb3 = ((int32_t *) dst->op_params)[2];
  10372. size_t offset = ((int32_t *) dst->op_params)[3];
  10373. bool inplace = (bool) ((int32_t *) dst->op_params)[4];
  10374. if (!inplace && (params->type == GGML_TASK_TYPE_INIT)) {
  10375. if (params->ith != 0) {
  10376. return;
  10377. }
  10378. // memcpy needs to be synchronized across threads to avoid race conditions.
  10379. // => do it in INIT phase
  10380. memcpy(
  10381. ((char *) dst->data),
  10382. ((char *) src0->data),
  10383. ggml_nbytes(dst));
  10384. }
  10385. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  10386. return;
  10387. }
  10388. const int ith = params->ith;
  10389. const int nth = params->nth;
  10390. const int nr = ggml_nrows(src1);
  10391. const int nc = src1->ne[0];
  10392. GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne)
  10393. GGML_TENSOR_LOCALS(size_t, nb1, src1, nb)
  10394. // src0 and dst as viewed during set
  10395. const size_t nb0 = ggml_element_size(src0);
  10396. const int im0 = (ne10 == 0 ? 0 : ne10-1);
  10397. const int im1 = (ne11 == 0 ? 0 : ne11-1);
  10398. const int im2 = (ne12 == 0 ? 0 : ne12-1);
  10399. const int im3 = (ne13 == 0 ? 0 : ne13-1);
  10400. GGML_ASSERT(offset + im0*nb0 + im1*nb1 + im2*nb2 + im3*nb3 <= ggml_nbytes(dst));
  10401. GGML_ASSERT(nb10 == sizeof(float));
  10402. // rows per thread
  10403. const int dr = (nr + nth - 1)/nth;
  10404. // row range for this thread
  10405. const int ir0 = dr*ith;
  10406. const int ir1 = MIN(ir0 + dr, nr);
  10407. for (int ir = ir0; ir < ir1; ++ir) {
  10408. // src0 and dst are viewed with shape of src1 and offset
  10409. // => same indices
  10410. const int i3 = ir/(ne12*ne11);
  10411. const int i2 = (ir - i3*ne12*ne11)/ne11;
  10412. const int i1 = (ir - i3*ne12*ne11 - i2*ne11);
  10413. ggml_vec_cpy_f32(nc,
  10414. (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + offset),
  10415. (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11));
  10416. }
  10417. }
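// Summary: GGML_OP_SET views dst through the nb1/nb2/nb3 strides and byte offset packed in op_params
// and copies src1 row by row into that window; when inplace is false, src0 is first copied wholesale
// into dst during INIT so the elements outside the window keep their original values.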
  10418. static void ggml_compute_forward_set(
  10419. const struct ggml_compute_params * params,
  10420. struct ggml_tensor * dst) {
  10421. const struct ggml_tensor * src0 = dst->src[0];
  10422. switch (src0->type) {
  10423. case GGML_TYPE_F32:
  10424. {
  10425. ggml_compute_forward_set_f32(params, dst);
  10426. } break;
  10427. case GGML_TYPE_F16:
  10428. case GGML_TYPE_BF16:
  10429. case GGML_TYPE_Q4_0:
  10430. case GGML_TYPE_Q4_1:
  10431. case GGML_TYPE_Q5_0:
  10432. case GGML_TYPE_Q5_1:
  10433. case GGML_TYPE_Q8_0:
  10434. case GGML_TYPE_Q8_1:
  10435. case GGML_TYPE_Q2_K:
  10436. case GGML_TYPE_Q3_K:
  10437. case GGML_TYPE_Q4_K:
  10438. case GGML_TYPE_Q5_K:
  10439. case GGML_TYPE_Q6_K:
  10440. case GGML_TYPE_IQ2_XXS:
  10441. case GGML_TYPE_IQ2_XS:
  10442. case GGML_TYPE_IQ3_XXS:
  10443. case GGML_TYPE_IQ1_S:
  10444. case GGML_TYPE_IQ1_M:
  10445. case GGML_TYPE_IQ4_NL:
  10446. case GGML_TYPE_IQ4_XS:
  10447. case GGML_TYPE_IQ3_S:
  10448. case GGML_TYPE_IQ2_S:
  10449. default:
  10450. {
  10451. GGML_ASSERT(false);
  10452. } break;
  10453. }
  10454. }
  10455. // ggml_compute_forward_cpy
  10456. static void ggml_compute_forward_cpy(
  10457. const struct ggml_compute_params * params,
  10458. struct ggml_tensor * dst) {
  10459. ggml_compute_forward_dup(params, dst);
  10460. }
  10461. // ggml_compute_forward_cont
  10462. static void ggml_compute_forward_cont(
  10463. const struct ggml_compute_params * params,
  10464. struct ggml_tensor * dst) {
  10465. ggml_compute_forward_dup(params, dst);
  10466. }
  10467. // ggml_compute_forward_reshape
  10468. static void ggml_compute_forward_reshape(
  10469. const struct ggml_compute_params * params,
  10470. struct ggml_tensor * dst) {
  10471. // NOP
  10472. UNUSED(params);
  10473. UNUSED(dst);
  10474. }
  10475. // ggml_compute_forward_view
  10476. static void ggml_compute_forward_view(
  10477. const struct ggml_compute_params * params,
  10478. const struct ggml_tensor * dst) {
  10479. // NOP
  10480. UNUSED(params);
  10481. UNUSED(dst);
  10482. }
  10483. // ggml_compute_forward_permute
  10484. static void ggml_compute_forward_permute(
  10485. const struct ggml_compute_params * params,
  10486. const struct ggml_tensor * dst) {
  10487. // NOP
  10488. UNUSED(params);
  10489. UNUSED(dst);
  10490. }
  10491. // ggml_compute_forward_transpose
  10492. static void ggml_compute_forward_transpose(
  10493. const struct ggml_compute_params * params,
  10494. const struct ggml_tensor * dst) {
  10495. // NOP
  10496. UNUSED(params);
  10497. UNUSED(dst);
  10498. }
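// Note: reshape/view/permute/transpose only rewrite tensor metadata (ne/nb/data offset) when the graph
// is constructed, so their forward passes above are intentionally empty.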
  10499. // ggml_compute_forward_get_rows
  10500. static void ggml_compute_forward_get_rows_q(
  10501. const struct ggml_compute_params * params,
  10502. struct ggml_tensor * dst) {
  10503. const struct ggml_tensor * src0 = dst->src[0];
  10504. const struct ggml_tensor * src1 = dst->src[1];
  10505. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  10506. return;
  10507. }
  10508. GGML_TENSOR_BINARY_OP_LOCALS
  10509. const int64_t nc = ne00;
  10510. const int64_t nr = ggml_nelements(src1);
  10511. const enum ggml_type type = src0->type;
  10512. ggml_to_float_t const dequantize_row_q = type_traits[type].to_float;
  10513. assert(ne0 == nc);
  10514. assert(ne02 == ne11);
  10515. assert(nb00 == ggml_type_size(type));
  10516. assert(ggml_nrows(dst) == nr);
  10517. const int ith = params->ith;
  10518. const int nth = params->nth;
  10519. // rows per thread
  10520. const int dr = (nr + nth - 1)/nth;
  10521. // row range for this thread
  10522. const int ir0 = dr*ith;
  10523. const int ir1 = MIN(ir0 + dr, nr);
  10524. for (int64_t i = ir0; i < ir1; ++i) {
  10525. const int64_t i12 = i/(ne11*ne10);
  10526. const int64_t i11 = (i - i12*ne11*ne10)/ne10;
  10527. const int64_t i10 = (i - i12*ne11*ne10 - i11*ne10);
  10528. const int64_t i01 = *(int32_t *) ((char *) src1->data + i10*nb10 + i11*nb11 + i12*nb12);
  10529. dequantize_row_q(
  10530. (const void *) ((char *) src0->data + i01*nb01 + i11*nb02 + i12*nb03),
  10531. (float *) ((char *) dst->data + i10*nb1 + i11*nb2 + i12*nb3), nc);
  10532. }
  10533. }
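// Note: src1 holds the row ids; the flat row counter i is decomposed into (i10, i11, i12), the id i01
// is read from src1, and row i01 of the matching src0 plane is expanded into dst. The f16/bf16/f32
// variants below differ only in the per-row convert/copy call.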
  10534. static void ggml_compute_forward_get_rows_f16(
  10535. const struct ggml_compute_params * params,
  10536. struct ggml_tensor * dst) {
  10537. const struct ggml_tensor * src0 = dst->src[0];
  10538. const struct ggml_tensor * src1 = dst->src[1];
  10539. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  10540. return;
  10541. }
  10542. GGML_TENSOR_BINARY_OP_LOCALS
  10543. const int64_t nc = ne00;
  10544. const int64_t nr = ggml_nelements(src1);
  10545. assert(ne0 == nc);
  10546. assert(ne02 == ne11);
  10547. assert(nb00 == sizeof(ggml_fp16_t));
  10548. assert(ggml_nrows(dst) == nr);
  10549. const int ith = params->ith;
  10550. const int nth = params->nth;
  10551. // rows per thread
  10552. const int dr = (nr + nth - 1)/nth;
  10553. // row range for this thread
  10554. const int ir0 = dr*ith;
  10555. const int ir1 = MIN(ir0 + dr, nr);
  10556. for (int64_t i = ir0; i < ir1; ++i) {
  10557. const int64_t i12 = i/(ne11*ne10);
  10558. const int64_t i11 = (i - i12*ne11*ne10)/ne10;
  10559. const int64_t i10 = (i - i12*ne11*ne10 - i11*ne10);
  10560. const int64_t i01 = *(int32_t *) ((char *) src1->data + i10*nb10 + i11*nb11 + i12*nb12);
  10561. ggml_fp16_to_fp32_row(
  10562. (const void *) ((char *) src0->data + i01*nb01 + i11*nb02 + i12*nb03),
  10563. (float *) ((char *) dst->data + i10*nb1 + i11*nb2 + i12*nb3), nc);
  10564. }
  10565. }
  10566. static void ggml_compute_forward_get_rows_bf16(
  10567. const struct ggml_compute_params * params,
  10568. struct ggml_tensor * dst) {
  10569. const struct ggml_tensor * src0 = dst->src[0];
  10570. const struct ggml_tensor * src1 = dst->src[1];
  10571. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  10572. return;
  10573. }
  10574. GGML_TENSOR_BINARY_OP_LOCALS
  10575. const int64_t nc = ne00;
  10576. const int64_t nr = ggml_nelements(src1);
  10577. assert(ne0 == nc);
  10578. assert(ne02 == ne11);
  10579. assert(nb00 == sizeof(ggml_bf16_t));
  10580. assert(ggml_nrows(dst) == nr);
  10581. const int ith = params->ith;
  10582. const int nth = params->nth;
  10583. // rows per thread
  10584. const int dr = (nr + nth - 1)/nth;
  10585. // row range for this thread
  10586. const int ir0 = dr*ith;
  10587. const int ir1 = MIN(ir0 + dr, nr);
  10588. for (int64_t i = ir0; i < ir1; ++i) {
  10589. const int64_t i12 = i/(ne11*ne10);
  10590. const int64_t i11 = (i - i12*ne11*ne10)/ne10;
  10591. const int64_t i10 = (i - i12*ne11*ne10 - i11*ne10);
  10592. const int64_t i01 = *(int32_t *) ((char *) src1->data + i10*nb10 + i11*nb11 + i12*nb12);
  10593. ggml_bf16_to_fp32_row(
  10594. (const void *) ((char *) src0->data + i01*nb01 + i11*nb02 + i12*nb03),
  10595. (float *) ((char *) dst->data + i10*nb1 + i11*nb2 + i12*nb3), nc);
  10596. }
  10597. }
  10598. static void ggml_compute_forward_get_rows_f32(
  10599. const struct ggml_compute_params * params,
  10600. struct ggml_tensor * dst) {
  10601. const struct ggml_tensor * src0 = dst->src[0];
  10602. const struct ggml_tensor * src1 = dst->src[1];
  10603. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  10604. return;
  10605. }
  10606. GGML_TENSOR_BINARY_OP_LOCALS
  10607. const int64_t nc = ne00;
  10608. const int64_t nr = ggml_nelements(src1);
  10609. assert(ne0 == nc);
  10610. assert(ne02 == ne11);
  10611. assert(nb00 == sizeof(float));
  10612. assert(ggml_nrows(dst) == nr);
  10613. const int ith = params->ith;
  10614. const int nth = params->nth;
  10615. // rows per thread
  10616. const int dr = (nr + nth - 1)/nth;
  10617. // row range for this thread
  10618. const int ir0 = dr*ith;
  10619. const int ir1 = MIN(ir0 + dr, nr);
  10620. for (int64_t i = ir0; i < ir1; ++i) {
  10621. const int64_t i12 = i/(ne11*ne10);
  10622. const int64_t i11 = (i - i12*ne11*ne10)/ne10;
  10623. const int64_t i10 = (i - i12*ne11*ne10 - i11*ne10);
  10624. const int64_t i01 = *(int32_t *) ((char *) src1->data + i10*nb10 + i11*nb11 + i12*nb12);
  10625. ggml_vec_cpy_f32(nc,
  10626. (float *) ((char *) dst->data + i10*nb1 + i11*nb2 + i12*nb3),
  10627. (float *) ((char *) src0->data + i01*nb01 + i11*nb02 + i12*nb03));
  10628. }
  10629. }
  10630. static void ggml_compute_forward_get_rows(
  10631. const struct ggml_compute_params * params,
  10632. struct ggml_tensor * dst) {
  10633. const struct ggml_tensor * src0 = dst->src[0];
  10634. switch (src0->type) {
  10635. case GGML_TYPE_Q4_0:
  10636. case GGML_TYPE_Q4_1:
  10637. case GGML_TYPE_Q5_0:
  10638. case GGML_TYPE_Q5_1:
  10639. case GGML_TYPE_Q8_0:
  10640. case GGML_TYPE_Q8_1:
  10641. case GGML_TYPE_Q2_K:
  10642. case GGML_TYPE_Q3_K:
  10643. case GGML_TYPE_Q4_K:
  10644. case GGML_TYPE_Q5_K:
  10645. case GGML_TYPE_Q6_K:
  10646. case GGML_TYPE_IQ2_XXS:
  10647. case GGML_TYPE_IQ2_XS:
  10648. case GGML_TYPE_IQ3_XXS:
  10649. case GGML_TYPE_IQ1_S:
  10650. case GGML_TYPE_IQ1_M:
  10651. case GGML_TYPE_IQ4_NL:
  10652. case GGML_TYPE_IQ4_XS:
  10653. case GGML_TYPE_IQ3_S:
  10654. case GGML_TYPE_IQ2_S:
  10655. {
  10656. ggml_compute_forward_get_rows_q(params, dst);
  10657. } break;
  10658. case GGML_TYPE_F16:
  10659. {
  10660. ggml_compute_forward_get_rows_f16(params, dst);
  10661. } break;
  10662. case GGML_TYPE_BF16:
  10663. {
  10664. ggml_compute_forward_get_rows_bf16(params, dst);
  10665. } break;
  10666. case GGML_TYPE_F32:
  10667. case GGML_TYPE_I32:
  10668. {
  10669. ggml_compute_forward_get_rows_f32(params, dst);
  10670. } break;
  10671. default:
  10672. {
  10673. GGML_ASSERT(false);
  10674. } break;
  10675. }
  10676. //static bool first = true;
  10677. //printf("ne0 = %d, ne1 = %d, ne2 = %d\n", dst->ne[0], dst->ne[1], dst->ne[2]);
  10678. //if (first) {
  10679. // first = false;
  10680. //} else {
  10681. // for (int k = 0; k < dst->ne[1]; ++k) {
  10682. // for (int j = 0; j < dst->ne[0]/16; ++j) {
  10683. // for (int i = 0; i < 16; ++i) {
  10684. // printf("%8.4f ", ((float *) dst->data)[k*dst->ne[0] + j*16 + i]);
  10685. // }
  10686. // printf("\n");
  10687. // }
  10688. // printf("\n");
  10689. // }
  10690. // printf("\n");
  10691. // exit(0);
  10692. //}
  10693. }
  10694. // ggml_compute_forward_get_rows_back
  10695. static void ggml_compute_forward_get_rows_back_f32_f16(
  10696. const struct ggml_compute_params * params,
  10697. struct ggml_tensor * dst) {
  10698. const struct ggml_tensor * src0 = dst->src[0];
  10699. const struct ggml_tensor * src1 = dst->src[1];
  10700. GGML_ASSERT(params->ith == 0);
  10701. GGML_ASSERT(ggml_is_contiguous(dst));
  10702. // ggml_compute_forward_dup_same_cont(params, opt0, dst);
  10703. if (params->type == GGML_TASK_TYPE_INIT) {
  10704. if (params->ith != 0) {
  10705. return;
  10706. }
  10707. memset(dst->data, 0, ggml_nbytes(dst));
  10708. }
  10709. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  10710. return;
  10711. }
  10712. const int nc = src0->ne[0];
  10713. const int nr = ggml_nelements(src1);
  10714. GGML_ASSERT( dst->ne[0] == nc);
  10715. GGML_ASSERT(src0->nb[0] == sizeof(ggml_fp16_t));
  10716. for (int i = 0; i < nr; ++i) {
  10717. const int r = ((int32_t *) src1->data)[i];
  10718. for (int j = 0; j < nc; ++j) {
  10719. ggml_fp16_t v = ((ggml_fp16_t *) ((char *) src0->data + i*src0->nb[1]))[j];
  10720. ((float *) ((char *) dst->data + r*dst->nb[1]))[j] += GGML_FP16_TO_FP32(v);
  10721. }
  10722. }
  10723. }
  10724. static void ggml_compute_forward_get_rows_back_f32(
  10725. const struct ggml_compute_params * params,
  10726. struct ggml_tensor * dst) {
  10727. const struct ggml_tensor * src0 = dst->src[0];
  10728. const struct ggml_tensor * src1 = dst->src[1];
  10729. GGML_ASSERT(params->ith == 0);
  10730. GGML_ASSERT(ggml_is_contiguous(dst));
  10731. // ggml_compute_forward_dup_same_cont(params, opt0, dst);
  10732. if (params->type == GGML_TASK_TYPE_INIT) {
  10733. if (params->ith != 0) {
  10734. return;
  10735. }
  10736. memset(dst->data, 0, ggml_nbytes(dst));
  10737. }
  10738. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  10739. return;
  10740. }
  10741. const int nc = src0->ne[0];
  10742. const int nr = ggml_nelements(src1);
  10743. GGML_ASSERT( dst->ne[0] == nc);
  10744. GGML_ASSERT(src0->nb[0] == sizeof(float));
  10745. for (int i = 0; i < nr; ++i) {
  10746. const int r = ((int32_t *) src1->data)[i];
  10747. ggml_vec_add_f32(nc,
  10748. (float *) ((char *) dst->data + r*dst->nb[1]),
  10749. (float *) ((char *) dst->data + r*dst->nb[1]),
  10750. (float *) ((char *) src0->data + i*src0->nb[1]));
  10751. }
  10752. }
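// Note: get_rows_back scatters gradients - row i of src0 is accumulated into row r = src1[i] of dst,
// so repeated ids sum up; dst is zeroed once during INIT.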
static void ggml_compute_forward_get_rows_back(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_get_rows_back_f32_f16(params, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_get_rows_back_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }

    //static bool first = true;
    //printf("ne0 = %d, ne1 = %d, ne2 = %d\n", dst->ne[0], dst->ne[1], dst->ne[2]);
    //if (first) {
    //    first = false;
    //} else {
    //    for (int k = 0; k < dst->ne[1]; ++k) {
    //        for (int j = 0; j < dst->ne[0]/16; ++j) {
    //            for (int i = 0; i < 16; ++i) {
    //                printf("%8.4f ", ((float *) dst->data)[k*dst->ne[0] + j*16 + i]);
    //            }
    //            printf("\n");
    //        }
    //        printf("\n");
    //    }
    //    printf("\n");
    //    exit(0);
    //}
}

// ggml_compute_forward_diag
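// writes the single row of src0 (ne01 == 1) onto the main diagonal of a square matrix in dst
// and zeroes all off-diagonal elements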
static void ggml_compute_forward_diag_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    GGML_ASSERT(params->ith == 0);

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    // TODO: handle transposed/permuted matrices

    GGML_TENSOR_UNARY_OP_LOCALS

    GGML_ASSERT(ne00 == ne0);
    GGML_ASSERT(ne00 == ne1);
    GGML_ASSERT(ne01 == 1);
    GGML_ASSERT(ne02 == ne2);
    GGML_ASSERT(ne03 == ne3);

    GGML_ASSERT(nb00 == sizeof(float));
    GGML_ASSERT(nb0  == sizeof(float));

    for (int i3 = 0; i3 < ne3; i3++) {
        for (int i2 = 0; i2 < ne2; i2++) {
            for (int i1 = 0; i1 < ne1; i1++) {
                float * d = (float *)((char *)  dst->data + i3*nb3  + i2*nb2 + i1*nb1);
                float * s = (float *)((char *) src0->data + i3*nb03 + i2*nb02);
                for (int i0 = 0; i0 < i1; i0++) {
                    d[i0] = 0;
                }
                d[i1] = s[i1];
                for (int i0 = i1+1; i0 < ne0; i0++) {
                    d[i0] = 0;
                }
            }
        }
    }
}
static void ggml_compute_forward_diag(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_diag_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_diag_mask_inf
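// shared helper for DIAG_MASK_INF / DIAG_MASK_ZERO: sets every element with column index
// i > n_past + row index to `value`, i.e. masks the upper-triangular part beyond n_past
// (e.g. for causal attention); when not running in-place, src0 is copied into dst during INIT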
static void ggml_compute_forward_diag_mask_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst,
        const float value) {

    const struct ggml_tensor * src0 = dst->src[0];

    const int ith = params->ith;
    const int nth = params->nth;

    const int  n_past  = ((int32_t *) dst->op_params)[0];
    const bool inplace = src0->data == dst->data;

    GGML_ASSERT(n_past >= 0);

    if (!inplace && (params->type == GGML_TASK_TYPE_INIT)) {
        if (ith != 0) {
            return;
        }
        // memcpy needs to be synchronized across threads to avoid race conditions.
        // => do it in INIT phase
        GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));
        GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));
        memcpy(
            ((char *)  dst->data),
            ((char *) src0->data),
            ggml_nbytes(dst));
    }

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    // TODO: handle transposed/permuted matrices

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];
    const int nr = src0->ne[1];
    const int nz = n/nr;

    GGML_ASSERT( dst->nb[0] == sizeof(float));
    GGML_ASSERT(src0->nb[0] == sizeof(float));

    for (int k = 0; k < nz; k++) {
        for (int j = ith; j < nr; j += nth) {
            for (int i = n_past; i < nc; i++) {
                if (i > n_past + j) {
                    *(float *)((char *) dst->data + k*dst->nb[2] + j*dst->nb[1] + i*dst->nb[0]) = value;
                }
            }
        }
    }
}
static void ggml_compute_forward_diag_mask_inf(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_diag_mask_f32(params, dst, -INFINITY);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

static void ggml_compute_forward_diag_mask_zero(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_diag_mask_f32(params, dst, 0);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_soft_max
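// row-wise softmax with optional scaling, an optional additive mask in src1 (F16 or F32) and
// ALiBi-style per-head slopes controlled by max_bias; rows are split across threads and each
// thread works in a scratch row inside params->wdata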
static void ggml_compute_forward_soft_max_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];
    const struct ggml_tensor * src1 = dst->src[1];

    assert(ggml_is_contiguous(dst));
    assert(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    float scale    = 1.0f;
    float max_bias = 0.0f;

    memcpy(&scale,    (float *) dst->op_params + 0, sizeof(float));
    memcpy(&max_bias, (float *) dst->op_params + 1, sizeof(float));

    // TODO: handle transposed/permuted matrices

    const int ith = params->ith;
    const int nth = params->nth;

    GGML_TENSOR_UNARY_OP_LOCALS

    //const int64_t ne11 = src1 ? src1->ne[1] : 1;

    // TODO: is this supposed to be ceil instead of floor?
    // https://huggingface.co/mosaicml/mpt-7b/blob/main/attention.py#L370
    const uint32_t n_head      = ne02;
    const uint32_t n_head_log2 = 1u << (uint32_t) floor(log2(n_head));

    const float m0 = powf(2.0f, -(max_bias       ) / n_head_log2);
    const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);
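
    // ALiBi slope bases: heads below n_head_log2 (the largest power of two <= n_head) use
    // powers of m0, the remaining heads use odd powers of m1 (see the per-head slope below)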
    const int nc = src0->ne[0];
    const int nr = ggml_nrows(src0);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    float * wp = (float *) params->wdata + (nc + CACHE_LINE_SIZE_F32) * ith;

    const bool use_f16 = (src1 && src1->type == GGML_TYPE_F16);

    for (int i1 = ir0; i1 < ir1; i1++) {
        // ALiBi
        const uint32_t h = (i1/ne01)%ne02; // head
        const float slope = (max_bias > 0.0f) ? h < n_head_log2 ? powf(m0, h + 1) : powf(m1, 2*(h - n_head_log2) + 1) : 1.0f;

        float * sp = (float *)((char *) src0->data + i1*src0->nb[1]);
        float * dp = (float *)((char *)  dst->data + i1*dst->nb[1]);

        // broadcast the mask across rows
        ggml_fp16_t * mp_f16 = src1 ? (ggml_fp16_t *)((char *) src1->data) + (i1%ne01)*ne00 : NULL;
        float       * mp_f32 = src1 ? (float       *)((char *) src1->data) + (i1%ne01)*ne00 : NULL;

        ggml_vec_cpy_f32  (nc, wp, sp);
        ggml_vec_scale_f32(nc, wp, scale);
        if (mp_f32) {
            if (use_f16) {
                for (int i = 0; i < nc; ++i) {
                    wp[i] += slope*GGML_FP16_TO_FP32(mp_f16[i]);
                }
            } else {
                for (int i = 0; i < nc; ++i) {
                    wp[i] += slope*mp_f32[i];
                }
            }
        }

#ifndef NDEBUG
        for (int i = 0; i < nc; ++i) {
            //printf("p[%d] = %f\n", i, p[i]);
            assert(!isnan(wp[i]));
        }
#endif

        float max = -INFINITY;
        ggml_vec_max_f32(nc, &max, wp);

        ggml_float sum = 0.0;

        uint16_t scvt;
        for (int i = 0; i < nc; i++) {
            if (wp[i] == -INFINITY) {
                dp[i] = 0.0f;
            } else {
                // const float val = (wp[i] == -INFINITY) ? 0.0 : exp(wp[i] - max);
                ggml_fp16_t s = GGML_FP32_TO_FP16(wp[i] - max);
                memcpy(&scvt, &s, sizeof(scvt));
                const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt]);
                sum += (ggml_float)val;
                dp[i] = val;
            }
        }

        assert(sum > 0.0);

        sum = 1.0/sum;
        ggml_vec_scale_f32(nc, dp, sum);

#ifndef NDEBUG
        for (int i = 0; i < nc; ++i) {
            assert(!isnan(dp[i]));
            assert(!isinf(dp[i]));
        }
#endif
    }
}
static void ggml_compute_forward_soft_max(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_soft_max_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_soft_max_back
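// src0 holds the incoming gradient dy and src1 the softmax output y; per row this computes
// dx = y*(dy - dot(y, dy)) - see the derivation in the comment block inside the loop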
static void ggml_compute_forward_soft_max_back_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];
    const struct ggml_tensor * src1 = dst->src[1];

    GGML_ASSERT(ggml_is_contiguous(src0));
    GGML_ASSERT(ggml_is_contiguous(src1));
    GGML_ASSERT(ggml_is_contiguous(dst));
    GGML_ASSERT(ggml_are_same_shape(src0, dst));
    GGML_ASSERT(ggml_are_same_shape(src1, dst));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    // TODO: handle transposed/permuted matrices

    const int ith = params->ith;
    const int nth = params->nth;

    const int nc = src0->ne[0];
    const int nr = ggml_nrows(src0);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        float *dy = (float *)((char *) src0->data + i1*src0->nb[1]);
        float *y  = (float *)((char *) src1->data + i1*src1->nb[1]);
        float *dx = (float *)((char *) dst->data  + i1*dst->nb[1]);

#ifndef NDEBUG
        for (int i = 0; i < nc; ++i) {
            //printf("p[%d] = %f\n", i, p[i]);
            assert(!isnan(dy[i]));
            assert(!isnan(y[i]));
        }
#endif
        // Jii = yi - yi*yi
        // Jij = -yi*yj
        // J = diag(y)-y.T*y
        // dx = J * dy
        // dxk = sum_i(Jki * dyi)
        // dxk = sum_i(-yk*yi * dyi) - (-yk*yk)*dyk + (yk - yk*yk)*dyk
        // dxk = sum_i(-yk*yi * dyi) + yk*yk*dyk + yk*dyk - yk*yk*dyk
        // dxk = sum_i(-yk*yi * dyi) + yk*dyk
        // dxk = -yk * sum_i(yi * dyi) + yk*dyk
        // dxk = -yk * dot(y, dy) + yk*dyk
        // dxk = yk * (- dot(y, dy) + dyk)
        // dxk = yk * (dyk - dot(y, dy))
        //
        // post-order:
        // dot_y_dy := dot(y, dy)
        // dx := dy
        // dx := dx - dot_y_dy
        // dx := dx * y

        // linear runtime, no additional memory
        float dot_y_dy = 0;
        ggml_vec_dot_f32 (nc, &dot_y_dy, 0, y, 0, dy, 0, 1);
        ggml_vec_cpy_f32 (nc, dx, dy);
        ggml_vec_acc1_f32(nc, dx, -dot_y_dy);
        ggml_vec_mul_f32 (nc, dx, dx, y);

#ifndef NDEBUG
        for (int i = 0; i < nc; ++i) {
            assert(!isnan(dx[i]));
            assert(!isinf(dx[i]));
        }
#endif
    }
}
static void ggml_compute_forward_soft_max_back(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_soft_max_back_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_clamp
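// element-wise clamp of src0 into [min, max], with the bounds read from dst->op_params;
// rows are split across threads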
static void ggml_compute_forward_clamp_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    assert(params->ith == 0);

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    float min;
    float max;
    memcpy(&min, (float *) dst->op_params + 0, sizeof(float));
    memcpy(&max, (float *) dst->op_params + 1, sizeof(float));

    const int ith = params->ith;
    const int nth = params->nth;

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    const size_t nb00 = src0->nb[0];
    const size_t nb01 = src0->nb[1];

    const size_t nb0 = dst->nb[0];
    const size_t nb1 = dst->nb[1];

    GGML_ASSERT( nb0 == sizeof(float));
    GGML_ASSERT(nb00 == sizeof(float));

    for (int j = ith; j < n; j += nth) {
        float * dst_ptr  = (float *) ((char *)  dst->data + j*nb1);
        float * src0_ptr = (float *) ((char *) src0->data + j*nb01);

        for (int i = 0; i < nc; i++) {
            dst_ptr[i] = MAX(MIN(src0_ptr[i], max), min);
        }
    }
}
static void ggml_compute_forward_clamp(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_clamp_f32(params, dst);
            } break;
        case GGML_TYPE_F16:
        case GGML_TYPE_BF16:
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q8_1:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
        case GGML_TYPE_IQ2_XXS:
        case GGML_TYPE_IQ2_XS:
        case GGML_TYPE_IQ3_XXS:
        case GGML_TYPE_IQ1_S:
        case GGML_TYPE_IQ1_M:
        case GGML_TYPE_IQ4_NL:
        case GGML_TYPE_IQ4_XS:
        case GGML_TYPE_IQ3_S:
        case GGML_TYPE_IQ2_S:
        case GGML_TYPE_Q8_K:
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_I64:
        case GGML_TYPE_F64:
        case GGML_TYPE_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_rope
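// rope_yarn_ramp() returns a linear ramp that is 1 for rotary dimensions below `low`, 0 above
// `high` and interpolates in between; rope_yarn() scales it by ext_factor to blend the
// interpolated and extrapolated rotation angles (YaRN)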
  11174. static float rope_yarn_ramp(const float low, const float high, const int i0) {
  11175. const float y = (i0 / 2 - low) / MAX(0.001f, high - low);
  11176. return 1 - MIN(1, MAX(0, y));
  11177. }
  11178. // YaRN algorithm based on LlamaYaRNScaledRotaryEmbedding.py from https://github.com/jquesnelle/yarn
  11179. // MIT licensed. Copyright (c) 2023 Jeffrey Quesnelle and Bowen Peng.
  11180. static void rope_yarn(
  11181. float theta_extrap, float freq_scale, float corr_dims[2], int64_t i0, float ext_factor, float mscale,
  11182. float * cos_theta, float * sin_theta
  11183. ) {
  11184. // Get n-d rotational scaling corrected for extrapolation
  11185. float theta_interp = freq_scale * theta_extrap;
  11186. float theta = theta_interp;
  11187. if (ext_factor != 0.0f) {
  11188. float ramp_mix = rope_yarn_ramp(corr_dims[0], corr_dims[1], i0) * ext_factor;
  11189. theta = theta_interp * (1 - ramp_mix) + theta_extrap * ramp_mix;
  11190. // Get n-d magnitude scaling corrected for interpolation
  11191. mscale *= 1.0f + 0.1f * logf(1.0f / freq_scale);
  11192. }
  11193. *cos_theta = cosf(theta) * mscale;
  11194. *sin_theta = sinf(theta) * mscale;
  11195. }
  11196. // Apparently solving `n_rot = 2pi * x * base^((2 * max_pos_emb) / n_dims)` for x, we get
  11197. // `corr_dim(n_rot) = n_dims * log(max_pos_emb / (n_rot * 2pi)) / (2 * log(base))`
  11198. static float ggml_rope_yarn_corr_dim(int n_dims, int n_orig_ctx, float n_rot, float base) {
  11199. return n_dims * logf(n_orig_ctx / (n_rot * 2 * (float)M_PI)) / (2 * logf(base));
  11200. }
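// precompute one cos/sin pair per even dimension index of a row: theta starts at theta_base and
// is multiplied by theta_scale at every step; the sin entry is multiplied by sin_sign so the
// same cache serves both the forward and the backward (inverse) rotation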
  11201. static void ggml_rope_cache_init(
  11202. float theta_base, float freq_scale, float corr_dims[2], int64_t ne0, float ext_factor, float mscale,
  11203. float * cache, float sin_sign, float theta_scale
  11204. ) {
  11205. float theta = theta_base;
  11206. for (int64_t i0 = 0; i0 < ne0; i0 += 2) {
  11207. rope_yarn(
  11208. theta, freq_scale, corr_dims, i0, ext_factor, mscale, &cache[i0 + 0], &cache[i0 + 1]
  11209. );
  11210. cache[i0 + 1] *= sin_sign;
  11211. theta *= theta_scale;
  11212. }
  11213. }
  11214. GGML_CALL void ggml_rope_yarn_corr_dims(
  11215. int n_dims, int n_orig_ctx, float freq_base, float beta_fast, float beta_slow, float dims[2]
  11216. ) {
  11217. // start and end correction dims
  11218. float start = floorf(ggml_rope_yarn_corr_dim(n_dims, n_orig_ctx, beta_fast, freq_base));
  11219. float end = ceilf(ggml_rope_yarn_corr_dim(n_dims, n_orig_ctx, beta_slow, freq_base));
  11220. dims[0] = MAX(0, start);
  11221. dims[1] = MIN(n_dims - 1, end);
  11222. }
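// forward/backward RoPE for F32 tensors: the rotation parameters are unpacked from
// dst->op_params, `mode` selects the standard, NeoX (mode & 2) or GLM (mode & 4) layout and
// rows are distributed over threads via ir0/ir1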
  11223. static void ggml_compute_forward_rope_f32(
  11224. const struct ggml_compute_params * params,
  11225. struct ggml_tensor * dst,
  11226. const bool forward) {
  11227. const struct ggml_tensor * src0 = dst->src[0];
  11228. const struct ggml_tensor * src1 = dst->src[1];
  11229. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  11230. return;
  11231. }
  11232. float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow;
  11233. // these two only relevant for xPos RoPE:
  11234. float xpos_base;
  11235. bool xpos_down;
  11236. //const int n_past = ((int32_t *) dst->op_params)[0];
  11237. const int n_dims = ((int32_t *) dst->op_params)[1];
  11238. const int mode = ((int32_t *) dst->op_params)[2];
  11239. const int n_ctx = ((int32_t *) dst->op_params)[3];
  11240. const int n_orig_ctx = ((int32_t *) dst->op_params)[4];
  11241. memcpy(&freq_base, (int32_t *) dst->op_params + 5, sizeof(float));
  11242. memcpy(&freq_scale, (int32_t *) dst->op_params + 6, sizeof(float));
  11243. memcpy(&ext_factor, (int32_t *) dst->op_params + 7, sizeof(float));
  11244. memcpy(&attn_factor, (int32_t *) dst->op_params + 8, sizeof(float));
  11245. memcpy(&beta_fast, (int32_t *) dst->op_params + 9, sizeof(float));
  11246. memcpy(&beta_slow, (int32_t *) dst->op_params + 10, sizeof(float));
  11247. memcpy(&xpos_base, (int32_t *) dst->op_params + 11, sizeof(float));
  11248. memcpy(&xpos_down, (int32_t *) dst->op_params + 12, sizeof(bool));
  11249. GGML_TENSOR_UNARY_OP_LOCALS
  11250. //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
  11251. //printf("n_past = %d, ne2 = %d\n", n_past, ne2);
  11252. GGML_ASSERT(nb00 == sizeof(float));
  11253. const int ith = params->ith;
  11254. const int nth = params->nth;
  11255. const int nr = ggml_nrows(dst);
  11256. GGML_ASSERT(n_dims <= ne0);
  11257. GGML_ASSERT(n_dims % 2 == 0);
  11258. // rows per thread
  11259. const int dr = (nr + nth - 1)/nth;
  11260. // row range for this thread
  11261. const int ir0 = dr*ith;
  11262. const int ir1 = MIN(ir0 + dr, nr);
  11263. // row index used to determine which thread to use
  11264. int ir = 0;
  11265. const float theta_scale = powf(freq_base, -2.0f/n_dims);
  11266. const float inv_ndims = -1.f/n_dims;
  11267. float corr_dims[2];
  11268. ggml_rope_yarn_corr_dims(n_dims, n_orig_ctx, freq_base, beta_fast, beta_slow, corr_dims);
  11269. const bool is_neox = mode & 2;
  11270. const bool is_glm = mode & 4;
  11271. // backward process uses inverse rotation by cos and sin.
  11272. // cos and sin build a rotation matrix, where the inverse is the transpose.
  11273. // this essentially just switches the sign of sin.
  11274. const float sin_sign = forward ? 1.0f : -1.0f;
  11275. const int32_t * pos = (const int32_t *) src1->data;
  11276. for (int64_t i3 = 0; i3 < ne3; i3++) {
  11277. for (int64_t i2 = 0; i2 < ne2; i2++) {
  11278. const int64_t p = pos[i2];
  11279. float * cache = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32)*ith;
  11280. if (!is_glm && !is_neox) { // TODO: cache sin/cos for glm, neox
  11281. ggml_rope_cache_init(p, freq_scale, corr_dims, ne0, ext_factor, attn_factor, cache, sin_sign, theta_scale);
  11282. }
  11283. for (int64_t i1 = 0; i1 < ne1; i1++) {
  11284. if (ir++ < ir0) continue;
  11285. if (ir > ir1) break;
  11286. float theta_base = (float)p;
  11287. if (is_glm) {
  11288. theta_base = MIN(p, n_ctx - 2);
  11289. float block_theta = MAX(p - (n_ctx - 2), 0);
  11290. for (int64_t i0 = 0; i0 < ne0 / 4; i0++) {
  11291. const float cos_theta = cosf(theta_base);
  11292. const float sin_theta = sinf(theta_base) * sin_sign;
  11293. const float cos_block_theta = cosf(block_theta);
  11294. const float sin_block_theta = sinf(block_theta) * sin_sign;
  11295. theta_base *= theta_scale;
  11296. block_theta *= theta_scale;
  11297. const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  11298. float * dst_data = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  11299. const float x0 = src[0];
  11300. const float x1 = src[n_dims/2];
  11301. const float x2 = src[n_dims];
  11302. const float x3 = src[n_dims/2*3];
  11303. dst_data[0] = x0*cos_theta - x1*sin_theta;
  11304. dst_data[n_dims/2] = x0*sin_theta + x1*cos_theta;
  11305. dst_data[n_dims] = x2*cos_block_theta - x3*sin_block_theta;
  11306. dst_data[n_dims/2*3] = x2*sin_block_theta + x3*cos_block_theta;
  11307. }
  11308. } else if (!is_neox) {
  11309. for (int64_t i0 = 0; i0 < ne0; i0 += 2) {
  11310. const float cos_theta = cache[i0 + 0];
  11311. const float sin_theta = cache[i0 + 1];
  11312. // zeta scaling for xPos only:
  11313. float zeta = xpos_base != 0.0f ? powf((i0 + 0.4f * ne0) / (1.4f * ne0), p / xpos_base) : 1.0f;
  11314. if (xpos_down) zeta = 1.0f / zeta;
  11315. const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  11316. float * dst_data = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  11317. const float x0 = src[0];
  11318. const float x1 = src[1];
  11319. dst_data[0] = x0*cos_theta*zeta - x1*sin_theta*zeta;
  11320. dst_data[1] = x0*sin_theta*zeta + x1*cos_theta*zeta;
  11321. }
  11322. } else {
  11323. // TODO: this might be wrong for ne0 != n_dims - need double check
  11324. // it seems we have to rope just the first n_dims elements and do nothing with the rest
  11325. // ref: https://github.com/ml-explore/mlx/blob/dc2edc762c797e3b8de50b1dad4dc0a131691033/benchmarks/python/llama_jax_bench.py#L11-L26
  11326. theta_base *= freq_scale;
  11327. for (int64_t ic = 0; ic < ne0; ic += 2) {
  11328. if (ic < n_dims) {
  11329. const int64_t ib = 0;
  11330. // simplified from `(ib * n_dims + ic) * inv_ndims`
  11331. float cur_rot = inv_ndims * ic - ib;
  11332. float cos_theta, sin_theta;
  11333. rope_yarn(
  11334. theta_base, freq_scale, corr_dims, cur_rot, ext_factor, attn_factor,
  11335. &cos_theta, &sin_theta
  11336. );
  11337. sin_theta *= sin_sign;
  11338. theta_base *= theta_scale;
  11339. const int64_t i0 = ib*n_dims + ic/2;
  11340. const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  11341. float * dst_data = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  11342. const float x0 = src[0];
  11343. const float x1 = src[n_dims/2];
  11344. dst_data[0] = x0*cos_theta - x1*sin_theta;
  11345. dst_data[n_dims/2] = x0*sin_theta + x1*cos_theta;
  11346. } else {
  11347. const int64_t i0 = ic;
  11348. const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  11349. float * dst_data = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  11350. dst_data[0] = src[0];
  11351. dst_data[1] = src[1];
  11352. }
  11353. }
  11354. }
  11355. }
  11356. }
  11357. }
  11358. }
  11359. static void ggml_compute_forward_rope_f16(
  11360. const struct ggml_compute_params * params,
  11361. struct ggml_tensor * dst,
  11362. const bool forward) {
  11363. const struct ggml_tensor * src0 = dst->src[0];
  11364. const struct ggml_tensor * src1 = dst->src[1];
  11365. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  11366. return;
  11367. }
  11368. float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow;
  11369. //const int n_past = ((int32_t *) dst->op_params)[0];
  11370. const int n_dims = ((int32_t *) dst->op_params)[1];
  11371. const int mode = ((int32_t *) dst->op_params)[2];
  11372. const int n_ctx = ((int32_t *) dst->op_params)[3];
  11373. const int n_orig_ctx = ((int32_t *) dst->op_params)[4];
  11374. memcpy(&freq_base, (int32_t *) dst->op_params + 5, sizeof(float));
  11375. memcpy(&freq_scale, (int32_t *) dst->op_params + 6, sizeof(float));
  11376. memcpy(&ext_factor, (int32_t *) dst->op_params + 7, sizeof(float));
  11377. memcpy(&attn_factor, (int32_t *) dst->op_params + 8, sizeof(float));
  11378. memcpy(&beta_fast, (int32_t *) dst->op_params + 9, sizeof(float));
  11379. memcpy(&beta_slow, (int32_t *) dst->op_params + 10, sizeof(float));
  11380. GGML_TENSOR_UNARY_OP_LOCALS
  11381. //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
  11382. //printf("n_past = %d, ne2 = %d\n", n_past, ne2);
  11383. GGML_ASSERT(nb0 == sizeof(ggml_fp16_t));
  11384. const int ith = params->ith;
  11385. const int nth = params->nth;
  11386. const int nr = ggml_nrows(dst);
  11387. GGML_ASSERT(n_dims <= ne0);
  11388. GGML_ASSERT(n_dims % 2 == 0);
  11389. // rows per thread
  11390. const int dr = (nr + nth - 1)/nth;
  11391. // row range for this thread
  11392. const int ir0 = dr*ith;
  11393. const int ir1 = MIN(ir0 + dr, nr);
  11394. // row index used to determine which thread to use
  11395. int ir = 0;
  11396. const float theta_scale = powf(freq_base, -2.0f/n_dims);
  11397. const float inv_ndims = -1.f/n_dims;
  11398. float corr_dims[2];
  11399. ggml_rope_yarn_corr_dims(n_dims, n_orig_ctx, freq_base, beta_fast, beta_slow, corr_dims);
  11400. const bool is_neox = mode & 2;
  11401. const bool is_glm = mode & 4;
  11402. // backward process uses inverse rotation by cos and sin.
  11403. // cos and sin build a rotation matrix, where the inverse is the transpose.
  11404. // this essentially just switches the sign of sin.
  11405. const float sin_sign = forward ? 1.0f : -1.0f;
  11406. const int32_t * pos = (const int32_t *) src1->data;
  11407. for (int64_t i3 = 0; i3 < ne3; i3++) {
  11408. for (int64_t i2 = 0; i2 < ne2; i2++) {
  11409. const int64_t p = pos[i2];
  11410. float * cache = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32)*ith;
  11411. if (!is_glm && !is_neox) { // TODO: cache sin/cos for glm, neox
  11412. ggml_rope_cache_init(p, freq_scale, corr_dims, ne0, ext_factor, attn_factor, cache, sin_sign, theta_scale);
  11413. }
  11414. for (int64_t i1 = 0; i1 < ne1; i1++) {
  11415. if (ir++ < ir0) continue;
  11416. if (ir > ir1) break;
  11417. float theta_base = (float)p;
  11418. if (is_glm) {
  11419. theta_base = MIN(p, n_ctx - 2);
  11420. float block_theta = MAX(p - (n_ctx - 2), 0);
  11421. for (int64_t i0 = 0; i0 < ne0 / 4; i0++) {
  11422. const float cos_theta = cosf(theta_base);
  11423. const float sin_theta = sinf(theta_base) * sin_sign;
  11424. const float cos_block_theta = cosf(block_theta);
  11425. const float sin_block_theta = sinf(block_theta) * sin_sign;
  11426. theta_base *= theta_scale;
  11427. block_theta *= theta_scale;
  11428. const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  11429. ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  11430. const float x0 = GGML_FP16_TO_FP32(src[0]);
  11431. const float x1 = GGML_FP16_TO_FP32(src[n_dims/2]);
  11432. const float x2 = GGML_FP16_TO_FP32(src[n_dims]);
  11433. const float x3 = GGML_FP16_TO_FP32(src[n_dims/2*3]);
  11434. dst_data[0] = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta);
  11435. dst_data[n_dims/2] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta);
  11436. dst_data[n_dims] = GGML_FP32_TO_FP16(x2*cos_block_theta - x3*sin_block_theta);
  11437. dst_data[n_dims/2*3] = GGML_FP32_TO_FP16(x2*sin_block_theta + x3*cos_block_theta);
  11438. }
  11439. } else if (!is_neox) {
  11440. for (int64_t i0 = 0; i0 < ne0; i0 += 2) {
  11441. const float cos_theta = cache[i0 + 0];
  11442. const float sin_theta = cache[i0 + 1];
  11443. const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  11444. ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  11445. const float x0 = GGML_FP16_TO_FP32(src[0]);
  11446. const float x1 = GGML_FP16_TO_FP32(src[1]);
  11447. dst_data[0] = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta);
  11448. dst_data[1] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta);
  11449. }
  11450. } else {
  11451. // TODO: this might be wrong for ne0 != n_dims - need double check
  11452. // it seems we have to rope just the first n_dims elements and do nothing with the rest
  11453. // ref: https://github.com/ml-explore/mlx/blob/dc2edc762c797e3b8de50b1dad4dc0a131691033/benchmarks/python/llama_jax_bench.py#L11-L26
  11454. theta_base *= freq_scale;
  11455. for (int64_t ic = 0; ic < ne0; ic += 2) {
  11456. if (ic < n_dims) {
  11457. const int64_t ib = 0;
  11458. // simplified from `(ib * n_dims + ic) * inv_ndims`
  11459. float cur_rot = inv_ndims * ic - ib;
  11460. float cos_theta, sin_theta;
  11461. rope_yarn(
  11462. theta_base, freq_scale, corr_dims, cur_rot, ext_factor, attn_factor,
  11463. &cos_theta, &sin_theta
  11464. );
  11465. sin_theta *= sin_sign;
  11466. theta_base *= theta_scale;
  11467. const int64_t i0 = ib*n_dims + ic/2;
  11468. const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  11469. ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  11470. const float x0 = GGML_FP16_TO_FP32(src[0]);
  11471. const float x1 = GGML_FP16_TO_FP32(src[n_dims/2]);
  11472. dst_data[0] = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta);
  11473. dst_data[n_dims/2] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta);
  11474. } else {
  11475. const int64_t i0 = ic;
  11476. const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  11477. ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  11478. dst_data[0] = src[0];
  11479. dst_data[1] = src[1];
  11480. }
  11481. }
  11482. }
  11483. }
  11484. }
  11485. }
  11486. }
  11487. static void ggml_compute_forward_rope(
  11488. const struct ggml_compute_params * params,
  11489. struct ggml_tensor * dst) {
  11490. const struct ggml_tensor * src0 = dst->src[0];
  11491. switch (src0->type) {
  11492. case GGML_TYPE_F16:
  11493. {
  11494. ggml_compute_forward_rope_f16(params, dst, true);
  11495. } break;
  11496. case GGML_TYPE_F32:
  11497. {
  11498. ggml_compute_forward_rope_f32(params, dst, true);
  11499. } break;
  11500. default:
  11501. {
  11502. GGML_ASSERT(false);
  11503. } break;
  11504. }
  11505. }
  11506. // ggml_compute_forward_rope_back
  11507. static void ggml_compute_forward_rope_back(
  11508. const struct ggml_compute_params * params,
  11509. struct ggml_tensor * dst) {
  11510. const struct ggml_tensor * src0 = dst->src[0];
  11511. switch (src0->type) {
  11512. case GGML_TYPE_F16:
  11513. {
  11514. ggml_compute_forward_rope_f16(params, dst, false);
  11515. } break;
  11516. case GGML_TYPE_F32:
  11517. {
  11518. ggml_compute_forward_rope_f32(params, dst, false);
  11519. } break;
  11520. default:
  11521. {
  11522. GGML_ASSERT(false);
  11523. } break;
  11524. }
  11525. }
  11526. // ggml_compute_forward_conv_transpose_1d
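// during INIT the kernel (src0) and the input (src1) are repacked into params->wdata in a
// layout suited for ggml_vec_dot and dst is zeroed; the compute phase then accumulates the
// dot products into dst with output stride s0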
  11527. static void ggml_compute_forward_conv_transpose_1d_f16_f32(
  11528. const struct ggml_compute_params * params,
  11529. struct ggml_tensor * dst) {
  11530. const struct ggml_tensor * src0 = dst->src[0];
  11531. const struct ggml_tensor * src1 = dst->src[1];
  11532. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  11533. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  11534. GGML_ASSERT( dst->type == GGML_TYPE_F32);
  11535. int64_t t0 = ggml_perf_time_us();
  11536. UNUSED(t0);
  11537. GGML_TENSOR_BINARY_OP_LOCALS
  11538. const int ith = params->ith;
  11539. const int nth = params->nth;
  11540. const int nk = ne00*ne01*ne02;
  11541. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  11542. GGML_ASSERT(nb10 == sizeof(float));
  11543. if (params->type == GGML_TASK_TYPE_INIT) {
  11544. if (ith != 0) {
  11545. return;
  11546. }
  11547. memset(params->wdata, 0, params->wsize);
  11548. // permute kernel data (src0) from (K x Cout x Cin) to (Cin x K x Cout)
  11549. {
  11550. ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;
  11551. for (int64_t i02 = 0; i02 < ne02; i02++) {
  11552. for (int64_t i01 = 0; i01 < ne01; i01++) {
  11553. const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i02*nb02 + i01*nb01);
  11554. ggml_fp16_t * dst_data = wdata + i01*ne00*ne02;
  11555. for (int64_t i00 = 0; i00 < ne00; i00++) {
  11556. dst_data[i00*ne02 + i02] = src[i00];
  11557. }
  11558. }
  11559. }
  11560. }
  11561. // permute source data (src1) from (L x Cin) to (Cin x L)
  11562. {
  11563. ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + nk;
  11564. ggml_fp16_t * dst_data = wdata;
  11565. for (int64_t i11 = 0; i11 < ne11; i11++) {
  11566. const float * const src = (float *)((char *) src1->data + i11*nb11);
  11567. for (int64_t i10 = 0; i10 < ne10; i10++) {
  11568. dst_data[i10*ne11 + i11] = GGML_FP32_TO_FP16(src[i10]);
  11569. }
  11570. }
  11571. }
  11572. // need to zero dst since we are accumulating into it
  11573. memset(dst->data, 0, ggml_nbytes(dst));
  11574. return;
  11575. }
  11576. if (params->type == GGML_TASK_TYPE_FINALIZE) {
  11577. return;
  11578. }
  11579. const int32_t s0 = ((const int32_t*)(dst->op_params))[0];
  11580. // total rows in dst
  11581. const int nr = ne1;
  11582. // rows per thread
  11583. const int dr = (nr + nth - 1)/nth;
  11584. // row range for this thread
  11585. const int ir0 = dr*ith;
  11586. const int ir1 = MIN(ir0 + dr, nr);
  11587. ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;
  11588. ggml_fp16_t * const wdata_src = wdata + nk;
  11589. for (int i1 = ir0; i1 < ir1; i1++) {
  11590. float * dst_data = (float *)((char *) dst->data + i1*nb1);
  11591. ggml_fp16_t * wdata_kernel = wdata + i1*ne02*ne00;
  11592. for (int i10 = 0; i10 < ne10; i10++) {
  11593. const int i1n = i10*ne11;
  11594. for (int i00 = 0; i00 < ne00; i00++) {
  11595. float v = 0;
  11596. ggml_vec_dot_f16(ne02, &v, 0,
  11597. (ggml_fp16_t *) wdata_src + i1n, 0,
  11598. (ggml_fp16_t *) wdata_kernel + i00*ne02, 0, 1);
  11599. dst_data[i10*s0 + i00] += v;
  11600. }
  11601. }
  11602. }
  11603. }
  11604. static void ggml_compute_forward_conv_transpose_1d_f32(
  11605. const struct ggml_compute_params * params,
  11606. struct ggml_tensor * dst) {
  11607. const struct ggml_tensor * src0 = dst->src[0];
  11608. const struct ggml_tensor * src1 = dst->src[1];
  11609. GGML_ASSERT(src0->type == GGML_TYPE_F32);
  11610. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  11611. GGML_ASSERT( dst->type == GGML_TYPE_F32);
  11612. int64_t t0 = ggml_perf_time_us();
  11613. UNUSED(t0);
  11614. GGML_TENSOR_BINARY_OP_LOCALS
  11615. const int ith = params->ith;
  11616. const int nth = params->nth;
  11617. const int nk = ne00*ne01*ne02;
  11618. GGML_ASSERT(nb00 == sizeof(float));
  11619. GGML_ASSERT(nb10 == sizeof(float));
  11620. if (params->type == GGML_TASK_TYPE_INIT) {
  11621. if (ith != 0) {
  11622. return;
  11623. }
  11624. memset(params->wdata, 0, params->wsize);
  11625. // prepare kernel data (src0) from (K x Cout x Cin) to (Cin x K x Cout)
  11626. {
  11627. float * const wdata = (float *) params->wdata + 0;
  11628. for (int64_t i02 = 0; i02 < ne02; i02++) {
  11629. for (int64_t i01 = 0; i01 < ne01; i01++) {
  11630. const float * const src = (float *)((char *) src0->data + i02*nb02 + i01*nb01);
  11631. float * dst_data = wdata + i01*ne00*ne02;
  11632. for (int64_t i00 = 0; i00 < ne00; i00++) {
  11633. dst_data[i00*ne02 + i02] = src[i00];
  11634. }
  11635. }
  11636. }
  11637. }
  11638. // prepare source data (src1)
  11639. {
  11640. float * const wdata = (float *) params->wdata + nk;
  11641. float * dst_data = wdata;
  11642. for (int64_t i11 = 0; i11 < ne11; i11++) {
  11643. const float * const src = (float *)((char *) src1->data + i11*nb11);
  11644. for (int64_t i10 = 0; i10 < ne10; i10++) {
  11645. dst_data[i10*ne11 + i11] = src[i10];
  11646. }
  11647. }
  11648. }
  11649. // need to zero dst since we are accumulating into it
  11650. memset(dst->data, 0, ggml_nbytes(dst));
  11651. return;
  11652. }
  11653. if (params->type == GGML_TASK_TYPE_FINALIZE) {
  11654. return;
  11655. }
  11656. const int32_t s0 = ((const int32_t*)(dst->op_params))[0];
  11657. // total rows in dst
  11658. const int nr = ne1;
  11659. // rows per thread
  11660. const int dr = (nr + nth - 1)/nth;
  11661. // row range for this thread
  11662. const int ir0 = dr*ith;
  11663. const int ir1 = MIN(ir0 + dr, nr);
  11664. float * const wdata = (float *) params->wdata + 0;
  11665. float * const wdata_src = wdata + nk;
  11666. for (int i1 = ir0; i1 < ir1; i1++) {
  11667. float * dst_data = (float *)((char *) dst->data + i1*nb1);
  11668. float * wdata_kernel = wdata + i1*ne02*ne00;
  11669. for (int i10 = 0; i10 < ne10; i10++) {
  11670. const int i1n = i10*ne11;
  11671. for (int i00 = 0; i00 < ne00; i00++) {
  11672. float v = 0;
  11673. ggml_vec_dot_f32(ne02, &v, 0,
  11674. wdata_src + i1n, 0,
  11675. wdata_kernel + i00*ne02, 0, 1);
  11676. dst_data[i10*s0 + i00] += v;
  11677. }
  11678. }
  11679. }
  11680. }
  11681. static void ggml_compute_forward_conv_transpose_1d(
  11682. const struct ggml_compute_params * params,
  11683. struct ggml_tensor * dst) {
  11684. const struct ggml_tensor * src0 = dst->src[0];
  11685. switch (src0->type) {
  11686. case GGML_TYPE_F16:
  11687. {
  11688. ggml_compute_forward_conv_transpose_1d_f16_f32(params, dst);
  11689. } break;
  11690. case GGML_TYPE_F32:
  11691. {
  11692. ggml_compute_forward_conv_transpose_1d_f32(params, dst);
  11693. } break;
  11694. default:
  11695. {
  11696. GGML_ASSERT(false);
  11697. } break;
  11698. }
  11699. }
  11700. // src0: kernel [OC, IC, KH, KW]
  11701. // src1: image [N, IC, IH, IW]
  11702. // dst: result [N, OH, OW, IC*KH*KW]
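// im2col unfolds each (KH x KW) window of the input into one row of dst so that the convolution
// itself can be done as a matrix multiplication; input channels are split across threads and
// out-of-bounds taps (padding) are written as 0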
  11703. static void ggml_compute_forward_im2col_f32(
  11704. const struct ggml_compute_params * params,
  11705. struct ggml_tensor * dst) {
  11706. const struct ggml_tensor * src0 = dst->src[0];
  11707. const struct ggml_tensor * src1 = dst->src[1];
  11708. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  11709. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  11710. GGML_ASSERT( dst->type == GGML_TYPE_F32);
  11711. int64_t t0 = ggml_perf_time_us();
  11712. UNUSED(t0);
  11713. GGML_TENSOR_BINARY_OP_LOCALS;
  11714. const int32_t s0 = ((const int32_t *)(dst->op_params))[0];
  11715. const int32_t s1 = ((const int32_t *)(dst->op_params))[1];
  11716. const int32_t p0 = ((const int32_t *)(dst->op_params))[2];
  11717. const int32_t p1 = ((const int32_t *)(dst->op_params))[3];
  11718. const int32_t d0 = ((const int32_t *)(dst->op_params))[4];
  11719. const int32_t d1 = ((const int32_t *)(dst->op_params))[5];
  11720. const bool is_2D = ((const int32_t *)(dst->op_params))[6] == 1;
  11721. const int ith = params->ith;
  11722. const int nth = params->nth;
  11723. const int64_t N = is_2D ? ne13 : ne12;
  11724. const int64_t IC = is_2D ? ne12 : ne11;
  11725. const int64_t IH = is_2D ? ne11 : 1;
  11726. const int64_t IW = ne10;
  11727. const int64_t KH = is_2D ? ne01 : 1;
  11728. const int64_t KW = ne00;
  11729. const int64_t OH = is_2D ? ne2 : 1;
  11730. const int64_t OW = ne1;
  11731. int ofs0 = is_2D ? nb13 : nb12;
  11732. int ofs1 = is_2D ? nb12 : nb11;
  11733. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  11734. GGML_ASSERT(nb10 == sizeof(float));
  11735. if (params->type == GGML_TASK_TYPE_INIT) {
  11736. return;
  11737. }
  11738. if (params->type == GGML_TASK_TYPE_FINALIZE) {
  11739. return;
  11740. }
  11741. // im2col: [N, IC, IH, IW] => [N, OH, OW, IC*KH*KW]
  11742. {
  11743. float * const wdata = (float *) dst->data;
  11744. for (int64_t in = 0; in < N; in++) {
  11745. for (int64_t ioh = 0; ioh < OH; ioh++) { // 1
  11746. for (int64_t iow = 0; iow < OW; iow++) {
  11747. for (int64_t iic = ith; iic < IC; iic += nth) {
  11748. // micro kernel
  11749. float * dst_data = wdata + (in*OH*OW + ioh*OW + iow)*(IC*KH*KW); // [IC, KH, KW]
  11750. const float * const src_data = (float *)((char *) src1->data + in*ofs0 + iic*ofs1); // [IH, IW]
  11751. for (int64_t ikh = 0; ikh < KH; ikh++) { // 1
  11752. for (int64_t ikw = 0; ikw < KW; ikw++) {
  11753. const int64_t iiw = iow*s0 + ikw*d0 - p0;
  11754. const int64_t iih = ioh*s1 + ikh*d1 - p1;
  11755. if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) {
  11756. dst_data[iic*(KH*KW) + ikh*KW + ikw] = 0;
  11757. } else {
  11758. dst_data[iic*(KH*KW) + ikh*KW + ikw] = (src_data[iih*IW + iiw]);
  11759. }
  11760. }
  11761. }
  11762. }
  11763. }
  11764. }
  11765. }
  11766. }
  11767. }
  11768. // src0: kernel [OC, IC, KH, KW]
  11769. // src1: image [N, IC, IH, IW]
  11770. // dst: result [N, OH, OW, IC*KH*KW]
  11771. static void ggml_compute_forward_im2col_f16(
  11772. const struct ggml_compute_params * params,
  11773. struct ggml_tensor * dst) {
  11774. const struct ggml_tensor * src0 = dst->src[0];
  11775. const struct ggml_tensor * src1 = dst->src[1];
  11776. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  11777. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  11778. GGML_ASSERT( dst->type == GGML_TYPE_F16);
  11779. int64_t t0 = ggml_perf_time_us();
  11780. UNUSED(t0);
  11781. GGML_TENSOR_BINARY_OP_LOCALS;
  11782. const int32_t s0 = ((const int32_t *)(dst->op_params))[0];
  11783. const int32_t s1 = ((const int32_t *)(dst->op_params))[1];
  11784. const int32_t p0 = ((const int32_t *)(dst->op_params))[2];
  11785. const int32_t p1 = ((const int32_t *)(dst->op_params))[3];
  11786. const int32_t d0 = ((const int32_t *)(dst->op_params))[4];
  11787. const int32_t d1 = ((const int32_t *)(dst->op_params))[5];
  11788. const bool is_2D = ((const int32_t *)(dst->op_params))[6] == 1;
  11789. const int ith = params->ith;
  11790. const int nth = params->nth;
  11791. const int64_t N = is_2D ? ne13 : ne12;
  11792. const int64_t IC = is_2D ? ne12 : ne11;
  11793. const int64_t IH = is_2D ? ne11 : 1;
  11794. const int64_t IW = ne10;
  11795. const int64_t KH = is_2D ? ne01 : 1;
  11796. const int64_t KW = ne00;
  11797. const int64_t OH = is_2D ? ne2 : 1;
  11798. const int64_t OW = ne1;
  11799. int ofs0 = is_2D ? nb13 : nb12;
  11800. int ofs1 = is_2D ? nb12 : nb11;
  11801. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  11802. GGML_ASSERT(nb10 == sizeof(float));
  11803. if (params->type == GGML_TASK_TYPE_INIT) {
  11804. return;
  11805. }
  11806. if (params->type == GGML_TASK_TYPE_FINALIZE) {
  11807. return;
  11808. }
  11809. // im2col: [N, IC, IH, IW] => [N, OH, OW, IC*KH*KW]
  11810. {
  11811. ggml_fp16_t * const wdata = (ggml_fp16_t *) dst->data;
  11812. for (int64_t in = 0; in < N; in++) {
  11813. for (int64_t ioh = 0; ioh < OH; ioh++) { // 1
  11814. for (int64_t iow = 0; iow < OW; iow++) {
  11815. for (int64_t iic = ith; iic < IC; iic += nth) {
  11816. // micro kernel
  11817. ggml_fp16_t * dst_data = wdata + (in*OH*OW + ioh*OW + iow)*(IC*KH*KW); // [IC, KH, KW]
  11818. const float * const src_data = (float *)((char *) src1->data + in*ofs0 + iic*ofs1); // [IH, IW]
  11819. for (int64_t ikh = 0; ikh < KH; ikh++) { // 1
  11820. for (int64_t ikw = 0; ikw < KW; ikw++) {
  11821. const int64_t iiw = iow*s0 + ikw*d0 - p0;
  11822. const int64_t iih = ioh*s1 + ikh*d1 - p1;
  11823. if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) {
  11824. dst_data[iic*(KH*KW) + ikh*KW + ikw] = 0;
  11825. } else {
  11826. dst_data[iic*(KH*KW) + ikh*KW + ikw] = GGML_FP32_TO_FP16(src_data[iih*IW + iiw]);
  11827. }
  11828. }
  11829. }
  11830. }
  11831. }
  11832. }
  11833. }
  11834. }
  11835. }
  11836. static void ggml_compute_forward_im2col(
  11837. const struct ggml_compute_params * params,
  11838. struct ggml_tensor * dst) {
  11839. switch (dst->type) {
  11840. case GGML_TYPE_F16:
  11841. {
  11842. ggml_compute_forward_im2col_f16(params, dst);
  11843. } break;
  11844. case GGML_TYPE_F32:
  11845. {
  11846. ggml_compute_forward_im2col_f32(params, dst);
  11847. } break;
  11848. default:
  11849. {
  11850. GGML_ASSERT(false);
  11851. } break;
  11852. }
  11853. }
  11854. // ggml_compute_forward_conv_transpose_2d
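// same scheme as the 1D variant: INIT repacks kernel and source into params->wdata and zeroes
// dst, then each thread accumulates its range of output channels with ggml_vec_dot_f16 using
// the configured stride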
  11855. static void ggml_compute_forward_conv_transpose_2d(
  11856. const struct ggml_compute_params * params,
  11857. struct ggml_tensor * dst) {
  11858. const struct ggml_tensor * src0 = dst->src[0];
  11859. const struct ggml_tensor * src1 = dst->src[1];
  11860. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  11861. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  11862. GGML_ASSERT( dst->type == GGML_TYPE_F32);
  11863. int64_t t0 = ggml_perf_time_us();
  11864. UNUSED(t0);
  11865. GGML_TENSOR_BINARY_OP_LOCALS
  11866. const int ith = params->ith;
  11867. const int nth = params->nth;
  11868. const int nk = ne00*ne01*ne02*ne03;
  11869. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  11870. GGML_ASSERT(nb10 == sizeof(float));
  11871. if (params->type == GGML_TASK_TYPE_INIT) {
  11872. if (ith != 0) {
  11873. return;
  11874. }
  11875. memset(params->wdata, 0, params->wsize);
  11876. // permute kernel data (src0) from (Kw x Kh x Cout x Cin) to (Cin x Kw x Kh x Cout)
  11877. {
  11878. ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;
  11879. for (int64_t i03 = 0; i03 < ne03; i03++) {
  11880. for (int64_t i02 = 0; i02 < ne02; i02++) {
  11881. const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i03*nb03 + i02*nb02);
  11882. ggml_fp16_t * dst_data = wdata + i02*ne01*ne00*ne03;
  11883. for (int64_t i01 = 0; i01 < ne01; i01++) {
  11884. for (int64_t i00 = 0; i00 < ne00; i00++) {
  11885. dst_data[i01*ne00*ne03 + i00*ne03 + i03] = src[i01 * ne00 + i00];
  11886. }
  11887. }
  11888. }
  11889. }
  11890. }
  11891. // permute source data (src1) from (Sw x Sh x Cin) to (Cin x Sw x Sh)
  11892. {
  11893. ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + nk;
  11894. for (int i12 = 0; i12 < ne12; i12++) {
  11895. for (int i11 = 0; i11 < ne11; i11++) {
  11896. const float * const src = (float *)((char *) src1->data + i12*nb12 + i11*nb11);
  11897. ggml_fp16_t * dst_data = wdata + i11*ne10*ne12;
  11898. for (int i10 = 0; i10 < ne10; i10++) {
  11899. dst_data[i10*ne12 + i12] = GGML_FP32_TO_FP16(src[i10]);
  11900. }
  11901. }
  11902. }
  11903. }
  11904. memset(dst->data, 0, ggml_nbytes(dst));
  11905. return;
  11906. }
  11907. if (params->type == GGML_TASK_TYPE_FINALIZE) {
  11908. return;
  11909. }
  11910. const int32_t stride = ggml_get_op_params_i32(dst, 0);
  11911. // total patches in dst
  11912. const int np = ne2;
  11913. // patches per thread
  11914. const int dp = (np + nth - 1)/nth;
  11915. // patch range for this thread
  11916. const int ip0 = dp*ith;
  11917. const int ip1 = MIN(ip0 + dp, np);
  11918. ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;
  11919. ggml_fp16_t * const wdata_src = wdata + nk;
  11920. for (int i2 = ip0; i2 < ip1; i2++) { // Cout
  11921. float * dst_data = (float *)((char *) dst->data + i2*nb2);
  11922. ggml_fp16_t * wdata_kernel = wdata + i2*ne01*ne00*ne03;
  11923. for (int i11 = 0; i11 < ne11; i11++) {
  11924. for (int i10 = 0; i10 < ne10; i10++) {
  11925. const int i1n = i11*ne10*ne12 + i10*ne12;
  11926. for (int i01 = 0; i01 < ne01; i01++) {
  11927. for (int i00 = 0; i00 < ne00; i00++) {
  11928. float v = 0;
  11929. ggml_vec_dot_f16(ne03, &v, 0,
  11930. wdata_src + i1n, 0,
  11931. wdata_kernel + i01*ne00*ne03 + i00*ne03, 0, 1);
  11932. dst_data[(i11*stride + i01)*ne0 + i10*stride + i00] += v;
  11933. }
  11934. }
  11935. }
  11936. }
  11937. }
  11938. }
  11939. // ggml_compute_forward_pool_1d_sk_p0
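// 1D pooling specialized for stride == kernel size and zero padding (hence "sk_p0");
// supports average and max pooling over contiguous F32 rows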
  11940. static void ggml_compute_forward_pool_1d_sk_p0(
  11941. const struct ggml_compute_params * params,
  11942. const enum ggml_op_pool op,
  11943. const int k,
  11944. struct ggml_tensor * dst) {
  11945. const struct ggml_tensor * src = dst->src[0];
  11946. assert(src->type == GGML_TYPE_F32);
  11947. assert(params->ith == 0);
  11948. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  11949. return;
  11950. }
  11951. const char * cdata = (const char *)src->data;
  11952. const char * const data_end = cdata + ggml_nbytes(src);
  11953. float * drow = (float *)dst->data;
  11954. const int64_t rs = dst->ne[0];
  11955. while (cdata < data_end) {
  11956. const float * const srow = (const float *)cdata;
  11957. int j = 0;
  11958. for (int64_t i = 0; i < rs; ++i) {
  11959. switch (op) {
  11960. case GGML_OP_POOL_AVG: drow[i] = 0; break;
  11961. case GGML_OP_POOL_MAX: drow[i] = -FLT_MAX; break;
  11962. case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
  11963. }
  11964. for (int ki = 0; ki < k; ++ki) {
  11965. switch (op) {
  11966. case GGML_OP_POOL_AVG: drow[i] += srow[j]; break;
  11967. case GGML_OP_POOL_MAX: if (srow[j] > drow[i]) drow[i] = srow[j]; break;
  11968. case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
  11969. }
  11970. ++j;
  11971. }
  11972. switch (op) {
  11973. case GGML_OP_POOL_AVG: drow[i] /= k; break;
  11974. case GGML_OP_POOL_MAX: break;
  11975. case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
  11976. }
  11977. }
  11978. cdata += src->nb[1];
  11979. drow += rs;
  11980. }
  11981. }
  11982. // ggml_compute_forward_pool_1d
  11983. static void ggml_compute_forward_pool_1d(
  11984. const struct ggml_compute_params * params,
  11985. struct ggml_tensor * dst) {
  11986. const int32_t * opts = (const int32_t *)dst->op_params;
  11987. enum ggml_op_pool op = opts[0];
  11988. const int k0 = opts[1];
  11989. const int s0 = opts[2];
  11990. const int p0 = opts[3];
  11991. GGML_ASSERT(p0 == 0); // padding not supported
  11992. GGML_ASSERT(k0 == s0); // only s = k supported
  11993. ggml_compute_forward_pool_1d_sk_p0(params, op, k0, dst);
  11994. }
  11995. // ggml_compute_forward_pool_2d
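// general 2D pooling with kernel (k0, k1), stride (s0, s1) and padding (p0, p1); out-of-range
// taps are skipped, and average pooling divides by the full window size ka = k0*k1, so padded
// positions effectively contribute zero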
  11996. static void ggml_compute_forward_pool_2d(
  11997. const struct ggml_compute_params * params,
  11998. struct ggml_tensor * dst) {
  11999. const struct ggml_tensor * src = dst->src[0];
  12000. GGML_ASSERT(src->type == GGML_TYPE_F32);
  12001. GGML_ASSERT(params->ith == 0);
  12002. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  12003. return;
  12004. }
  12005. const int32_t * opts = (const int32_t *)dst->op_params;
  12006. enum ggml_op_pool op = opts[0];
  12007. const int k0 = opts[1];
  12008. const int k1 = opts[2];
  12009. const int s0 = opts[3];
  12010. const int s1 = opts[4];
  12011. const int p0 = opts[5];
  12012. const int p1 = opts[6];
  12013. const char * cdata = (const char*)src->data;
  12014. const char * const data_end = cdata + ggml_nbytes(src);
  12015. const int64_t px = dst->ne[0];
  12016. const int64_t py = dst->ne[1];
  12017. const int64_t pa = px * py;
  12018. float * dplane = (float *)dst->data;
  12019. const int ka = k0 * k1;
  12020. const int offset0 = -p0;
  12021. const int offset1 = -p1;
  12022. while (cdata < data_end) {
  12023. for (int oy = 0; oy < py; ++oy) {
  12024. float * const drow = dplane + oy * px;
  12025. for (int ox = 0; ox < px; ++ox) {
  12026. float * const out = drow + ox;
  12027. switch (op) {
  12028. case GGML_OP_POOL_AVG: *out = 0; break;
  12029. case GGML_OP_POOL_MAX: *out = -FLT_MAX; break;
  12030. case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
  12031. }
  12032. const int ix = offset0 + ox * s0;
  12033. const int iy = offset1 + oy * s1;
  12034. for (int ky = 0; ky < k1; ++ky) {
  12035. if (iy + ky < 0 || iy + ky >= src->ne[1]) continue;
  12036. const float * const srow = (const float *)(cdata + src->nb[1] * (iy + ky));
  12037. for (int kx = 0; kx < k0; ++kx) {
  12038. int j = ix + kx;
  12039. if (j < 0 || j >= src->ne[0]) continue;
  12040. switch (op) {
  12041. case GGML_OP_POOL_AVG: *out += srow[j]; break;
  12042. case GGML_OP_POOL_MAX: if (srow[j] > *out) *out = srow[j]; break;
  12043. case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
  12044. }
  12045. }
  12046. }
  12047. switch (op) {
  12048. case GGML_OP_POOL_AVG: *out /= ka; break;
  12049. case GGML_OP_POOL_MAX: break;
  12050. case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
  12051. }
  12052. }
  12053. }
  12054. cdata += src->nb[2];
  12055. dplane += pa;
  12056. }
  12057. }
  12058. // ggml_compute_forward_upscale
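// nearest-neighbour upscaling: each output coordinate is mapped back to the source by dividing
// by the per-dimension scale factors sf0..sf3 and the corresponding source element is copied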
  12059. static void ggml_compute_forward_upscale_f32(
  12060. const struct ggml_compute_params * params,
  12061. struct ggml_tensor * dst) {
  12062. const struct ggml_tensor * src0 = dst->src[0];
  12063. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  12064. return;
  12065. }
  12066. GGML_ASSERT(src0->type == GGML_TYPE_F32);
  12067. const int ith = params->ith;
  12068. const int nth = params->nth;
  12069. GGML_TENSOR_UNARY_OP_LOCALS
  12070. const float sf0 = (float)ne0/src0->ne[0];
  12071. const float sf1 = (float)ne1/src0->ne[1];
  12072. const float sf2 = (float)ne2/src0->ne[2];
  12073. const float sf3 = (float)ne3/src0->ne[3];
  12074. // TODO: optimize
  12075. for (int64_t i3 = 0; i3 < ne3; i3++) {
  12076. const int64_t i03 = i3 / sf3;
  12077. for (int64_t i2 = ith; i2 < ne2; i2 += nth) {
  12078. const int64_t i02 = i2 / sf2;
  12079. for (int64_t i1 = 0; i1 < ne1; i1++) {
  12080. const int64_t i01 = i1 / sf1;
  12081. for (int64_t i0 = 0; i0 < ne0; i0++) {
  12082. const int64_t i00 = i0 / sf0;
  12083. const float * x = (float *)((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  12084. float * y = (float *)((char *) dst->data + i0*nb0 + i1*nb1 + i2*nb2 + i3*nb3);
  12085. *y = *x;
  12086. }
  12087. }
  12088. }
  12089. }
  12090. }
  12091. static void ggml_compute_forward_upscale(
  12092. const struct ggml_compute_params * params,
  12093. struct ggml_tensor * dst) {
  12094. const struct ggml_tensor * src0 = dst->src[0];
  12095. switch (src0->type) {
  12096. case GGML_TYPE_F32:
  12097. {
  12098. ggml_compute_forward_upscale_f32(params, dst);
  12099. } break;
  12100. default:
  12101. {
  12102. GGML_ASSERT(false);
  12103. } break;
  12104. }
  12105. }
  12106. // ggml_compute_forward_pad
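// copies src0 into the leading corner of dst and fills every element outside the source
// extent with 0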
  12107. static void ggml_compute_forward_pad_f32(
  12108. const struct ggml_compute_params * params,
  12109. struct ggml_tensor * dst) {
  12110. const struct ggml_tensor * src0 = dst->src[0];
  12111. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  12112. return;
  12113. }
  12114. GGML_ASSERT(src0->nb[0] == sizeof(float));
  12115. GGML_ASSERT( dst->nb[0] == sizeof(float));
  12116. const int ith = params->ith;
  12117. const int nth = params->nth;
  12118. GGML_TENSOR_UNARY_OP_LOCALS
  12119. float * dst_ptr = (float *) dst->data;
  12120. // TODO: optimize
  12121. for (int64_t i2 = 0; i2 < ne2; ++i2) {
  12122. for (int64_t i1 = ith; i1 < ne1; i1 += nth) {
  12123. for (int64_t i0 = 0; i0 < ne0; ++i0) {
  12124. for (int64_t i3 = 0; i3 < ne3; ++i3) {
  12125. const int64_t dst_idx = i3*(ne0*ne1*ne2) + i2*(ne0*ne1) + i1*ne0 + i0;
  12126. const float * src_ptr = (const float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  12127. if (i0 < ne00 && i1 < ne01 && i2 < ne02 && i3 < ne03) {
  12128. dst_ptr[dst_idx] = *src_ptr;
  12129. } else {
  12130. dst_ptr[dst_idx] = 0;
  12131. }
  12132. }
  12133. }
  12134. }
  12135. }
  12136. }
  12137. static void ggml_compute_forward_pad(
  12138. const struct ggml_compute_params * params,
  12139. struct ggml_tensor * dst) {
  12140. const struct ggml_tensor * src0 = dst->src[0];
  12141. switch (src0->type) {
  12142. case GGML_TYPE_F32:
  12143. {
  12144. ggml_compute_forward_pad_f32(params, dst);
  12145. } break;
  12146. default:
  12147. {
  12148. GGML_ASSERT(false);
  12149. } break;
  12150. }
  12151. }
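// ggml_compute_forward_pad_f32 writes src0 into the leading region of a larger contiguous
// f32 dst and zero-fills everything outside the source extent. A minimal 2-D standalone
// sketch of the same idea (illustrative names):
#if 0
static void pad2d_zero_sketch(const float * src, int64_t sw, int64_t sh,
                              float       * dst, int64_t dw, int64_t dh) {
    for (int64_t y = 0; y < dh; ++y) {
        for (int64_t x = 0; x < dw; ++x) {
            dst[y*dw + x] = (x < sw && y < sh) ? src[y*sw + x] : 0.0f;
        }
    }
}
#endif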
  12152. // ggml_compute_forward_arange
  12153. static void ggml_compute_forward_arange_f32(
  12154. const struct ggml_compute_params * params,
  12155. struct ggml_tensor * dst) {
  12156. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  12157. return;
  12158. }
  12159. GGML_ASSERT(dst->nb[0] == sizeof(float));
  12160. const int ith = params->ith;
  12161. const int nth = params->nth;
  12162. const float start = ggml_get_op_params_f32(dst, 0);
  12163. const float stop = ggml_get_op_params_f32(dst, 1);
  12164. const float step = ggml_get_op_params_f32(dst, 2);
  12165. const int64_t steps = (int64_t) ceilf((stop - start) / step);
  12166. GGML_ASSERT(ggml_nelements(dst) == steps);
  12167. for (int64_t i = ith; i < steps; i+= nth) {
  12168. float value = start + step * i;
  12169. ((float *)dst->data)[i] = value;
  12170. }
  12171. }
  12172. static void ggml_compute_forward_arange(
  12173. const struct ggml_compute_params * params,
  12174. struct ggml_tensor * dst) {
  12175. switch (dst->type) {
  12176. case GGML_TYPE_F32:
  12177. {
  12178. ggml_compute_forward_arange_f32(params, dst);
  12179. } break;
  12180. default:
  12181. {
  12182. GGML_ASSERT(false);
  12183. } break;
  12184. }
  12185. }
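// The arange kernel sizes the output as ceil((stop - start)/step) and each thread fills a
// strided subset. A single-thread standalone sketch of the same computation (illustrative
// names; relies on <math.h>, already included in this file):
#if 0
static int64_t arange_sketch(float start, float stop, float step, float * out, int64_t cap) {
    const int64_t steps = (int64_t) ceilf((stop - start) / step);
    if (steps > cap) {
        return -1;                       // caller must provide a buffer of at least 'steps' floats
    }
    for (int64_t i = 0; i < steps; ++i) {
        out[i] = start + step * (float) i;
    }
    return steps;                        // e.g. start=0, stop=5, step=2 -> {0, 2, 4}
}
#endif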
  12186. static void ggml_compute_forward_timestep_embedding_f32(
  12187. const struct ggml_compute_params * params,
  12188. struct ggml_tensor * dst) {
  12189. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  12190. return;
  12191. }
  12192. const struct ggml_tensor * src0 = dst->src[0];
  12193. GGML_ASSERT(src0->nb[0] == sizeof(float));
  12194. const int ith = params->ith;
  12195. const int nth = params->nth;
  12196. GGML_TENSOR_UNARY_OP_LOCALS
  12197. const int dim = ggml_get_op_params_i32(dst, 0);
  12198. const int max_period = ggml_get_op_params_i32(dst, 1);
  12199. int half = dim / 2;
  12200. for (int64_t i = 0; i < ne00; i++) {
  12201. float * embed_data = (float *)((char *) dst->data + i*nb1);
  12202. for (int64_t j = ith; j < half; j += nth) {
  12203. float timestep = ((float *)src0->data)[i];
  12204. float freq = (float)expf(-logf(max_period) * j / half);
  12205. float arg = timestep * freq;
  12206. embed_data[j] = cosf(arg);
  12207. embed_data[j + half] = sinf(arg);
  12208. }
  12209. if (dim % 2 != 0 && ith == 0) {
  12210. embed_data[dim] = 0.f;
  12211. }
  12212. }
  12213. }
  12214. static void ggml_compute_forward_timestep_embedding(
  12215. const struct ggml_compute_params * params,
  12216. struct ggml_tensor * dst) {
  12217. const struct ggml_tensor * src0 = dst->src[0];
  12218. switch (src0->type) {
  12219. case GGML_TYPE_F32:
  12220. {
  12221. ggml_compute_forward_timestep_embedding_f32(params, dst);
  12222. } break;
  12223. default:
  12224. {
  12225. GGML_ASSERT(false);
  12226. } break;
  12227. }
  12228. }
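// The kernel above follows the usual diffusion-model sinusoidal scheme: for j < half,
// freq_j = exp(-ln(max_period) * j / half), emb[j] = cos(t*freq_j), emb[j+half] = sin(t*freq_j),
// and an odd dim gets one zero-padded slot. A per-timestep standalone sketch (illustrative names):
#if 0
static void timestep_embedding_sketch(float t, int dim, int max_period, float * emb /* [dim] */) {
    const int half = dim / 2;
    for (int j = 0; j < half; ++j) {
        const float freq = expf(-logf((float) max_period) * (float) j / (float) half);
        emb[j]        = cosf(t * freq);
        emb[j + half] = sinf(t * freq);
    }
    if (dim % 2 != 0) {
        emb[dim - 1] = 0.0f;   // pad the odd trailing slot with zero
    }
}
#endif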
  12229. // ggml_compute_forward_argsort
  12230. static void ggml_compute_forward_argsort_f32(
  12231. const struct ggml_compute_params * params,
  12232. struct ggml_tensor * dst) {
  12233. const struct ggml_tensor * src0 = dst->src[0];
  12234. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  12235. return;
  12236. }
  12237. GGML_TENSOR_UNARY_OP_LOCALS
  12238. GGML_ASSERT(nb0 == sizeof(float));
  12239. const int ith = params->ith;
  12240. const int nth = params->nth;
  12241. const int64_t nr = ggml_nrows(src0);
  12242. enum ggml_sort_order order = (enum ggml_sort_order) ggml_get_op_params_i32(dst, 0);
  12243. for (int64_t i = ith; i < nr; i += nth) {
  12244. int32_t * dst_data = (int32_t *)((char *) dst->data + i*nb1);
  12245. const float * src_data = (float *)((char *) src0->data + i*nb01);
  12246. for (int64_t j = 0; j < ne0; j++) {
  12247. dst_data[j] = j;
  12248. }
12249. // C doesn't have a functional sort, so we use a simple O(n^2) exchange sort instead
  12250. for (int64_t j = 0; j < ne0; j++) {
  12251. for (int64_t k = j + 1; k < ne0; k++) {
  12252. if ((order == GGML_SORT_ORDER_ASC && src_data[dst_data[j]] > src_data[dst_data[k]]) ||
  12253. (order == GGML_SORT_ORDER_DESC && src_data[dst_data[j]] < src_data[dst_data[k]])) {
  12254. int32_t tmp = dst_data[j];
  12255. dst_data[j] = dst_data[k];
  12256. dst_data[k] = tmp;
  12257. }
  12258. }
  12259. }
  12260. }
  12261. }
  12262. static void ggml_compute_forward_argsort(
  12263. const struct ggml_compute_params * params,
  12264. struct ggml_tensor * dst) {
  12265. const struct ggml_tensor * src0 = dst->src[0];
  12266. switch (src0->type) {
  12267. case GGML_TYPE_F32:
  12268. {
  12269. ggml_compute_forward_argsort_f32(params, dst);
  12270. } break;
  12271. default:
  12272. {
  12273. GGML_ASSERT(false);
  12274. } break;
  12275. }
  12276. }
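// The O(n^2) exchange sort above keeps the kernel dependency-free. An equivalent standalone
// argsort sketch using qsort over (value, index) pairs -- purely illustrative, not how ggml
// implements it (ascending order shown; DESC would flip the comparator):
#if 0
typedef struct { float value; int32_t index; } sort_pair_t;

static int sort_pair_cmp_asc(const void * a, const void * b) {
    const float va = ((const sort_pair_t *) a)->value;
    const float vb = ((const sort_pair_t *) b)->value;
    return (va > vb) - (va < vb);
}

static void argsort_row_sketch(const float * src, int32_t * dst_idx, int64_t n) {
    sort_pair_t * pairs = (sort_pair_t *) malloc(n*sizeof(sort_pair_t));
    for (int64_t i = 0; i < n; ++i) {
        pairs[i].value = src[i];
        pairs[i].index = (int32_t) i;
    }
    qsort(pairs, (size_t) n, sizeof(sort_pair_t), sort_pair_cmp_asc);
    for (int64_t i = 0; i < n; ++i) {
        dst_idx[i] = pairs[i].index;   // dst_idx[j] = index of the j-th smallest value
    }
    free(pairs);
}
#endif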
  12277. // ggml_compute_forward_flash_attn
  12278. static void ggml_compute_forward_flash_attn_f32(
  12279. const struct ggml_compute_params * params,
  12280. const bool masked,
  12281. struct ggml_tensor * dst) {
  12282. const struct ggml_tensor * q = dst->src[0];
  12283. const struct ggml_tensor * k = dst->src[1];
  12284. const struct ggml_tensor * v = dst->src[2];
  12285. int64_t t0 = ggml_perf_time_us();
  12286. UNUSED(t0);
  12287. GGML_TENSOR_LOCALS(int64_t, neq, q, ne)
  12288. GGML_TENSOR_LOCALS(size_t, nbq, q, nb)
  12289. GGML_TENSOR_LOCALS(int64_t, nek, k, ne)
  12290. GGML_TENSOR_LOCALS(size_t, nbk, k, nb)
  12291. GGML_TENSOR_LOCALS(int64_t, nev, v, ne)
  12292. GGML_TENSOR_LOCALS(size_t, nbv, v, nb)
  12293. GGML_TENSOR_LOCALS(int64_t, ne, dst, ne)
  12294. GGML_TENSOR_LOCALS(size_t, nb, dst, nb)
  12295. const int ith = params->ith;
  12296. const int nth = params->nth;
  12297. const int64_t D = neq0;
  12298. const int64_t N = neq1;
  12299. const int64_t P = nek1 - N;
  12300. const int64_t M = P + N;
  12301. const int Mup = ggml_up(M, GGML_SOFT_MAX_UNROLL);
  12302. GGML_ASSERT(ne0 == D);
  12303. GGML_ASSERT(ne1 == N);
  12304. GGML_ASSERT(P >= 0);
  12305. GGML_ASSERT(nbq0 == sizeof(float));
  12306. GGML_ASSERT(nbk0 == sizeof(float));
  12307. GGML_ASSERT(nbv0 == sizeof(float));
  12308. GGML_ASSERT(neq0 == D);
  12309. GGML_ASSERT(nek0 == D);
  12310. GGML_ASSERT(nev1 == D);
  12311. GGML_ASSERT(neq1 == N);
  12312. GGML_ASSERT(nek1 == N + P);
  12313. GGML_ASSERT(nev1 == D);
  12314. // dst cannot be transposed or permuted
  12315. GGML_ASSERT(nb0 == sizeof(float));
  12316. GGML_ASSERT(nb0 <= nb1);
  12317. GGML_ASSERT(nb1 <= nb2);
  12318. GGML_ASSERT(nb2 <= nb3);
  12319. if (params->type == GGML_TASK_TYPE_INIT) {
  12320. return;
  12321. }
  12322. if (params->type == GGML_TASK_TYPE_FINALIZE) {
  12323. return;
  12324. }
  12325. // parallelize by q rows using ggml_vec_dot_f32
  12326. // total rows in q
  12327. const int nr = neq1*neq2*neq3;
  12328. // rows per thread
  12329. const int dr = (nr + nth - 1)/nth;
  12330. // row range for this thread
  12331. const int ir0 = dr*ith;
  12332. const int ir1 = MIN(ir0 + dr, nr);
  12333. const float scale = 1.0f/sqrtf(D);
  12334. //printf("P=%d N=%d D=%d ir0=%d ir1=%d scale = %f\n", P, N, D, ir0, ir1, scale);
  12335. for (int ir = ir0; ir < ir1; ++ir) {
  12336. // q indices
  12337. const int iq3 = ir/(neq2*neq1);
  12338. const int iq2 = (ir - iq3*neq2*neq1)/neq1;
  12339. const int iq1 = (ir - iq3*neq2*neq1 - iq2*neq1);
  12340. float * S = (float *) params->wdata + ith*(Mup + CACHE_LINE_SIZE_F32);
  12341. for (int i = M; i < Mup; ++i) {
  12342. S[i] = -INFINITY;
  12343. }
  12344. const int64_t masked_begin = masked ? (P + iq1 + 1) : M;
  12345. for (int64_t ic = 0; ic < masked_begin; ++ic) {
  12346. // k indices
  12347. const int ik3 = iq3;
  12348. const int ik2 = iq2 % nek2;
  12349. const int ik1 = ic;
  12350. // S indices
  12351. const int i1 = ik1;
  12352. ggml_vec_dot_f32(neq0,
  12353. S + i1, 0,
  12354. (float *) ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)), 0,
  12355. (float *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)), 0, 1);
  12356. }
  12357. // scale
  12358. ggml_vec_scale_f32(masked_begin, S, scale);
  12359. for (int64_t i = masked_begin; i < M; i++) {
  12360. S[i] = -INFINITY;
  12361. }
  12362. // softmax
  12363. // exclude known -INF S[..] values from max and loop
12364. // don't forget to set their S values to zero
  12365. {
  12366. float max = -INFINITY;
  12367. ggml_vec_max_f32(masked_begin, &max, S);
  12368. ggml_float sum = 0.0;
  12369. {
  12370. #ifdef GGML_SOFT_MAX_ACCELERATE
  12371. max = -max;
  12372. vDSP_vsadd(S, 1, &max, S, 1, Mup);
  12373. vvexpf(S, S, &Mup);
  12374. ggml_vec_sum_f32(Mup, &sum, S);
  12375. #else
  12376. uint16_t scvt[GGML_SOFT_MAX_UNROLL]; UNUSED(scvt);
  12377. ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 };
  12378. for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) {
  12379. if (i >= masked_begin) {
  12380. break;
  12381. }
  12382. float * SS = S + i;
  12383. for (int j = 0; j < GGML_SOFT_MAX_UNROLL; ++j) {
  12384. if (i + j >= masked_begin) {
  12385. break;
  12386. } else if (SS[j] == -INFINITY) {
  12387. SS[j] = 0.0f;
  12388. } else {
  12389. #ifndef GGML_FLASH_ATTN_EXP_FP16
  12390. const float val = expf(SS[j] - max);
  12391. #else
  12392. ggml_fp16_t s = GGML_FP32_TO_FP16(SS[j] - max);
  12393. memcpy(&scvt[j], &s, sizeof(uint16_t));
  12394. const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt[j]]);
  12395. #endif
  12396. sump[j] += (ggml_float)val;
  12397. SS[j] = val;
  12398. }
  12399. }
  12400. }
  12401. for (int i = 0; i < GGML_SOFT_MAX_UNROLL; i++) {
  12402. sum += sump[i];
  12403. }
  12404. #endif
  12405. }
  12406. assert(sum > 0.0);
  12407. sum = 1.0/sum;
  12408. ggml_vec_scale_f32(masked_begin, S, sum);
  12409. #ifndef NDEBUG
  12410. for (int i = 0; i < masked_begin; ++i) {
  12411. assert(!isnan(S[i]));
  12412. assert(!isinf(S[i]));
  12413. }
  12414. #endif
  12415. }
  12416. for (int64_t ic = 0; ic < nev1; ++ic) {
  12417. // dst indices
  12418. const int i1 = iq1;
  12419. const int i2 = iq2;
  12420. const int i3 = iq3;
  12421. // v indices
  12422. const int iv2 = iq2 % nev2;
  12423. const int iv3 = iq3;
  12424. ggml_vec_dot_f32(masked_begin,
  12425. (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)), 0,
  12426. (float *) ((char *) v->data + ( ic*nbv1 + iv2*nbv2 + iv3*nbv3)), 0,
  12427. S, 0, 1);
  12428. }
  12429. }
  12430. }
  12431. static void ggml_compute_forward_flash_attn_f16(
  12432. const struct ggml_compute_params * params,
  12433. const bool masked,
  12434. struct ggml_tensor * dst) {
  12435. const struct ggml_tensor * q = dst->src[0];
  12436. const struct ggml_tensor * k = dst->src[1];
  12437. const struct ggml_tensor * v = dst->src[2];
  12438. int64_t t0 = ggml_perf_time_us();
  12439. UNUSED(t0);
  12440. GGML_TENSOR_LOCALS(int64_t, neq, q, ne)
  12441. GGML_TENSOR_LOCALS(size_t, nbq, q, nb)
  12442. GGML_TENSOR_LOCALS(int64_t, nek, k, ne)
  12443. GGML_TENSOR_LOCALS(size_t, nbk, k, nb)
  12444. GGML_TENSOR_LOCALS(int64_t, nev, v, ne)
  12445. GGML_TENSOR_LOCALS(size_t, nbv, v, nb)
  12446. GGML_TENSOR_LOCALS(int64_t, ne, dst, ne)
  12447. GGML_TENSOR_LOCALS(size_t, nb, dst, nb)
  12448. const int ith = params->ith;
  12449. const int nth = params->nth;
  12450. const int64_t D = neq0;
  12451. const int64_t N = neq1;
  12452. const int64_t P = nek1 - N;
  12453. const int64_t M = P + N;
  12454. const int Mup = ggml_up(M, GGML_SOFT_MAX_UNROLL);
  12455. GGML_ASSERT(ne0 == D);
  12456. GGML_ASSERT(ne1 == N);
  12457. GGML_ASSERT(P >= 0);
  12458. GGML_ASSERT(nbq0 == sizeof(ggml_fp16_t));
  12459. GGML_ASSERT(nbk0 == sizeof(ggml_fp16_t));
  12460. GGML_ASSERT(nbv0 == sizeof(ggml_fp16_t));
  12461. GGML_ASSERT(neq0 == D);
  12462. GGML_ASSERT(nek0 == D);
  12463. GGML_ASSERT(nev1 == D);
  12464. GGML_ASSERT(neq1 == N);
  12465. GGML_ASSERT(nek1 == N + P);
  12466. GGML_ASSERT(nev1 == D);
  12467. // dst cannot be transposed or permuted
  12468. GGML_ASSERT(nb0 == sizeof(float));
  12469. GGML_ASSERT(nb0 <= nb1);
  12470. GGML_ASSERT(nb1 <= nb2);
  12471. GGML_ASSERT(nb2 <= nb3);
  12472. if (params->type == GGML_TASK_TYPE_INIT) {
  12473. return;
  12474. }
  12475. if (params->type == GGML_TASK_TYPE_FINALIZE) {
  12476. return;
  12477. }
12478. // parallelize by q rows using ggml_vec_dot_f16
  12479. // total rows in q
  12480. const int nr = neq1*neq2*neq3;
  12481. // rows per thread
  12482. const int dr = (nr + nth - 1)/nth;
  12483. // row range for this thread
  12484. const int ir0 = dr*ith;
  12485. const int ir1 = MIN(ir0 + dr, nr);
  12486. const float scale = 1.0f/sqrtf(D);
  12487. //printf("P=%d N=%d D=%d ir0=%d ir1=%d scale = %f\n", P, N, D, ir0, ir1, scale);
  12488. for (int ir = ir0; ir < ir1; ++ir) {
  12489. // q indices
  12490. const int iq3 = ir/(neq2*neq1);
  12491. const int iq2 = (ir - iq3*neq2*neq1)/neq1;
  12492. const int iq1 = (ir - iq3*neq2*neq1 - iq2*neq1);
  12493. float * S = (float *) params->wdata + ith*(2*Mup + CACHE_LINE_SIZE_F32);
  12494. for (int i = M; i < Mup; ++i) {
  12495. S[i] = -INFINITY;
  12496. }
  12497. if (GGML_VEC_DOT_UNROLL > 2 || nek1 % GGML_VEC_DOT_UNROLL != 0) {
  12498. for (int64_t ic = 0; ic < nek1; ++ic) {
  12499. // k indices
  12500. const int ik3 = iq3;
  12501. const int ik2 = iq2 % nek2;
  12502. const int ik1 = ic;
  12503. // S indices
  12504. const int i1 = ik1;
  12505. ggml_vec_dot_f16(neq0,
  12506. S + i1, 0,
  12507. (ggml_fp16_t *) ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)), 0,
  12508. (ggml_fp16_t *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)), 0, 1);
  12509. }
  12510. } else {
  12511. for (int64_t ic = 0; ic < nek1; ic += GGML_VEC_DOT_UNROLL) {
  12512. // k indices
  12513. const int ik3 = iq3;
  12514. const int ik2 = iq2 % nek2;
  12515. const int ik1 = ic;
  12516. // S indices
  12517. const int i1 = ik1;
  12518. ggml_vec_dot_f16_unroll(neq0, nbk1,
  12519. S + i1,
  12520. ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)),
  12521. (ggml_fp16_t *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)));
  12522. }
  12523. }
  12524. // scale
  12525. ggml_vec_scale_f32(nek1, S, scale);
  12526. if (masked) {
  12527. for (int64_t i = P; i < M; i++) {
  12528. if (i > P + iq1) {
  12529. S[i] = -INFINITY;
  12530. }
  12531. }
  12532. }
  12533. // softmax
12534. // TODO: exclude known -INF S[..] values from max and loop, assuming their results are zero.
12535. // don't forget to set their S values to zero
  12536. {
  12537. float max = -INFINITY;
  12538. ggml_vec_max_f32(M, &max, S);
  12539. ggml_float sum = 0.0;
  12540. {
  12541. #ifdef GGML_SOFT_MAX_ACCELERATE
  12542. max = -max;
  12543. vDSP_vsadd(S, 1, &max, S, 1, Mup);
  12544. vvexpf(S, S, &Mup);
  12545. ggml_vec_sum_f32(Mup, &sum, S);
  12546. #else
  12547. uint16_t scvt[GGML_SOFT_MAX_UNROLL];
  12548. ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 };
  12549. for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) {
  12550. float * SS = S + i;
  12551. for (int j = 0; j < GGML_SOFT_MAX_UNROLL; ++j) {
  12552. if (SS[j] == -INFINITY) {
  12553. SS[j] = 0.0f;
  12554. } else {
  12555. ggml_fp16_t s = GGML_FP32_TO_FP16(SS[j] - max);
  12556. memcpy(&scvt[j], &s, sizeof(uint16_t));
  12557. const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt[j]]);
  12558. sump[j] += (ggml_float)val;
  12559. SS[j] = val;
  12560. }
  12561. }
  12562. }
  12563. for (int i = 0; i < GGML_SOFT_MAX_UNROLL; i++) {
  12564. sum += sump[i];
  12565. }
  12566. #endif
  12567. }
  12568. assert(sum > 0.0);
  12569. sum = 1.0/sum;
  12570. ggml_vec_scale_f32(M, S, sum);
  12571. #ifndef NDEBUG
  12572. for (int i = 0; i < M; ++i) {
  12573. assert(!isnan(S[i]));
  12574. assert(!isinf(S[i]));
  12575. }
  12576. #endif
  12577. }
  12578. ggml_fp16_t * S16 = (ggml_fp16_t *) ((float *) params->wdata + ith*(2*Mup + CACHE_LINE_SIZE_F32) + Mup);
  12579. for (int64_t i = 0; i < M; i++) {
  12580. S16[i] = GGML_FP32_TO_FP16(S[i]);
  12581. }
12582. // TODO: exclude known zero S[..] values from the dot product (reducing nev0 and advancing the start of v and S16).
  12583. if (GGML_VEC_DOT_UNROLL == 1 || (nev1 % GGML_VEC_DOT_UNROLL != 0)) {
  12584. for (int64_t ic = 0; ic < nev1; ++ic) {
  12585. // dst indices
  12586. const int i1 = iq1;
  12587. const int i2 = iq2;
  12588. const int i3 = iq3;
  12589. // v indices
  12590. const int iv2 = iq2 % nev2;
  12591. const int iv3 = iq3;
  12592. ggml_vec_dot_f16(nev0,
  12593. (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)), 0,
  12594. (ggml_fp16_t *) ((char *) v->data + ( ic*nbv1 + iv2*nbv2 + iv3*nbv3)), 0,
  12595. S16, 0, 1);
  12596. }
  12597. } else {
  12598. for (int64_t ic = 0; ic < nev1; ic += GGML_VEC_DOT_UNROLL) {
  12599. // dst indices
  12600. const int i1 = iq1;
  12601. const int i2 = iq2;
  12602. const int i3 = iq3;
  12603. // v indices
  12604. const int iv2 = iq2 % nev2;
  12605. const int iv3 = iq3;
  12606. ggml_vec_dot_f16_unroll(nev0, nbv1,
  12607. (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)),
  12608. ((char *) v->data + ( ic*nbv1 + iv2*nbv2 + iv3*nbv3)),
  12609. S16);
  12610. }
  12611. }
  12612. }
  12613. }
  12614. static void ggml_compute_forward_flash_attn(
  12615. const struct ggml_compute_params * params,
  12616. const bool masked,
  12617. struct ggml_tensor * dst) {
  12618. const struct ggml_tensor * q = dst->src[0];
  12619. switch (q->type) {
  12620. case GGML_TYPE_F16:
  12621. {
  12622. ggml_compute_forward_flash_attn_f16(params, masked, dst);
  12623. } break;
  12624. case GGML_TYPE_F32:
  12625. {
  12626. ggml_compute_forward_flash_attn_f32(params, masked, dst);
  12627. } break;
  12628. default:
  12629. {
  12630. GGML_ASSERT(false);
  12631. } break;
  12632. }
  12633. }
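// Both kernels above compute, for every query row q, the standard scaled-dot-product
// attention softmax(q.K^T / sqrt(D)) . V with an optional causal mask. A naive single-query
// f32 reference sketch of the same math (illustrative names and layouts; ggml iterates k/v
// through strides rather than the flat row-major arrays assumed here):
#if 0
static void attention_row_sketch(
        const float * q,          // [D]
        const float * K,          // [M x D], row i = key i
        const float * V,          // [M x D], row i = value i
        float       * out,        // [D]
        int64_t D, int64_t M, int64_t n_unmasked) {   // assumes 1 <= n_unmasked <= M
    float * s = (float *) malloc(M*sizeof(float));
    const float scale = 1.0f/sqrtf((float) D);
    float max = -INFINITY;
    for (int64_t i = 0; i < M; ++i) {
        float dot = 0.0f;
        for (int64_t d = 0; d < D; ++d) dot += q[d]*K[i*D + d];
        s[i] = (i < n_unmasked) ? dot*scale : -INFINITY;   // causal mask
        if (s[i] > max) max = s[i];
    }
    float sum = 0.0f;
    for (int64_t i = 0; i < M; ++i) {
        s[i] = (s[i] == -INFINITY) ? 0.0f : expf(s[i] - max);
        sum += s[i];
    }
    for (int64_t i = 0; i < M; ++i) s[i] /= sum;           // softmax weights
    for (int64_t d = 0; d < D; ++d) {
        float acc = 0.0f;
        for (int64_t i = 0; i < M; ++i) acc += s[i]*V[i*D + d];
        out[d] = acc;                                       // weighted sum of values
    }
    free(s);
}
#endif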
  12634. // ggml_compute_forward_flash_attn_ext
  12635. static void ggml_compute_forward_flash_attn_ext_f16(
  12636. const struct ggml_compute_params * params,
  12637. const struct ggml_tensor * q,
  12638. const struct ggml_tensor * k,
  12639. const struct ggml_tensor * v,
  12640. const struct ggml_tensor * mask,
  12641. struct ggml_tensor * dst) {
  12642. int64_t t0 = ggml_perf_time_us();
  12643. UNUSED(t0);
  12644. GGML_TENSOR_LOCALS(int64_t, neq, q, ne)
  12645. GGML_TENSOR_LOCALS(size_t, nbq, q, nb)
  12646. GGML_TENSOR_LOCALS(int64_t, nek, k, ne)
  12647. GGML_TENSOR_LOCALS(size_t, nbk, k, nb)
  12648. GGML_TENSOR_LOCALS(int64_t, nev, v, ne)
  12649. GGML_TENSOR_LOCALS(size_t, nbv, v, nb)
  12650. GGML_TENSOR_LOCALS(int64_t, ne, dst, ne)
  12651. GGML_TENSOR_LOCALS(size_t, nb, dst, nb)
  12652. const int ith = params->ith;
  12653. const int nth = params->nth;
  12654. const int64_t D = neq0;
  12655. const int64_t N = neq1;
  12656. GGML_ASSERT(ne0 == D);
  12657. GGML_ASSERT(ne2 == N);
  12658. GGML_ASSERT(nbq0 == sizeof(float));
  12659. GGML_ASSERT(nbk0 == sizeof(ggml_fp16_t));
  12660. GGML_ASSERT(nbv0 == sizeof(ggml_fp16_t));
  12661. GGML_ASSERT(neq0 == D);
  12662. GGML_ASSERT(nek0 == D);
  12663. GGML_ASSERT(nev0 == D);
  12664. GGML_ASSERT(neq1 == N);
  12665. GGML_ASSERT(nev0 == D);
  12666. // dst cannot be transposed or permuted
  12667. GGML_ASSERT(nb0 == sizeof(float));
  12668. GGML_ASSERT(nb0 <= nb1);
  12669. GGML_ASSERT(nb1 <= nb2);
  12670. GGML_ASSERT(nb2 <= nb3);
  12671. // broadcast factors
  12672. const int64_t rk2 = neq2/nek2;
  12673. const int64_t rk3 = neq3/nek3;
  12674. const int64_t rv2 = neq2/nev2;
  12675. const int64_t rv3 = neq3/nev3;
  12676. if (params->type == GGML_TASK_TYPE_INIT) {
  12677. return;
  12678. }
  12679. if (params->type == GGML_TASK_TYPE_FINALIZE) {
  12680. return;
  12681. }
12682. // parallelize by q rows using ggml_vec_dot_f16
  12683. // total rows in q
  12684. const int nr = neq1*neq2*neq3;
  12685. // rows per thread
  12686. const int dr = (nr + nth - 1)/nth;
  12687. // row range for this thread
  12688. const int ir0 = dr*ith;
  12689. const int ir1 = MIN(ir0 + dr, nr);
  12690. float scale = 1.0f;
  12691. float max_bias = 0.0f;
  12692. memcpy(&scale, (float *) dst->op_params + 0, sizeof(float));
  12693. memcpy(&max_bias, (float *) dst->op_params + 1, sizeof(float));
  12694. const uint32_t n_head = neq2;
  12695. const uint32_t n_head_log2 = 1u << (uint32_t) floor(log2(n_head));
  12696. const float m0 = powf(2.0f, -(max_bias ) / n_head_log2);
  12697. const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);
  12698. // loop over n_batch and n_head
  12699. for (int ir = ir0; ir < ir1; ++ir) {
  12700. // q indices
  12701. const int iq3 = ir/(neq2*neq1);
  12702. const int iq2 = (ir - iq3*neq2*neq1)/neq1;
  12703. const int iq1 = (ir - iq3*neq2*neq1 - iq2*neq1);
  12704. const uint32_t h = iq2; // head
  12705. const float slope = (max_bias > 0.0f) ? h < n_head_log2 ? powf(m0, h + 1) : powf(m1, 2*(h - n_head_log2) + 1) : 1.0f;
  12706. float S = 0.0f;
  12707. float M = -INFINITY;
  12708. float * V32 = (float *) params->wdata + ith*(2*D + CACHE_LINE_SIZE_F32);
  12709. ggml_fp16_t * Q16 = (ggml_fp16_t *) (V32); // reuse memory
  12710. ggml_fp16_t * V16 = (ggml_fp16_t *) (V32 + D);
  12711. memset(V16, 0, D*sizeof(ggml_fp16_t));
  12712. const ggml_fp16_t * mp = mask ? (ggml_fp16_t *)((char *) mask->data + iq1*mask->nb[1]) : NULL;
  12713. // k indices
  12714. const int ik3 = iq3 / rk3;
  12715. const int ik2 = iq2 / rk2;
  12716. // v indices
  12717. const int iv3 = iq3 / rv3;
  12718. const int iv2 = iq2 / rv2;
  12719. // online softmax / attention
  12720. // loop over n_kv and n_head_kv
  12721. // ref: https://arxiv.org/pdf/2112.05682.pdf
  12722. for (int64_t ic = 0; ic < nek1; ++ic) {
  12723. const float mv = mp ? slope*GGML_FP16_TO_FP32(mp[ic]) : 0.0f;
  12724. if (mv == -INFINITY) {
  12725. continue;
  12726. }
  12727. float s;
  12728. // convert Q to F16 in V32
  12729. {
  12730. const float * pq = (const float *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3));
  12731. for (int64_t d = 0; d < D; ++d) {
  12732. Q16[d] = GGML_FP32_TO_FP16(pq[d]);
  12733. }
  12734. }
  12735. ggml_vec_dot_f16(D,
  12736. &s, 0,
  12737. (ggml_fp16_t *) ((char *) k->data + ( ic*nbk1 + ik2*nbk2 + ik3*nbk3)), 0,
  12738. Q16, 0, 1);
  12739. s = s*scale + mv;
  12740. const float Mold = M;
  12741. float ms = 1.0f;
  12742. float vs = 1.0f;
  12743. if (s > M) {
  12744. M = s;
  12745. ms = expf(Mold - M);
  12746. // V = V*expf(Mold - M)
  12747. ggml_vec_scale_f16(D, V16, ms);
  12748. } else {
  12749. vs = expf(s - M);
  12750. }
  12751. const ggml_fp16_t * v16 = (const ggml_fp16_t *) ((char *) v->data + (ic*nbv1 + iv2*nbv2 + iv3*nbv3));
  12752. // V += v*expf(s - M)
  12753. ggml_vec_mad_f16(D, V16, v16, vs);
  12754. S = S*ms + vs;
  12755. }
  12756. // V /= S
  12757. for (int64_t d = 0; d < D; ++d) {
  12758. V32[d] = GGML_FP16_TO_FP32(V16[d])/S;
  12759. }
  12760. // dst indices
  12761. const int i1 = iq1;
  12762. const int i2 = iq2;
  12763. const int i3 = iq3;
  12764. // original
  12765. //memcpy((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3), V, nev0*sizeof(float));
  12766. // permute(0, 2, 1, 3)
  12767. memcpy((char *) dst->data + (i3*ne2*ne1 + i2 + i1*ne1)*nb1, V32, nb1);
  12768. }
  12769. }
  12770. static void ggml_compute_forward_flash_attn_ext(
  12771. const struct ggml_compute_params * params,
  12772. const struct ggml_tensor * q,
  12773. const struct ggml_tensor * k,
  12774. const struct ggml_tensor * v,
  12775. const struct ggml_tensor * mask,
  12776. struct ggml_tensor * dst) {
  12777. switch (dst->op_params[2]) {
  12778. case GGML_PREC_DEFAULT:
  12779. case GGML_PREC_F32:
  12780. {
  12781. // uses F32 accumulators
  12782. ggml_compute_forward_flash_attn_ext_f16(params, q, k, v, mask, dst);
  12783. } break;
  12784. default:
  12785. {
  12786. GGML_ASSERT(false);
  12787. } break;
  12788. }
  12789. }
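// Unlike the kernels further up, ggml_compute_forward_flash_attn_ext_f16 never materializes
// the full score row: it keeps a running maximum M, a running denominator S and a running
// value accumulator V, rescaling the old contributions by exp(M_old - M_new) whenever the
// maximum grows (the online softmax of https://arxiv.org/pdf/2112.05682.pdf). A minimal f32
// sketch of that recurrence for one query; 'scores' is assumed to already include the scale
// and any slope*mask bias (illustrative names only):
#if 0
static void online_softmax_attention_sketch(
        const float * scores,     // [n_kv] pre-softmax scores for one query
        const float * V,          // [n_kv x D], row i = value i
        float       * out,        // [D]
        int64_t n_kv, int64_t D) {
    float M = -INFINITY;          // running maximum
    float S = 0.0f;               // running sum of exp(score - M)
    for (int64_t d = 0; d < D; ++d) out[d] = 0.0f;
    for (int64_t i = 0; i < n_kv; ++i) {
        const float s = scores[i];
        float ms = 1.0f;          // rescale factor for the old accumulator
        float vs = 1.0f;          // weight of the new value row
        if (s > M) {
            ms = expf(M - s);     // shift old contributions to the new reference maximum
            M  = s;
        } else {
            vs = expf(s - M);
        }
        for (int64_t d = 0; d < D; ++d) {
            out[d] = out[d]*ms + V[i*D + d]*vs;
        }
        S = S*ms + vs;
    }
    for (int64_t d = 0; d < D; ++d) {
        out[d] /= S;              // same result as softmax(scores) @ V, in a single pass
    }
}
#endif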
  12790. // ggml_compute_forward_flash_ff
  12791. static void ggml_compute_forward_flash_ff_f16(
  12792. const struct ggml_compute_params * params,
  12793. struct ggml_tensor * dst) {
  12794. const struct ggml_tensor * a = dst->src[0]; // F16
  12795. const struct ggml_tensor * b0 = dst->src[1]; // F16 fc_w
  12796. const struct ggml_tensor * b1 = dst->src[2]; // F32 fc_b
  12797. const struct ggml_tensor * c0 = dst->src[3]; // F16 proj_w
  12798. const struct ggml_tensor * c1 = dst->src[4]; // F32 proj_b
  12799. int64_t t0 = ggml_perf_time_us();
  12800. UNUSED(t0);
  12801. GGML_TENSOR_LOCALS(int64_t, nea, a, ne)
  12802. GGML_TENSOR_LOCALS(size_t, nba, a, nb)
  12803. GGML_TENSOR_LOCALS(int64_t, neb0, b0, ne)
  12804. GGML_TENSOR_LOCALS(size_t, nbb0, b0, nb)
  12805. GGML_TENSOR_LOCALS(int64_t, neb1, b1, ne)
  12806. GGML_TENSOR_LOCALS(size_t, nbb1, b1, nb)
  12807. GGML_TENSOR_LOCALS(int64_t, nec0, c0, ne)
  12808. GGML_TENSOR_LOCALS(size_t, nbc0, c0, nb)
  12809. GGML_TENSOR_LOCALS(int64_t, nec1, c1, ne)
  12810. GGML_TENSOR_LOCALS(size_t, nbc1, c1, nb)
  12811. GGML_TENSOR_LOCALS(int64_t, ne, dst, ne)
  12812. GGML_TENSOR_LOCALS(size_t, nb, dst, nb)
  12813. const int ith = params->ith;
  12814. const int nth = params->nth;
  12815. const int64_t D = nea0;
  12816. //const int64_t N = nea1;
  12817. const int64_t M = neb01;
  12818. GGML_ASSERT(ne0 == nea0);
  12819. GGML_ASSERT(ne1 == nea1);
  12820. GGML_ASSERT(ne2 == nea2);
  12821. GGML_ASSERT(nba0 == sizeof(ggml_fp16_t));
  12822. GGML_ASSERT(nbb00 == sizeof(ggml_fp16_t));
  12823. GGML_ASSERT(nbb10 == sizeof(float));
  12824. GGML_ASSERT(nbc00 == sizeof(ggml_fp16_t));
  12825. GGML_ASSERT(nbc10 == sizeof(float));
  12826. GGML_ASSERT(neb00 == D);
  12827. GGML_ASSERT(neb01 == M);
  12828. GGML_ASSERT(neb10 == M);
  12829. GGML_ASSERT(neb11 == 1);
  12830. GGML_ASSERT(nec00 == M);
  12831. GGML_ASSERT(nec01 == D);
  12832. GGML_ASSERT(nec10 == D);
  12833. GGML_ASSERT(nec11 == 1);
  12834. // dst cannot be transposed or permuted
  12835. GGML_ASSERT(nb0 == sizeof(float));
  12836. GGML_ASSERT(nb0 <= nb1);
  12837. GGML_ASSERT(nb1 <= nb2);
  12838. GGML_ASSERT(nb2 <= nb3);
  12839. if (params->type == GGML_TASK_TYPE_INIT) {
  12840. return;
  12841. }
  12842. if (params->type == GGML_TASK_TYPE_FINALIZE) {
  12843. return;
  12844. }
12845. // parallelize by a rows using ggml_vec_dot_f16
  12846. // total rows in a
  12847. const int nr = nea1*nea2*nea3;
  12848. // rows per thread
  12849. const int dr = (nr + nth - 1)/nth;
  12850. // row range for this thread
  12851. const int ir0 = dr*ith;
  12852. const int ir1 = MIN(ir0 + dr, nr);
  12853. for (int ir = ir0; ir < ir1; ++ir) {
  12854. // a indices
  12855. const int ia3 = ir/(nea2*nea1);
  12856. const int ia2 = (ir - ia3*nea2*nea1)/nea1;
  12857. const int ia1 = (ir - ia3*nea2*nea1 - ia2*nea1);
  12858. float * S = (float *) params->wdata + ith*(2*M + CACHE_LINE_SIZE_F32);
  12859. for (int64_t ic = 0; ic < neb01; ++ic) {
  12860. // b0 indices
  12861. const int ib03 = ia3;
  12862. const int ib02 = ia2;
  12863. const int ib01 = ic;
  12864. // S indices
  12865. const int i1 = ib01;
  12866. ggml_vec_dot_f16(nea0,
  12867. S + i1, 0,
  12868. (ggml_fp16_t *) ((char *) b0->data + (ib01*nbb01 + ib02*nbb02 + ib03*nbb03)), 0,
  12869. (ggml_fp16_t *) ((char *) a->data + ( ia1*nba1 + ia2*nba2 + ia3*nba3)), 0, 1);
  12870. }
  12871. ggml_vec_add_f32(neb01, S, S, (float *) b1->data);
  12872. //ggml_vec_gelu_f32(neb01, S, S);
  12873. ggml_fp16_t * S16 = (ggml_fp16_t *) ((float *) params->wdata + ith*(2*M + CACHE_LINE_SIZE_F32) + M);
  12874. for (int64_t i = 0; i < M; i++) {
  12875. S16[i] = GGML_FP32_TO_FP16(S[i]);
  12876. }
  12877. ggml_vec_gelu_f16(neb01, S16, S16);
  12878. {
  12879. // dst indices
  12880. const int i1 = ia1;
  12881. const int i2 = ia2;
  12882. const int i3 = ia3;
  12883. for (int64_t ic = 0; ic < nec01; ++ic) {
  12884. ggml_vec_dot_f16(neb01,
  12885. (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)), 0,
  12886. (ggml_fp16_t *) ((char *) c0->data + ( ic*nbc01 + i2*nbc02 + i3*nbc03)), 0,
  12887. S16, 0, 1);
  12888. }
  12889. ggml_vec_add_f32(nec01,
  12890. (float *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3)),
  12891. (float *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3)),
  12892. (float *) c1->data);
  12893. }
  12894. }
  12895. }
  12896. static void ggml_compute_forward_flash_ff(
  12897. const struct ggml_compute_params * params,
  12898. struct ggml_tensor * dst) {
  12899. const struct ggml_tensor * b0 = dst->src[1];
  12900. switch (b0->type) {
  12901. case GGML_TYPE_F16:
  12902. {
  12903. ggml_compute_forward_flash_ff_f16(params, dst);
  12904. } break;
  12905. case GGML_TYPE_F32:
  12906. {
  12907. GGML_ASSERT(false); // TODO
  12908. } break;
  12909. default:
  12910. {
  12911. GGML_ASSERT(false);
  12912. } break;
  12913. }
  12914. }
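// ggml_compute_forward_flash_ff_f16 fuses a two-layer MLP per input row:
// y = proj_w . gelu(fc_w . x + fc_b) + proj_b, with the hidden activation kept in
// thread-local scratch. An f32 sketch of the same computation for one row (illustrative
// names; gelu shown with the tanh approximation):
#if 0
static float gelu_sketch(float x) {
    return 0.5f*x*(1.0f + tanhf(0.7978845608f*(x + 0.044715f*x*x*x)));
}

static void flash_ff_row_sketch(
        const float * x,       // [D]  input row
        const float * fc_w,    // [M x D], row i = weights of hidden unit i
        const float * fc_b,    // [M]
        const float * proj_w,  // [D x M], row o = weights of output unit o
        const float * proj_b,  // [D]
        float       * y,       // [D]  output row
        int64_t D, int64_t M) {
    float * h = (float *) malloc(M*sizeof(float));
    for (int64_t i = 0; i < M; ++i) {
        float acc = fc_b[i];
        for (int64_t d = 0; d < D; ++d) acc += fc_w[i*D + d]*x[d];
        h[i] = gelu_sketch(acc);                   // hidden activation
    }
    for (int64_t o = 0; o < D; ++o) {
        float acc = proj_b[o];
        for (int64_t i = 0; i < M; ++i) acc += proj_w[o*M + i]*h[i];
        y[o] = acc;                                // projection back to D
    }
    free(h);
}
#endif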
  12915. // ggml_compute_forward_flash_attn_back
  12916. static void ggml_compute_forward_flash_attn_back_f32(
  12917. const struct ggml_compute_params * params,
  12918. const bool masked,
  12919. struct ggml_tensor * dst) {
  12920. const struct ggml_tensor * q = dst->src[0];
  12921. const struct ggml_tensor * k = dst->src[1];
  12922. const struct ggml_tensor * v = dst->src[2];
  12923. const struct ggml_tensor * d = dst->src[3];
  12924. int64_t t0 = ggml_perf_time_us();
  12925. UNUSED(t0);
  12926. GGML_TENSOR_LOCALS(int64_t, neq, q, ne)
  12927. GGML_TENSOR_LOCALS(size_t, nbq, q, nb)
  12928. GGML_TENSOR_LOCALS(int64_t, nek, k, ne)
  12929. GGML_TENSOR_LOCALS(size_t, nbk, k, nb)
  12930. GGML_TENSOR_LOCALS(int64_t, nev, v, ne)
  12931. GGML_TENSOR_LOCALS(size_t, nbv, v, nb)
  12932. GGML_TENSOR_LOCALS(int64_t, ned, d, ne)
  12933. GGML_TENSOR_LOCALS(size_t, nbd, d, nb)
  12934. GGML_TENSOR_LOCALS(int64_t, ne, dst, ne)
  12935. GGML_TENSOR_LOCALS(size_t, nb, dst, nb)
  12936. const int ith = params->ith;
  12937. const int nth = params->nth;
  12938. const int64_t D = neq0;
  12939. const int64_t N = neq1;
  12940. const int64_t P = nek1 - N;
  12941. const int64_t M = P + N;
  12942. const int Mup = ggml_up(M, GGML_SOFT_MAX_UNROLL);
  12943. const int mxDM = MAX(D, Mup);
  12944. // GGML_ASSERT(ne0 == D);
  12945. // GGML_ASSERT(ne1 == N);
  12946. GGML_ASSERT(P >= 0);
  12947. GGML_ASSERT(nbq0 == sizeof(float));
  12948. GGML_ASSERT(nbk0 == sizeof(float));
  12949. GGML_ASSERT(nbv0 == sizeof(float));
  12950. GGML_ASSERT(neq0 == D);
  12951. GGML_ASSERT(nek0 == D);
  12952. GGML_ASSERT(nev1 == D);
  12953. GGML_ASSERT(ned0 == D);
  12954. GGML_ASSERT(neq1 == N);
  12955. GGML_ASSERT(nek1 == N + P);
  12956. GGML_ASSERT(nev1 == D);
  12957. GGML_ASSERT(ned1 == N);
  12958. // dst cannot be transposed or permuted
  12959. GGML_ASSERT(nb0 == sizeof(float));
  12960. GGML_ASSERT(nb0 <= nb1);
  12961. GGML_ASSERT(nb1 <= nb2);
  12962. GGML_ASSERT(nb2 <= nb3);
  12963. if (params->type == GGML_TASK_TYPE_INIT) {
  12964. if (ith == 0) {
  12965. memset(dst->data, 0, nb0*ne0*ne1*ne2*ne3);
  12966. }
  12967. return;
  12968. }
  12969. if (params->type == GGML_TASK_TYPE_FINALIZE) {
  12970. return;
  12971. }
  12972. const int64_t elem_q = ggml_nelements(q);
  12973. const int64_t elem_k = ggml_nelements(k);
  12974. enum ggml_type result_type = dst->type;
  12975. GGML_ASSERT(ggml_blck_size(result_type) == 1);
  12976. const size_t tsize = ggml_type_size(result_type);
  12977. const size_t offs_q = 0;
  12978. const size_t offs_k = offs_q + GGML_PAD(elem_q * tsize, GGML_MEM_ALIGN);
  12979. const size_t offs_v = offs_k + GGML_PAD(elem_k * tsize, GGML_MEM_ALIGN);
  12980. void * grad_q = (char *) dst->data;
  12981. void * grad_k = (char *) dst->data + offs_k;
  12982. void * grad_v = (char *) dst->data + offs_v;
  12983. const size_t nbgq1 = nb0*neq0;
  12984. const size_t nbgq2 = nb0*neq0*neq1;
  12985. const size_t nbgq3 = nb0*neq0*neq1*neq2;
  12986. const size_t nbgk1 = nb0*nek0;
  12987. const size_t nbgk2 = nb0*nek0*nek1;
  12988. const size_t nbgk3 = nb0*nek0*nek1*neq2;
  12989. const size_t nbgv1 = nb0*nev0;
  12990. const size_t nbgv2 = nb0*nev0*nev1;
  12991. const size_t nbgv3 = nb0*nev0*nev1*neq2;
  12992. // parallelize by k rows using ggml_vec_dot_f32
  12993. // total rows in k
  12994. const int nr = nek2*nek3;
  12995. // rows per thread
  12996. const int dr = (nr + nth - 1)/nth;
  12997. // row range for this thread
  12998. const int ir0 = dr*ith;
  12999. const int ir1 = MIN(ir0 + dr, nr);
  13000. const float scale = 1.0f/sqrtf(D);
  13001. //printf("P=%d N=%d D=%d ir0=%d ir1=%d scale = %f\n", P, N, D, ir0, ir1, scale);
  13002. // how often k2 (and v2) is repeated in q2
  13003. int nrep = neq2/nek2;
  13004. for (int ir = ir0; ir < ir1; ++ir) {
  13005. // q indices
  13006. const int ik3 = ir/(nek2);
  13007. const int ik2 = ir - ik3*nek2;
  13008. const int iq3 = ik3;
  13009. const int id3 = ik3;
  13010. const int iv3 = ik3;
  13011. const int iv2 = ik2;
  13012. for (int irep = 0; irep < nrep; ++irep) {
  13013. const int iq2 = ik2 + irep*nek2;
  13014. const int id2 = iq2;
  13015. // (ik2 + irep*nek2) % nek2 == ik2
  13016. for (int iq1 = 0; iq1 < neq1; ++iq1) {
  13017. const int id1 = iq1;
13018. // not sure about CACHE_LINE_SIZE_F32..
13019. // - maybe it should not be multiplied by 2 and not be counted in the SM 1*(..) offset?
  13020. float * S = (float *) params->wdata + ith*2*(mxDM + CACHE_LINE_SIZE_F32) + 0*(mxDM+CACHE_LINE_SIZE_F32);
  13021. float * SM = (float *) params->wdata + ith*2*(mxDM + CACHE_LINE_SIZE_F32) + 1*(mxDM+CACHE_LINE_SIZE_F32);
  13022. for (int i = M; i < Mup; ++i) {
  13023. S[i] = -INFINITY;
  13024. }
  13025. const int64_t masked_begin = masked ? (P + iq1 + 1) : M;
  13026. for (int64_t ic = 0; ic < masked_begin; ++ic) {
  13027. // k indices
  13028. const int ik1 = ic;
  13029. // S indices
  13030. const int i1 = ik1;
  13031. ggml_vec_dot_f32(neq0,
  13032. S + i1, 0,
  13033. (float *) ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)), 0,
  13034. (float *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)), 0, 1);
  13035. }
  13036. // scale
  13037. ggml_vec_scale_f32(masked_begin, S, scale);
  13038. for (int64_t i = masked_begin; i < M; i++) {
  13039. S[i] = -INFINITY;
  13040. }
  13041. // softmax
  13042. // exclude known -INF S[..] values from max and loop
13043. // don't forget to set their SM values to zero
  13044. {
  13045. float max = -INFINITY;
  13046. ggml_vec_max_f32(masked_begin, &max, S);
  13047. ggml_float sum = 0.0;
  13048. {
  13049. #ifdef GGML_SOFT_MAX_ACCELERATE
  13050. max = -max;
  13051. vDSP_vsadd(SM, 1, &max, SM, 1, Mup);
  13052. vvexpf(SM, SM, &Mup);
  13053. ggml_vec_sum_f32(Mup, &sum, SM);
  13054. #else
  13055. uint16_t scvt[GGML_SOFT_MAX_UNROLL]; UNUSED(scvt);
  13056. ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 };
  13057. for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) {
  13058. if (i >= masked_begin) {
  13059. break;
  13060. }
  13061. float * SR = S + i;
  13062. float * SW = SM + i;
  13063. for (int j = 0; j < GGML_SOFT_MAX_UNROLL; ++j) {
  13064. if (i + j >= masked_begin) {
  13065. break;
  13066. } else if (SR[j] == -INFINITY) {
  13067. SW[j] = 0.0f;
  13068. } else {
  13069. #ifndef GGML_FLASH_ATTN_EXP_FP16
  13070. const float val = expf(SR[j] - max);
  13071. #else
  13072. ggml_fp16_t s = GGML_FP32_TO_FP16(SR[j] - max);
  13073. memcpy(&scvt[j], &s, sizeof(uint16_t));
  13074. const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt[j]]);
  13075. #endif
  13076. sump[j] += (ggml_float)val;
  13077. SW[j] = val;
  13078. }
  13079. }
  13080. }
  13081. for (int i = 0; i < GGML_SOFT_MAX_UNROLL; i++) {
  13082. sum += sump[i];
  13083. }
  13084. #endif
  13085. }
  13086. assert(sum > 0.0);
  13087. sum = 1.0/sum;
  13088. ggml_vec_scale_f32(masked_begin, SM, sum);
  13089. }
  13090. // step-by-step explanation
  13091. {
  13092. // forward-process shape grads from backward process
  13093. // parallel_for ik2,ik3:
  13094. // for irep:
  13095. // iq2 = ik2 + irep*nek2
  13096. // k[:D,:M,:,:] [D,M,:,:] grad[k][:D,:M,ik2,ik3] += grad[kcur]
  13097. // q[:D,:N,:,:] [D,N,:,:] grad[q][:D,iq1,iq2,iq3] += grad[qcur]
  13098. // v[:M,:D,:,:] [M,D,:,:] grad[v][:M,:D,iv2,iv3] += grad[vcur]
  13099. // for iq1:
  13100. // kcur = k[:D,:M,ik2,ik3] [D,M,1,1] grad[kcur] = grad[S1].T @ qcur
  13101. // qcur = q[:D,iq1,iq2,iq3] [D,1,1,1] grad[qcur] = grad[S1] @ kcur
  13102. // vcur = v[:M,:D,iv2,iv3] [M,D,1,1] grad[vcur] = grad[S5].T @ S4
  13103. // S0 = -Inf [D,1,1,1]
  13104. // ~S1[i] = dot(kcur[:D,i], qcur)
  13105. // S1 = qcur @ kcur.T [M,1,1,1] grad[S1] = grad[S2] * scale
  13106. // S2 = S1 * scale [M,1,1,1] grad[S2] = diag_mask_zero(grad[S3], P)
  13107. // S3 = diag_mask_inf(S2, P) [M,1,1,1] grad[S3] = S4 * (grad[S4] - dot(S4, grad[S4]))
  13108. // S4 = softmax(S3) [M,1,1,1] grad[S4] = grad[S5] @ vcur
  13109. // ~S5[i] = dot(vcur[:,i], S4)
  13110. // S5 = S4 @ vcur.T [D,1,1,1] grad[S5] = d[:D,id1,id2,id3]
  13111. // ~dst[i,iq1,iq2,iq3] = S5[i] ^
  13112. // dst[:D,iq1,iq2,iq3] = S5 | grad[dst[:D,iq1,iq2,iq3]] = d[:D,id1,id2,id3]
  13113. // dst backward-/ grad[dst] = d
  13114. //
  13115. // output gradients with their dependencies:
  13116. //
  13117. // grad[kcur] = grad[S1].T @ qcur
  13118. // grad[S1] = diag_mask_zero(grad[S3], P) * scale
  13119. // grad[S3] = S4 * (grad[S4] - dot(S4, grad[S4]))
  13120. // grad[S4] = grad[S5] @ vcur
  13121. // grad[S4] = d[:D,id1,id2,id3] @ vcur
  13122. // grad[qcur] = grad[S1] @ kcur
  13123. // grad[vcur] = grad[S5].T @ S4
  13124. // grad[vcur] = d[:D,id1,id2,id3].T @ S4
  13125. //
  13126. // in post-order:
  13127. //
  13128. // S1 = qcur @ kcur.T
  13129. // S2 = S1 * scale
  13130. // S3 = diag_mask_inf(S2, P)
  13131. // S4 = softmax(S3)
  13132. // grad[S4] = d[:D,id1,id2,id3] @ vcur
  13133. // grad[S3] = S4 * (grad[S4] - dot(S4, grad[S4]))
  13134. // grad[S1] = diag_mask_zero(grad[S3], P) * scale
  13135. // grad[qcur] = grad[S1] @ kcur
  13136. // grad[kcur] = grad[S1].T @ qcur
  13137. // grad[vcur] = d[:D,id1,id2,id3].T @ S4
  13138. //
  13139. // using less variables (SM=S4):
  13140. //
  13141. // S = diag_mask_inf(qcur @ kcur.T * scale, P)
  13142. // SM = softmax(S)
  13143. // S = d[:D,iq1,iq2,iq3] @ vcur
  13144. // dot_SM_gradSM = dot(SM, S)
  13145. // S = SM * (S - dot(SM, S))
  13146. // S = diag_mask_zero(S, P) * scale
  13147. //
  13148. // grad[q][:D,iq1,iq2,iq3] += S @ kcur
  13149. // grad[k][:D,:M,ik2,ik3] += S.T @ qcur
  13150. // grad[v][:M,:D,iv2,iv3] += d[:D,id1,id2,id3].T @ SM
  13151. }
  13152. // S = gradSM = d[:D,id1,id2,id3] @ vcur[:,:,iv2,iv3]
  13153. // S = d[:D,id1,id2,id3] @ vcur[:,:,iv2,iv3]
  13154. // for ic:
  13155. // S[:M] += vcur[:M,ic,iv2,iv3] * d[ic,id1,id2,id3]
  13156. // exclude known future zero S[..] values from operation
  13157. ggml_vec_set_f32(masked_begin, S, 0);
  13158. for (int64_t ic = 0; ic < D; ++ic) {
  13159. ggml_vec_mad_f32(masked_begin,
  13160. S,
  13161. (float *) ((char *) v->data + ( ic*nbv1 + iv2*nbv2 + iv3*nbv3)),
  13162. *(float *) ((char *) d->data + (ic*nbd0 + id1*nbd1 + id2*nbd2 + id3*nbd3)));
  13163. }
  13164. // S = SM * (S - dot(SM, S))
  13165. float dot_SM_gradSM = 0;
  13166. ggml_vec_dot_f32 (masked_begin, &dot_SM_gradSM, 0, SM, 0, S, 0, 1);
  13167. ggml_vec_acc1_f32(M, S, -dot_SM_gradSM);
  13168. ggml_vec_mul_f32 (masked_begin, S, S, SM);
  13169. // S = diag_mask_zero(S, P) * scale
  13170. // already done by above ggml_vec_set_f32
  13171. // exclude known zero S[..] values from operation
  13172. ggml_vec_scale_f32(masked_begin, S, scale);
  13173. // S shape [M,1]
  13174. // SM shape [M,1]
  13175. // kcur shape [D,M]
  13176. // qcur shape [D,1]
  13177. // vcur shape [M,D]
  13178. // grad[q][:D,iq1,iq2,iq3] += S @ kcur
  13179. // grad[q][:D,iq1,iq2,iq3] += shape[M,1] @ shape[D,M]
  13180. // for ic:
  13181. // grad[q][:D,iq1,iq2,iq3] += S[ic] * kcur[:D,ic,ik2,ik3]
  13182. // exclude known zero S[..] values from loop
  13183. for (int64_t ic = 0; ic < masked_begin; ++ic) {
  13184. ggml_vec_mad_f32(D,
  13185. (float *) ((char *) grad_q + (iq1*nbgq1 + iq2*nbgq2 + iq3*nbgq3)),
  13186. (float *) ((char *) k->data + (ic*nbk1 + ik2*nbk2 + ik3*nbk3)),
  13187. S[ic]);
  13188. }
  13189. // grad[k][:D,:M,iq2,iq3] += S.T @ qcur
  13190. // for ic:
  13191. // grad[k][:D,ic,iq2,iq3] += S.T[0,ic] * qcur[:D,0]
  13192. // grad[k][:D,ic,iq2,iq3] += S[ic] * qcur[:D,0]
  13193. // exclude known zero S[..] values from loop
  13194. for (int64_t ic = 0; ic < masked_begin; ++ic) {
  13195. ggml_vec_mad_f32(D,
  13196. (float *) ((char *) grad_k + (ic*nbgk1 + ik2*nbgk2 + ik3*nbgk3)),
  13197. (float *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)),
  13198. S[ic]);
  13199. }
  13200. // grad[v][:M,:D,iv2,iv3] += d[:D,id1,id2,id3].T @ SM
  13201. // for ic:
  13202. // grad[v][:M,ic,iv2,iv3] += d[:D,id1,id2,id3].T[0,ic] * SM[:M]
  13203. // grad[v][:M,ic,iv2,iv3] += d[ic,id1,id2,id3] * SM[:M]
  13204. // exclude known zero SM[..] values from mad
  13205. for (int64_t ic = 0; ic < D; ++ic) {
  13206. ggml_vec_mad_f32(masked_begin,
  13207. (float *) ((char *) grad_v + ( ic*nbgv1 + iv2*nbgv2 + iv3*nbgv3)),
  13208. SM,
  13209. *(float *) ((char *) d->data + (ic*nbd0 + id1*nbd1 + id2*nbd2 + id3*nbd3)));
  13210. }
  13211. }
  13212. }
  13213. }
  13214. }
  13215. static void ggml_compute_forward_flash_attn_back(
  13216. const struct ggml_compute_params * params,
  13217. const bool masked,
  13218. struct ggml_tensor * dst) {
  13219. const struct ggml_tensor * q = dst->src[0];
  13220. switch (q->type) {
  13221. case GGML_TYPE_F32:
  13222. {
  13223. ggml_compute_forward_flash_attn_back_f32(params, masked, dst);
  13224. } break;
  13225. default:
  13226. {
  13227. GGML_ASSERT(false);
  13228. } break;
  13229. }
  13230. }
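// The key identity used above when back-propagating through the softmax is
// grad_in = y * (grad_out - dot(y, grad_out)), where y = softmax(in) -- the
// "S = SM * (S - dot(SM, S))" step. A standalone f32 sketch of that Jacobian-vector product:
#if 0
static void softmax_backward_sketch(
        const float * y,        // [n] softmax output (SM above)
        const float * grad_out, // [n] gradient w.r.t. the softmax output
        float       * grad_in,  // [n] gradient w.r.t. the softmax input
        int64_t n) {
    float dot = 0.0f;
    for (int64_t i = 0; i < n; ++i) {
        dot += y[i]*grad_out[i];
    }
    for (int64_t i = 0; i < n; ++i) {
        grad_in[i] = y[i]*(grad_out[i] - dot);
    }
}
#endif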
  13231. // ggml_compute_forward_ssm_conv
  13232. static void ggml_compute_forward_ssm_conv_f32(
  13233. const struct ggml_compute_params * params,
  13234. struct ggml_tensor * dst) {
  13235. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  13236. return;
  13237. }
  13238. const struct ggml_tensor * src0 = dst->src[0]; // conv_state
  13239. const struct ggml_tensor * src1 = dst->src[1]; // x
  13240. const struct ggml_tensor * src2 = dst->src[2]; // conv1d.weight
  13241. const struct ggml_tensor * src3 = dst->src[3]; // state_seq
  13242. const int ith = params->ith;
  13243. const int nth = params->nth;
  13244. const int nc = src2->ne[0]; // d_conv
  13245. const int nr = src0->ne[1]; // d_inner
  13246. const int n_t = src1->ne[1]; // n_tokens
  13247. const int n_kv = src0->ne[2]; // max number of sequences in the batch
  13248. GGML_ASSERT((nr*n_t) + (nc*nr*n_kv) == ggml_nelements(dst));
  13249. GGML_ASSERT(src0->nb[0] == sizeof(float));
  13250. GGML_ASSERT(src1->nb[0] == sizeof(float));
  13251. GGML_ASSERT(src2->nb[0] == sizeof(float));
  13252. GGML_ASSERT(src3->nb[0] == sizeof(int32_t));
  13253. GGML_ASSERT(src0->nb[1] == src0->ne[0]*sizeof(float));
  13254. // for use with the destination state offset between sequences
  13255. GGML_ASSERT(src2->nb[2] == src2->ne[1]*src2->ne[0]*sizeof(float));
  13256. // rows per thread
  13257. const int dr = (nr + nth - 1)/nth;
  13258. // row range for this thread
  13259. const int ir0 = dr*ith;
  13260. const int ir1 = MIN(ir0 + dr, nr);
  13261. const int ir = ir1 - ir0;
  13262. if (n_kv > 1) {
  13263. // multiple sequences means it's hard to know when it's the first time a state is read,
  13264. // so copy them all over to the destination, just to be sure.
  13265. for (int i3 = 0; i3 < n_kv; ++i3) {
  13266. float * s0 = (float *) ((char *) src0->data + ir0*(src0->nb[1]) + i3*(src0->nb[2]));
  13267. float * s = (float *) ((char *) dst->data + ir0*(src2->nb[1]) + i3*(src2->nb[2]) + nr*n_t*sizeof(float));
  13268. // can't use memcpy because of d_conv vs d_conv - 1
  13269. for (int i1 = 0; i1 < ir; ++i1) {
  13270. for (int i0 = 0; i0 < nc - 1; ++i0) {
  13271. // copy s0 to last (d_conv - 1) columns of s
  13272. s[1 + i0 + i1*nc] = s0[i0 + i1*(nc - 1)];
  13273. }
  13274. }
  13275. }
  13276. }
  13277. for (int i2 = 0; i2 < n_t; ++i2) {
  13278. int32_t * sq = (int32_t *) ((char *) src3->data + i2*(src3->nb[1])); // {n_kv, n_tokens}
  13279. float * x = (float *) ((char *) dst->data + ir0*sizeof(float) + i2*(nr*sizeof(float))); // {d_inner, n_tokens}
  13280. float * s = (float *) ((char *) dst->data + ir0*(src2->nb[1]) + sq[0]*(src2->nb[2]) + nr*n_t*sizeof(float)); // {d_conv, d_inner, n_kv}
  13281. float * s0; // {d_conv - 1, d_inner, n_kv}
  13282. float * x0 = (float *) ((char *) src1->data + ir0*(src1->nb[0]) + i2*(src1->nb[1])); // {d_inner, n_tokens}
  13283. float * c = (float *) ((char *) src2->data + ir0*(src2->nb[1])); // {d_conv, d_inner}
  13284. int ne0s0;
  13285. GGML_ASSERT(0 <= sq[0] && sq[0] < n_kv);
  13286. // avoid needing to copy the state for the first token
  13287. if (i2 == 0) {
  13288. s0 = (float *) ((char *) src0->data + ir0*(src0->nb[1]) + sq[0]*(src0->nb[2])); // {d_conv - 1, d_inner, n_kv}
  13289. ne0s0 = src0->ne[0];
  13290. } else {
  13291. // the source is the last (d_conv - 1) columns of the destination
  13292. s0 = s + 1;
  13293. ne0s0 = nc;
  13294. }
  13295. // d_inner
  13296. for (int i1 = 0; i1 < ir; ++i1) {
  13297. // shift state left
  13298. for (int i0 = 0; i0 < nc - 1; ++i0) {
  13299. s[i0 + i1*nc] = s0[i0 + i1*ne0s0];
  13300. }
  13301. // insert x on the last column
  13302. s[(nc - 1) + i1*nc] = x0[i1];
  13303. }
  13304. // handle copies when there are multiple output states
  13305. for (int i3 = 1; i3 < n_kv; ++i3) {
  13306. int32_t seq = sq[i3];
  13307. if (0 <= seq && seq < n_kv) {
  13308. float * s1 = s + (seq - sq[0])*nc*nr;
  13309. memcpy(s1, s, nc*ir*sizeof(float));
  13310. } else {
13311. // stop at negative or out-of-range seq_ids
  13312. break;
  13313. }
  13314. }
  13315. // it seems a little faster when this is separate from the state shift
  13316. for (int i1 = 0; i1 < ir; ++i1) {
  13317. // rowwise dot product
  13318. float sumf = 0.0f;
  13319. for (int i0 = 0; i0 < nc; ++i0) {
  13320. int i = i0 + i1*nc;
  13321. sumf += s[i] * c[i];
  13322. }
  13323. x[i1] = sumf;
  13324. }
  13325. }
  13326. }
  13327. static void ggml_compute_forward_ssm_conv(
  13328. const struct ggml_compute_params * params,
  13329. struct ggml_tensor * dst) {
  13330. switch (dst->src[0]->type) {
  13331. case GGML_TYPE_F32:
  13332. {
  13333. ggml_compute_forward_ssm_conv_f32(params, dst);
  13334. } break;
  13335. default:
  13336. {
  13337. GGML_ASSERT(false);
  13338. } break;
  13339. }
  13340. }
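// Per inner channel, ggml_compute_forward_ssm_conv_f32 keeps a rolling window of the last
// d_conv inputs: shift the window left by one, append the new x, then dot it with the
// d_conv convolution weights. A single-channel, single-step standalone sketch (illustrative names):
#if 0
static float ssm_conv_step_sketch(
        float       * window,   // [d_conv] rolling state for this channel
        const float * weight,   // [d_conv] conv1d weights for this channel
        float         x,        // new input for this time step
        int           d_conv) {
    for (int i = 0; i < d_conv - 1; ++i) {
        window[i] = window[i + 1];          // shift state left
    }
    window[d_conv - 1] = x;                 // insert x on the last column
    float y = 0.0f;
    for (int i = 0; i < d_conv; ++i) {
        y += window[i]*weight[i];           // rowwise dot product
    }
    return y;
}
#endif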
  13341. // ggml_compute_forward_ssm_scan
  13342. static void ggml_compute_forward_ssm_scan_f32(
  13343. const struct ggml_compute_params * params,
  13344. struct ggml_tensor * dst) {
  13345. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  13346. return;
  13347. }
  13348. const struct ggml_tensor * src0 = dst->src[0]; // s
  13349. const struct ggml_tensor * src1 = dst->src[1]; // x
  13350. const struct ggml_tensor * src2 = dst->src[2]; // dt
  13351. const struct ggml_tensor * src3 = dst->src[3]; // A
  13352. const struct ggml_tensor * src4 = dst->src[4]; // B
  13353. const struct ggml_tensor * src5 = dst->src[5]; // C
  13354. const struct ggml_tensor * src6 = dst->src[6]; // sq
  13355. const int ith = params->ith;
  13356. const int nth = params->nth;
  13357. const int64_t nc = src0->ne[0]; // d_state
  13358. const int64_t nr = src0->ne[1]; // d_inner
  13359. const int64_t n_t = src1->ne[1]; // number of tokens in the batch
  13360. const int64_t n_kv = src0->ne[2]; // max number of sequences in the batch
  13361. GGML_ASSERT(ggml_nelements(src1) + ggml_nelements(src0) == ggml_nelements(dst));
  13362. GGML_ASSERT(src0->nb[0] == sizeof(float));
  13363. GGML_ASSERT(src1->nb[0] == sizeof(float));
  13364. GGML_ASSERT(src2->nb[0] == sizeof(float));
  13365. GGML_ASSERT(src3->nb[0] == sizeof(float));
  13366. GGML_ASSERT(src4->nb[0] == sizeof(float));
  13367. GGML_ASSERT(src5->nb[0] == sizeof(float));
  13368. // required for the dot product between s and C, and when copying the states
  13369. GGML_ASSERT(src0->nb[1] == src0->ne[0]*sizeof(float));
  13370. // required for per-sequence offsets for states
  13371. GGML_ASSERT(src0->nb[2] == src0->ne[0]*src0->ne[1]*sizeof(float));
  13372. // required to get correct offset for state destination (i.e. src1->nb[2])
  13373. GGML_ASSERT(src1->nb[2] == src1->ne[0]*src1->ne[1]*sizeof(float));
  13374. // rows per thread
  13375. const int dr = (nr + nth - 1)/nth;
  13376. // row range for this thread
  13377. const int ir0 = dr*ith;
  13378. const int ir1 = MIN(ir0 + dr, nr);
  13379. const int ir = ir1 - ir0;
  13380. if (n_kv > 1) {
13381. // with multiple sequences it's hard to know whether the source states
13382. // have already been copied, so copy them all upfront to be safe.
  13383. for (int i3 = 0; i3 < n_kv; ++i3) {
  13384. float * s0 = (float *) ((char *) src0->data + ir0*(src0->nb[1]) + i3*(src0->nb[2]));
  13385. float * s = (float *) ((char *) dst->data + ir0*(src0->nb[1]) + i3*(src0->nb[2]) + src1->nb[2]);
  13386. memcpy(s, s0, nc*ir*sizeof(float));
  13387. }
  13388. }
  13389. for (int i2 = 0; i2 < n_t; ++i2) {
  13390. int32_t * sq = (int32_t *) ((char *) src6->data + i2*(src6->nb[1])); // {n_kv, n_tokens}
  13391. float * y = (float *) ((char *) dst->data + ir0*(src1->nb[0]) + i2*(src1->nb[1])); // {d_inner, n_tokens}
  13392. float * s = (float *) ((char *) dst->data + ir0*(src0->nb[1]) + sq[0]*(src0->nb[2]) + src1->nb[2]); // {d_state, d_inner, n_kv}
  13393. float * s0;
  13394. float * x = (float *) ((char *) src1->data + ir0*(src1->nb[0]) + i2*(src1->nb[1])); // {d_inner, n_tokens}
  13395. float * dt = (float *) ((char *) src2->data + ir0*(src2->nb[0]) + i2*(src2->nb[1])); // {d_inner, n_tokens}
  13396. float * A = (float *) ((char *) src3->data + ir0*(src3->nb[1])); // {d_state, d_inner}
  13397. float * B = (float *) ((char *) src4->data + i2*(src4->nb[1])); // {d_state, n_tokens}
  13398. float * C = (float *) ((char *) src5->data + i2*(src5->nb[1])); // {d_state, n_tokens}
  13399. GGML_ASSERT(0 <= sq[0] && sq[0] < n_kv);
  13400. // avoid needing to copy the state for the first token
  13401. if (i2 == 0) {
  13402. s0 = (float *) ((char *) src0->data + ir0*(src0->nb[1]) + sq[0]*(src0->nb[2])); // {d_state, d_inner, n_kv}
  13403. } else {
  13404. // otherwise the source is the same as the destination
  13405. s0 = s;
  13406. }
  13407. // d_inner
  13408. for (int i1 = 0; i1 < ir; ++i1) {
  13409. // ref: https://github.com/state-spaces/mamba/blob/34076d664838588a3c97727b263478ab9f621a07/mamba_ssm/ops/triton/selective_state_update.py#L78
  13410. float dt_soft_plus = dt[i1] <= 20.0f ? log1pf(expf(dt[i1])) : dt[i1];
  13411. float x_dt = x[i1] * dt_soft_plus;
  13412. float sumf = 0.0f;
  13413. // d_state
  13414. for (int i0 = 0; i0 < nc; ++i0) {
  13415. int i = i0 + i1*nc;
  13416. // state = prev_state * dA + dB * x
  13417. float state = (s0[i] * expf(dt_soft_plus * A[i])) + (B[i0] * x_dt);
  13418. // y = rowwise_dotprod(state, C)
  13419. sumf += state * C[i0];
  13420. s[i] = state;
  13421. }
  13422. y[i1] = sumf;
  13423. }
  13424. // handle copies when there are multiple output states
  13425. for (int i3 = 1; i3 < n_kv; ++i3) {
  13426. int32_t seq = sq[i3];
  13427. if (0 <= seq && seq < n_kv) {
  13428. float * s1 = s + (seq - sq[0])*nc*nr;
  13429. memcpy(s1, s, nc*ir*sizeof(float));
  13430. } else {
13431. // stop at negative or out-of-range seq_ids
  13432. break;
  13433. }
  13434. }
  13435. }
  13436. }
  13437. static void ggml_compute_forward_ssm_scan(
  13438. const struct ggml_compute_params * params,
  13439. struct ggml_tensor * dst) {
  13440. switch (dst->src[0]->type) {
  13441. case GGML_TYPE_F32:
  13442. {
  13443. ggml_compute_forward_ssm_scan_f32(params, dst);
  13444. } break;
  13445. default:
  13446. {
  13447. GGML_ASSERT(false);
  13448. } break;
  13449. }
  13450. }
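// Per (channel, token) the scan above applies the Mamba selective-SSM update:
// dt' = softplus(dt); state = state*exp(dt'*A) + B*x*dt'; y = dot(state, C).
// A single-channel standalone sketch of one time step (illustrative names):
#if 0
static float ssm_scan_step_sketch(
        float       * state,   // [d_state] recurrent state for this channel
        const float * A,       // [d_state] per-channel decay parameters
        const float * B,       // [d_state] input projection for this token
        const float * C,       // [d_state] output projection for this token
        float x, float dt,     // input and time-step parameter for this channel/token
        int64_t d_state) {
    const float dt_sp = dt <= 20.0f ? log1pf(expf(dt)) : dt;   // numerically safe softplus
    const float x_dt  = x*dt_sp;
    float y = 0.0f;
    for (int64_t i = 0; i < d_state; ++i) {
        state[i] = state[i]*expf(dt_sp*A[i]) + B[i]*x_dt;      // state = prev_state*dA + dB*x
        y += state[i]*C[i];                                    // y = rowwise_dotprod(state, C)
    }
    return y;
}
#endif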
  13451. // ggml_compute_forward_win_part
  13452. static void ggml_compute_forward_win_part_f32(
  13453. const struct ggml_compute_params * params,
  13454. struct ggml_tensor * dst) {
  13455. const struct ggml_tensor * src0 = dst->src[0];
  13456. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  13457. return;
  13458. }
  13459. GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne)
  13460. GGML_TENSOR_LOCALS(int64_t, ne, dst, ne)
  13461. const int32_t nep0 = ((const int32_t *)(dst->op_params))[0];
  13462. const int32_t nep1 = ((const int32_t *)(dst->op_params))[1];
  13463. const int32_t w = ((const int32_t *)(dst->op_params))[2];
  13464. assert(ne00 == ne0);
  13465. assert(ne3 == nep0*nep1);
  13466. // TODO: optimize / multi-thread
  13467. for (int py = 0; py < nep1; ++py) {
  13468. for (int px = 0; px < nep0; ++px) {
  13469. const int64_t i3 = py*nep0 + px;
  13470. for (int64_t i2 = 0; i2 < ne2; ++i2) {
  13471. for (int64_t i1 = 0; i1 < ne1; ++i1) {
  13472. for (int64_t i0 = 0; i0 < ne0; ++i0) {
  13473. const int64_t i02 = py*w + i2;
  13474. const int64_t i01 = px*w + i1;
  13475. const int64_t i00 = i0;
  13476. const int64_t i = i3*ne2*ne1*ne0 + i2*ne1*ne0 + i1*ne0 + i0;
  13477. const int64_t j = i02*ne01*ne00 + i01*ne00 + i00;
  13478. if (py*w + i2 >= ne02 || px*w + i1 >= ne01) {
  13479. ((float *) dst->data)[i] = 0.0f;
  13480. } else {
  13481. ((float *) dst->data)[i] = ((float *) src0->data)[j];
  13482. }
  13483. }
  13484. }
  13485. }
  13486. }
  13487. }
  13488. }
  13489. static void ggml_compute_forward_win_part(
  13490. const struct ggml_compute_params * params,
  13491. struct ggml_tensor * dst) {
  13492. const struct ggml_tensor * src0 = dst->src[0];
  13493. switch (src0->type) {
  13494. case GGML_TYPE_F32:
  13495. {
  13496. ggml_compute_forward_win_part_f32(params, dst);
  13497. } break;
  13498. default:
  13499. {
  13500. GGML_ASSERT(false);
  13501. } break;
  13502. }
  13503. }
  13504. // ggml_compute_forward_win_unpart
  13505. static void ggml_compute_forward_win_unpart_f32(
  13506. const struct ggml_compute_params * params,
  13507. struct ggml_tensor * dst) {
  13508. const struct ggml_tensor * src0 = dst->src[0];
  13509. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  13510. return;
  13511. }
  13512. GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne)
  13513. GGML_TENSOR_LOCALS(int64_t, ne, dst, ne)
  13514. const int32_t w = ((const int32_t *)(dst->op_params))[0];
  13515. // padding
  13516. const int px = (w - ne1%w)%w;
  13517. //const int py = (w - ne2%w)%w;
  13518. const int npx = (px + ne1)/w;
  13519. //const int npy = (py + ne2)/w;
  13520. assert(ne0 == ne00);
  13521. // TODO: optimize / multi-thread
  13522. for (int64_t i2 = 0; i2 < ne2; ++i2) {
  13523. for (int64_t i1 = 0; i1 < ne1; ++i1) {
  13524. for (int64_t i0 = 0; i0 < ne0; ++i0) {
  13525. const int ip2 = i2/w;
  13526. const int ip1 = i1/w;
  13527. const int64_t i02 = i2%w;
  13528. const int64_t i01 = i1%w;
  13529. const int64_t i00 = i0;
  13530. const int64_t i = (ip2*npx + ip1)*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00 + i00;
  13531. const int64_t j = i2*ne1*ne0 + i1*ne0 + i0;
  13532. ((float *) dst->data)[j] = ((float *) src0->data)[i];
  13533. }
  13534. }
  13535. }
  13536. }
  13537. static void ggml_compute_forward_win_unpart(
  13538. const struct ggml_compute_params * params,
  13539. struct ggml_tensor * dst) {
  13540. const struct ggml_tensor * src0 = dst->src[0];
  13541. switch (src0->type) {
  13542. case GGML_TYPE_F32:
  13543. {
  13544. ggml_compute_forward_win_unpart_f32(params, dst);
  13545. } break;
  13546. default:
  13547. {
  13548. GGML_ASSERT(false);
  13549. } break;
  13550. }
  13551. }
13552. // ggml_compute_forward_unary
  13553. static void ggml_compute_forward_unary(
  13554. const struct ggml_compute_params * params,
  13555. struct ggml_tensor * dst) {
  13556. const enum ggml_unary_op op = ggml_get_unary_op(dst);
  13557. switch (op) {
  13558. case GGML_UNARY_OP_ABS:
  13559. {
  13560. ggml_compute_forward_abs(params, dst);
  13561. } break;
  13562. case GGML_UNARY_OP_SGN:
  13563. {
  13564. ggml_compute_forward_sgn(params, dst);
  13565. } break;
  13566. case GGML_UNARY_OP_NEG:
  13567. {
  13568. ggml_compute_forward_neg(params, dst);
  13569. } break;
  13570. case GGML_UNARY_OP_STEP:
  13571. {
  13572. ggml_compute_forward_step(params, dst);
  13573. } break;
  13574. case GGML_UNARY_OP_TANH:
  13575. {
  13576. ggml_compute_forward_tanh(params, dst);
  13577. } break;
  13578. case GGML_UNARY_OP_ELU:
  13579. {
  13580. ggml_compute_forward_elu(params, dst);
  13581. } break;
  13582. case GGML_UNARY_OP_RELU:
  13583. {
  13584. ggml_compute_forward_relu(params, dst);
  13585. } break;
  13586. case GGML_UNARY_OP_SIGMOID:
  13587. {
  13588. ggml_compute_forward_sigmoid(params, dst);
  13589. } break;
  13590. case GGML_UNARY_OP_GELU:
  13591. {
  13592. ggml_compute_forward_gelu(params, dst);
  13593. } break;
  13594. case GGML_UNARY_OP_GELU_QUICK:
  13595. {
  13596. ggml_compute_forward_gelu_quick(params, dst);
  13597. } break;
  13598. case GGML_UNARY_OP_SILU:
  13599. {
  13600. ggml_compute_forward_silu(params, dst);
  13601. } break;
  13602. case GGML_UNARY_OP_HARDSWISH:
  13603. {
  13604. ggml_compute_forward_hardswish(params, dst);
  13605. } break;
  13606. case GGML_UNARY_OP_HARDSIGMOID:
  13607. {
  13608. ggml_compute_forward_hardsigmoid(params, dst);
  13609. } break;
  13610. default:
  13611. {
  13612. GGML_ASSERT(false);
  13613. } break;
  13614. }
  13615. }
  13616. // ggml_compute_forward_get_rel_pos
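// gathers rows of src0 by relative position, as in the referenced SAM image
// encoder: for output indices (i1, i2) the row (w - i1 - 1) + i2 of src0 is
// copied into dst, with w = ne1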
  13617. static void ggml_compute_forward_get_rel_pos_f16(
  13618. const struct ggml_compute_params * params,
  13619. struct ggml_tensor * dst) {
  13620. const struct ggml_tensor * src0 = dst->src[0];
  13621. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  13622. return;
  13623. }
  13624. // ref: https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/modeling/image_encoder.py#L292-L322
  13625. GGML_TENSOR_UNARY_OP_LOCALS
  13626. const int64_t w = ne1;
  13627. ggml_fp16_t * src0_data = (ggml_fp16_t *) src0->data;
  13628. ggml_fp16_t * dst_data = (ggml_fp16_t *) dst->data;
  13629. for (int64_t i2 = 0; i2 < ne2; ++i2) {
  13630. for (int64_t i1 = 0; i1 < ne1; ++i1) {
  13631. const int64_t pos = (w - i1 - 1) + i2;
  13632. for (int64_t i0 = 0; i0 < ne0; ++i0) {
  13633. dst_data[i2*ne1*ne0 + i1*ne0 + i0] = src0_data[pos*ne00 + i0];
  13634. }
  13635. }
  13636. }
  13637. }
  13638. static void ggml_compute_forward_get_rel_pos(
  13639. const struct ggml_compute_params * params,
  13640. struct ggml_tensor * dst) {
  13641. const struct ggml_tensor * src0 = dst->src[0];
  13642. switch (src0->type) {
  13643. case GGML_TYPE_F16:
  13644. case GGML_TYPE_BF16:
  13645. {
  13646. ggml_compute_forward_get_rel_pos_f16(params, dst);
  13647. } break;
  13648. default:
  13649. {
  13650. GGML_ASSERT(false);
  13651. } break;
  13652. }
  13653. }
  13654. // ggml_compute_forward_add_rel_pos
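// adds the decomposed relative-position terms to the attention scores, roughly
// attn += rel_h + rel_w as in the referenced SAM code: each element of src2 is
// broadcast over a contiguous run of ne10 outputs (jdh + j) and each element of
// src1 over a strided run (jdw + j*ne10); when inplace == false the INIT phase
// first copies src0 into dst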
  13655. static void ggml_compute_forward_add_rel_pos_f32(
  13656. const struct ggml_compute_params * params,
  13657. struct ggml_tensor * dst) {
  13658. const struct ggml_tensor * src0 = dst->src[0];
  13659. const struct ggml_tensor * src1 = dst->src[1];
  13660. const struct ggml_tensor * src2 = dst->src[2];
  13661. const bool inplace = (bool) ((int32_t *) dst->op_params)[0];
  13662. if (!inplace && params->type == GGML_TASK_TYPE_INIT) {
  13663. if (params->ith != 0) {
  13664. return;
  13665. }
  13666. memcpy((char *) dst->data, (char *) src0->data, ggml_nbytes(dst));
  13667. return;
  13668. }
  13669. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  13670. return;
  13671. }
  13672. int64_t t0 = ggml_perf_time_us();
  13673. UNUSED(t0);
  13674. // ref: https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/modeling/image_encoder.py#L357-L359
  13675. float * src1_data = (float *) src1->data;
  13676. float * src2_data = (float *) src2->data;
  13677. float * dst_data = (float *) dst->data;
  13678. const int64_t ne10 = src1->ne[0];
  13679. const int64_t ne11 = src1->ne[1];
  13680. const int64_t ne12 = src1->ne[2];
  13681. const int64_t ne13 = src1->ne[3];
  13682. const int ith = params->ith;
  13683. const int nth = params->nth;
  13684. // total patches in dst
  13685. const int np = ne13;
  13686. // patches per thread
  13687. const int dp = (np + nth - 1)/nth;
  13688. // patch range for this thread
  13689. const int ip0 = dp*ith;
  13690. const int ip1 = MIN(ip0 + dp, np);
  13691. for (int64_t i13 = ip0; i13 < ip1; ++i13) {
  13692. for (int64_t i12 = 0; i12 < ne12; ++i12) {
  13693. for (int64_t i11 = 0; i11 < ne11; ++i11) {
  13694. const int64_t jp1 = i13*ne12*ne11*ne10 + i12*ne11*ne10 + i11*ne10;
  13695. for (int64_t i10 = 0; i10 < ne10; ++i10) {
  13696. const int64_t jp0 = jp1 + i10;
  13697. const float src1_e = src1_data[jp0];
  13698. const float src2_e = src2_data[jp0];
  13699. const int64_t jdh = jp0 * ne10;
  13700. const int64_t jdw = jdh - (ne10 - 1) * i10;
  13701. for (int64_t j = 0; j < ne10; ++j) {
  13702. dst_data[jdh + j ] += src2_e;
  13703. dst_data[jdw + j*ne10] += src1_e;
  13704. }
  13705. }
  13706. }
  13707. }
  13708. }
  13709. }
  13710. static void ggml_compute_forward_add_rel_pos(
  13711. const struct ggml_compute_params * params,
  13712. struct ggml_tensor * dst) {
  13713. const struct ggml_tensor * src0 = dst->src[0];
  13714. switch (src0->type) {
  13715. case GGML_TYPE_F32:
  13716. {
  13717. ggml_compute_forward_add_rel_pos_f32(params, dst);
  13718. } break;
  13719. default:
  13720. {
  13721. GGML_ASSERT(false);
  13722. } break;
  13723. }
  13724. }
  13725. // ggml_compute_forward_map_unary
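// applies a user-supplied float row function to every row of src0; the callback
// signature implied by the call below is, roughly:
//   void my_unary_op(const int n, float * dst_row, const float * src_row);
// (my_unary_op is an illustrative name) and it is invoked once per row with nc elements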
  13726. static void ggml_compute_forward_map_unary_f32(
  13727. const struct ggml_compute_params * params,
  13728. struct ggml_tensor * dst,
  13729. const ggml_unary_op_f32_t fun) {
  13730. const struct ggml_tensor * src0 = dst->src[0];
  13731. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  13732. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  13733. return;
  13734. }
  13735. const int n = ggml_nrows(src0);
  13736. const int nc = src0->ne[0];
  13737. assert( dst->nb[0] == sizeof(float));
  13738. assert(src0->nb[0] == sizeof(float));
  13739. for (int i = 0; i < n; i++) {
  13740. fun(nc,
  13741. (float *) ((char *) dst->data + i*( dst->nb[1])),
  13742. (float *) ((char *) src0->data + i*(src0->nb[1])));
  13743. }
  13744. }
  13745. static void ggml_compute_forward_map_unary(
  13746. const struct ggml_compute_params * params,
  13747. struct ggml_tensor * dst,
  13748. const ggml_unary_op_f32_t fun) {
  13749. const struct ggml_tensor * src0 = dst->src[0];
  13750. switch (src0->type) {
  13751. case GGML_TYPE_F32:
  13752. {
  13753. ggml_compute_forward_map_unary_f32(params, dst, fun);
  13754. } break;
  13755. default:
  13756. {
  13757. GGML_ASSERT(false);
  13758. } break;
  13759. }
  13760. }
  13761. // ggml_compute_forward_map_binary
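// same pattern as map_unary but with two inputs of identical shape; the callback
// receives the element count, the destination row and the two source rows, e.g.:
//   void my_binary_op(const int n, float * dst_row, const float * a_row, const float * b_row);
// (my_binary_op is an illustrative name)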
  13762. static void ggml_compute_forward_map_binary_f32(
  13763. const struct ggml_compute_params * params,
  13764. struct ggml_tensor * dst,
  13765. const ggml_binary_op_f32_t fun) {
  13766. const struct ggml_tensor * src0 = dst->src[0];
  13767. const struct ggml_tensor * src1 = dst->src[1];
  13768. assert(params->ith == 0);
  13769. assert(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
  13770. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  13771. return;
  13772. }
  13773. const int n = ggml_nrows(src0);
  13774. const int nc = src0->ne[0];
  13775. assert( dst->nb[0] == sizeof(float));
  13776. assert(src0->nb[0] == sizeof(float));
  13777. assert(src1->nb[0] == sizeof(float));
  13778. for (int i = 0; i < n; i++) {
  13779. fun(nc,
  13780. (float *) ((char *) dst->data + i*( dst->nb[1])),
  13781. (float *) ((char *) src0->data + i*(src0->nb[1])),
  13782. (float *) ((char *) src1->data + i*(src1->nb[1])));
  13783. }
  13784. }
  13785. static void ggml_compute_forward_map_binary(
  13786. const struct ggml_compute_params * params,
  13787. struct ggml_tensor * dst,
  13788. const ggml_binary_op_f32_t fun) {
  13789. const struct ggml_tensor * src0 = dst->src[0];
  13790. switch (src0->type) {
  13791. case GGML_TYPE_F32:
  13792. {
  13793. ggml_compute_forward_map_binary_f32(params, dst, fun);
  13794. } break;
  13795. default:
  13796. {
  13797. GGML_ASSERT(false);
  13798. } break;
  13799. }
  13800. }
  13801. // ggml_compute_forward_map_custom1
  13802. static void ggml_compute_forward_map_custom1_f32(
  13803. const struct ggml_compute_params * params,
  13804. struct ggml_tensor * dst,
  13805. const ggml_custom1_op_f32_t fun) {
  13806. const struct ggml_tensor * a = dst->src[0];
  13807. assert(params->ith == 0);
  13808. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  13809. return;
  13810. }
  13811. fun(dst, a);
  13812. }
  13813. // ggml_compute_forward_map_custom2
  13814. static void ggml_compute_forward_map_custom2_f32(
  13815. const struct ggml_compute_params * params,
  13816. struct ggml_tensor * dst,
  13817. const ggml_custom2_op_f32_t fun) {
  13818. const struct ggml_tensor * a = dst->src[0];
  13819. const struct ggml_tensor * b = dst->src[1];
  13820. assert(params->ith == 0);
  13821. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  13822. return;
  13823. }
  13824. fun(dst, a, b);
  13825. }
  13826. // ggml_compute_forward_map_custom3
  13827. static void ggml_compute_forward_map_custom3_f32(
  13828. const struct ggml_compute_params * params,
  13829. struct ggml_tensor * dst,
  13830. const ggml_custom3_op_f32_t fun) {
  13831. const struct ggml_tensor * a = dst->src[0];
  13832. const struct ggml_tensor * b = dst->src[1];
13833. const struct ggml_tensor * c = dst->src[2];
  13834. assert(params->ith == 0);
  13835. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  13836. return;
  13837. }
  13838. fun(dst, a, b, c);
  13839. }
  13840. // ggml_compute_forward_map_custom1
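// unlike the *_f32 variants above, the map_custom{1,2,3} callbacks below receive
// the thread index (ith), the thread count (nth) and a userdata pointer taken from
// op_params, so they are expected to split the work across threads themselves;
// the implied custom1 signature is roughly:
//   void my_custom1_op(struct ggml_tensor * dst, const struct ggml_tensor * a,
//                      int ith, int nth, void * userdata);
// (my_custom1_op is an illustrative name)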
  13841. static void ggml_compute_forward_map_custom1(
  13842. const struct ggml_compute_params * params,
  13843. struct ggml_tensor * dst) {
  13844. const struct ggml_tensor * a = dst->src[0];
  13845. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  13846. return;
  13847. }
  13848. struct ggml_map_custom1_op_params p;
  13849. memcpy(&p, dst->op_params, sizeof(p));
  13850. p.fun(dst, a, params->ith, params->nth, p.userdata);
  13851. }
  13852. // ggml_compute_forward_map_custom2
  13853. static void ggml_compute_forward_map_custom2(
  13854. const struct ggml_compute_params * params,
  13855. struct ggml_tensor * dst) {
  13856. const struct ggml_tensor * a = dst->src[0];
  13857. const struct ggml_tensor * b = dst->src[1];
  13858. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  13859. return;
  13860. }
  13861. struct ggml_map_custom2_op_params p;
  13862. memcpy(&p, dst->op_params, sizeof(p));
  13863. p.fun(dst, a, b, params->ith, params->nth, p.userdata);
  13864. }
  13865. // ggml_compute_forward_map_custom3
  13866. static void ggml_compute_forward_map_custom3(
  13867. const struct ggml_compute_params * params,
  13868. struct ggml_tensor * dst) {
  13869. const struct ggml_tensor * a = dst->src[0];
  13870. const struct ggml_tensor * b = dst->src[1];
  13871. const struct ggml_tensor * c = dst->src[2];
  13872. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  13873. return;
  13874. }
  13875. struct ggml_map_custom3_op_params p;
  13876. memcpy(&p, dst->op_params, sizeof(p));
  13877. p.fun(dst, a, b, c, params->ith, params->nth, p.userdata);
  13878. }
  13879. // ggml_compute_forward_cross_entropy_loss
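// computes a single scalar:
//   loss = -(1/nr) * sum over rows of sum_i s1[i] * log((1 - eps)*softmax(s0)[i] + eps)
// where s0 is a row of src0 (logits), s1 the matching row of src1 (target
// probabilities) and nr the number of rows; each thread accumulates its partial
// sum into sums[ith] (in wdata) and the FINALIZE phase reduces and scales by -1/nr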
  13880. static void ggml_compute_forward_cross_entropy_loss_f32(
  13881. const struct ggml_compute_params * params,
  13882. struct ggml_tensor * dst) {
  13883. const struct ggml_tensor * src0 = dst->src[0];
  13884. const struct ggml_tensor * src1 = dst->src[1];
  13885. GGML_ASSERT(ggml_is_contiguous(src0));
  13886. GGML_ASSERT(ggml_is_contiguous(src1));
  13887. GGML_ASSERT(ggml_is_scalar(dst));
  13888. GGML_ASSERT(ggml_are_same_shape(src0, src1));
  13889. const int ith = params->ith;
  13890. const int nth = params->nth;
  13891. float * sums = (float *) params->wdata;
  13892. // TODO: handle transposed/permuted matrices
  13893. const int nc = src0->ne[0];
  13894. const int nr = ggml_nrows(src0);
  13895. GGML_ASSERT(params->wsize >= sizeof(float) * (nth + nth * nc));
  13896. if (params->type == GGML_TASK_TYPE_INIT) {
  13897. if (ith == 0) {
  13898. memset(sums, 0, sizeof(float) * (nth + nth * nc));
  13899. }
  13900. return;
  13901. }
  13902. if (params->type == GGML_TASK_TYPE_FINALIZE) {
  13903. if (ith == 0) {
  13904. float * dp = (float *) dst->data;
  13905. ggml_vec_sum_f32(nth, dp, sums);
  13906. dp[0] *= -1.0f / (float) nr;
  13907. }
  13908. return;
  13909. }
  13910. const double eps = 1e-9;
  13911. // rows per thread
  13912. const int dr = (nr + nth - 1)/nth;
  13913. // row range for this thread
  13914. const int ir0 = dr*ith;
  13915. const int ir1 = MIN(ir0 + dr, nr);
  13916. for (int i1 = ir0; i1 < ir1; i1++) {
  13917. float * s0 = (float *)((char *) src0->data + i1*src0->nb[1]);
  13918. float * s1 = (float *)((char *) src1->data + i1*src1->nb[1]);
  13919. float * st = ((float *) params->wdata) + nth + ith*nc;
  13920. #ifndef NDEBUG
  13921. for (int i = 0; i < nc; ++i) {
  13922. //printf("p[%d] = %f\n", i, p[i]);
  13923. assert(!isnan(s0[i]));
  13924. assert(!isnan(s1[i]));
  13925. }
  13926. #endif
  13927. // soft_max
  13928. ggml_float sum = 0.0;
  13929. {
  13930. float max = -INFINITY;
  13931. ggml_vec_max_f32(nc, &max, s0);
  13932. uint16_t scvt; UNUSED(scvt);
  13933. for (int i = 0; i < nc; i++) {
  13934. if (s0[i] == -INFINITY) {
  13935. st[i] = 0.0f;
  13936. } else {
  13937. #ifndef GGML_CROSS_ENTROPY_EXP_FP16
  13938. const float s = s0[i] - max;
  13939. const float val = expf(s);
  13940. #else
  13941. ggml_fp16_t s = GGML_FP32_TO_FP16(s0[i] - max);
  13942. memcpy(&scvt, &s, sizeof(scvt));
  13943. const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt]);
  13944. #endif
  13945. sum += (ggml_float)val;
  13946. st[i] = val;
  13947. }
  13948. }
  13949. assert(sum > 0.0);
  13950. // sum = 1.0/sum;
  13951. }
  13952. // avoid log(0) by rescaling from [0..1] to [eps..1]
  13953. sum = (1.0 - eps) / sum;
  13954. ggml_vec_scale_f32(nc, st, sum);
  13955. ggml_vec_add1_f32(nc, st, st, eps);
  13956. ggml_vec_log_f32(nc, st, st);
  13957. ggml_vec_mul_f32(nc, st, st, s1);
  13958. float st_sum = 0;
  13959. ggml_vec_sum_f32(nc, &st_sum, st);
  13960. sums[ith] += st_sum;
  13961. #ifndef NDEBUG
  13962. for (int i = 0; i < nc; ++i) {
  13963. assert(!isnan(st[i]));
  13964. assert(!isinf(st[i]));
  13965. }
  13966. #endif
  13967. }
  13968. }
  13969. static void ggml_compute_forward_cross_entropy_loss(
  13970. const struct ggml_compute_params * params,
  13971. struct ggml_tensor * dst) {
  13972. const struct ggml_tensor * src0 = dst->src[0];
  13973. switch (src0->type) {
  13974. case GGML_TYPE_F32:
  13975. {
  13976. ggml_compute_forward_cross_entropy_loss_f32(params, dst);
  13977. } break;
  13978. default:
  13979. {
  13980. GGML_ASSERT(false);
  13981. } break;
  13982. }
  13983. }
  13984. // ggml_compute_forward_cross_entropy_loss_back
  13985. static void ggml_compute_forward_cross_entropy_loss_back_f32(
  13986. const struct ggml_compute_params * params,
  13987. struct ggml_tensor * dst) {
  13988. const struct ggml_tensor * src0 = dst->src[0];
  13989. const struct ggml_tensor * src1 = dst->src[1];
  13990. const struct ggml_tensor * opt0 = dst->src[2];
  13991. GGML_ASSERT(ggml_is_contiguous(dst));
  13992. GGML_ASSERT(ggml_is_contiguous(src0));
  13993. GGML_ASSERT(ggml_is_contiguous(src1));
  13994. GGML_ASSERT(ggml_is_contiguous(opt0));
  13995. GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
  13996. const int64_t ith = params->ith;
  13997. const int64_t nth = params->nth;
  13998. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  13999. return;
  14000. }
  14001. const double eps = 1e-9;
  14002. // TODO: handle transposed/permuted matrices
  14003. const int64_t nc = src0->ne[0];
  14004. const int64_t nr = ggml_nrows(src0);
  14005. // rows per thread
  14006. const int64_t dr = (nr + nth - 1)/nth;
  14007. // row range for this thread
  14008. const int64_t ir0 = dr*ith;
  14009. const int64_t ir1 = MIN(ir0 + dr, nr);
  14010. float * d = (float *) opt0->data;
  14011. for (int64_t i1 = ir0; i1 < ir1; i1++) {
  14012. float * ds0 = (float *)((char *) dst->data + i1*dst->nb[1]);
  14013. float * s0 = (float *)((char *) src0->data + i1*src0->nb[1]);
  14014. float * s1 = (float *)((char *) src1->data + i1*src1->nb[1]);
  14015. #ifndef NDEBUG
  14016. for (int i = 0; i < nc; ++i) {
  14017. //printf("p[%d] = %f\n", i, p[i]);
  14018. assert(!isnan(s0[i]));
  14019. assert(!isnan(s1[i]));
  14020. }
  14021. #endif
  14022. // soft_max
  14023. ggml_float sum = 0.0;
  14024. {
  14025. float max = -INFINITY;
  14026. ggml_vec_max_f32(nc, &max, s0);
  14027. uint16_t scvt; UNUSED(scvt);
  14028. for (int i = 0; i < nc; i++) {
  14029. if (s0[i] == -INFINITY) {
  14030. ds0[i] = 0.0f;
  14031. } else {
  14032. #ifndef GGML_CROSS_ENTROPY_EXP_FP16
  14033. const float s = s0[i] - max;
  14034. const float val = expf(s);
  14035. #else
  14036. ggml_fp16_t s = GGML_FP32_TO_FP16(s0[i] - max);
  14037. memcpy(&scvt, &s, sizeof(scvt));
  14038. const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt]);
  14039. #endif
  14040. sum += (ggml_float)val;
  14041. ds0[i] = val;
  14042. }
  14043. }
  14044. assert(sum > 0.0);
  14045. sum = (1.0 - eps)/sum;
  14046. }
  14047. // grad(src0) = (softmax(src0) - src1) * grad(cross_entropy_loss(src0, src1)) / nr
  14048. ggml_vec_scale_f32(nc, ds0, sum);
  14049. ggml_vec_add1_f32(nc, ds0, ds0, eps);
  14050. ggml_vec_sub_f32(nc, ds0, ds0, s1);
  14051. ggml_vec_scale_f32(nc, ds0, d[0] / (float) nr);
  14052. #ifndef NDEBUG
  14053. for (int i = 0; i < nc; ++i) {
  14054. assert(!isnan(ds0[i]));
  14055. assert(!isinf(ds0[i]));
  14056. }
  14057. #endif
  14058. }
  14059. }
  14060. static void ggml_compute_forward_cross_entropy_loss_back(
  14061. const struct ggml_compute_params * params,
  14062. struct ggml_tensor * dst) {
  14063. const struct ggml_tensor * src0 = dst->src[0];
  14064. switch (src0->type) {
  14065. case GGML_TYPE_F32:
  14066. {
  14067. ggml_compute_forward_cross_entropy_loss_back_f32(params, dst);
  14068. } break;
  14069. default:
  14070. {
  14071. GGML_ASSERT(false);
  14072. } break;
  14073. }
  14074. }
  14075. /////////////////////////////////
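// main per-node forward dispatch: called by the graph-compute workers for every
// node in the graph; switches on tensor->op and forwards to the corresponding
// ggml_compute_forward_* implementation above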
  14076. static void ggml_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor) {
  14077. GGML_ASSERT(params);
  14078. if (tensor->op == GGML_OP_NONE || ggml_is_empty(tensor)) {
  14079. return;
  14080. }
  14081. switch (tensor->op) {
  14082. case GGML_OP_DUP:
  14083. {
  14084. ggml_compute_forward_dup(params, tensor);
  14085. } break;
  14086. case GGML_OP_ADD:
  14087. {
  14088. ggml_compute_forward_add(params, tensor);
  14089. } break;
  14090. case GGML_OP_ADD1:
  14091. {
  14092. ggml_compute_forward_add1(params, tensor);
  14093. } break;
  14094. case GGML_OP_ACC:
  14095. {
  14096. ggml_compute_forward_acc(params, tensor);
  14097. } break;
  14098. case GGML_OP_SUB:
  14099. {
  14100. ggml_compute_forward_sub(params, tensor);
  14101. } break;
  14102. case GGML_OP_MUL:
  14103. {
  14104. ggml_compute_forward_mul(params, tensor);
  14105. } break;
  14106. case GGML_OP_DIV:
  14107. {
  14108. ggml_compute_forward_div(params, tensor);
  14109. } break;
  14110. case GGML_OP_SQR:
  14111. {
  14112. ggml_compute_forward_sqr(params, tensor);
  14113. } break;
  14114. case GGML_OP_SQRT:
  14115. {
  14116. ggml_compute_forward_sqrt(params, tensor);
  14117. } break;
  14118. case GGML_OP_LOG:
  14119. {
  14120. ggml_compute_forward_log(params, tensor);
  14121. } break;
  14122. case GGML_OP_SUM:
  14123. {
  14124. ggml_compute_forward_sum(params, tensor);
  14125. } break;
  14126. case GGML_OP_SUM_ROWS:
  14127. {
  14128. ggml_compute_forward_sum_rows(params, tensor);
  14129. } break;
  14130. case GGML_OP_MEAN:
  14131. {
  14132. ggml_compute_forward_mean(params, tensor);
  14133. } break;
  14134. case GGML_OP_ARGMAX:
  14135. {
  14136. ggml_compute_forward_argmax(params, tensor);
  14137. } break;
  14138. case GGML_OP_REPEAT:
  14139. {
  14140. ggml_compute_forward_repeat(params, tensor);
  14141. } break;
  14142. case GGML_OP_REPEAT_BACK:
  14143. {
  14144. ggml_compute_forward_repeat_back(params, tensor);
  14145. } break;
  14146. case GGML_OP_CONCAT:
  14147. {
  14148. ggml_compute_forward_concat(params, tensor);
  14149. } break;
  14150. case GGML_OP_SILU_BACK:
  14151. {
  14152. ggml_compute_forward_silu_back(params, tensor);
  14153. } break;
  14154. case GGML_OP_NORM:
  14155. {
  14156. ggml_compute_forward_norm(params, tensor);
  14157. } break;
  14158. case GGML_OP_RMS_NORM:
  14159. {
  14160. ggml_compute_forward_rms_norm(params, tensor);
  14161. } break;
  14162. case GGML_OP_RMS_NORM_BACK:
  14163. {
  14164. ggml_compute_forward_rms_norm_back(params, tensor);
  14165. } break;
  14166. case GGML_OP_GROUP_NORM:
  14167. {
  14168. ggml_compute_forward_group_norm(params, tensor);
  14169. } break;
  14170. case GGML_OP_MUL_MAT:
  14171. {
  14172. ggml_compute_forward_mul_mat(params, tensor);
  14173. } break;
  14174. case GGML_OP_MUL_MAT_ID:
  14175. {
  14176. ggml_compute_forward_mul_mat_id(params, tensor);
  14177. } break;
  14178. case GGML_OP_OUT_PROD:
  14179. {
  14180. ggml_compute_forward_out_prod(params, tensor);
  14181. } break;
  14182. case GGML_OP_SCALE:
  14183. {
  14184. ggml_compute_forward_scale(params, tensor);
  14185. } break;
  14186. case GGML_OP_SET:
  14187. {
  14188. ggml_compute_forward_set(params, tensor);
  14189. } break;
  14190. case GGML_OP_CPY:
  14191. {
  14192. ggml_compute_forward_cpy(params, tensor);
  14193. } break;
  14194. case GGML_OP_CONT:
  14195. {
  14196. ggml_compute_forward_cont(params, tensor);
  14197. } break;
  14198. case GGML_OP_RESHAPE:
  14199. {
  14200. ggml_compute_forward_reshape(params, tensor);
  14201. } break;
  14202. case GGML_OP_VIEW:
  14203. {
  14204. ggml_compute_forward_view(params, tensor);
  14205. } break;
  14206. case GGML_OP_PERMUTE:
  14207. {
  14208. ggml_compute_forward_permute(params, tensor);
  14209. } break;
  14210. case GGML_OP_TRANSPOSE:
  14211. {
  14212. ggml_compute_forward_transpose(params, tensor);
  14213. } break;
  14214. case GGML_OP_GET_ROWS:
  14215. {
  14216. ggml_compute_forward_get_rows(params, tensor);
  14217. } break;
  14218. case GGML_OP_GET_ROWS_BACK:
  14219. {
  14220. ggml_compute_forward_get_rows_back(params, tensor);
  14221. } break;
  14222. case GGML_OP_DIAG:
  14223. {
  14224. ggml_compute_forward_diag(params, tensor);
  14225. } break;
  14226. case GGML_OP_DIAG_MASK_INF:
  14227. {
  14228. ggml_compute_forward_diag_mask_inf(params, tensor);
  14229. } break;
  14230. case GGML_OP_DIAG_MASK_ZERO:
  14231. {
  14232. ggml_compute_forward_diag_mask_zero(params, tensor);
  14233. } break;
  14234. case GGML_OP_SOFT_MAX:
  14235. {
  14236. ggml_compute_forward_soft_max(params, tensor);
  14237. } break;
  14238. case GGML_OP_SOFT_MAX_BACK:
  14239. {
  14240. ggml_compute_forward_soft_max_back(params, tensor);
  14241. } break;
  14242. case GGML_OP_ROPE:
  14243. {
  14244. ggml_compute_forward_rope(params, tensor);
  14245. } break;
  14246. case GGML_OP_ROPE_BACK:
  14247. {
  14248. ggml_compute_forward_rope_back(params, tensor);
  14249. } break;
  14250. case GGML_OP_CLAMP:
  14251. {
  14252. ggml_compute_forward_clamp(params, tensor);
  14253. } break;
  14254. case GGML_OP_CONV_TRANSPOSE_1D:
  14255. {
  14256. ggml_compute_forward_conv_transpose_1d(params, tensor);
  14257. } break;
  14258. case GGML_OP_IM2COL:
  14259. {
  14260. ggml_compute_forward_im2col(params, tensor);
  14261. } break;
  14262. case GGML_OP_CONV_TRANSPOSE_2D:
  14263. {
  14264. ggml_compute_forward_conv_transpose_2d(params, tensor);
  14265. } break;
  14266. case GGML_OP_POOL_1D:
  14267. {
  14268. ggml_compute_forward_pool_1d(params, tensor);
  14269. } break;
  14270. case GGML_OP_POOL_2D:
  14271. {
  14272. ggml_compute_forward_pool_2d(params, tensor);
  14273. } break;
  14274. case GGML_OP_UPSCALE:
  14275. {
  14276. ggml_compute_forward_upscale(params, tensor);
  14277. } break;
  14278. case GGML_OP_PAD:
  14279. {
  14280. ggml_compute_forward_pad(params, tensor);
  14281. } break;
  14282. case GGML_OP_ARANGE:
  14283. {
  14284. ggml_compute_forward_arange(params, tensor);
  14285. } break;
  14286. case GGML_OP_TIMESTEP_EMBEDDING:
  14287. {
  14288. ggml_compute_forward_timestep_embedding(params, tensor);
  14289. } break;
  14290. case GGML_OP_ARGSORT:
  14291. {
  14292. ggml_compute_forward_argsort(params, tensor);
  14293. } break;
  14294. case GGML_OP_LEAKY_RELU:
  14295. {
  14296. ggml_compute_forward_leaky_relu(params, tensor);
  14297. } break;
  14298. case GGML_OP_FLASH_ATTN:
  14299. {
  14300. const int32_t t = ggml_get_op_params_i32(tensor, 0);
  14301. GGML_ASSERT(t == 0 || t == 1);
  14302. const bool masked = t != 0;
  14303. ggml_compute_forward_flash_attn(params, masked, tensor);
  14304. } break;
  14305. case GGML_OP_FLASH_ATTN_EXT:
  14306. {
  14307. ggml_compute_forward_flash_attn_ext(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor->src[3], tensor);
  14308. } break;
  14309. case GGML_OP_FLASH_FF:
  14310. {
  14311. ggml_compute_forward_flash_ff(params, tensor);
  14312. } break;
  14313. case GGML_OP_FLASH_ATTN_BACK:
  14314. {
  14315. int32_t t = ggml_get_op_params_i32(tensor, 0);
  14316. GGML_ASSERT(t == 0 || t == 1);
  14317. bool masked = t != 0;
  14318. ggml_compute_forward_flash_attn_back(params, masked, tensor);
  14319. } break;
  14320. case GGML_OP_SSM_CONV:
  14321. {
  14322. ggml_compute_forward_ssm_conv(params, tensor);
  14323. } break;
  14324. case GGML_OP_SSM_SCAN:
  14325. {
  14326. ggml_compute_forward_ssm_scan(params, tensor);
  14327. } break;
  14328. case GGML_OP_WIN_PART:
  14329. {
  14330. ggml_compute_forward_win_part(params, tensor);
  14331. } break;
  14332. case GGML_OP_WIN_UNPART:
  14333. {
  14334. ggml_compute_forward_win_unpart(params, tensor);
  14335. } break;
  14336. case GGML_OP_UNARY:
  14337. {
  14338. ggml_compute_forward_unary(params, tensor);
  14339. } break;
  14340. case GGML_OP_GET_REL_POS:
  14341. {
  14342. ggml_compute_forward_get_rel_pos(params, tensor);
  14343. } break;
  14344. case GGML_OP_ADD_REL_POS:
  14345. {
  14346. ggml_compute_forward_add_rel_pos(params, tensor);
  14347. } break;
  14348. case GGML_OP_MAP_UNARY:
  14349. {
  14350. ggml_unary_op_f32_t fun;
  14351. memcpy(&fun, tensor->op_params, sizeof(fun));
  14352. ggml_compute_forward_map_unary(params, tensor, fun);
  14353. }
  14354. break;
  14355. case GGML_OP_MAP_BINARY:
  14356. {
  14357. ggml_binary_op_f32_t fun;
  14358. memcpy(&fun, tensor->op_params, sizeof(fun));
  14359. ggml_compute_forward_map_binary(params, tensor, fun);
  14360. }
  14361. break;
  14362. case GGML_OP_MAP_CUSTOM1_F32:
  14363. {
  14364. ggml_custom1_op_f32_t fun;
  14365. memcpy(&fun, tensor->op_params, sizeof(fun));
  14366. ggml_compute_forward_map_custom1_f32(params, tensor, fun);
  14367. }
  14368. break;
  14369. case GGML_OP_MAP_CUSTOM2_F32:
  14370. {
  14371. ggml_custom2_op_f32_t fun;
  14372. memcpy(&fun, tensor->op_params, sizeof(fun));
  14373. ggml_compute_forward_map_custom2_f32(params, tensor, fun);
  14374. }
  14375. break;
  14376. case GGML_OP_MAP_CUSTOM3_F32:
  14377. {
  14378. ggml_custom3_op_f32_t fun;
  14379. memcpy(&fun, tensor->op_params, sizeof(fun));
  14380. ggml_compute_forward_map_custom3_f32(params, tensor, fun);
  14381. }
  14382. break;
  14383. case GGML_OP_MAP_CUSTOM1:
  14384. {
  14385. ggml_compute_forward_map_custom1(params, tensor);
  14386. }
  14387. break;
  14388. case GGML_OP_MAP_CUSTOM2:
  14389. {
  14390. ggml_compute_forward_map_custom2(params, tensor);
  14391. }
  14392. break;
  14393. case GGML_OP_MAP_CUSTOM3:
  14394. {
  14395. ggml_compute_forward_map_custom3(params, tensor);
  14396. }
  14397. break;
  14398. case GGML_OP_CROSS_ENTROPY_LOSS:
  14399. {
  14400. ggml_compute_forward_cross_entropy_loss(params, tensor);
  14401. }
  14402. break;
  14403. case GGML_OP_CROSS_ENTROPY_LOSS_BACK:
  14404. {
  14405. ggml_compute_forward_cross_entropy_loss_back(params, tensor);
  14406. }
  14407. break;
  14408. case GGML_OP_NONE:
  14409. {
  14410. // nop
  14411. } break;
  14412. case GGML_OP_COUNT:
  14413. {
  14414. GGML_ASSERT(false);
  14415. } break;
  14416. }
  14417. }
  14418. ////////////////////////////////////////////////////////////////////////////////
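// small open-addressing hash set keyed by tensor pointers (identity hash plus
// linear probing over a prime-sized table); used e.g. to track visited nodes and
// still-zero gradients while building graphs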
  14419. static size_t ggml_hash_size(size_t min_sz) {
  14420. // next primes after powers of two
  14421. static const size_t primes[] = {
  14422. 2, 3, 5, 11, 17, 37, 67, 131, 257, 521, 1031,
  14423. 2053, 4099, 8209, 16411, 32771, 65537, 131101,
  14424. 262147, 524309, 1048583, 2097169, 4194319, 8388617,
  14425. 16777259, 33554467, 67108879, 134217757, 268435459,
  14426. 536870923, 1073741827, 2147483659
  14427. };
  14428. static const size_t n_primes = sizeof(primes)/sizeof(primes[0]);
14430. // find the smallest prime that is larger than or equal to min_sz
  14430. size_t l = 0;
  14431. size_t r = n_primes;
  14432. while (l < r) {
  14433. size_t m = (l + r)/2;
  14434. if (primes[m] < min_sz) {
  14435. l = m + 1;
  14436. } else {
  14437. r = m;
  14438. }
  14439. }
  14440. size_t sz = l < n_primes ? primes[l] : min_sz | 1;
  14441. return sz;
  14442. }
  14443. static size_t ggml_hash(const void * p) {
  14444. return (size_t)p;
  14445. }
  14446. size_t ggml_hash_find(const struct ggml_hash_set hash_set, struct ggml_tensor * key) {
  14447. size_t h = ggml_hash(key) % hash_set.size;
  14448. // linear probing
  14449. size_t i = h;
  14450. while (hash_set.keys[i] != NULL && hash_set.keys[i] != key) {
  14451. i = (i + 1) % hash_set.size;
  14452. if (i == h) {
  14453. // visited all hash table entries -> not found
  14454. return GGML_HASHTABLE_FULL;
  14455. }
  14456. }
  14457. return i;
  14458. }
  14459. bool ggml_hash_contains(struct ggml_hash_set hash_set, struct ggml_tensor * key) {
  14460. size_t i = ggml_hash_find(hash_set, key);
  14461. return i != GGML_HASHTABLE_FULL && hash_set.keys[i] == key;
  14462. }
  14463. size_t ggml_hash_insert(struct ggml_hash_set hash_set, struct ggml_tensor * key) {
  14464. size_t i = ggml_hash_find(hash_set, key);
  14465. GGML_ASSERT(i != GGML_HASHTABLE_FULL);
  14466. if (hash_set.keys[i] == key) {
  14467. return GGML_HASHTABLE_ALREADY_EXISTS;
  14468. }
  14469. // insert
  14470. GGML_ASSERT(hash_set.keys[i] == NULL);
  14471. hash_set.keys[i] = key;
  14472. return i;
  14473. }
  14474. size_t ggml_hash_find_or_insert(struct ggml_hash_set hash_set, struct ggml_tensor * key) {
  14475. size_t i = ggml_hash_find(hash_set, key);
  14476. GGML_ASSERT(i != GGML_HASHTABLE_FULL);
  14477. hash_set.keys[i] = key;
  14478. return i;
  14479. }
  14480. struct ggml_hash_set ggml_hash_set_new(size_t size) {
  14481. size = ggml_hash_size(size);
  14482. struct ggml_hash_set result;
  14483. result.size = size;
  14484. result.keys = GGML_MALLOC(sizeof(struct ggml_tensor *) * size);
  14485. memset(result.keys, 0, sizeof(struct ggml_tensor *) * size);
  14486. return result;
  14487. }
  14488. static void ggml_hash_set_free(struct ggml_hash_set hash_set) {
  14489. GGML_FREE(hash_set.keys);
  14490. }
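// usage sketch (illustrative only):
//   struct ggml_hash_set set = ggml_hash_set_new(n_expected_keys);
//   if (ggml_hash_insert(set, node) == GGML_HASHTABLE_ALREADY_EXISTS) {
//       // node was already present
//   }
//   bool seen = ggml_hash_contains(set, node);
//   ggml_hash_set_free(set);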
  14491. struct hash_map {
  14492. struct ggml_hash_set set;
  14493. struct ggml_tensor ** vals;
  14494. };
  14495. static struct hash_map * ggml_new_hash_map(size_t size) {
  14496. struct hash_map * result = GGML_MALLOC(sizeof(struct hash_map));
  14497. result->set = ggml_hash_set_new(size);
  14498. result->vals = GGML_MALLOC(sizeof(struct ggml_tensor *) * result->set.size);
  14499. memset(result->vals, 0, sizeof(struct ggml_tensor *) * result->set.size);
  14500. return result;
  14501. }
  14502. static void ggml_hash_map_free(struct hash_map * map) {
  14503. ggml_hash_set_free(map->set);
  14504. GGML_FREE(map->vals);
  14505. GGML_FREE(map);
  14506. }
  14507. // gradient checkpointing
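// instead of keeping every intermediate activation alive for the backward pass,
// the backward graph is rewritten so that non-checkpoint intermediates are
// recomputed (cloned) from the nearest user-provided checkpoints; this trades
// extra forward computation for a smaller memory footprint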
  14508. static struct ggml_tensor * ggml_recompute_graph_node(
  14509. struct ggml_context * ctx,
  14510. struct ggml_cgraph * graph,
  14511. struct hash_map * replacements,
  14512. struct ggml_tensor * node) {
  14513. if (node == NULL) {
  14514. return NULL;
  14515. }
  14516. if (node->flags & GGML_TENSOR_FLAG_PARAM) {
  14517. return node;
  14518. }
  14519. if (!ggml_hash_contains(graph->visited_hash_table, node)) {
  14520. return node;
  14521. }
  14522. int count_children = 0;
  14523. for (int k = 0; k < GGML_MAX_SRC; ++k) {
  14524. if (node->src[k]) {
  14525. ++count_children;
  14526. }
  14527. }
  14528. if (count_children == 0) {
  14529. return node;
  14530. }
  14531. size_t i = ggml_hash_find(replacements->set, node);
  14532. GGML_ASSERT(i != GGML_HASHTABLE_FULL); // assert that not full
  14533. if (replacements->set.keys[i] == node) {
  14534. return replacements->vals[i];
  14535. }
  14536. struct ggml_tensor * clone = ggml_new_tensor(ctx, node->type, GGML_MAX_DIMS, node->ne);
  14537. // insert clone into replacements
  14538. GGML_ASSERT(replacements->set.keys[i] == NULL); // assert that we don't overwrite
  14539. replacements->set.keys[i] = node;
  14540. replacements->vals[i] = clone;
  14541. clone->op = node->op;
  14542. clone->grad = node->grad;
  14543. clone->flags = node->flags;
  14544. clone->extra = node->extra;
  14545. for (int k = 0; k < GGML_MAX_DIMS; ++k) {
  14546. clone->nb[k] = node->nb[k];
  14547. }
  14548. for (int k = 0; k < GGML_MAX_SRC; ++k) {
  14549. clone->src[k] = ggml_recompute_graph_node(ctx, graph, replacements, node->src[k]);
  14550. }
  14551. if (node->view_src != NULL) {
  14552. clone->data = (node->view_src->data == NULL)
  14553. ? NULL // view_src not yet allocated
  14554. : (char *) node->view_src->data // view_src already allocated
  14555. + node->view_offs;
  14556. clone->view_src = node->view_src;
  14557. clone->view_offs = node->view_offs;
  14558. }
  14559. GGML_ASSERT(sizeof(node->op_params) == sizeof(int32_t) * (GGML_MAX_OP_PARAMS / sizeof(int32_t)));
  14560. GGML_ASSERT(sizeof(node->name) == GGML_MAX_NAME);
  14561. memcpy(clone->op_params, node->op_params, sizeof(node->op_params));
  14562. ggml_format_name(clone, "%s (clone)", ggml_get_name(node));
  14563. return clone;
  14564. }
  14565. void ggml_build_backward_gradient_checkpointing(
  14566. struct ggml_context * ctx,
  14567. struct ggml_cgraph * gf,
  14568. struct ggml_cgraph * gb,
  14569. struct ggml_cgraph * gb_tmp,
  14570. struct ggml_tensor * * checkpoints,
  14571. int n_checkpoints) {
  14572. ggml_graph_cpy(gf, gb_tmp);
  14573. ggml_build_backward_expand(ctx, gf, gb_tmp, true);
  14574. if (n_checkpoints <= 0) {
  14575. ggml_graph_cpy(gb_tmp, gb);
  14576. return;
  14577. }
  14578. struct hash_map * replacements = ggml_new_hash_map(gf->n_nodes + gf->n_leafs + n_checkpoints);
  14579. // insert checkpoints in replacements
  14580. for (int i = 0; i < n_checkpoints; ++i) {
  14581. size_t k = ggml_hash_find(replacements->set, checkpoints[i]);
  14582. GGML_ASSERT(k != GGML_HASHTABLE_FULL); // assert that not full
  14583. GGML_ASSERT(replacements->set.keys[k] == NULL); // assert that we don't overwrite
  14584. replacements->set.keys[k] = checkpoints[i];
  14585. replacements->vals[k] = checkpoints[i];
  14586. }
  14587. ggml_graph_cpy(gf, gb);
  14588. // rewrite gb_tmp->nodes[gf->n_nodes:gb_tmp->n_nodes],
  14589. // replacing references to gb_tmp->nodes[0:gf->n_nodes] ( == gf->nodes[0:gf->n_nodes]),
  14590. // by recomputing them from checkpoints
  14591. for (int i = gf->n_nodes; i<gb_tmp->n_nodes; ++i) {
  14592. struct ggml_tensor * node = gb_tmp->nodes[i];
  14593. for (int k = 0; k < GGML_MAX_SRC; ++k) {
14594. // insert new tensors that recompute src, reusing replacements that were already made,
14595. // and remember the replacements: new tensors are mapped from their corresponding gf nodes;
14596. // recurse into the input tensors,
14597. // terminating when an input tensor is itself a replacement (such as a checkpoint)
  14598. node->src[k] = ggml_recompute_graph_node(ctx, gf, replacements, node->src[k]);
  14599. }
  14600. // insert rewritten backward node with replacements made into resulting backward graph gb
  14601. ggml_build_forward_expand(gb, node);
  14602. }
  14603. ggml_hash_map_free(replacements);
  14604. }
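// typical call sequence (sketch; gf/gb/gb_tmp are graphs with gradients enabled,
// e.g. created with ggml_new_graph_custom(ctx, size, /*grads=*/true)):
//   ggml_build_forward_expand(gf, loss);
//   ggml_build_backward_gradient_checkpointing(ctx, gf, gb, gb_tmp, checkpoints, n_checkpoints);
//   // gb now holds the forward graph plus a backward pass that recomputes
//   // intermediate nodes from the given checkpoints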
14605. // functions to change gradients, considering the case that input a might be the initial gradient with zero value
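// e.g. ggml_add_or_set(ctx, a, b, zero_table) returns just b when a is listed in
// zero_table (i.e. is known to still be the zero initial gradient), so no redundant
// add node is created; the other helpers apply the same shortcut to acc, add1 and sub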
  14606. static struct ggml_tensor * ggml_add_or_set(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, struct ggml_hash_set zero_table) {
  14607. if (ggml_hash_contains(zero_table, a)) {
  14608. return b;
  14609. } else {
  14610. return ggml_add_impl(ctx, a, b, false);
  14611. }
  14612. }
  14613. static struct ggml_tensor * ggml_acc_or_set(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, size_t nb1, size_t nb2, size_t nb3, size_t offset, struct ggml_hash_set zero_table) {
  14614. if (ggml_hash_contains(zero_table, a)) {
  14615. struct ggml_tensor * a_zero = ggml_scale(ctx, a, 0.0f);
  14616. return ggml_acc_impl(ctx, a_zero, b, nb1, nb2, nb3, offset, false);
  14617. } else {
  14618. return ggml_acc_impl(ctx, a, b, nb1, nb2, nb3, offset, false);
  14619. }
  14620. }
  14621. static struct ggml_tensor * ggml_add1_or_set(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, struct ggml_hash_set zero_table) {
  14622. if (ggml_hash_contains(zero_table, a)) {
  14623. return ggml_repeat(ctx, b, a);
  14624. } else {
  14625. return ggml_add1_impl(ctx, a, b, false);
  14626. }
  14627. }
  14628. static struct ggml_tensor * ggml_sub_or_set(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, struct ggml_hash_set zero_table) {
  14629. if (ggml_hash_contains(zero_table, a)) {
  14630. return ggml_neg(ctx, b);
  14631. } else {
  14632. return ggml_sub_impl(ctx, a, b, false);
  14633. }
  14634. }
  14635. static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor * tensor, struct ggml_hash_set zero_table) {
  14636. struct ggml_tensor * src0 = tensor->src[0];
  14637. struct ggml_tensor * src1 = tensor->src[1];
  14638. switch (tensor->op) {
  14639. case GGML_OP_DUP:
  14640. {
  14641. if (src0->grad) {
  14642. src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
  14643. }
  14644. } break;
  14645. case GGML_OP_ADD:
  14646. {
  14647. if (src0->grad) {
  14648. src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
  14649. }
  14650. if (src1->grad) {
  14651. src1->grad = ggml_add_or_set(ctx, src1->grad, tensor->grad, zero_table);
  14652. }
  14653. } break;
  14654. case GGML_OP_ADD1:
  14655. {
  14656. if (src0->grad) {
  14657. src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
  14658. }
  14659. if (src1->grad) {
  14660. src1->grad = ggml_add_or_set(ctx,
  14661. src1->grad,
  14662. ggml_mean(ctx, tensor->grad), // TODO: should probably be sum instead of mean
  14663. zero_table);
  14664. }
  14665. } break;
  14666. case GGML_OP_ACC:
  14667. {
  14668. if (src0->grad) {
  14669. src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
  14670. }
  14671. if (src1->grad) {
  14672. const size_t nb1 = ((int32_t *) tensor->op_params)[0];
  14673. const size_t nb2 = ((int32_t *) tensor->op_params)[1];
  14674. const size_t nb3 = ((int32_t *) tensor->op_params)[2];
  14675. const size_t offset = ((int32_t *) tensor->op_params)[3];
  14676. struct ggml_tensor * tensor_grad_view = ggml_view_4d(ctx,
  14677. tensor->grad,
  14678. src1->grad->ne[0],
  14679. src1->grad->ne[1],
  14680. src1->grad->ne[2],
  14681. src1->grad->ne[3],
  14682. nb1, nb2, nb3, offset);
  14683. src1->grad =
  14684. ggml_add_or_set(ctx,
  14685. src1->grad,
  14686. ggml_reshape(ctx,
  14687. ggml_cont(ctx, tensor_grad_view),
  14688. src1->grad),
  14689. zero_table);
  14690. }
  14691. } break;
  14692. case GGML_OP_SUB:
  14693. {
  14694. if (src0->grad) {
  14695. src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
  14696. }
  14697. if (src1->grad) {
  14698. src1->grad = ggml_sub_or_set(ctx, src1->grad, tensor->grad, zero_table);
  14699. }
  14700. } break;
  14701. case GGML_OP_MUL:
  14702. {
  14703. if (src0->grad) {
  14704. src0->grad =
  14705. ggml_add_or_set(ctx,
  14706. src0->grad,
  14707. ggml_mul(ctx, src1, tensor->grad),
  14708. zero_table);
  14709. }
  14710. if (src1->grad) {
  14711. src1->grad =
  14712. ggml_add_or_set(ctx,
  14713. src1->grad,
  14714. ggml_mul(ctx, src0, tensor->grad),
  14715. zero_table);
  14716. }
  14717. } break;
  14718. case GGML_OP_DIV:
  14719. {
  14720. if (src0->grad) {
  14721. src0->grad =
  14722. ggml_add_or_set(ctx,
  14723. src0->grad,
  14724. ggml_div(ctx, tensor->grad, src1),
  14725. zero_table);
  14726. }
  14727. if (src1->grad) {
  14728. src1->grad =
  14729. ggml_sub_or_set(ctx,
  14730. src1->grad,
  14731. ggml_mul(ctx,
  14732. tensor->grad,
  14733. ggml_div(ctx, tensor, src1)),
  14734. zero_table);
  14735. }
  14736. } break;
  14737. case GGML_OP_SQR:
  14738. {
  14739. if (src0->grad) {
  14740. src0->grad =
  14741. ggml_add_or_set(ctx,
  14742. src0->grad,
  14743. ggml_scale(ctx,
  14744. ggml_mul(ctx, src0, tensor->grad),
  14745. 2.0f),
  14746. zero_table);
  14747. }
  14748. } break;
  14749. case GGML_OP_SQRT:
  14750. {
  14751. if (src0->grad) {
  14752. src0->grad =
  14753. ggml_add_or_set(ctx,
  14754. src0->grad,
  14755. ggml_scale(ctx,
  14756. ggml_div(ctx,
  14757. tensor->grad,
  14758. tensor),
  14759. 0.5f),
  14760. zero_table);
  14761. }
  14762. } break;
  14763. case GGML_OP_LOG:
  14764. {
  14765. if (src0->grad) {
  14766. src0->grad =
  14767. ggml_add_or_set(ctx,
  14768. src0->grad,
  14769. ggml_div(ctx,
  14770. tensor->grad,
  14771. src0),
  14772. zero_table);
  14773. }
  14774. } break;
  14775. case GGML_OP_SUM:
  14776. {
  14777. if (src0->grad) {
  14778. src0->grad =
  14779. ggml_add1_or_set(ctx,
  14780. src0->grad,
  14781. tensor->grad,
  14782. zero_table);
  14783. }
  14784. } break;
  14785. case GGML_OP_SUM_ROWS:
  14786. {
  14787. if (src0->grad) {
  14788. src0->grad =
  14789. ggml_add_or_set(ctx,
  14790. src0->grad,
  14791. ggml_repeat(ctx,
  14792. tensor->grad,
  14793. src0->grad),
  14794. zero_table);
  14795. }
  14796. } break;
  14797. case GGML_OP_MEAN:
  14798. case GGML_OP_ARGMAX:
  14799. {
  14800. GGML_ASSERT(false); // TODO: implement
  14801. } break;
  14802. case GGML_OP_REPEAT:
  14803. {
  14804. // necessary for llama
  14805. if (src0->grad) {
  14806. src0->grad = ggml_add_or_set(ctx,
  14807. src0->grad,
  14808. ggml_repeat_back(ctx, tensor->grad, src0->grad),
  14809. zero_table);
  14810. }
  14811. } break;
  14812. case GGML_OP_REPEAT_BACK:
  14813. {
  14814. if (src0->grad) {
  14815. // TODO: test this
  14816. src0->grad = ggml_add_or_set(ctx,
  14817. src0->grad,
  14818. ggml_repeat(ctx, tensor->grad, src0->grad),
  14819. zero_table);
  14820. }
  14821. } break;
  14822. case GGML_OP_CONCAT:
  14823. {
  14824. GGML_ASSERT(false); // TODO: implement
  14825. } break;
  14826. case GGML_OP_SILU_BACK:
  14827. {
  14828. GGML_ASSERT(false); // TODO: not implemented
  14829. } break;
  14830. case GGML_OP_NORM:
  14831. {
  14832. GGML_ASSERT(false); // TODO: not implemented
  14833. } break;
  14834. case GGML_OP_RMS_NORM:
  14835. {
  14836. // necessary for llama
  14837. if (src0->grad) {
  14838. float eps;
  14839. memcpy(&eps, tensor->op_params, sizeof(float));
  14840. src0->grad = ggml_add_or_set(ctx,
  14841. src0->grad,
  14842. ggml_rms_norm_back(ctx, src0, tensor->grad, eps),
  14843. zero_table);
  14844. }
  14845. } break;
  14846. case GGML_OP_RMS_NORM_BACK:
  14847. {
  14848. GGML_ASSERT(false); // TODO: not implemented
  14849. } break;
  14850. case GGML_OP_GROUP_NORM:
  14851. {
  14852. GGML_ASSERT(false); // TODO: not implemented
  14853. } break;
  14854. case GGML_OP_MUL_MAT:
  14855. {
  14856. // https://cs231n.github.io/optimization-2/#staged
  14857. // # forward pass
  14858. // s0 = np.random.randn(5, 10)
  14859. // s1 = np.random.randn(10, 3)
  14860. // t = s0.dot(s1)
  14861. // # now suppose we had the gradient on t from above in the circuit
  14862. // dt = np.random.randn(*t.shape) # same shape as t
  14863. // ds0 = dt.dot(s1.T) #.T gives the transpose of the matrix
  14864. // ds1 = t.T.dot(dt)
  14865. // tensor.shape [m,p,qq,rr]
  14866. // src0.shape [n,m,q1,r1]
  14867. // src1.shape [n,p,qq,rr]
  14868. // necessary for llama
  14869. if (src0->grad) {
  14870. struct ggml_tensor * s1_tg =
  14871. ggml_out_prod(ctx, // [n,m,qq,rr]
  14872. src1, // [n,p,qq,rr]
  14873. tensor->grad); // [m,p,qq,rr]
  14874. const int64_t qq = s1_tg->ne[2];
  14875. const int64_t rr = s1_tg->ne[3];
  14876. const int64_t q1 = src0->ne[2];
  14877. const int64_t r1 = src0->ne[3];
  14878. const bool ne2_broadcasted = qq > q1;
  14879. const bool ne3_broadcasted = rr > r1;
  14880. if (ne2_broadcasted || ne3_broadcasted) {
  14881. // sum broadcast repetitions of s1_tg into shape of src0
  14882. s1_tg = ggml_repeat_back(ctx, s1_tg, src0);
  14883. }
  14884. src0->grad =
  14885. ggml_add_or_set(ctx,
  14886. src0->grad, // [n,m,q1,r1]
  14887. s1_tg, // [n,m,q1,r1]
  14888. zero_table);
  14889. }
  14890. if (src1->grad) {
  14891. src1->grad =
  14892. ggml_add_or_set(ctx,
  14893. src1->grad, // [n,p,qq,rr]
  14894. // ggml_mul_mat(ctx, // [n,p,qq,rr]
  14895. // ggml_cont(ctx, // [m,n,q1,r1]
  14896. // ggml_transpose(ctx, src0)), // [m,n,q1,r1]
  14897. // tensor->grad), // [m,p,qq,rr]
  14898. // // when src0 is bigger than tensor->grad (this is mostly the case in llama),
  14899. // // avoid transpose of src0, rather transpose smaller tensor->grad
  14900. // // and then use ggml_out_prod
  14901. ggml_out_prod(ctx, // [n,p,qq,rr]
  14902. src0, // [n,m,q1,r1]
  14903. ggml_transpose(ctx, // [p,m,qq,rr]
  14904. tensor->grad)), // [m,p,qq,rr]
  14905. zero_table);
  14906. }
  14907. } break;
  14908. case GGML_OP_MUL_MAT_ID:
  14909. {
  14910. GGML_ASSERT(false); // TODO: not implemented
  14911. } break;
  14912. case GGML_OP_OUT_PROD:
  14913. {
  14914. GGML_ASSERT(false); // TODO: not implemented
  14915. } break;
  14916. case GGML_OP_SCALE:
  14917. {
  14918. // necessary for llama
  14919. if (src0->grad) {
  14920. float s;
  14921. memcpy(&s, tensor->op_params, sizeof(float));
  14922. src0->grad =
  14923. ggml_add_or_set(ctx,
  14924. src0->grad,
  14925. ggml_scale_impl(ctx, tensor->grad, s, false),
  14926. zero_table);
  14927. }
  14928. } break;
  14929. case GGML_OP_SET:
  14930. {
  14931. const size_t nb1 = ((int32_t *) tensor->op_params)[0];
  14932. const size_t nb2 = ((int32_t *) tensor->op_params)[1];
  14933. const size_t nb3 = ((int32_t *) tensor->op_params)[2];
  14934. const size_t offset = ((int32_t *) tensor->op_params)[3];
  14935. struct ggml_tensor * tensor_grad_view = NULL;
  14936. if (src0->grad || src1->grad) {
  14937. GGML_ASSERT(src0->type == tensor->type);
  14938. GGML_ASSERT(tensor->grad->type == tensor->type);
  14939. GGML_ASSERT(tensor->grad->type == src1->grad->type);
  14940. tensor_grad_view = ggml_view_4d(ctx,
  14941. tensor->grad,
  14942. src1->grad->ne[0],
  14943. src1->grad->ne[1],
  14944. src1->grad->ne[2],
  14945. src1->grad->ne[3],
  14946. nb1, nb2, nb3, offset);
  14947. }
  14948. if (src0->grad) {
  14949. src0->grad = ggml_add_or_set(ctx,
  14950. src0->grad,
  14951. ggml_acc_impl(ctx,
  14952. tensor->grad,
  14953. ggml_neg(ctx, tensor_grad_view),
  14954. nb1, nb2, nb3, offset, false),
  14955. zero_table);
  14956. }
  14957. if (src1->grad) {
  14958. src1->grad =
  14959. ggml_add_or_set(ctx,
  14960. src1->grad,
  14961. ggml_reshape(ctx,
  14962. ggml_cont(ctx, tensor_grad_view),
  14963. src1->grad),
  14964. zero_table);
  14965. }
  14966. } break;
  14967. case GGML_OP_CPY:
  14968. {
  14969. // necessary for llama
  14970. // cpy overwrites value of src1 by src0 and returns view(src1)
  14971. // the overwriting is mathematically equivalent to:
  14972. // tensor = src0 * 1 + src1 * 0
  14973. if (src0->grad) {
  14974. // dsrc0 = dtensor * 1
  14975. src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
  14976. }
  14977. if (src1->grad) {
  14978. // dsrc1 = dtensor * 0 -> noop
  14979. }
  14980. } break;
  14981. case GGML_OP_CONT:
  14982. {
  14983. // same as cpy
  14984. if (src0->grad) {
  14985. GGML_ASSERT(ggml_is_contiguous(src0->grad));
  14986. GGML_ASSERT(ggml_is_contiguous(tensor->grad));
  14987. src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
  14988. }
  14989. } break;
  14990. case GGML_OP_RESHAPE:
  14991. {
  14992. // necessary for llama
  14993. if (src0->grad) {
  14994. src0->grad =
  14995. ggml_add_or_set(ctx, src0->grad,
  14996. ggml_reshape(ctx,
  14997. ggml_is_contiguous(tensor->grad)
  14998. ? tensor->grad
  14999. : ggml_cont(ctx, tensor->grad),
  15000. src0->grad),
  15001. zero_table);
  15002. }
  15003. } break;
  15004. case GGML_OP_VIEW:
  15005. {
  15006. // necessary for llama
  15007. if (src0->grad) {
  15008. size_t offset;
  15009. memcpy(&offset, tensor->op_params, sizeof(offset));
  15010. size_t nb1 = tensor->nb[1];
  15011. size_t nb2 = tensor->nb[2];
  15012. size_t nb3 = tensor->nb[3];
  15013. if (src0->type != src0->grad->type) {
  15014. // gradient is typically F32, but src0 could be other type
  15015. size_t ng = ggml_element_size(src0->grad);
  15016. size_t n0 = ggml_element_size(src0);
  15017. GGML_ASSERT(offset % n0 == 0);
  15018. GGML_ASSERT(nb1 % n0 == 0);
  15019. GGML_ASSERT(nb2 % n0 == 0);
  15020. GGML_ASSERT(nb3 % n0 == 0);
  15021. offset = (offset / n0) * ng;
  15022. nb1 = (nb1 / n0) * ng;
  15023. nb2 = (nb2 / n0) * ng;
  15024. nb3 = (nb3 / n0) * ng;
  15025. }
  15026. src0->grad = ggml_acc_or_set(ctx, src0->grad, tensor->grad, nb1, nb2, nb3, offset, zero_table);
  15027. }
  15028. } break;
  15029. case GGML_OP_PERMUTE:
  15030. {
  15031. // necessary for llama
  15032. if (src0->grad) {
  15033. int32_t * axes = (int32_t *) tensor->op_params;
  15034. int axis0 = axes[0] & 0x3;
  15035. int axis1 = axes[1] & 0x3;
  15036. int axis2 = axes[2] & 0x3;
  15037. int axis3 = axes[3] & 0x3;
  15038. int axes_backward[4] = {0,0,0,0};
  15039. axes_backward[axis0] = 0;
  15040. axes_backward[axis1] = 1;
  15041. axes_backward[axis2] = 2;
  15042. axes_backward[axis3] = 3;
  15043. src0->grad =
  15044. ggml_add_or_set(ctx, src0->grad,
  15045. ggml_permute(ctx,
  15046. tensor->grad,
  15047. axes_backward[0],
  15048. axes_backward[1],
  15049. axes_backward[2],
  15050. axes_backward[3]),
  15051. zero_table);
  15052. }
  15053. } break;
  15054. case GGML_OP_TRANSPOSE:
  15055. {
  15056. // necessary for llama
  15057. if (src0->grad) {
  15058. src0->grad =
  15059. ggml_add_or_set(ctx, src0->grad,
  15060. ggml_transpose(ctx, tensor->grad),
  15061. zero_table);
  15062. }
  15063. } break;
  15064. case GGML_OP_GET_ROWS:
  15065. {
  15066. // necessary for llama (only for tokenizer)
  15067. if (src0->grad) {
  15068. src0->grad =
  15069. ggml_add_or_set(ctx, src0->grad,
  15070. // last ggml_get_rows_back argument src0->grad is only
  15071. // necessary to setup correct output shape
  15072. ggml_get_rows_back(ctx, tensor->grad, src1, src0->grad),
  15073. zero_table);
  15074. }
  15075. if (src1->grad) {
  15076. // noop
  15077. }
  15078. } break;
  15079. case GGML_OP_GET_ROWS_BACK:
  15080. {
  15081. GGML_ASSERT(false); // TODO: not implemented
  15082. } break;
  15083. case GGML_OP_DIAG:
  15084. {
  15085. GGML_ASSERT(false); // TODO: not implemented
  15086. } break;
  15087. case GGML_OP_DIAG_MASK_INF:
  15088. {
  15089. // necessary for llama
  15090. if (src0->grad) {
  15091. const int n_past = ((int32_t *) tensor->op_params)[0];
  15092. src0->grad =
  15093. ggml_add_or_set(ctx, src0->grad,
  15094. /* ggml_diag_mask_inf_impl() shouldn't be here */
  15095. /* ref: https://github.com/ggerganov/llama.cpp/pull/4203#discussion_r1412377992 */
  15096. ggml_diag_mask_zero_impl(ctx, tensor->grad, n_past, false),
  15097. zero_table);
  15098. }
  15099. } break;
  15100. case GGML_OP_DIAG_MASK_ZERO:
  15101. {
  15102. // necessary for llama
  15103. if (src0->grad) {
  15104. const int n_past = ((int32_t *) tensor->op_params)[0];
  15105. src0->grad =
  15106. ggml_add_or_set(ctx, src0->grad,
  15107. ggml_diag_mask_zero_impl(ctx, tensor->grad, n_past, false),
  15108. zero_table);
  15109. }
  15110. } break;
  15111. case GGML_OP_SOFT_MAX:
  15112. {
  15113. // necessary for llama
  15114. if (src0->grad) {
  15115. src0->grad =
  15116. ggml_add_or_set(ctx, src0->grad,
  15117. ggml_soft_max_back(ctx, tensor->grad, tensor),
  15118. zero_table);
  15119. }
  15120. } break;
  15121. case GGML_OP_SOFT_MAX_BACK:
  15122. {
  15123. GGML_ASSERT(false); // TODO: not implemented
  15124. } break;
  15125. case GGML_OP_ROPE:
  15126. {
  15127. // necessary for llama
  15128. if (src0->grad) {
  15129. //const int n_past = ((int32_t *) tensor->op_params)[0];
  15130. const int n_dims = ((int32_t *) tensor->op_params)[1];
  15131. const int mode = ((int32_t *) tensor->op_params)[2];
  15132. const int n_ctx = ((int32_t *) tensor->op_params)[3];
  15133. const int n_orig_ctx = ((int32_t *) tensor->op_params)[4];
  15134. float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow, xpos_base, xpos_down;
  15135. memcpy(&freq_base, (int32_t *) tensor->op_params + 5, sizeof(float));
  15136. memcpy(&freq_scale, (int32_t *) tensor->op_params + 6, sizeof(float));
  15137. memcpy(&ext_factor, (int32_t *) tensor->op_params + 7, sizeof(float));
  15138. memcpy(&attn_factor, (int32_t *) tensor->op_params + 8, sizeof(float));
  15139. memcpy(&beta_fast, (int32_t *) tensor->op_params + 9, sizeof(float));
  15140. memcpy(&beta_slow, (int32_t *) tensor->op_params + 10, sizeof(float));
  15141. memcpy(&xpos_base, (int32_t *) tensor->op_params + 11, sizeof(float));
  15142. memcpy(&xpos_down, (int32_t *) tensor->op_params + 12, sizeof(bool));
  15143. src0->grad = ggml_add_or_set(ctx,
  15144. src0->grad,
  15145. ggml_rope_back(ctx,
  15146. tensor->grad,
  15147. src1,
  15148. n_dims,
  15149. mode,
  15150. n_ctx,
  15151. n_orig_ctx,
  15152. freq_base,
  15153. freq_scale,
  15154. ext_factor,
  15155. attn_factor,
  15156. beta_fast,
  15157. beta_slow,
  15158. xpos_base,
  15159. xpos_down),
  15160. zero_table);
  15161. }
  15162. } break;
  15163. case GGML_OP_ROPE_BACK:
  15164. {
  15165. if (src0->grad) {
  15166. //const int n_past = ((int32_t *) tensor->op_params)[0];
  15167. const int n_dims = ((int32_t *) tensor->op_params)[1];
  15168. const int mode = ((int32_t *) tensor->op_params)[2];
  15169. const int n_ctx = ((int32_t *) tensor->op_params)[3];
  15170. const int n_orig_ctx = ((int32_t *) tensor->op_params)[4];
  15171. float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow, xpos_base, xpos_down;
  15172. memcpy(&freq_base, (int32_t *) tensor->op_params + 5, sizeof(float));
  15173. memcpy(&freq_scale, (int32_t *) tensor->op_params + 6, sizeof(float));
  15174. memcpy(&ext_factor, (int32_t *) tensor->op_params + 7, sizeof(float));
  15175. memcpy(&attn_factor, (int32_t *) tensor->op_params + 8, sizeof(float));
  15176. memcpy(&beta_fast, (int32_t *) tensor->op_params + 9, sizeof(float));
  15177. memcpy(&beta_slow, (int32_t *) tensor->op_params + 10, sizeof(float));
  15178. memcpy(&xpos_base, (int32_t *) tensor->op_params + 11, sizeof(float));
  15179. memcpy(&xpos_down, (int32_t *) tensor->op_params + 12, sizeof(bool));
  15180. src0->grad = ggml_add_or_set(ctx,
  15181. src0->grad,
  15182. ggml_rope_impl(ctx,
  15183. tensor->grad,
  15184. src1,
  15185. n_dims,
  15186. mode,
  15187. n_ctx,
  15188. n_orig_ctx,
  15189. freq_base,
  15190. freq_scale,
  15191. ext_factor,
  15192. attn_factor,
  15193. beta_fast,
  15194. beta_slow,
  15195. xpos_base,
  15196. xpos_down,
  15197. false),
  15198. zero_table);
  15199. }
  15200. } break;
  15201. case GGML_OP_CLAMP:
  15202. {
  15203. GGML_ASSERT(false); // TODO: not implemented
  15204. } break;
  15205. case GGML_OP_CONV_TRANSPOSE_1D:
  15206. {
  15207. GGML_ASSERT(false); // TODO: not implemented
  15208. } break;
  15209. case GGML_OP_IM2COL:
  15210. {
  15211. GGML_ASSERT(false); // TODO: not implemented
  15212. } break;
  15213. case GGML_OP_CONV_TRANSPOSE_2D:
  15214. {
  15215. GGML_ASSERT(false); // TODO: not implemented
  15216. } break;
  15217. case GGML_OP_POOL_1D:
  15218. {
  15219. GGML_ASSERT(false); // TODO: not implemented
  15220. } break;
  15221. case GGML_OP_POOL_2D:
  15222. {
  15223. GGML_ASSERT(false); // TODO: not implemented
  15224. } break;
  15225. case GGML_OP_UPSCALE:
  15226. {
  15227. GGML_ASSERT(false); // TODO: not implemented
  15228. } break;
  15229. case GGML_OP_PAD:
  15230. {
  15231. GGML_ASSERT(false); // TODO: not implemented
  15232. } break;
  15233. case GGML_OP_ARANGE:
  15234. {
  15235. GGML_ASSERT(false); // TODO: not implemented
  15236. } break;
  15237. case GGML_OP_TIMESTEP_EMBEDDING:
  15238. {
  15239. GGML_ASSERT(false); // TODO: not implemented
  15240. } break;
  15241. case GGML_OP_ARGSORT:
  15242. {
  15243. GGML_ASSERT(false); // TODO: not implemented
  15244. } break;
  15245. case GGML_OP_LEAKY_RELU:
  15246. {
  15247. GGML_ASSERT(false); // TODO: not implemented
  15248. } break;
  15249. case GGML_OP_FLASH_ATTN:
  15250. case GGML_OP_FLASH_ATTN_EXT:
  15251. {
  15252. struct ggml_tensor * flash_grad = NULL;
  15253. if (src0->grad || src1->grad || tensor->src[2]->grad) {
  15254. int32_t t = ggml_get_op_params_i32(tensor, 0);
  15255. GGML_ASSERT(t == 0 || t == 1);
  15256. bool masked = t != 0;
  15257. flash_grad =
  15258. ggml_flash_attn_back(ctx,
  15259. src0,
  15260. src1,
  15261. tensor->src[2],
  15262. tensor->grad,
  15263. masked);
  15264. }
  15265. struct ggml_tensor * src2 = tensor->src[2];
  15266. const int64_t elem_q = ggml_nelements(src0);
  15267. const int64_t elem_k = ggml_nelements(src1);
  15268. const int64_t elem_v = ggml_nelements(src2);
  15269. enum ggml_type result_type = flash_grad->type;
  15270. GGML_ASSERT(ggml_blck_size(result_type) == 1);
  15271. const size_t tsize = ggml_type_size(result_type);
  15272. const size_t offs_q = 0;
  15273. const size_t offs_k = offs_q + GGML_PAD(elem_q * tsize, GGML_MEM_ALIGN);
  15274. const size_t offs_v = offs_k + GGML_PAD(elem_k * tsize, GGML_MEM_ALIGN);
  15275. if (src0->grad) {
  15276. struct ggml_tensor * view_q = ggml_view_1d(ctx, flash_grad, elem_q, offs_q);
  15277. struct ggml_tensor * grad_q = ggml_reshape(ctx, view_q, src0);
  15278. src0->grad = ggml_add_or_set(ctx,
  15279. src0->grad,
  15280. grad_q,
  15281. zero_table);
  15282. }
  15283. if (src1->grad) {
  15284. struct ggml_tensor * view_k = ggml_view_1d(ctx, flash_grad, elem_k, offs_k);
  15285. struct ggml_tensor * grad_k = ggml_reshape(ctx, view_k, src1);
  15286. src1->grad = ggml_add_or_set(ctx,
  15287. src1->grad,
  15288. grad_k,
  15289. zero_table);
  15290. }
  15291. if (src2->grad) {
  15292. struct ggml_tensor * view_v = ggml_view_1d(ctx, flash_grad, elem_v, offs_v);
  15293. struct ggml_tensor * grad_v = ggml_reshape(ctx, view_v, src2);
  15294. src2->grad = ggml_add_or_set(ctx,
  15295. src2->grad,
  15296. grad_v,
  15297. zero_table);
  15298. }
  15299. } break;
  15300. case GGML_OP_FLASH_FF:
  15301. {
  15302. GGML_ASSERT(false); // not supported
  15303. } break;
  15304. case GGML_OP_FLASH_ATTN_BACK:
  15305. {
  15306. GGML_ASSERT(false); // not supported
  15307. } break;
  15308. case GGML_OP_SSM_CONV:
  15309. case GGML_OP_SSM_SCAN:
  15310. {
  15311. GGML_ASSERT(false); // TODO: not implemented
  15312. } break;
  15313. case GGML_OP_WIN_PART:
  15314. case GGML_OP_WIN_UNPART:
  15315. case GGML_OP_UNARY:
  15316. {
  15317. switch (ggml_get_unary_op(tensor)) {
  15318. case GGML_UNARY_OP_ABS:
  15319. {
  15320. if (src0->grad) {
  15321. src0->grad =
  15322. ggml_add_or_set(ctx,
  15323. src0->grad,
  15324. ggml_mul(ctx,
  15325. ggml_sgn(ctx, src0),
  15326. tensor->grad),
  15327. zero_table);
  15328. }
  15329. } break;
  15330. case GGML_UNARY_OP_SGN:
  15331. {
  15332. if (src0->grad) {
  15333. // noop
  15334. }
  15335. } break;
  15336. case GGML_UNARY_OP_NEG:
  15337. {
  15338. if (src0->grad) {
  15339. src0->grad = ggml_sub_or_set(ctx, src0->grad, tensor->grad, zero_table);
  15340. }
  15341. } break;
  15342. case GGML_UNARY_OP_STEP:
  15343. {
  15344. if (src0->grad) {
  15345. // noop
  15346. }
  15347. } break;
  15348. case GGML_UNARY_OP_TANH:
  15349. {
  15350. GGML_ASSERT(false); // TODO: not implemented
  15351. } break;
  15352. case GGML_UNARY_OP_ELU:
  15353. {
  15354. GGML_ASSERT(false); // TODO: not implemented
  15355. } break;
  15356. case GGML_UNARY_OP_RELU:
  15357. {
  15358. if (src0->grad) {
  15359. src0->grad = ggml_add_or_set(ctx,
  15360. src0->grad,
  15361. ggml_mul(ctx,
  15362. ggml_step(ctx, src0),
  15363. tensor->grad),
  15364. zero_table);
  15365. }
  15366. } break;
  15367. case GGML_UNARY_OP_SIGMOID:
  15368. {
  15369. GGML_ASSERT(false); // TODO: not implemented
  15370. } break;
  15371. case GGML_UNARY_OP_GELU:
  15372. {
  15373. GGML_ASSERT(false); // TODO: not implemented
  15374. } break;
  15375. case GGML_UNARY_OP_GELU_QUICK:
  15376. {
  15377. GGML_ASSERT(false); // TODO: not implemented
  15378. } break;
  15379. case GGML_UNARY_OP_SILU:
  15380. {
  15381. // necessary for llama
  15382. if (src0->grad) {
  15383. src0->grad = ggml_add_or_set(ctx,
  15384. src0->grad,
  15385. ggml_silu_back(ctx, src0, tensor->grad),
  15386. zero_table);
  15387. }
  15388. } break;
  15389. default:
  15390. GGML_ASSERT(false);
  15391. }
  15392. } break;
  15393. case GGML_OP_GET_REL_POS:
  15394. case GGML_OP_ADD_REL_POS:
  15395. case GGML_OP_MAP_UNARY:
  15396. case GGML_OP_MAP_BINARY:
  15397. case GGML_OP_MAP_CUSTOM1_F32:
  15398. case GGML_OP_MAP_CUSTOM2_F32:
  15399. case GGML_OP_MAP_CUSTOM3_F32:
  15400. case GGML_OP_MAP_CUSTOM1:
  15401. case GGML_OP_MAP_CUSTOM2:
  15402. case GGML_OP_MAP_CUSTOM3:
  15403. {
  15404. GGML_ASSERT(false); // not supported
  15405. } break;
  15406. case GGML_OP_CROSS_ENTROPY_LOSS:
  15407. {
  15408. if (src0->grad) {
  15409. src0->grad = ggml_add_or_set(ctx,
  15410. src0->grad,
  15411. ggml_cross_entropy_loss_back(ctx,
  15412. src0,
  15413. src1,
  15414. tensor->grad),
  15415. zero_table);
  15416. }
  15417. } break;
  15418. case GGML_OP_CROSS_ENTROPY_LOSS_BACK:
  15419. {
  15420. GGML_ASSERT(false); // not supported
  15421. } break;
  15422. case GGML_OP_NONE:
  15423. {
  15424. // nop
  15425. } break;
  15426. case GGML_OP_COUNT:
  15427. {
  15428. GGML_ASSERT(false);
  15429. } break;
  15430. }
  15431. for (int i = 0; i < GGML_MAX_SRC; ++i) {
  15432. if (tensor->src[i] && tensor->src[i]->grad) {
  15433. GGML_ASSERT(ggml_are_same_shape(tensor->src[i], tensor->src[i]->grad));
  15434. }
  15435. }
  15436. }
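//
// Note on ggml_compute_backward (above): for each op, the incoming gradient (tensor->grad)
// is propagated to the gradients of the op's sources via ggml_add_or_set / ggml_sub_or_set.
// The zero_table marks gradient tensors that still hold their initial zero values, so the
// first contribution can simply replace them instead of adding to them. Cases that hit
// GGML_ASSERT(false) have no backward implementation yet.
//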
  15437. static void ggml_visit_parents(struct ggml_cgraph * cgraph, struct ggml_tensor * node) {
  15438. if (node->grad == NULL) {
  15439. // this usually happens when we generate intermediate nodes from constants in the backward pass
15440. // it can also happen during the forward pass, if the user performs computations with constants
  15441. if (node->op != GGML_OP_NONE) {
  15442. //GGML_PRINT_DEBUG("%s: warning: node %p has no grad, but op %d\n", __func__, (void *) node, node->op);
  15443. }
  15444. }
  15445. // check if already visited
  15446. if (ggml_hash_insert(cgraph->visited_hash_table, node) == GGML_HASHTABLE_ALREADY_EXISTS) {
  15447. return;
  15448. }
  15449. for (int i = 0; i < GGML_MAX_SRC; ++i) {
  15450. const int k =
  15451. (cgraph->order == GGML_CGRAPH_EVAL_ORDER_LEFT_TO_RIGHT) ? i :
  15452. (cgraph->order == GGML_CGRAPH_EVAL_ORDER_RIGHT_TO_LEFT) ? (GGML_MAX_SRC-1-i) :
15453. /* unknown order, just fall back to using i */ i;
  15454. if (node->src[k]) {
  15455. ggml_visit_parents(cgraph, node->src[k]);
  15456. }
  15457. }
  15458. if (node->op == GGML_OP_NONE && node->grad == NULL) {
  15459. // reached a leaf node, not part of the gradient graph (e.g. a constant)
  15460. GGML_ASSERT(cgraph->n_leafs < cgraph->size);
  15461. if (strlen(node->name) == 0) {
  15462. ggml_format_name(node, "leaf_%d", cgraph->n_leafs);
  15463. }
  15464. cgraph->leafs[cgraph->n_leafs] = node;
  15465. cgraph->n_leafs++;
  15466. } else {
  15467. GGML_ASSERT(cgraph->n_nodes < cgraph->size);
  15468. if (strlen(node->name) == 0) {
  15469. ggml_format_name(node, "node_%d", cgraph->n_nodes);
  15470. }
  15471. cgraph->nodes[cgraph->n_nodes] = node;
  15472. if (cgraph->grads) {
  15473. cgraph->grads[cgraph->n_nodes] = node->grad;
  15474. }
  15475. cgraph->n_nodes++;
  15476. }
  15477. }
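//
// ggml_visit_parents (above) is a depth-first traversal: all sources of a node are visited
// before the node itself is appended, so cgraph->nodes ends up in a valid evaluation order.
// Tensors with op == GGML_OP_NONE and no gradient (constants, inputs) are collected into
// cgraph->leafs instead of cgraph->nodes.
//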
  15478. static void ggml_build_forward_impl(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor, bool expand) {
  15479. if (!expand) {
  15480. // TODO: this branch isn't accessible anymore, maybe move this to ggml_build_forward_expand
  15481. ggml_graph_clear(cgraph);
  15482. }
  15483. const int n0 = cgraph->n_nodes;
  15484. UNUSED(n0);
  15485. ggml_visit_parents(cgraph, tensor);
  15486. const int n_new = cgraph->n_nodes - n0;
  15487. GGML_PRINT_DEBUG("%s: visited %d new nodes\n", __func__, n_new);
  15488. if (n_new > 0) {
15489. // the last added node should always be the starting point
  15490. GGML_ASSERT(cgraph->nodes[cgraph->n_nodes - 1] == tensor);
  15491. }
  15492. }
  15493. void ggml_build_forward_expand(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor) {
  15494. ggml_build_forward_impl(cgraph, tensor, true);
  15495. }
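//
// Minimal usage sketch for building and running a forward graph (illustrative only; 'result'
// and the thread count are placeholders, not defined in this file):
//
//   struct ggml_cgraph * gf = ggml_new_graph(ctx);
//   ggml_build_forward_expand(gf, result);                  // expand the graph up to 'result'
//   ggml_graph_compute_with_ctx(ctx, gf, /*n_threads=*/4);  // evaluate it
//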
  15496. void ggml_build_backward_expand(struct ggml_context * ctx, struct ggml_cgraph * gf, struct ggml_cgraph * gb, bool keep) {
  15497. GGML_ASSERT(gf->n_nodes > 0);
  15498. // if we are keeping the gradient graph, we have to detach the gradient nodes from the original graph
  15499. if (keep) {
  15500. for (int i = 0; i < gf->n_nodes; i++) {
  15501. struct ggml_tensor * node = gf->nodes[i];
  15502. if (node->grad) {
  15503. node->grad = ggml_dup_tensor(ctx, node);
  15504. gf->grads[i] = node->grad;
  15505. }
  15506. }
  15507. }
  15508. // remember original gradients which start with zero values
  15509. struct ggml_hash_set zero_table = ggml_hash_set_new(gf->size);
  15510. for (int i = 0; i < gf->n_nodes; i++) {
  15511. if (gf->grads[i]) {
  15512. ggml_hash_insert(zero_table, gf->grads[i]);
  15513. }
  15514. }
  15515. for (int i = gf->n_nodes - 1; i >= 0; i--) {
  15516. struct ggml_tensor * node = gf->nodes[i];
15517. // inplace operations to add gradients are not created by ggml_compute_backward
15518. // instead, the allocator is relied on to automatically turn these additions into inplace operations
  15519. if (node->grad) {
  15520. ggml_compute_backward(ctx, node, zero_table);
  15521. }
  15522. }
  15523. for (int i = 0; i < gf->n_nodes; i++) {
  15524. struct ggml_tensor * node = gf->nodes[i];
  15525. if (node->flags & GGML_TENSOR_FLAG_PARAM) {
  15526. GGML_PRINT_DEBUG("%s: found root node %p\n", __func__, (void *) node);
  15527. ggml_build_forward_expand(gb, node->grad);
  15528. }
  15529. }
  15530. ggml_hash_set_free(zero_table);
  15531. }
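//
// Sketch of how a forward/backward graph pair is typically set up (hedged; exact training-loop
// details vary, and 'loss' / 'n_threads' are placeholders):
//
//   struct ggml_cgraph * gf = ggml_new_graph_custom(ctx, GGML_DEFAULT_GRAPH_SIZE, /*grads=*/true);
//   ggml_build_forward_expand(gf, loss);
//   struct ggml_cgraph * gb = ggml_graph_dup(ctx, gf);
//   ggml_build_backward_expand(ctx, gf, gb, /*keep=*/false);
//   ggml_graph_reset(gf);                              // zero the gradients
//   ggml_graph_compute_with_ctx(ctx, gb, n_threads);   // gb contains forward + backward nodes
//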
  15532. static size_t ggml_graph_nbytes(size_t size, bool grads) {
  15533. size_t nbytes = sizeof(struct ggml_cgraph);
  15534. nbytes += size * sizeof(struct ggml_tensor *) * 2; // leafs + nodes
  15535. if (grads) {
  15536. nbytes += size * sizeof(struct ggml_tensor *); // grads
  15537. }
  15538. nbytes += ggml_hash_size(size * 2) * sizeof(struct ggml_tensor *); // hash set
  15539. return nbytes;
  15540. }
  15541. size_t ggml_graph_overhead_custom(size_t size, bool grads) {
  15542. return GGML_OBJECT_SIZE + GGML_PAD(ggml_graph_nbytes(size, grads), GGML_MEM_ALIGN);
  15543. }
  15544. size_t ggml_graph_overhead(void) {
  15545. return ggml_graph_overhead_custom(GGML_DEFAULT_GRAPH_SIZE, false);
  15546. }
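//
// ggml_graph_overhead()/ggml_graph_overhead_custom() report how much context memory a graph
// object consumes (object header plus the padded node/leaf/hash storage computed by
// ggml_graph_nbytes). Callers typically add this to the per-tensor overhead when sizing the
// mem_size passed to ggml_init.
//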
  15547. struct ggml_cgraph * ggml_new_graph_custom(struct ggml_context * ctx, size_t size, bool grads) {
  15548. const size_t obj_size = ggml_graph_nbytes(size, grads);
  15549. struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_TYPE_GRAPH, obj_size);
  15550. struct ggml_cgraph * cgraph = (struct ggml_cgraph *) ((char *) ctx->mem_buffer + obj->offs);
  15551. struct ggml_tensor ** data_start = (struct ggml_tensor **) (cgraph + 1);
  15552. size_t hash_size = ggml_hash_size(size * 2);
  15553. struct ggml_tensor ** nodes_ptr = data_start;
  15554. struct ggml_tensor ** leafs_ptr = nodes_ptr + size;
  15555. struct ggml_tensor ** hash_keys_ptr = leafs_ptr + size;
  15556. struct ggml_tensor ** grads_ptr = grads ? hash_keys_ptr + hash_size : NULL;
  15557. // check that we allocated the correct amount of memory
  15558. assert(obj_size == (size_t) (
  15559. (grads ? (char *)(grads_ptr + size) : (char *)(hash_keys_ptr + hash_size)) - (char *)cgraph));
  15560. memset(hash_keys_ptr, 0, hash_size * sizeof(struct ggml_tensor *));
  15561. *cgraph = (struct ggml_cgraph) {
  15562. /*.size =*/ size,
  15563. /*.n_nodes =*/ 0,
  15564. /*.n_leafs =*/ 0,
  15565. /*.nodes =*/ nodes_ptr,
  15566. /*.grads =*/ grads_ptr,
  15567. /*.leafs =*/ leafs_ptr,
  15568. /*.hash_table =*/ { hash_size, hash_keys_ptr },
  15569. /*.order =*/ GGML_CGRAPH_EVAL_ORDER_LEFT_TO_RIGHT,
  15570. /*.perf_runs =*/ 0,
  15571. /*.perf_cycles =*/ 0,
  15572. /*.perf_time_us =*/ 0,
  15573. };
  15574. return cgraph;
  15575. }
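//
// Memory layout produced by ggml_new_graph_custom (above): the ggml_cgraph header is followed,
// within the same allocation, by the nodes array, the leafs array, the visited hash table keys
// and, if requested, the grads array; the assert verifies this matches ggml_graph_nbytes.
//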
  15576. struct ggml_cgraph * ggml_new_graph(struct ggml_context * ctx) {
  15577. return ggml_new_graph_custom(ctx, GGML_DEFAULT_GRAPH_SIZE, false);
  15578. }
  15579. struct ggml_cgraph ggml_graph_view(struct ggml_cgraph * cgraph0, int i0, int i1) {
  15580. struct ggml_cgraph cgraph = {
  15581. /*.size =*/ 0,
  15582. /*.n_nodes =*/ i1 - i0,
  15583. /*.n_leafs =*/ 0,
  15584. /*.nodes =*/ cgraph0->nodes + i0,
  15585. /*.grads =*/ cgraph0->grads ? cgraph0->grads + i0 : NULL,
  15586. /*.leafs =*/ NULL,
  15587. /*.hash_table =*/ { 0, NULL },
  15588. /*.order =*/ cgraph0->order,
  15589. /*.perf_runs =*/ 0,
  15590. /*.perf_cycles =*/ 0,
  15591. /*.perf_time_us =*/ 0,
  15592. };
  15593. return cgraph;
  15594. }
  15595. void ggml_graph_cpy(struct ggml_cgraph * src, struct ggml_cgraph * dst) {
  15596. GGML_ASSERT(dst->size >= src->n_leafs);
  15597. GGML_ASSERT(dst->size >= src->n_nodes);
  15598. GGML_ASSERT(dst->visited_hash_table.size >= src->visited_hash_table.size);
  15599. dst->n_leafs = src->n_leafs;
  15600. dst->n_nodes = src->n_nodes;
  15601. dst->order = src->order;
  15602. for (int i = 0; i < src->n_leafs; ++i) {
  15603. dst->leafs[i] = src->leafs[i];
  15604. }
  15605. for (int i = 0; i < src->n_nodes; ++i) {
  15606. dst->nodes[i] = src->nodes[i];
  15607. }
  15608. if (src->grads) {
  15609. GGML_ASSERT(dst->grads != NULL);
  15610. for (int i = 0; i < src->n_nodes; ++i) {
  15611. dst->grads[i] = src->grads[i];
  15612. }
  15613. }
  15614. for (size_t i = 0; i < src->visited_hash_table.size; ++i) {
  15615. if (src->visited_hash_table.keys[i]) {
  15616. ggml_hash_insert(dst->visited_hash_table, src->visited_hash_table.keys[i]);
  15617. }
  15618. }
  15619. }
  15620. struct ggml_cgraph * ggml_graph_dup(struct ggml_context * ctx, struct ggml_cgraph * cgraph) {
  15621. struct ggml_cgraph * result = ggml_new_graph_custom(ctx, cgraph->size, cgraph->grads != NULL);
  15622. ggml_graph_cpy(cgraph, result);
  15623. return result;
  15624. }
  15625. void ggml_graph_reset(struct ggml_cgraph * cgraph) {
  15626. GGML_ASSERT(cgraph->grads != NULL);
  15627. for (int i = 0; i < cgraph->n_nodes; i++) {
  15628. struct ggml_tensor * grad = cgraph->grads[i];
  15629. if (grad) {
  15630. ggml_set_zero(grad);
  15631. }
  15632. }
  15633. }
  15634. void ggml_graph_clear(struct ggml_cgraph * cgraph) {
  15635. cgraph->n_leafs = 0;
  15636. cgraph->n_nodes = 0;
  15637. memset(cgraph->visited_hash_table.keys, 0, cgraph->visited_hash_table.size * sizeof(struct ggml_tensor *));
  15638. }
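//
// ggml_graph_reset zeroes the data of the existing gradient tensors but keeps the graph
// contents; ggml_graph_clear drops all nodes/leafs and empties the visited hash table so the
// graph can be rebuilt from scratch.
//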
  15639. //
  15640. // thread data
  15641. //
  15642. // synchronization is done via busy loops
15643. // I tried using spin locks, but I am not sure how to use them correctly - the variants I tried were slower than busy loops
  15644. //
  15645. #ifdef __APPLE__
  15646. //#include <os/lock.h>
  15647. //
  15648. //typedef os_unfair_lock ggml_lock_t;
  15649. //
  15650. //#define ggml_lock_init(x) UNUSED(x)
  15651. //#define ggml_lock_destroy(x) UNUSED(x)
  15652. //#define ggml_lock_lock os_unfair_lock_lock
  15653. //#define ggml_lock_unlock os_unfair_lock_unlock
  15654. //
  15655. //#define GGML_LOCK_INITIALIZER OS_UNFAIR_LOCK_INIT
  15656. typedef int ggml_lock_t;
  15657. #define ggml_lock_init(x) UNUSED(x)
  15658. #define ggml_lock_destroy(x) UNUSED(x)
  15659. #define ggml_lock_lock(x) UNUSED(x)
  15660. #define ggml_lock_unlock(x) UNUSED(x)
  15661. #define GGML_LOCK_INITIALIZER 0
  15662. typedef pthread_t ggml_thread_t;
  15663. #define ggml_thread_create pthread_create
  15664. #define ggml_thread_join pthread_join
  15665. #else
  15666. //typedef pthread_spinlock_t ggml_lock_t;
  15667. //#define ggml_lock_init(x) pthread_spin_init(x, PTHREAD_PROCESS_PRIVATE)
  15668. //#define ggml_lock_destroy pthread_spin_destroy
  15669. //#define ggml_lock_lock pthread_spin_lock
  15670. //#define ggml_lock_unlock pthread_spin_unlock
  15671. typedef int ggml_lock_t;
  15672. #define ggml_lock_init(x) UNUSED(x)
  15673. #define ggml_lock_destroy(x) UNUSED(x)
  15674. #if defined(__x86_64__) || (defined(_MSC_VER) && defined(_M_AMD64))
  15675. #define ggml_lock_lock(x) _mm_pause()
  15676. #else
  15677. #define ggml_lock_lock(x) UNUSED(x)
  15678. #endif
  15679. #define ggml_lock_unlock(x) UNUSED(x)
  15680. #define GGML_LOCK_INITIALIZER 0
  15681. typedef pthread_t ggml_thread_t;
  15682. #define ggml_thread_create pthread_create
  15683. #define ggml_thread_join pthread_join
  15684. #endif
  15685. // Android's libc implementation "bionic" does not support setting affinity
  15686. #if defined(__gnu_linux__)
  15687. static void set_numa_thread_affinity(int thread_n) {
  15688. if (!ggml_is_numa()) {
  15689. return;
  15690. }
  15691. int node_num;
  15692. int rv;
  15693. size_t setsize = CPU_ALLOC_SIZE(g_state.numa.total_cpus);
  15694. switch(g_state.numa.numa_strategy) {
  15695. case GGML_NUMA_STRATEGY_DISTRIBUTE:
15696. // distribute threads round-robin across nodes: run thread thread_n on node (thread_n % n_nodes)
  15697. node_num = thread_n % g_state.numa.n_nodes;
  15698. break;
  15699. case GGML_NUMA_STRATEGY_ISOLATE:
  15700. // run thread on current_node
  15701. node_num = g_state.numa.current_node;
  15702. break;
  15703. case GGML_NUMA_STRATEGY_NUMACTL:
  15704. // use the cpuset that numactl gave us
  15705. rv = pthread_setaffinity_np(pthread_self(), setsize, &g_state.numa.cpuset);
  15706. if (rv) {
  15707. fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n",strerror(rv));
  15708. }
  15709. return;
  15710. default:
  15711. return;
  15712. }
  15713. struct ggml_numa_node * node = &g_state.numa.nodes[node_num];
  15714. cpu_set_t * cpus = CPU_ALLOC(g_state.numa.total_cpus);
  15715. CPU_ZERO_S(setsize, cpus);
  15716. for (size_t i = 0; i < node->n_cpus; ++i) {
  15717. CPU_SET_S(node->cpus[i], setsize, cpus);
  15718. }
  15719. rv = pthread_setaffinity_np(pthread_self(), setsize, cpus);
  15720. if (rv) {
  15721. fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n", strerror(rv));
  15722. }
  15723. CPU_FREE(cpus);
  15724. }
  15725. static void clear_numa_thread_affinity(void) {
  15726. if (!ggml_is_numa()) {
  15727. return;
  15728. }
  15729. size_t setsize = CPU_ALLOC_SIZE(g_state.numa.total_cpus);
  15730. cpu_set_t * cpus = CPU_ALLOC(g_state.numa.total_cpus);
  15731. CPU_ZERO_S(setsize, cpus);
  15732. for (unsigned i = 0; i < g_state.numa.total_cpus; ++i) {
  15733. CPU_SET_S(i, setsize, cpus);
  15734. }
  15735. int rv = pthread_setaffinity_np(pthread_self(), setsize, cpus);
  15736. if (rv) {
  15737. fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n", strerror(rv));
  15738. }
  15739. CPU_FREE(cpus);
  15740. }
  15741. #else
15742. // TODO: Windows etc.
15743. // (the Linux implementation may also work on BSD, but someone should test it)
  15744. static void set_numa_thread_affinity(int thread_n) { UNUSED(thread_n); }
  15745. static void clear_numa_thread_affinity(void) {}
  15746. #endif
  15747. struct ggml_compute_state_shared {
  15748. const struct ggml_cgraph * cgraph;
  15749. const struct ggml_cplan * cplan;
  15750. int64_t perf_node_start_cycles;
  15751. int64_t perf_node_start_time_us;
  15752. const int n_threads;
  15753. // synchronization primitives
  15754. atomic_int n_active; // num active threads
  15755. atomic_int node_n; // active graph node
  15756. atomic_int node_task; // active graph node task phase
  15757. ggml_abort_callback abort_callback; // abort ggml_graph_compute when true
  15758. void * abort_callback_data;
  15759. };
  15760. struct ggml_compute_state {
  15761. ggml_thread_t thrd;
  15762. int ith;
  15763. struct ggml_compute_state_shared * shared;
  15764. enum ggml_status ec;
  15765. };
  15766. static void ggml_graph_compute_perf_stats_node(struct ggml_tensor * node, const struct ggml_compute_state_shared * st) {
  15767. int64_t cycles_cur = ggml_perf_cycles() - st->perf_node_start_cycles;
  15768. int64_t time_us_cur = ggml_perf_time_us() - st->perf_node_start_time_us;
  15769. node->perf_runs++;
  15770. node->perf_cycles += cycles_cur;
  15771. node->perf_time_us += time_us_cur;
  15772. }
  15773. static int ggml_get_n_tasks(struct ggml_tensor * node, int n_threads, int n_cur_threads) {
  15774. int n_tasks = 0;
  15775. if (ggml_is_empty(node)) {
  15776. // no need to multi-thread a no-op
  15777. n_tasks = 1;
  15778. return n_tasks;
  15779. }
  15780. switch (node->op) {
  15781. case GGML_OP_CPY:
  15782. case GGML_OP_DUP:
  15783. case GGML_OP_ADD:
  15784. case GGML_OP_ADD1:
  15785. case GGML_OP_ACC:
  15786. {
  15787. n_tasks = n_threads;
  15788. } break;
  15789. case GGML_OP_SUB:
  15790. case GGML_OP_SQR:
  15791. case GGML_OP_SQRT:
  15792. case GGML_OP_LOG:
  15793. case GGML_OP_SUM:
  15794. case GGML_OP_SUM_ROWS:
  15795. case GGML_OP_MEAN:
  15796. case GGML_OP_ARGMAX:
  15797. case GGML_OP_REPEAT:
  15798. case GGML_OP_REPEAT_BACK:
  15799. case GGML_OP_LEAKY_RELU:
  15800. {
  15801. n_tasks = 1;
  15802. } break;
  15803. case GGML_OP_UNARY:
  15804. switch (ggml_get_unary_op(node)) {
  15805. case GGML_UNARY_OP_ABS:
  15806. case GGML_UNARY_OP_SGN:
  15807. case GGML_UNARY_OP_NEG:
  15808. case GGML_UNARY_OP_STEP:
  15809. case GGML_UNARY_OP_TANH:
  15810. case GGML_UNARY_OP_ELU:
  15811. case GGML_UNARY_OP_RELU:
  15812. case GGML_UNARY_OP_SIGMOID:
15813. case GGML_UNARY_OP_HARDSWISH:  // TODO: candidate for multi-threading
15814. case GGML_UNARY_OP_HARDSIGMOID: // TODO: candidate for multi-threading
  15815. {
  15816. n_tasks = 1;
  15817. } break;
  15818. case GGML_UNARY_OP_GELU:
  15819. case GGML_UNARY_OP_GELU_QUICK:
  15820. case GGML_UNARY_OP_SILU:
  15821. {
  15822. n_tasks = n_threads;
  15823. } break;
  15824. default:
  15825. GGML_ASSERT(false);
  15826. }
  15827. break;
  15828. case GGML_OP_SILU_BACK:
  15829. case GGML_OP_MUL:
  15830. case GGML_OP_DIV:
  15831. case GGML_OP_NORM:
  15832. case GGML_OP_RMS_NORM:
  15833. case GGML_OP_RMS_NORM_BACK:
  15834. case GGML_OP_GROUP_NORM:
  15835. case GGML_OP_CONCAT:
  15836. {
  15837. n_tasks = n_threads;
  15838. } break;
  15839. case GGML_OP_MUL_MAT:
  15840. {
  15841. n_tasks = n_threads;
  15842. // TODO: use different scheduling for different matrix sizes
  15843. //const int nr0 = ggml_nrows(node->src[0]);
  15844. //const int nr1 = ggml_nrows(node->src[1]);
  15845. //n_tasks = MIN(n_threads, MAX(1, nr0/128));
  15846. //printf("nr0 = %8d, nr1 = %8d, nr0*nr1 = %8d, n_tasks%d\n", nr0, nr1, nr0*nr1, n_tasks);
  15847. } break;
  15848. case GGML_OP_MUL_MAT_ID:
  15849. {
  15850. n_tasks = n_threads;
  15851. } break;
  15852. case GGML_OP_OUT_PROD:
  15853. {
  15854. n_tasks = n_threads;
  15855. } break;
  15856. case GGML_OP_GET_ROWS:
  15857. {
  15858. // FIXME: the cost of launching additional threads decreases performance with GPU offloading
  15859. //n_tasks = MIN(n_threads, ggml_nelements(node->src[1]));
  15860. n_tasks = MIN(n_cur_threads, ggml_nelements(node->src[1]));
  15861. } break;
  15862. case GGML_OP_SCALE:
  15863. case GGML_OP_SET:
  15864. case GGML_OP_CONT:
  15865. case GGML_OP_RESHAPE:
  15866. case GGML_OP_VIEW:
  15867. case GGML_OP_PERMUTE:
  15868. case GGML_OP_TRANSPOSE:
  15869. case GGML_OP_GET_ROWS_BACK:
  15870. case GGML_OP_DIAG:
  15871. {
  15872. n_tasks = 1;
  15873. } break;
  15874. case GGML_OP_DIAG_MASK_ZERO:
  15875. case GGML_OP_DIAG_MASK_INF:
  15876. case GGML_OP_SOFT_MAX_BACK:
  15877. case GGML_OP_ROPE:
  15878. case GGML_OP_ROPE_BACK:
  15879. case GGML_OP_ADD_REL_POS:
  15880. {
  15881. n_tasks = n_threads;
  15882. } break;
  15883. case GGML_OP_CLAMP:
  15884. {
  15885. n_tasks = 1; //TODO
  15886. } break;
  15887. case GGML_OP_SOFT_MAX:
  15888. {
  15889. n_tasks = MIN(n_threads, ggml_nrows(node->src[0]));
  15890. } break;
  15891. case GGML_OP_CONV_TRANSPOSE_1D:
  15892. {
  15893. n_tasks = n_threads;
  15894. } break;
  15895. case GGML_OP_IM2COL:
  15896. {
  15897. n_tasks = n_threads;
  15898. } break;
  15899. case GGML_OP_CONV_TRANSPOSE_2D:
  15900. {
  15901. n_tasks = n_threads;
  15902. } break;
  15903. case GGML_OP_POOL_1D:
  15904. case GGML_OP_POOL_2D:
  15905. {
  15906. n_tasks = 1;
  15907. } break;
  15908. case GGML_OP_UPSCALE:
  15909. {
  15910. n_tasks = n_threads;
  15911. } break;
  15912. case GGML_OP_PAD:
  15913. {
  15914. n_tasks = n_threads;
  15915. } break;
  15916. case GGML_OP_ARANGE:
  15917. {
  15918. n_tasks = n_threads;
  15919. } break;
  15920. case GGML_OP_TIMESTEP_EMBEDDING:
  15921. {
  15922. n_tasks = n_threads;
  15923. } break;
  15924. case GGML_OP_ARGSORT:
  15925. {
  15926. n_tasks = n_threads;
  15927. } break;
  15928. case GGML_OP_FLASH_ATTN:
  15929. case GGML_OP_FLASH_ATTN_EXT:
  15930. {
  15931. n_tasks = n_threads;
  15932. } break;
  15933. case GGML_OP_FLASH_FF:
  15934. {
  15935. n_tasks = n_threads;
  15936. } break;
  15937. case GGML_OP_FLASH_ATTN_BACK:
  15938. {
  15939. n_tasks = n_threads;
  15940. } break;
  15941. case GGML_OP_SSM_CONV:
  15942. case GGML_OP_SSM_SCAN:
  15943. {
  15944. n_tasks = n_threads;
  15945. } break;
  15946. case GGML_OP_WIN_PART:
  15947. case GGML_OP_WIN_UNPART:
  15948. case GGML_OP_GET_REL_POS:
  15949. case GGML_OP_MAP_UNARY:
  15950. case GGML_OP_MAP_BINARY:
  15951. case GGML_OP_MAP_CUSTOM1_F32:
  15952. case GGML_OP_MAP_CUSTOM2_F32:
  15953. case GGML_OP_MAP_CUSTOM3_F32:
  15954. {
  15955. n_tasks = 1;
  15956. } break;
  15957. case GGML_OP_MAP_CUSTOM1:
  15958. {
  15959. struct ggml_map_custom1_op_params p;
  15960. memcpy(&p, node->op_params, sizeof(p));
  15961. if (p.n_tasks == GGML_N_TASKS_MAX) {
  15962. n_tasks = n_threads;
  15963. } else {
  15964. n_tasks = MIN(p.n_tasks, n_threads);
  15965. }
  15966. } break;
  15967. case GGML_OP_MAP_CUSTOM2:
  15968. {
  15969. struct ggml_map_custom2_op_params p;
  15970. memcpy(&p, node->op_params, sizeof(p));
  15971. if (p.n_tasks == GGML_N_TASKS_MAX) {
  15972. n_tasks = n_threads;
  15973. } else {
  15974. n_tasks = MIN(p.n_tasks, n_threads);
  15975. }
  15976. } break;
  15977. case GGML_OP_MAP_CUSTOM3:
  15978. {
  15979. struct ggml_map_custom3_op_params p;
  15980. memcpy(&p, node->op_params, sizeof(p));
  15981. if (p.n_tasks == GGML_N_TASKS_MAX) {
  15982. n_tasks = n_threads;
  15983. } else {
  15984. n_tasks = MIN(p.n_tasks, n_threads);
  15985. }
  15986. } break;
  15987. case GGML_OP_CROSS_ENTROPY_LOSS:
  15988. {
  15989. n_tasks = n_threads;
  15990. } break;
  15991. case GGML_OP_CROSS_ENTROPY_LOSS_BACK:
  15992. {
  15993. n_tasks = n_threads;
  15994. } break;
  15995. case GGML_OP_NONE:
  15996. {
  15997. n_tasks = 1;
  15998. } break;
  15999. case GGML_OP_COUNT:
  16000. {
  16001. GGML_ASSERT(false);
  16002. } break;
  16003. default:
  16004. {
  16005. fprintf(stderr, "%s: op not implemented: ", __func__);
  16006. if (node->op < GGML_OP_COUNT) {
  16007. fprintf(stderr, "%s\n", ggml_op_name(node->op));
  16008. } else {
  16009. fprintf(stderr, "%d\n", node->op);
  16010. }
  16011. GGML_ASSERT(false);
  16012. } break;
  16013. }
  16014. assert(n_tasks > 0);
  16015. return n_tasks;
  16016. }
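//
// ggml_get_n_tasks (above) decides how many threads participate in a given op. ggml_graph_plan
// uses the maximum over all nodes (capped by the requested thread count) to set cplan.n_threads,
// and ggml_graph_compute_thread calls it again per node to know how many workers actually run
// INIT/COMPUTE for that node.
//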
  16017. static void ggml_graph_compute_thread_sync_node(int * node_n, struct ggml_compute_state * state, const bool do_yield) {
  16018. // wait for other threads to finish
  16019. const int last_node_n = * node_n;
  16020. while (true) {
  16021. if (do_yield) {
  16022. sched_yield();
  16023. }
  16024. * node_n = atomic_load(&state->shared->node_n);
  16025. if (* node_n != last_node_n) break;
  16026. }
  16027. }
  16028. static void ggml_graph_compute_thread_sync_task(int * task_phase, struct ggml_compute_state * state, const bool do_yield) {
  16029. // wait for other threads to finish
  16030. const int last_task_phase = * task_phase;
  16031. while (true) {
  16032. if (do_yield) {
  16033. sched_yield();
  16034. }
  16035. * task_phase = atomic_load(&state->shared->node_task);
  16036. if (* task_phase != last_task_phase) break;
  16037. }
  16038. }
  16039. static thread_ret_t ggml_graph_compute_thread(void * data) {
  16040. struct ggml_compute_state * state = (struct ggml_compute_state *) data;
  16041. const struct ggml_cgraph * cgraph = state->shared->cgraph;
  16042. const struct ggml_cplan * cplan = state->shared->cplan;
  16043. const int n_threads = state->shared->n_threads;
  16044. set_numa_thread_affinity(state->ith);
  16045. int node_n = -1;
  16046. int task_phase = GGML_TASK_TYPE_FINALIZE;
  16047. while (true) {
  16048. if (cplan->abort_callback && cplan->abort_callback(cplan->abort_callback_data)) {
  16049. state->shared->node_n += 1;
  16050. state->ec = GGML_STATUS_ABORTED;
  16051. return 0;
  16052. }
  16053. if (atomic_fetch_sub(&state->shared->n_active, 1) == 1) {
  16054. // all other threads are finished and spinning
16055. // do finalize and init here so we don't have to synchronize again
  16056. struct ggml_compute_params params = {
  16057. /*.type =*/ GGML_TASK_TYPE_FINALIZE,
  16058. /*.ith =*/ 0,
  16059. /*.nth =*/ 0,
  16060. /*.wsize =*/ cplan->work_size,
  16061. /*.wdata =*/ cplan->work_data,
  16062. };
  16063. if (node_n != -1) {
  16064. /* FINALIZE */
  16065. struct ggml_tensor * node = cgraph->nodes[node_n];
  16066. if (GGML_OP_HAS_FINALIZE[node->op]) {
  16067. params.nth = ggml_get_n_tasks(node, n_threads, state->shared->n_threads);
  16068. ggml_compute_forward(&params, node);
  16069. }
  16070. ggml_graph_compute_perf_stats_node(node, state->shared);
  16071. }
16072. // distribute new work, or execute it directly if running single-threaded
  16073. while (++node_n < cgraph->n_nodes) {
  16074. GGML_PRINT_DEBUG_5("%s: %d/%d\n", __func__, node_n, cgraph->n_nodes);
  16075. struct ggml_tensor * node = cgraph->nodes[node_n];
  16076. const int n_tasks = ggml_get_n_tasks(node, n_threads, state->shared->n_threads);
  16077. state->shared->perf_node_start_cycles = ggml_perf_cycles();
  16078. state->shared->perf_node_start_time_us = ggml_perf_time_us();
  16079. params.nth = n_tasks;
  16080. if (n_tasks == 1) {
  16081. /* INIT */
  16082. if (GGML_OP_HAS_INIT[node->op]) {
  16083. params.type = GGML_TASK_TYPE_INIT;
  16084. ggml_compute_forward(&params, node);
  16085. }
16086. // TODO: maybe push node_n to the atomic so that, if other threads see n_tasks is 1,
16087. //       they can do something more efficient than spinning (?)
  16088. params.type = GGML_TASK_TYPE_COMPUTE;
  16089. ggml_compute_forward(&params, node);
  16090. if (GGML_OP_HAS_FINALIZE[node->op]) {
  16091. params.type = GGML_TASK_TYPE_FINALIZE;
  16092. ggml_compute_forward(&params, node);
  16093. }
  16094. ggml_graph_compute_perf_stats_node(node, state->shared);
  16095. } else {
  16096. break;
  16097. }
  16098. if (cplan->abort_callback && cplan->abort_callback(cplan->abort_callback_data)) {
  16099. break;
  16100. }
  16101. }
  16102. task_phase = GGML_TASK_TYPE_INIT;
  16103. atomic_store(&state->shared->n_active, n_threads);
  16104. atomic_store(&state->shared->node_n, node_n);
  16105. atomic_store(&state->shared->node_task, task_phase);
  16106. } else {
  16107. ggml_graph_compute_thread_sync_node(&node_n, state, false);
  16108. ggml_graph_compute_thread_sync_task(&task_phase, state, false);
  16109. }
  16110. // check if we should stop
  16111. if (node_n >= cgraph->n_nodes) break;
  16112. /* INIT & COMPUTE */
  16113. struct ggml_tensor * node = cgraph->nodes[node_n];
  16114. const int n_tasks = ggml_get_n_tasks(node, n_threads, state->shared->n_threads);
  16115. struct ggml_compute_params params = {
  16116. /*.type =*/ GGML_TASK_TYPE_INIT,
  16117. /*.ith =*/ state->ith,
  16118. /*.nth =*/ n_tasks,
  16119. /*.wsize =*/ cplan->work_size,
  16120. /*.wdata =*/ cplan->work_data,
  16121. };
  16122. if (state->ith < n_tasks) {
  16123. if (GGML_OP_HAS_INIT[node->op]) {
  16124. ggml_compute_forward(&params, node);
  16125. }
  16126. }
  16127. if (atomic_fetch_sub(&state->shared->n_active, 1) == 1) {
  16128. task_phase = GGML_TASK_TYPE_COMPUTE;
  16129. atomic_store(&state->shared->n_active, n_threads);
  16130. atomic_store(&state->shared->node_task, task_phase);
  16131. }
  16132. else {
  16133. // TODO: this sched_yield can have significant impact on the performance - either positive or negative
  16134. // depending on the workload and the operating system.
16135. // since it is not clear what the best approach is, it should potentially become user-configurable
  16136. // ref: https://github.com/ggerganov/ggml/issues/291
  16137. // UPD: adding the do_yield flag seems to resolve the issue universally
  16138. const bool do_yield = node_n < 0 || cgraph->nodes[node_n]->op == GGML_OP_MUL_MAT;
  16139. ggml_graph_compute_thread_sync_task(&task_phase, state, do_yield);
  16140. }
  16141. if (state->ith < n_tasks) {
  16142. params.type = GGML_TASK_TYPE_COMPUTE;
  16143. ggml_compute_forward(&params, node);
  16144. }
  16145. if (atomic_fetch_sub(&state->shared->n_active, 1) == 1) {
  16146. task_phase = GGML_TASK_TYPE_FINALIZE;
  16147. atomic_store(&state->shared->n_active, n_threads);
  16148. atomic_store(&state->shared->node_task, task_phase);
  16149. }
  16150. else {
  16151. ggml_graph_compute_thread_sync_task(&task_phase, state, false);
  16152. }
  16153. }
  16154. return 0;
  16155. }
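//
// Synchronization in ggml_graph_compute_thread (above): n_active is an atomic countdown; the
// last thread to decrement it performs the FINALIZE of the previous node and the INIT/dispatch
// of the next one, then publishes node_n and node_task for the other threads, which spin in
// ggml_graph_compute_thread_sync_node/_task until those values change.
//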
  16156. struct ggml_cplan ggml_graph_plan(const struct ggml_cgraph * cgraph, int n_threads) {
  16157. if (n_threads <= 0) {
  16158. n_threads = GGML_DEFAULT_N_THREADS;
  16159. }
  16160. size_t work_size = 0;
  16161. struct ggml_cplan cplan;
  16162. memset(&cplan, 0, sizeof(struct ggml_cplan));
  16163. int max_tasks = 1;
  16164. // thread scheduling for the different operations + work buffer size estimation
  16165. for (int i = 0; i < cgraph->n_nodes; i++) {
  16166. struct ggml_tensor * node = cgraph->nodes[i];
  16167. const int n_tasks = ggml_get_n_tasks(node, n_threads, 1);
  16168. max_tasks = MAX(max_tasks, n_tasks);
  16169. size_t cur = 0;
  16170. switch (node->op) {
  16171. case GGML_OP_CPY:
  16172. case GGML_OP_DUP:
  16173. {
  16174. if (ggml_is_quantized(node->type) ||
  16175. // F16 -> BF16 and BF16 -> F16 copies go through intermediate F32
  16176. (node->src[0]->type == GGML_TYPE_F16 && node->src[1] && node->src[1]->type == GGML_TYPE_BF16) ||
  16177. (node->src[0]->type == GGML_TYPE_BF16 && node->src[1] && node->src[1]->type == GGML_TYPE_F16)) {
  16178. cur = ggml_type_size(GGML_TYPE_F32) * node->ne[0] * n_tasks;
  16179. }
  16180. } break;
  16181. case GGML_OP_ADD:
  16182. case GGML_OP_ADD1:
  16183. {
  16184. if (ggml_is_quantized(node->src[0]->type)) {
  16185. cur = ggml_type_size(GGML_TYPE_F32) * node->src[0]->ne[0] * n_tasks;
  16186. }
  16187. } break;
  16188. case GGML_OP_ACC:
  16189. {
  16190. if (ggml_is_quantized(node->src[0]->type)) {
  16191. cur = ggml_type_size(GGML_TYPE_F32) * node->src[1]->ne[0] * n_tasks;
  16192. }
  16193. } break;
  16194. case GGML_OP_MUL_MAT:
  16195. {
  16196. const enum ggml_type vec_dot_type = type_traits[node->src[0]->type].vec_dot_type;
  16197. #if defined(GGML_USE_CLBLAST)
  16198. if (ggml_cl_can_mul_mat(node->src[0], node->src[1], node)) {
  16199. cur = ggml_cl_mul_mat_get_wsize(node->src[0], node->src[1], node);
  16200. } else
  16201. #endif
  16202. #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
  16203. if (ggml_compute_forward_mul_mat_use_blas(node)) {
  16204. if (node->src[0]->type != GGML_TYPE_F32) {
  16205. // here we need memory for fully dequantized matrix from src0
16206. // take into account that src0 can be broadcast into src1's dims [2,3]
  16207. cur = ggml_type_size(GGML_TYPE_F32)
  16208. * node->src[0]->ne[0]*node->src[0]->ne[1]
  16209. * node->src[1]->ne[2]*node->src[1]->ne[3];
  16210. }
  16211. } else
  16212. #endif
  16213. if (node->src[1]->type != vec_dot_type) {
  16214. cur = ggml_row_size(vec_dot_type, ggml_nelements(node->src[1]));
  16215. }
  16216. } break;
  16217. case GGML_OP_MUL_MAT_ID:
  16218. {
  16219. cur = 0;
  16220. const struct ggml_tensor * src0 = node->src[0];
  16221. const struct ggml_tensor * src1 = node->src[1];
  16222. const enum ggml_type vec_dot_type = type_traits[src0->type].vec_dot_type;
  16223. if (src1->type != vec_dot_type) {
  16224. cur += ggml_row_size(vec_dot_type, ggml_nelements(src1));
  16225. }
  16226. const int n_as = src0->ne[2];
  16227. cur += GGML_PAD(cur, sizeof(int64_t)); // align
  16228. cur += n_as * sizeof(int64_t); // matrix_row_counts
  16229. cur += n_as * src1->ne[2] * sizeof(int64_t); // matrix_rows
  16230. } break;
  16231. case GGML_OP_OUT_PROD:
  16232. {
  16233. if (ggml_is_quantized(node->src[0]->type)) {
  16234. cur = ggml_type_size(GGML_TYPE_F32) * node->src[0]->ne[0] * n_tasks;
  16235. }
  16236. } break;
  16237. case GGML_OP_SOFT_MAX:
  16238. case GGML_OP_ROPE:
  16239. {
  16240. cur = ggml_type_size(GGML_TYPE_F32) * node->ne[0] * n_tasks;
  16241. } break;
  16242. case GGML_OP_CONV_TRANSPOSE_1D:
  16243. {
  16244. GGML_ASSERT(node->src[0]->ne[3] == 1);
  16245. GGML_ASSERT(node->src[1]->ne[2] == 1);
  16246. GGML_ASSERT(node->src[1]->ne[3] == 1);
  16247. const int64_t ne00 = node->src[0]->ne[0]; // K
  16248. const int64_t ne01 = node->src[0]->ne[1]; // Cout
  16249. const int64_t ne02 = node->src[0]->ne[2]; // Cin
  16250. const int64_t ne10 = node->src[1]->ne[0]; // L
  16251. const int64_t ne11 = node->src[1]->ne[1]; // Cin
  16252. if ((node->src[0]->type == GGML_TYPE_F16 ||
  16253. node->src[0]->type == GGML_TYPE_BF16) &&
  16254. node->src[1]->type == GGML_TYPE_F32) {
  16255. cur += sizeof(ggml_fp16_t)*ne00*ne01*ne02;
  16256. cur += sizeof(ggml_fp16_t)*ne10*ne11;
  16257. } else if (node->src[0]->type == GGML_TYPE_F32 &&
  16258. node->src[1]->type == GGML_TYPE_F32) {
  16259. cur += sizeof(float)*ne00*ne01*ne02;
  16260. cur += sizeof(float)*ne10*ne11;
  16261. } else {
  16262. GGML_ASSERT(false);
  16263. }
  16264. } break;
  16265. case GGML_OP_CONV_TRANSPOSE_2D:
  16266. {
  16267. const int64_t ne00 = node->src[0]->ne[0]; // W
  16268. const int64_t ne01 = node->src[0]->ne[1]; // H
  16269. const int64_t ne02 = node->src[0]->ne[2]; // Channels Out
  16270. const int64_t ne03 = node->src[0]->ne[3]; // Channels In
  16271. const int64_t ne10 = node->src[1]->ne[0]; // W
  16272. const int64_t ne11 = node->src[1]->ne[1]; // H
  16273. const int64_t ne12 = node->src[1]->ne[2]; // Channels In
  16274. cur += sizeof(ggml_fp16_t)*ne00*ne01*ne02*ne03;
  16275. cur += sizeof(ggml_fp16_t)*ne10*ne11*ne12;
  16276. } break;
  16277. case GGML_OP_FLASH_ATTN:
  16278. {
  16279. const int64_t ne11 = ggml_up(node->src[1]->ne[1], GGML_SOFT_MAX_UNROLL);
  16280. if (node->src[1]->type == GGML_TYPE_F32) {
  16281. cur = sizeof(float)*ne11*n_tasks; // TODO: this can become (n_tasks-1)
  16282. cur += sizeof(float)*ne11*n_tasks; // this is overestimated by x2
  16283. } else if (node->src[1]->type == GGML_TYPE_F16) {
  16284. cur = sizeof(float)*ne11*n_tasks; // TODO: this can become (n_tasks-1)
  16285. cur += sizeof(float)*ne11*n_tasks; // this is overestimated by x2
  16286. } else if (node->src[1]->type == GGML_TYPE_BF16) {
  16287. cur = sizeof(float)*ne11*n_tasks; // TODO: this can become (n_tasks-1)
  16288. cur += sizeof(float)*ne11*n_tasks; // this is overestimated by x2
  16289. }
  16290. } break;
  16291. case GGML_OP_FLASH_ATTN_EXT:
  16292. {
  16293. const int64_t ne00 = node->src[0]->ne[0]; // D
  16294. cur = 2*sizeof(float)*ne00*n_tasks; // 2x head size
  16295. } break;
  16296. case GGML_OP_FLASH_FF:
  16297. {
  16298. if (node->src[1]->type == GGML_TYPE_F32) {
  16299. cur = sizeof(float)*node->src[1]->ne[1]*n_tasks; // TODO: this can become (n_tasks-1)
  16300. cur += sizeof(float)*node->src[1]->ne[1]*n_tasks; // this is overestimated by x2
  16301. } else if (node->src[1]->type == GGML_TYPE_F16) {
  16302. cur = sizeof(float)*node->src[1]->ne[1]*n_tasks; // TODO: this can become (n_tasks-1)
  16303. cur += sizeof(float)*node->src[1]->ne[1]*n_tasks; // this is overestimated by x2
  16304. } else if (node->src[1]->type == GGML_TYPE_BF16) {
  16305. cur = sizeof(float)*node->src[1]->ne[1]*n_tasks; // TODO: this can become (n_tasks-1)
  16306. cur += sizeof(float)*node->src[1]->ne[1]*n_tasks; // this is overestimated by x2
  16307. }
  16308. } break;
  16309. case GGML_OP_FLASH_ATTN_BACK:
  16310. {
  16311. const int64_t D = node->src[0]->ne[0];
  16312. const int64_t ne11 = ggml_up(node->src[1]->ne[1], GGML_SOFT_MAX_UNROLL);
  16313. const int64_t mxDn = MAX(D, ne11) * 2; // *2 because of S and SM in ggml_compute_forward_flash_attn_back
  16314. if (node->src[1]->type == GGML_TYPE_F32) {
  16315. cur = sizeof(float)*mxDn*n_tasks; // TODO: this can become (n_tasks-1)
  16316. cur += sizeof(float)*mxDn*n_tasks; // this is overestimated by x2
  16317. } else if (node->src[1]->type == GGML_TYPE_F16) {
  16318. cur = sizeof(float)*mxDn*n_tasks; // TODO: this can become (n_tasks-1)
  16319. cur += sizeof(float)*mxDn*n_tasks; // this is overestimated by x2
  16320. } else if (node->src[1]->type == GGML_TYPE_BF16) {
  16321. cur = sizeof(float)*mxDn*n_tasks; // TODO: this can become (n_tasks-1)
  16322. cur += sizeof(float)*mxDn*n_tasks; // this is overestimated by x2
  16323. }
  16324. } break;
  16325. case GGML_OP_CROSS_ENTROPY_LOSS:
  16326. {
  16327. cur = ggml_type_size(node->type)*(n_tasks + node->src[0]->ne[0]*n_tasks);
  16328. } break;
  16329. case GGML_OP_COUNT:
  16330. {
  16331. GGML_ASSERT(false);
  16332. } break;
  16333. default:
  16334. break;
  16335. }
  16336. work_size = MAX(work_size, cur);
  16337. }
  16338. if (work_size > 0) {
  16339. work_size += CACHE_LINE_SIZE*(n_threads - 1);
  16340. }
  16341. cplan.n_threads = MIN(max_tasks, n_threads);
  16342. cplan.work_size = work_size;
  16343. cplan.work_data = NULL;
  16344. return cplan;
  16345. }
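//
// Standalone usage sketch of the plan/compute pair (illustrative; the work buffer is malloc'ed
// here for brevity - ggml_graph_compute_with_ctx below does the equivalent with a work-buffer
// object allocated inside the context):
//
//   struct ggml_cplan cplan = ggml_graph_plan(graph, n_threads);
//   cplan.work_data = malloc(cplan.work_size);
//   enum ggml_status status = ggml_graph_compute(graph, &cplan);
//   free(cplan.work_data);
//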
  16346. enum ggml_status ggml_graph_compute(struct ggml_cgraph * cgraph, struct ggml_cplan * cplan) {
  16347. {
  16348. GGML_ASSERT(cplan);
  16349. GGML_ASSERT(cplan->n_threads > 0);
  16350. if (cplan->work_size > 0) {
  16351. GGML_ASSERT(cplan->work_data);
  16352. }
  16353. }
  16354. const int n_threads = cplan->n_threads;
  16355. struct ggml_compute_state_shared state_shared = {
  16356. /*.cgraph =*/ cgraph,
16357. /*.cplan                  =*/ cplan,
  16358. /*.perf_node_start_cycles =*/ 0,
  16359. /*.perf_node_start_time_us =*/ 0,
  16360. /*.n_threads =*/ n_threads,
  16361. /*.n_active =*/ n_threads,
  16362. /*.node_n =*/ -1,
  16363. /*.node_task =*/ GGML_TASK_TYPE_FINALIZE,
  16364. /*.abort_callback =*/ NULL,
  16365. /*.abort_callback_data =*/ NULL,
  16366. };
  16367. struct ggml_compute_state * workers = alloca(sizeof(struct ggml_compute_state)*n_threads);
  16368. // create thread pool
  16369. if (n_threads > 1) {
  16370. for (int j = 1; j < n_threads; ++j) {
  16371. workers[j] = (struct ggml_compute_state) {
  16372. .thrd = 0,
  16373. .ith = j,
  16374. .shared = &state_shared,
  16375. .ec = GGML_STATUS_SUCCESS,
  16376. };
  16377. const int rc = ggml_thread_create(&workers[j].thrd, NULL, ggml_graph_compute_thread, &workers[j]);
  16378. GGML_ASSERT(rc == 0);
  16379. UNUSED(rc);
  16380. }
  16381. }
  16382. workers[0].ith = 0;
  16383. workers[0].shared = &state_shared;
  16384. workers[0].ec = GGML_STATUS_SUCCESS;
  16385. const int64_t perf_start_cycles = ggml_perf_cycles();
  16386. const int64_t perf_start_time_us = ggml_perf_time_us();
  16387. // this is a work thread too
  16388. ggml_graph_compute_thread(&workers[0]);
  16389. enum ggml_status compute_status = workers[0].ec;
  16390. // don't leave affinity set on the main thread
  16391. clear_numa_thread_affinity();
  16392. // join or kill thread pool
  16393. if (n_threads > 1) {
  16394. for (int j = 1; j < n_threads; j++) {
  16395. const int rc = ggml_thread_join(workers[j].thrd, NULL);
  16396. GGML_ASSERT(rc == 0);
  16397. if (workers[j].ec != GGML_STATUS_SUCCESS)
  16398. compute_status = workers[j].ec;
  16399. }
  16400. }
  16401. // performance stats (graph)
  16402. {
  16403. int64_t perf_cycles_cur = ggml_perf_cycles() - perf_start_cycles;
  16404. int64_t perf_time_us_cur = ggml_perf_time_us() - perf_start_time_us;
  16405. cgraph->perf_runs++;
  16406. cgraph->perf_cycles += perf_cycles_cur;
  16407. cgraph->perf_time_us += perf_time_us_cur;
  16408. GGML_PRINT_DEBUG("%s: perf (%d) - cpu = %.3f / %.3f ms, wall = %.3f / %.3f ms\n",
  16409. __func__, cgraph->perf_runs,
  16410. (double) perf_cycles_cur / (double) ggml_cycles_per_ms(),
  16411. (double) cgraph->perf_cycles / (double) ggml_cycles_per_ms() / (double) cgraph->perf_runs,
  16412. (double) perf_time_us_cur / 1000.0,
  16413. (double) cgraph->perf_time_us / 1000.0 / cgraph->perf_runs);
  16414. }
  16415. return compute_status;
  16416. }
  16417. enum ggml_status ggml_graph_compute_with_ctx(struct ggml_context * ctx, struct ggml_cgraph * cgraph, int n_threads) {
  16418. struct ggml_cplan cplan = ggml_graph_plan(cgraph, n_threads);
  16419. struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_TYPE_WORK_BUFFER, cplan.work_size);
  16420. cplan.work_data = (uint8_t *)ctx->mem_buffer + obj->offs;
  16421. return ggml_graph_compute(cgraph, &cplan);
  16422. }
  16423. struct ggml_tensor * ggml_graph_get_tensor(struct ggml_cgraph * cgraph, const char * name) {
  16424. for (int i = 0; i < cgraph->n_leafs; i++) {
  16425. struct ggml_tensor * leaf = cgraph->leafs[i];
  16426. if (strcmp(leaf->name, name) == 0) {
  16427. return leaf;
  16428. }
  16429. }
  16430. for (int i = 0; i < cgraph->n_nodes; i++) {
  16431. struct ggml_tensor * node = cgraph->nodes[i];
  16432. if (strcmp(node->name, name) == 0) {
  16433. return node;
  16434. }
  16435. }
  16436. return NULL;
  16437. }
  16438. static void ggml_graph_export_leaf(const struct ggml_tensor * tensor, FILE * fout) {
  16439. const int64_t * ne = tensor->ne;
  16440. const size_t * nb = tensor->nb;
  16441. fprintf(fout, "%-6s %-12s %8d %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %16zu %16zu %16zu %16zu %16p %32s\n",
  16442. ggml_type_name(tensor->type),
  16443. ggml_op_name (tensor->op),
  16444. ggml_n_dims(tensor),
  16445. ne[0], ne[1], ne[2], ne[3],
  16446. nb[0], nb[1], nb[2], nb[3],
  16447. tensor->data,
  16448. tensor->name);
  16449. }
  16450. static void ggml_graph_export_node(const struct ggml_tensor * tensor, const char * arg, FILE * fout) {
  16451. const int64_t * ne = tensor->ne;
  16452. const size_t * nb = tensor->nb;
  16453. fprintf(fout, "%-6s %-6s %-12s %8d %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %16zu %16zu %16zu %16zu %16p %32s\n",
  16454. arg,
  16455. ggml_type_name(tensor->type),
  16456. ggml_op_name (tensor->op),
  16457. ggml_n_dims(tensor),
  16458. ne[0], ne[1], ne[2], ne[3],
  16459. nb[0], nb[1], nb[2], nb[3],
  16460. tensor->data,
  16461. tensor->name);
  16462. }
  16463. void ggml_graph_export(const struct ggml_cgraph * cgraph, const char * fname) {
  16464. uint64_t size_eval = 0;
  16465. // compute size of intermediate results
  16466. // TODO: does not take into account scratch buffers !!!!
  16467. for (int i = 0; i < cgraph->n_nodes; ++i) {
  16468. size_eval += ggml_nbytes_pad(cgraph->nodes[i]);
  16469. }
  16470. // print
  16471. {
  16472. FILE * fout = stdout;
  16473. fprintf(fout, "\n");
  16474. fprintf(fout, "%-16s %8x\n", "magic", GGML_FILE_MAGIC);
  16475. fprintf(fout, "%-16s %8d\n", "version", GGML_FILE_VERSION);
  16476. fprintf(fout, "%-16s %8d\n", "leafs", cgraph->n_leafs);
  16477. fprintf(fout, "%-16s %8d\n", "nodes", cgraph->n_nodes);
  16478. fprintf(fout, "%-16s %" PRIu64 "\n", "eval", size_eval);
  16479. // header
  16480. fprintf(fout, "\n");
  16481. fprintf(fout, "%-6s %-12s %8s %8s %8s %8s %8s %16s %16s %16s %16s %16s %16s\n",
  16482. "TYPE", "OP", "NDIMS", "NE0", "NE1", "NE2", "NE3", "NB0", "NB1", "NB2", "NB3", "DATA", "NAME");
  16483. for (int i = 0; i < cgraph->n_leafs; ++i) {
  16484. ggml_graph_export_leaf(cgraph->leafs[i], fout);
  16485. GGML_ASSERT(cgraph->leafs[i]->op == GGML_OP_NONE);
  16486. GGML_ASSERT(cgraph->leafs[i]->src[0] == NULL);
  16487. GGML_ASSERT(cgraph->leafs[i]->src[1] == NULL);
  16488. }
  16489. // header
  16490. fprintf(fout, "\n");
  16491. fprintf(fout, "%-6s %-6s %-12s %8s %8s %8s %8s %8s %16s %16s %16s %16s %8s %16s %16s\n",
  16492. "ARG", "TYPE", "OP", "NDIMS", "NE0", "NE1", "NE2", "NE3", "NB0", "NB1", "NB2", "NB3", "NTASKS", "DATA", "NAME");
  16493. for (int i = 0; i < cgraph->n_nodes; ++i) {
  16494. ggml_graph_export_node(cgraph->nodes[i], "DST", fout);
  16495. for (int j = 0; j < GGML_MAX_SRC; ++j) {
  16496. if (cgraph->nodes[i]->src[j]) {
  16497. ggml_graph_export_node(cgraph->nodes[i]->src[j], "SRC", fout);
  16498. }
  16499. }
  16500. fprintf(fout, "\n");
  16501. }
  16502. fprintf(fout, "\n");
  16503. }
  16504. // write binary data
  16505. {
  16506. FILE * fout = ggml_fopen(fname, "wb");
  16507. if (!fout) {
  16508. fprintf(stderr, "%s: failed to open %s\n", __func__, fname);
  16509. return;
  16510. }
  16511. // header
  16512. {
  16513. const uint32_t magic = GGML_FILE_MAGIC;
  16514. const uint32_t version = GGML_FILE_VERSION;
  16515. const uint32_t n_leafs = cgraph->n_leafs;
  16516. const uint32_t n_nodes = cgraph->n_nodes;
  16517. fwrite(&magic, sizeof(uint32_t), 1, fout);
  16518. fwrite(&version, sizeof(uint32_t), 1, fout);
  16519. fwrite(&n_leafs, sizeof(uint32_t), 1, fout);
  16520. fwrite(&n_nodes, sizeof(uint32_t), 1, fout);
  16521. fwrite(&size_eval, sizeof(uint64_t), 1, fout);
  16522. }
  16523. // leafs
  16524. {
  16525. for (int i = 0; i < cgraph->n_leafs; ++i) {
  16526. const struct ggml_tensor * tensor = cgraph->leafs[i];
  16527. const uint32_t type = tensor->type;
  16528. const uint32_t op = tensor->op;
  16529. fwrite(&type, sizeof(uint32_t), 1, fout);
  16530. fwrite(&op, sizeof(uint32_t), 1, fout);
  16531. for (int j = 0; j < GGML_MAX_DIMS; ++j) {
  16532. const uint64_t ne = tensor->ne[j];
  16533. const uint64_t nb = tensor->nb[j];
  16534. fwrite(&ne, sizeof(uint64_t), 1, fout);
  16535. fwrite(&nb, sizeof(uint64_t), 1, fout);
  16536. }
  16537. fwrite(tensor->name, sizeof(char), GGML_MAX_NAME, fout);
  16538. fwrite(tensor->op_params, sizeof(char), GGML_MAX_OP_PARAMS, fout);
  16539. // dump the data
  16540. // TODO: pad this to 32 byte boundary
  16541. {
  16542. const size_t size = ggml_nbytes(tensor);
  16543. fwrite(tensor->data, sizeof(char), size, fout);
  16544. }
  16545. }
  16546. }
  16547. // nodes
  16548. {
  16549. for (int i = 0; i < cgraph->n_nodes; ++i) {
  16550. const struct ggml_tensor * tensor = cgraph->nodes[i];
  16551. const uint32_t type = tensor->type;
  16552. const uint32_t op = tensor->op;
  16553. fwrite(&type, sizeof(uint32_t), 1, fout);
  16554. fwrite(&op, sizeof(uint32_t), 1, fout);
  16555. for (int j = 0; j < GGML_MAX_DIMS; ++j) {
  16556. const uint64_t ne = tensor->ne[j];
  16557. const uint64_t nb = tensor->nb[j];
  16558. fwrite(&ne, sizeof(uint64_t), 1, fout);
  16559. fwrite(&nb, sizeof(uint64_t), 1, fout);
  16560. }
  16561. fwrite(tensor->name, sizeof(char), GGML_MAX_NAME, fout);
  16562. fwrite(tensor->op_params, sizeof(char), GGML_MAX_OP_PARAMS, fout);
  16563. // output the op arguments
  16564. {
  16565. struct ggml_tensor * args[GGML_MAX_SRC] = { NULL };
  16566. for (int j = 0; j < GGML_MAX_SRC; ++j) {
  16567. args[j] = tensor->src[j];
  16568. }
  16569. for (int j = 0; j < GGML_MAX_SRC; ++j) {
  16570. if (args[j]) {
  16571. int32_t idx = -1;
  16572. // check if leaf
  16573. {
  16574. for (int k = 0; k < cgraph->n_leafs; ++k) {
  16575. if (args[j] == cgraph->leafs[k]) {
  16576. idx = k;
  16577. break;
  16578. }
  16579. }
  16580. }
  16581. // check if node
  16582. if (idx == -1) {
  16583. for (int k = 0; k < cgraph->n_nodes; ++k) {
  16584. if (args[j] == cgraph->nodes[k]) {
  16585. idx = cgraph->n_leafs + k;
  16586. break;
  16587. }
  16588. }
  16589. }
  16590. if (idx == -1) {
  16591. fprintf(stderr, "%s: failed to find tensor, arg = %d, node = %d\n", __func__, j, i);
  16592. fclose(fout);
  16593. return;
  16594. }
  16595. fwrite(&idx, sizeof(int32_t), 1, fout);
  16596. } else {
  16597. const int32_t nul = -1;
  16598. fwrite(&nul, sizeof(int32_t), 1, fout);
  16599. }
  16600. }
  16601. }
  16602. }
  16603. }
  16604. fclose(fout);
  16605. }
  16606. }
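//
// Export file layout written above: a header (magic, version, n_leafs, n_nodes, size_eval),
// followed by one record per leaf (type, op, ne/nb for each dim, name, op_params, raw data)
// and one record per node (type, op, ne/nb, name, op_params, plus GGML_MAX_SRC int32 argument
// indices - leaf indices first, node indices offset by n_leafs, and -1 meaning "no argument").
//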
struct ggml_cgraph * ggml_graph_import(const char * fname, struct ggml_context ** ctx_data, struct ggml_context ** ctx_eval) {
    assert(*ctx_data == NULL);
    assert(*ctx_eval == NULL);

    struct ggml_cgraph * result = NULL;

    struct ggml_tensor * data = NULL;

    // read file into data
    {
        FILE * fin = ggml_fopen(fname, "rb");
        if (!fin) {
            fprintf(stderr, "%s: failed to open %s\n", __func__, fname);
            return result;
        }

        size_t fsize = 0;

        fseek(fin, 0, SEEK_END);
        fsize = ftell(fin);
        fseek(fin, 0, SEEK_SET);

        // create the data context
        {
            const size_t overhead = 1*ggml_tensor_overhead();

            struct ggml_init_params params = {
                .mem_size   = fsize + overhead,
                .mem_buffer = NULL,
                .no_alloc   = false,
            };

            *ctx_data = ggml_init(params);

            if (!*ctx_data) {
                fprintf(stderr, "%s: failed to create ggml context\n", __func__);
                fclose(fin);
                return result;
            }
        }

        data = ggml_new_tensor_1d(*ctx_data, GGML_TYPE_I8, fsize);

        {
            const size_t ret = fread(data->data, sizeof(char), fsize, fin);
            if (ret != fsize) {
                fprintf(stderr, "%s: failed to read %s\n", __func__, fname);
                fclose(fin);
                return result;
            }
        }

        fclose(fin);
    }

    // populate result
    {
        char * ptr = (char *) data->data;

        const uint32_t magic = *(const uint32_t *) ptr; ptr += sizeof(magic);

        if (magic != GGML_FILE_MAGIC) {
            fprintf(stderr, "%s: invalid magic number, got %08x\n", __func__, magic);
            return result;
        }

        const uint32_t version = *(const uint32_t *) ptr; ptr += sizeof(version);

        if (version != GGML_FILE_VERSION) {
            fprintf(stderr, "%s: invalid version number\n", __func__);
            return result;
        }

        const uint32_t n_leafs   = *(const uint32_t *) ptr; ptr += sizeof(n_leafs);
        const uint32_t n_nodes   = *(const uint32_t *) ptr; ptr += sizeof(n_nodes);
        const uint64_t size_eval = *(const uint64_t *) ptr; ptr += sizeof(size_eval);
        const int      graph_size = MAX(n_leafs, n_nodes);

        // create the data context
        {
            const size_t overhead = (n_leafs + n_nodes)*ggml_tensor_overhead() + ggml_graph_overhead_custom(graph_size, false);

            struct ggml_init_params params = {
                .mem_size   = size_eval + overhead,
                .mem_buffer = NULL,
                .no_alloc   = true,
            };

            *ctx_eval = ggml_init(params);

            if (!*ctx_eval) {
                fprintf(stderr, "%s: failed to create ggml context\n", __func__);
                return result;
            }
        }

        result = ggml_new_graph_custom(*ctx_eval, graph_size, false);

        result->n_leafs = n_leafs;
        result->n_nodes = n_nodes;

        // leafs
        {
            uint32_t type;
            uint32_t op;

            for (uint32_t i = 0; i < n_leafs; ++i) {
                type = *(const uint32_t *) ptr; ptr += sizeof(type);
                op   = *(const uint32_t *) ptr; ptr += sizeof(op);

                int64_t ne[GGML_MAX_DIMS];
                size_t  nb[GGML_MAX_DIMS];

                for (int j = 0; j < GGML_MAX_DIMS; ++j) {
                    uint64_t ne_cur;
                    uint64_t nb_cur;

                    ne_cur = *(const uint64_t *) ptr; ptr += sizeof(ne_cur);
                    nb_cur = *(const uint64_t *) ptr; ptr += sizeof(nb_cur);

                    ne[j] = ne_cur;
                    nb[j] = nb_cur;
                }

                struct ggml_tensor * tensor = ggml_new_tensor(*ctx_eval, (enum ggml_type) type, GGML_MAX_DIMS, ne);

                tensor->op = (enum ggml_op) op;

                memcpy(tensor->name,      ptr, GGML_MAX_NAME);      ptr += GGML_MAX_NAME;
                memcpy(tensor->op_params, ptr, GGML_MAX_OP_PARAMS); ptr += GGML_MAX_OP_PARAMS;

                tensor->data = (void *) ptr;

                for (int j = 0; j < GGML_MAX_DIMS; ++j) {
                    tensor->nb[j] = nb[j];
                }

                result->leafs[i] = tensor;

                ptr += ggml_nbytes(tensor);

                fprintf(stderr, "%s: loaded leaf %u: '%16s', %9zu bytes\n", __func__, i, tensor->name, ggml_nbytes(tensor));
            }
        }

        ggml_set_no_alloc(*ctx_eval, false);

        // nodes
        {
            uint32_t type;
            uint32_t op;

            for (uint32_t i = 0; i < n_nodes; ++i) {
                type = *(const uint32_t *) ptr; ptr += sizeof(type);
                op   = *(const uint32_t *) ptr; ptr += sizeof(op);

                enum ggml_op eop = (enum ggml_op) op;

                int64_t ne[GGML_MAX_DIMS];
                size_t  nb[GGML_MAX_DIMS];

                for (int j = 0; j < GGML_MAX_DIMS; ++j) {
                    uint64_t ne_cur;
                    uint64_t nb_cur;

                    ne_cur = *(const uint64_t *) ptr; ptr += sizeof(ne_cur);
                    nb_cur = *(const uint64_t *) ptr; ptr += sizeof(nb_cur);

                    ne[j] = ne_cur;
                    nb[j] = nb_cur;
                }

                const char * ptr_name      = ptr; ptr += GGML_MAX_NAME;
                const char * ptr_op_params = ptr; ptr += GGML_MAX_OP_PARAMS;

                const int32_t * ptr_arg_idx = (const int32_t *) ptr; ptr += GGML_MAX_SRC*sizeof(int32_t);

                struct ggml_tensor * args[GGML_MAX_SRC] = { NULL };

                // parse args
                for (int j = 0; j < GGML_MAX_SRC; ++j) {
                    const int32_t arg_idx = ptr_arg_idx[j];

                    if (arg_idx == -1) {
                        continue;
                    }

                    if (arg_idx < result->n_leafs) {
                        args[j] = result->leafs[arg_idx];
                    } else {
                        args[j] = result->nodes[arg_idx - result->n_leafs];
                    }
                }

                // create the tensor
                // "view" operations are handled differently
                // TODO: handle inplace ops - currently a copy is always made

                struct ggml_tensor * tensor = NULL;

                switch (eop) {
                    // TODO: implement other view ops
                    case GGML_OP_RESHAPE:
                        {
                            tensor = ggml_reshape_4d(*ctx_eval, args[0], ne[0], ne[1], ne[2], ne[3]);
                        } break;
                    case GGML_OP_VIEW:
                        {
                            tensor = ggml_view_4d(*ctx_eval, args[0], ne[0], ne[1], ne[2], ne[3], 0, 0, 0, 0);

                            size_t offs;
                            memcpy(&offs, ptr_op_params, sizeof(offs));

                            tensor->data = ((char *) tensor->data) + offs;
                        } break;
                    case GGML_OP_TRANSPOSE:
                        {
                            tensor = ggml_transpose(*ctx_eval, args[0]);
                        } break;
                    case GGML_OP_PERMUTE:
                        {
                            tensor = ggml_view_4d(*ctx_eval, args[0], ne[0], ne[1], ne[2], ne[3], 0, 0, 0, 0);
                        } break;
                    default:
                        {
                            tensor = ggml_new_tensor(*ctx_eval, (enum ggml_type) type, GGML_MAX_DIMS, ne);

                            tensor->op = eop;
                        } break;
                }

                memcpy(tensor->name,      ptr_name,      GGML_MAX_NAME);
                memcpy(tensor->op_params, ptr_op_params, GGML_MAX_OP_PARAMS);

                for (int j = 0; j < GGML_MAX_DIMS; ++j) {
                    tensor->nb[j] = nb[j];
                }

                for (int j = 0; j < GGML_MAX_SRC; ++j) {
                    tensor->src[j] = args[j];
                }

                result->nodes[i] = tensor;

                fprintf(stderr, "%s: loaded node %u: '%16s', %9zu bytes\n", __func__, i, tensor->name, ggml_nbytes(tensor));
            }
        }
    }

    return result;
}
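
// Illustrative sketch (not part of ggml): a minimal export/import round trip,
// assuming a graph `gf` built elsewhere and the ggml_graph_export() declared in
// ggml.h. The file name is arbitrary; guarded with #if 0 so it is not compiled.
#if 0
static void example_graph_roundtrip(struct ggml_cgraph * gf) {
    // serialize leafs, nodes and tensor data to disk
    ggml_graph_export(gf, "graph.ggml");

    // import allocates two fresh contexts: one holding the raw file bytes,
    // one holding the reconstructed tensors/graph - free both when done
    struct ggml_context * ctx_data = NULL;
    struct ggml_context * ctx_eval = NULL;

    struct ggml_cgraph * gf2 = ggml_graph_import("graph.ggml", &ctx_data, &ctx_eval);
    if (gf2) {
        struct ggml_cplan cplan = ggml_graph_plan(gf2, /*n_threads =*/ 4);
        // ... allocate cplan.work_data (cplan.work_size bytes) and call
        //     ggml_graph_compute(gf2, &cplan) ...
    }

    ggml_free(ctx_eval);
    ggml_free(ctx_data);
}
#endif
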
void ggml_graph_print(const struct ggml_cgraph * cgraph) {
    int64_t perf_total_per_op_us[GGML_OP_COUNT] = {0};

    GGML_PRINT("=== GRAPH ===\n");

    GGML_PRINT("n_nodes = %d\n", cgraph->n_nodes);
    for (int i = 0; i < cgraph->n_nodes; i++) {
        struct ggml_tensor * node = cgraph->nodes[i];

        perf_total_per_op_us[node->op] += MAX(1, node->perf_time_us);

        GGML_PRINT(" - %3d: [ %5" PRId64 ", %5" PRId64 ", %5" PRId64 "] %16s %s (%3d) cpu = %7.3f / %7.3f ms, wall = %7.3f / %7.3f ms\n",
                i,
                node->ne[0], node->ne[1], node->ne[2],
                ggml_op_name(node->op), (node->flags & GGML_TENSOR_FLAG_PARAM) ? "x" : node->grad ? "g" : " ", node->perf_runs,
                (double) node->perf_cycles  / (double) ggml_cycles_per_ms(),
                (double) node->perf_cycles  / (double) ggml_cycles_per_ms() / (double) node->perf_runs,
                (double) node->perf_time_us / 1000.0,
                (double) node->perf_time_us / 1000.0 / node->perf_runs);
    }

    GGML_PRINT("n_leafs = %d\n", cgraph->n_leafs);
    for (int i = 0; i < cgraph->n_leafs; i++) {
        struct ggml_tensor * node = cgraph->leafs[i];

        GGML_PRINT(" - %3d: [ %5" PRId64 ", %5" PRId64 "] %8s %16s\n",
                i,
                node->ne[0], node->ne[1],
                ggml_op_name(node->op),
                ggml_get_name(node));
    }

    for (int i = 0; i < GGML_OP_COUNT; i++) {
        if (perf_total_per_op_us[i] == 0) {
            continue;
        }

        GGML_PRINT("perf_total_per_op_us[%16s] = %7.3f ms\n", ggml_op_name(i), (double) perf_total_per_op_us[i] / 1000.0);
    }

    GGML_PRINT("========================================\n");
}
// check if node is part of the graph
static bool ggml_graph_find(const struct ggml_cgraph * cgraph, const struct ggml_tensor * node) {
    if (cgraph == NULL) {
        return true;
    }

    for (int i = 0; i < cgraph->n_nodes; i++) {
        if (cgraph->nodes[i] == node) {
            return true;
        }
    }

    return false;
}

static struct ggml_tensor * ggml_graph_get_parent(const struct ggml_cgraph * cgraph, const struct ggml_tensor * node) {
    for (int i = 0; i < cgraph->n_nodes; i++) {
        struct ggml_tensor * parent = cgraph->nodes[i];

        if (parent->grad == node) {
            return parent;
        }
    }

    return NULL;
}

static void ggml_graph_dump_dot_node_edge(FILE * fp, const struct ggml_cgraph * gb, struct ggml_tensor * node, struct ggml_tensor * parent, const char * label) {
    struct ggml_tensor * gparent  = ggml_graph_get_parent(gb, node);
    struct ggml_tensor * gparent0 = ggml_graph_get_parent(gb, parent);

    fprintf(fp, " \"%p\":%s -> \"%p\":%s [ arrowhead = %s; style = %s; label = \"%s\"; ]\n",
            gparent0 ? (void *) gparent0 : (void *) parent,
            gparent0 ? "g" : "x",
            gparent ? (void *) gparent : (void *) node,
            gparent ? "g" : "x",
            gparent ? "empty" : "vee",
            gparent ? "dashed" : "solid",
            label);
}

static void ggml_graph_dump_dot_leaf_edge(FILE * fp, struct ggml_tensor * node, struct ggml_tensor * parent, const char * label) {
    fprintf(fp, " \"%p\":%s -> \"%p\":%s [ label = \"%s\"; ]\n",
            (void *) parent, "x",
            (void *) node, "x",
            label);
}
void ggml_graph_dump_dot(const struct ggml_cgraph * gb, const struct ggml_cgraph * gf, const char * filename) {
    char color[16];

    FILE * fp = ggml_fopen(filename, "w");
    GGML_ASSERT(fp);

    fprintf(fp, "digraph G {\n");
    fprintf(fp, " newrank = true;\n");
    fprintf(fp, " rankdir = LR;\n");

    for (int i = 0; i < gb->n_nodes; i++) {
        struct ggml_tensor * node = gb->nodes[i];

        if (ggml_graph_get_parent(gb, node) != NULL) {
            continue;
        }

        if (node->flags & GGML_TENSOR_FLAG_PARAM) {
            snprintf(color, sizeof(color), "yellow");
        } else if (node->grad) {
            if (ggml_graph_find(gf, node)) {
                snprintf(color, sizeof(color), "green");
            } else {
                snprintf(color, sizeof(color), "lightblue");
            }
        } else {
            snprintf(color, sizeof(color), "white");
        }

        fprintf(fp, " \"%p\" [ "
                    "style = filled; fillcolor = %s; shape = record; "
                    "label=\"",
                (void *) node, color);

        if (strlen(node->name) > 0) {
            fprintf(fp, "%s (%s)|", node->name, ggml_type_name(node->type));
        } else {
            fprintf(fp, "(%s)|", ggml_type_name(node->type));
        }

        if (ggml_is_matrix(node)) {
            fprintf(fp, "%d [%" PRId64 ", %" PRId64 "] | <x>%s", i, node->ne[0], node->ne[1], ggml_op_symbol(node->op));
        } else {
            fprintf(fp, "%d [%" PRId64 ", %" PRId64 ", %" PRId64 "] | <x>%s", i, node->ne[0], node->ne[1], node->ne[2], ggml_op_symbol(node->op));
        }

        if (node->grad) {
            fprintf(fp, " | <g>%s\"; ]\n", ggml_op_symbol(node->grad->op));
        } else {
            fprintf(fp, "\"; ]\n");
        }
    }

    for (int i = 0; i < gb->n_leafs; i++) {
        struct ggml_tensor * node = gb->leafs[i];

        snprintf(color, sizeof(color), "pink");

        fprintf(fp, " \"%p\" [ "
                    "style = filled; fillcolor = %s; shape = record; "
                    "label=\"<x>",
                (void *) node, color);

        if (strlen(node->name) > 0) {
            fprintf(fp, "%s (%s)|", node->name, ggml_type_name(node->type));
        } else {
            fprintf(fp, "(%s)|", ggml_type_name(node->type));
        }

        fprintf(fp, "CONST %d [%" PRId64 ", %" PRId64 "]", i, node->ne[0], node->ne[1]);
        if (ggml_nelements(node) < 5) {
            fprintf(fp, " | (");
            for (int j = 0; j < ggml_nelements(node); j++) {
                if (node->type == GGML_TYPE_I8 || node->type == GGML_TYPE_I16 || node->type == GGML_TYPE_I32) {
                    fprintf(fp, "%d", ggml_get_i32_1d(node, j));
                }
                else if (node->type == GGML_TYPE_F32 ||
                         node->type == GGML_TYPE_F16 ||
                         node->type == GGML_TYPE_BF16) {
                    fprintf(fp, "%.1e", (double)ggml_get_f32_1d(node, j));
                }
                else {
                    fprintf(fp, "#");
                }
                if (j < ggml_nelements(node) - 1) {
                    fprintf(fp, ", ");
                }
            }
            fprintf(fp, ")");
        }
        fprintf(fp, "\"; ]\n");
    }

    for (int i = 0; i < gb->n_nodes; i++) {
        struct ggml_tensor * node = gb->nodes[i];

        for (int j = 0; j < GGML_MAX_SRC; j++) {
            if (node->src[j]) {
                char label[16];
                snprintf(label, sizeof(label), "src %d", j);
                ggml_graph_dump_dot_node_edge(fp, gb, node, node->src[j], label);
            }
        }
    }

    for (int i = 0; i < gb->n_leafs; i++) {
        struct ggml_tensor * node = gb->leafs[i];

        for (int j = 0; j < GGML_MAX_SRC; j++) {
            if (node->src[j]) {
                char label[16];
                snprintf(label, sizeof(label), "src %d", j);
                ggml_graph_dump_dot_leaf_edge(fp, node, node->src[j], label);
            }
        }
    }

    fprintf(fp, "}\n");

    fclose(fp);

    GGML_PRINT("%s: dot -Tpng %s -o %s.png && open %s.png\n", __func__, filename, filename, filename);
}
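
// Illustrative sketch (not part of ggml): dumping forward/backward graphs to
// Graphviz; `gf`/`gb` are assumed to come from ggml_build_forward_expand /
// ggml_build_backward_expand, as in ggml_opt_resume() below.
#if 0
static void example_dump_graphs(struct ggml_cgraph * gf, struct ggml_cgraph * gb) {
    // forward graph only - no gradient highlighting
    ggml_graph_dump_dot(gf, NULL, "forward.dot");

    // backward graph; passing gf as the second argument colors nodes that
    // also appear in the forward graph green
    ggml_graph_dump_dot(gb, gf, "backward.dot");

    // render with: dot -Tpng forward.dot -o forward.png
}
#endif
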
////////////////////////////////////////////////////////////////////////////////

static void ggml_opt_set_params(int np, struct ggml_tensor * const ps[], const float * x) {
    int i = 0;
    for (int p = 0; p < np; ++p) {
        const int64_t ne = ggml_nelements(ps[p]);
        // TODO: add function to set tensor from array
        for (int64_t j = 0; j < ne; ++j) {
            ggml_set_f32_1d(ps[p], j, x[i++]);
        }
    }
}

static void ggml_opt_get_params(int np, struct ggml_tensor * const ps[], float * x) {
    int i = 0;
    for (int p = 0; p < np; ++p) {
        const int64_t ne = ggml_nelements(ps[p]);
        // TODO: add function to get all elements at once
        for (int64_t j = 0; j < ne; ++j) {
            x[i++] = ggml_get_f32_1d(ps[p], j);
        }
    }
}

static void ggml_opt_get_grad(int np, struct ggml_tensor * const ps[], float * g) {
    int64_t i = 0;
    for (int p = 0; p < np; ++p) {
        const int64_t ne = ggml_nelements(ps[p]);
        // TODO: add function to get all elements at once
        for (int64_t j = 0; j < ne; ++j) {
            g[i++] = ggml_get_f32_1d(ps[p]->grad, j);
        }
    }
}

static void ggml_opt_acc_grad(int np, struct ggml_tensor * const ps[], float * g, float scale) {
    int64_t i = 0;
    for (int p = 0; p < np; ++p) {
        const int64_t ne = ggml_nelements(ps[p]);
        // TODO: add function to get all elements at once
        for (int64_t j = 0; j < ne; ++j) {
            g[i++] += ggml_get_f32_1d(ps[p]->grad, j) * scale;
        }
    }
}
//
// Using AdamW - ref: https://arxiv.org/pdf/1711.05101v3.pdf
//
// (Original Adam - ref: https://arxiv.org/pdf/1412.6980.pdf)
//
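// For reference, the per-parameter update implemented below, with t = opt->iter,
// g the accumulated gradient after optional global-norm clipping (gclip), and
// the bias corrections folded into beta1h/beta2h; the schedule `sched` scales
// both the step size and the weight decay:
//
//   m = beta1*m + (1 - beta1)*g
//   v = beta2*v + (1 - beta2)*g*g
//   mhat = m/(1 - beta1^t)
//   vhat = v/(1 - beta2^t)
//   x = x*(1 - decay*sched) - alpha*sched*mhat/(sqrt(vhat) + eps)   // decoupled decay (AdamW)
//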
static enum ggml_opt_result ggml_opt_adam(
        struct ggml_context * ctx,
        struct ggml_opt_context * opt,
        struct ggml_opt_params params,
        struct ggml_tensor * f,
        struct ggml_cgraph * gf,
        struct ggml_cgraph * gb,
        ggml_opt_callback callback,
        void * callback_data) {
    GGML_ASSERT(ggml_is_scalar(f));

    // these will store the parameters we want to optimize
    struct ggml_tensor * ps[GGML_MAX_PARAMS];

    int np = 0;
    int64_t nx = 0;
    for (int i = 0; i < gf->n_nodes; ++i) {
        if (gf->nodes[i]->flags & GGML_TENSOR_FLAG_PARAM) {
            GGML_PRINT_DEBUG("found param %d: grad->op = %d\n", np, gf->nodes[i]->grad->op);

            GGML_ASSERT(np < GGML_MAX_PARAMS);

            ps[np++] = gf->nodes[i];
            nx += ggml_nelements(gf->nodes[i]);
        }
    }

    if ((opt->params.type != params.type) || (opt->nx != nx) || (opt->params.past != params.past)) {
        int iter = opt->iter;
        ggml_opt_init(opt->ctx, opt, params, nx);
        opt->iter = iter;
    }

    // constants
    float sched = params.adam.sched;
    const float alpha = params.adam.alpha;
    const float decay = params.adam.decay * alpha;
    const float beta1 = params.adam.beta1;
    const float beta2 = params.adam.beta2;
    const float eps   = params.adam.eps;
    const float gclip = params.adam.gclip;
    const int decay_min_ndim = params.adam.decay_min_ndim;
    const int n_accum = MAX(1, params.n_gradient_accumulation);
    const float accum_norm = 1.0f / (float) n_accum;

    float * g = opt->adam.g->data; // gradients
    float * m = opt->adam.m->data; // first moment
    float * v = opt->adam.v->data; // second moment

    float * pf = params.past > 0 ? opt->adam.pf->data : NULL; // past function values

    struct ggml_cplan cplan = ggml_graph_plan(gb, params.n_threads);
    struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_TYPE_WORK_BUFFER, cplan.work_size);
    cplan.work_data = (uint8_t *)ctx->mem_buffer + obj->offs;

    bool cancel = false;

    // compute the function value
    float fx = 0;
    ggml_set_zero(opt->adam.g);
    for (int accum_step = 0; accum_step < n_accum; ++accum_step) {
        if (callback) {
            callback(callback_data, accum_step, &sched, &cancel);
            if (cancel) {
                return GGML_OPT_RESULT_CANCEL;
            }
        }
        // ggml_graph_reset  (gf);
        ggml_set_f32      (f->grad, 1.0f);
        ggml_graph_compute(gb, &cplan);
        ggml_opt_acc_grad(np, ps, g, accum_norm);
        fx += ggml_get_f32_1d(f, 0);
    }
    fx *= accum_norm;

    opt->adam.fx_prev = fx;
    opt->adam.fx_best = opt->adam.fx_prev;
    if (pf) {
        pf[opt->iter % params.past] = opt->adam.fx_prev;
    }

    opt->loss_before = opt->adam.fx_prev;
    opt->loss_after  = opt->adam.fx_prev;

    // initialize
    if (opt->just_initialized) {
        opt->adam.n_no_improvement = 0;
        opt->just_initialized = false;
    }

    float * fx_best = &opt->adam.fx_best;
    float * fx_prev = &opt->adam.fx_prev;
    int * n_no_improvement = &opt->adam.n_no_improvement;

    int iter0 = opt->iter;

    // run the optimizer
    for (int t = 0; t < params.adam.n_iter; ++t) {
        opt->iter = iter0 + t + 1;
        GGML_PRINT_DEBUG  ("=== iter %d ===\n", t);

        GGML_PRINT_DEBUG  ("f = %10.6f\n", ggml_get_f32_1d(f, 0));
        GGML_PRINT_DEBUG_5("df/dx0 = %10.6f\n", ggml_get_f32_1d(ps[0]->grad, 0));
        GGML_PRINT_DEBUG_5("df/dx1 = %10.6f\n", ggml_get_f32_1d(ps[1]->grad, 0));

        for (int i = 0; i < np; ++i) {
            GGML_PRINT_DEBUG("param %d: %10.6f, g = %10.6f\n", i,
                    ggml_get_f32_1d(ps[i], 0), ggml_get_f32_1d(ps[i]->grad, 0));
        }

        const int64_t t_start_wall = ggml_time_us();
        const int64_t t_start_cpu  = ggml_cycles();
        UNUSED(t_start_wall);
        UNUSED(t_start_cpu);

        {
            float gnorm = 1.0f;
            if (gclip > 0.0f) {
                // gradient clipping
                ggml_float sum = 0.0;
                for (int64_t i = 0; i < nx; ++i) {
                    sum += (ggml_float)(g[i]*g[i]);
                }
                ggml_float norm = sqrt(sum);
                if (norm > (ggml_float) gclip) {
                    gnorm = (float) ((ggml_float) gclip / norm);
                }
            }
            const float beta1h = alpha*sched/(1.0f - powf(beta1, opt->iter));
            const float beta2h =        1.0f/(1.0f - powf(beta2, opt->iter));
            int64_t i = 0;
            for (int p = 0; p < np; ++p) {
                const int64_t ne = ggml_nelements(ps[p]);
                const float p_decay = ((ggml_n_dims(ps[p]) >= decay_min_ndim) ? decay : 0.0f) * sched;
                for (int64_t j = 0; j < ne; ++j) {
                    float x  = ggml_get_f32_1d(ps[p], j);
                    float g_ = g[i]*gnorm;
                    m[i] = m[i]*beta1 +    g_*(1.0f - beta1);
                    v[i] = v[i]*beta2 + g_*g_*(1.0f - beta2);
                    float mh = m[i]*beta1h;
                    float vh = v[i]*beta2h;
                    vh = sqrtf(vh) + eps;
                    x  = x*(1.0f - p_decay) - mh/vh;
                    ggml_set_f32_1d(ps[p], j, x);
                    ++i;
                }
            }
        }

        fx = 0;
        ggml_set_zero(opt->adam.g);
        for (int accum_step = 0; accum_step < n_accum; ++accum_step) {
            if (callback) {
                callback(callback_data, accum_step, &sched, &cancel);
                if (cancel) {
                    return GGML_OPT_RESULT_CANCEL;
                }
            }
            // ggml_graph_reset  (gf);
            ggml_set_f32      (f->grad, 1.0f);
            ggml_graph_compute(gb, &cplan);
            ggml_opt_acc_grad(np, ps, g, accum_norm);
            fx += ggml_get_f32_1d(f, 0);
        }
        fx *= accum_norm;

        opt->loss_after = fx;

        // check convergence
        if (fabsf(fx - fx_prev[0])/fx < params.adam.eps_f) {
            GGML_PRINT_DEBUG("converged\n");

            return GGML_OPT_RESULT_OK;
        }

        // delta-based convergence test
        if (pf != NULL) {
            // need at least params.past iterations to start checking for convergence
            if (params.past <= iter0 + t) {
                const float rate = (pf[(iter0 + t)%params.past] - fx)/fx;

                if (fabsf(rate) < params.delta) {
                    return GGML_OPT_RESULT_OK;
                }
            }

            pf[(iter0 + t)%params.past] = fx;
        }

        // check for improvement
        if (params.max_no_improvement > 0) {
            if (fx_best[0] > fx) {
                fx_best[0] = fx;
                n_no_improvement[0] = 0;
            } else {
                ++n_no_improvement[0];

                if (n_no_improvement[0] >= params.max_no_improvement) {
                    return GGML_OPT_RESULT_OK;
                }
            }
        }

        fx_prev[0] = fx;

        {
            const int64_t t_end_cpu = ggml_cycles();
            GGML_PRINT_DEBUG("time iter: %5.3f s\n", ((float)(t_end_cpu - t_start_cpu))/CLOCKS_PER_SEC);
            UNUSED(t_end_cpu);

            const int64_t t_end_wall = ggml_time_us();
            GGML_PRINT_DEBUG("wall time iter: %5.3f s\n", (t_end_wall - t_start_wall)/1e6);
            UNUSED(t_end_wall);
        }
    }

    return GGML_OPT_RESULT_DID_NOT_CONVERGE;
}
//
// L-BFGS
//
// the L-BFGS implementation below is based on the following implementation:
//
//   https://github.com/chokkan/liblbfgs
//

struct ggml_lbfgs_iteration_data {
    float alpha;
    float ys;
    float * s;
    float * y;
};
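
// The backtracking line search below accepts a step along direction d based on
// the standard sufficient-decrease and curvature conditions, with ftol and
// wolfe playing the roles of c1 and c2 (0 < c1 < c2 < 1):
//
//   Armijo:       f(x + step*d) <= f(x) + c1*step*(g.d)
//   Wolfe:        g(x + step*d).d >= c2*(g.d)
//   strong Wolfe: |g(x + step*d).d| <= c2*|g.d|
//
// A positive return value is the number of function evaluations performed;
// negative values are the GGML_LINESEARCH_* error codes (cancellation is
// signalled separately through *cancel).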
static enum ggml_opt_result linesearch_backtracking(
        const struct ggml_opt_params * params,
        int nx,
        float * x,
        float * fx,
        float * g,
        float * d,
        float * step,
        const float * xp,
        struct ggml_tensor * f,
        struct ggml_cgraph * gb,
        struct ggml_cplan  * cplan,
        const int np,
        struct ggml_tensor * ps[],
        bool * cancel,
        ggml_opt_callback callback,
        void * callback_data) {
    int count = 0;

    float width  = 0.0f;
    float dg     = 0.0f;
    float finit  = 0.0f;
    float dginit = 0.0f;
    float dgtest = 0.0f;

    const float dec = 0.5f;
    const float inc = 2.1f;

    const int n_accum = MAX(1, params->n_gradient_accumulation);
    const float accum_norm = 1.0f / (float) n_accum;

    if (*step <= 0.f) {
        return GGML_LINESEARCH_INVALID_PARAMETERS;
    }

    // compute the initial gradient in the search direction
    ggml_vec_dot_f32(nx, &dginit, 0, g, 0, d, 0, 1);

    // make sure that d points to a descent direction
    if (0 < dginit) {
        return GGML_LINESEARCH_FAIL;
    }

    // initialize local variables
    finit = *fx;
    dgtest = params->lbfgs.ftol*dginit;

    while (true) {
        ggml_vec_cpy_f32(nx, x, xp);
        ggml_vec_mad_f32(nx, x, d, *step);

        // evaluate the function and gradient values
        {
            ggml_opt_set_params(np, ps, x);

            *fx = 0;
            memset(g, 0, sizeof(float)*nx);
            for (int accum_step = 0; accum_step < n_accum; ++accum_step) {
                if (callback) {
                    // L-BFGS does not support learning rate -> ignore learning schedule
                    float sched = 0;
                    callback(callback_data, accum_step, &sched, cancel);
                    if (*cancel) {
                        return GGML_OPT_RESULT_CANCEL;
                    }
                }
                // ggml_graph_reset  (gf);
                ggml_set_f32      (f->grad, 1.0f);
                ggml_graph_compute(gb, cplan);
                ggml_opt_acc_grad(np, ps, g, accum_norm);
                *fx += ggml_get_f32_1d(f, 0);
            }
            *fx *= accum_norm;
        }

        ++count;

        if (*fx > finit + (*step)*dgtest) {
            width = dec;
        } else {
            // Armijo condition is satisfied
            if (params->lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_ARMIJO) {
                return count;
            }

            ggml_vec_dot_f32(nx, &dg, 0, g, 0, d, 0, 1);

            // check the Wolfe condition
            if (dg < params->lbfgs.wolfe * dginit) {
                width = inc;
            } else {
                if(params->lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_WOLFE) {
                    // regular Wolfe conditions
                    return count;
                }

                if(dg > -params->lbfgs.wolfe*dginit) {
                    width = dec;
                } else {
                    // strong Wolfe condition (GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE)
                    return count;
                }
            }
        }

        if (*step < params->lbfgs.min_step) {
            return GGML_LINESEARCH_MINIMUM_STEP;
        }
        if (*step > params->lbfgs.max_step) {
            return GGML_LINESEARCH_MAXIMUM_STEP;
        }
        if (params->lbfgs.max_linesearch <= count) {
            return GGML_LINESEARCH_MAXIMUM_ITERATIONS;
        }

        (*step) *= width;
    }

    GGML_ASSERT(false && "line search failed");

    return GGML_LINESEARCH_FAIL;
}
static enum ggml_opt_result ggml_opt_lbfgs(
        struct ggml_context * ctx,
        struct ggml_opt_context * opt,
        struct ggml_opt_params params,
        struct ggml_tensor * f,
        struct ggml_cgraph * gf,
        struct ggml_cgraph * gb,
        ggml_opt_callback callback,
        void * callback_data) {
    if (params.lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_WOLFE ||
        params.lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE) {
        if (params.lbfgs.wolfe <= params.lbfgs.ftol || 1.f <= params.lbfgs.wolfe) {
            return GGML_OPT_RESULT_INVALID_WOLFE;
        }
    }

    const int m = params.lbfgs.m;

    // these will store the parameters we want to optimize
    struct ggml_tensor * ps[GGML_MAX_PARAMS];

    int np = 0;
    int nx = 0;
    for (int i = 0; i < gf->n_nodes; ++i) {
        if (gf->nodes[i]->flags & GGML_TENSOR_FLAG_PARAM) {
            GGML_PRINT_DEBUG("found param %d: grad->op = %d\n", np, gf->nodes[i]->grad->op);

            GGML_ASSERT(np < GGML_MAX_PARAMS);

            ps[np++] = gf->nodes[i];
            nx += ggml_nelements(gf->nodes[i]);
        }
    }

    if ((opt->params.type != params.type) || (opt->nx != nx) || (opt->params.past != params.past) || (opt->params.lbfgs.m != params.lbfgs.m)) {
        int iter = opt->iter;
        ggml_opt_init(ctx, opt, params, nx);
        opt->iter = iter;
    }

    struct ggml_cplan cplan = ggml_graph_plan(gb, params.n_threads);
    struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_TYPE_WORK_BUFFER, cplan.work_size);
    cplan.work_data = (uint8_t *)ctx->mem_buffer + obj->offs;

    float * x  = opt->lbfgs.x->data;  // current parameters
    float * xp = opt->lbfgs.xp->data; // previous parameters
    float * g  = opt->lbfgs.g->data;  // current gradient
    float * gp = opt->lbfgs.gp->data; // previous gradient
    float * d  = opt->lbfgs.d->data;  // search direction

    float * pf = params.past > 0 ? opt->lbfgs.pf->data : NULL; // past function values

    const int n_accum = MAX(1, params.n_gradient_accumulation);
    const float accum_norm = 1.0f / (float) n_accum;

    float fx    = 0.0f; // cost function value
    float xnorm = 0.0f; // ||x||
    float gnorm = 0.0f; // ||g||

    // initialize x from the graph nodes
    ggml_opt_get_params(np, ps, x);

    // the L-BFGS memory
    float * lm_alpha = opt->lbfgs.lmal->data;
    float * lm_ys    = opt->lbfgs.lmys->data;
    float * lm_s     = opt->lbfgs.lms->data;
    float * lm_y     = opt->lbfgs.lmy->data;

    bool cancel = false;

    // evaluate the function value and its gradient
    {
        ggml_opt_set_params(np, ps, x);

        fx = 0;
        memset(g, 0, sizeof(float)*nx);
        for (int accum_step = 0; accum_step < n_accum; ++accum_step) {
            if (callback) {
                // L-BFGS does not support learning rate -> ignore learning schedule
                float sched = 0;
                callback(callback_data, accum_step, &sched, &cancel);
                if (cancel) {
                    return GGML_OPT_RESULT_CANCEL;
                }
            }
            // ggml_graph_reset  (gf);
            ggml_set_f32      (f->grad, 1.0f);
            ggml_graph_compute(gb, &cplan);
            ggml_opt_acc_grad(np, ps, g, accum_norm);
            fx += ggml_get_f32_1d(f, 0);
        }
        fx *= accum_norm;

        opt->loss_before = fx;
        opt->loss_after  = fx;
    }

    // search direction = -gradient
    ggml_vec_neg_f32(nx, d, g);

    // ||x||, ||g||
    ggml_vec_norm_f32(nx, &xnorm, x);
    ggml_vec_norm_f32(nx, &gnorm, g);

    if (xnorm < 1.0f) {
        xnorm = 1.0f;
    }

    // already optimized
    if (gnorm/xnorm <= params.lbfgs.eps) {
        return GGML_OPT_RESULT_OK;
    }

    if (opt->just_initialized) {
        if (pf) {
            pf[0] = fx;
        }
        opt->lbfgs.fx_best = fx;

        // initial step
        ggml_vec_norm_inv_f32(nx, &opt->lbfgs.step, d);
        opt->lbfgs.j                = 0;
        opt->lbfgs.k                = 1;
        opt->lbfgs.end              = 0;
        opt->lbfgs.n_no_improvement = 0;
        opt->just_initialized       = false;
    }

    float * fx_best        = &opt->lbfgs.fx_best;
    float * step           = &opt->lbfgs.step;
    int * j                = &opt->lbfgs.j;
    int * k                = &opt->lbfgs.k;
    int * end              = &opt->lbfgs.end;
    int * n_no_improvement = &opt->lbfgs.n_no_improvement;

    int ls    = 0;
    int bound = 0;

    float ys   = 0.0f;
    float yy   = 0.0f;
    float beta = 0.0f;

    int it = 0;

    while (true) {
        // store the current position and gradient vectors
        ggml_vec_cpy_f32(nx, xp, x);
        ggml_vec_cpy_f32(nx, gp, g);

        // TODO: instead of passing &cancel here, use the return code of the linesearch
        //       to determine if the optimization should be cancelled
        //       this is a simple change, but not doing this atm, since I don't have a nice
        //       way to test and don't want to break something with so many changes lined up
        ls = linesearch_backtracking(&params, nx, x, &fx, g, d, step, xp, f, gb, &cplan, np, ps, &cancel, callback, callback_data);
        if (cancel) {
            return GGML_OPT_RESULT_CANCEL;
        }

        if (ls < 0) {
            // linesearch failed - go back to the previous point and return
            ggml_vec_cpy_f32(nx, x, xp);
            ggml_vec_cpy_f32(nx, g, gp);

            return ls;
        }

        opt->loss_after = fx;

        ggml_vec_norm_f32(nx, &xnorm, x);
        ggml_vec_norm_f32(nx, &gnorm, g);

        GGML_PRINT_DEBUG("f = %10.6f\n", ggml_get_f32_1d(f, 0));

        if (xnorm < 1.0f) {
            xnorm = 1.0f;
        }
        if (gnorm/xnorm <= params.lbfgs.eps) {
            // converged
            return GGML_OPT_RESULT_OK;
        }

        // delta-based convergence test
        if (pf != NULL) {
            // need at least params.past iterations to start checking for convergence
            if (params.past <= k[0]) {
                const float rate = (pf[k[0]%params.past] - fx)/fx;

                if (fabsf(rate) < params.delta) {
                    return GGML_OPT_RESULT_OK;
                }
            }

            pf[k[0]%params.past] = fx;
        }

        // check for improvement
        if (params.max_no_improvement > 0) {
            if (fx < fx_best[0]) {
                fx_best[0] = fx;
                n_no_improvement[0] = 0;
            } else {
                n_no_improvement[0]++;

                if (n_no_improvement[0] >= params.max_no_improvement) {
                    return GGML_OPT_RESULT_OK;
                }
            }
        }

        if (params.lbfgs.n_iter != 0 && params.lbfgs.n_iter < it + 1) {
            // reached the maximum number of iterations
            return GGML_OPT_RESULT_DID_NOT_CONVERGE;
        }

        // update vectors s and y:
        //   s_{k+1} = x_{k+1} - x_{k} = \step * d_{k}.
        //   y_{k+1} = g_{k+1} - g_{k}.
        //
        ggml_vec_sub_f32(nx, &lm_s[end[0]*nx], x, xp);
        ggml_vec_sub_f32(nx, &lm_y[end[0]*nx], g, gp);

        // compute scalars ys and yy:
        //   ys = y^t \cdot s    -> 1 / \rho.
        //   yy = y^t \cdot y.
        //
        ggml_vec_dot_f32(nx, &ys, 0, &lm_y[end[0]*nx], 0, &lm_s[end[0]*nx], 0, 1);
        ggml_vec_dot_f32(nx, &yy, 0, &lm_y[end[0]*nx], 0, &lm_y[end[0]*nx], 0, 1);

        lm_ys[end[0]] = ys;

        // find new search direction
        //   ref: https://en.wikipedia.org/wiki/Limited-memory_BFGS

        bound = (m <= k[0]) ? m : k[0];
        k[0]++;
        it++;
        end[0] = (end[0] + 1)%m;

        // initialize search direction with -g
        ggml_vec_neg_f32(nx, d, g);

        j[0] = end[0];
        for (int i = 0; i < bound; ++i) {
            j[0] = (j[0] + m - 1) % m;
            // \alpha_{j} = \rho_{j} s^{t}_{j} \cdot q_{k+1}
            ggml_vec_dot_f32(nx, &lm_alpha[j[0]], 0, &lm_s[j[0]*nx], 0, d, 0, 1);
            lm_alpha[j[0]] /= lm_ys[j[0]];
            // q_{i} = q_{i+1} - \alpha_{i} y_{i}
            ggml_vec_mad_f32(nx, d, &lm_y[j[0]*nx], -lm_alpha[j[0]]);
        }

        ggml_vec_scale_f32(nx, d, ys/yy);

        for (int i = 0; i < bound; ++i) {
            // \beta_{j} = \rho_{j} y^t_{j} \cdot \gamma_{i}
            ggml_vec_dot_f32(nx, &beta, 0, &lm_y[j[0]*nx], 0, d, 0, 1);
            beta /= lm_ys[j[0]];
            // \gamma_{i+1} = \gamma_{i} + (\alpha_{j} - \beta_{j}) s_{j}
            ggml_vec_mad_f32(nx, d, &lm_s[j[0]*nx], lm_alpha[j[0]] - beta);
            j[0] = (j[0] + 1)%m;
        }

        step[0] = 1.0;
    }

    GGML_ASSERT(false && "lbfgs failed");

    return GGML_OPT_RESULT_DID_NOT_CONVERGE;
}
struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type) {
    struct ggml_opt_params result;

    switch (type) {
        case GGML_OPT_TYPE_ADAM:
            {
                result = (struct ggml_opt_params) {
                    .type       = GGML_OPT_TYPE_ADAM,
                    .graph_size = GGML_DEFAULT_GRAPH_SIZE,
                    .n_threads  = 1, // FIXME: GGML_DEFAULT_N_THREADS ?
                    .past       = 0,
                    .delta      = 1e-5f,

                    .max_no_improvement = 100,

                    .print_forward_graph  = true,
                    .print_backward_graph = true,

                    .n_gradient_accumulation = 1,

                    .adam = {
                        .n_iter = 10000,
                        .sched  = 1.000f,
                        .decay  = 0.0f,
                        .decay_min_ndim = 2,
                        .alpha  = 0.001f,
                        .beta1  = 0.9f,
                        .beta2  = 0.999f,
                        .eps    = 1e-8f,
                        .eps_f  = 1e-5f,
                        .eps_g  = 1e-3f,
                        .gclip  = 0.0f,
                    },
                };
            } break;
        case GGML_OPT_TYPE_LBFGS:
            {
                result = (struct ggml_opt_params) {
                    .type       = GGML_OPT_TYPE_LBFGS,
                    .graph_size = GGML_DEFAULT_GRAPH_SIZE,
                    .n_threads  = 1,
                    .past       = 0,
                    .delta      = 1e-5f,

                    .max_no_improvement = 0,

                    .print_forward_graph  = true,
                    .print_backward_graph = true,

                    .n_gradient_accumulation = 1,

                    .lbfgs = {
                        .m              = 6,
                        .n_iter         = 100,
                        .max_linesearch = 20,

                        .eps      = 1e-5f,
                        .ftol     = 1e-4f,
                        .wolfe    = 0.9f,
                        .min_step = 1e-20f,
                        .max_step = 1e+20f,

                        .linesearch = GGML_LINESEARCH_DEFAULT,
                    },
                };
            } break;
    }

    return result;
}
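
// Illustrative sketch (not part of ggml): starting from the defaults above and
// adjusting a few fields; all names are fields of struct ggml_opt_params.
#if 0
static struct ggml_opt_params example_adam_params(void) {
    struct ggml_opt_params params = ggml_opt_default_params(GGML_OPT_TYPE_ADAM);

    params.n_threads   = 4;     // the default is 1
    params.adam.n_iter = 500;   // down from the 10000 default
    params.past        = 10;    // enable the delta-based convergence test ...
    params.delta       = 1e-6f; // ... with a tighter threshold

    return params;
}
#endif
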
GGML_API void ggml_opt_init(
        struct ggml_context * ctx,
        struct ggml_opt_context * opt,
        struct ggml_opt_params params,
        int64_t nx) {
    opt->ctx = ctx;
    opt->params = params;
    opt->iter = 0;
    opt->nx = nx;
    opt->just_initialized = true;
    if (opt->ctx == NULL) {
        struct ggml_init_params ctx_opt_params;
        if (opt->params.type == GGML_OPT_TYPE_ADAM) {
            ctx_opt_params.mem_size = GGML_MEM_ALIGN*3 + ggml_tensor_overhead()*3 + ggml_type_size(GGML_TYPE_F32)*nx*3;
            if (opt->params.past > 0) {
                ctx_opt_params.mem_size += GGML_MEM_ALIGN + ggml_tensor_overhead() + ggml_type_size(GGML_TYPE_F32)*opt->params.past;
            }
        } else if (opt->params.type == GGML_OPT_TYPE_LBFGS) {
            ctx_opt_params.mem_size = GGML_MEM_ALIGN*9 + ggml_tensor_overhead()*9 + ggml_type_size(GGML_TYPE_F32)*(nx*5 + opt->params.lbfgs.m*2 + nx*opt->params.lbfgs.m*2);
            if (opt->params.past > 0) {
                ctx_opt_params.mem_size += GGML_MEM_ALIGN + ggml_tensor_overhead() + ggml_type_size(GGML_TYPE_F32)*opt->params.past;
            }
        }
        ctx_opt_params.mem_buffer = NULL;
        ctx_opt_params.no_alloc   = false;

        opt->ctx = ggml_init(ctx_opt_params);
    }
    switch (opt->params.type) {
        case GGML_OPT_TYPE_ADAM:
            {
                opt->adam.g  = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
                opt->adam.m  = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
                opt->adam.v  = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
                opt->adam.pf = params.past > 0
                    ? ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, params.past)
                    : NULL;
                ggml_set_zero(opt->adam.m);
                ggml_set_zero(opt->adam.v);
                if (opt->adam.pf) {
                    ggml_set_zero(opt->adam.pf);
                }
            } break;
        case GGML_OPT_TYPE_LBFGS:
            {
                opt->lbfgs.x  = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
                opt->lbfgs.xp = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
                opt->lbfgs.g  = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
                opt->lbfgs.gp = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
                opt->lbfgs.d  = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
                opt->lbfgs.pf = params.past > 0
                    ? ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, params.past)
                    : NULL;
                opt->lbfgs.lmal = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, params.lbfgs.m);
                opt->lbfgs.lmys = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, params.lbfgs.m);
                opt->lbfgs.lms  = ggml_new_tensor_2d(opt->ctx, GGML_TYPE_F32, nx, params.lbfgs.m);
                opt->lbfgs.lmy  = ggml_new_tensor_2d(opt->ctx, GGML_TYPE_F32, nx, params.lbfgs.m);
                ggml_set_zero(opt->lbfgs.x);
                ggml_set_zero(opt->lbfgs.xp);
                ggml_set_zero(opt->lbfgs.g);
                ggml_set_zero(opt->lbfgs.gp);
                ggml_set_zero(opt->lbfgs.d);
                if (opt->lbfgs.pf) {
                    ggml_set_zero(opt->lbfgs.pf);
                }
                ggml_set_zero(opt->lbfgs.lmal);
                ggml_set_zero(opt->lbfgs.lmys);
                ggml_set_zero(opt->lbfgs.lms);
                ggml_set_zero(opt->lbfgs.lmy);
            } break;
    }
}
enum ggml_opt_result ggml_opt(
        struct ggml_context * ctx,
        struct ggml_opt_params params,
        struct ggml_tensor * f) {
    bool free_ctx = false;
    if (ctx == NULL) {
        struct ggml_init_params params_ctx = {
            .mem_size   = 16*1024*1024,
            .mem_buffer = NULL,
            .no_alloc   = false,
        };

        ctx = ggml_init(params_ctx);
        if (ctx == NULL) {
            return GGML_OPT_RESULT_NO_CONTEXT;
        }

        free_ctx = true;
    }

    enum ggml_opt_result result = GGML_OPT_RESULT_OK;

    struct ggml_opt_context * opt = (struct ggml_opt_context *) alloca(sizeof(struct ggml_opt_context));

    ggml_opt_init(ctx, opt, params, 0);
    result = ggml_opt_resume(ctx, opt, f);

    if (free_ctx) {
        ggml_free(ctx);
    }

    return result;
}
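
// Illustrative sketch (not part of ggml): minimizing f(x) = sum((x - 3)^2) with
// the one-shot entry point above. ggml_set_param/ggml_sub/ggml_sqr/ggml_sum are
// the public graph-building API from ggml.h; sizes and values are arbitrary.
#if 0
static void example_ggml_opt(void) {
    struct ggml_init_params ip = {
        .mem_size   = 16*1024*1024,
        .mem_buffer = NULL,
        .no_alloc   = false,
    };
    struct ggml_context * ctx = ggml_init(ip);

    struct ggml_tensor * x = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 8);
    ggml_set_param(ctx, x); // mark x as trainable
    ggml_set_f32(x, 0.0f);  // initial guess

    struct ggml_tensor * t = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 8);
    ggml_set_f32(t, 3.0f);  // target

    // scalar loss: sum((x - t)^2)
    struct ggml_tensor * f = ggml_sum(ctx, ggml_sqr(ctx, ggml_sub(ctx, x, t)));

    enum ggml_opt_result res = ggml_opt(ctx, ggml_opt_default_params(GGML_OPT_TYPE_ADAM), f);

    // on GGML_OPT_RESULT_OK every element of x should be close to 3.0
    fprintf(stderr, "res = %d, x[0] = %f\n", res, (double) ggml_get_f32_1d(x, 0));

    ggml_free(ctx);
}
#endif
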
enum ggml_opt_result ggml_opt_resume(
        struct ggml_context * ctx,
        struct ggml_opt_context * opt,
        struct ggml_tensor * f) {
    // build forward + backward compute graphs
    struct ggml_cgraph * gf = ggml_new_graph_custom(ctx, opt->params.graph_size, true);
    ggml_build_forward_expand(gf, f);

    struct ggml_cgraph * gb = ggml_graph_dup(ctx, gf);
    ggml_build_backward_expand(ctx, gf, gb, true);

    return ggml_opt_resume_g(ctx, opt, f, gf, gb, NULL, NULL);
}

enum ggml_opt_result ggml_opt_resume_g(
        struct ggml_context * ctx,
        struct ggml_opt_context * opt,
        struct ggml_tensor * f,
        struct ggml_cgraph * gf,
        struct ggml_cgraph * gb,
        ggml_opt_callback callback,
        void * callback_data) {
    // build forward + backward compute graphs
    enum ggml_opt_result result = GGML_OPT_RESULT_OK;

    switch (opt->params.type) {
        case GGML_OPT_TYPE_ADAM:
            {
                result = ggml_opt_adam(ctx, opt, opt->params, f, gf, gb, callback, callback_data);
            } break;
        case GGML_OPT_TYPE_LBFGS:
            {
                result = ggml_opt_lbfgs(ctx, opt, opt->params, f, gf, gb, callback, callback_data);
            } break;
    }

    if (opt->params.print_forward_graph) {
        ggml_graph_print   (gf);
        ggml_graph_dump_dot(gf, NULL, "opt-forward.dot");
    }

    if (opt->params.print_backward_graph) {
        ggml_graph_print   (gb);
        ggml_graph_dump_dot(gb, gf, "opt-backward.dot");
    }

    return result;
}
////////////////////////////////////////////////////////////////////////////////

void ggml_set_input(struct ggml_tensor * tensor) {
    tensor->flags |= GGML_TENSOR_FLAG_INPUT;
}

void ggml_set_output(struct ggml_tensor * tensor) {
    tensor->flags |= GGML_TENSOR_FLAG_OUTPUT;
}

////////////////////////////////////////////////////////////////////////////////

void ggml_quantize_init(enum ggml_type type) {
    ggml_critical_section_start();

    switch (type) {
        case GGML_TYPE_IQ2_XXS:
        case GGML_TYPE_IQ2_XS:
        case GGML_TYPE_IQ2_S:
        case GGML_TYPE_IQ1_S:
        case GGML_TYPE_IQ1_M:   iq2xs_init_impl(type); break;
        case GGML_TYPE_IQ3_XXS: iq3xs_init_impl(256); break;
        case GGML_TYPE_IQ3_S:   iq3xs_init_impl(512); break;
        default: // nothing
            break;
    }

    ggml_critical_section_end();
}

void ggml_quantize_free(void) {
    ggml_critical_section_start();

    iq2xs_free_impl(GGML_TYPE_IQ2_XXS);
    iq2xs_free_impl(GGML_TYPE_IQ2_XS);
    iq2xs_free_impl(GGML_TYPE_IQ1_S);
    iq3xs_free_impl(256);

    ggml_critical_section_end();
}

bool ggml_quantize_requires_imatrix(enum ggml_type type) {
    return
        type == GGML_TYPE_IQ2_XXS ||
        type == GGML_TYPE_IQ2_XS  ||
        type == GGML_TYPE_IQ1_S;//  ||
        //type == GGML_TYPE_IQ1_M;
}
size_t ggml_quantize_chunk(
        enum ggml_type   type,
        const float    * src,
        void           * dst,
        int64_t          start,
        int64_t          nrows,
        int64_t          n_per_row,
        const float    * imatrix) {
    const int64_t n = (int64_t) nrows * n_per_row;

    if (ggml_quantize_requires_imatrix(type)) {
        GGML_ASSERT(imatrix != NULL);
    }

    GGML_ASSERT(start % type_traits[type].blck_size == 0);
    GGML_ASSERT(start % n_per_row == 0);

    ggml_quantize_init(type); // this is noop if already initialized

    const size_t start_row = start / n_per_row;
    const size_t row_size  = ggml_row_size(type, n_per_row);

    size_t result = 0;

    switch (type) {
        case GGML_TYPE_Q4_0:    result = quantize_q4_0(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
        case GGML_TYPE_Q4_1:    result = quantize_q4_1(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
        case GGML_TYPE_Q5_0:    result = quantize_q5_0(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
        case GGML_TYPE_Q5_1:    result = quantize_q5_1(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
        case GGML_TYPE_Q8_0:    result = quantize_q8_0(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
        case GGML_TYPE_Q2_K:    result = quantize_q2_K(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
        case GGML_TYPE_Q3_K:    result = quantize_q3_K(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
        case GGML_TYPE_Q4_K:    result = quantize_q4_K(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
        case GGML_TYPE_Q5_K:    result = quantize_q5_K(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
        case GGML_TYPE_Q6_K:    result = quantize_q6_K(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
        case GGML_TYPE_IQ2_XXS: result = quantize_iq2_xxs(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
        case GGML_TYPE_IQ2_XS:  result = quantize_iq2_xs (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
        case GGML_TYPE_IQ3_XXS: result = quantize_iq3_xxs(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
        case GGML_TYPE_IQ3_S:   result = quantize_iq3_s  (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
        case GGML_TYPE_IQ2_S:   result = quantize_iq2_s  (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
        case GGML_TYPE_IQ1_S:   result = quantize_iq1_s  (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
        case GGML_TYPE_IQ1_M:   result = quantize_iq1_m  (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
        case GGML_TYPE_IQ4_NL:  result = quantize_iq4_nl (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
#if QK_K == 64
        case GGML_TYPE_IQ4_XS:  result = quantize_iq4_nl (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
#else
        case GGML_TYPE_IQ4_XS:  result = quantize_iq4_xs (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
#endif
        case GGML_TYPE_F16:
            {
                size_t elemsize = sizeof(ggml_fp16_t);
                ggml_fp32_to_fp16_row(src + start, (ggml_fp16_t *)dst + start, n);
                result = n * elemsize;
            } break;
        case GGML_TYPE_BF16:
            {
                size_t elemsize = sizeof(ggml_bf16_t);
                ggml_fp32_to_bf16_row(src + start, (ggml_bf16_t *)dst + start, n);
                result = n * elemsize;
            } break;
        case GGML_TYPE_F32:
            {
                size_t elemsize = sizeof(float);
                result = n * elemsize;
                memcpy((uint8_t *)dst + start * elemsize, src + start, result);
            } break;
        default:
            assert(false);
    }

    GGML_ASSERT(result == nrows * row_size);

    return result;
}
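
// Illustrative sketch (not part of ggml): quantizing a contiguous f32 buffer to
// Q8_0 one whole tensor at a time (start = 0); sizes are assumptions for the
// example and n_per_row must be a multiple of the block size.
#if 0
static void example_quantize(const float * src, int64_t nrows, int64_t n_per_row) {
    const size_t row_size = ggml_row_size(GGML_TYPE_Q8_0, n_per_row);

    void * dst = malloc(nrows * row_size);

    // Q8_0 does not require an importance matrix, so imatrix may be NULL
    const size_t n_bytes = ggml_quantize_chunk(GGML_TYPE_Q8_0, src, dst,
            /*start =*/ 0, nrows, n_per_row, /*imatrix =*/ NULL);
    GGML_ASSERT(n_bytes == nrows * row_size);

    // ... use dst ...
    free(dst);
}
#endif
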
////////////////////////////////////////////////////////////////////////////////

struct gguf_str {
    uint64_t n;  // GGUFv2
    char * data;
};

static const size_t GGUF_TYPE_SIZE[GGUF_TYPE_COUNT] = {
    [GGUF_TYPE_UINT8]   = sizeof(uint8_t),
    [GGUF_TYPE_INT8]    = sizeof(int8_t),
    [GGUF_TYPE_UINT16]  = sizeof(uint16_t),
    [GGUF_TYPE_INT16]   = sizeof(int16_t),
    [GGUF_TYPE_UINT32]  = sizeof(uint32_t),
    [GGUF_TYPE_INT32]   = sizeof(int32_t),
    [GGUF_TYPE_FLOAT32] = sizeof(float),
    [GGUF_TYPE_BOOL]    = sizeof(bool),
    [GGUF_TYPE_STRING]  = sizeof(struct gguf_str),
    [GGUF_TYPE_UINT64]  = sizeof(uint64_t),
    [GGUF_TYPE_INT64]   = sizeof(int64_t),
    [GGUF_TYPE_FLOAT64] = sizeof(double),
    [GGUF_TYPE_ARRAY]   = 0, // undefined
};
static_assert(GGUF_TYPE_COUNT == 13, "GGUF_TYPE_COUNT != 13");

static const char * GGUF_TYPE_NAME[GGUF_TYPE_COUNT] = {
    [GGUF_TYPE_UINT8]   = "u8",
    [GGUF_TYPE_INT8]    = "i8",
    [GGUF_TYPE_UINT16]  = "u16",
    [GGUF_TYPE_INT16]   = "i16",
    [GGUF_TYPE_UINT32]  = "u32",
    [GGUF_TYPE_INT32]   = "i32",
    [GGUF_TYPE_FLOAT32] = "f32",
    [GGUF_TYPE_BOOL]    = "bool",
    [GGUF_TYPE_STRING]  = "str",
    [GGUF_TYPE_ARRAY]   = "arr",
    [GGUF_TYPE_UINT64]  = "u64",
    [GGUF_TYPE_INT64]   = "i64",
    [GGUF_TYPE_FLOAT64] = "f64",
};
static_assert(GGUF_TYPE_COUNT == 13, "GGUF_TYPE_COUNT != 13");

union gguf_value {
    uint8_t  uint8;
    int8_t   int8;
    uint16_t uint16;
    int16_t  int16;
    uint32_t uint32;
    int32_t  int32;
    float    float32;
    uint64_t uint64;
    int64_t  int64;
    double   float64;
    bool     bool_;

    struct gguf_str str;

    struct {
        enum gguf_type type;

        uint64_t n;  // GGUFv2
        void * data;
    } arr;
};

struct gguf_kv {
    struct gguf_str key;

    enum  gguf_type  type;
    union gguf_value value;
};

struct gguf_header {
    char magic[4];

    uint32_t version;
    uint64_t n_tensors; // GGUFv2
    uint64_t n_kv;      // GGUFv2
};

struct gguf_tensor_info {
    struct gguf_str name;

    uint32_t n_dims;
    uint64_t ne[GGML_MAX_DIMS];

    enum ggml_type type;

    uint64_t offset; // offset from start of `data`, must be a multiple of `ALIGNMENT`

    // for writing API
    const void * data;
    size_t size;
};

struct gguf_context {
    struct gguf_header header;

    struct gguf_kv          * kv;
    struct gguf_tensor_info * infos;

    size_t alignment;
    size_t offset;    // offset of `data` from beginning of file
    size_t size;      // size of `data` in bytes

    //uint8_t * padding;
    void * data;
};
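
// On disk a GGUF file mirrors the structs above (the reader below consumes it
// in this order; the alignment comes from GGUF_DEFAULT_ALIGNMENT, or from a
// "general.alignment" KV when one is present):
//
//   header        magic "GGUF", version, n_tensors, n_kv
//   kv pairs      n_kv x (key string, value type, value)
//   tensor infos  n_tensors x (name, n_dims, ne[], type, offset into data)
//   padding       up to `alignment`
//   tensor data   each tensor starts at its recorded `offset`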
static size_t gguf_type_size(enum gguf_type type) {
    GGML_ASSERT(0 <= type && type < GGUF_TYPE_COUNT);
    return GGUF_TYPE_SIZE[type];
}

static void gguf_tensor_info_sanitize(struct gguf_tensor_info * info) {
    GGML_ASSERT(info->n_dims <= GGML_MAX_DIMS);
    GGML_ASSERT(0 <= info->type && info->type < GGML_TYPE_COUNT);

    for (uint32_t i = 0; i < info->n_dims; ++i) {
        GGML_ASSERT(info->ne[i] > 0);
    }

    // prevent overflow for total number of elements
    GGML_ASSERT(INT64_MAX/info->ne[1] > info->ne[0]);
    GGML_ASSERT(INT64_MAX/info->ne[2] > info->ne[0]*info->ne[1]);
    GGML_ASSERT(INT64_MAX/info->ne[3] > info->ne[0]*info->ne[1]*info->ne[2]);
}

static bool gguf_fread_el(FILE * file, void * dst, size_t size, size_t * offset) {
    const size_t n = fread(dst, 1, size, file);
    *offset += n;
    return n == size;
}

static bool gguf_fread_str(FILE * file, struct gguf_str * p, size_t * offset) {
    p->n    = 0;
    p->data = NULL;

    bool ok = true;

    ok = ok && gguf_fread_el(file, &p->n, sizeof(p->n), offset);
    // early exit if the string length is invalid - prevents integer overflow below
    if (p->n == SIZE_MAX) {
        fprintf(stderr, "%s: invalid string length (%" PRIu64 ")\n", __func__, p->n);
        return false;
    }

    p->data = GGML_CALLOC(p->n + 1, 1);

    ok = ok && gguf_fread_el(file, p->data, p->n, offset);

    return ok;
}

static void gguf_free_kv(struct gguf_kv * kv) {
    if (kv->key.data) {
        GGML_FREE(kv->key.data);
    }

    if (kv->type == GGUF_TYPE_STRING) {
        if (kv->value.str.data) {
            GGML_FREE(kv->value.str.data);
        }
    }

    if (kv->type == GGUF_TYPE_ARRAY) {
        if (kv->value.arr.data) {
            if (kv->value.arr.type == GGUF_TYPE_STRING) {
                for (uint64_t j = 0; j < kv->value.arr.n; ++j) {
                    struct gguf_str * str = &((struct gguf_str *) kv->value.arr.data)[j];
                    if (str->data) {
                        GGML_FREE(str->data);
                    }
                }
            }
            GGML_FREE(kv->value.arr.data);
        }
    }
}

struct gguf_context * gguf_init_empty(void) {
    struct gguf_context * ctx = GGML_CALLOC(1, sizeof(struct gguf_context));

    memcpy(ctx->header.magic, GGUF_MAGIC, sizeof(ctx->header.magic));
    ctx->header.version   = GGUF_VERSION;
    ctx->header.n_tensors = 0;
    ctx->header.n_kv      = 0;

    ctx->kv    = NULL;
    ctx->infos = NULL;

    ctx->alignment = GGUF_DEFAULT_ALIGNMENT;
    ctx->offset    = 0;
    ctx->size      = 0;

    ctx->data = NULL;

    return ctx;
}
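
// Illustrative sketch (not part of ggml): building a GGUF file in memory and
// writing it out, assuming the writer API declared in ggml.h (gguf_set_val_*,
// gguf_add_tensor, gguf_write_to_file) and a named tensor `t` from some context.
#if 0
static void example_gguf_write(struct ggml_tensor * t) {
    struct gguf_context * ctx = gguf_init_empty();

    gguf_set_val_str(ctx, "general.name", "example");
    gguf_set_val_u32(ctx, "example.n_layer", 12);

    gguf_add_tensor(ctx, t); // records the tensor info and its data pointer

    // false: write the tensor data as well, not only the metadata
    gguf_write_to_file(ctx, "example.gguf", /*only_meta =*/ false);

    gguf_free(ctx);
}
#endif
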
struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_params params) {
    FILE * file = ggml_fopen(fname, "rb");
    if (!file) {
        return NULL;
    }

    // offset from start of file
    size_t offset = 0;

    char magic[4];

    // check the magic before making allocations
    {
        gguf_fread_el(file, &magic, sizeof(magic), &offset);

        for (uint32_t i = 0; i < sizeof(magic); i++) {
            if (magic[i] != GGUF_MAGIC[i]) {
                fprintf(stderr, "%s: invalid magic characters '%c%c%c%c'\n", __func__, magic[0], magic[1], magic[2], magic[3]);
                fclose(file);
                return NULL;
            }
        }
    }

    bool ok = true;

    struct gguf_context * ctx = GGML_CALLOC(1, sizeof(struct gguf_context));

    // read the header
    {
        strncpy(ctx->header.magic, magic, 4);

        ctx->kv    = NULL;
        ctx->infos = NULL;
        ctx->data  = NULL;

        ok = ok && gguf_fread_el(file, &ctx->header.version,   sizeof(ctx->header.version),   &offset);
        ok = ok && gguf_fread_el(file, &ctx->header.n_tensors, sizeof(ctx->header.n_tensors), &offset);
        ok = ok && gguf_fread_el(file, &ctx->header.n_kv,      sizeof(ctx->header.n_kv),      &offset);

        if (ctx->header.version == 1) {
            fprintf(stderr, "%s: GGUFv1 is no longer supported. please use a more up-to-date version\n", __func__);
            fclose(file);
            gguf_free(ctx);
            return NULL;
        }
        // sanity checks to prevent integer/buffer overflows
        ok = ok && (ctx->header.n_tensors < (SIZE_MAX/2)/sizeof(struct gguf_tensor_info));
        ok = ok && (ctx->header.n_tensors < (SIZE_MAX/2)/ggml_tensor_overhead());
        ok = ok && (ctx->header.n_kv      < (SIZE_MAX/2)/sizeof(struct gguf_kv));

        if (!ok) {
            fprintf(stderr, "%s: failed to read header\n", __func__);
            fclose(file);
            gguf_free(ctx);
            return NULL;
        }
    }

    // read the kv pairs
    {
        const uint64_t n_kv = ctx->header.n_kv;

        // header.n_kv will hold the actual value of pairs that were successfully read in the loop below
        ctx->header.n_kv = 0;
        ctx->kv = GGML_CALLOC(n_kv, sizeof(struct gguf_kv));

        for (uint64_t i = 0; i < n_kv; ++i) {
            struct gguf_kv * kv = &ctx->kv[i];

            //fprintf(stderr, "%s: reading kv %d\n", __func__, i);

            ok = ok && gguf_fread_str(file, &kv->key,                    &offset);
            ok = ok && gguf_fread_el (file, &kv->type, sizeof(kv->type), &offset);

            //fprintf(stderr, "%s: reading kv with key %s\n", __func__, kv->key.data);

            switch (kv->type) {
                case GGUF_TYPE_UINT8:   ok = ok && gguf_fread_el (file, &kv->value.uint8,   sizeof(kv->value.uint8),   &offset); break;
                case GGUF_TYPE_INT8:    ok = ok && gguf_fread_el (file, &kv->value.int8,    sizeof(kv->value.int8),    &offset); break;
                case GGUF_TYPE_UINT16:  ok = ok && gguf_fread_el (file, &kv->value.uint16,  sizeof(kv->value.uint16),  &offset); break;
                case GGUF_TYPE_INT16:   ok = ok && gguf_fread_el (file, &kv->value.int16,   sizeof(kv->value.int16),   &offset); break;
                case GGUF_TYPE_UINT32:  ok = ok && gguf_fread_el (file, &kv->value.uint32,  sizeof(kv->value.uint32),  &offset); break;
                case GGUF_TYPE_INT32:   ok = ok && gguf_fread_el (file, &kv->value.int32,   sizeof(kv->value.int32),   &offset); break;
                case GGUF_TYPE_FLOAT32: ok = ok && gguf_fread_el (file, &kv->value.float32, sizeof(kv->value.float32), &offset); break;
                case GGUF_TYPE_UINT64:  ok = ok && gguf_fread_el (file, &kv->value.uint64,  sizeof(kv->value.uint64),  &offset); break;
                case GGUF_TYPE_INT64:   ok = ok && gguf_fread_el (file, &kv->value.int64,   sizeof(kv->value.int64),   &offset); break;
                case GGUF_TYPE_FLOAT64: ok = ok && gguf_fread_el (file, &kv->value.float64, sizeof(kv->value.float64), &offset); break;
                case GGUF_TYPE_BOOL:    ok = ok && gguf_fread_el (file, &kv->value.bool_,   sizeof(kv->value.bool_),   &offset); break;
                case GGUF_TYPE_STRING:  ok = ok && gguf_fread_str(file, &kv->value.str,                                &offset); break;
                case GGUF_TYPE_ARRAY:
                    {
                        ok = ok && gguf_fread_el(file, &kv->value.arr.type, sizeof(kv->value.arr.type), &offset);
                        ok = ok && gguf_fread_el(file, &kv->value.arr.n,    sizeof(kv->value.arr.n),    &offset);

                        switch (kv->value.arr.type) {
                            case GGUF_TYPE_UINT8:
                            case GGUF_TYPE_INT8:
                            case GGUF_TYPE_UINT16:
                            case GGUF_TYPE_INT16:
                            case GGUF_TYPE_UINT32:
                            case GGUF_TYPE_INT32:
                            case GGUF_TYPE_FLOAT32:
                            case GGUF_TYPE_UINT64:
                            case GGUF_TYPE_INT64:
                            case GGUF_TYPE_FLOAT64:
                            case GGUF_TYPE_BOOL:
                                {
// prevent integer overflow in the allocation below
  18078. if (kv->value.arr.n >= SIZE_MAX/gguf_type_size(kv->value.arr.type)) {
  18079. fprintf(stderr, "%s: array size is too large (%" PRIu64 ")\n", __func__, kv->value.arr.n);
  18080. fclose(file);
  18081. gguf_free(ctx);
  18082. return NULL;
  18083. }
  18084. kv->value.arr.data = GGML_CALLOC(kv->value.arr.n, gguf_type_size(kv->value.arr.type));
  18085. ok = ok && gguf_fread_el(file, kv->value.arr.data, kv->value.arr.n * gguf_type_size(kv->value.arr.type), &offset);
  18086. } break;
  18087. case GGUF_TYPE_STRING:
  18088. {
// prevent integer overflow in the allocation below
  18090. if (kv->value.arr.n >= SIZE_MAX/sizeof(struct gguf_str)) {
  18091. fprintf(stderr, "%s: array size is too large (%" PRIu64 ")\n", __func__, kv->value.arr.n);
  18092. fclose(file);
  18093. gguf_free(ctx);
  18094. return NULL;
  18095. }
  18096. kv->value.arr.data = GGML_CALLOC(kv->value.arr.n, sizeof(struct gguf_str));
  18097. for (uint64_t j = 0; j < kv->value.arr.n; ++j) {
  18098. ok = ok && gguf_fread_str(file, &((struct gguf_str *) kv->value.arr.data)[j], &offset);
  18099. }
  18100. } break;
  18101. case GGUF_TYPE_ARRAY:
  18102. default: GGML_ASSERT(false && "invalid type"); break;
  18103. }
  18104. } break;
  18105. default: GGML_ASSERT(false && "invalid type");
  18106. }
  18107. if (!ok) {
  18108. break;
  18109. }
  18110. ctx->header.n_kv++;
  18111. }
  18112. if (!ok) {
  18113. fprintf(stderr, "%s: failed to read key-value pairs\n", __func__);
  18114. fclose(file);
  18115. gguf_free(ctx);
  18116. return NULL;
  18117. }
  18118. }
  18119. // read the tensor infos
  18120. if (ctx->header.n_tensors > 0) {
  18121. ctx->infos = GGML_CALLOC(ctx->header.n_tensors, sizeof(struct gguf_tensor_info));
  18122. for (uint64_t i = 0; i < ctx->header.n_tensors; ++i) {
  18123. struct gguf_tensor_info * info = &ctx->infos[i];
  18124. for (int j = 0; j < GGML_MAX_DIMS; ++j) {
  18125. info->ne[j] = 1;
  18126. }
  18127. ok = ok && gguf_fread_str(file, &info->name, &offset);
  18128. ok = ok && gguf_fread_el (file, &info->n_dims, sizeof(info->n_dims), &offset);
  18129. ok = ok && (info->n_dims <= GGML_MAX_DIMS);
  18130. for (uint32_t j = 0; j < info->n_dims; ++j) {
  18131. ok = ok && gguf_fread_el(file, &info->ne[j], sizeof(info->ne[j]), &offset);
  18132. }
  18133. ok = ok && gguf_fread_el (file, &info->type, sizeof(info->type), &offset);
  18134. ok = ok && gguf_fread_el (file, &info->offset, sizeof(info->offset), &offset);
  18135. // TODO: return an error instead of crashing with GGML_ASSERT
  18136. gguf_tensor_info_sanitize(info);
// make sure there are no duplicate tensor names
  18138. for (uint64_t j = 0; j < i; ++j) {
  18139. if (strcmp(info->name.data, ctx->infos[j].name.data) == 0) {
  18140. fprintf(stderr, "%s: duplicated tensor name %s\n", __func__, info->name.data);
  18141. ok = false;
  18142. }
  18143. }
  18144. if (!ok) {
  18145. fprintf(stderr, "%s: failed to read tensor info\n", __func__);
  18146. fclose(file);
  18147. gguf_free(ctx);
  18148. return NULL;
  18149. }
  18150. }
  18151. }
  18152. ctx->alignment = GGUF_DEFAULT_ALIGNMENT;
  18153. int alignment_idx = gguf_find_key(ctx, "general.alignment");
  18154. if (alignment_idx != -1) {
  18155. ctx->alignment = gguf_get_val_u32(ctx, alignment_idx);
  18156. }
  18157. // we require the data section to be aligned, so take into account any padding
  18158. {
  18159. const size_t offset_pad = offset % ctx->alignment;
  18160. if (offset_pad != 0) {
  18161. offset += ctx->alignment - offset_pad;
  18162. fseek(file, offset, SEEK_SET);
  18163. }
  18164. }
  18165. // store the current file offset - this is where the data section starts
  18166. ctx->offset = offset;
  18167. // compute the total size of the data section, taking into account the alignment
  18168. {
  18169. ctx->size = 0;
  18170. for (uint64_t i = 0; i < ctx->header.n_tensors; ++i) {
  18171. struct gguf_tensor_info * info = &ctx->infos[i];
  18172. const int64_t ne =
  18173. (int64_t) info->ne[0] *
  18174. (int64_t) info->ne[1] *
  18175. (int64_t) info->ne[2] *
  18176. (int64_t) info->ne[3];
  18177. if (ne % ggml_blck_size(info->type) != 0) {
  18178. fprintf(stderr, "%s: tensor '%s' of type %d (%s) number of elements (%" PRId64 ") is not a multiple of block size (%d)\n",
  18179. __func__, info->name.data, (int)info->type, ggml_type_name(info->type), ne, ggml_blck_size(info->type));
  18180. fclose(file);
  18181. gguf_free(ctx);
  18182. return NULL;
  18183. }
  18184. const size_t size_cur = ggml_row_size(info->type, ne);
  18185. ctx->size += GGML_PAD(size_cur, ctx->alignment);
  18186. }
  18187. }
  18188. // load the tensor data only if requested
  18189. if (params.ctx != NULL) {
// if params.no_alloc is set, we create "empty" tensors and do not read the binary blob
// otherwise, we load the binary blob into the created ggml_context as well, and point the "data" members
// of the ggml_tensor structs to the appropriate locations in the binary blob
  18193. // compute the exact size needed for the new ggml_context
  18194. const size_t mem_size =
  18195. params.no_alloc ?
  18196. (ctx->header.n_tensors )*ggml_tensor_overhead() :
  18197. (ctx->header.n_tensors + 1)*ggml_tensor_overhead() + ctx->size;
  18198. struct ggml_init_params pdata = {
  18199. .mem_size = mem_size,
  18200. .mem_buffer = NULL,
  18201. .no_alloc = params.no_alloc,
  18202. };
  18203. *params.ctx = ggml_init(pdata);
  18204. struct ggml_context * ctx_data = *params.ctx;
  18205. struct ggml_tensor * data = NULL;
  18206. if (!params.no_alloc) {
  18207. data = ggml_new_tensor_1d(ctx_data, GGML_TYPE_I8, ctx->size);
  18208. ok = ok && data != NULL;
  18209. // read the binary blob with the tensor data
  18210. ok = ok && gguf_fread_el(file, data->data, ctx->size, &offset);
  18211. if (!ok) {
  18212. fprintf(stderr, "%s: failed to read tensor data\n", __func__);
  18213. fclose(file);
  18214. ggml_free(ctx_data);
  18215. gguf_free(ctx);
  18216. return NULL;
  18217. }
  18218. ctx->data = data->data;
  18219. }
  18220. ggml_set_no_alloc(ctx_data, true);
  18221. // create the tensors
  18222. for (uint64_t i = 0; i < ctx->header.n_tensors; ++i) {
  18223. const int64_t ne[GGML_MAX_DIMS] = {
  18224. ctx->infos[i].ne[0],
  18225. ctx->infos[i].ne[1],
  18226. ctx->infos[i].ne[2],
  18227. ctx->infos[i].ne[3],
  18228. };
  18229. struct ggml_tensor * cur = ggml_new_tensor(ctx_data, ctx->infos[i].type, ctx->infos[i].n_dims, ne);
  18230. ok = ok && cur != NULL;
  18231. if (!ok) {
  18232. break;
  18233. }
  18234. ggml_set_name(cur, ctx->infos[i].name.data);
  18235. // point the data member to the appropriate location in the binary blob using the tensor infos
  18236. if (!params.no_alloc) {
  18237. //cur->data = (char *) data->data + ctx->infos[i].offset - ctx->offset; // offset from start of file
  18238. cur->data = (char *) data->data + ctx->infos[i].offset; // offset from data
  18239. }
  18240. }
  18241. if (!ok) {
  18242. fprintf(stderr, "%s: failed to read the tensor data\n", __func__);
  18243. fclose(file);
  18244. ggml_free(ctx_data);
  18245. gguf_free(ctx);
  18246. return NULL;
  18247. }
  18248. ggml_set_no_alloc(ctx_data, params.no_alloc);
  18249. }
  18250. fclose(file);
  18251. return ctx;
  18252. }
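// Usage sketch (illustrative, not part of the original source): loading a GGUF file with
// the function defined above, reading metadata only. The file name and the error handling
// are placeholders, and struct gguf_init_params is assumed from the public header.
//
//     struct ggml_context * meta = NULL;
//
//     struct gguf_init_params params = {
//         /*.no_alloc =*/ true,   // do not read the tensor data blob
//         /*.ctx      =*/ &meta,  // receive a ggml_context holding "empty" tensors
//     };
//
//     struct gguf_context * gctx = gguf_init_from_file("model.gguf", params);
//     if (gctx == NULL) {
//         // handle error
//     }
//
//     // ... inspect gctx with the accessors below ...
//
//     gguf_free(gctx);
//     ggml_free(meta);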
  18253. void gguf_free(struct gguf_context * ctx) {
  18254. if (ctx == NULL) {
  18255. return;
  18256. }
  18257. if (ctx->kv) {
  18258. // free string memory - not great..
  18259. for (uint64_t i = 0; i < ctx->header.n_kv; ++i) {
  18260. gguf_free_kv(&ctx->kv[i]);
  18261. }
  18262. GGML_FREE(ctx->kv);
  18263. }
  18264. if (ctx->infos) {
  18265. for (uint64_t i = 0; i < ctx->header.n_tensors; ++i) {
  18266. struct gguf_tensor_info * info = &ctx->infos[i];
  18267. if (info->name.data) {
  18268. GGML_FREE(info->name.data);
  18269. }
  18270. }
  18271. GGML_FREE(ctx->infos);
  18272. }
  18273. GGML_FREE(ctx);
  18274. }
  18275. const char * gguf_type_name(enum gguf_type type) {
  18276. return GGUF_TYPE_NAME[type];
  18277. }
  18278. int gguf_get_version(const struct gguf_context * ctx) {
  18279. return ctx->header.version;
  18280. }
  18281. size_t gguf_get_alignment(const struct gguf_context * ctx) {
  18282. return ctx->alignment;
  18283. }
  18284. size_t gguf_get_data_offset(const struct gguf_context * ctx) {
  18285. return ctx->offset;
  18286. }
  18287. void * gguf_get_data(const struct gguf_context * ctx) {
  18288. return ctx->data;
  18289. }
  18290. int gguf_get_n_kv(const struct gguf_context * ctx) {
  18291. return ctx->header.n_kv;
  18292. }
  18293. int gguf_find_key(const struct gguf_context * ctx, const char * key) {
  18294. // return -1 if key not found
  18295. int keyfound = -1;
  18296. const int n_kv = gguf_get_n_kv(ctx);
  18297. for (int i = 0; i < n_kv; ++i) {
  18298. if (strcmp(key, gguf_get_key(ctx, i)) == 0) {
  18299. keyfound = i;
  18300. break;
  18301. }
  18302. }
  18303. return keyfound;
  18304. }
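// Example sketch (added for illustration): reading a typed value by key. The key name
// "general.alignment" is the one used by the loader above; any other key works the same way.
//
//     const int idx = gguf_find_key(gctx, "general.alignment");
//     if (idx >= 0 && gguf_get_kv_type(gctx, idx) == GGUF_TYPE_UINT32) {
//         const uint32_t alignment = gguf_get_val_u32(gctx, idx);
//     }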
  18305. const char * gguf_get_key(const struct gguf_context * ctx, int key_id) {
  18306. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  18307. return ctx->kv[key_id].key.data;
  18308. }
  18309. enum gguf_type gguf_get_kv_type(const struct gguf_context * ctx, int key_id) {
  18310. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  18311. return ctx->kv[key_id].type;
  18312. }
  18313. enum gguf_type gguf_get_arr_type(const struct gguf_context * ctx, int key_id) {
  18314. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  18315. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_ARRAY);
  18316. return ctx->kv[key_id].value.arr.type;
  18317. }
  18318. const void * gguf_get_arr_data(const struct gguf_context * ctx, int key_id) {
  18319. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  18320. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_ARRAY);
  18321. return ctx->kv[key_id].value.arr.data;
  18322. }
  18323. const char * gguf_get_arr_str(const struct gguf_context * ctx, int key_id, int i) {
  18324. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  18325. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_ARRAY);
  18326. struct gguf_kv * kv = &ctx->kv[key_id];
  18327. struct gguf_str * str = &((struct gguf_str *) kv->value.arr.data)[i];
  18328. return str->data;
  18329. }
  18330. int gguf_get_arr_n(const struct gguf_context * ctx, int key_id) {
  18331. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  18332. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_ARRAY);
  18333. return ctx->kv[key_id].value.arr.n;
  18334. }
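// Example sketch (added for illustration): iterating a string-array value. The key name
// "tokenizer.ggml.tokens" is only an example of an array-typed key.
//
//     const int idx = gguf_find_key(gctx, "tokenizer.ggml.tokens");
//     if (idx >= 0 && gguf_get_kv_type(gctx, idx) == GGUF_TYPE_ARRAY &&
//         gguf_get_arr_type(gctx, idx) == GGUF_TYPE_STRING) {
//         const int n = gguf_get_arr_n(gctx, idx);
//         for (int i = 0; i < n; ++i) {
//             const char * tok = gguf_get_arr_str(gctx, idx, i);
//             // ... use tok ...
//         }
//     }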
  18335. uint8_t gguf_get_val_u8(const struct gguf_context * ctx, int key_id) {
  18336. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  18337. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_UINT8);
  18338. return ctx->kv[key_id].value.uint8;
  18339. }
  18340. int8_t gguf_get_val_i8(const struct gguf_context * ctx, int key_id) {
  18341. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  18342. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_INT8);
  18343. return ctx->kv[key_id].value.int8;
  18344. }
  18345. uint16_t gguf_get_val_u16(const struct gguf_context * ctx, int key_id) {
  18346. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  18347. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_UINT16);
  18348. return ctx->kv[key_id].value.uint16;
  18349. }
  18350. int16_t gguf_get_val_i16(const struct gguf_context * ctx, int key_id) {
  18351. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  18352. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_INT16);
  18353. return ctx->kv[key_id].value.int16;
  18354. }
  18355. uint32_t gguf_get_val_u32(const struct gguf_context * ctx, int key_id) {
  18356. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  18357. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_UINT32);
  18358. return ctx->kv[key_id].value.uint32;
  18359. }
  18360. int32_t gguf_get_val_i32(const struct gguf_context * ctx, int key_id) {
  18361. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  18362. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_INT32);
  18363. return ctx->kv[key_id].value.int32;
  18364. }
  18365. float gguf_get_val_f32(const struct gguf_context * ctx, int key_id) {
  18366. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  18367. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_FLOAT32);
  18368. return ctx->kv[key_id].value.float32;
  18369. }
  18370. uint64_t gguf_get_val_u64(const struct gguf_context * ctx, int key_id) {
  18371. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  18372. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_UINT64);
  18373. return ctx->kv[key_id].value.uint64;
  18374. }
  18375. int64_t gguf_get_val_i64(const struct gguf_context * ctx, int key_id) {
  18376. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  18377. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_INT64);
  18378. return ctx->kv[key_id].value.int64;
  18379. }
  18380. double gguf_get_val_f64(const struct gguf_context * ctx, int key_id) {
  18381. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  18382. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_FLOAT64);
  18383. return ctx->kv[key_id].value.float64;
  18384. }
  18385. bool gguf_get_val_bool(const struct gguf_context * ctx, int key_id) {
  18386. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  18387. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_BOOL);
  18388. return ctx->kv[key_id].value.bool_;
  18389. }
  18390. const char * gguf_get_val_str(const struct gguf_context * ctx, int key_id) {
  18391. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  18392. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_STRING);
  18393. return ctx->kv[key_id].value.str.data;
  18394. }
  18395. const void * gguf_get_val_data(const struct gguf_context * ctx, int key_id) {
  18396. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  18397. GGML_ASSERT(ctx->kv[key_id].type != GGUF_TYPE_ARRAY);
  18398. GGML_ASSERT(ctx->kv[key_id].type != GGUF_TYPE_STRING);
  18399. return &ctx->kv[key_id].value;
  18400. }
  18401. int gguf_get_n_tensors(const struct gguf_context * ctx) {
  18402. return ctx->header.n_tensors;
  18403. }
  18404. int gguf_find_tensor(const struct gguf_context * ctx, const char * name) {
  18405. // return -1 if tensor not found
  18406. int tensorfound = -1;
  18407. const int n_tensors = gguf_get_n_tensors(ctx);
  18408. for (int i = 0; i < n_tensors; ++i) {
  18409. if (strcmp(name, gguf_get_tensor_name(ctx, i)) == 0) {
  18410. tensorfound = i;
  18411. break;
  18412. }
  18413. }
  18414. return tensorfound;
  18415. }
  18416. size_t gguf_get_tensor_offset(const struct gguf_context * ctx, int i) {
  18417. return ctx->infos[i].offset;
  18418. }
  18419. char * gguf_get_tensor_name(const struct gguf_context * ctx, int i) {
  18420. return ctx->infos[i].name.data;
  18421. }
  18422. enum ggml_type gguf_get_tensor_type(const struct gguf_context * ctx, int i) {
  18423. return ctx->infos[i].type;
  18424. }
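// Example sketch (added for illustration): locating a tensor's data within the file.
// Per-tensor offsets are relative to the start of the data section, so the absolute
// file offset is the data offset plus the tensor offset.
//
//     const int n_tensors = gguf_get_n_tensors(gctx);
//     for (int i = 0; i < n_tensors; ++i) {
//         const char * name        = gguf_get_tensor_name(gctx, i);
//         const size_t file_offset = gguf_get_data_offset(gctx) + gguf_get_tensor_offset(gctx, i);
//         // ... seek to file_offset and read the tensor named "name" ...
//     }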
// returns the index of the key if it already exists, otherwise appends a new key and returns its index
  18426. static int gguf_get_or_add_key(struct gguf_context * ctx, const char * key) {
  18427. const int idx = gguf_find_key(ctx, key);
  18428. if (idx >= 0) {
  18429. return idx;
  18430. }
  18431. const int n_kv = gguf_get_n_kv(ctx);
  18432. ctx->kv = realloc(ctx->kv, (n_kv + 1) * sizeof(struct gguf_kv));
  18433. ctx->kv[n_kv].key.n = strlen(key);
  18434. ctx->kv[n_kv].key.data = strdup(key);
  18435. ctx->header.n_kv++;
  18436. return n_kv;
  18437. }
  18438. void gguf_remove_key(struct gguf_context * ctx, const char * key) {
  18439. const int idx = gguf_find_key(ctx, key);
  18440. if (idx >= 0) {
  18441. const int n_kv = gguf_get_n_kv(ctx);
  18442. gguf_free_kv(&ctx->kv[idx]);
  18443. for (int i = idx; i < n_kv-1; ++i) {
  18444. ctx->kv[i] = ctx->kv[i+1];
  18445. }
  18446. ctx->kv = realloc(ctx->kv, (n_kv - 1) * sizeof(struct gguf_kv));
  18447. ctx->header.n_kv--;
  18448. }
  18449. }
  18450. void gguf_set_val_u8(struct gguf_context * ctx, const char * key, uint8_t val) {
  18451. const int idx = gguf_get_or_add_key(ctx, key);
  18452. ctx->kv[idx].type = GGUF_TYPE_UINT8;
  18453. ctx->kv[idx].value.uint8 = val;
  18454. }
  18455. void gguf_set_val_i8(struct gguf_context * ctx, const char * key, int8_t val) {
  18456. const int idx = gguf_get_or_add_key(ctx, key);
  18457. ctx->kv[idx].type = GGUF_TYPE_INT8;
  18458. ctx->kv[idx].value.int8 = val;
  18459. }
  18460. void gguf_set_val_u16(struct gguf_context * ctx, const char * key, uint16_t val) {
  18461. const int idx = gguf_get_or_add_key(ctx, key);
  18462. ctx->kv[idx].type = GGUF_TYPE_UINT16;
  18463. ctx->kv[idx].value.uint16 = val;
  18464. }
  18465. void gguf_set_val_i16(struct gguf_context * ctx, const char * key, int16_t val) {
  18466. const int idx = gguf_get_or_add_key(ctx, key);
  18467. ctx->kv[idx].type = GGUF_TYPE_INT16;
  18468. ctx->kv[idx].value.int16 = val;
  18469. }
  18470. void gguf_set_val_u32(struct gguf_context * ctx, const char * key, uint32_t val) {
  18471. const int idx = gguf_get_or_add_key(ctx, key);
  18472. ctx->kv[idx].type = GGUF_TYPE_UINT32;
  18473. ctx->kv[idx].value.uint32 = val;
  18474. }
  18475. void gguf_set_val_i32(struct gguf_context * ctx, const char * key, int32_t val) {
  18476. const int idx = gguf_get_or_add_key(ctx, key);
  18477. ctx->kv[idx].type = GGUF_TYPE_INT32;
  18478. ctx->kv[idx].value.int32 = val;
  18479. }
  18480. void gguf_set_val_f32(struct gguf_context * ctx, const char * key, float val) {
  18481. const int idx = gguf_get_or_add_key(ctx, key);
  18482. ctx->kv[idx].type = GGUF_TYPE_FLOAT32;
  18483. ctx->kv[idx].value.float32 = val;
  18484. }
  18485. void gguf_set_val_u64(struct gguf_context * ctx, const char * key, uint64_t val) {
  18486. const int idx = gguf_get_or_add_key(ctx, key);
  18487. ctx->kv[idx].type = GGUF_TYPE_UINT64;
  18488. ctx->kv[idx].value.uint64 = val;
  18489. }
  18490. void gguf_set_val_i64(struct gguf_context * ctx, const char * key, int64_t val) {
  18491. const int idx = gguf_get_or_add_key(ctx, key);
  18492. ctx->kv[idx].type = GGUF_TYPE_INT64;
  18493. ctx->kv[idx].value.int64 = val;
  18494. }
  18495. void gguf_set_val_f64(struct gguf_context * ctx, const char * key, double val) {
  18496. const int idx = gguf_get_or_add_key(ctx, key);
  18497. ctx->kv[idx].type = GGUF_TYPE_FLOAT64;
  18498. ctx->kv[idx].value.float64 = val;
  18499. }
  18500. void gguf_set_val_bool(struct gguf_context * ctx, const char * key, bool val) {
  18501. const int idx = gguf_get_or_add_key(ctx, key);
  18502. ctx->kv[idx].type = GGUF_TYPE_BOOL;
  18503. ctx->kv[idx].value.bool_ = val;
  18504. }
  18505. void gguf_set_val_str(struct gguf_context * ctx, const char * key, const char * val) {
  18506. const int idx = gguf_get_or_add_key(ctx, key);
  18507. ctx->kv[idx].type = GGUF_TYPE_STRING;
  18508. ctx->kv[idx].value.str.n = strlen(val);
  18509. ctx->kv[idx].value.str.data = strdup(val);
  18510. }
  18511. void gguf_set_arr_data(struct gguf_context * ctx, const char * key, enum gguf_type type, const void * data, int n) {
  18512. const int idx = gguf_get_or_add_key(ctx, key);
  18513. ctx->kv[idx].type = GGUF_TYPE_ARRAY;
  18514. ctx->kv[idx].value.arr.type = type;
  18515. ctx->kv[idx].value.arr.n = n;
  18516. ctx->kv[idx].value.arr.data = GGML_CALLOC(n, gguf_type_size(type));
  18517. memcpy(ctx->kv[idx].value.arr.data, data, n*gguf_type_size(type));
  18518. }
  18519. void gguf_set_arr_str(struct gguf_context * ctx, const char * key, const char ** data, int n) {
  18520. const int idx = gguf_get_or_add_key(ctx, key);
  18521. ctx->kv[idx].type = GGUF_TYPE_ARRAY;
  18522. ctx->kv[idx].value.arr.type = GGUF_TYPE_STRING;
  18523. ctx->kv[idx].value.arr.n = n;
  18524. ctx->kv[idx].value.arr.data = GGML_CALLOC(n, sizeof(struct gguf_str));
  18525. for (int i = 0; i < n; i++) {
  18526. struct gguf_str * str = &((struct gguf_str *)ctx->kv[idx].value.arr.data)[i];
  18527. str->n = strlen(data[i]);
  18528. str->data = strdup(data[i]);
  18529. }
  18530. }
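// Example sketch (added for illustration): setting a few metadata values on a writable
// context. The key names and values are placeholders.
//
//     gguf_set_val_str(gctx, "general.name", "my-model");
//     gguf_set_val_u32(gctx, "general.alignment", 32);
//
//     const char * langs[] = { "en", "de" };
//     gguf_set_arr_str(gctx, "general.languages", langs, 2);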
  18531. // set or add KV pairs from another context
  18532. void gguf_set_kv(struct gguf_context * ctx, struct gguf_context * src) {
  18533. for (uint32_t i = 0; i < src->header.n_kv; i++) {
  18534. switch (src->kv[i].type) {
  18535. case GGUF_TYPE_UINT8: gguf_set_val_u8 (ctx, src->kv[i].key.data, src->kv[i].value.uint8); break;
  18536. case GGUF_TYPE_INT8: gguf_set_val_i8 (ctx, src->kv[i].key.data, src->kv[i].value.int8); break;
  18537. case GGUF_TYPE_UINT16: gguf_set_val_u16 (ctx, src->kv[i].key.data, src->kv[i].value.uint16); break;
  18538. case GGUF_TYPE_INT16: gguf_set_val_i16 (ctx, src->kv[i].key.data, src->kv[i].value.int16); break;
  18539. case GGUF_TYPE_UINT32: gguf_set_val_u32 (ctx, src->kv[i].key.data, src->kv[i].value.uint32); break;
  18540. case GGUF_TYPE_INT32: gguf_set_val_i32 (ctx, src->kv[i].key.data, src->kv[i].value.int32); break;
  18541. case GGUF_TYPE_FLOAT32: gguf_set_val_f32 (ctx, src->kv[i].key.data, src->kv[i].value.float32); break;
  18542. case GGUF_TYPE_UINT64: gguf_set_val_u64 (ctx, src->kv[i].key.data, src->kv[i].value.uint64); break;
  18543. case GGUF_TYPE_INT64: gguf_set_val_i64 (ctx, src->kv[i].key.data, src->kv[i].value.int64); break;
  18544. case GGUF_TYPE_FLOAT64: gguf_set_val_f64 (ctx, src->kv[i].key.data, src->kv[i].value.float64); break;
  18545. case GGUF_TYPE_BOOL: gguf_set_val_bool(ctx, src->kv[i].key.data, src->kv[i].value.bool_); break;
  18546. case GGUF_TYPE_STRING: gguf_set_val_str (ctx, src->kv[i].key.data, src->kv[i].value.str.data); break;
  18547. case GGUF_TYPE_ARRAY:
  18548. {
  18549. if (src->kv[i].value.arr.type == GGUF_TYPE_STRING) {
  18550. const char ** data = GGML_CALLOC(src->kv[i].value.arr.n, sizeof(char *));
  18551. for (uint32_t j = 0; j < src->kv[i].value.arr.n; j++) {
  18552. data[j] = ((struct gguf_str *)src->kv[i].value.arr.data)[j].data;
  18553. }
  18554. gguf_set_arr_str(ctx, src->kv[i].key.data, data, src->kv[i].value.arr.n);
  18555. GGML_FREE((void *)data);
  18556. } else if (src->kv[i].value.arr.type == GGUF_TYPE_ARRAY) {
  18557. GGML_ASSERT(false && "nested arrays not supported");
  18558. } else {
  18559. gguf_set_arr_data(ctx, src->kv[i].key.data, src->kv[i].value.arr.type, src->kv[i].value.arr.data, src->kv[i].value.arr.n);
  18560. }
  18561. } break;
  18562. default: GGML_ASSERT(false && "invalid type"); break;
  18563. }
  18564. }
  18565. }
  18566. void gguf_add_tensor(
  18567. struct gguf_context * ctx,
  18568. const struct ggml_tensor * tensor) {
  18569. if (gguf_find_tensor(ctx, tensor->name) != -1) {
  18570. GGML_ASSERT(false && "duplicated tensor name");
  18571. }
  18572. const int idx = ctx->header.n_tensors;
  18573. ctx->infos = realloc(ctx->infos, (idx + 1)*sizeof(struct gguf_tensor_info));
  18574. ctx->infos[idx].name.n = strlen(tensor->name);
  18575. ctx->infos[idx].name.data = strdup(tensor->name);
  18576. for (int i = 0; i < GGML_MAX_DIMS; ++i) {
  18577. ctx->infos[idx].ne[i] = 1;
  18578. }
  18579. ctx->infos[idx].n_dims = ggml_n_dims(tensor);
  18580. for (uint32_t i = 0; i < ctx->infos[idx].n_dims; i++) {
  18581. ctx->infos[idx].ne[i] = tensor->ne[i];
  18582. }
  18583. ctx->infos[idx].type = tensor->type;
  18584. ctx->infos[idx].offset = 0;
  18585. ctx->infos[idx].data = tensor->data;
  18586. ctx->infos[idx].size = ggml_nbytes(tensor);
  18587. if (ctx->header.n_tensors > 0) {
  18588. ctx->infos[idx].offset = ctx->infos[idx - 1].offset + GGML_PAD(ctx->infos[idx - 1].size, ctx->alignment);
  18589. }
  18590. ctx->header.n_tensors++;
  18591. }
  18592. void gguf_set_tensor_type(struct gguf_context * ctx, const char * name, enum ggml_type type) {
  18593. const int idx = gguf_find_tensor(ctx, name);
  18594. if (idx < 0) {
  18595. GGML_ASSERT(false && "tensor not found");
  18596. }
  18597. ctx->infos[idx].type = type;
  18598. }
  18599. void gguf_set_tensor_data(struct gguf_context * ctx, const char * name, const void * data, size_t size) {
  18600. const int idx = gguf_find_tensor(ctx, name);
  18601. if (idx < 0) {
  18602. GGML_ASSERT(false && "tensor not found");
  18603. }
  18604. ctx->infos[idx].data = data;
  18605. ctx->infos[idx].size = size;
  18606. // update offsets
  18607. for (uint32_t i = idx + 1; i < ctx->header.n_tensors; ++i) {
  18608. ctx->infos[i].offset = ctx->infos[i - 1].offset + GGML_PAD(ctx->infos[i - 1].size, ctx->alignment);
  18609. }
  18610. }
  18611. //static void gguf_fwrite_str(FILE * file, const struct gguf_str * val) {
  18612. // fwrite(&val->n, sizeof(val->n), 1, file);
  18613. // fwrite(val->data, sizeof(char), val->n, file);
  18614. //}
  18615. //
  18616. //static void gguf_fwrite_el(FILE * file, const void * val, size_t size) {
  18617. // fwrite(val, sizeof(char), size, file);
  18618. //}
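// gguf_buf is a simple growable in-memory buffer used by the GGUF writer below. When its
// data pointer is NULL (initial size 0), the write helpers only advance the offset without
// copying, which is how gguf_get_meta_size() computes the metadata size without allocating.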
  18619. struct gguf_buf {
  18620. void * data;
  18621. size_t size;
  18622. size_t offset;
  18623. };
  18624. static struct gguf_buf gguf_buf_init(size_t size) {
  18625. struct gguf_buf buf = {
  18626. /*buf.data =*/ size == 0 ? NULL : GGML_CALLOC(1, size),
  18627. /*buf.size =*/ size,
  18628. /*buf.offset =*/ 0,
  18629. };
  18630. return buf;
  18631. }
  18632. static void gguf_buf_free(struct gguf_buf buf) {
  18633. if (buf.data) {
  18634. GGML_FREE(buf.data);
  18635. }
  18636. }
  18637. static void gguf_buf_grow(struct gguf_buf * buf, size_t size) {
  18638. if (buf->offset + size > buf->size) {
  18639. buf->size = 1.5*(buf->offset + size);
  18640. if (buf->data) {
  18641. buf->data = realloc(buf->data, buf->size);
  18642. }
  18643. }
  18644. }
  18645. static void gguf_bwrite_str(struct gguf_buf * buf, const struct gguf_str * val) {
  18646. gguf_buf_grow(buf, sizeof(val->n) + val->n);
  18647. if (buf->data) {
  18648. memcpy((char *) buf->data + buf->offset, &val->n, sizeof(val->n));
  18649. }
  18650. buf->offset += sizeof(val->n);
  18651. if (buf->data) {
  18652. memcpy((char *) buf->data + buf->offset, val->data, val->n);
  18653. }
  18654. buf->offset += val->n;
  18655. }
  18656. static void gguf_bwrite_el(struct gguf_buf * buf, const void * val, size_t el_size) {
  18657. gguf_buf_grow(buf, el_size);
  18658. if (buf->data) {
  18659. memcpy((char *) buf->data + buf->offset, val, el_size);
  18660. }
  18661. buf->offset += el_size;
  18662. }
  18663. static void gguf_write_to_buf(const struct gguf_context * ctx, struct gguf_buf * buf, bool only_meta) {
  18664. // write header
  18665. gguf_bwrite_el(buf, &ctx->header.magic, sizeof(ctx->header.magic));
  18666. gguf_bwrite_el(buf, &ctx->header.version, sizeof(ctx->header.version));
  18667. gguf_bwrite_el(buf, &ctx->header.n_tensors, sizeof(ctx->header.n_tensors));
  18668. gguf_bwrite_el(buf, &ctx->header.n_kv, sizeof(ctx->header.n_kv));
  18669. // write key-value pairs
  18670. for (uint32_t i = 0; i < ctx->header.n_kv; ++i) {
  18671. struct gguf_kv * kv = &ctx->kv[i];
  18672. gguf_bwrite_str(buf, &kv->key);
  18673. gguf_bwrite_el (buf, &kv->type, sizeof(kv->type));
  18674. switch (kv->type) {
case GGUF_TYPE_UINT8: gguf_bwrite_el (buf, &kv->value.uint8, sizeof(kv->value.uint8) ); break;
  18676. case GGUF_TYPE_INT8: gguf_bwrite_el (buf, &kv->value.int8, sizeof(kv->value.int8) ); break;
  18677. case GGUF_TYPE_UINT16: gguf_bwrite_el (buf, &kv->value.uint16, sizeof(kv->value.uint16) ); break;
  18678. case GGUF_TYPE_INT16: gguf_bwrite_el (buf, &kv->value.int16, sizeof(kv->value.int16) ); break;
  18679. case GGUF_TYPE_UINT32: gguf_bwrite_el (buf, &kv->value.uint32, sizeof(kv->value.uint32) ); break;
  18680. case GGUF_TYPE_INT32: gguf_bwrite_el (buf, &kv->value.int32, sizeof(kv->value.int32) ); break;
  18681. case GGUF_TYPE_FLOAT32: gguf_bwrite_el (buf, &kv->value.float32, sizeof(kv->value.float32)); break;
  18682. case GGUF_TYPE_UINT64: gguf_bwrite_el (buf, &kv->value.uint64, sizeof(kv->value.uint64) ); break;
  18683. case GGUF_TYPE_INT64: gguf_bwrite_el (buf, &kv->value.int64, sizeof(kv->value.int64) ); break;
  18684. case GGUF_TYPE_FLOAT64: gguf_bwrite_el (buf, &kv->value.float64, sizeof(kv->value.float64)); break;
  18685. case GGUF_TYPE_BOOL: gguf_bwrite_el (buf, &kv->value.bool_, sizeof(kv->value.bool_) ); break;
  18686. case GGUF_TYPE_STRING: gguf_bwrite_str(buf, &kv->value.str ); break;
  18687. case GGUF_TYPE_ARRAY:
  18688. {
  18689. gguf_bwrite_el(buf, &kv->value.arr.type, sizeof(kv->value.arr.type));
  18690. gguf_bwrite_el(buf, &kv->value.arr.n, sizeof(kv->value.arr.n) );
  18691. switch (kv->value.arr.type) {
  18692. case GGUF_TYPE_UINT8:
  18693. case GGUF_TYPE_INT8:
  18694. case GGUF_TYPE_UINT16:
  18695. case GGUF_TYPE_INT16:
  18696. case GGUF_TYPE_UINT32:
  18697. case GGUF_TYPE_INT32:
  18698. case GGUF_TYPE_FLOAT32:
  18699. case GGUF_TYPE_UINT64:
  18700. case GGUF_TYPE_INT64:
  18701. case GGUF_TYPE_FLOAT64:
  18702. case GGUF_TYPE_BOOL:
  18703. {
  18704. gguf_bwrite_el(buf, kv->value.arr.data, kv->value.arr.n * gguf_type_size(kv->value.arr.type));
  18705. } break;
  18706. case GGUF_TYPE_STRING:
  18707. {
  18708. for (uint32_t j = 0; j < kv->value.arr.n; ++j) {
  18709. gguf_bwrite_str(buf, &((struct gguf_str *) kv->value.arr.data)[j]);
  18710. }
  18711. } break;
  18712. case GGUF_TYPE_ARRAY:
  18713. default: GGML_ASSERT(false && "invalid type"); break;
  18714. }
  18715. } break;
  18716. default: GGML_ASSERT(false && "invalid type");
  18717. }
  18718. }
  18719. // write tensor infos
  18720. for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) {
  18721. struct gguf_tensor_info * info = &ctx->infos[i];
  18722. gguf_bwrite_str(buf, &info->name);
  18723. gguf_bwrite_el (buf, &info->n_dims, sizeof(info->n_dims));
  18724. for (uint32_t j = 0; j < info->n_dims; ++j) {
  18725. gguf_bwrite_el(buf, &info->ne[j], sizeof(info->ne[j]));
  18726. }
  18727. gguf_bwrite_el(buf, &info->type, sizeof(info->type));
  18728. gguf_bwrite_el(buf, &info->offset, sizeof(info->offset));
  18729. }
  18730. // we require the data section to be aligned, so take into account any padding
  18731. {
  18732. const size_t offset = buf->offset;
  18733. const size_t offset_pad = GGML_PAD(offset, ctx->alignment);
  18734. if (offset_pad != offset) {
  18735. uint8_t pad = 0;
  18736. for (size_t i = 0; i < offset_pad - offset; ++i) {
  18737. gguf_bwrite_el(buf, &pad, sizeof(pad));
  18738. }
  18739. }
  18740. }
  18741. if (only_meta) {
  18742. return;
  18743. }
  18744. size_t offset = 0;
  18745. // write tensor data
  18746. for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) {
  18747. struct gguf_tensor_info * info = &ctx->infos[i];
  18748. const size_t size = info->size;
  18749. const size_t size_pad = GGML_PAD(size, ctx->alignment);
  18750. gguf_bwrite_el(buf, info->data, size);
  18751. if (size_pad != size) {
  18752. uint8_t pad = 0;
  18753. for (size_t j = 0; j < size_pad - size; ++j) {
  18754. gguf_bwrite_el(buf, &pad, sizeof(pad));
  18755. }
  18756. }
  18757. GGML_ASSERT(offset == info->offset);
  18758. offset += size_pad;
  18759. }
  18760. }
  18761. void gguf_write_to_file(const struct gguf_context * ctx, const char * fname, bool only_meta) {
  18762. FILE * file = ggml_fopen(fname, "wb");
  18763. if (!file) {
  18764. GGML_ASSERT(false && "failed to open file for writing");
  18765. }
  18766. struct gguf_buf buf = gguf_buf_init(16*1024);
  18767. gguf_write_to_buf(ctx, &buf, only_meta);
  18768. fwrite(buf.data, 1, buf.offset, file);
  18769. gguf_buf_free(buf);
  18770. fclose(file);
  18771. }
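// Usage sketch (illustrative, not part of the original source): writing a GGUF file
// end-to-end. It assumes gguf_init_empty() (defined earlier in this file) and a
// ggml_context "ctx_data" that owns the tensors; all names are placeholders.
//
//     struct gguf_context * gctx = gguf_init_empty();
//
//     gguf_set_val_str(gctx, "general.name", "my-model");
//
//     struct ggml_tensor * t = ggml_new_tensor_1d(ctx_data, GGML_TYPE_F32, 16);
//     ggml_set_name(t, "my_tensor");
//     // ... fill t->data ...
//
//     gguf_add_tensor(gctx, t);                      // registers the tensor info and data pointer
//     gguf_write_to_file(gctx, "model.gguf", false); // false: write the tensor data as well
//
//     gguf_free(gctx);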
  18772. size_t gguf_get_meta_size(const struct gguf_context * ctx) {
  18773. // no allocs - only compute size
  18774. struct gguf_buf buf = gguf_buf_init(0);
  18775. gguf_write_to_buf(ctx, &buf, true);
  18776. return buf.offset;
  18777. }
  18778. void gguf_get_meta_data(const struct gguf_context * ctx, void * data) {
  18779. struct gguf_buf buf = gguf_buf_init(16*1024);
  18780. gguf_write_to_buf(ctx, &buf, true);
  18781. memcpy(data, buf.data, buf.offset);
  18782. gguf_buf_free(buf);
  18783. }
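// Example sketch (added for illustration): retrieving only the serialized metadata into a
// caller-owned buffer, e.g. to prepend it to a separately written data blob.
//
//     const size_t meta_size = gguf_get_meta_size(gctx);
//     void * meta = malloc(meta_size);
//     if (meta != NULL) {
//         gguf_get_meta_data(gctx, meta);
//         // ... use meta ...
//         free(meta);
//     }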
  18784. ////////////////////////////////////////////////////////////////////////////////
  18785. int ggml_cpu_has_avx(void) {
  18786. #if defined(__AVX__)
  18787. return 1;
  18788. #else
  18789. return 0;
  18790. #endif
  18791. }
  18792. int ggml_cpu_has_avx_vnni(void) {
  18793. #if defined(__AVXVNNI__)
  18794. return 1;
  18795. #else
  18796. return 0;
  18797. #endif
  18798. }
  18799. int ggml_cpu_has_avx2(void) {
  18800. #if defined(__AVX2__)
  18801. return 1;
  18802. #else
  18803. return 0;
  18804. #endif
  18805. }
  18806. int ggml_cpu_has_avx512(void) {
  18807. #if defined(__AVX512F__)
  18808. return 1;
  18809. #else
  18810. return 0;
  18811. #endif
  18812. }
  18813. int ggml_cpu_has_avx512_vbmi(void) {
  18814. #if defined(__AVX512VBMI__)
  18815. return 1;
  18816. #else
  18817. return 0;
  18818. #endif
  18819. }
  18820. int ggml_cpu_has_avx512_vnni(void) {
  18821. #if defined(__AVX512VNNI__)
  18822. return 1;
  18823. #else
  18824. return 0;
  18825. #endif
  18826. }
  18827. int ggml_cpu_has_fma(void) {
  18828. #if defined(__FMA__)
  18829. return 1;
  18830. #else
  18831. return 0;
  18832. #endif
  18833. }
  18834. int ggml_cpu_has_neon(void) {
  18835. #if defined(__ARM_NEON)
  18836. return 1;
  18837. #else
  18838. return 0;
  18839. #endif
  18840. }
  18841. int ggml_cpu_has_arm_fma(void) {
  18842. #if defined(__ARM_FEATURE_FMA)
  18843. return 1;
  18844. #else
  18845. return 0;
  18846. #endif
  18847. }
  18848. int ggml_cpu_has_metal(void) {
  18849. #if defined(GGML_USE_METAL)
  18850. return 1;
  18851. #else
  18852. return 0;
  18853. #endif
  18854. }
  18855. int ggml_cpu_has_f16c(void) {
  18856. #if defined(__F16C__)
  18857. return 1;
  18858. #else
  18859. return 0;
  18860. #endif
  18861. }
  18862. int ggml_cpu_has_fp16_va(void) {
  18863. #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
  18864. return 1;
  18865. #else
  18866. return 0;
  18867. #endif
  18868. }
  18869. int ggml_cpu_has_wasm_simd(void) {
  18870. #if defined(__wasm_simd128__)
  18871. return 1;
  18872. #else
  18873. return 0;
  18874. #endif
  18875. }
  18876. int ggml_cpu_has_blas(void) {
  18877. #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) || defined(GGML_USE_CUDA) || defined(GGML_USE_VULKAN) || defined(GGML_USE_CLBLAST) || defined(GGML_USE_SYCL)
  18878. return 1;
  18879. #else
  18880. return 0;
  18881. #endif
  18882. }
  18883. int ggml_cpu_has_cuda(void) {
  18884. #if defined(GGML_USE_CUDA)
  18885. return 1;
  18886. #else
  18887. return 0;
  18888. #endif
  18889. }
  18890. int ggml_cpu_has_clblast(void) {
  18891. #if defined(GGML_USE_CLBLAST)
  18892. return 1;
  18893. #else
  18894. return 0;
  18895. #endif
  18896. }
  18897. int ggml_cpu_has_vulkan(void) {
  18898. #if defined(GGML_USE_VULKAN)
  18899. return 1;
  18900. #else
  18901. return 0;
  18902. #endif
  18903. }
  18904. int ggml_cpu_has_kompute(void) {
  18905. #if defined(GGML_USE_KOMPUTE)
  18906. return 1;
  18907. #else
  18908. return 0;
  18909. #endif
  18910. }
  18911. int ggml_cpu_has_sycl(void) {
  18912. #if defined(GGML_USE_SYCL)
  18913. return 1;
  18914. #else
  18915. return 0;
  18916. #endif
  18917. }
  18918. int ggml_cpu_has_gpublas(void) {
  18919. return ggml_cpu_has_cuda() || ggml_cpu_has_clblast() || ggml_cpu_has_vulkan() || ggml_cpu_has_kompute() ||
  18920. ggml_cpu_has_sycl();
  18921. }
  18922. int ggml_cpu_has_sse3(void) {
  18923. #if defined(__SSE3__)
  18924. return 1;
  18925. #else
  18926. return 0;
  18927. #endif
  18928. }
  18929. int ggml_cpu_has_ssse3(void) {
  18930. #if defined(__SSSE3__)
  18931. return 1;
  18932. #else
  18933. return 0;
  18934. #endif
  18935. }
  18936. int ggml_cpu_has_vsx(void) {
  18937. #if defined(__POWER9_VECTOR__)
  18938. return 1;
  18939. #else
  18940. return 0;
  18941. #endif
  18942. }
  18943. int ggml_cpu_has_matmul_int8(void) {
  18944. #if defined(__ARM_FEATURE_MATMUL_INT8)
  18945. return 1;
  18946. #else
  18947. return 0;
  18948. #endif
  18949. }
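// Example sketch (added for illustration): reporting a few of the detected CPU features at
// startup; the full list of ggml_cpu_has_* accessors is defined above.
//
//     printf("AVX = %d | AVX2 = %d | FMA = %d | NEON = %d | F16C = %d\n",
//         ggml_cpu_has_avx(), ggml_cpu_has_avx2(), ggml_cpu_has_fma(),
//         ggml_cpu_has_neon(), ggml_cpu_has_f16c());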
  18950. ////////////////////////////////////////////////////////////////////////////////