ggml.c (740 KB, 14,308 lines)
430914310143111431214313143141431514316143171431814319143201432114322143231432414325143261432714328143291433014331143321433314334143351433614337143381433914340143411434214343143441434514346143471434814349143501435114352143531435414355143561435714358143591436014361143621436314364143651436614367143681436914370143711437214373143741437514376143771437814379143801438114382143831438414385143861438714388143891439014391143921439314394143951439614397143981439914400144011440214403144041440514406144071440814409144101441114412144131441414415144161441714418144191442014421144221442314424144251442614427144281442914430144311443214433144341443514436144371443814439144401444114442144431444414445144461444714448144491445014451144521445314454144551445614457144581445914460144611446214463144641446514466144671446814469144701447114472144731447414475144761447714478144791448014481144821448314484144851448614487144881448914490144911449214493144941449514496144971449814499145001450114502145031450414505145061450714508145091451014511145121451314514145151451614517145181451914520145211452214523145241452514526145271452814529145301453114532145331453414535145361453714538145391454014541145421454314544145451454614547145481454914550145511455214553145541455514556145571455814559145601456114562145631456414565145661456714568145691457014571145721457314574145751457614577145781457914580145811458214583145841458514586145871458814589145901459114592145931459414595145961459714598145991460014601146021460314604146051460614607146081460914610146111461214613146141461514616146171461814619146201462114622146231462414625146261462714628146291463014631146321463314634146351463614637146381463914640146411464214643146441464514646146471464814649146501465114652146531465414655146561465714658146591466014661146621466314664146651466614667146681466914670146711467214673146741467514676146771467814679146801468114682146831468414685146861468714688146891469014691146921469314694146951469614697146981469914700147011470214703147041470514706147071470814709147101471114712147131471414715147161471714718147191472014721147221472314724147251472614727147281472914730147311473214733147341473514736147371473814739147401474114742147431474414745147461474714748147491475014751147521475314754147551475614757147581475914760147611476214763147641476514766147671476814769147701477114772147731477414775147761477714778147791478014781147821478314784147851478614787147881478914790147911479214793147941479514796147971479814799148001480114802148031480414805148061480714808148091481014811148121481314814148151481614817148181481914820148211482214823148241482514826148271482814829148301483114832148331483414835148361483714838148391484014841148421484314844148451484614847148481484914850148511485214853148541485514856148571485814859148601486114862148631486414865148661486714868148691487014871148721487314874148751487614877148781487914880148811488214883148841488514886148871488814889148901489114892148931489414895148961489714898148991490014901149021490314904149051490614907149081490914910149111491214913149141491514916149171491814919149201492114922149231492414925149261492714928149291493014931149321493314934149351493614937149381493914940149411494214943149441494514946149471494814949149501495114952149531495414955149561495714958149591496014961149621496314964149651496614967149681496914970149711497214973149741497514976149771497814979149801498114982149831498414985149861498714988149891499014991149921499314994149951499614997149981499915000150011500215003150041500515006150071500815009150101501115012150131501415015150161501715018150191
502015021150221502315024150251502615027150281502915030150311503215033150341503515036150371503815039150401504115042150431504415045150461504715048150491505015051150521505315054150551505615057150581505915060150611506215063150641506515066150671506815069150701507115072150731507415075150761507715078150791508015081150821508315084150851508615087150881508915090150911509215093150941509515096150971509815099151001510115102151031510415105151061510715108151091511015111151121511315114151151511615117151181511915120151211512215123151241512515126151271512815129151301513115132151331513415135151361513715138151391514015141151421514315144151451514615147151481514915150151511515215153151541515515156151571515815159151601516115162151631516415165151661516715168151691517015171151721517315174151751517615177151781517915180151811518215183151841518515186151871518815189151901519115192151931519415195151961519715198151991520015201152021520315204152051520615207152081520915210152111521215213152141521515216152171521815219152201522115222152231522415225152261522715228152291523015231152321523315234152351523615237152381523915240152411524215243152441524515246152471524815249152501525115252152531525415255152561525715258152591526015261152621526315264152651526615267152681526915270152711527215273152741527515276152771527815279152801528115282152831528415285152861528715288152891529015291152921529315294152951529615297152981529915300153011530215303153041530515306153071530815309153101531115312153131531415315153161531715318153191532015321153221532315324153251532615327153281532915330153311533215333153341533515336153371533815339153401534115342153431534415345153461534715348153491535015351153521535315354153551535615357153581535915360153611536215363153641536515366153671536815369153701537115372153731537415375153761537715378153791538015381153821538315384153851538615387153881538915390153911539215393153941539515396153971539815399154001540115402154031540415405154061540715408154091541015411154121541315414154151541615417154181541915420154211542215423154241542515426154271542815429154301543115432154331543415435154361543715438154391544015441154421544315444154451544615447154481544915450154511545215453154541545515456154571545815459154601546115462154631546415465154661546715468154691547015471154721547315474154751547615477154781547915480154811548215483154841548515486154871548815489154901549115492154931549415495154961549715498154991550015501155021550315504155051550615507155081550915510155111551215513155141551515516155171551815519155201552115522155231552415525155261552715528155291553015531155321553315534155351553615537155381553915540155411554215543155441554515546155471554815549155501555115552155531555415555155561555715558155591556015561155621556315564155651556615567155681556915570155711557215573155741557515576155771557815579155801558115582155831558415585155861558715588155891559015591155921559315594155951559615597155981559915600156011560215603156041560515606156071560815609156101561115612156131561415615156161561715618156191562015621156221562315624156251562615627156281562915630156311563215633156341563515636156371563815639156401564115642156431564415645156461564715648156491565015651156521565315654156551565615657156581565915660156611566215663156641566515666156671566815669156701567115672156731567415675156761567715678156791568015681156821568315684156851568615687156881568915690156911569215693156941569515696156971569815699157001570115702157031570415705157061570715708157091571015711157121571315714157151571615717157181571915720157211572215723157241572515726157271572815729157301
573115732157331573415735157361573715738157391574015741157421574315744157451574615747157481574915750157511575215753157541575515756157571575815759157601576115762157631576415765157661576715768157691577015771157721577315774157751577615777157781577915780157811578215783157841578515786157871578815789157901579115792157931579415795157961579715798157991580015801158021580315804158051580615807158081580915810158111581215813158141581515816158171581815819158201582115822158231582415825158261582715828158291583015831158321583315834158351583615837158381583915840158411584215843158441584515846158471584815849158501585115852158531585415855158561585715858158591586015861158621586315864158651586615867158681586915870158711587215873158741587515876158771587815879158801588115882158831588415885158861588715888158891589015891158921589315894158951589615897158981589915900159011590215903159041590515906159071590815909159101591115912159131591415915159161591715918159191592015921159221592315924159251592615927159281592915930159311593215933159341593515936159371593815939159401594115942159431594415945159461594715948159491595015951159521595315954159551595615957159581595915960159611596215963159641596515966159671596815969159701597115972159731597415975159761597715978159791598015981159821598315984159851598615987159881598915990159911599215993159941599515996159971599815999160001600116002160031600416005160061600716008160091601016011160121601316014160151601616017160181601916020160211602216023160241602516026160271602816029160301603116032160331603416035160361603716038160391604016041160421604316044160451604616047160481604916050160511605216053160541605516056160571605816059160601606116062160631606416065160661606716068160691607016071160721607316074160751607616077160781607916080160811608216083160841608516086160871608816089160901609116092160931609416095160961609716098160991610016101161021610316104161051610616107161081610916110161111611216113161141611516116161171611816119161201612116122161231612416125161261612716128161291613016131161321613316134161351613616137161381613916140161411614216143161441614516146161471614816149161501615116152161531615416155161561615716158161591616016161161621616316164161651616616167161681616916170161711617216173161741617516176161771617816179161801618116182161831618416185161861618716188161891619016191161921619316194161951619616197161981619916200162011620216203162041620516206162071620816209162101621116212162131621416215162161621716218162191622016221162221622316224162251622616227162281622916230162311623216233162341623516236162371623816239162401624116242162431624416245162461624716248162491625016251162521625316254162551625616257162581625916260162611626216263162641626516266162671626816269162701627116272162731627416275162761627716278162791628016281162821628316284162851628616287162881628916290162911629216293162941629516296162971629816299163001630116302163031630416305163061630716308163091631016311163121631316314163151631616317163181631916320163211632216323163241632516326163271632816329163301633116332163331633416335163361633716338163391634016341163421634316344163451634616347163481634916350163511635216353163541635516356163571635816359163601636116362163631636416365163661636716368163691637016371163721637316374163751637616377163781637916380163811638216383163841638516386163871638816389163901639116392163931639416395163961639716398163991640016401164021640316404164051640616407164081640916410164111641216413164141641516416164171641816419164201642116422164231642416425164261642716428164291643016431164321643316434164351643616437164381643916440164411
644216443164441644516446164471644816449164501645116452164531645416455164561645716458164591646016461164621646316464164651646616467164681646916470164711647216473164741647516476164771647816479164801648116482164831648416485164861648716488164891649016491164921649316494164951649616497164981649916500165011650216503165041650516506165071650816509165101651116512165131651416515165161651716518165191652016521165221652316524165251652616527165281652916530165311653216533165341653516536165371653816539165401654116542165431654416545165461654716548165491655016551165521655316554165551655616557165581655916560165611656216563165641656516566165671656816569165701657116572165731657416575165761657716578165791658016581165821658316584165851658616587165881658916590165911659216593165941659516596165971659816599166001660116602166031660416605166061660716608166091661016611166121661316614166151661616617166181661916620166211662216623166241662516626166271662816629166301663116632166331663416635166361663716638166391664016641166421664316644166451664616647166481664916650166511665216653166541665516656166571665816659166601666116662166631666416665166661666716668166691667016671166721667316674166751667616677166781667916680166811668216683166841668516686166871668816689166901669116692166931669416695166961669716698166991670016701167021670316704167051670616707167081670916710167111671216713167141671516716167171671816719167201672116722167231672416725167261672716728167291673016731167321673316734167351673616737167381673916740167411674216743167441674516746167471674816749167501675116752167531675416755167561675716758167591676016761167621676316764167651676616767167681676916770167711677216773167741677516776167771677816779167801678116782167831678416785167861678716788167891679016791167921679316794167951679616797167981679916800168011680216803168041680516806168071680816809168101681116812168131681416815168161681716818168191682016821168221682316824168251682616827168281682916830168311683216833168341683516836168371683816839168401684116842168431684416845168461684716848168491685016851168521685316854168551685616857168581685916860168611686216863168641686516866168671686816869168701687116872168731687416875168761687716878168791688016881168821688316884168851688616887168881688916890168911689216893168941689516896168971689816899169001690116902169031690416905169061690716908169091691016911169121691316914169151691616917169181691916920169211692216923169241692516926169271692816929169301693116932169331693416935169361693716938169391694016941169421694316944169451694616947169481694916950169511695216953169541695516956169571695816959169601696116962169631696416965169661696716968169691697016971169721697316974169751697616977169781697916980169811698216983169841698516986169871698816989169901699116992169931699416995169961699716998169991700017001170021700317004170051700617007170081700917010170111701217013170141701517016170171701817019170201702117022170231702417025170261702717028170291703017031170321703317034170351703617037170381703917040170411704217043170441704517046170471704817049170501705117052170531705417055170561705717058170591706017061170621706317064170651706617067170681706917070170711707217073170741707517076170771707817079170801708117082170831708417085170861708717088170891709017091170921709317094170951709617097170981709917100171011710217103171041710517106171071710817109171101711117112171131711417115171161711717118171191712017121171221712317124171251712617127171281712917130171311713217133171341713517136171371713817139171401714117142171431714417145171461714717148171491715017151171521
715317154171551715617157171581715917160171611716217163171641716517166171671716817169171701717117172171731717417175171761717717178171791718017181171821718317184171851718617187171881718917190171911719217193171941719517196171971719817199172001720117202172031720417205172061720717208172091721017211172121721317214172151721617217172181721917220172211722217223172241722517226172271722817229172301723117232172331723417235172361723717238172391724017241172421724317244172451724617247172481724917250172511725217253172541725517256172571725817259172601726117262172631726417265172661726717268172691727017271172721727317274172751727617277172781727917280172811728217283172841728517286172871728817289172901729117292172931729417295172961729717298172991730017301173021730317304173051730617307173081730917310173111731217313173141731517316173171731817319173201732117322173231732417325173261732717328173291733017331173321733317334173351733617337173381733917340173411734217343173441734517346173471734817349173501735117352173531735417355173561735717358173591736017361173621736317364173651736617367173681736917370173711737217373173741737517376173771737817379173801738117382173831738417385173861738717388173891739017391173921739317394173951739617397173981739917400174011740217403174041740517406174071740817409174101741117412174131741417415174161741717418174191742017421174221742317424174251742617427174281742917430174311743217433174341743517436174371743817439174401744117442174431744417445174461744717448174491745017451174521745317454174551745617457174581745917460174611746217463174641746517466174671746817469174701747117472174731747417475174761747717478174791748017481174821748317484174851748617487174881748917490174911749217493174941749517496174971749817499175001750117502175031750417505175061750717508175091751017511175121751317514175151751617517175181751917520175211752217523175241752517526175271752817529175301753117532175331753417535175361753717538175391754017541175421754317544175451754617547175481754917550175511755217553175541755517556175571755817559175601756117562175631756417565175661756717568175691757017571175721757317574175751757617577175781757917580175811758217583175841758517586175871758817589175901759117592175931759417595175961759717598175991760017601176021760317604176051760617607176081760917610176111761217613176141761517616176171761817619176201762117622176231762417625176261762717628176291763017631176321763317634176351763617637176381763917640176411764217643176441764517646176471764817649176501765117652176531765417655176561765717658176591766017661176621766317664176651766617667176681766917670176711767217673176741767517676176771767817679176801768117682176831768417685176861768717688176891769017691176921769317694176951769617697176981769917700177011770217703177041770517706177071770817709177101771117712177131771417715177161771717718177191772017721177221772317724177251772617727177281772917730177311773217733177341773517736177371773817739177401774117742177431774417745177461774717748177491775017751177521775317754177551775617757177581775917760177611776217763177641776517766177671776817769177701777117772177731777417775177761777717778177791778017781177821778317784177851778617787177881778917790177911779217793177941779517796177971779817799178001780117802178031780417805178061780717808178091781017811178121781317814178151781617817178181781917820178211782217823178241782517826178271782817829178301783117832178331783417835178361783717838178391784017841178421784317844178451784617847178481784917850178511785217853178541785517856178571785817859178601786117862178631
786417865178661786717868178691787017871178721787317874178751787617877178781787917880178811788217883178841788517886178871788817889178901789117892178931789417895178961789717898178991790017901179021790317904179051790617907179081790917910179111791217913179141791517916179171791817919179201792117922179231792417925179261792717928179291793017931179321793317934179351793617937179381793917940179411794217943179441794517946179471794817949179501795117952179531795417955179561795717958179591796017961179621796317964179651796617967179681796917970179711797217973179741797517976179771797817979179801798117982179831798417985179861798717988179891799017991179921799317994179951799617997179981799918000180011800218003180041800518006180071800818009180101801118012180131801418015180161801718018180191802018021180221802318024180251802618027180281802918030180311803218033180341803518036180371803818039180401804118042180431804418045180461804718048180491805018051180521805318054180551805618057180581805918060180611806218063180641806518066180671806818069180701807118072180731807418075180761807718078180791808018081180821808318084180851808618087180881808918090180911809218093180941809518096180971809818099181001810118102181031810418105181061810718108181091811018111181121811318114181151811618117181181811918120181211812218123181241812518126181271812818129181301813118132181331813418135181361813718138181391814018141181421814318144181451814618147181481814918150181511815218153181541815518156181571815818159181601816118162181631816418165181661816718168181691817018171181721817318174181751817618177181781817918180181811818218183181841818518186181871818818189181901819118192181931819418195181961819718198181991820018201182021820318204182051820618207182081820918210182111821218213182141821518216182171821818219182201822118222182231822418225182261822718228182291823018231182321823318234182351823618237182381823918240182411824218243182441824518246182471824818249182501825118252182531825418255182561825718258182591826018261182621826318264182651826618267182681826918270182711827218273182741827518276182771827818279182801828118282182831828418285182861828718288182891829018291182921829318294182951829618297182981829918300183011830218303183041830518306183071830818309183101831118312183131831418315183161831718318183191832018321183221832318324183251832618327183281832918330183311833218333183341833518336183371833818339183401834118342183431834418345183461834718348183491835018351183521835318354183551835618357183581835918360183611836218363183641836518366183671836818369183701837118372183731837418375183761837718378183791838018381183821838318384183851838618387183881838918390183911839218393183941839518396183971839818399184001840118402184031840418405184061840718408184091841018411184121841318414184151841618417184181841918420184211842218423184241842518426184271842818429184301843118432184331843418435184361843718438184391844018441184421844318444184451844618447184481844918450184511845218453184541845518456184571845818459184601846118462184631846418465184661846718468184691847018471184721847318474184751847618477184781847918480184811848218483184841848518486184871848818489184901849118492184931849418495184961849718498184991850018501185021850318504185051850618507185081850918510185111851218513185141851518516185171851818519185201852118522185231852418525185261852718528185291853018531185321853318534185351853618537185381853918540185411854218543185441854518546185471854818549185501855118552185531855418555185561855718558185591856018561185621856318564185651856618567185681856918570185711857218573185741
857518576185771857818579185801858118582185831858418585185861858718588185891859018591185921859318594185951859618597185981859918600186011860218603186041860518606186071860818609186101861118612186131861418615186161861718618186191862018621186221862318624186251862618627186281862918630186311863218633186341863518636186371863818639186401864118642186431864418645186461864718648186491865018651186521865318654186551865618657186581865918660186611866218663186641866518666186671866818669186701867118672186731867418675186761867718678186791868018681186821868318684186851868618687186881868918690186911869218693186941869518696186971869818699187001870118702187031870418705187061870718708187091871018711187121871318714187151871618717187181871918720187211872218723187241872518726187271872818729187301873118732187331873418735187361873718738187391874018741187421874318744187451874618747187481874918750187511875218753187541875518756187571875818759187601876118762187631876418765187661876718768187691877018771187721877318774187751877618777187781877918780187811878218783187841878518786187871878818789187901879118792187931879418795187961879718798187991880018801188021880318804188051880618807188081880918810188111881218813188141881518816188171881818819188201882118822188231882418825188261882718828188291883018831188321883318834188351883618837188381883918840188411884218843188441884518846188471884818849188501885118852188531885418855188561885718858188591886018861188621886318864188651886618867188681886918870188711887218873188741887518876188771887818879188801888118882188831888418885188861888718888188891889018891188921889318894188951889618897188981889918900189011890218903189041890518906189071890818909189101891118912189131891418915189161891718918189191892018921189221892318924189251892618927189281892918930189311893218933189341893518936189371893818939189401894118942189431894418945189461894718948189491895018951189521895318954189551895618957189581895918960189611896218963189641896518966189671896818969189701897118972189731897418975189761897718978189791898018981189821898318984189851898618987189881898918990189911899218993189941899518996189971899818999190001900119002190031900419005190061900719008190091901019011190121901319014190151901619017190181901919020190211902219023190241902519026190271902819029190301903119032190331903419035190361903719038190391904019041190421904319044190451904619047190481904919050190511905219053190541905519056190571905819059190601906119062190631906419065190661906719068190691907019071190721907319074190751907619077190781907919080190811908219083190841908519086190871908819089190901909119092190931909419095190961909719098190991910019101191021910319104191051910619107191081910919110191111911219113191141911519116191171911819119191201912119122191231912419125191261912719128191291913019131191321913319134191351913619137191381913919140191411914219143191441914519146191471914819149191501915119152191531915419155191561915719158191591916019161191621916319164191651916619167191681916919170191711917219173191741917519176191771917819179191801918119182191831918419185191861918719188191891919019191191921919319194191951919619197191981919919200192011920219203192041920519206192071920819209192101921119212192131921419215192161921719218192191922019221192221922319224192251922619227192281922919230192311923219233192341923519236192371923819239192401924119242192431924419245192461924719248192491925019251192521925319254192551925619257192581925919260192611926219263192641926519266192671926819269192701927119272192731927419275192761927719278192791928019281192821928319284192851
928619287192881928919290192911929219293192941929519296192971929819299193001930119302193031930419305193061930719308193091931019311193121931319314193151931619317193181931919320193211932219323193241932519326193271932819329193301933119332193331933419335193361933719338193391934019341193421934319344193451934619347193481934919350193511935219353193541935519356193571935819359193601936119362193631936419365193661936719368193691937019371193721937319374193751937619377193781937919380193811938219383193841938519386193871938819389193901939119392193931939419395193961939719398193991940019401194021940319404194051940619407194081940919410194111941219413194141941519416194171941819419194201942119422194231942419425194261942719428194291943019431194321943319434194351943619437194381943919440194411944219443194441944519446194471944819449194501945119452194531945419455194561945719458194591946019461194621946319464194651946619467194681946919470194711947219473194741947519476194771947819479194801948119482194831948419485194861948719488194891949019491194921949319494194951949619497194981949919500195011950219503195041950519506195071950819509195101951119512195131951419515195161951719518195191952019521195221952319524195251952619527195281952919530195311953219533195341953519536195371953819539195401954119542195431954419545195461954719548195491955019551195521955319554195551955619557195581955919560195611956219563195641956519566195671956819569195701957119572195731957419575195761957719578195791958019581195821958319584195851958619587195881958919590195911959219593195941959519596195971959819599196001960119602196031960419605196061960719608196091961019611196121961319614196151961619617196181961919620196211962219623196241962519626196271962819629196301963119632196331963419635196361963719638196391964019641196421964319644196451964619647196481964919650196511965219653196541965519656196571965819659196601966119662196631966419665196661966719668196691967019671196721967319674196751967619677196781967919680196811968219683196841968519686196871968819689196901969119692196931969419695196961969719698196991970019701197021970319704197051970619707197081970919710197111971219713197141971519716197171971819719197201972119722197231972419725197261972719728197291973019731197321973319734197351973619737197381973919740197411974219743197441974519746197471974819749197501975119752197531975419755197561975719758197591976019761197621976319764197651976619767197681976919770197711977219773197741977519776197771977819779197801978119782197831978419785197861978719788197891979019791197921979319794197951979619797197981979919800198011980219803198041980519806198071980819809198101981119812198131981419815198161981719818198191982019821198221982319824198251982619827198281982919830198311983219833198341983519836198371983819839198401984119842198431984419845198461984719848198491985019851198521985319854198551985619857198581985919860198611986219863198641986519866198671986819869198701987119872198731987419875198761987719878198791988019881198821988319884198851988619887198881988919890198911989219893198941989519896198971989819899199001990119902199031990419905199061990719908199091991019911199121991319914199151991619917199181991919920199211992219923199241992519926199271992819929199301993119932199331993419935199361993719938199391994019941199421994319944199451994619947199481994919950199511995219953199541995519956199571995819959199601996119962199631996419965199661996719968199691997019971199721997319974199751997619977199781997919980199811998219983199841998519986199871998819989199901999119992199931999419995199961
999719998199992000020001200022000320004200052000620007200082000920010200112001220013200142001520016200172001820019200202002120022200232002420025200262002720028200292003020031200322003320034200352003620037200382003920040200412004220043200442004520046200472004820049200502005120052200532005420055200562005720058200592006020061200622006320064200652006620067200682006920070200712007220073200742007520076200772007820079200802008120082200832008420085200862008720088200892009020091200922009320094200952009620097200982009920100201012010220103201042010520106201072010820109201102011120112201132011420115201162011720118201192012020121201222012320124201252012620127201282012920130201312013220133201342013520136201372013820139201402014120142201432014420145201462014720148201492015020151201522015320154201552015620157201582015920160201612016220163201642016520166201672016820169201702017120172201732017420175201762017720178201792018020181201822018320184201852018620187201882018920190201912019220193201942019520196201972019820199202002020120202202032020420205202062020720208202092021020211202122021320214202152021620217202182021920220202212022220223202242022520226202272022820229202302023120232202332023420235202362023720238202392024020241202422024320244202452024620247202482024920250202512025220253202542025520256202572025820259202602026120262202632026420265202662026720268202692027020271202722027320274202752027620277202782027920280202812028220283202842028520286202872028820289202902029120292202932029420295202962029720298202992030020301203022030320304203052030620307203082030920310203112031220313203142031520316203172031820319203202032120322203232032420325203262032720328203292033020331203322033320334203352033620337203382033920340203412034220343203442034520346203472034820349203502035120352203532035420355203562035720358203592036020361203622036320364203652036620367203682036920370203712037220373203742037520376203772037820379203802038120382203832038420385203862038720388203892039020391203922039320394203952039620397203982039920400204012040220403204042040520406204072040820409204102041120412204132041420415204162041720418204192042020421204222042320424204252042620427204282042920430204312043220433204342043520436204372043820439204402044120442204432044420445204462044720448204492045020451204522045320454204552045620457204582045920460204612046220463204642046520466204672046820469204702047120472204732047420475204762047720478204792048020481204822048320484204852048620487204882048920490204912049220493204942049520496204972049820499205002050120502205032050420505205062050720508205092051020511205122051320514205152051620517205182051920520205212052220523205242052520526205272052820529205302053120532205332053420535205362053720538205392054020541205422054320544205452054620547205482054920550205512055220553205542055520556205572055820559205602056120562205632056420565205662056720568205692057020571205722057320574205752057620577205782057920580205812058220583205842058520586205872058820589205902059120592205932059420595205962059720598205992060020601206022060320604206052060620607206082060920610206112061220613206142061520616206172061820619206202062120622206232062420625206262062720628206292063020631206322063320634206352063620637206382063920640206412064220643206442064520646206472064820649206502065120652206532065420655206562065720658206592066020661206622066320664206652066620667206682066920670206712067220673206742067520676206772067820679206802068120682206832068420685206862068720688206892069020691206922069320694206952069620697206982069920700207012070220703207042070520706207072
070820709207102071120712207132071420715207162071720718207192072020721207222072320724207252072620727207282072920730207312073220733207342073520736207372073820739207402074120742207432074420745207462074720748207492075020751207522075320754207552075620757207582075920760207612076220763207642076520766207672076820769207702077120772207732077420775207762077720778207792078020781207822078320784207852078620787207882078920790207912079220793207942079520796207972079820799208002080120802208032080420805208062080720808208092081020811208122081320814208152081620817208182081920820208212082220823208242082520826208272082820829208302083120832208332083420835208362083720838208392084020841208422084320844208452084620847208482084920850208512085220853208542085520856208572085820859208602086120862208632086420865208662086720868208692087020871208722087320874208752087620877208782087920880208812088220883208842088520886208872088820889208902089120892208932089420895208962089720898208992090020901209022090320904209052090620907209082090920910209112091220913209142091520916209172091820919209202092120922209232092420925209262092720928209292093020931209322093320934209352093620937209382093920940209412094220943209442094520946209472094820949209502095120952209532095420955209562095720958209592096020961209622096320964209652096620967209682096920970209712097220973209742097520976209772097820979209802098120982209832098420985209862098720988209892099020991209922099320994209952099620997209982099921000210012100221003210042100521006210072100821009210102101121012210132101421015210162101721018210192102021021210222102321024210252102621027210282102921030210312103221033210342103521036210372103821039210402104121042210432104421045210462104721048210492105021051210522105321054210552105621057210582105921060210612106221063210642106521066210672106821069210702107121072210732107421075210762107721078210792108021081210822108321084210852108621087210882108921090210912109221093210942109521096210972109821099211002110121102211032110421105211062110721108211092111021111211122111321114211152111621117211182111921120211212112221123211242112521126211272112821129211302113121132211332113421135211362113721138211392114021141211422114321144211452114621147211482114921150211512115221153211542115521156211572115821159211602116121162211632116421165211662116721168211692117021171211722117321174211752117621177211782117921180211812118221183211842118521186211872118821189211902119121192211932119421195211962119721198211992120021201212022120321204212052120621207212082120921210212112121221213212142121521216212172121821219212202122121222212232122421225212262122721228212292123021231212322123321234212352123621237212382123921240212412124221243212442124521246212472124821249212502125121252212532125421255212562125721258212592126021261212622126321264212652126621267212682126921270212712127221273212742127521276212772127821279212802128121282212832128421285212862128721288212892129021291212922129321294212952129621297212982129921300213012130221303213042130521306213072130821309213102131121312213132131421315213162131721318213192132021321213222132321324213252132621327213282132921330213312133221333213342133521336213372133821339213402134121342213432134421345213462134721348213492135021351213522135321354213552135621357213582135921360213612136221363213642136521366213672136821369213702137121372213732137421375213762137721378213792138021381213822138321384213852138621387213882138921390213912139221393213942139521396213972139821399214002140121402214032140421405214062140721408214092141021411214122141321414214152141621417214182
141921420214212142221423214242142521426214272142821429214302143121432214332143421435214362143721438214392144021441214422144321444214452144621447214482144921450214512145221453214542145521456214572145821459214602146121462214632146421465214662146721468214692147021471214722147321474214752147621477214782147921480214812148221483214842148521486214872148821489214902149121492214932149421495214962149721498214992150021501215022150321504215052150621507215082150921510215112151221513215142151521516215172151821519215202152121522215232152421525215262152721528215292153021531215322153321534215352153621537215382153921540215412154221543215442154521546215472154821549215502155121552215532155421555215562155721558215592156021561215622156321564215652156621567215682156921570215712157221573215742157521576215772157821579215802158121582215832158421585215862158721588215892159021591215922159321594215952159621597215982159921600216012160221603216042160521606216072160821609216102161121612216132161421615216162161721618216192162021621216222162321624216252162621627216282162921630216312163221633216342163521636216372163821639216402164121642216432164421645216462164721648216492165021651216522165321654216552165621657216582165921660216612166221663216642166521666216672166821669216702167121672216732167421675216762167721678216792168021681216822168321684216852168621687216882168921690216912169221693216942169521696216972169821699217002170121702217032170421705217062170721708217092171021711217122171321714217152171621717217182171921720217212172221723217242172521726217272172821729217302173121732217332173421735217362173721738217392174021741217422174321744217452174621747217482174921750217512175221753217542175521756217572175821759217602176121762217632176421765217662176721768217692177021771217722177321774217752177621777217782177921780217812178221783217842178521786217872178821789217902179121792217932179421795217962179721798217992180021801218022180321804218052180621807218082180921810218112181221813218142181521816218172181821819218202182121822218232182421825218262182721828218292183021831218322183321834218352183621837218382183921840218412184221843218442184521846218472184821849218502185121852218532185421855218562185721858218592186021861218622186321864218652186621867218682186921870218712187221873218742187521876218772187821879218802188121882218832188421885218862188721888218892189021891218922189321894218952189621897218982189921900219012190221903219042190521906219072190821909219102191121912219132191421915219162191721918219192192021921219222192321924219252192621927219282192921930219312193221933219342193521936219372193821939219402194121942219432194421945219462194721948219492195021951219522195321954219552195621957219582195921960219612196221963219642196521966219672196821969219702197121972219732197421975219762197721978219792198021981219822198321984219852198621987219882198921990219912199221993219942199521996219972199821999220002200122002220032200422005220062200722008220092201022011220122201322014220152201622017220182201922020220212202222023220242202522026220272202822029220302203122032220332203422035220362203722038220392204022041220422204322044220452204622047220482204922050220512205222053220542205522056220572205822059220602206122062220632206422065220662206722068220692207022071220722207322074220752207622077220782207922080220812208222083220842208522086220872208822089220902209122092220932209422095220962209722098220992210022101221022210322104221052210622107221082210922110221112211222113221142211522116221172211822119221202212122122221232212422125221262212722128221292
213022131221322213322134221352213622137221382213922140221412214222143221442214522146221472214822149221502215122152221532215422155221562215722158221592216022161221622216322164221652216622167221682216922170221712217222173221742217522176221772217822179221802218122182221832218422185221862218722188221892219022191221922219322194221952219622197221982219922200222012220222203222042220522206222072220822209222102221122212222132221422215222162221722218222192222022221222222222322224222252222622227222282222922230222312223222233222342223522236222372223822239222402224122242222432224422245222462224722248222492225022251222522225322254222552225622257222582225922260222612226222263222642226522266222672226822269222702227122272222732227422275222762227722278222792228022281222822228322284222852228622287222882228922290222912229222293222942229522296222972229822299223002230122302223032230422305223062230722308223092231022311223122231322314223152231622317223182231922320223212232222323223242232522326223272232822329223302233122332223332233422335223362233722338223392234022341223422234322344223452234622347223482234922350223512235222353223542235522356223572235822359223602236122362223632236422365223662236722368223692237022371223722237322374223752237622377223782237922380223812238222383223842238522386223872238822389223902239122392223932239422395223962239722398223992240022401224022240322404224052240622407224082240922410224112241222413224142241522416224172241822419224202242122422224232242422425224262242722428224292243022431224322243322434224352243622437224382243922440224412244222443224442244522446224472244822449224502245122452224532245422455224562245722458224592246022461224622246322464224652246622467224682246922470224712247222473224742247522476224772247822479224802248122482224832248422485224862248722488224892249022491224922249322494224952249622497224982249922500225012250222503225042250522506225072250822509225102251122512225132251422515225162251722518225192252022521225222252322524225252252622527225282252922530225312253222533225342253522536225372253822539225402254122542225432254422545225462254722548225492255022551225522255322554225552255622557225582255922560225612256222563225642256522566225672256822569225702257122572225732257422575225762257722578225792258022581225822258322584225852258622587225882258922590225912259222593225942259522596225972259822599226002260122602226032260422605226062260722608226092261022611226122261322614226152261622617226182261922620226212262222623226242262522626226272262822629226302263122632226332263422635226362263722638226392264022641226422264322644226452264622647226482264922650226512265222653226542265522656226572265822659226602266122662226632266422665226662266722668226692267022671226722267322674226752267622677226782267922680226812268222683226842268522686226872268822689226902269122692226932269422695226962269722698226992270022701227022270322704227052270622707227082270922710227112271222713227142271522716227172271822719227202272122722227232272422725227262272722728227292273022731227322273322734227352273622737227382273922740227412274222743227442274522746227472274822749227502275122752227532275422755227562275722758227592276022761227622276322764227652276622767227682276922770227712277222773227742277522776227772277822779227802278122782227832278422785227862278722788227892279022791227922279322794227952279622797227982279922800228012280222803228042280522806228072280822809228102281122812228132281422815228162281722818228192282022821228222282322824228252282622827228282282922830228312283222833228342283522836228372283822839228402
28412284222843228442284522846228472284822849228502285122852228532285422855228562285722858228592286022861228622286322864228652286622867228682286922870228712287222873228742287522876228772287822879228802288122882228832288422885228862288722888228892289022891228922289322894228952289622897228982289922900229012290222903229042290522906229072290822909229102291122912229132291422915229162291722918229192292022921229222292322924229252292622927229282292922930229312293222933229342293522936229372293822939229402294122942229432294422945229462294722948229492295022951229522295322954229552295622957229582295922960229612296222963229642296522966229672296822969229702297122972229732297422975229762297722978229792298022981229822298322984229852298622987229882298922990229912299222993229942299522996229972299822999230002300123002230032300423005230062300723008230092301023011230122301323014230152301623017230182301923020230212302223023230242302523026230272302823029
#define _CRT_SECURE_NO_DEPRECATE // Disables ridiculous "unsafe" warnings on Windows
#define _USE_MATH_DEFINES // For M_PI on MSVC

#include "ggml-impl.h"
#include "ggml-quants.h"
#include "ggml.h"

#if defined(_MSC_VER) || defined(__MINGW32__)
#include <malloc.h> // using malloc.h with MSC/MINGW
#elif !defined(__FreeBSD__) && !defined(__NetBSD__) && !defined(__OpenBSD__)
#include <alloca.h>
#endif

#include <assert.h>
#include <errno.h>
#include <time.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>
#include <float.h>
#include <limits.h>
#include <stdarg.h>
#include <signal.h>
#if defined(__gnu_linux__)
#include <syscall.h>
#endif

#ifdef GGML_USE_METAL
#include <unistd.h>
#endif

#ifdef __ARM_FEATURE_MATMUL_INT8
#undef GGML_USE_LLAMAFILE
#endif

#ifdef GGML_USE_LLAMAFILE
#include "sgemm.h"
#endif

#if defined(_MSC_VER)
// disable "possible loss of data" to avoid hundreds of casts
// we should just be careful :)
#pragma warning(disable: 4244 4267)

// disable POSIX deprecation warnings
// these functions are never going away, anyway
#pragma warning(disable: 4996)
#endif

#if defined(_WIN32)

#define WIN32_LEAN_AND_MEAN
#ifndef NOMINMAX
    #define NOMINMAX
#endif
#include <windows.h>
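
// emulate a minimal subset of C11 <stdatomic.h> on top of the Win32
// Interlocked* intrinsics (LONG is 32 bits on Windows, matching atomic_int;
// atomic_load is a compare-exchange with equal operands, i.e. a fenced read)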
typedef volatile LONG atomic_int;
typedef atomic_int atomic_bool;

static void atomic_store(atomic_int * ptr, LONG val) {
    InterlockedExchange(ptr, val);
}
static LONG atomic_load(atomic_int * ptr) {
    return InterlockedCompareExchange(ptr, 0, 0);
}
static LONG atomic_fetch_add(atomic_int * ptr, LONG inc) {
    return InterlockedExchangeAdd(ptr, inc);
}
static LONG atomic_fetch_sub(atomic_int * ptr, LONG dec) {
    return atomic_fetch_add(ptr, -(dec));
}
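
// map the small subset of the pthreads API used in this file onto Win32 threads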
typedef HANDLE pthread_t;
typedef DWORD thread_ret_t;

static int pthread_create(pthread_t * out, void * unused, thread_ret_t(*func)(void *), void * arg) {
    (void) unused;
    HANDLE handle = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE) func, arg, 0, NULL);
    if (handle == NULL) {
        return EAGAIN;
    }

    *out = handle;
    return 0;
}

static int pthread_join(pthread_t thread, void * unused) {
    (void) unused;
    int ret = (int) WaitForSingleObject(thread, INFINITE);
    CloseHandle(thread);
    return ret;
}

static int sched_yield (void) {
    Sleep (0);
    return 0;
}
#else

#include <pthread.h>
#include <stdatomic.h>

typedef void * thread_ret_t;

#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

#endif

#ifdef GGML_USE_CPU_HBM
#include <hbwmalloc.h>
#endif

#if defined(__APPLE__)
#include <TargetConditionals.h>
#endif

#if (defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)) && \
    (!defined(TARGET_OS_TV) && !defined(TARGET_OS_WATCH))

#include <sys/wait.h>
void ggml_print_backtrace(void) {
    /*
    #include <execinfo.h>
    #include <dlfcn.h>

    void * trace[100];

    int nptrs = backtrace(trace, sizeof(trace)/sizeof(trace[0]));

    backtrace_symbols_fd(trace, nptrs, STDERR_FILENO);
    */

    // backtrace_symbols does not show line numbers, use gdb instead
    char attach[32];
    snprintf(attach, sizeof(attach), "attach %d", getpid());
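    // fork a child that attaches gdb to this process, prints a backtrace with
    // source locations, and detaches; the parent just waits for it to finish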
    int pid = fork();
    if (pid == 0) {
        execlp("gdb", "gdb", "--batch",
            "-ex", "set style enabled on",
            "-ex", attach,
            "-ex", "bt -frame-info source-and-location",
            "-ex", "detach",
            "-ex", "quit",
            (char *) NULL);
    } else {
        waitpid(pid, NULL, 0);
    }
}
#else
void ggml_print_backtrace(void) {
    // platform not supported
}
#endif

/*#define GGML_PERF*/
#define GGML_DEBUG 0
#define GGML_GELU_FP16
#define GGML_GELU_QUICK_FP16
#define GGML_SILU_FP16
// #define GGML_CROSS_ENTROPY_EXP_FP16
// #define GGML_FLASH_ATTN_EXP_FP16
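
// manual unroll factors for the soft-max and vector dot/mad loops later in this file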
#define GGML_SOFT_MAX_UNROLL 4
#define GGML_VEC_DOT_UNROLL  2
#define GGML_VEC_MAD_UNROLL  32
//
// logging
//

#if (GGML_DEBUG >= 1)
#define GGML_PRINT_DEBUG(...) printf(__VA_ARGS__)
#else
#define GGML_PRINT_DEBUG(...)
#endif

#if (GGML_DEBUG >= 5)
#define GGML_PRINT_DEBUG_5(...) printf(__VA_ARGS__)
#else
#define GGML_PRINT_DEBUG_5(...)
#endif

#if (GGML_DEBUG >= 10)
#define GGML_PRINT_DEBUG_10(...) printf(__VA_ARGS__)
#else
#define GGML_PRINT_DEBUG_10(...)
#endif

#define GGML_PRINT(...) printf(__VA_ARGS__)

//
// end of logging block
//

#ifdef GGML_USE_ACCELERATE
// uncomment to use vDSP for soft max computation
// note: not sure if it is actually faster
//#define GGML_SOFT_MAX_ACCELERATE
#endif

#if defined(_MSC_VER) || defined(__MINGW32__)
#define GGML_ALIGNED_MALLOC(size) _aligned_malloc(size, GGML_MEM_ALIGN)
#define GGML_ALIGNED_FREE(ptr)    _aligned_free(ptr)
#else
inline static void * ggml_aligned_malloc(size_t size) {
    if (size == 0) {
        GGML_PRINT("WARNING: Behavior may be unexpected when allocating 0 bytes for ggml_aligned_malloc!\n");
        return NULL;
    }
    void * aligned_memory = NULL;
#ifdef GGML_USE_CPU_HBM
    int result = hbw_posix_memalign(&aligned_memory, 16, size);
#elif defined(GGML_USE_METAL)
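// Metal requires page-aligned host allocations (e.g. for newBufferWithBytesNoCopy)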
    int result = posix_memalign(&aligned_memory, sysconf(_SC_PAGESIZE), size);
#else
    int result = posix_memalign(&aligned_memory, GGML_MEM_ALIGN, size);
#endif
    if (result != 0) {
        // Handle allocation failure
        const char *error_desc = "unknown allocation error";
        switch (result) {
            case EINVAL:
                error_desc = "invalid alignment value";
                break;
            case ENOMEM:
                error_desc = "insufficient memory";
                break;
        }
        GGML_PRINT("%s: %s (attempted to allocate %6.2f MB)\n", __func__, error_desc, size/(1024.0*1024.0));
        GGML_ASSERT(false);
        return NULL;
    }
    return aligned_memory;
}
#define GGML_ALIGNED_MALLOC(size) ggml_aligned_malloc(size)
#ifdef GGML_USE_CPU_HBM
#define GGML_ALIGNED_FREE(ptr)    if(NULL != ptr) hbw_free(ptr)
#else
#define GGML_ALIGNED_FREE(ptr)    free(ptr)
#endif
#endif
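
// usage sketch (hypothetical caller, not part of this file): code that needs
// aligned tensor storage goes through the macro so the platform-appropriate
// allocator is picked up, e.g.:
//
//     void * buf = GGML_ALIGNED_MALLOC(nbytes);
//     ...
//     GGML_ALIGNED_FREE(buf);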

inline static void * ggml_malloc(size_t size) {
    if (size == 0) {
        GGML_PRINT("WARNING: Behavior may be unexpected when allocating 0 bytes for ggml_malloc!\n");
        return NULL;
    }
    void * result = malloc(size);
    if (result == NULL) {
        GGML_PRINT("%s: failed to allocate %6.2f MB\n", __func__, size/(1024.0*1024.0));
        GGML_ASSERT(false);
    }
    return result;
}

// calloc
inline static void * ggml_calloc(size_t num, size_t size) {
    if (num == 0 || size == 0) {
        GGML_PRINT("WARNING: Behavior may be unexpected when allocating 0 bytes for ggml_calloc!\n");
        return NULL;
    }
    void * result = calloc(num, size);
    if (result == NULL) {
        GGML_PRINT("%s: failed to allocate %6.2f MB\n", __func__, (num*size)/(1024.0*1024.0));
        GGML_ASSERT(false);
    }
    return result;
}

#define GGML_MALLOC(size)      ggml_malloc(size)
#define GGML_CALLOC(num, size) ggml_calloc(num, size)

#define GGML_FREE(ptr) free(ptr)

#define UNUSED GGML_UNUSED

#define SWAP(x, y, T) do { T SWAP = x; x = y; y = SWAP; } while (0)

#if defined(GGML_USE_ACCELERATE)
#include <Accelerate/Accelerate.h>
#if defined(GGML_USE_CLBLAST) // allow usage of CLBlast alongside Accelerate functions
#include "ggml-opencl.h"
#endif
#elif defined(GGML_USE_OPENBLAS)
#if defined(GGML_BLAS_USE_MKL)
#include <mkl.h>
#else
#include <cblas.h>
#endif
#elif defined(GGML_USE_CLBLAST)
#include "ggml-opencl.h"
#endif

// floating point type used to accumulate sums
typedef double ggml_float;

#undef MIN
#undef MAX

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

//
// global data
//
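// note: each f16 table below has 1 << 16 = 65536 entries, one per possible
// ggml_fp16_t bit pattern; they are filled in during ggml_init()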
  263. // precomputed gelu table for f16 (128 KB)
  264. static ggml_fp16_t ggml_table_gelu_f16[1 << 16];
  265. // precomputed quick gelu table for f16 (128 KB)
  266. static ggml_fp16_t ggml_table_gelu_quick_f16[1 << 16];
  267. // precomputed silu table for f16 (128 KB)
  268. static ggml_fp16_t ggml_table_silu_f16[1 << 16];
  269. // precomputed exp table for f16 (128 KB)
  270. static ggml_fp16_t ggml_table_exp_f16[1 << 16];
  271. // precomputed f32 table for f16 (256 KB) (ggml-impl.h)
  272. float ggml_table_f32_f16[1 << 16];
GGML_CALL const char * ggml_status_to_string(enum ggml_status status) {
    switch (status) {
        case GGML_STATUS_ALLOC_FAILED: return "GGML status: error (failed to allocate memory)";
        case GGML_STATUS_FAILED:       return "GGML status: error (operation failed)";
        case GGML_STATUS_SUCCESS:      return "GGML status: success";
        case GGML_STATUS_ABORTED:      return "GGML status: warning (operation aborted)";
    }
    return "GGML status: unknown";
}
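
// NOTE: usage sketch (illustrative):
//
//     enum ggml_status st = ggml_graph_compute(graph, &cplan); // any ggml call returning a status
//     if (st != GGML_STATUS_SUCCESS) {
//         fprintf(stderr, "%s\n", ggml_status_to_string(st));
//     }

// The scalar conversion helpers below are the public API; each one #defines its own name
// away after its definition so that the rest of this file is forced to go through the
// GGML_*_TO_* macros (which may expand to a table lookup or a native instruction).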
float ggml_fp16_to_fp32(ggml_fp16_t x) {
#define ggml_fp16_to_fp32 do_not_use__ggml_fp16_to_fp32__in_ggml
    return GGML_FP16_TO_FP32(x);
}

ggml_fp16_t ggml_fp32_to_fp16(float x) {
#define ggml_fp32_to_fp16 do_not_use__ggml_fp32_to_fp16__in_ggml
    return GGML_FP32_TO_FP16(x);
}

float ggml_bf16_to_fp32(ggml_bf16_t x) {
#define ggml_bf16_to_fp32 do_not_use__ggml_bf16_to_fp32__in_ggml
    return GGML_BF16_TO_FP32(x); // it just left shifts
}

ggml_bf16_t ggml_fp32_to_bf16(float x) {
#define ggml_fp32_to_bf16 do_not_use__ggml_fp32_to_bf16__in_ggml
    return GGML_FP32_TO_BF16(x);
}
void ggml_fp16_to_fp32_row(const ggml_fp16_t * x, float * y, int64_t n) {
    for (int64_t i = 0; i < n; i++) {
        y[i] = GGML_FP16_TO_FP32(x[i]);
    }
}

void ggml_fp32_to_fp16_row(const float * x, ggml_fp16_t * y, int64_t n) {
    int64_t i = 0;
#if defined(__F16C__)
    for (; i + 7 < n; i += 8) {
        __m256 x_vec = _mm256_loadu_ps(x + i);
        __m128i y_vec = _mm256_cvtps_ph(x_vec, _MM_FROUND_TO_NEAREST_INT);
        _mm_storeu_si128((__m128i *)(y + i), y_vec);
    }
    for (; i + 3 < n; i += 4) {
        __m128 x_vec = _mm_loadu_ps(x + i);
        __m128i y_vec = _mm_cvtps_ph(x_vec, _MM_FROUND_TO_NEAREST_INT);
        _mm_storel_epi64((__m128i *)(y + i), y_vec);
    }
#endif
    for (; i < n; i++) {
        y[i] = GGML_FP32_TO_FP16(x[i]);
    }
}
void ggml_bf16_to_fp32_row(const ggml_bf16_t * x, float * y, int64_t n) {
    int64_t i = 0;
#if defined(__AVX512F__)
    for (; i + 16 <= n; i += 16) {
        _mm512_storeu_ps(y + i,
                         _mm512_castsi512_ps(
                             _mm512_slli_epi32(
                                 _mm512_cvtepu16_epi32(
                                     _mm256_loadu_si256(
                                         (const __m256i *)(x + i))),
                                 16)));
    }
#elif defined(__AVX2__)
    for (; i + 8 <= n; i += 8) {
        _mm256_storeu_ps(y + i,
                         _mm256_castsi256_ps(
                             _mm256_slli_epi32(
                                 _mm256_cvtepu16_epi32(
                                     _mm_loadu_si128(
                                         (const __m128i *)(x + i))),
                                 16)));
    }
#endif
    for (; i < n; i++) {
        y[i] = GGML_BF16_TO_FP32(x[i]);
    }
}
void ggml_fp32_to_bf16_row(const float * x, ggml_bf16_t * y, int64_t n) {
    int64_t i = 0; // int64_t, like the other row converters, so rows longer than INT_MAX are handled
#if defined(__AVX512BF16__)
    for (; i + 32 <= n; i += 32) {
        _mm512_storeu_ps(
            (__m512 *)(y + i),
            (__m512)_mm512_cvtne2ps_pbh(_mm512_loadu_ps(x + i + 16),
                                        _mm512_loadu_ps(x + i)));
    }
#endif
    for (; i < n; i++) {
        y[i] = GGML_FP32_TO_BF16(x[i]);
    }
}
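
// NOTE: round-trip sketch for the row converters above (illustrative):
//
//     float src[64], dst[64];
//     ggml_bf16_t tmp[64];
//     ggml_fp32_to_bf16_row(src, tmp, 64); // narrowing: keeps the high 16 bits of each f32 (rounded)
//     ggml_bf16_to_fp32_row(tmp, dst, 64); // widening is exact: just a left shift by 16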
bool ggml_guid_matches(ggml_guid_t guid_a, ggml_guid_t guid_b) {
    return memcmp(guid_a, guid_b, sizeof(ggml_guid)) == 0;
}

//
// timing
//
#if defined(_MSC_VER) || defined(__MINGW32__)
static int64_t timer_freq, timer_start;
void ggml_time_init(void) {
    LARGE_INTEGER t;
    QueryPerformanceFrequency(&t);
    timer_freq = t.QuadPart;

    // The multiplication by 1000 or 1000000 below can cause an overflow if timer_freq
    // and the uptime are high enough.
    // We subtract the program start time to reduce the likelihood of that happening.
    QueryPerformanceCounter(&t);
    timer_start = t.QuadPart;
}
int64_t ggml_time_ms(void) {
    LARGE_INTEGER t;
    QueryPerformanceCounter(&t);
    return ((t.QuadPart-timer_start) * 1000) / timer_freq;
}
int64_t ggml_time_us(void) {
    LARGE_INTEGER t;
    QueryPerformanceCounter(&t);
    return ((t.QuadPart-timer_start) * 1000000) / timer_freq;
}
#else
void ggml_time_init(void) {}
int64_t ggml_time_ms(void) {
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (int64_t)ts.tv_sec*1000 + (int64_t)ts.tv_nsec/1000000;
}
int64_t ggml_time_us(void) {
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (int64_t)ts.tv_sec*1000000 + (int64_t)ts.tv_nsec/1000;
}
#endif
int64_t ggml_cycles(void) {
    return clock();
}

int64_t ggml_cycles_per_ms(void) {
    return CLOCKS_PER_SEC/1000;
}

#ifdef GGML_PERF
#define ggml_perf_time_ms()       ggml_time_ms()
#define ggml_perf_time_us()       ggml_time_us()
#define ggml_perf_cycles()        ggml_cycles()
#define ggml_perf_cycles_per_ms() ggml_cycles_per_ms()
#else
#define ggml_perf_time_ms()       0
#define ggml_perf_time_us()       0
#define ggml_perf_cycles()        0
#define ggml_perf_cycles_per_ms() 0
#endif
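
// NOTE: timing a region (illustrative sketch):
//
//     ggml_time_init(); // once at startup; the POSIX version is a no-op, only Windows needs it
//     const int64_t t_start_us = ggml_time_us();
//     ... work ...
//     const int64_t t_us = ggml_time_us() - t_start_us; // elapsed microseconds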
//
// cross-platform UTF-8 file paths
//

#ifdef _WIN32
static wchar_t * ggml_mbstowcs(const char * mbs) {
    int wlen = MultiByteToWideChar(CP_UTF8, 0, mbs, -1, NULL, 0);
    if (!wlen) {
        errno = EINVAL;
        return NULL;
    }

    wchar_t * wbuf = GGML_MALLOC(wlen * sizeof(wchar_t));
    wlen = MultiByteToWideChar(CP_UTF8, 0, mbs, -1, wbuf, wlen);
    if (!wlen) {
        GGML_FREE(wbuf);
        errno = EINVAL;
        return NULL;
    }
    return wbuf;
}
#endif
FILE * ggml_fopen(const char * fname, const char * mode) {
#ifdef _WIN32
    FILE * file = NULL;

    // convert fname (UTF-8)
    wchar_t * wfname = ggml_mbstowcs(fname);
    if (wfname) {
        // convert mode (ANSI)
        wchar_t * wmode = GGML_MALLOC((strlen(mode) + 1) * sizeof(wchar_t));
        wchar_t * wmode_p = wmode;
        do {
            *wmode_p++ = (wchar_t)*mode;
        } while (*mode++);

        // open file
        file = _wfopen(wfname, wmode);

        GGML_FREE(wfname);
        GGML_FREE(wmode);
    }
    return file;
#else
    return fopen(fname, mode);
#endif
}
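
// NOTE: callers can pass UTF-8 paths unconditionally (illustrative):
//
//     FILE * f = ggml_fopen(path_utf8, "rb"); // on Windows the name is converted to UTF-16 for _wfopen
//     if (f) { ... fclose(f); }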
//
// cache line
//

#if defined(__cpp_lib_hardware_interference_size)
#define CACHE_LINE_SIZE hardware_destructive_interference_size
#else
#if defined(__POWER9_VECTOR__)
#define CACHE_LINE_SIZE 128
#else
#define CACHE_LINE_SIZE 64
#endif
#endif

static const size_t CACHE_LINE_SIZE_F32 = CACHE_LINE_SIZE/sizeof(float);

static void ggml_vec_dot_f32(int n, float * restrict s, size_t bs, const float * restrict x, size_t bx, const float * restrict y, size_t by, int nrc);
static void ggml_vec_dot_f16(int n, float * restrict s, size_t bs, ggml_fp16_t * restrict x, size_t bx, ggml_fp16_t * restrict y, size_t by, int nrc);
static void ggml_vec_dot_bf16(int n, float * restrict s, size_t bs, ggml_bf16_t * restrict x, size_t bx, ggml_bf16_t * restrict y, size_t by, int nrc);
static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = {
    [GGML_TYPE_I8] = {
        .type_name = "i8",
        .blck_size = 1,
        .type_size = sizeof(int8_t),
        .is_quantized = false,
    },
    [GGML_TYPE_I16] = {
        .type_name = "i16",
        .blck_size = 1,
        .type_size = sizeof(int16_t),
        .is_quantized = false,
    },
    [GGML_TYPE_I32] = {
        .type_name = "i32",
        .blck_size = 1,
        .type_size = sizeof(int32_t),
        .is_quantized = false,
    },
    [GGML_TYPE_I64] = {
        .type_name = "i64",
        .blck_size = 1,
        .type_size = sizeof(int64_t),
        .is_quantized = false,
    },
    [GGML_TYPE_F64] = {
        .type_name = "f64",
        .blck_size = 1,
        .type_size = sizeof(double),
        .is_quantized = false,
        .nrows = 1,
    },
    [GGML_TYPE_F32] = {
        .type_name = "f32",
        .blck_size = 1,
        .type_size = sizeof(float),
        .is_quantized = false,
        .vec_dot = (ggml_vec_dot_t) ggml_vec_dot_f32,
        .vec_dot_type = GGML_TYPE_F32,
        .nrows = 1,
    },
    [GGML_TYPE_F16] = {
        .type_name = "f16",
        .blck_size = 1,
        .type_size = sizeof(ggml_fp16_t),
        .is_quantized = false,
        .to_float = (ggml_to_float_t) ggml_fp16_to_fp32_row,
        .from_float = (ggml_from_float_t) ggml_fp32_to_fp16_row,
        .from_float_reference = (ggml_from_float_t) ggml_fp32_to_fp16_row,
        .vec_dot = (ggml_vec_dot_t) ggml_vec_dot_f16,
        .vec_dot_type = GGML_TYPE_F16,
        .nrows = 1,
    },
    [GGML_TYPE_Q4_0] = {
        .type_name = "q4_0",
        .blck_size = QK4_0,
        .type_size = sizeof(block_q4_0),
        .is_quantized = true,
        .to_float = (ggml_to_float_t) dequantize_row_q4_0,
        .from_float = quantize_row_q4_0,
        .from_float_reference = (ggml_from_float_t) quantize_row_q4_0_reference,
        .vec_dot = ggml_vec_dot_q4_0_q8_0,
        .vec_dot_type = GGML_TYPE_Q8_0,
#if defined (__ARM_FEATURE_MATMUL_INT8)
        .nrows = 2,
#else
        .nrows = 1,
#endif
    },
    [GGML_TYPE_Q4_1] = {
        .type_name = "q4_1",
        .blck_size = QK4_1,
        .type_size = sizeof(block_q4_1),
        .is_quantized = true,
        .to_float = (ggml_to_float_t) dequantize_row_q4_1,
        .from_float = quantize_row_q4_1,
        .from_float_reference = (ggml_from_float_t) quantize_row_q4_1_reference,
        .vec_dot = ggml_vec_dot_q4_1_q8_1,
        .vec_dot_type = GGML_TYPE_Q8_1,
#if defined (__ARM_FEATURE_MATMUL_INT8)
        .nrows = 2,
#else
        .nrows = 1,
#endif
    },
    [4] = { // GGML_TYPE_Q4_2
        .type_name = "DEPRECATED",
        .blck_size = 0,
        .type_size = 0,
        .is_quantized = false,
        .to_float = NULL,
        .from_float = NULL,
        .from_float_reference = NULL,
        .vec_dot = NULL,
        .vec_dot_type = GGML_TYPE_COUNT,
        .nrows = 1,
    },
    [5] = { // GGML_TYPE_Q4_3
        .type_name = "DEPRECATED",
        .blck_size = 0,
        .type_size = 0,
        .is_quantized = false,
        .to_float = NULL,
        .from_float = NULL,
        .from_float_reference = NULL,
        .vec_dot = NULL,
        .vec_dot_type = GGML_TYPE_COUNT,
        .nrows = 1,
    },
    [GGML_TYPE_Q5_0] = {
        .type_name = "q5_0",
        .blck_size = QK5_0,
        .type_size = sizeof(block_q5_0),
        .is_quantized = true,
        .to_float = (ggml_to_float_t) dequantize_row_q5_0,
        .from_float = quantize_row_q5_0,
        .from_float_reference = (ggml_from_float_t) quantize_row_q5_0_reference,
        .vec_dot = ggml_vec_dot_q5_0_q8_0,
        .vec_dot_type = GGML_TYPE_Q8_0,
        .nrows = 1,
    },
    [GGML_TYPE_Q5_1] = {
        .type_name = "q5_1",
        .blck_size = QK5_1,
        .type_size = sizeof(block_q5_1),
        .is_quantized = true,
        .to_float = (ggml_to_float_t) dequantize_row_q5_1,
        .from_float = quantize_row_q5_1,
        .from_float_reference = (ggml_from_float_t) quantize_row_q5_1_reference,
        .vec_dot = ggml_vec_dot_q5_1_q8_1,
        .vec_dot_type = GGML_TYPE_Q8_1,
        .nrows = 1,
    },
    [GGML_TYPE_Q8_0] = {
        .type_name = "q8_0",
        .blck_size = QK8_0,
        .type_size = sizeof(block_q8_0),
        .is_quantized = true,
        .to_float = (ggml_to_float_t) dequantize_row_q8_0,
        .from_float = quantize_row_q8_0,
        .from_float_reference = (ggml_from_float_t) quantize_row_q8_0_reference,
        .vec_dot = ggml_vec_dot_q8_0_q8_0,
        .vec_dot_type = GGML_TYPE_Q8_0,
#if defined (__ARM_FEATURE_MATMUL_INT8)
        .nrows = 2,
#else
        .nrows = 1,
#endif
    },
    [GGML_TYPE_Q8_1] = {
        .type_name = "q8_1",
        .blck_size = QK8_1,
        .type_size = sizeof(block_q8_1),
        .is_quantized = true,
        .from_float = quantize_row_q8_1,
        .from_float_reference = (ggml_from_float_t) quantize_row_q8_1_reference,
        .vec_dot_type = GGML_TYPE_Q8_1,
        .nrows = 1,
    },
    [GGML_TYPE_Q2_K] = {
        .type_name = "q2_K",
        .blck_size = QK_K,
        .type_size = sizeof(block_q2_K),
        .is_quantized = true,
        .to_float = (ggml_to_float_t) dequantize_row_q2_K,
        .from_float = quantize_row_q2_K,
        .from_float_reference = (ggml_from_float_t) quantize_row_q2_K_reference,
        .vec_dot = ggml_vec_dot_q2_K_q8_K,
        .vec_dot_type = GGML_TYPE_Q8_K,
        .nrows = 1,
    },
    [GGML_TYPE_Q3_K] = {
        .type_name = "q3_K",
        .blck_size = QK_K,
        .type_size = sizeof(block_q3_K),
        .is_quantized = true,
        .to_float = (ggml_to_float_t) dequantize_row_q3_K,
        .from_float = quantize_row_q3_K,
        .from_float_reference = (ggml_from_float_t) quantize_row_q3_K_reference,
        .vec_dot = ggml_vec_dot_q3_K_q8_K,
        .vec_dot_type = GGML_TYPE_Q8_K,
        .nrows = 1,
    },
    [GGML_TYPE_Q4_K] = {
        .type_name = "q4_K",
        .blck_size = QK_K,
        .type_size = sizeof(block_q4_K),
        .is_quantized = true,
        .to_float = (ggml_to_float_t) dequantize_row_q4_K,
        .from_float = quantize_row_q4_K,
        .from_float_reference = (ggml_from_float_t) quantize_row_q4_K_reference,
        .vec_dot = ggml_vec_dot_q4_K_q8_K,
        .vec_dot_type = GGML_TYPE_Q8_K,
        .nrows = 1,
    },
    [GGML_TYPE_Q5_K] = {
        .type_name = "q5_K",
        .blck_size = QK_K,
        .type_size = sizeof(block_q5_K),
        .is_quantized = true,
        .to_float = (ggml_to_float_t) dequantize_row_q5_K,
        .from_float = quantize_row_q5_K,
        .from_float_reference = (ggml_from_float_t) quantize_row_q5_K_reference,
        .vec_dot = ggml_vec_dot_q5_K_q8_K,
        .vec_dot_type = GGML_TYPE_Q8_K,
        .nrows = 1,
    },
    [GGML_TYPE_Q6_K] = {
        .type_name = "q6_K",
        .blck_size = QK_K,
        .type_size = sizeof(block_q6_K),
        .is_quantized = true,
        .to_float = (ggml_to_float_t) dequantize_row_q6_K,
        .from_float = quantize_row_q6_K,
        .from_float_reference = (ggml_from_float_t) quantize_row_q6_K_reference,
        .vec_dot = ggml_vec_dot_q6_K_q8_K,
        .vec_dot_type = GGML_TYPE_Q8_K,
        .nrows = 1,
    },
    [GGML_TYPE_IQ2_XXS] = {
        .type_name = "iq2_xxs",
        .blck_size = QK_K,
        .type_size = sizeof(block_iq2_xxs),
        .is_quantized = true,
        .to_float = (ggml_to_float_t) dequantize_row_iq2_xxs,
        .from_float = NULL,
        .from_float_reference = NULL,
        .vec_dot = ggml_vec_dot_iq2_xxs_q8_K,
        .vec_dot_type = GGML_TYPE_Q8_K,
        .nrows = 1,
    },
    [GGML_TYPE_IQ2_XS] = {
        .type_name = "iq2_xs",
        .blck_size = QK_K,
        .type_size = sizeof(block_iq2_xs),
        .is_quantized = true,
        .to_float = (ggml_to_float_t) dequantize_row_iq2_xs,
        .from_float = NULL,
        .from_float_reference = NULL,
        .vec_dot = ggml_vec_dot_iq2_xs_q8_K,
        .vec_dot_type = GGML_TYPE_Q8_K,
        .nrows = 1,
    },
    [GGML_TYPE_IQ3_XXS] = {
        .type_name = "iq3_xxs",
        .blck_size = QK_K,
        .type_size = sizeof(block_iq3_xxs),
        .is_quantized = true,
        .to_float = (ggml_to_float_t) dequantize_row_iq3_xxs,
        .from_float = quantize_row_iq3_xxs,
        .from_float_reference = (ggml_from_float_t)quantize_row_iq3_xxs_reference,
        .vec_dot = ggml_vec_dot_iq3_xxs_q8_K,
        .vec_dot_type = GGML_TYPE_Q8_K,
        .nrows = 1,
    },
    [GGML_TYPE_IQ3_S] = {
        .type_name = "iq3_s",
        .blck_size = QK_K,
        .type_size = sizeof(block_iq3_s),
        .is_quantized = true,
        .to_float = (ggml_to_float_t) dequantize_row_iq3_s,
        .from_float = quantize_row_iq3_s,
        .from_float_reference = (ggml_from_float_t)quantize_row_iq3_s_reference,
        .vec_dot = ggml_vec_dot_iq3_s_q8_K,
        .vec_dot_type = GGML_TYPE_Q8_K,
        .nrows = 1,
    },
    [GGML_TYPE_IQ2_S] = {
        .type_name = "iq2_s",
        .blck_size = QK_K,
        .type_size = sizeof(block_iq2_s),
        .is_quantized = true,
        .to_float = (ggml_to_float_t) dequantize_row_iq2_s,
        .from_float = quantize_row_iq2_s,
        .from_float_reference = (ggml_from_float_t)quantize_row_iq2_s_reference,
        .vec_dot = ggml_vec_dot_iq2_s_q8_K,
        .vec_dot_type = GGML_TYPE_Q8_K,
        .nrows = 1,
    },
    [GGML_TYPE_IQ1_S] = {
        .type_name = "iq1_s",
        .blck_size = QK_K,
        .type_size = sizeof(block_iq1_s),
        .is_quantized = true,
        .to_float = (ggml_to_float_t) dequantize_row_iq1_s,
        .from_float = NULL,
        .from_float_reference = NULL,
        .vec_dot = ggml_vec_dot_iq1_s_q8_K,
        .vec_dot_type = GGML_TYPE_Q8_K,
        .nrows = 1,
    },
    [GGML_TYPE_IQ1_M] = {
        .type_name = "iq1_m",
        .blck_size = QK_K,
        .type_size = sizeof(block_iq1_m),
        .is_quantized = true,
        .to_float = (ggml_to_float_t) dequantize_row_iq1_m,
        .from_float = NULL,
        .from_float_reference = NULL,
        .vec_dot = ggml_vec_dot_iq1_m_q8_K,
        .vec_dot_type = GGML_TYPE_Q8_K,
        .nrows = 1,
    },
    [GGML_TYPE_IQ4_NL] = {
        .type_name = "iq4_nl",
        .blck_size = QK4_NL,
        .type_size = sizeof(block_iq4_nl),
        .is_quantized = true,
        .to_float = (ggml_to_float_t) dequantize_row_iq4_nl,
        .from_float = quantize_row_iq4_nl,
        .from_float_reference = (ggml_from_float_t)quantize_row_iq4_nl_reference,
        .vec_dot = ggml_vec_dot_iq4_nl_q8_0,
        .vec_dot_type = GGML_TYPE_Q8_0,
        .nrows = 1,
    },
    [GGML_TYPE_IQ4_XS] = {
        .type_name = "iq4_xs",
#if QK_K == 64
        .blck_size = QK4_NL,
#else
        .blck_size = QK_K,
#endif
        .type_size = sizeof(block_iq4_xs),
        .is_quantized = true,
        .to_float = (ggml_to_float_t) dequantize_row_iq4_xs,
        .from_float = quantize_row_iq4_xs,
        .from_float_reference = (ggml_from_float_t)quantize_row_iq4_xs_reference,
        .vec_dot = ggml_vec_dot_iq4_xs_q8_K,
#if QK_K == 64
        .vec_dot_type = GGML_TYPE_Q8_0,
#else
        .vec_dot_type = GGML_TYPE_Q8_K,
#endif
        .nrows = 1,
    },
    [GGML_TYPE_Q8_K] = {
        .type_name = "q8_K",
        .blck_size = QK_K,
        .type_size = sizeof(block_q8_K),
        .is_quantized = true,
        .from_float = quantize_row_q8_K,
    },
    [GGML_TYPE_BF16] = {
        .type_name = "bf16",
        .blck_size = 1,
        .type_size = sizeof(ggml_bf16_t),
        .is_quantized = false,
        .to_float = (ggml_to_float_t) ggml_bf16_to_fp32_row,
        .from_float = (ggml_from_float_t) ggml_fp32_to_bf16_row,
        .from_float_reference = (ggml_from_float_t) ggml_fp32_to_bf16_row,
        .vec_dot = (ggml_vec_dot_t) ggml_vec_dot_bf16,
        .vec_dot_type = GGML_TYPE_BF16,
        .nrows = 1,
    }
};
// For internal test use
ggml_type_traits_t ggml_internal_get_type_traits(enum ggml_type type) {
    GGML_ASSERT(type < GGML_TYPE_COUNT);
    return type_traits[type];
}
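
// NOTE: usage sketch (illustrative): querying the layout of a type
//
//     ggml_type_traits_t tt = ggml_internal_get_type_traits(GGML_TYPE_Q4_0);
//     // tt.blck_size values are packed into tt.type_size bytes, so a row of n values
//     // (n a multiple of blck_size) occupies (n/tt.blck_size)*tt.type_size bytes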
//
// simd mappings
//
// we define a common set of C macros which map to specific intrinsics based on the current architecture
// we then implement the fundamental computation operations below using only these macros
// adding support for new architectures requires defining the corresponding SIMD macros
//
// GGML_F32_STEP / GGML_F16_STEP
//   number of elements to process in a single step
//
// GGML_F32_EPR / GGML_F16_EPR
//   number of elements to fit in a single register
//
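// For example, with the AVX definitions below (GGML_F32_STEP == 32, GGML_F32_EPR == 8),
// each step of the main loop works on GGML_F32_ARR == 32/8 == 4 registers' worth of floats.
//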
#if defined(__ARM_NEON) && defined(__ARM_FEATURE_FMA)

#define GGML_SIMD

// F32 NEON

#define GGML_F32_STEP 16
#define GGML_F32_EPR  4

#define GGML_F32x4              float32x4_t
#define GGML_F32x4_ZERO         vdupq_n_f32(0.0f)
#define GGML_F32x4_SET1(x)      vdupq_n_f32(x)
#define GGML_F32x4_LOAD         vld1q_f32
#define GGML_F32x4_STORE        vst1q_f32
#define GGML_F32x4_FMA(a, b, c) vfmaq_f32(a, b, c)
#define GGML_F32x4_ADD          vaddq_f32
#define GGML_F32x4_MUL          vmulq_f32
#define GGML_F32x4_REDUCE_ONE(x) vaddvq_f32(x)
#define GGML_F32x4_REDUCE(res, x) \
{ \
    int offset = GGML_F32_ARR >> 1; \
    for (int i = 0; i < offset; ++i) { \
        x[i] = vaddq_f32(x[i], x[offset+i]); \
    } \
    offset >>= 1; \
    for (int i = 0; i < offset; ++i) { \
        x[i] = vaddq_f32(x[i], x[offset+i]); \
    } \
    offset >>= 1; \
    for (int i = 0; i < offset; ++i) { \
        x[i] = vaddq_f32(x[i], x[offset+i]); \
    } \
    res = GGML_F32x4_REDUCE_ONE(x[0]); \
}

#define GGML_F32_VEC        GGML_F32x4
#define GGML_F32_VEC_ZERO   GGML_F32x4_ZERO
#define GGML_F32_VEC_SET1   GGML_F32x4_SET1
#define GGML_F32_VEC_LOAD   GGML_F32x4_LOAD
#define GGML_F32_VEC_STORE  GGML_F32x4_STORE
#define GGML_F32_VEC_FMA    GGML_F32x4_FMA
#define GGML_F32_VEC_ADD    GGML_F32x4_ADD
#define GGML_F32_VEC_MUL    GGML_F32x4_MUL
#define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE
// F16 NEON

#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
    #define GGML_F16_STEP 32
    #define GGML_F16_EPR  8

    #define GGML_F16x8              float16x8_t
    #define GGML_F16x8_ZERO         vdupq_n_f16(0.0f)
    #define GGML_F16x8_SET1(x)      vdupq_n_f16(x)
    #define GGML_F16x8_LOAD(x)      vld1q_f16((const ggml_fp16_internal_t *)(x))
    #define GGML_F16x8_STORE        vst1q_f16
    #define GGML_F16x8_FMA(a, b, c) vfmaq_f16(a, b, c)
    #define GGML_F16x8_ADD          vaddq_f16
    #define GGML_F16x8_MUL          vmulq_f16
    #define GGML_F16x8_REDUCE(res, x) \
    do { \
        int offset = GGML_F16_ARR >> 1; \
        for (int i = 0; i < offset; ++i) { \
            x[i] = vaddq_f16(x[i], x[offset+i]); \
        } \
        offset >>= 1; \
        for (int i = 0; i < offset; ++i) { \
            x[i] = vaddq_f16(x[i], x[offset+i]); \
        } \
        offset >>= 1; \
        for (int i = 0; i < offset; ++i) { \
            x[i] = vaddq_f16(x[i], x[offset+i]); \
        } \
        const float32x4_t t0 = vcvt_f32_f16(vget_low_f16 (x[0])); \
        const float32x4_t t1 = vcvt_f32_f16(vget_high_f16(x[0])); \
        res = (ggml_float) vaddvq_f32(vaddq_f32(t0, t1)); \
    } while (0)

    #define GGML_F16_VEC                GGML_F16x8
    #define GGML_F16_VEC_ZERO           GGML_F16x8_ZERO
    #define GGML_F16_VEC_SET1           GGML_F16x8_SET1
    #define GGML_F16_VEC_LOAD(p, i)     GGML_F16x8_LOAD(p)
    #define GGML_F16_VEC_STORE(p, r, i) GGML_F16x8_STORE((ggml_fp16_internal_t *)(p), r[i])
    #define GGML_F16_VEC_FMA            GGML_F16x8_FMA
    #define GGML_F16_VEC_ADD            GGML_F16x8_ADD
    #define GGML_F16_VEC_MUL            GGML_F16x8_MUL
    #define GGML_F16_VEC_REDUCE         GGML_F16x8_REDUCE
#else
    // if FP16 vector arithmetic is not supported, we use FP32 instead
    // and take advantage of the vcvt_ functions to convert to/from FP16

    #define GGML_F16_STEP 16
    #define GGML_F16_EPR  4

    #define GGML_F32Cx4              float32x4_t
    #define GGML_F32Cx4_ZERO         vdupq_n_f32(0.0f)
    #define GGML_F32Cx4_SET1(x)      vdupq_n_f32(x)
    #define GGML_F32Cx4_LOAD(x)      vcvt_f32_f16(vld1_f16((const ggml_fp16_internal_t *)(x)))
    #define GGML_F32Cx4_STORE(x, y)  vst1_f16(x, vcvt_f16_f32(y))
    #define GGML_F32Cx4_FMA(a, b, c) vfmaq_f32(a, b, c)
    #define GGML_F32Cx4_ADD          vaddq_f32
    #define GGML_F32Cx4_MUL          vmulq_f32
    #define GGML_F32Cx4_REDUCE       GGML_F32x4_REDUCE

    #define GGML_F16_VEC                GGML_F32Cx4
    #define GGML_F16_VEC_ZERO           GGML_F32Cx4_ZERO
    #define GGML_F16_VEC_SET1           GGML_F32Cx4_SET1
    #define GGML_F16_VEC_LOAD(p, i)     GGML_F32Cx4_LOAD(p)
    #define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx4_STORE((ggml_fp16_internal_t *)(p), r[i])
    #define GGML_F16_VEC_FMA            GGML_F32Cx4_FMA
    #define GGML_F16_VEC_ADD            GGML_F32Cx4_ADD
    #define GGML_F16_VEC_MUL            GGML_F32Cx4_MUL
    #define GGML_F16_VEC_REDUCE         GGML_F32Cx4_REDUCE
#endif
#elif defined(__AVX512F__)

#define GGML_SIMD

// F32 AVX512

#define GGML_F32_STEP 64
#define GGML_F32_EPR  16

#define GGML_F32x16         __m512
#define GGML_F32x16_ZERO    _mm512_setzero_ps()
#define GGML_F32x16_SET1(x) _mm512_set1_ps(x)
#define GGML_F32x16_LOAD    _mm512_loadu_ps
#define GGML_F32x16_STORE   _mm512_storeu_ps
// _mm512_fmadd_ps is defined in AVX512F so no guard is required
#define GGML_F32x16_FMA(a, b, c) _mm512_fmadd_ps(b, c, a)
#define GGML_F32x16_ADD     _mm512_add_ps
#define GGML_F32x16_MUL     _mm512_mul_ps
#define GGML_F32x16_REDUCE(res, x) \
do { \
    int offset = GGML_F32_ARR >> 1; \
    for (int i = 0; i < offset; ++i) { \
        x[i] = _mm512_add_ps(x[i], x[offset+i]); \
    } \
    offset >>= 1; \
    for (int i = 0; i < offset; ++i) { \
        x[i] = _mm512_add_ps(x[i], x[offset+i]); \
    } \
    offset >>= 1; \
    for (int i = 0; i < offset; ++i) { \
        x[i] = _mm512_add_ps(x[i], x[offset+i]); \
    } \
    res = _mm512_reduce_add_ps(x[0]); \
} while (0)

// TODO: is this optimal ?

#define GGML_F32_VEC        GGML_F32x16
#define GGML_F32_VEC_ZERO   GGML_F32x16_ZERO
#define GGML_F32_VEC_SET1   GGML_F32x16_SET1
#define GGML_F32_VEC_LOAD   GGML_F32x16_LOAD
#define GGML_F32_VEC_STORE  GGML_F32x16_STORE
#define GGML_F32_VEC_FMA    GGML_F32x16_FMA
#define GGML_F32_VEC_ADD    GGML_F32x16_ADD
#define GGML_F32_VEC_MUL    GGML_F32x16_MUL
#define GGML_F32_VEC_REDUCE GGML_F32x16_REDUCE

// F16 AVX512

#define GGML_F16_STEP 64
#define GGML_F16_EPR  16

// AVX512 has an FP16 extension (AVX512_FP16) but I don't have it on my machine so I use FP32 instead

#define GGML_F32Cx16         __m512
#define GGML_F32Cx16_ZERO    _mm512_setzero_ps()
#define GGML_F32Cx16_SET1(x) _mm512_set1_ps(x)

// unlike  _mm256_cvt intrinsics that require F16C, _mm512_cvt is defined in AVX512F
// so  F16C guard isn't required
#define GGML_F32Cx16_LOAD(x)     _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)(x)))
#define GGML_F32Cx16_STORE(x, y) _mm256_storeu_si256((__m256i *)(x), _mm512_cvtps_ph(y, 0))

#define GGML_F32Cx16_FMA(a, b, c) _mm512_fmadd_ps(b, c, a)
#define GGML_F32Cx16_ADD         _mm512_add_ps
#define GGML_F32Cx16_MUL         _mm512_mul_ps
#define GGML_F32Cx16_REDUCE(res, x) \
do { \
    int offset = GGML_F32_ARR >> 1; \
    for (int i = 0; i < offset; ++i) { \
        x[i] = _mm512_add_ps(x[i], x[offset+i]); \
    } \
    offset >>= 1; \
    for (int i = 0; i < offset; ++i) { \
        x[i] = _mm512_add_ps(x[i], x[offset+i]); \
    } \
    offset >>= 1; \
    for (int i = 0; i < offset; ++i) { \
        x[i] = _mm512_add_ps(x[i], x[offset+i]); \
    } \
    res = _mm512_reduce_add_ps(x[0]); \
} while (0)

#define GGML_F16_VEC                GGML_F32Cx16
#define GGML_F16_VEC_ZERO           GGML_F32Cx16_ZERO
#define GGML_F16_VEC_SET1           GGML_F32Cx16_SET1
#define GGML_F16_VEC_LOAD(p, i)     GGML_F32Cx16_LOAD(p)
#define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx16_STORE(p, r[i])
#define GGML_F16_VEC_FMA            GGML_F32Cx16_FMA
#define GGML_F16_VEC_ADD            GGML_F32Cx16_ADD
#define GGML_F16_VEC_MUL            GGML_F32Cx16_MUL
#define GGML_F16_VEC_REDUCE         GGML_F32Cx16_REDUCE
#elif defined(__AVX__)

#define GGML_SIMD

// F32 AVX

#define GGML_F32_STEP 32
#define GGML_F32_EPR  8

#define GGML_F32x8         __m256
#define GGML_F32x8_ZERO    _mm256_setzero_ps()
#define GGML_F32x8_SET1(x) _mm256_set1_ps(x)
#define GGML_F32x8_LOAD    _mm256_loadu_ps
#define GGML_F32x8_STORE   _mm256_storeu_ps
#if defined(__FMA__)
    #define GGML_F32x8_FMA(a, b, c) _mm256_fmadd_ps(b, c, a)
#else
    #define GGML_F32x8_FMA(a, b, c) _mm256_add_ps(_mm256_mul_ps(b, c), a)
#endif
#define GGML_F32x8_ADD     _mm256_add_ps
#define GGML_F32x8_MUL     _mm256_mul_ps
#define GGML_F32x8_REDUCE(res, x) \
do { \
    int offset = GGML_F32_ARR >> 1; \
    for (int i = 0; i < offset; ++i) { \
        x[i] = _mm256_add_ps(x[i], x[offset+i]); \
    } \
    offset >>= 1; \
    for (int i = 0; i < offset; ++i) { \
        x[i] = _mm256_add_ps(x[i], x[offset+i]); \
    } \
    offset >>= 1; \
    for (int i = 0; i < offset; ++i) { \
        x[i] = _mm256_add_ps(x[i], x[offset+i]); \
    } \
    const __m128 t0 = _mm_add_ps(_mm256_castps256_ps128(x[0]), \
                                 _mm256_extractf128_ps(x[0], 1)); \
    const __m128 t1 = _mm_hadd_ps(t0, t0); \
    res = (ggml_float) _mm_cvtss_f32(_mm_hadd_ps(t1, t1)); \
} while (0)
// TODO: is this optimal ?

#define GGML_F32_VEC        GGML_F32x8
#define GGML_F32_VEC_ZERO   GGML_F32x8_ZERO
#define GGML_F32_VEC_SET1   GGML_F32x8_SET1
#define GGML_F32_VEC_LOAD   GGML_F32x8_LOAD
#define GGML_F32_VEC_STORE  GGML_F32x8_STORE
#define GGML_F32_VEC_FMA    GGML_F32x8_FMA
#define GGML_F32_VEC_ADD    GGML_F32x8_ADD
#define GGML_F32_VEC_MUL    GGML_F32x8_MUL
#define GGML_F32_VEC_REDUCE GGML_F32x8_REDUCE

// F16 AVX

#define GGML_F16_STEP 32
#define GGML_F16_EPR  8

// F16 arithmetic is not supported by AVX, so we use F32 instead

#define GGML_F32Cx8         __m256
#define GGML_F32Cx8_ZERO    _mm256_setzero_ps()
#define GGML_F32Cx8_SET1(x) _mm256_set1_ps(x)

#if defined(__F16C__)
// the  _mm256_cvt intrinsics require F16C
#define GGML_F32Cx8_LOAD(x)     _mm256_cvtph_ps(_mm_loadu_si128((const __m128i *)(x)))
#define GGML_F32Cx8_STORE(x, y) _mm_storeu_si128((__m128i *)(x), _mm256_cvtps_ph(y, 0))
#else
static inline __m256 __avx_f32cx8_load(ggml_fp16_t *x) {
    float tmp[8];

    for (int i = 0; i < 8; i++) {
        tmp[i] = GGML_FP16_TO_FP32(x[i]);
    }

    return _mm256_loadu_ps(tmp);
}
static inline void __avx_f32cx8_store(ggml_fp16_t *x, __m256 y) {
    float arr[8];

    _mm256_storeu_ps(arr, y);

    for (int i = 0; i < 8; i++)
        x[i] = GGML_FP32_TO_FP16(arr[i]);
}
#define GGML_F32Cx8_LOAD(x)     __avx_f32cx8_load(x)
#define GGML_F32Cx8_STORE(x, y) __avx_f32cx8_store(x, y)
#endif

#define GGML_F32Cx8_FMA    GGML_F32x8_FMA
#define GGML_F32Cx8_ADD    _mm256_add_ps
#define GGML_F32Cx8_MUL    _mm256_mul_ps
#define GGML_F32Cx8_REDUCE GGML_F32x8_REDUCE

#define GGML_F16_VEC                GGML_F32Cx8
#define GGML_F16_VEC_ZERO           GGML_F32Cx8_ZERO
#define GGML_F16_VEC_SET1           GGML_F32Cx8_SET1
#define GGML_F16_VEC_LOAD(p, i)     GGML_F32Cx8_LOAD(p)
#define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx8_STORE(p, r[i])
#define GGML_F16_VEC_FMA            GGML_F32Cx8_FMA
#define GGML_F16_VEC_ADD            GGML_F32Cx8_ADD
#define GGML_F16_VEC_MUL            GGML_F32Cx8_MUL
#define GGML_F16_VEC_REDUCE         GGML_F32Cx8_REDUCE
#elif defined(__POWER9_VECTOR__)

#define GGML_SIMD

// F32 POWER9

#define GGML_F32_STEP 32
#define GGML_F32_EPR  4

#define GGML_F32x4              vector float
#define GGML_F32x4_ZERO         0.0f
#define GGML_F32x4_SET1         vec_splats
#define GGML_F32x4_LOAD(p)      vec_xl(0, p)
#define GGML_F32x4_STORE(p, r)  vec_xst(r, 0, p)
#define GGML_F32x4_FMA(a, b, c) vec_madd(b, c, a)
#define GGML_F32x4_ADD          vec_add
#define GGML_F32x4_MUL          vec_mul
#define GGML_F32x4_REDUCE(res, x) \
{ \
    int offset = GGML_F32_ARR >> 1; \
    for (int i = 0; i < offset; ++i) { \
        x[i] = vec_add(x[i], x[offset+i]); \
    } \
    offset >>= 1; \
    for (int i = 0; i < offset; ++i) { \
        x[i] = vec_add(x[i], x[offset+i]); \
    } \
    offset >>= 1; \
    for (int i = 0; i < offset; ++i) { \
        x[i] = vec_add(x[i], x[offset+i]); \
    } \
    res = vec_extract(x[0], 0) + \
          vec_extract(x[0], 1) + \
          vec_extract(x[0], 2) + \
          vec_extract(x[0], 3); \
}

#define GGML_F32_VEC        GGML_F32x4
#define GGML_F32_VEC_ZERO   GGML_F32x4_ZERO
#define GGML_F32_VEC_SET1   GGML_F32x4_SET1
#define GGML_F32_VEC_LOAD   GGML_F32x4_LOAD
#define GGML_F32_VEC_STORE  GGML_F32x4_STORE
#define GGML_F32_VEC_FMA    GGML_F32x4_FMA
#define GGML_F32_VEC_ADD    GGML_F32x4_ADD
#define GGML_F32_VEC_MUL    GGML_F32x4_MUL
#define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE

// F16 POWER9
#define GGML_F16_STEP       GGML_F32_STEP
#define GGML_F16_EPR        GGML_F32_EPR
#define GGML_F16_VEC        GGML_F32x4
#define GGML_F16_VEC_ZERO   GGML_F32x4_ZERO
#define GGML_F16_VEC_SET1   GGML_F32x4_SET1
#define GGML_F16_VEC_FMA    GGML_F32x4_FMA
#define GGML_F16_VEC_ADD    GGML_F32x4_ADD
#define GGML_F16_VEC_MUL    GGML_F32x4_MUL
#define GGML_F16_VEC_REDUCE GGML_F32x4_REDUCE
// Use vec_xl, not vec_ld, in case the load address is not aligned.
#define GGML_F16_VEC_LOAD(p, i) (i & 0x1) ? \
    vec_extract_fp32_from_shorth(vec_xl(0, p - GGML_F16_EPR)) : \
    vec_extract_fp32_from_shortl(vec_xl(0, p))
#define GGML_ENDIAN_BYTE(i) ((unsigned char *)&(uint16_t){1})[i]
#define GGML_F16_VEC_STORE(p, r, i) \
    if (i & 0x1) \
        vec_xst(vec_pack_to_short_fp32(r[i - GGML_ENDIAN_BYTE(1)], \
                                       r[i - GGML_ENDIAN_BYTE(0)]), \
                0, p - GGML_F16_EPR)
#elif defined(__wasm_simd128__)

#define GGML_SIMD

// F32 WASM

#define GGML_F32_STEP 16
#define GGML_F32_EPR  4

#define GGML_F32x4              v128_t
#define GGML_F32x4_ZERO         wasm_f32x4_splat(0.0f)
#define GGML_F32x4_SET1(x)      wasm_f32x4_splat(x)
#define GGML_F32x4_LOAD         wasm_v128_load
#define GGML_F32x4_STORE        wasm_v128_store
#define GGML_F32x4_FMA(a, b, c) wasm_f32x4_add(wasm_f32x4_mul(b, c), a)
#define GGML_F32x4_ADD          wasm_f32x4_add
#define GGML_F32x4_MUL          wasm_f32x4_mul
#define GGML_F32x4_REDUCE(res, x) \
{ \
    int offset = GGML_F32_ARR >> 1; \
    for (int i = 0; i < offset; ++i) { \
        x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
    } \
    offset >>= 1; \
    for (int i = 0; i < offset; ++i) { \
        x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
    } \
    offset >>= 1; \
    for (int i = 0; i < offset; ++i) { \
        x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
    } \
    res = wasm_f32x4_extract_lane(x[0], 0) + \
          wasm_f32x4_extract_lane(x[0], 1) + \
          wasm_f32x4_extract_lane(x[0], 2) + \
          wasm_f32x4_extract_lane(x[0], 3); \
}

#define GGML_F32_VEC        GGML_F32x4
#define GGML_F32_VEC_ZERO   GGML_F32x4_ZERO
#define GGML_F32_VEC_SET1   GGML_F32x4_SET1
#define GGML_F32_VEC_LOAD   GGML_F32x4_LOAD
#define GGML_F32_VEC_STORE  GGML_F32x4_STORE
#define GGML_F32_VEC_FMA    GGML_F32x4_FMA
#define GGML_F32_VEC_ADD    GGML_F32x4_ADD
#define GGML_F32_VEC_MUL    GGML_F32x4_MUL
#define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE

// F16 WASM

#define GGML_F16_STEP 16
#define GGML_F16_EPR  4

inline static v128_t __wasm_f16x4_load(const ggml_fp16_t * p) {
    float tmp[4];

    tmp[0] = GGML_FP16_TO_FP32(p[0]);
    tmp[1] = GGML_FP16_TO_FP32(p[1]);
    tmp[2] = GGML_FP16_TO_FP32(p[2]);
    tmp[3] = GGML_FP16_TO_FP32(p[3]);

    return wasm_v128_load(tmp);
}

inline static void __wasm_f16x4_store(ggml_fp16_t * p, v128_t x) {
    float tmp[4];

    wasm_v128_store(tmp, x);

    p[0] = GGML_FP32_TO_FP16(tmp[0]);
    p[1] = GGML_FP32_TO_FP16(tmp[1]);
    p[2] = GGML_FP32_TO_FP16(tmp[2]);
    p[3] = GGML_FP32_TO_FP16(tmp[3]);
}

#define GGML_F16x4             v128_t
#define GGML_F16x4_ZERO        wasm_f32x4_splat(0.0f)
#define GGML_F16x4_SET1(x)     wasm_f32x4_splat(x)
#define GGML_F16x4_LOAD(x)     __wasm_f16x4_load(x)
#define GGML_F16x4_STORE(x, y) __wasm_f16x4_store(x, y)
#define GGML_F16x4_FMA         GGML_F32x4_FMA
#define GGML_F16x4_ADD         wasm_f32x4_add
#define GGML_F16x4_MUL         wasm_f32x4_mul
#define GGML_F16x4_REDUCE(res, x) \
{ \
    int offset = GGML_F16_ARR >> 1; \
    for (int i = 0; i < offset; ++i) { \
        x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
    } \
    offset >>= 1; \
    for (int i = 0; i < offset; ++i) { \
        x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
    } \
    offset >>= 1; \
    for (int i = 0; i < offset; ++i) { \
        x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
    } \
    res = wasm_f32x4_extract_lane(x[0], 0) + \
          wasm_f32x4_extract_lane(x[0], 1) + \
          wasm_f32x4_extract_lane(x[0], 2) + \
          wasm_f32x4_extract_lane(x[0], 3); \
}

#define GGML_F16_VEC                GGML_F16x4
#define GGML_F16_VEC_ZERO           GGML_F16x4_ZERO
#define GGML_F16_VEC_SET1           GGML_F16x4_SET1
#define GGML_F16_VEC_LOAD(p, i)     GGML_F16x4_LOAD(p)
#define GGML_F16_VEC_STORE(p, r, i) GGML_F16x4_STORE(p, r[i])
#define GGML_F16_VEC_FMA            GGML_F16x4_FMA
#define GGML_F16_VEC_ADD            GGML_F16x4_ADD
#define GGML_F16_VEC_MUL            GGML_F16x4_MUL
#define GGML_F16_VEC_REDUCE         GGML_F16x4_REDUCE
#elif defined(__SSE3__)

#define GGML_SIMD

// F32 SSE

#define GGML_F32_STEP 32
#define GGML_F32_EPR  4

#define GGML_F32x4         __m128
#define GGML_F32x4_ZERO    _mm_setzero_ps()
#define GGML_F32x4_SET1(x) _mm_set1_ps(x)
#define GGML_F32x4_LOAD    _mm_loadu_ps
#define GGML_F32x4_STORE   _mm_storeu_ps
#if defined(__FMA__)
    // TODO: Does this work?
    #define GGML_F32x4_FMA(a, b, c) _mm_fmadd_ps(b, c, a)
#else
    #define GGML_F32x4_FMA(a, b, c) _mm_add_ps(_mm_mul_ps(b, c), a)
#endif
#define GGML_F32x4_ADD     _mm_add_ps
#define GGML_F32x4_MUL     _mm_mul_ps
#define GGML_F32x4_REDUCE(res, x) \
{ \
    int offset = GGML_F32_ARR >> 1; \
    for (int i = 0; i < offset; ++i) { \
        x[i] = _mm_add_ps(x[i], x[offset+i]); \
    } \
    offset >>= 1; \
    for (int i = 0; i < offset; ++i) { \
        x[i] = _mm_add_ps(x[i], x[offset+i]); \
    } \
    offset >>= 1; \
    for (int i = 0; i < offset; ++i) { \
        x[i] = _mm_add_ps(x[i], x[offset+i]); \
    } \
    const __m128 t0 = _mm_hadd_ps(x[0], x[0]); \
    res = (ggml_float) _mm_cvtss_f32(_mm_hadd_ps(t0, t0)); \
}
// TODO: is this optimal ?

#define GGML_F32_VEC        GGML_F32x4
#define GGML_F32_VEC_ZERO   GGML_F32x4_ZERO
#define GGML_F32_VEC_SET1   GGML_F32x4_SET1
#define GGML_F32_VEC_LOAD   GGML_F32x4_LOAD
#define GGML_F32_VEC_STORE  GGML_F32x4_STORE
#define GGML_F32_VEC_FMA    GGML_F32x4_FMA
#define GGML_F32_VEC_ADD    GGML_F32x4_ADD
#define GGML_F32_VEC_MUL    GGML_F32x4_MUL
#define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE

// F16 SSE

#define GGML_F16_STEP 32
#define GGML_F16_EPR  4

static inline __m128 __sse_f16x4_load(ggml_fp16_t *x) {
    float tmp[4];

    tmp[0] = GGML_FP16_TO_FP32(x[0]);
    tmp[1] = GGML_FP16_TO_FP32(x[1]);
    tmp[2] = GGML_FP16_TO_FP32(x[2]);
    tmp[3] = GGML_FP16_TO_FP32(x[3]);

    return _mm_loadu_ps(tmp);
}

static inline void __sse_f16x4_store(ggml_fp16_t *x, __m128 y) {
    float arr[4];

    _mm_storeu_ps(arr, y);

    x[0] = GGML_FP32_TO_FP16(arr[0]);
    x[1] = GGML_FP32_TO_FP16(arr[1]);
    x[2] = GGML_FP32_TO_FP16(arr[2]);
    x[3] = GGML_FP32_TO_FP16(arr[3]);
}

#define GGML_F32Cx4             __m128
#define GGML_F32Cx4_ZERO        _mm_setzero_ps()
#define GGML_F32Cx4_SET1(x)     _mm_set1_ps(x)
#define GGML_F32Cx4_LOAD(x)     __sse_f16x4_load(x)
#define GGML_F32Cx4_STORE(x, y) __sse_f16x4_store(x, y)
#define GGML_F32Cx4_FMA         GGML_F32x4_FMA
#define GGML_F32Cx4_ADD         _mm_add_ps
#define GGML_F32Cx4_MUL         _mm_mul_ps
#define GGML_F32Cx4_REDUCE      GGML_F32x4_REDUCE

#define GGML_F16_VEC                GGML_F32Cx4
#define GGML_F16_VEC_ZERO           GGML_F32Cx4_ZERO
#define GGML_F16_VEC_SET1           GGML_F32Cx4_SET1
#define GGML_F16_VEC_LOAD(p, i)     GGML_F32Cx4_LOAD(p)
#define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx4_STORE(p, r[i])
#define GGML_F16_VEC_FMA            GGML_F32Cx4_FMA
#define GGML_F16_VEC_ADD            GGML_F32Cx4_ADD
#define GGML_F16_VEC_MUL            GGML_F32Cx4_MUL
#define GGML_F16_VEC_REDUCE         GGML_F32Cx4_REDUCE
#endif

// GGML_F32_ARR / GGML_F16_ARR
//   number of registers to use per step
#ifdef GGML_SIMD
#define GGML_F32_ARR (GGML_F32_STEP/GGML_F32_EPR)
#define GGML_F16_ARR (GGML_F16_STEP/GGML_F16_EPR)
#endif
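
// NOTE: every SIMD routine below follows the same shape (sketch):
//
//     const int np = (n & ~(GGML_F32_STEP - 1));   // largest multiple of STEP <= n
//     for (int i = 0; i < np; i += GGML_F32_STEP) {
//         for (int j = 0; j < GGML_F32_ARR; j++) { // one register's worth per j
//             ... load / fma / store via the GGML_F32_VEC_* macros ...
//         }
//     }
//     for (int i = np; i < n; ++i) { ... }         // scalar leftovers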
//
// fundamental operations
//

inline static void ggml_vec_set_i8(const int n, int8_t * x, const int8_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
inline static void ggml_vec_set_i16(const int n, int16_t * x, const int16_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
inline static void ggml_vec_set_i32(const int n, int32_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
inline static void ggml_vec_set_f16(const int n, ggml_fp16_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
inline static void ggml_vec_set_bf16(const int n, ggml_bf16_t * x, const ggml_bf16_t v) { for (int i = 0; i < n; ++i) x[i] = v; }

inline static void ggml_vec_add_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i] + y[i]; }
inline static void ggml_vec_add1_f32(const int n, float * z, const float * x, const float   v) { for (int i = 0; i < n; ++i) z[i] = x[i] + v;    }
inline static void ggml_vec_acc_f32 (const int n, float * y, const float * x)                  { for (int i = 0; i < n; ++i) y[i] += x[i];       }
inline static void ggml_vec_acc1_f32(const int n, float * y, const float   v)                  { for (int i = 0; i < n; ++i) y[i] += v;          }
inline static void ggml_vec_sub_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i] - y[i]; }
inline static void ggml_vec_set_f32 (const int n, float * x, const float   v)                  { for (int i = 0; i < n; ++i) x[i] = v;           }
inline static void ggml_vec_cpy_f32 (const int n, float * y, const float * x)                  { for (int i = 0; i < n; ++i) y[i] = x[i];        }
inline static void ggml_vec_neg_f32 (const int n, float * y, const float * x)                  { for (int i = 0; i < n; ++i) y[i] = -x[i];       }
inline static void ggml_vec_mul_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]*y[i];   }
inline static void ggml_vec_div_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]/y[i];   }
static void ggml_vec_dot_f32(int n, float * restrict s, size_t bs, const float * restrict x, size_t bx, const float * restrict y, size_t by, int nrc) {
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);

#if defined(GGML_SIMD)
    float sumf = 0.0f;
    const int np = (n & ~(GGML_F32_STEP - 1));

    GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO };

    GGML_F32_VEC ax[GGML_F32_ARR];
    GGML_F32_VEC ay[GGML_F32_ARR];

    for (int i = 0; i < np; i += GGML_F32_STEP) {
        for (int j = 0; j < GGML_F32_ARR; j++) {
            ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR);
            ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);

            sum[j] = GGML_F32_VEC_FMA(sum[j], ax[j], ay[j]);
        }
    }

    // reduce the GGML_F32_ARR partial sums into sumf
    GGML_F32_VEC_REDUCE(sumf, sum);

    // leftovers
    for (int i = np; i < n; ++i) {
        sumf += x[i]*y[i];
    }
#else
    // scalar
    ggml_float sumf = 0.0;
    for (int i = 0; i < n; ++i) {
        sumf += (ggml_float)(x[i]*y[i]);
    }
#endif

    *s = sumf;
}
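
// NOTE: usage sketch (illustrative): *s receives the dot product of x and y
//
//     float s;
//     ggml_vec_dot_f32(n, &s, 0, x, 0, y, 0, 1); // bs/bx/by strides are unused when nrc == 1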
static void ggml_vec_dot_bf16(int n, float * restrict s, size_t bs, ggml_bf16_t * restrict x, size_t bx, ggml_bf16_t * restrict y, size_t by, int nrc) {
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);
    int i = 0;
    ggml_float sumf = 0;

#if defined(__AVX512BF16__)
    __m512 c1 = _mm512_setzero_ps();
    __m512 c2 = _mm512_setzero_ps();
    for (; i + 64 <= n; i += 64) {
        c1 = _mm512_dpbf16_ps(c1, (__m512bh)_mm512_loadu_ps((const float *)(x + i)),
                                  (__m512bh)_mm512_loadu_ps((const float *)(y + i)));
        c2 = _mm512_dpbf16_ps(c2, (__m512bh)_mm512_loadu_ps((const float *)(x + i + 32)),
                                  (__m512bh)_mm512_loadu_ps((const float *)(y + i + 32)));
    }
    sumf += (ggml_float)_mm512_reduce_add_ps(c1);
    sumf += (ggml_float)_mm512_reduce_add_ps(c2);

#elif defined(__AVX512F__)
#define LOAD(p) _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_cvtepu16_epi32(_mm256_loadu_si256((const __m256i *)(p))), 16))
    __m512 c1 = _mm512_setzero_ps();
    __m512 c2 = _mm512_setzero_ps();
    for (; i + 32 <= n; i += 32) {
        c1 = _mm512_add_ps(_mm512_mul_ps(LOAD(x + i), LOAD(y + i)), c1);
        c2 = _mm512_add_ps(_mm512_mul_ps(LOAD(x + i + 16), LOAD(y + i + 16)), c2);
    }
    sumf += (ggml_float)_mm512_reduce_add_ps(c1);
    sumf += (ggml_float)_mm512_reduce_add_ps(c2);
#undef LOAD

#elif defined(__AVX2__)
#define LOAD(p) _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_cvtepu16_epi32(_mm_loadu_si128((const __m128i *)(p))), 16))
    __m256 c1 = _mm256_setzero_ps();
    __m256 c2 = _mm256_setzero_ps();
    __m256 c3 = _mm256_setzero_ps();
    __m256 c4 = _mm256_setzero_ps();
    for (; i + 32 <= n; i += 32) {
        c1 = _mm256_add_ps(_mm256_mul_ps(LOAD(x + i), LOAD(y + i)), c1);
        c2 = _mm256_add_ps(_mm256_mul_ps(LOAD(x + i + 8), LOAD(y + i + 8)), c2);
        c3 = _mm256_add_ps(_mm256_mul_ps(LOAD(x + i + 16), LOAD(y + i + 16)), c3);
        c4 = _mm256_add_ps(_mm256_mul_ps(LOAD(x + i + 24), LOAD(y + i + 24)), c4);
    }
    __m128 g;
    c1 = _mm256_add_ps(_mm256_add_ps(c1, c3),
                       _mm256_add_ps(c2, c4));
    g = _mm_add_ps(_mm256_extractf128_ps(c1, 1),
                   _mm256_castps256_ps128(c1));
    g = _mm_add_ps(g, _mm_movehl_ps(g, g));
    g = _mm_add_ss(g, _mm_movehdup_ps(g));
    sumf += (ggml_float)_mm_cvtss_f32(g);
#undef LOAD
#endif

    // leftovers (or the full scalar path when no SIMD variant is available)
    for (; i < n; ++i) {
        sumf += (ggml_float)(GGML_BF16_TO_FP32(x[i]) *
                             GGML_BF16_TO_FP32(y[i]));
    }
    *s = sumf;
}
static void ggml_vec_dot_f16(int n, float * restrict s, size_t bs, ggml_fp16_t * restrict x, size_t bx, ggml_fp16_t * restrict y, size_t by, int nrc) {
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);

    ggml_float sumf = 0.0;

#if defined(GGML_SIMD)
    const int np = (n & ~(GGML_F16_STEP - 1));

    GGML_F16_VEC sum[GGML_F16_ARR] = { GGML_F16_VEC_ZERO };

    GGML_F16_VEC ax[GGML_F16_ARR];
    GGML_F16_VEC ay[GGML_F16_ARR];

    for (int i = 0; i < np; i += GGML_F16_STEP) {
        for (int j = 0; j < GGML_F16_ARR; j++) {
            ax[j] = GGML_F16_VEC_LOAD(x + i + j*GGML_F16_EPR, j);
            ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j);

            sum[j] = GGML_F16_VEC_FMA(sum[j], ax[j], ay[j]);
        }
    }

    // reduce the GGML_F16_ARR partial sums into sumf
    GGML_F16_VEC_REDUCE(sumf, sum);

    // leftovers
    for (int i = np; i < n; ++i) {
        sumf += (ggml_float)(GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i]));
    }
#else
    for (int i = 0; i < n; ++i) {
        sumf += (ggml_float)(GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i]));
    }
#endif

    *s = sumf;
}
// compute GGML_VEC_DOT_UNROLL dot products at once
// xs - x row stride in bytes
inline static void ggml_vec_dot_f16_unroll(const int n, const int xs, float * restrict s, void * restrict xv, ggml_fp16_t * restrict y) {
    ggml_float sumf[GGML_VEC_DOT_UNROLL] = { 0.0 };

    ggml_fp16_t * restrict x[GGML_VEC_DOT_UNROLL];

    for (int i = 0; i < GGML_VEC_DOT_UNROLL; ++i) {
        x[i] = (ggml_fp16_t *) ((char *) xv + i*xs);
    }

#if defined(GGML_SIMD)
    const int np = (n & ~(GGML_F16_STEP - 1));

    GGML_F16_VEC sum[GGML_VEC_DOT_UNROLL][GGML_F16_ARR] = { { GGML_F16_VEC_ZERO } };

    GGML_F16_VEC ax[GGML_F16_ARR];
    GGML_F16_VEC ay[GGML_F16_ARR];

    for (int i = 0; i < np; i += GGML_F16_STEP) {
        for (int j = 0; j < GGML_F16_ARR; j++) {
            ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j);

            for (int k = 0; k < GGML_VEC_DOT_UNROLL; ++k) {
                ax[j] = GGML_F16_VEC_LOAD(x[k] + i + j*GGML_F16_EPR, j);

                sum[k][j] = GGML_F16_VEC_FMA(sum[k][j], ax[j], ay[j]);
            }
        }
    }

    // reduce the GGML_F16_ARR partial sums of each row into sumf[k]
    for (int k = 0; k < GGML_VEC_DOT_UNROLL; ++k) {
        GGML_F16_VEC_REDUCE(sumf[k], sum[k]);
    }

    // leftovers
    for (int i = np; i < n; ++i) {
        for (int j = 0; j < GGML_VEC_DOT_UNROLL; ++j) {
            sumf[j] += (ggml_float)(GGML_FP16_TO_FP32(x[j][i])*GGML_FP16_TO_FP32(y[i]));
        }
    }
#else
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < GGML_VEC_DOT_UNROLL; ++j) {
            sumf[j] += (ggml_float)(GGML_FP16_TO_FP32(x[j][i])*GGML_FP16_TO_FP32(y[i]));
        }
    }
#endif

    for (int i = 0; i < GGML_VEC_DOT_UNROLL; ++i) {
        s[i] = sumf[i];
    }
}
inline static void ggml_vec_mad_f32(const int n, float * restrict y, const float * restrict x, const float v) {
#if defined(GGML_SIMD)
    const int np = (n & ~(GGML_F32_STEP - 1));

    GGML_F32_VEC vx = GGML_F32_VEC_SET1(v);

    GGML_F32_VEC ax[GGML_F32_ARR];
    GGML_F32_VEC ay[GGML_F32_ARR];

    for (int i = 0; i < np; i += GGML_F32_STEP) {
        for (int j = 0; j < GGML_F32_ARR; j++) {
            ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR);
            ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
            ay[j] = GGML_F32_VEC_FMA(ay[j], ax[j], vx);

            GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]);
        }
    }

    // leftovers
    for (int i = np; i < n; ++i) {
        y[i] += x[i]*v;
    }
#else
    // scalar
    for (int i = 0; i < n; ++i) {
        y[i] += x[i]*v;
    }
#endif
}
inline static void ggml_vec_mad_f16(const int n, ggml_fp16_t * restrict y, const ggml_fp16_t * restrict x, const float v) {
#if defined(GGML_SIMD)
    const int np = (n & ~(GGML_F16_STEP - 1));

    GGML_F16_VEC vx = GGML_F16_VEC_SET1(v);

    GGML_F16_VEC ax[GGML_F16_ARR];
    GGML_F16_VEC ay[GGML_F16_ARR];

    for (int i = 0; i < np; i += GGML_F16_STEP) {
        for (int j = 0; j < GGML_F16_ARR; j++) {
            ax[j] = GGML_F16_VEC_LOAD(x + i + j*GGML_F16_EPR, j);
            ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j);
            ay[j] = GGML_F16_VEC_FMA(ay[j], ax[j], vx);

            GGML_F16_VEC_STORE(y + i + j*GGML_F16_EPR, ay, j);
        }
    }

    // leftovers
    for (int i = np; i < n; ++i) {
        y[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(y[i]) + GGML_FP16_TO_FP32(x[i])*v);
    }
#else
    // scalar
    for (int i = 0; i < n; ++i) {
        y[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(y[i]) + GGML_FP16_TO_FP32(x[i])*v);
    }
#endif
}
// xs and vs are byte strides of x and v
inline static void ggml_vec_mad_f32_unroll(const int n, const int xs, const int vs, float * restrict y, const float * restrict xv, const float * restrict vv) {
    const float * restrict x[GGML_VEC_MAD_UNROLL];
    const float * restrict v[GGML_VEC_MAD_UNROLL];

    for (int i = 0; i < GGML_VEC_MAD_UNROLL; ++i) {
        x[i] = (const float *) ((const char *) xv + i*xs);
        v[i] = (const float *) ((const char *) vv + i*vs);
    }

#if defined(GGML_SIMD)
    const int np = (n & ~(GGML_F32_STEP - 1));

    GGML_F32_VEC vx[GGML_VEC_MAD_UNROLL];

    for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) {
        vx[k] = GGML_F32_VEC_SET1(v[k][0]);
    }

    GGML_F32_VEC ax[GGML_VEC_MAD_UNROLL][GGML_F32_ARR];
    GGML_F32_VEC ay[GGML_F32_ARR];

    for (int i = 0; i < np; i += GGML_F32_STEP) {
        for (int j = 0; j < GGML_F32_ARR; j++) {
            ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);

            for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) {
                ax[k][j] = GGML_F32_VEC_LOAD(x[k] + i + j*GGML_F32_EPR);
                ay[j] = GGML_F32_VEC_FMA(ay[j], ax[k][j], vx[k]);
            }

            GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]);
        }
    }

    // leftovers
    for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) {
        for (int i = np; i < n; ++i) {
            y[i] += x[k][i]*v[k][0];
        }
    }
#else
    // scalar
    for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) {
        for (int i = 0; i < n; ++i) {
            y[i] += x[k][i]*v[k][0];
        }
    }
#endif
}
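
// NOTE: ggml_vec_mad_f32 is an axpy (y[i] += x[i]*v); the _unroll variant applies
// GGML_VEC_MAD_UNROLL such updates to the same y in a single pass over memory,
// reading the x rows and v scalars at byte strides xs and vs respectively.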
//inline static void ggml_vec_scale_f32(const int n, float * y, const float   v) { for (int i = 0; i < n; ++i) y[i] *= v;          }
inline static void ggml_vec_scale_f32(const int n, float * y, const float   v) {
#if defined(GGML_USE_ACCELERATE)
    vDSP_vsmul(y, 1, &v, y, 1, n);
#elif defined(GGML_SIMD)
    const int np = (n & ~(GGML_F32_STEP - 1));

    GGML_F32_VEC vx = GGML_F32_VEC_SET1(v);

    GGML_F32_VEC ay[GGML_F32_ARR];

    for (int i = 0; i < np; i += GGML_F32_STEP) {
        for (int j = 0; j < GGML_F32_ARR; j++) {
            ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
            ay[j] = GGML_F32_VEC_MUL(ay[j], vx);

            GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]);
        }
    }

    // leftovers
    for (int i = np; i < n; ++i) {
        y[i] *= v;
    }
#else
    // scalar
    for (int i = 0; i < n; ++i) {
        y[i] *= v;
    }
#endif
}
inline static void ggml_vec_scale_f16(const int n, ggml_fp16_t * y, const float v) {
#if defined(GGML_SIMD)
    const int np = (n & ~(GGML_F16_STEP - 1));

    GGML_F16_VEC vx = GGML_F16_VEC_SET1(v);

    GGML_F16_VEC ay[GGML_F16_ARR];

    for (int i = 0; i < np; i += GGML_F16_STEP) {
        for (int j = 0; j < GGML_F16_ARR; j++) {
            ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j);
            ay[j] = GGML_F16_VEC_MUL(ay[j], vx);

            GGML_F16_VEC_STORE(y + i + j*GGML_F16_EPR, ay, j);
        }
    }

    // leftovers
    for (int i = np; i < n; ++i) {
        y[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(y[i])*v);
    }
#else
    // scalar
    for (int i = 0; i < n; ++i) {
        y[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(y[i])*v);
    }
#endif
}
inline static void ggml_vec_norm_f32 (const int n, float * s, const float * x) { ggml_vec_dot_f32(n, s, 0, x, 0, x, 0, 1); *s = sqrtf(*s);   }
inline static void ggml_vec_sqr_f32  (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i]*x[i];               }
inline static void ggml_vec_sqrt_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = sqrtf(x[i]);             }
inline static void ggml_vec_log_f32  (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = logf(x[i]);              }
inline static void ggml_vec_abs_f32  (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = fabsf(x[i]);             }
inline static void ggml_vec_sgn_f32  (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 1.f : ((x[i] < 0.f) ? -1.f : 0.f); }
inline static void ggml_vec_step_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 1.f : 0.f; }
inline static void ggml_vec_tanh_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = tanhf(x[i]);             }
inline static void ggml_vec_elu_f32  (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? x[i] : expf(x[i])-1; }
inline static void ggml_vec_relu_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? x[i] : 0.f; }
inline static void ggml_vec_leaky_relu_f32 (const int n, float * y, const float * x, const float ns) { for (int i = 0; i < n; ++i) y[i] = ((x[i] > 0.f) ? x[i] : 0.f) + ns * ((x[i] < 0.0f) ? x[i] : 0.f); }
inline static void ggml_vec_sigmoid_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = 1.f / (1.f + expf(-x[i])); }
// TODO: optimize performance
inline static void ggml_vec_hardswish_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i] * fminf(1.0f, fmaxf(0.0f, (x[i] + 3.0f) / 6.0f)); }
inline static void ggml_vec_hardsigmoid_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = fminf(1.0f, fmaxf(0.0f, (x[i] + 3.0f) / 6.0f)); }
  1706. static const float GELU_COEF_A = 0.044715f;
  1707. static const float GELU_QUICK_COEF = -1.702f;
  1708. static const float SQRT_2_OVER_PI = 0.79788456080286535587989211986876f;
  1709. inline static float ggml_gelu_f32(float x) {
  1710. return 0.5f*x*(1.0f + tanhf(SQRT_2_OVER_PI*x*(1.0f + GELU_COEF_A*x*x)));
  1711. }
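// GELU via lookup table: the fp16 input is reinterpreted as a 16-bit index
// into ggml_table_gelu_f16 (precomputed in ggml_init() for all 65536 possible
// fp16 bit patterns).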
inline static void ggml_vec_gelu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
    const uint16_t * i16 = (const uint16_t *) x;
    for (int i = 0; i < n; ++i) {
        y[i] = ggml_table_gelu_f16[i16[i]];
    }
}

#ifdef GGML_GELU_FP16
inline static void ggml_vec_gelu_f32(const int n, float * y, const float * x) {
    uint16_t t;
    for (int i = 0; i < n; ++i) {
        if (x[i] <= -10.0f) {
            y[i] = 0.0f;
        } else if (x[i] >= 10.0f) {
            y[i] = x[i];
        } else {
            ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
            memcpy(&t, &fp16, sizeof(uint16_t));
            y[i] = GGML_FP16_TO_FP32(ggml_table_gelu_f16[t]);
        }
    }
}
#else
inline static void ggml_vec_gelu_f32(const int n, float * y, const float * x) {
    for (int i = 0; i < n; ++i) {
        y[i] = ggml_gelu_f32(x[i]);
    }
}
#endif
inline static float ggml_gelu_quick_f32(float x) {
    return x*(1.0f/(1.0f+expf(GELU_QUICK_COEF*x)));
}

//inline static void ggml_vec_gelu_quick_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
//    const uint16_t * i16 = (const uint16_t *) x;
//    for (int i = 0; i < n; ++i) {
//        y[i] = ggml_table_gelu_quick_f16[i16[i]];
//    }
//}

#ifdef GGML_GELU_QUICK_FP16
inline static void ggml_vec_gelu_quick_f32(const int n, float * y, const float * x) {
    uint16_t t;
    for (int i = 0; i < n; ++i) {
        ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
        memcpy(&t, &fp16, sizeof(uint16_t));
        y[i] = GGML_FP16_TO_FP32(ggml_table_gelu_quick_f16[t]);
    }
}
#else
inline static void ggml_vec_gelu_quick_f32(const int n, float * y, const float * x) {
    for (int i = 0; i < n; ++i) {
        y[i] = ggml_gelu_quick_f32(x[i]);
    }
}
#endif
// Sigmoid Linear Unit (SiLU) function
inline static float ggml_silu_f32(float x) {
    return x/(1.0f + expf(-x));
}

//inline static void ggml_vec_silu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
//    const uint16_t * i16 = (const uint16_t *) x;
//    for (int i = 0; i < n; ++i) {
//        y[i] = ggml_table_silu_f16[i16[i]];
//    }
//}

#ifdef GGML_SILU_FP16
inline static void ggml_vec_silu_f32(const int n, float * y, const float * x) {
    uint16_t t;
    for (int i = 0; i < n; ++i) {
        ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
        memcpy(&t, &fp16, sizeof(uint16_t));
        y[i] = GGML_FP16_TO_FP32(ggml_table_silu_f16[t]);
    }
}
#else
inline static void ggml_vec_silu_f32(const int n, float * y, const float * x) {
    for (int i = 0; i < n; ++i) {
        y[i] = ggml_silu_f32(x[i]);
    }
}
#endif
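// Derivative of SiLU: with s = sigmoid(x) and silu(x) = x*s,
//   d/dx silu(x) = s + x*s*(1 - s) = s*(1 + x*(1 - s)),
// which is multiplied by the incoming gradient dy below.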
inline static float ggml_silu_backward_f32(float x, float dy) {
    const float s = 1.0f/(1.0f + expf(-x));
    return dy*s*(1.0f + x*(1.0f - s));
}

#ifdef GGML_SILU_FP16
inline static void ggml_vec_silu_backward_f32(const int n, float * dx, const float * x, const float * dy) {
    for (int i = 0; i < n; ++i) {
        // we did not use x[i] to compute forward silu but its f16 equivalent
        // take derivative at f16 of x[i]:
        ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
        float usedx = GGML_FP16_TO_FP32(fp16);
        dx[i] = ggml_silu_backward_f32(usedx, dy[i]);
    }
}
#else
inline static void ggml_vec_silu_backward_f32(const int n, float * dx, const float * x, const float * dy) {
    for (int i = 0; i < n; ++i) {
        dx[i] = ggml_silu_backward_f32(x[i], dy[i]);
    }
}
#endif
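// Reductions. The f32 sum accumulates in ggml_float (double) to reduce
// rounding error over long vectors.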
inline static void ggml_vec_sum_f32(const int n, float * s, const float * x) {
#ifndef GGML_USE_ACCELERATE
    ggml_float sum = 0.0;
    for (int i = 0; i < n; ++i) {
        sum += (ggml_float)x[i];
    }
    *s = sum;
#else
    vDSP_sve(x, 1, s, n);
#endif
}

inline static void ggml_vec_sum_f32_ggf(const int n, ggml_float * s, const float * x) {
    ggml_float sum = 0.0;
    for (int i = 0; i < n; ++i) {
        sum += (ggml_float)x[i];
    }
    *s = sum;
}

inline static void ggml_vec_sum_f16_ggf(const int n, float * s, const ggml_fp16_t * x) {
    float sum = 0.0f;
    for (int i = 0; i < n; ++i) {
        sum += GGML_FP16_TO_FP32(x[i]);
    }
    *s = sum;
}

inline static void ggml_vec_sum_bf16_ggf(const int n, float * s, const ggml_bf16_t * x) {
    float sum = 0.0f;
    for (int i = 0; i < n; ++i) {
        sum += GGML_BF16_TO_FP32(x[i]);
    }
    *s = sum;
}

inline static void ggml_vec_max_f32(const int n, float * s, const float * x) {
#ifndef GGML_USE_ACCELERATE
    float max = -INFINITY;
    for (int i = 0; i < n; ++i) {
        max = MAX(max, x[i]);
    }
    *s = max;
#else
    vDSP_maxv(x, 1, s, n);
#endif
}

inline static void ggml_vec_norm_inv_f32(const int n, float * s, const float * x) {
    ggml_vec_norm_f32(n, s, x);
    *s = 1.f/(*s);
}

inline static void ggml_vec_argmax_f32(const int n, int * s, const float * x) {
    float max = -INFINITY;
    int idx = 0;
    for (int i = 0; i < n; ++i) {
        max = MAX(max, x[i]);
        if (max == x[i]) { idx = i; }
    }
    *s = idx;
}
//
// data types
//

static const char * GGML_OP_NAME[GGML_OP_COUNT] = {
    "NONE",
    "DUP",
    "ADD",
    "ADD1",
    "ACC",
    "SUB",
    "MUL",
    "DIV",
    "SQR",
    "SQRT",
    "LOG",
    "SUM",
    "SUM_ROWS",
    "MEAN",
    "ARGMAX",
    "REPEAT",
    "REPEAT_BACK",
    "CONCAT",
    "SILU_BACK",
    "NORM",
    "RMS_NORM",
    "RMS_NORM_BACK",
    "GROUP_NORM",
    "MUL_MAT",
    "MUL_MAT_ID",
    "OUT_PROD",
    "SCALE",
    "SET",
    "CPY",
    "CONT",
    "RESHAPE",
    "VIEW",
    "PERMUTE",
    "TRANSPOSE",
    "GET_ROWS",
    "GET_ROWS_BACK",
    "DIAG",
    "DIAG_MASK_INF",
    "DIAG_MASK_ZERO",
    "SOFT_MAX",
    "SOFT_MAX_BACK",
    "ROPE",
    "ROPE_BACK",
    "CLAMP",
    "CONV_TRANSPOSE_1D",
    "IM2COL",
    "CONV_TRANSPOSE_2D",
    "POOL_1D",
    "POOL_2D",
    "UPSCALE",
    "PAD",
    "ARANGE",
    "TIMESTEP_EMBEDDING",
    "ARGSORT",
    "LEAKY_RELU",
    "FLASH_ATTN",
    "FLASH_ATTN_EXT",
    "FLASH_FF",
    "FLASH_ATTN_BACK",
    "SSM_CONV",
    "SSM_SCAN",
    "WIN_PART",
    "WIN_UNPART",
    "GET_REL_POS",
    "ADD_REL_POS",
    "UNARY",
    "MAP_UNARY",
    "MAP_BINARY",
    "MAP_CUSTOM1_F32",
    "MAP_CUSTOM2_F32",
    "MAP_CUSTOM3_F32",
    "MAP_CUSTOM1",
    "MAP_CUSTOM2",
    "MAP_CUSTOM3",
    "CROSS_ENTROPY_LOSS",
    "CROSS_ENTROPY_LOSS_BACK",
};

static_assert(GGML_OP_COUNT == 76, "GGML_OP_COUNT != 76");
static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = {
    "none",
    "x",
    "x+y",
    "x+y",
    "view(x,nb,offset)+=y->x",
    "x-y",
    "x*y",
    "x/y",
    "x^2",
    "√x",
    "log(x)",
    "Σx",
    "Σx_k",
    "Σx/n",
    "argmax(x)",
    "repeat(x)",
    "repeat_back(x)",
    "concat(x, y)",
    "silu_back(x)",
    "norm(x)",
    "rms_norm(x)",
    "rms_norm_back(x)",
    "group_norm(x)",
    "X*Y",
    "X[i]*Y",
    "X*Y",
    "x*v",
    "y-\\>view(x)",
    "x-\\>y",
    "cont(x)",
    "reshape(x)",
    "view(x)",
    "permute(x)",
    "transpose(x)",
    "get_rows(x)",
    "get_rows_back(x)",
    "diag(x)",
    "diag_mask_inf(x)",
    "diag_mask_zero(x)",
    "soft_max(x)",
    "soft_max_back(x)",
    "rope(x)",
    "rope_back(x)",
    "clamp(x)",
    "conv_transpose_1d(x)",
    "im2col(x)",
    "conv_transpose_2d(x)",
    "pool_1d(x)",
    "pool_2d(x)",
    "upscale(x)",
    "pad(x)",
    "arange(start, stop, step)",
    "timestep_embedding(timesteps, dim, max_period)",
    "argsort(x)",
    "leaky_relu(x)",
    "flash_attn(x)",
    "flash_attn_ext(x)",
    "flash_ff(x)",
    "flash_attn_back(x)",
    "ssm_conv(x)",
    "ssm_scan(x)",
    "win_part(x)",
    "win_unpart(x)",
    "get_rel_pos(x)",
    "add_rel_pos(x)",
    "unary(x)",
    "f(x)",
    "f(x,y)",
    "custom_f32(x)",
    "custom_f32(x,y)",
    "custom_f32(x,y,z)",
    "custom(x)",
    "custom(x,y)",
    "custom(x,y,z)",
    "cross_entropy_loss(x,y)",
    "cross_entropy_loss_back(x,y)",
};

static_assert(GGML_OP_COUNT == 76, "GGML_OP_COUNT != 76");
static_assert(GGML_OP_POOL_COUNT == 2, "GGML_OP_POOL_COUNT != 2");

static const char * GGML_UNARY_OP_NAME[GGML_UNARY_OP_COUNT] = {
    "ABS",
    "SGN",
    "NEG",
    "STEP",
    "TANH",
    "ELU",
    "RELU",
    "SIGMOID",
    "GELU",
    "GELU_QUICK",
    "SILU",
    "HARDSWISH",
    "HARDSIGMOID",
};

static_assert(GGML_UNARY_OP_COUNT == 13, "GGML_UNARY_OP_COUNT != 13");

static_assert(sizeof(struct ggml_object)%GGML_MEM_ALIGN == 0, "ggml_object size must be a multiple of GGML_MEM_ALIGN");
static_assert(sizeof(struct ggml_tensor)%GGML_MEM_ALIGN == 0, "ggml_tensor size must be a multiple of GGML_MEM_ALIGN");
// WARN:
// Mis-configuration can lead to problems that are hard to reason about:
// * At best it crashes or produces nonsense.
// * At worst the output is subtly wrong and hard to perceive.
//
// An op has to enable INIT or FINALIZE when any of its branches needs that pass.
// Take care about compile options (e.g., GGML_USE_xxx).
static bool GGML_OP_HAS_INIT    [GGML_OP_COUNT] = { 0 };
static bool GGML_OP_HAS_FINALIZE[GGML_OP_COUNT] = { 0 };

static void ggml_setup_op_has_task_pass(void) {
    {   // INIT
        bool * p = GGML_OP_HAS_INIT;

        p[GGML_OP_ACC                ] = true;
        p[GGML_OP_MUL_MAT            ] = true;
        p[GGML_OP_MUL_MAT_ID         ] = true;
        p[GGML_OP_OUT_PROD           ] = true;
        p[GGML_OP_SET                ] = true;
        p[GGML_OP_GET_ROWS_BACK      ] = true;
        p[GGML_OP_DIAG_MASK_INF      ] = true;
        p[GGML_OP_DIAG_MASK_ZERO     ] = true;
        p[GGML_OP_CONV_TRANSPOSE_1D  ] = true;
        p[GGML_OP_CONV_TRANSPOSE_2D  ] = true;
        p[GGML_OP_FLASH_ATTN_BACK    ] = true;
        p[GGML_OP_CROSS_ENTROPY_LOSS ] = true;
        p[GGML_OP_ADD_REL_POS        ] = true;
    }

    {   // FINALIZE
        bool * p = GGML_OP_HAS_FINALIZE;

        p[GGML_OP_CROSS_ENTROPY_LOSS ] = true;
    }
}
//
// ggml context
//

struct ggml_context {
    size_t mem_size;
    void * mem_buffer;
    bool   mem_buffer_owned;
    bool   no_alloc;
    bool   no_alloc_save; // this is used to save the no_alloc state when using scratch buffers

    int    n_objects;

    struct ggml_object * objects_begin;
    struct ggml_object * objects_end;

    struct ggml_scratch scratch;
    struct ggml_scratch scratch_save;
};

struct ggml_context_container {
    bool used;

    struct ggml_context context;
};

//
// NUMA support
//

#define GGML_NUMA_MAX_NODES 8
#define GGML_NUMA_MAX_CPUS 512

struct ggml_numa_node {
    uint32_t cpus[GGML_NUMA_MAX_CPUS]; // hardware threads on this node
    uint32_t n_cpus;
};

struct ggml_numa_nodes {
    enum ggml_numa_strategy numa_strategy;
    struct ggml_numa_node nodes[GGML_NUMA_MAX_NODES];
    uint32_t n_nodes;
    uint32_t total_cpus; // hardware threads on system
    uint32_t current_node; // node on which main process is executing
#if defined(__gnu_linux__)
    cpu_set_t cpuset; // cpuset from numactl
#else
    uint32_t cpuset; // no NUMA support outside of Linux at this time. Use a portable datatype
#endif
};

//
// ggml state
//

struct ggml_state {
    struct ggml_context_container contexts[GGML_MAX_CONTEXTS];
    struct ggml_numa_nodes numa;
};

// global state
static struct ggml_state g_state;
static atomic_int g_state_barrier = 0;

// barrier via spin lock
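// ggml_critical_section_start() "acquires" the lock by atomically incrementing
// g_state_barrier; if another thread was already inside (previous value > 0),
// it undoes the increment, yields, and retries until it observes 0.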
inline static void ggml_critical_section_start(void) {
    int processing = atomic_fetch_add(&g_state_barrier, 1);

    while (processing > 0) {
        // wait for other threads to finish
        atomic_fetch_sub(&g_state_barrier, 1);
        sched_yield(); // TODO: reconsider this
        processing = atomic_fetch_add(&g_state_barrier, 1);
    }
}

// TODO: make this somehow automatically executed
//       some sort of "sentry" mechanism
inline static void ggml_critical_section_end(void) {
    atomic_fetch_sub(&g_state_barrier, 1);
}

#if defined(__gnu_linux__)
static cpu_set_t ggml_get_numa_affinity(void) {
    cpu_set_t cpuset;
    pthread_t thread;
    thread = pthread_self();
    CPU_ZERO(&cpuset);
    pthread_getaffinity_np(thread, sizeof(cpu_set_t), &cpuset);
    return cpuset;
}
#else
static uint32_t ggml_get_numa_affinity(void) {
    return 0; // no NUMA support
}
#endif
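// Detect the NUMA topology on Linux by probing /sys/devices/system/node and
// /sys/devices/system/cpu, record which CPUs belong to which node, and warn if
// the kernel's automatic NUMA balancing is enabled. No-op on other platforms.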
void ggml_numa_init(enum ggml_numa_strategy numa_flag) {
    if (g_state.numa.n_nodes > 0) {
        fprintf(stderr, "ggml_numa_init: NUMA already initialized\n");
        return;
    }

#if defined(__gnu_linux__)
    struct stat st;
    char path[256];
    int rv;

    // set numa scheme
    g_state.numa.numa_strategy = numa_flag;

    GGML_PRINT_DEBUG("numa strategy %u\n", g_state.numa.numa_strategy);

    g_state.numa.cpuset = ggml_get_numa_affinity();

    // enumerate nodes
    while (g_state.numa.n_nodes < GGML_NUMA_MAX_NODES) {
        rv = snprintf(path, sizeof(path), "/sys/devices/system/node/node%u", g_state.numa.n_nodes);
        GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path));
        if (stat(path, &st) != 0) { break; }
        ++g_state.numa.n_nodes;
    }

    // enumerate CPUs
    while (g_state.numa.total_cpus < GGML_NUMA_MAX_CPUS) {
        rv = snprintf(path, sizeof(path), "/sys/devices/system/cpu/cpu%u", g_state.numa.total_cpus);
        GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path));
        if (stat(path, &st) != 0) { break; }
        ++g_state.numa.total_cpus;
    }

    GGML_PRINT_DEBUG("found %u numa nodes, %u CPUs\n", g_state.numa.n_nodes, g_state.numa.total_cpus);

    // figure out which node we're on
    uint current_cpu;
    int getcpu_ret = 0;

#if __GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ > 28) || defined(__COSMOPOLITAN__)
    getcpu_ret = getcpu(&current_cpu, &g_state.numa.current_node);
#else
    // old glibc doesn't have a wrapper for this call. Fall back on direct syscall
# if !defined(SYS_getcpu) && defined(SYS_get_cpu)
#   define SYS_getcpu SYS_get_cpu // some older glibc versions use this name
# endif
    getcpu_ret = syscall(SYS_getcpu, &current_cpu, &g_state.numa.current_node);
#endif

    if (g_state.numa.n_nodes < 1 || g_state.numa.total_cpus < 1 || getcpu_ret != 0) {
        g_state.numa.n_nodes = 0;
        return;
    }

    GGML_PRINT_DEBUG("found our process on numa node %u, CPU %u\n", g_state.numa.current_node, current_cpu);

    for (uint32_t n = 0; n < g_state.numa.n_nodes; ++n) {
        struct ggml_numa_node * node = &g_state.numa.nodes[n];
        GGML_PRINT_DEBUG("CPUs on node %u:", n);
        node->n_cpus = 0;
        for (uint32_t c = 0; c < g_state.numa.total_cpus; ++c) {
            rv = snprintf(path, sizeof(path), "/sys/devices/system/node/node%u/cpu%u", n, c);
            GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path));
            if (stat(path, &st) == 0) {
                node->cpus[node->n_cpus++] = c;
                GGML_PRINT_DEBUG(" %u", c);
            }
        }
        GGML_PRINT_DEBUG("\n");
    }

    if (ggml_is_numa()) {
        FILE *fptr = fopen("/proc/sys/kernel/numa_balancing", "r");
        if (fptr != NULL) {
            char buf[42];
            if (fgets(buf, sizeof(buf), fptr) && strncmp(buf, "0\n", sizeof(buf)) != 0) {
                GGML_PRINT("WARNING: /proc/sys/kernel/numa_balancing is enabled, this has been observed to impair performance\n");
            }
            fclose(fptr);
        }
    }
#else
    GGML_UNUSED(numa_flag);
    // TODO
#endif
}
bool ggml_is_numa(void) {
    return g_state.numa.n_nodes > 1;
}

////////////////////////////////////////////////////////////////////////////////

void ggml_print_object(const struct ggml_object * obj) {
    GGML_PRINT(" - ggml_object: type = %d, offset = %zu, size = %zu, next = %p\n",
            obj->type, obj->offs, obj->size, (const void *) obj->next);
}

void ggml_print_objects(const struct ggml_context * ctx) {
    struct ggml_object * obj = ctx->objects_begin;

    GGML_PRINT("%s: objects in context %p:\n", __func__, (const void *) ctx);

    while (obj != NULL) {
        ggml_print_object(obj);
        obj = obj->next;
    }

    GGML_PRINT("%s: --- end ---\n", __func__);
}

GGML_CALL int64_t ggml_nelements(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return tensor->ne[0]*tensor->ne[1]*tensor->ne[2]*tensor->ne[3];
}

GGML_CALL int64_t ggml_nrows(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return tensor->ne[1]*tensor->ne[2]*tensor->ne[3];
}
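// Total size of a tensor's data in bytes. For block-quantized types the first
// dimension is stored as blocks of ggml_blck_size() elements, so its byte size
// is scaled by the block size; higher dimensions use the recorded strides,
// which keeps this correct for padded / non-contiguous tensors as well.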
GGML_CALL size_t ggml_nbytes(const struct ggml_tensor * tensor) {
    size_t nbytes;
    size_t blck_size = ggml_blck_size(tensor->type);
    if (blck_size == 1) {
        nbytes = ggml_type_size(tensor->type);
        for (int i = 0; i < GGML_MAX_DIMS; ++i) {
            nbytes += (tensor->ne[i] - 1)*tensor->nb[i];
        }
    }
    else {
        nbytes = tensor->ne[0]*tensor->nb[0]/blck_size;
        for (int i = 1; i < GGML_MAX_DIMS; ++i) {
            nbytes += (tensor->ne[i] - 1)*tensor->nb[i];
        }
    }

    return nbytes;
}

size_t ggml_nbytes_pad(const struct ggml_tensor * tensor) {
    return GGML_PAD(ggml_nbytes(tensor), GGML_MEM_ALIGN);
}

GGML_CALL int ggml_blck_size(enum ggml_type type) {
    return type_traits[type].blck_size;
}

GGML_CALL size_t ggml_type_size(enum ggml_type type) {
    return type_traits[type].type_size;
}

GGML_CALL size_t ggml_row_size(enum ggml_type type, int64_t ne) {
    assert(ne % ggml_blck_size(type) == 0);
    return ggml_type_size(type)*ne/ggml_blck_size(type);
}

double ggml_type_sizef(enum ggml_type type) {
    return ((double)(type_traits[type].type_size))/type_traits[type].blck_size;
}

GGML_CALL const char * ggml_type_name(enum ggml_type type) {
    return type_traits[type].type_name;
}

GGML_CALL bool ggml_is_quantized(enum ggml_type type) {
    return type_traits[type].is_quantized;
}

GGML_CALL const char * ggml_op_name(enum ggml_op op) {
    return GGML_OP_NAME[op];
}

const char * ggml_op_symbol(enum ggml_op op) {
    return GGML_OP_SYMBOL[op];
}

const char * ggml_unary_op_name(enum ggml_unary_op op) {
    return GGML_UNARY_OP_NAME[op];
}

GGML_CALL const char * ggml_op_desc(const struct ggml_tensor * t) {
    if (t->op == GGML_OP_UNARY) {
        enum ggml_unary_op uop = ggml_get_unary_op(t);
        return ggml_unary_op_name(uop);
    }
    else {
        return ggml_op_name(t->op);
    }
}

GGML_CALL size_t ggml_element_size(const struct ggml_tensor * tensor) {
    return ggml_type_size(tensor->type);
}

bool ggml_is_scalar(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return tensor->ne[0] == 1 && tensor->ne[1] == 1 && tensor->ne[2] == 1 && tensor->ne[3] == 1;
}

bool ggml_is_vector(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return tensor->ne[1] == 1 && tensor->ne[2] == 1 && tensor->ne[3] == 1;
}

bool ggml_is_matrix(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return tensor->ne[2] == 1 && tensor->ne[3] == 1;
}

bool ggml_is_3d(const struct ggml_tensor * tensor) {
    return tensor->ne[3] == 1;
}

int ggml_n_dims(const struct ggml_tensor * tensor) {
    for (int i = GGML_MAX_DIMS - 1; i >= 1; --i) {
        if (tensor->ne[i] > 1) {
            return i + 1;
        }
    }
    return 1;
}

static inline bool ggml_can_mul_mat(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return (t0->ne[0] == t1->ne[0])    &&
           (t1->ne[2]%t0->ne[2] == 0)  && // verify t0 is broadcastable
           (t1->ne[3]%t0->ne[3] == 0);
}

static inline bool ggml_can_out_prod(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return (t0->ne[1] == t1->ne[1])    &&
           (t1->ne[2]%t0->ne[2] == 0)  && // verify t0 is broadcastable
           (t1->ne[3]%t0->ne[3] == 0);
}

enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype) {
    enum ggml_type wtype = GGML_TYPE_COUNT;

    switch (ftype) {
        case GGML_FTYPE_ALL_F32:              wtype = GGML_TYPE_F32;     break;
        case GGML_FTYPE_MOSTLY_F16:           wtype = GGML_TYPE_F16;     break;
        case GGML_FTYPE_MOSTLY_BF16:          wtype = GGML_TYPE_BF16;    break;
        case GGML_FTYPE_MOSTLY_Q4_0:          wtype = GGML_TYPE_Q4_0;    break;
        case GGML_FTYPE_MOSTLY_Q4_1:          wtype = GGML_TYPE_Q4_1;    break;
        case GGML_FTYPE_MOSTLY_Q5_0:          wtype = GGML_TYPE_Q5_0;    break;
        case GGML_FTYPE_MOSTLY_Q5_1:          wtype = GGML_TYPE_Q5_1;    break;
        case GGML_FTYPE_MOSTLY_Q8_0:          wtype = GGML_TYPE_Q8_0;    break;
        case GGML_FTYPE_MOSTLY_Q2_K:          wtype = GGML_TYPE_Q2_K;    break;
        case GGML_FTYPE_MOSTLY_Q3_K:          wtype = GGML_TYPE_Q3_K;    break;
        case GGML_FTYPE_MOSTLY_Q4_K:          wtype = GGML_TYPE_Q4_K;    break;
        case GGML_FTYPE_MOSTLY_Q5_K:          wtype = GGML_TYPE_Q5_K;    break;
        case GGML_FTYPE_MOSTLY_Q6_K:          wtype = GGML_TYPE_Q6_K;    break;
        case GGML_FTYPE_MOSTLY_IQ2_XXS:       wtype = GGML_TYPE_IQ2_XXS; break;
        case GGML_FTYPE_MOSTLY_IQ2_XS:        wtype = GGML_TYPE_IQ2_XS;  break;
        case GGML_FTYPE_MOSTLY_IQ3_XXS:       wtype = GGML_TYPE_IQ3_XXS; break;
        case GGML_FTYPE_MOSTLY_IQ1_S:         wtype = GGML_TYPE_IQ1_S;   break;
        case GGML_FTYPE_MOSTLY_IQ1_M:         wtype = GGML_TYPE_IQ1_M;   break;
        case GGML_FTYPE_MOSTLY_IQ4_NL:        wtype = GGML_TYPE_IQ4_NL;  break;
        case GGML_FTYPE_MOSTLY_IQ4_XS:        wtype = GGML_TYPE_IQ4_XS;  break;
        case GGML_FTYPE_MOSTLY_IQ3_S:         wtype = GGML_TYPE_IQ3_S;   break;
        case GGML_FTYPE_MOSTLY_IQ2_S:         wtype = GGML_TYPE_IQ2_S;   break;
        case GGML_FTYPE_UNKNOWN:              wtype = GGML_TYPE_COUNT;   break;
        case GGML_FTYPE_MOSTLY_Q4_1_SOME_F16: wtype = GGML_TYPE_COUNT;   break;
    }

    GGML_ASSERT(wtype != GGML_TYPE_COUNT);

    return wtype;
}

size_t ggml_tensor_overhead(void) {
    return GGML_OBJECT_SIZE + GGML_TENSOR_SIZE;
}

GGML_CALL bool ggml_is_transposed(const struct ggml_tensor * tensor) {
    return tensor->nb[0] > tensor->nb[1];
}
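// A tensor is contiguous when its strides describe a dense row-major layout:
// nb[0] is the element (or block) size and each higher stride is the previous
// stride times the previous dimension.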
GGML_CALL bool ggml_is_contiguous(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return
        tensor->nb[0] == ggml_type_size(tensor->type) &&
        tensor->nb[1] == (tensor->nb[0]*tensor->ne[0])/ggml_blck_size(tensor->type) &&
        tensor->nb[2] == tensor->nb[1]*tensor->ne[1] &&
        tensor->nb[3] == tensor->nb[2]*tensor->ne[2];
}

static inline bool ggml_is_contiguous_except_dim_1(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return
        tensor->nb[0] == ggml_type_size(tensor->type) &&
        tensor->nb[2] == tensor->nb[1]*tensor->ne[1] &&
        tensor->nb[3] == tensor->nb[2]*tensor->ne[2];
}

GGML_CALL bool ggml_is_permuted(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return tensor->nb[0] > tensor->nb[1] || tensor->nb[1] > tensor->nb[2] || tensor->nb[2] > tensor->nb[3];
}

static inline bool ggml_is_padded_1d(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return
        tensor->nb[0] == ggml_type_size(tensor->type) &&
        tensor->nb[2] == tensor->nb[1]*tensor->ne[1] &&
        tensor->nb[3] == tensor->nb[2]*tensor->ne[2];
}

GGML_CALL bool ggml_is_empty(const struct ggml_tensor * tensor) {
    for (int i = 0; i < GGML_MAX_DIMS; ++i) {
        if (tensor->ne[i] == 0) {
            // empty if any dimension has no elements
            return true;
        }
    }
    return false;
}

bool ggml_are_same_shape(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return
        (t0->ne[0] == t1->ne[0]) &&
        (t0->ne[1] == t1->ne[1]) &&
        (t0->ne[2] == t1->ne[2]) &&
        (t0->ne[3] == t1->ne[3]);
}

bool ggml_are_same_stride(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return
        (t0->nb[0] == t1->nb[0]) &&
        (t0->nb[1] == t1->nb[1]) &&
        (t0->nb[2] == t1->nb[2]) &&
        (t0->nb[3] == t1->nb[3]);
}
// check if t1 can be represented as a repetition of t0
static inline bool ggml_can_repeat(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return ggml_is_empty(t0) ? ggml_is_empty(t1) :
        (t1->ne[0]%t0->ne[0] == 0) &&
        (t1->ne[1]%t0->ne[1] == 0) &&
        (t1->ne[2]%t0->ne[2] == 0) &&
        (t1->ne[3]%t0->ne[3] == 0);
}

static inline bool ggml_can_repeat_rows(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return (t0->ne[0] == t1->ne[0]) && ggml_can_repeat(t0, t1);
}

static inline int ggml_up32(int n) {
    return (n + 31) & ~31;
}

//static inline int ggml_up64(int n) {
//    return (n + 63) & ~63;
//}

static inline int ggml_up(int n, int m) {
    // assert m is a power of 2
    GGML_ASSERT((m & (m - 1)) == 0);
    return (n + m - 1) & ~(m - 1);
}

// assert that pointer is aligned to GGML_MEM_ALIGN
#define ggml_assert_aligned(ptr) \
    GGML_ASSERT(((uintptr_t) (ptr))%GGML_MEM_ALIGN == 0)

////////////////////////////////////////////////////////////////////////////////

struct ggml_context * ggml_init(struct ggml_init_params params) {
    // make this function thread safe
    ggml_critical_section_start();

    static bool is_first_call = true;

    if (is_first_call) {
        // initialize time system (required on Windows)
        ggml_time_init();

        // initialize GELU, Quick GELU, SILU and EXP F32 tables
        {
            const uint64_t t_start = ggml_time_us(); UNUSED(t_start);

            for (int i = 0; i < (1 << 16); ++i) {
                union {
                    uint16_t u16;
                    ggml_fp16_t fp16;
                } u = {i};
                float f = ggml_table_f32_f16[i] = GGML_COMPUTE_FP16_TO_FP32(u.fp16);
                ggml_table_gelu_f16[i] = GGML_FP32_TO_FP16(ggml_gelu_f32(f));
                ggml_table_gelu_quick_f16[i] = GGML_FP32_TO_FP16(ggml_gelu_quick_f32(f));
                ggml_table_silu_f16[i] = GGML_FP32_TO_FP16(ggml_silu_f32(f));
                ggml_table_exp_f16[i] = GGML_FP32_TO_FP16(expf(f));
            }

            const uint64_t t_end = ggml_time_us(); UNUSED(t_end);

            GGML_PRINT_DEBUG("%s: GELU, Quick GELU, SILU and EXP tables initialized in %f ms\n", __func__, (t_end - t_start)/1000.0f);
        }

        // initialize g_state
        {
            const uint64_t t_start = ggml_time_us(); UNUSED(t_start);

            g_state = (struct ggml_state) {
                /*.contexts =*/ { { 0 } },
                /*.numa =*/ {
                    .n_nodes = 0,
                    .total_cpus = 0,
                },
            };

            for (int i = 0; i < GGML_MAX_CONTEXTS; ++i) {
                g_state.contexts[i].used = false;
            }

            const uint64_t t_end = ggml_time_us(); UNUSED(t_end);

            GGML_PRINT_DEBUG("%s: g_state initialized in %f ms\n", __func__, (t_end - t_start)/1000.0f);
        }

#if defined(GGML_USE_CLBLAST)
        ggml_cl_init();
#endif

        ggml_setup_op_has_task_pass();

        is_first_call = false;
    }

    // find non-used context in g_state
    struct ggml_context * ctx = NULL;

    for (int i = 0; i < GGML_MAX_CONTEXTS; i++) {
        if (!g_state.contexts[i].used) {
            g_state.contexts[i].used = true;
            ctx = &g_state.contexts[i].context;

            GGML_PRINT_DEBUG("%s: found unused context %d\n", __func__, i);
            break;
        }
    }

    if (ctx == NULL) {
        GGML_PRINT_DEBUG("%s: no unused context found\n", __func__);

        ggml_critical_section_end();

        return NULL;
    }
    // allow calling ggml_init with 0 size
    if (params.mem_size == 0) {
        params.mem_size = GGML_MEM_ALIGN;
    }

    const size_t mem_size = params.mem_buffer ? params.mem_size : GGML_PAD(params.mem_size, GGML_MEM_ALIGN);

    *ctx = (struct ggml_context) {
        /*.mem_size         =*/ mem_size,
        /*.mem_buffer       =*/ params.mem_buffer ? params.mem_buffer : GGML_ALIGNED_MALLOC(mem_size),
        /*.mem_buffer_owned =*/ params.mem_buffer ? false : true,
        /*.no_alloc         =*/ params.no_alloc,
        /*.no_alloc_save    =*/ params.no_alloc,
        /*.n_objects        =*/ 0,
        /*.objects_begin    =*/ NULL,
        /*.objects_end      =*/ NULL,
        /*.scratch          =*/ { 0, 0, NULL, },
        /*.scratch_save     =*/ { 0, 0, NULL, },
    };

    GGML_ASSERT(ctx->mem_buffer != NULL);

    ggml_assert_aligned(ctx->mem_buffer);

    GGML_PRINT_DEBUG("%s: context initialized\n", __func__);

    ggml_critical_section_end();

    return ctx;
}

void ggml_free(struct ggml_context * ctx) {
    if (ctx == NULL) {
        return;
    }

    // make this function thread safe
    ggml_critical_section_start();

    bool found = false;

    for (int i = 0; i < GGML_MAX_CONTEXTS; i++) {
        if (&g_state.contexts[i].context == ctx) {
            g_state.contexts[i].used = false;

            GGML_PRINT_DEBUG("%s: context %d has been freed. memory used = %zu\n",
                    __func__, i, ggml_used_mem(ctx));

            if (ctx->mem_buffer_owned) {
                GGML_ALIGNED_FREE(ctx->mem_buffer);
            }

            found = true;
            break;
        }
    }

    if (!found) {
        GGML_PRINT_DEBUG("%s: context not found\n", __func__);
    }

    ggml_critical_section_end();
}

size_t ggml_used_mem(const struct ggml_context * ctx) {
    return ctx->objects_end == NULL ? 0 : ctx->objects_end->offs + ctx->objects_end->size;
}

size_t ggml_set_scratch(struct ggml_context * ctx, struct ggml_scratch scratch) {
    const size_t result = ctx->scratch.data ? ctx->scratch.offs : 0;

    ctx->scratch = scratch;

    return result;
}

bool ggml_get_no_alloc(struct ggml_context * ctx) {
    return ctx->no_alloc;
}

void ggml_set_no_alloc(struct ggml_context * ctx, bool no_alloc) {
    ctx->no_alloc = no_alloc;
}

void * ggml_get_mem_buffer(const struct ggml_context * ctx) {
    return ctx->mem_buffer;
}

size_t ggml_get_mem_size(const struct ggml_context * ctx) {
    return ctx->mem_size;
}

size_t ggml_get_max_tensor_size(const struct ggml_context * ctx) {
    size_t max_size = 0;

    for (struct ggml_tensor * tensor = ggml_get_first_tensor(ctx); tensor != NULL; tensor = ggml_get_next_tensor(ctx, tensor)) {
        size_t bytes = ggml_nbytes(tensor);
        max_size = MAX(max_size, bytes);
    }

    return max_size;
}

// IMPORTANT:
// when creating "opt" tensors, always save and load the scratch buffer
// this is an error-prone process, but it is necessary to support inplace
// operators when using scratch buffers
// TODO: implement a better way
static void ggml_scratch_save(struct ggml_context * ctx) {
    // this is needed to allow opt tensors to store their data
    // TODO: again, need to find a better way
    ctx->no_alloc_save = ctx->no_alloc;
    ctx->no_alloc      = false;

    ctx->scratch_save = ctx->scratch;
    ctx->scratch.data = NULL;
}

static void ggml_scratch_load(struct ggml_context * ctx) {
    ctx->no_alloc = ctx->no_alloc_save;

    ctx->scratch = ctx->scratch_save;
}

////////////////////////////////////////////////////////////////////////////////
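// Objects (tensors, graphs, work buffers) are carved sequentially out of the
// context's memory pool: each allocation appends a ggml_object header plus the
// payload, padded to GGML_MEM_ALIGN, and links it onto the object list.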
static struct ggml_object * ggml_new_object(struct ggml_context * ctx, enum ggml_object_type type, size_t size) {
    // always insert objects at the end of the context's memory pool
    struct ggml_object * obj_cur = ctx->objects_end;

    const size_t cur_offs = obj_cur == NULL ? 0 : obj_cur->offs;
    const size_t cur_size = obj_cur == NULL ? 0 : obj_cur->size;
    const size_t cur_end  = cur_offs + cur_size;

    // align to GGML_MEM_ALIGN
    size_t size_needed = GGML_PAD(size, GGML_MEM_ALIGN);

    char * const mem_buffer = ctx->mem_buffer;
    struct ggml_object * const obj_new = (struct ggml_object *)(mem_buffer + cur_end);

    if (cur_end + size_needed + GGML_OBJECT_SIZE > ctx->mem_size) {
        GGML_PRINT("%s: not enough space in the context's memory pool (needed %zu, available %zu)\n",
                __func__, cur_end + size_needed, ctx->mem_size);
        assert(false);
        return NULL;
    }

    *obj_new = (struct ggml_object) {
        .offs = cur_end + GGML_OBJECT_SIZE,
        .size = size_needed,
        .next = NULL,
        .type = type,
    };

    ggml_assert_aligned(mem_buffer + obj_new->offs);

    if (obj_cur != NULL) {
        obj_cur->next = obj_new;
    } else {
        // this is the first object in this context
        ctx->objects_begin = obj_new;
    }

    ctx->objects_end = obj_new;

    //printf("%s: inserted new object at %zu, size = %zu\n", __func__, cur_end, obj_new->size);

    return obj_new;
}

static struct ggml_tensor * ggml_new_tensor_impl(
        struct ggml_context * ctx,
        enum   ggml_type      type,
        int                   n_dims,
        const int64_t       * ne,
        struct ggml_tensor  * view_src,
        size_t                view_offs) {
    assert(n_dims >= 1 && n_dims <= GGML_MAX_DIMS);

    // find the base tensor and absolute offset
    if (view_src != NULL && view_src->view_src != NULL) {
        view_offs += view_src->view_offs;
        view_src   = view_src->view_src;
    }

    size_t data_size = ggml_row_size(type, ne[0]);
    for (int i = 1; i < n_dims; i++) {
        data_size *= ne[i];
    }

    GGML_ASSERT(view_src == NULL || data_size == 0 || data_size + view_offs <= ggml_nbytes(view_src));

    void * data = view_src != NULL ? view_src->data : NULL;
    if (data != NULL) {
        data = (char *) data + view_offs;
    }

    size_t obj_alloc_size = 0;

    if (view_src == NULL && !ctx->no_alloc) {
        if (ctx->scratch.data != NULL) {
            // allocate tensor data in the scratch buffer
            if (ctx->scratch.offs + data_size > ctx->scratch.size) {
                GGML_PRINT("%s: not enough space in the scratch memory pool (needed %zu, available %zu)\n",
                        __func__, ctx->scratch.offs + data_size, ctx->scratch.size);
                assert(false);
                return NULL;
            }

            data = (char * const) ctx->scratch.data + ctx->scratch.offs;

            ctx->scratch.offs += data_size;
        } else {
            // allocate tensor data in the context's memory pool
            obj_alloc_size = data_size;
        }
    }

    struct ggml_object * const obj_new = ggml_new_object(ctx, GGML_OBJECT_TYPE_TENSOR, GGML_TENSOR_SIZE + obj_alloc_size);

    // TODO: for recoverable errors, we would need to free the data allocated from the scratch buffer here

    struct ggml_tensor * const result = (struct ggml_tensor *)((char *)ctx->mem_buffer + obj_new->offs);

    *result = (struct ggml_tensor) {
        /*.type         =*/ type,
        /*.backend      =*/ GGML_BACKEND_TYPE_CPU,
        /*.buffer       =*/ NULL,
        /*.ne           =*/ { 1, 1, 1, 1 },
        /*.nb           =*/ { 0, 0, 0, 0 },
        /*.op           =*/ GGML_OP_NONE,
        /*.op_params    =*/ { 0 },
        /*.flags        =*/ 0,
        /*.grad         =*/ NULL,
        /*.src          =*/ { NULL },
        /*.perf_runs    =*/ 0,
        /*.perf_cycles  =*/ 0,
        /*.perf_time_us =*/ 0,
        /*.view_src     =*/ view_src,
        /*.view_offs    =*/ view_offs,
        /*.data         =*/ obj_alloc_size > 0 ? (void *)(result + 1) : data,
        /*.name         =*/ { 0 },
        /*.extra        =*/ NULL,
        /*.padding      =*/ { 0 },
    };

    // TODO: this should not be needed as long as we don't rely on aligned SIMD loads
    //ggml_assert_aligned(result->data);

    for (int i = 0; i < n_dims; i++) {
        result->ne[i] = ne[i];
    }

    result->nb[0] = ggml_type_size(type);
    result->nb[1] = result->nb[0]*(result->ne[0]/ggml_blck_size(type));
    for (int i = 2; i < GGML_MAX_DIMS; i++) {
        result->nb[i] = result->nb[i - 1]*result->ne[i - 1];
    }

    ctx->n_objects++;

    return result;
}

struct ggml_tensor * ggml_new_tensor(
        struct ggml_context * ctx,
        enum   ggml_type      type,
        int                   n_dims,
        const int64_t       * ne) {
    return ggml_new_tensor_impl(ctx, type, n_dims, ne, NULL, 0);
}

struct ggml_tensor * ggml_new_tensor_1d(
        struct ggml_context * ctx,
        enum   ggml_type      type,
        int64_t ne0) {
    return ggml_new_tensor(ctx, type, 1, &ne0);
}

struct ggml_tensor * ggml_new_tensor_2d(
        struct ggml_context * ctx,
        enum   ggml_type      type,
        int64_t ne0,
        int64_t ne1) {
    const int64_t ne[2] = { ne0, ne1 };
    return ggml_new_tensor(ctx, type, 2, ne);
}

struct ggml_tensor * ggml_new_tensor_3d(
        struct ggml_context * ctx,
        enum   ggml_type      type,
        int64_t ne0,
        int64_t ne1,
        int64_t ne2) {
    const int64_t ne[3] = { ne0, ne1, ne2 };
    return ggml_new_tensor(ctx, type, 3, ne);
}

struct ggml_tensor * ggml_new_tensor_4d(
        struct ggml_context * ctx,
        enum   ggml_type      type,
        int64_t ne0,
        int64_t ne1,
        int64_t ne2,
        int64_t ne3) {
    const int64_t ne[4] = { ne0, ne1, ne2, ne3 };
    return ggml_new_tensor(ctx, type, 4, ne);
}
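// Usage sketch (illustrative only, not part of this file): create a context
// with a fixed memory pool, allocate a 4x3 F32 tensor in it, fill it, free it.
//
//     struct ggml_init_params params = {
//         /*.mem_size   =*/ 16*1024*1024,
//         /*.mem_buffer =*/ NULL,
//         /*.no_alloc   =*/ false,
//     };
//     struct ggml_context * ctx = ggml_init(params);
//     struct ggml_tensor  * t   = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 4, 3);
//     ggml_set_f32(t, 1.0f);
//     ggml_free(ctx);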
struct ggml_tensor * ggml_new_i32(struct ggml_context * ctx, int32_t value) {
    ggml_scratch_save(ctx);

    struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 1);

    ggml_scratch_load(ctx);

    ggml_set_i32(result, value);

    return result;
}

struct ggml_tensor * ggml_new_f32(struct ggml_context * ctx, float value) {
    ggml_scratch_save(ctx);

    struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);

    ggml_scratch_load(ctx);

    ggml_set_f32(result, value);

    return result;
}

struct ggml_tensor * ggml_dup_tensor(struct ggml_context * ctx, const struct ggml_tensor * src) {
    return ggml_new_tensor(ctx, src->type, GGML_MAX_DIMS, src->ne);
}

static void ggml_set_op_params(struct ggml_tensor * tensor, const void * params, size_t params_size) {
    GGML_ASSERT(tensor != NULL); // silence -Warray-bounds warnings
    assert(params_size <= GGML_MAX_OP_PARAMS);
    memcpy(tensor->op_params, params, params_size);
}

static int32_t ggml_get_op_params_i32(const struct ggml_tensor * tensor, uint32_t i) {
    assert(i < GGML_MAX_OP_PARAMS / sizeof(int32_t));
    return ((const int32_t *)(tensor->op_params))[i];
}

static float ggml_get_op_params_f32(const struct ggml_tensor * tensor, uint32_t i) {
    assert(i < GGML_MAX_OP_PARAMS / sizeof(float));
    return ((const float *)(tensor->op_params))[i];
}

static void ggml_set_op_params_i32(struct ggml_tensor * tensor, uint32_t i, int32_t value) {
    assert(i < GGML_MAX_OP_PARAMS / sizeof(int32_t));
    ((int32_t *)(tensor->op_params))[i] = value;
}

static void ggml_set_op_params_f32(struct ggml_tensor * tensor, uint32_t i, float value) {
    assert(i < GGML_MAX_OP_PARAMS / sizeof(float));
    ((float *)(tensor->op_params))[i] = value;
}

struct ggml_tensor * ggml_set_zero(struct ggml_tensor * tensor) {
    memset(tensor->data, 0, ggml_nbytes(tensor));
    return tensor;
}

struct ggml_tensor * ggml_set_i32 (struct ggml_tensor * tensor, int32_t value) {
    const int n  = ggml_nrows(tensor);
    const int nc = tensor->ne[0];
    const size_t n1 = tensor->nb[1];

    char * const data = tensor->data;

    switch (tensor->type) {
        case GGML_TYPE_I8:
            {
                assert(tensor->nb[0] == sizeof(int8_t));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_i8(nc, (int8_t *)(data + i*n1), value);
                }
            } break;
        case GGML_TYPE_I16:
            {
                assert(tensor->nb[0] == sizeof(int16_t));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_i16(nc, (int16_t *)(data + i*n1), value);
                }
            } break;
        case GGML_TYPE_I32:
            {
                assert(tensor->nb[0] == sizeof(int32_t));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_i32(nc, (int32_t *)(data + i*n1), value);
                }
            } break;
        case GGML_TYPE_F16:
            {
                assert(tensor->nb[0] == sizeof(ggml_fp16_t));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), GGML_FP32_TO_FP16(value));
                }
            } break;
        case GGML_TYPE_BF16:
            {
                assert(tensor->nb[0] == sizeof(ggml_bf16_t));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_bf16(nc, (ggml_bf16_t *)(data + i*n1), GGML_FP32_TO_BF16(value));
                }
            } break;
        case GGML_TYPE_F32:
            {
                assert(tensor->nb[0] == sizeof(float));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_f32(nc, (float *)(data + i*n1), value);
                }
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }

    return tensor;
}

struct ggml_tensor * ggml_set_f32(struct ggml_tensor * tensor, float value) {
    const int n  = ggml_nrows(tensor);
    const int nc = tensor->ne[0];
    const size_t n1 = tensor->nb[1];

    char * const data = tensor->data;

    switch (tensor->type) {
        case GGML_TYPE_I8:
            {
                assert(tensor->nb[0] == sizeof(int8_t));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_i8(nc, (int8_t *)(data + i*n1), value);
                }
            } break;
        case GGML_TYPE_I16:
            {
                assert(tensor->nb[0] == sizeof(int16_t));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_i16(nc, (int16_t *)(data + i*n1), value);
                }
            } break;
        case GGML_TYPE_I32:
            {
                assert(tensor->nb[0] == sizeof(int32_t));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_i32(nc, (int32_t *)(data + i*n1), value);
                }
            } break;
        case GGML_TYPE_F16:
            {
                assert(tensor->nb[0] == sizeof(ggml_fp16_t));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), GGML_FP32_TO_FP16(value));
                }
            } break;
        case GGML_TYPE_BF16:
            {
                assert(tensor->nb[0] == sizeof(ggml_bf16_t));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_bf16(nc, (ggml_bf16_t *)(data + i*n1), GGML_FP32_TO_BF16(value));
                }
            } break;
        case GGML_TYPE_F32:
            {
                assert(tensor->nb[0] == sizeof(float));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_f32(nc, (float *)(data + i*n1), value);
                }
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }

    return tensor;
}
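// Convert a flat element index into 4-D coordinates (row-major over the ne[]
// dimensions). Any of the output pointers may be NULL if that coordinate is
// not needed by the caller.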
void ggml_unravel_index(const struct ggml_tensor * tensor, int64_t i, int64_t * i0, int64_t * i1, int64_t * i2, int64_t * i3) {
    const int64_t ne2 = tensor->ne[2];
    const int64_t ne1 = tensor->ne[1];
    const int64_t ne0 = tensor->ne[0];

    const int64_t i3_ = (i/(ne2*ne1*ne0));
    const int64_t i2_ = (i - i3_*ne2*ne1*ne0)/(ne1*ne0);
    const int64_t i1_ = (i - i3_*ne2*ne1*ne0 - i2_*ne1*ne0)/ne0;
    const int64_t i0_ = (i - i3_*ne2*ne1*ne0 - i2_*ne1*ne0 - i1_*ne0);

    if (i0) {
        * i0 = i0_;
    }
    if (i1) {
        * i1 = i1_;
    }
    if (i2) {
        * i2 = i2_;
    }
    if (i3) {
        * i3 = i3_;
    }
}

int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i) {
    if (!ggml_is_contiguous(tensor)) {
        int64_t id[4] = { 0, 0, 0, 0 };
        ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]);
        return ggml_get_i32_nd(tensor, id[0], id[1], id[2], id[3]);
    }
    switch (tensor->type) {
        case GGML_TYPE_I8:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
                return ((int8_t *)(tensor->data))[i];
            }
        case GGML_TYPE_I16:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
                return ((int16_t *)(tensor->data))[i];
            }
        case GGML_TYPE_I32:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
                return ((int32_t *)(tensor->data))[i];
            }
        case GGML_TYPE_F16:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
                return GGML_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]);
            }
        case GGML_TYPE_BF16:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(ggml_bf16_t));
                return GGML_BF16_TO_FP32(((ggml_bf16_t *)(tensor->data))[i]);
            }
        case GGML_TYPE_F32:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(float));
                return ((float *)(tensor->data))[i];
            }
        default:
            {
                GGML_ASSERT(false);
            }
    }

    return 0.0f;
}

void ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value) {
    if (!ggml_is_contiguous(tensor)) {
        int64_t id[4] = { 0, 0, 0, 0 };
        ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]);
        ggml_set_i32_nd(tensor, id[0], id[1], id[2], id[3], value);
        return;
    }
    switch (tensor->type) {
        case GGML_TYPE_I8:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
                ((int8_t *)(tensor->data))[i] = value;
            } break;
        case GGML_TYPE_I16:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
                ((int16_t *)(tensor->data))[i] = value;
            } break;
        case GGML_TYPE_I32:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
                ((int32_t *)(tensor->data))[i] = value;
            } break;
        case GGML_TYPE_F16:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
                ((ggml_fp16_t *)(tensor->data))[i] = GGML_FP32_TO_FP16(value);
            } break;
        case GGML_TYPE_BF16:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(ggml_bf16_t));
                ((ggml_bf16_t *)(tensor->data))[i] = GGML_FP32_TO_BF16(value);
            } break;
        case GGML_TYPE_F32:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(float));
                ((float *)(tensor->data))[i] = value;
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
int32_t ggml_get_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3) {
    void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3];
    switch (tensor->type) {
        case GGML_TYPE_I8:
            return ((int8_t *) data)[0];
        case GGML_TYPE_I16:
            return ((int16_t *) data)[0];
        case GGML_TYPE_I32:
            return ((int32_t *) data)[0];
        case GGML_TYPE_F16:
            return GGML_FP16_TO_FP32(((ggml_fp16_t *) data)[0]);
        case GGML_TYPE_BF16:
            return GGML_BF16_TO_FP32(((ggml_bf16_t *) data)[0]);
        case GGML_TYPE_F32:
            return ((float *) data)[0];
        default:
            GGML_ASSERT(false);
    }

    return 0.0f;
}

void ggml_set_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, int32_t value) {
    void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3];
    switch (tensor->type) {
        case GGML_TYPE_I8:
            {
                ((int8_t *)(data))[0] = value;
            } break;
        case GGML_TYPE_I16:
            {
                ((int16_t *)(data))[0] = value;
            } break;
        case GGML_TYPE_I32:
            {
                ((int32_t *)(data))[0] = value;
            } break;
        case GGML_TYPE_F16:
            {
                ((ggml_fp16_t *)(data))[0] = GGML_FP32_TO_FP16(value);
            } break;
        case GGML_TYPE_BF16:
            {
                ((ggml_bf16_t *)(data))[0] = GGML_FP32_TO_BF16(value);
            } break;
        case GGML_TYPE_F32:
            {
                ((float *)(data))[0] = value;
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

float ggml_get_f32_1d(const struct ggml_tensor * tensor, int i) {
    if (!ggml_is_contiguous(tensor)) {
        int64_t id[4] = { 0, 0, 0, 0 };
        ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]);
        return ggml_get_f32_nd(tensor, id[0], id[1], id[2], id[3]);
    }
    switch (tensor->type) {
        case GGML_TYPE_I8:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
                return ((int8_t *)(tensor->data))[i];
            }
        case GGML_TYPE_I16:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
                return ((int16_t *)(tensor->data))[i];
            }
        case GGML_TYPE_I32:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
                return ((int32_t *)(tensor->data))[i];
            }
        case GGML_TYPE_F16:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
                return GGML_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]);
            }
        case GGML_TYPE_BF16:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(ggml_bf16_t));
                return GGML_BF16_TO_FP32(((ggml_bf16_t *)(tensor->data))[i]);
            }
        case GGML_TYPE_F32:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(float));
                return ((float *)(tensor->data))[i];
            }
        default:
            {
                GGML_ASSERT(false);
            }
    }

    return 0.0f;
}

void ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value) {
    if (!ggml_is_contiguous(tensor)) {
        int64_t id[4] = { 0, 0, 0, 0 };
        ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]);
        ggml_set_f32_nd(tensor, id[0], id[1], id[2], id[3], value);
        return;
    }
    switch (tensor->type) {
        case GGML_TYPE_I8:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
                ((int8_t *)(tensor->data))[i] = value;
            } break;
        case GGML_TYPE_I16:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
                ((int16_t *)(tensor->data))[i] = value;
            } break;
        case GGML_TYPE_I32:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
                ((int32_t *)(tensor->data))[i] = value;
            } break;
        case GGML_TYPE_F16:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
                ((ggml_fp16_t *)(tensor->data))[i] = GGML_FP32_TO_FP16(value);
            } break;
        case GGML_TYPE_BF16:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(ggml_bf16_t));
                ((ggml_bf16_t *)(tensor->data))[i] = GGML_FP32_TO_BF16(value);
            } break;
        case GGML_TYPE_F32:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(float));
                ((float *)(tensor->data))[i] = value;
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
  3173. float ggml_get_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3) {
  3174. void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3];
  3175. switch (tensor->type) {
  3176. case GGML_TYPE_I8:
  3177. return ((int8_t *) data)[0];
  3178. case GGML_TYPE_I16:
  3179. return ((int16_t *) data)[0];
  3180. case GGML_TYPE_I32:
  3181. return ((int32_t *) data)[0];
  3182. case GGML_TYPE_F16:
  3183. return GGML_FP16_TO_FP32(((ggml_fp16_t *) data)[0]);
  3184. case GGML_TYPE_BF16:
  3185. return GGML_BF16_TO_FP32(((ggml_bf16_t *) data)[0]);
  3186. case GGML_TYPE_F32:
  3187. return ((float *) data)[0];
  3188. default:
  3189. GGML_ASSERT(false);
  3190. }
  3191. return 0.0f;
  3192. }

void ggml_set_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, float value) {
    void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3];
    switch (tensor->type) {
        case GGML_TYPE_I8:
            {
                ((int8_t *)(data))[0] = value;
            } break;
        case GGML_TYPE_I16:
            {
                ((int16_t *)(data))[0] = value;
            } break;
        case GGML_TYPE_I32:
            {
                ((int32_t *)(data))[0] = value;
            } break;
        case GGML_TYPE_F16:
            {
                ((ggml_fp16_t *)(data))[0] = GGML_FP32_TO_FP16(value);
            } break;
        case GGML_TYPE_BF16:
            {
                ((ggml_bf16_t *)(data))[0] = GGML_FP32_TO_BF16(value);
            } break;
        case GGML_TYPE_F32:
            {
                ((float *)(data))[0] = value;
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
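
// Usage sketch (added illustration, not part of upstream ggml): writing and
// reading one element through the f32 accessors. The context, type and shape
// below are assumptions for the example; values round-trip through float, so
// F16/BF16 storage applies the usual rounding.
//
//     struct ggml_tensor * t = ggml_new_tensor_2d(ctx, GGML_TYPE_F16, 4, 2);
//     ggml_set_f32_nd(t, 3, 1, 0, 0, 0.25f);     // element [i0=3, i1=1]
//     float v = ggml_get_f32_nd(t, 3, 1, 0, 0);  // -> 0.25f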

void * ggml_get_data(const struct ggml_tensor * tensor) {
    return tensor->data;
}

float * ggml_get_data_f32(const struct ggml_tensor * tensor) {
    assert(tensor->type == GGML_TYPE_F32);
    return (float *)(tensor->data);
}

GGML_CALL enum ggml_unary_op ggml_get_unary_op(const struct ggml_tensor * tensor) {
    GGML_ASSERT(tensor->op == GGML_OP_UNARY);
    return (enum ggml_unary_op) ggml_get_op_params_i32(tensor, 0);
}

const char * ggml_get_name(const struct ggml_tensor * tensor) {
    return tensor->name;
}

struct ggml_tensor * ggml_set_name(struct ggml_tensor * tensor, const char * name) {
    strncpy(tensor->name, name, sizeof(tensor->name) - 1);
    tensor->name[sizeof(tensor->name) - 1] = '\0';
    return tensor;
}

struct ggml_tensor * ggml_format_name(struct ggml_tensor * tensor, const char * fmt, ...) {
    va_list args;
    va_start(args, fmt);
    vsnprintf(tensor->name, sizeof(tensor->name), fmt, args);
    va_end(args);
    return tensor;
}
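
// Naming sketch (added illustration): tensor names live in a fixed-size
// buffer inside the struct, so overlong names are truncated rather than
// overflowing; `il` below is an assumed loop variable.
//
//     ggml_set_name(t, "kv_cache");
//     ggml_format_name(t, "blk.%d.attn_out", il);  // printf-style formatting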

struct ggml_tensor * ggml_view_tensor(
        struct ggml_context * ctx,
        struct ggml_tensor * src) {
    struct ggml_tensor * result = ggml_new_tensor_impl(ctx, src->type, GGML_MAX_DIMS, src->ne, src, 0);
    ggml_format_name(result, "%s (view)", src->name);
    for (int i = 0; i < GGML_MAX_DIMS; i++) {
        result->nb[i] = src->nb[i];
    }
    return result;
}

struct ggml_tensor * ggml_get_first_tensor(const struct ggml_context * ctx) {
    struct ggml_object * obj = ctx->objects_begin;
    char * const mem_buffer = ctx->mem_buffer;
    while (obj != NULL) {
        if (obj->type == GGML_OBJECT_TYPE_TENSOR) {
            return (struct ggml_tensor *)(mem_buffer + obj->offs);
        }
        obj = obj->next;
    }
    return NULL;
}

struct ggml_tensor * ggml_get_next_tensor(const struct ggml_context * ctx, struct ggml_tensor * tensor) {
    struct ggml_object * obj = (struct ggml_object *) ((char *)tensor - GGML_OBJECT_SIZE);
    obj = obj->next;
    char * const mem_buffer = ctx->mem_buffer;
    while (obj != NULL) {
        if (obj->type == GGML_OBJECT_TYPE_TENSOR) {
            return (struct ggml_tensor *)(mem_buffer + obj->offs);
        }
        obj = obj->next;
    }
    return NULL;
}

struct ggml_tensor * ggml_get_tensor(struct ggml_context * ctx, const char * name) {
    struct ggml_object * obj = ctx->objects_begin;
    char * const mem_buffer = ctx->mem_buffer;
    while (obj != NULL) {
        if (obj->type == GGML_OBJECT_TYPE_TENSOR) {
            struct ggml_tensor * cur = (struct ggml_tensor *)(mem_buffer + obj->offs);
            if (strcmp(cur->name, name) == 0) {
                return cur;
            }
        }
        obj = obj->next;
    }
    return NULL;
}
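
// Iteration sketch (added illustration): walking every tensor allocated in a
// context, e.g. to dump names. ggml_get_next_tensor works because each tensor
// is preceded in the context buffer by its ggml_object header.
//
//     for (struct ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL;
//          t = ggml_get_next_tensor(ctx, t)) {
//         printf("%s\n", ggml_get_name(t));
//     }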

////////////////////////////////////////////////////////////////////////////////

// ggml_dup

static struct ggml_tensor * ggml_dup_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        bool inplace) {
    bool is_node = false;
    if (!inplace && (a->grad)) {
        is_node = true;
    }
    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
    result->op = GGML_OP_DUP;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    return result;
}

struct ggml_tensor * ggml_dup(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_dup_impl(ctx, a, false);
}

struct ggml_tensor * ggml_dup_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_dup_impl(ctx, a, true);
}

// ggml_add

static struct ggml_tensor * ggml_add_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        bool inplace) {
    GGML_ASSERT(ggml_can_repeat(b, a));
    bool is_node = false;
    if (!inplace && (a->grad || b->grad)) {
        // TODO: support backward pass for broadcasting
        GGML_ASSERT(ggml_are_same_shape(a, b));
        is_node = true;
    }
    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
    result->op = GGML_OP_ADD;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;
    return result;
}

struct ggml_tensor * ggml_add(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_add_impl(ctx, a, b, false);
}

struct ggml_tensor * ggml_add_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_add_impl(ctx, a, b, true);
}
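
// Graph sketch (added illustration): ggml_add only records a node; nothing is
// computed until the graph is evaluated. The graph helpers used below are
// defined elsewhere in this file and are assumed in scope here.
//
//     struct ggml_tensor * a = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 8);
//     struct ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 8);  // or any shape repeatable to a
//     struct ggml_tensor * c = ggml_add(ctx, a, b);
//     struct ggml_cgraph * gf = ggml_new_graph(ctx);
//     ggml_build_forward_expand(gf, c);
//     ggml_graph_compute_with_ctx(ctx, gf, /*n_threads=*/1);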

// ggml_add_cast

static struct ggml_tensor * ggml_add_cast_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        enum ggml_type type) {
    // TODO: support less-strict constraint
    //       GGML_ASSERT(ggml_can_repeat(b, a));
    GGML_ASSERT(ggml_can_repeat_rows(b, a));
    // currently only supported for quantized input and f16/bf16
    GGML_ASSERT(ggml_is_quantized(a->type) ||
                a->type == GGML_TYPE_F16 ||
                a->type == GGML_TYPE_BF16);
    bool is_node = false;
    if (a->grad || b->grad) {
        // TODO: support backward pass for broadcasting
        GGML_ASSERT(ggml_are_same_shape(a, b));
        is_node = true;
    }
    struct ggml_tensor * result = ggml_new_tensor(ctx, type, GGML_MAX_DIMS, a->ne);
    result->op = GGML_OP_ADD;
    result->grad = is_node ? ggml_new_tensor(ctx, GGML_TYPE_F32, GGML_MAX_DIMS, a->ne) : NULL;
    result->src[0] = a;
    result->src[1] = b;
    return result;
}

struct ggml_tensor * ggml_add_cast(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        enum ggml_type type) {
    return ggml_add_cast_impl(ctx, a, b, type);
}

// ggml_add1

static struct ggml_tensor * ggml_add1_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        bool inplace) {
    GGML_ASSERT(ggml_is_scalar(b));
    GGML_ASSERT(ggml_is_padded_1d(a));
    bool is_node = false;
    if (a->grad || b->grad) {
        is_node = true;
    }
    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
    result->op = GGML_OP_ADD1;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;
    return result;
}

struct ggml_tensor * ggml_add1(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_add1_impl(ctx, a, b, false);
}

struct ggml_tensor * ggml_add1_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_add1_impl(ctx, a, b, true);
}

// ggml_acc

static struct ggml_tensor * ggml_acc_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        size_t nb1,
        size_t nb2,
        size_t nb3,
        size_t offset,
        bool inplace) {
    GGML_ASSERT(ggml_nelements(b) <= ggml_nelements(a));
    GGML_ASSERT(ggml_is_contiguous(a));
    GGML_ASSERT(a->type == GGML_TYPE_F32);
    GGML_ASSERT(b->type == GGML_TYPE_F32);
    bool is_node = false;
    if (!inplace && (a->grad || b->grad)) {
        is_node = true;
    }
    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
    int32_t params[] = { nb1, nb2, nb3, offset, inplace ? 1 : 0 };
    ggml_set_op_params(result, params, sizeof(params));
    result->op = GGML_OP_ACC;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;
    return result;
}

struct ggml_tensor * ggml_acc(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        size_t nb1,
        size_t nb2,
        size_t nb3,
        size_t offset) {
    return ggml_acc_impl(ctx, a, b, nb1, nb2, nb3, offset, false);
}

struct ggml_tensor * ggml_acc_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        size_t nb1,
        size_t nb2,
        size_t nb3,
        size_t offset) {
    return ggml_acc_impl(ctx, a, b, nb1, nb2, nb3, offset, true);
}

// ggml_sub

static struct ggml_tensor * ggml_sub_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        bool inplace) {
    GGML_ASSERT(ggml_are_same_shape(a, b));
    bool is_node = false;
    if (!inplace && (a->grad || b->grad)) {
        is_node = true;
    }
    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
    result->op = GGML_OP_SUB;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;
    return result;
}

struct ggml_tensor * ggml_sub(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_sub_impl(ctx, a, b, false);
}

struct ggml_tensor * ggml_sub_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_sub_impl(ctx, a, b, true);
}

// ggml_mul

static struct ggml_tensor * ggml_mul_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        bool inplace) {
    GGML_ASSERT(ggml_can_repeat(b, a));
    bool is_node = false;
    if (!inplace && (a->grad || b->grad)) {
        // TODO: support backward pass for broadcasting
        GGML_ASSERT(ggml_are_same_shape(a, b));
        is_node = true;
    }
    if (inplace) {
        GGML_ASSERT(!is_node);
    }
    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
    result->op = GGML_OP_MUL;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;
    return result;
}

struct ggml_tensor * ggml_mul(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_mul_impl(ctx, a, b, false);
}

struct ggml_tensor * ggml_mul_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_mul_impl(ctx, a, b, true);
}

// ggml_div

static struct ggml_tensor * ggml_div_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        bool inplace) {
    GGML_ASSERT(ggml_can_repeat(b, a));
    bool is_node = false;
    if (!inplace && (a->grad || b->grad)) {
        is_node = true;
    }
    if (inplace) {
        GGML_ASSERT(!is_node);
    }
    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
    result->op = GGML_OP_DIV;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;
    return result;
}

struct ggml_tensor * ggml_div(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_div_impl(ctx, a, b, false);
}

struct ggml_tensor * ggml_div_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_div_impl(ctx, a, b, true);
}

// ggml_sqr

static struct ggml_tensor * ggml_sqr_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        bool inplace) {
    bool is_node = false;
    if (!inplace && (a->grad)) {
        is_node = true;
    }
    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
    result->op = GGML_OP_SQR;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    return result;
}

struct ggml_tensor * ggml_sqr(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_sqr_impl(ctx, a, false);
}

struct ggml_tensor * ggml_sqr_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_sqr_impl(ctx, a, true);
}

// ggml_sqrt

static struct ggml_tensor * ggml_sqrt_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        bool inplace) {
    bool is_node = false;
    if (!inplace && (a->grad)) {
        is_node = true;
    }
    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
    result->op = GGML_OP_SQRT;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    return result;
}

struct ggml_tensor * ggml_sqrt(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_sqrt_impl(ctx, a, false);
}

struct ggml_tensor * ggml_sqrt_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_sqrt_impl(ctx, a, true);
}

// ggml_log

static struct ggml_tensor * ggml_log_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        bool inplace) {
    bool is_node = false;
    if (!inplace && (a->grad)) {
        is_node = true;
    }
    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
    result->op = GGML_OP_LOG;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    return result;
}

struct ggml_tensor * ggml_log(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_log_impl(ctx, a, false);
}

struct ggml_tensor * ggml_log_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_log_impl(ctx, a, true);
}

// ggml_sum

struct ggml_tensor * ggml_sum(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    bool is_node = false;
    if (a->grad) {
        is_node = true;
    }
    struct ggml_tensor * result = ggml_new_tensor_1d(ctx, a->type, 1);
    result->op = GGML_OP_SUM;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    return result;
}

// ggml_sum_rows

struct ggml_tensor * ggml_sum_rows(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    bool is_node = false;
    if (a->grad) {
        is_node = true;
    }
    int64_t ne[GGML_MAX_DIMS] = { 1 };
    for (int i = 1; i < GGML_MAX_DIMS; ++i) {
        ne[i] = a->ne[i];
    }
    struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, GGML_MAX_DIMS, ne);
    result->op = GGML_OP_SUM_ROWS;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    return result;
}

// ggml_mean

struct ggml_tensor * ggml_mean(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    bool is_node = false;
    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement
        is_node = true;
    }
    int64_t ne[4] = { 1, a->ne[1], a->ne[2], a->ne[3] };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);
    result->op = GGML_OP_MEAN;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    return result;
}

// ggml_argmax

struct ggml_tensor * ggml_argmax(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    GGML_ASSERT(ggml_is_matrix(a));
    bool is_node = false;
    if (a->grad) {
        GGML_ASSERT(false);
        is_node = true;
    }
    struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, a->ne[1]);
    result->op = GGML_OP_ARGMAX;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    return result;
}

// ggml_repeat

struct ggml_tensor * ggml_repeat(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    GGML_ASSERT(ggml_can_repeat(a, b));
    bool is_node = false;
    if (a->grad) {
        is_node = true;
    }
    struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, GGML_MAX_DIMS, b->ne);
    result->op = GGML_OP_REPEAT;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    return result;
}
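
// Broadcast sketch (added illustration): ggml_repeat tiles `a` up to the
// shape of `b`; the shapes below are assumptions.
//
//     struct ggml_tensor * row = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4);     // [4]
//     struct ggml_tensor * mat = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 4, 3);  // [4, 3]
//     struct ggml_tensor * rep = ggml_repeat(ctx, row, mat);                    // row tiled over 3 rows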

// ggml_repeat_back

struct ggml_tensor * ggml_repeat_back(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    GGML_ASSERT(ggml_can_repeat(b, a));
    bool is_node = false;
    if (a->grad) {
        is_node = true;
    }
    if (ggml_are_same_shape(a, b) && !is_node) {
        return a;
    }
    struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, GGML_MAX_DIMS, b->ne);
    result->op = GGML_OP_REPEAT_BACK;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    return result;
}

// ggml_concat

struct ggml_tensor * ggml_concat(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    GGML_ASSERT(a->ne[0] == b->ne[0] && a->ne[1] == b->ne[1] && a->ne[3] == b->ne[3]);
    bool is_node = false;
    if (a->grad || b->grad) {
        is_node = true;
    }
    struct ggml_tensor * result = ggml_new_tensor_4d(ctx, a->type, a->ne[0], a->ne[1], a->ne[2] + b->ne[2], a->ne[3]);
    result->op = GGML_OP_CONCAT;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;
    return result;
}

// ggml_abs

struct ggml_tensor * ggml_abs(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary(ctx, a, GGML_UNARY_OP_ABS);
}

struct ggml_tensor * ggml_abs_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_ABS);
}

// ggml_sgn

struct ggml_tensor * ggml_sgn(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary(ctx, a, GGML_UNARY_OP_SGN);
}

struct ggml_tensor * ggml_sgn_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_SGN);
}

// ggml_neg

struct ggml_tensor * ggml_neg(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary(ctx, a, GGML_UNARY_OP_NEG);
}

struct ggml_tensor * ggml_neg_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_NEG);
}

// ggml_step

struct ggml_tensor * ggml_step(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary(ctx, a, GGML_UNARY_OP_STEP);
}

struct ggml_tensor * ggml_step_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_STEP);
}

// ggml_tanh

struct ggml_tensor * ggml_tanh(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary(ctx, a, GGML_UNARY_OP_TANH);
}

struct ggml_tensor * ggml_tanh_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_TANH);
}

// ggml_elu

struct ggml_tensor * ggml_elu(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary(ctx, a, GGML_UNARY_OP_ELU);
}

struct ggml_tensor * ggml_elu_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_ELU);
}

// ggml_relu

struct ggml_tensor * ggml_relu(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary(ctx, a, GGML_UNARY_OP_RELU);
}

struct ggml_tensor * ggml_relu_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_RELU);
}

// ggml_leaky_relu

struct ggml_tensor * ggml_leaky_relu(
        struct ggml_context * ctx,
        struct ggml_tensor * a, float negative_slope, bool inplace) {
    bool is_node = false;
    if (!inplace && (a->grad)) {
        is_node = true;
    }
    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
    ggml_set_op_params(result, &negative_slope, sizeof(negative_slope));
    result->op = GGML_OP_LEAKY_RELU;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    return result;
}

// ggml_sigmoid

struct ggml_tensor * ggml_sigmoid(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary(ctx, a, GGML_UNARY_OP_SIGMOID);
}

struct ggml_tensor * ggml_sigmoid_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_SIGMOID);
}

// ggml_gelu

struct ggml_tensor * ggml_gelu(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary(ctx, a, GGML_UNARY_OP_GELU);
}

struct ggml_tensor * ggml_gelu_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_GELU);
}

// ggml_gelu_quick

struct ggml_tensor * ggml_gelu_quick(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary(ctx, a, GGML_UNARY_OP_GELU_QUICK);
}

struct ggml_tensor * ggml_gelu_quick_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_GELU_QUICK);
}

// ggml_silu

struct ggml_tensor * ggml_silu(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary(ctx, a, GGML_UNARY_OP_SILU);
}

struct ggml_tensor * ggml_silu_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_SILU);
}

// ggml_silu_back

struct ggml_tensor * ggml_silu_back(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    bool is_node = false;
    if (a->grad || b->grad) {
        // TODO: implement backward
        is_node = true;
    }
    struct ggml_tensor * result = ggml_dup_tensor(ctx, a);
    result->op = GGML_OP_SILU_BACK;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;
    return result;
}

// ggml_hardswish

struct ggml_tensor * ggml_hardswish(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary(ctx, a, GGML_UNARY_OP_HARDSWISH);
}

// ggml_hardsigmoid

struct ggml_tensor * ggml_hardsigmoid(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary(ctx, a, GGML_UNARY_OP_HARDSIGMOID);
}

// ggml_norm

static struct ggml_tensor * ggml_norm_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        float eps,
        bool inplace) {
    bool is_node = false;
    if (!inplace && (a->grad)) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }
    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
    ggml_set_op_params(result, &eps, sizeof(eps));
    result->op = GGML_OP_NORM;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    return result;
}

struct ggml_tensor * ggml_norm(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        float eps) {
    return ggml_norm_impl(ctx, a, eps, false);
}

struct ggml_tensor * ggml_norm_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        float eps) {
    return ggml_norm_impl(ctx, a, eps, true);
}

// ggml_rms_norm

static struct ggml_tensor * ggml_rms_norm_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        float eps,
        bool inplace) {
    bool is_node = false;
    if (!inplace && (a->grad)) {
        is_node = true;
    }
    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
    ggml_set_op_params(result, &eps, sizeof(eps));
    result->op = GGML_OP_RMS_NORM;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    return result;
}

struct ggml_tensor * ggml_rms_norm(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        float eps) {
    return ggml_rms_norm_impl(ctx, a, eps, false);
}

struct ggml_tensor * ggml_rms_norm_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        float eps) {
    return ggml_rms_norm_impl(ctx, a, eps, true);
}
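
// Note (added): RMS norm divides each row by its root mean square,
// out = x / sqrt(mean(x^2) + eps), without subtracting the mean; eps is the
// stabilizer stored in op_params above.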

// ggml_rms_norm_back

struct ggml_tensor * ggml_rms_norm_back(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        float eps) {
    bool is_node = false;
    if (a->grad) {
        // TODO: implement backward
        is_node = true;
    }
    struct ggml_tensor * result = ggml_dup_tensor(ctx, a);
    ggml_set_op_params(result, &eps, sizeof(eps));
    result->op = GGML_OP_RMS_NORM_BACK;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;
    return result;
}

// ggml_group_norm

static struct ggml_tensor * ggml_group_norm_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_groups,
        bool inplace) {
    bool is_node = false;
    if (!inplace && (a->grad)) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }
    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
    result->op_params[0] = n_groups;
    result->op = GGML_OP_GROUP_NORM;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    return result;
}

struct ggml_tensor * ggml_group_norm(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_groups) {
    return ggml_group_norm_impl(ctx, a, n_groups, false);
}

struct ggml_tensor * ggml_group_norm_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_groups) {
    return ggml_group_norm_impl(ctx, a, n_groups, true);
}

// ggml_mul_mat

struct ggml_tensor * ggml_mul_mat(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    GGML_ASSERT(ggml_can_mul_mat(a, b));
    GGML_ASSERT(!ggml_is_transposed(a));
    bool is_node = false;
    if (a->grad || b->grad) {
        is_node = true;
    }
    const int64_t ne[4] = { a->ne[1], b->ne[1], b->ne[2], b->ne[3] };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);
    result->op = GGML_OP_MUL_MAT;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;
    return result;
}

void ggml_mul_mat_set_prec(
        struct ggml_tensor * a,
        enum ggml_prec prec) {
    GGML_ASSERT(a->op == GGML_OP_MUL_MAT);
    const int32_t prec_i32 = (int32_t) prec;
    ggml_set_op_params_i32(a, 0, prec_i32);
}
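
// Shape sketch (added illustration): per the ne[] computation above, for
// a = [k, m] and b = [k, n] the result is [m, n] in F32 (ne[0] is the
// contiguous dimension in ggml). k, m, n are assumptions.
//
//     struct ggml_tensor * A = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, k, m);
//     struct ggml_tensor * B = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, k, n);
//     struct ggml_tensor * C = ggml_mul_mat(ctx, A, B);  // C->ne = { m, n, 1, 1 }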

// ggml_mul_mat_id

/*
    c = ggml_mul_mat_id(ctx, as, b, ids);

    as  -> [cols, rows, n_expert]
    ids -> [n_expert_used, n_tokens] (i32)
    b   -> [cols, n_expert_used, n_tokens]
    c   -> [cols, n_expert_used, n_tokens]

    in b, n_expert_used can be broadcasted to match the n_expert_used of ids

    c ~= as[:,:,i] @ b[:,i%r,t], i = ids[e,t] for all e,t in ids
*/
struct ggml_tensor * ggml_mul_mat_id(
        struct ggml_context * ctx,
        struct ggml_tensor * as,
        struct ggml_tensor * b,
        struct ggml_tensor * ids) {
    GGML_ASSERT(!ggml_is_transposed(as));
    GGML_ASSERT(ids->type == GGML_TYPE_I32);
    GGML_ASSERT(as->ne[3] == 1); // as is 3d (one matrix per expert)
    GGML_ASSERT(b->ne[3] == 1); // b is 3d
    GGML_ASSERT(ids->ne[2] == 1 && ids->ne[3] == 1); // ids is 2d
    GGML_ASSERT(ids->ne[1] == b->ne[2]); // must have an expert list per b row
    GGML_ASSERT(as->ne[0] == b->ne[0]); // can_mul_mat
    GGML_ASSERT(ids->ne[0] % b->ne[1] == 0); // can broadcast
    bool is_node = false;
    if (as->grad || b->grad) {
        is_node = true;
    }
    const int64_t ne[4] = { as->ne[1], ids->ne[0], b->ne[2], 1 };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);
    result->op = GGML_OP_MUL_MAT_ID;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = as;
    result->src[1] = b;
    result->src[2] = ids;
    return result;
}
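
// MoE sketch (added illustration): a typical mixture-of-experts call routes
// each token through the experts picked in `ids` (e.g. by a top-k over router
// logits computed elsewhere); every name below is an assumption.
//
//     // as:  [n_embd, n_ff, n_expert]   stacked expert weights
//     // ids: [n_expert_used, n_tokens]  selected expert indices (I32)
//     // b:   [n_embd, 1, n_tokens]      broadcast over the selected experts
//     struct ggml_tensor * out = ggml_mul_mat_id(ctx, as, b, ids);
//     // out: [n_ff, n_expert_used, n_tokens]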

// ggml_out_prod

struct ggml_tensor * ggml_out_prod(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    GGML_ASSERT(ggml_can_out_prod(a, b));
    GGML_ASSERT(!ggml_is_transposed(a));
    bool is_node = false;
    if (a->grad || b->grad) {
        is_node = true;
    }
    // a is broadcastable to b for ne[2] and ne[3] -> use b->ne[2] and b->ne[3]
    const int64_t ne[4] = { a->ne[0], b->ne[0], b->ne[2], b->ne[3] };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);
    result->op = GGML_OP_OUT_PROD;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;
    return result;
}

// ggml_scale

static struct ggml_tensor * ggml_scale_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        float s,
        bool inplace) {
    GGML_ASSERT(ggml_is_padded_1d(a));
    bool is_node = false;
    if (a->grad) {
        is_node = true;
    }
    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
    ggml_set_op_params(result, &s, sizeof(s));
    result->op = GGML_OP_SCALE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    return result;
}

struct ggml_tensor * ggml_scale(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        float s) {
    return ggml_scale_impl(ctx, a, s, false);
}

struct ggml_tensor * ggml_scale_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        float s) {
    return ggml_scale_impl(ctx, a, s, true);
}

// ggml_set

static struct ggml_tensor * ggml_set_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        size_t nb1,
        size_t nb2,
        size_t nb3,
        size_t offset,
        bool inplace) {
    GGML_ASSERT(ggml_nelements(a) >= ggml_nelements(b));
    bool is_node = false;
    if (a->grad || b->grad) {
        is_node = true;
    }
    // make a view of the destination
    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
    int32_t params[] = { nb1, nb2, nb3, offset, inplace ? 1 : 0 };
    ggml_set_op_params(result, params, sizeof(params));
    result->op = GGML_OP_SET;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;
    return result;
}

struct ggml_tensor * ggml_set(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        size_t nb1,
        size_t nb2,
        size_t nb3,
        size_t offset) {
    return ggml_set_impl(ctx, a, b, nb1, nb2, nb3, offset, false);
}

struct ggml_tensor * ggml_set_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        size_t nb1,
        size_t nb2,
        size_t nb3,
        size_t offset) {
    return ggml_set_impl(ctx, a, b, nb1, nb2, nb3, offset, true);
}

struct ggml_tensor * ggml_set_1d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        size_t offset) {
    return ggml_set_impl(ctx, a, b, a->nb[1], a->nb[2], a->nb[3], offset, false);
}

struct ggml_tensor * ggml_set_1d_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        size_t offset) {
    return ggml_set_impl(ctx, a, b, a->nb[1], a->nb[2], a->nb[3], offset, true);
}

struct ggml_tensor * ggml_set_2d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        size_t nb1,
        size_t offset) {
    return ggml_set_impl(ctx, a, b, nb1, a->nb[2], a->nb[3], offset, false);
}

struct ggml_tensor * ggml_set_2d_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        size_t nb1,
        size_t offset) {
    return ggml_set_impl(ctx, a, b, nb1, a->nb[2], a->nb[3], offset, true);
}

// ggml_cpy

static struct ggml_tensor * ggml_cpy_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    GGML_ASSERT(ggml_nelements(a) == ggml_nelements(b));
    bool is_node = false;
    if (a->grad || b->grad) {
        // inplace is false and either one has a grad
        is_node = true;
    }
    // make a view of the destination
    struct ggml_tensor * result = ggml_view_tensor(ctx, b);
    if (strlen(b->name) > 0) {
        ggml_format_name(result, "%s (copy of %s)", b->name, a->name);
    } else {
        ggml_format_name(result, "%s (copy)", a->name);
    }
    result->op = GGML_OP_CPY;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;
    return result;
}

struct ggml_tensor * ggml_cpy(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_cpy_impl(ctx, a, b);
}

struct ggml_tensor * ggml_cast(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        enum ggml_type type) {
    bool is_node = false;
    struct ggml_tensor * result = ggml_new_tensor(ctx, type, GGML_MAX_DIMS, a->ne);
    ggml_format_name(result, "%s (copy)", a->name);
    result->op = GGML_OP_CPY;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = result;
    return result;
}

// ggml_cont

static struct ggml_tensor * ggml_cont_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    bool is_node = false;
    if (a->grad) {
        is_node = true;
    }
    struct ggml_tensor * result = ggml_dup_tensor(ctx, a);
    ggml_format_name(result, "%s (cont)", a->name);
    result->op = GGML_OP_CONT;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    return result;
}

struct ggml_tensor * ggml_cont(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_cont_impl(ctx, a);
}

// make contiguous, with new shape
GGML_API struct ggml_tensor * ggml_cont_1d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0) {
    return ggml_cont_4d(ctx, a, ne0, 1, 1, 1);
}

GGML_API struct ggml_tensor * ggml_cont_2d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        int64_t ne1) {
    return ggml_cont_4d(ctx, a, ne0, ne1, 1, 1);
}

GGML_API struct ggml_tensor * ggml_cont_3d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        int64_t ne1,
        int64_t ne2) {
    return ggml_cont_4d(ctx, a, ne0, ne1, ne2, 1);
}

struct ggml_tensor * ggml_cont_4d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        int64_t ne1,
        int64_t ne2,
        int64_t ne3) {
    GGML_ASSERT(ggml_nelements(a) == (ne0*ne1*ne2*ne3));
    bool is_node = false;
    struct ggml_tensor * result = ggml_new_tensor_4d(ctx, a->type, ne0, ne1, ne2, ne3);
    ggml_format_name(result, "%s (cont)", a->name);
    result->op = GGML_OP_CONT;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    return result;
}

// ggml_reshape

struct ggml_tensor * ggml_reshape(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    GGML_ASSERT(ggml_is_contiguous(a));
    // as only the shape of b is relevant, and not its memory layout, b is allowed to be non-contiguous
    GGML_ASSERT(ggml_nelements(a) == ggml_nelements(b));
    bool is_node = false;
    if (a->grad) {
        is_node = true;
    }
    if (b->grad) {
        // gradient propagation is not supported
        //GGML_ASSERT(false);
    }
    struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, GGML_MAX_DIMS, b->ne, a, 0);
    ggml_format_name(result, "%s (reshaped)", a->name);
    result->op = GGML_OP_RESHAPE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    return result;
}

struct ggml_tensor * ggml_reshape_1d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0) {
    GGML_ASSERT(ggml_is_contiguous(a));
    GGML_ASSERT(ggml_nelements(a) == ne0);
    bool is_node = false;
    if (a->grad) {
        is_node = true;
    }
    const int64_t ne[1] = { ne0 };
    struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 1, ne, a, 0);
    ggml_format_name(result, "%s (reshaped)", a->name);
    result->op = GGML_OP_RESHAPE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    return result;
}

struct ggml_tensor * ggml_reshape_2d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        int64_t ne1) {
    GGML_ASSERT(ggml_is_contiguous(a));
    GGML_ASSERT(ggml_nelements(a) == ne0*ne1);
    bool is_node = false;
    if (a->grad) {
        is_node = true;
    }
    const int64_t ne[2] = { ne0, ne1 };
    struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 2, ne, a, 0);
    ggml_format_name(result, "%s (reshaped)", a->name);
    result->op = GGML_OP_RESHAPE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    return result;
}

struct ggml_tensor * ggml_reshape_3d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        int64_t ne1,
        int64_t ne2) {
    GGML_ASSERT(ggml_is_contiguous(a));
    GGML_ASSERT(ggml_nelements(a) == ne0*ne1*ne2);
    bool is_node = false;
    if (a->grad) {
        is_node = true;
    }
    const int64_t ne[3] = { ne0, ne1, ne2 };
    struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 3, ne, a, 0);
    ggml_format_name(result, "%s (reshaped)", a->name);
    result->op = GGML_OP_RESHAPE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    return result;
}

struct ggml_tensor * ggml_reshape_4d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        int64_t ne1,
        int64_t ne2,
        int64_t ne3) {
    GGML_ASSERT(ggml_is_contiguous(a));
    GGML_ASSERT(ggml_nelements(a) == ne0*ne1*ne2*ne3);
    bool is_node = false;
    if (a->grad) {
        is_node = true;
    }
    const int64_t ne[4] = { ne0, ne1, ne2, ne3 };
    struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 4, ne, a, 0);
    ggml_format_name(result, "%s (reshaped)", a->name);
    result->op = GGML_OP_RESHAPE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    return result;
}
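
// Reshape sketch (added illustration): reshape returns a view with a new ne[]
// over the same data, hence the contiguity and element-count asserts; the
// shapes below are assumptions.
//
//     struct ggml_tensor * x  = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 12);
//     struct ggml_tensor * x2 = ggml_reshape_2d(ctx, x, 4, 3);  // same 12 floats, viewed as [4, 3]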

static struct ggml_tensor * ggml_view_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_dims,
        const int64_t * ne,
        size_t offset) {
    bool is_node = false;
    if (a->grad) {
        is_node = true;
    }
    struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, n_dims, ne, a, offset);
    ggml_format_name(result, "%s (view)", a->name);
    ggml_set_op_params(result, &offset, sizeof(offset));
    result->op = GGML_OP_VIEW;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    return result;
}

// ggml_view_1d

struct ggml_tensor * ggml_view_1d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        size_t offset) {
    struct ggml_tensor * result = ggml_view_impl(ctx, a, 1, &ne0, offset);
    return result;
}

// ggml_view_2d

struct ggml_tensor * ggml_view_2d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        int64_t ne1,
        size_t nb1,
        size_t offset) {
    const int64_t ne[2] = { ne0, ne1 };
    struct ggml_tensor * result = ggml_view_impl(ctx, a, 2, ne, offset);
    result->nb[1] = nb1;
    result->nb[2] = result->nb[1]*ne1;
    result->nb[3] = result->nb[2];
    return result;
}

// ggml_view_3d

struct ggml_tensor * ggml_view_3d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        int64_t ne1,
        int64_t ne2,
        size_t nb1,
        size_t nb2,
        size_t offset) {
    const int64_t ne[3] = { ne0, ne1, ne2 };
    struct ggml_tensor * result = ggml_view_impl(ctx, a, 3, ne, offset);
    result->nb[1] = nb1;
    result->nb[2] = nb2;
    result->nb[3] = result->nb[2]*ne2;
    return result;
}

// ggml_view_4d

struct ggml_tensor * ggml_view_4d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        int64_t ne1,
        int64_t ne2,
        int64_t ne3,
        size_t nb1,
        size_t nb2,
        size_t nb3,
        size_t offset) {
    const int64_t ne[4] = { ne0, ne1, ne2, ne3 };
    struct ggml_tensor * result = ggml_view_impl(ctx, a, 4, ne, offset);
    result->nb[1] = nb1;
    result->nb[2] = nb2;
    result->nb[3] = nb3;
    return result;
}
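
// View sketch (added illustration): unlike reshape, a view takes explicit
// byte strides and a byte offset, so it can select a sub-block; here, the
// second row of an assumed [4, 3] F32 matrix.
//
//     struct ggml_tensor * m   = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 4, 3);
//     struct ggml_tensor * row = ggml_view_1d(ctx, m, 4, 1*m->nb[1]);  // offset of one row, in bytes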

// ggml_permute

struct ggml_tensor * ggml_permute(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int axis0,
        int axis1,
        int axis2,
        int axis3) {
    GGML_ASSERT(axis0 >= 0 && axis0 < GGML_MAX_DIMS);
    GGML_ASSERT(axis1 >= 0 && axis1 < GGML_MAX_DIMS);
    GGML_ASSERT(axis2 >= 0 && axis2 < GGML_MAX_DIMS);
    GGML_ASSERT(axis3 >= 0 && axis3 < GGML_MAX_DIMS);
    GGML_ASSERT(axis0 != axis1);
    GGML_ASSERT(axis0 != axis2);
    GGML_ASSERT(axis0 != axis3);
    GGML_ASSERT(axis1 != axis2);
    GGML_ASSERT(axis1 != axis3);
    GGML_ASSERT(axis2 != axis3);
    bool is_node = false;
    if (a->grad) {
        is_node = true;
    }
    struct ggml_tensor * result = ggml_view_tensor(ctx, a);
    ggml_format_name(result, "%s (permuted)", a->name);
    int64_t ne[GGML_MAX_DIMS];
    size_t nb[GGML_MAX_DIMS];
    ne[axis0] = a->ne[0];
    ne[axis1] = a->ne[1];
    ne[axis2] = a->ne[2];
    ne[axis3] = a->ne[3];
    nb[axis0] = a->nb[0];
    nb[axis1] = a->nb[1];
    nb[axis2] = a->nb[2];
    nb[axis3] = a->nb[3];
    result->ne[0] = ne[0];
    result->ne[1] = ne[1];
    result->ne[2] = ne[2];
    result->ne[3] = ne[3];
    result->nb[0] = nb[0];
    result->nb[1] = nb[1];
    result->nb[2] = nb[2];
    result->nb[3] = nb[3];
    result->op = GGML_OP_PERMUTE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    int32_t params[] = { axis0, axis1, axis2, axis3 };
    ggml_set_op_params(result, params, sizeof(params));
    return result;
}

// ggml_transpose

struct ggml_tensor * ggml_transpose(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    bool is_node = false;
    if (a->grad) {
        is_node = true;
    }
    struct ggml_tensor * result = ggml_view_tensor(ctx, a);
    ggml_format_name(result, "%s (transposed)", a->name);
    result->ne[0] = a->ne[1];
    result->ne[1] = a->ne[0];
    result->nb[0] = a->nb[1];
    result->nb[1] = a->nb[0];
    result->op = GGML_OP_TRANSPOSE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    return result;
}
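
// Permute sketch (added illustration): both ops only shuffle ne[]/nb[], so the
// result is a non-contiguous view; ops that need contiguous memory require a
// ggml_cont() afterwards. `t` is an assumed 4-D tensor.
//
//     // [a, b, c, d] -> [b, a, c, d]; equivalent to ggml_transpose(ctx, t)
//     struct ggml_tensor * p = ggml_permute(ctx, t, 1, 0, 2, 3);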

// ggml_get_rows

struct ggml_tensor * ggml_get_rows(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    GGML_ASSERT(a->ne[2] == b->ne[1]);
    GGML_ASSERT(b->ne[3] == 1);
    GGML_ASSERT(b->type == GGML_TYPE_I32);
    bool is_node = false;
    if (a->grad || b->grad) {
        is_node = true;
    }
    // TODO: implement non F32 return
    enum ggml_type type = GGML_TYPE_F32;
    if (a->type == GGML_TYPE_I32) {
        type = a->type;
    }
    struct ggml_tensor * result = ggml_new_tensor_4d(ctx, type, a->ne[0], b->ne[0], b->ne[1], b->ne[2]);
    result->op = GGML_OP_GET_ROWS;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;
    return result;
}

// ggml_get_rows_back

struct ggml_tensor * ggml_get_rows_back(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        struct ggml_tensor * c) {
    GGML_ASSERT(ggml_is_matrix(a) && ggml_is_vector(b) && b->type == GGML_TYPE_I32);
    GGML_ASSERT(ggml_is_matrix(c) && (a->ne[0] == c->ne[0]));
    bool is_node = false;
    if (a->grad || b->grad) {
        is_node = true;
    }
    // TODO: implement non F32 return
    //struct ggml_tensor * result = ggml_new_tensor_2d(ctx, a->type, a->ne[0], b->ne[0]);
    struct ggml_tensor * result = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, c->ne[0], c->ne[1]);
    result->op = GGML_OP_GET_ROWS_BACK;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;
    return result;
}

// ggml_diag

struct ggml_tensor * ggml_diag(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    GGML_ASSERT(a->ne[1] == 1);
    bool is_node = false;
    if (a->grad) {
        is_node = true;
    }
    const int64_t ne[4] = { a->ne[0], a->ne[0], a->ne[2], a->ne[3] };
    struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, 4, ne);
    result->op = GGML_OP_DIAG;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    return result;
}

// ggml_diag_mask_inf

static struct ggml_tensor * ggml_diag_mask_inf_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_past,
        bool inplace) {
    bool is_node = false;
    if (a->grad) {
        is_node = true;
    }
    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
    int32_t params[] = { n_past };
    ggml_set_op_params(result, params, sizeof(params));
    result->op = GGML_OP_DIAG_MASK_INF;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    return result;
}

struct ggml_tensor * ggml_diag_mask_inf(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_past) {
    return ggml_diag_mask_inf_impl(ctx, a, n_past, false);
}

struct ggml_tensor * ggml_diag_mask_inf_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_past) {
    return ggml_diag_mask_inf_impl(ctx, a, n_past, true);
}

// ggml_diag_mask_zero

static struct ggml_tensor * ggml_diag_mask_zero_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_past,
        bool inplace) {
    bool is_node = false;
    if (a->grad) {
        is_node = true;
    }
    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
    int32_t params[] = { n_past };
    ggml_set_op_params(result, params, sizeof(params));
    result->op = GGML_OP_DIAG_MASK_ZERO;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    return result;
}

struct ggml_tensor * ggml_diag_mask_zero(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_past) {
    return ggml_diag_mask_zero_impl(ctx, a, n_past, false);
}

struct ggml_tensor * ggml_diag_mask_zero_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_past) {
    return ggml_diag_mask_zero_impl(ctx, a, n_past, true);
}

// ggml_soft_max

static struct ggml_tensor * ggml_soft_max_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * mask,
        float scale,
        float max_bias,
        bool inplace) {
    GGML_ASSERT(ggml_is_contiguous(a));
    if (mask) {
        GGML_ASSERT(mask->type == GGML_TYPE_F16 || mask->type == GGML_TYPE_F32);
        GGML_ASSERT(ggml_is_contiguous(mask));
        GGML_ASSERT(ggml_is_matrix(mask));
        GGML_ASSERT(mask->ne[0] == a->ne[0]);
        GGML_ASSERT(mask->ne[1] >= a->ne[1]);
    }
    if (max_bias > 0.0f) {
        GGML_ASSERT(mask);
    }
    bool is_node = false;
    if (a->grad) {
        is_node = true;
    }
    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
    float params[] = { scale, max_bias };
    ggml_set_op_params(result, params, sizeof(params));
    result->op = GGML_OP_SOFT_MAX;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = mask;
    return result;
}

struct ggml_tensor * ggml_soft_max(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_soft_max_impl(ctx, a, NULL, 1.0f, 0.0f, false);
}

struct ggml_tensor * ggml_soft_max_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_soft_max_impl(ctx, a, NULL, 1.0f, 0.0f, true);
}

struct ggml_tensor * ggml_soft_max_ext(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * mask,
        float scale,
        float max_bias) {
    return ggml_soft_max_impl(ctx, a, mask, scale, max_bias, false);
}

// ggml_soft_max_back

static struct ggml_tensor * ggml_soft_max_back_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        bool inplace) {
    bool is_node = false;
    if (a->grad || b->grad) {
        is_node = true; // TODO : implement backward pass
    }
    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
    result->op = GGML_OP_SOFT_MAX_BACK;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;
    return result;
}

struct ggml_tensor * ggml_soft_max_back(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_soft_max_back_impl(ctx, a, b, false);
}

struct ggml_tensor * ggml_soft_max_back_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_soft_max_back_impl(ctx, a, b, true);
}
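
// Attention sketch (added illustration): soft_max_ext applies
// softmax(a*scale + mask) row-wise; with max_bias > 0 it also adds
// ALiBi-style per-head slopes derived from the mask. `kq` and `kq_mask` are
// assumed to be attention scores and an F16/F32 mask built elsewhere.
//
//     struct ggml_tensor * probs =
//         ggml_soft_max_ext(ctx, kq, kq_mask, 1.0f/sqrtf(n_embd_head), 0.0f);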

// ggml_rope

static struct ggml_tensor * ggml_rope_impl(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        int                   n_dims,
        int                   mode,
        int                   n_ctx,
        int                   n_orig_ctx,
        float                 freq_base,
        float                 freq_scale,
        float                 ext_factor,
        float                 attn_factor,
        float                 beta_fast,
        float                 beta_slow,
        float                 xpos_base,
        bool                  xpos_down,
        bool                  inplace) {
    GGML_ASSERT(ggml_is_vector(b));
    GGML_ASSERT(b->type == GGML_TYPE_I32);
    GGML_ASSERT(a->ne[2] == b->ne[0]);

    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    int32_t params[13] = { /*n_past*/ 0, n_dims, mode, n_ctx, n_orig_ctx };
    memcpy(params +  5, &freq_base,   sizeof(float));
    memcpy(params +  6, &freq_scale,  sizeof(float));
    memcpy(params +  7, &ext_factor,  sizeof(float));
    memcpy(params +  8, &attn_factor, sizeof(float));
    memcpy(params +  9, &beta_fast,   sizeof(float));
    memcpy(params + 10, &beta_slow,   sizeof(float));
    memcpy(params + 11, &xpos_base,   sizeof(float));
    memcpy(params + 12, &xpos_down,   sizeof(bool));
    ggml_set_op_params(result, params, sizeof(params));

    result->op   = GGML_OP_ROPE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

struct ggml_tensor * ggml_rope(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        int                   n_dims,
        int                   mode,
        int                   n_ctx) {
    return ggml_rope_impl(
        ctx, a, b, n_dims, mode, n_ctx, 0, 10000.0f, 1.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, false, false
    );
}

struct ggml_tensor * ggml_rope_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        int                   n_dims,
        int                   mode,
        int                   n_ctx) {
    return ggml_rope_impl(
        ctx, a, b, n_dims, mode, n_ctx, 0, 10000.0f, 1.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, false, true
    );
}

struct ggml_tensor * ggml_rope_custom(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        int                   n_dims,
        int                   mode,
        int                   n_ctx,
        int                   n_orig_ctx,
        float                 freq_base,
        float                 freq_scale,
        float                 ext_factor,
        float                 attn_factor,
        float                 beta_fast,
        float                 beta_slow) {
    return ggml_rope_impl(
        ctx, a, b, n_dims, mode, n_ctx, n_orig_ctx, freq_base, freq_scale,
        ext_factor, attn_factor, beta_fast, beta_slow, 0.0f, false, false
    );
}

struct ggml_tensor * ggml_rope_custom_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        int                   n_dims,
        int                   mode,
        int                   n_ctx,
        int                   n_orig_ctx,
        float                 freq_base,
        float                 freq_scale,
        float                 ext_factor,
        float                 attn_factor,
        float                 beta_fast,
        float                 beta_slow) {
    return ggml_rope_impl(
        ctx, a, b, n_dims, mode, n_ctx, n_orig_ctx, freq_base, freq_scale,
        ext_factor, attn_factor, beta_fast, beta_slow, 0.0f, false, true
    );
}

struct ggml_tensor * ggml_rope_xpos_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        int                   n_dims,
        float                 base,
        bool                  down) {
    return ggml_rope_impl(ctx, a, b, n_dims, 0, 0, 0, 10000.0f, 1.0f, 0.0f, 1.0f, 0.0f, 0.0f, base, down, true);
}
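
// illustrative usage sketch (cur and inp_pos are hypothetical names): a is the
// tensor to rotate, e.g. [n_embd_head, n_head, n_tokens], and b is an I32
// vector of token positions whose length must match a->ne[2]:
//
//   // inp_pos: I32 [n_tokens] holding the absolute position of each token
//   cur = ggml_rope(ctx, cur, inp_pos, n_rot, /*mode =*/ 0, /*n_ctx =*/ 0);
//
// ggml_rope_custom exposes the extended (YaRN-style) frequency parameters;
// the plain ggml_rope wrapper fixes freq_base = 10000.0f and freq_scale = 1.0f.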

// ggml_rope_back

struct ggml_tensor * ggml_rope_back(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        int                   n_dims,
        int                   mode,
        int                   n_ctx,
        int                   n_orig_ctx,
        float                 freq_base,
        float                 freq_scale,
        float                 ext_factor,
        float                 attn_factor,
        float                 beta_fast,
        float                 beta_slow,
        float                 xpos_base,
        bool                  xpos_down) {
    GGML_ASSERT(ggml_is_vector(b));
    GGML_ASSERT(b->type == GGML_TYPE_I32);
    GGML_ASSERT(a->ne[2] == b->ne[0]);

    GGML_ASSERT((mode & 4) == 0 && "ggml_rope_back() for ChatGLM not implemented yet");

    bool is_node = false;

    if (a->grad) {
        is_node = false; // TODO: implement backward
    }

    struct ggml_tensor * result = ggml_dup_tensor(ctx, a);

    int32_t params[13] = { /*n_past*/ 0, n_dims, mode, n_ctx, n_orig_ctx };
    memcpy(params +  5, &freq_base,   sizeof(float));
    memcpy(params +  6, &freq_scale,  sizeof(float));
    memcpy(params +  7, &ext_factor,  sizeof(float));
    memcpy(params +  8, &attn_factor, sizeof(float));
    memcpy(params +  9, &beta_fast,   sizeof(float));
    memcpy(params + 10, &beta_slow,   sizeof(float));
    memcpy(params + 11, &xpos_base,   sizeof(float));
    memcpy(params + 12, &xpos_down,   sizeof(bool));
    ggml_set_op_params(result, params, sizeof(params));

    result->op   = GGML_OP_ROPE_BACK;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

// ggml_clamp

struct ggml_tensor * ggml_clamp(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        float                 min,
        float                 max) {
    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    // TODO: when implementing backward, fix this:
    struct ggml_tensor * result = ggml_view_tensor(ctx, a);

    float params[] = { min, max };
    ggml_set_op_params(result, params, sizeof(params));

    result->op   = GGML_OP_CLAMP;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_conv_1d

static int64_t ggml_calc_conv_output_size(int64_t ins, int64_t ks, int s, int p, int d) {
    return (ins + 2 * p - d * (ks - 1) - 1) / s + 1;
}

GGML_API struct ggml_tensor * ggml_conv_1d(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        int                   s0,
        int                   p0,
        int                   d0) {
    struct ggml_tensor * im2col = ggml_im2col(ctx, a, b, s0, 0, p0, 0, d0, 0, false, GGML_TYPE_F16); // [N, OL, IC * K]

    struct ggml_tensor * result =
        ggml_mul_mat(ctx,
                ggml_reshape_2d(ctx, im2col, im2col->ne[0], (im2col->ne[2] * im2col->ne[1])), // [N, OL, IC * K] => [N*OL, IC * K]
                ggml_reshape_2d(ctx, a, (a->ne[0] * a->ne[1]), a->ne[2]));                    // [OC, IC, K] => [OC, IC * K]

    result = ggml_reshape_3d(ctx, result, im2col->ne[1], a->ne[2], im2col->ne[2]); // [N, OC, OL]

    return result;
}
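
// worked example of the output-size formula above, assuming s0 = 1, p0 = 1,
// d0 = 1 and a kernel of width ks = 3 over an input of length ins = 100:
//
//   OL = (100 + 2*1 - 1*(3 - 1) - 1) / 1 + 1 = 100   // "same" length
//
// i.e. a stride-1, dilation-1 convolution with p0 = ks/2 preserves the input
// length, which is exactly what ggml_conv_1d_ph below relies on.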

// ggml_conv_1d_ph

struct ggml_tensor * ggml_conv_1d_ph(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        int                   s,
        int                   d) {
    return ggml_conv_1d(ctx, a, b, s, a->ne[0] / 2, d);
}

// ggml_conv_transpose_1d

static int64_t ggml_calc_conv_transpose_1d_output_size(int64_t ins, int64_t ks, int s, int p, int d) {
    return (ins - 1) * s - 2 * p + d * (ks - 1) + 1;
}

GGML_API struct ggml_tensor * ggml_conv_transpose_1d(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        int                   s0,
        int                   p0,
        int                   d0) {
    GGML_ASSERT(ggml_is_matrix(b));
    GGML_ASSERT(a->ne[2] == b->ne[1]);
    GGML_ASSERT(a->ne[3] == 1);

    GGML_ASSERT(p0 == 0);
    GGML_ASSERT(d0 == 1);

    bool is_node = false;

    if (a->grad || b->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    const int64_t ne[4] = {
        ggml_calc_conv_transpose_1d_output_size(b->ne[0], a->ne[0], s0, 0 /*p0*/, 1 /*d0*/),
        a->ne[1], b->ne[2], 1,
    };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);

    int32_t params[] = { s0, p0, d0 };
    ggml_set_op_params(result, params, sizeof(params));

    result->op   = GGML_OP_CONV_TRANSPOSE_1D;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

// ggml_conv_depthwise

struct ggml_tensor * ggml_conv_depthwise_2d(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        int                   s0,
        int                   s1,
        int                   p0,
        int                   p1,
        int                   d0,
        int                   d1) {
    struct ggml_tensor * new_a = ggml_reshape_4d(ctx, a, a->ne[0], a->ne[1], 1, a->ne[2] * a->ne[3]);
    struct ggml_tensor * im2col = ggml_im2col(ctx, new_a,
                                        ggml_reshape_4d(ctx, b, b->ne[0], b->ne[1], 1, b->ne[2] * b->ne[3]),
                                        s0, s1, p0, p1, d0, d1, true, GGML_TYPE_F16); // [N * IC, OH, OW, KH * KW]

    struct ggml_tensor * new_b = ggml_reshape_4d(ctx, im2col, im2col->ne[0], im2col->ne[2] * im2col->ne[1], b->ne[2], b->ne[3]); // [N * IC, OH, OW, KH * KW] => [N, IC, OH * OW, KH * KW]

    new_a = ggml_reshape_4d(ctx, new_a, (new_a->ne[0] * new_a->ne[1]), new_a->ne[2], new_a->ne[3], 1); // [OC, 1, KH, KW] => [1, OC, 1, KH * KW]
    struct ggml_tensor * result = ggml_mul_mat(ctx, new_a, new_b);
    result = ggml_reshape_4d(ctx, result, im2col->ne[1], im2col->ne[2], b->ne[2], b->ne[3]); // [N, OC, OH, OW]

    return result;
}

// ggml_conv_2d

// im2col: [N, IC, IH, IW] => [N, OH, OW, IC*KH*KW]
// a: [OC, IC, KH, KW]
// b: [N, IC, IH, IW]
// result: [N, OH, OW, IC*KH*KW]
struct ggml_tensor * ggml_im2col(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        int                   s0,
        int                   s1,
        int                   p0,
        int                   p1,
        int                   d0,
        int                   d1,
        bool                  is_2D,
        enum ggml_type        dst_type) {
    if (is_2D) {
        GGML_ASSERT(a->ne[2] == b->ne[2]);
    } else {
        GGML_ASSERT(a->ne[1] == b->ne[1]);
    }

    bool is_node = false;

    if (a->grad || b->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    const int64_t OH = is_2D ? ggml_calc_conv_output_size(b->ne[1], a->ne[1], s1, p1, d1) : 0;
    const int64_t OW =         ggml_calc_conv_output_size(b->ne[0], a->ne[0], s0, p0, d0);

    const int64_t ne[4] = {
        is_2D ? (a->ne[2] * a->ne[1] * a->ne[0]) : a->ne[1] * a->ne[0],
        OW,
        is_2D ? OH : b->ne[2],
        is_2D ? b->ne[3] : 1,
    };

    struct ggml_tensor * result = ggml_new_tensor(ctx, dst_type, 4, ne);
    int32_t params[] = { s0, s1, p0, p1, d0, d1, (is_2D ? 1 : 0) };
    ggml_set_op_params(result, params, sizeof(params));

    result->op   = GGML_OP_IM2COL;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}
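
// worked shape example (illustrative numbers): with b = [N=1, IC=3, IH=8, IW=8],
// a = [OC=16, IC=3, KH=3, KW=3], s0 = s1 = 1, p0 = p1 = 1, d0 = d1 = 1:
//
//   OH = OW = (8 + 2*1 - 1*(3 - 1) - 1) / 1 + 1 = 8
//   result: [1, 8, 8, 3*3*3] = [N, OH, OW, IC*KH*KW]
//
// each of the N*OH*OW rows holds the unrolled 3x3x3 patch seen by the matching
// output pixel, so the convolution reduces to a single ggml_mul_mat below.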

// a: [OC, IC, KH, KW]
// b: [N, IC, IH, IW]
// result: [N, OC, OH, OW]
struct ggml_tensor * ggml_conv_2d(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        int                   s0,
        int                   s1,
        int                   p0,
        int                   p1,
        int                   d0,
        int                   d1) {
    struct ggml_tensor * im2col = ggml_im2col(ctx, a, b, s0, s1, p0, p1, d0, d1, true, GGML_TYPE_F16); // [N, OH, OW, IC * KH * KW]

    struct ggml_tensor * result =
        ggml_mul_mat(ctx,
                ggml_reshape_2d(ctx, im2col, im2col->ne[0], im2col->ne[3] * im2col->ne[2] * im2col->ne[1]), // [N, OH, OW, IC * KH * KW] => [N*OH*OW, IC * KH * KW]
                ggml_reshape_2d(ctx, a, (a->ne[0] * a->ne[1] * a->ne[2]), a->ne[3]));                       // [OC, IC, KH, KW] => [OC, IC * KH * KW]

    result = ggml_reshape_4d(ctx, result, im2col->ne[1], im2col->ne[2], im2col->ne[3], a->ne[3]); // [OC, N, OH, OW]
    result = ggml_cont(ctx, ggml_permute(ctx, result, 0, 1, 3, 2));                               // [N, OC, OH, OW]

    return result;
}
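
// illustrative usage sketch (x and w are hypothetical names): a 3x3 kernel with
// stride 1 and padding 1 keeps the spatial size, so for x: [N, IC, 32, 32] and
// w: [OC, IC, 3, 3]:
//
//   struct ggml_tensor * y = ggml_conv_2d(ctx, w, x, 1, 1, 1, 1, 1, 1); // [N, OC, 32, 32]
//
// note the argument order: the kernel a comes first, the input b second.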

// ggml_conv_2d_sk_p0

struct ggml_tensor * ggml_conv_2d_sk_p0(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b) {
    return ggml_conv_2d(ctx, a, b, a->ne[0], a->ne[1], 0, 0, 1, 1);
}

// ggml_conv_2d_s1_ph

struct ggml_tensor * ggml_conv_2d_s1_ph(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b) {
    return ggml_conv_2d(ctx, a, b, 1, 1, a->ne[0] / 2, a->ne[1] / 2, 1, 1);
}

// ggml_conv_transpose_2d_p0

static int64_t ggml_calc_conv_transpose_output_size(int64_t ins, int64_t ks, int s, int p) {
    return (ins - 1) * s - 2 * p + ks;
}

struct ggml_tensor * ggml_conv_transpose_2d_p0(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        int                   stride) {
    GGML_ASSERT(a->ne[3] == b->ne[2]);

    bool is_node = false;

    if (a->grad || b->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    const int64_t ne[4] = {
        ggml_calc_conv_transpose_output_size(b->ne[0], a->ne[0], stride, 0 /*p0*/),
        ggml_calc_conv_transpose_output_size(b->ne[1], a->ne[1], stride, 0 /*p1*/),
        a->ne[2], b->ne[3],
    };

    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);

    ggml_set_op_params_i32(result, 0, stride);

    result->op   = GGML_OP_CONV_TRANSPOSE_2D;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

// ggml_pool_*

static int64_t ggml_calc_pool_output_size(int64_t ins, int ks, int s, float p) {
    return (ins + 2 * p - ks) / s + 1;
}

// ggml_pool_1d

struct ggml_tensor * ggml_pool_1d(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        enum ggml_op_pool     op,
        int                   k0,
        int                   s0,
        int                   p0) {
    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    const int64_t ne[4] = {
        ggml_calc_pool_output_size(a->ne[0], k0, s0, p0),
        a->ne[1],
        a->ne[2],
        a->ne[3],
    };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);

    int32_t params[] = { op, k0, s0, p0 };
    ggml_set_op_params(result, params, sizeof(params));

    result->op   = GGML_OP_POOL_1D;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_pool_2d

struct ggml_tensor * ggml_pool_2d(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        enum ggml_op_pool     op,
        int                   k0,
        int                   k1,
        int                   s0,
        int                   s1,
        float                 p0,
        float                 p1) {
    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    struct ggml_tensor * result;
    const int64_t ne[3] = {
        ggml_calc_pool_output_size(a->ne[0], k0, s0, p0),
        ggml_calc_pool_output_size(a->ne[1], k1, s1, p1),
        a->ne[2],
    };
    result = ggml_new_tensor(ctx, GGML_TYPE_F32, 3, ne);

    int32_t params[] = { op, k0, k1, s0, s1, p0, p1 };
    ggml_set_op_params(result, params, sizeof(params));

    result->op   = GGML_OP_POOL_2D;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}
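
// worked example (illustrative): a 2x2 average pool with stride 2 and no
// padding halves each spatial dimension, e.g. for x: [32, 32, C]:
//
//   struct ggml_tensor * y = ggml_pool_2d(ctx, x, GGML_OP_POOL_AVG, 2, 2, 2, 2, 0, 0);
//   // y: [16, 16, C], since (32 + 2*0 - 2)/2 + 1 == 16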

// ggml_upscale

static struct ggml_tensor * ggml_upscale_impl(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        int                   scale_factor) {
    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    struct ggml_tensor * result = ggml_new_tensor_4d(ctx, a->type,
            a->ne[0] * scale_factor,
            a->ne[1] * scale_factor,
            a->ne[2], a->ne[3]);

    result->op = GGML_OP_UPSCALE;
    result->op_params[0] = scale_factor;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_pad(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        int p0, int p1, int p2, int p3) {
    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    struct ggml_tensor * result = ggml_new_tensor_4d(ctx, a->type,
            a->ne[0] + p0,
            a->ne[1] + p1,
            a->ne[2] + p2,
            a->ne[3] + p3);

    result->op   = GGML_OP_PAD;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_upscale(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        int                   scale_factor) {
    return ggml_upscale_impl(ctx, a, scale_factor);
}

struct ggml_tensor * ggml_arange(
        struct ggml_context * ctx,
        float                 start,
        float                 stop,
        float                 step) {
    GGML_ASSERT(stop > start);

    const int64_t steps = (int64_t) ceilf((stop - start) / step);

    struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, steps);

    result->op = GGML_OP_ARANGE;
    ggml_set_op_params_f32(result, 0, start);
    ggml_set_op_params_f32(result, 1, stop);
    ggml_set_op_params_f32(result, 2, step);

    return result;
}
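
// e.g. ggml_arange(ctx, 0.0f, 5.0f, 1.0f) yields an F32 tensor of
// ceilf((5 - 0) / 1) = 5 elements: { 0, 1, 2, 3, 4 } — the stop value is
// exclusive, matching the usual half-open-interval convention.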

struct ggml_tensor * ggml_timestep_embedding(
        struct ggml_context * ctx,
        struct ggml_tensor  * timesteps,
        int                   dim,
        int                   max_period) {
    bool is_node = false;

    if (timesteps->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    int actual_dim = dim;
    if (dim % 2 != 0) {
        actual_dim = dim + 1;
    }

    struct ggml_tensor * result = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, actual_dim, timesteps->ne[0]);

    result->op = GGML_OP_TIMESTEP_EMBEDDING;
    ggml_set_op_params_i32(result, 0, dim);
    ggml_set_op_params_i32(result, 1, max_period);

    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = timesteps;

    return result;
}

// ggml_argsort

struct ggml_tensor * ggml_argsort(
        struct ggml_context  * ctx,
        struct ggml_tensor   * a,
        enum ggml_sort_order   order) {
    bool is_node = false;

    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_I32, GGML_MAX_DIMS, a->ne);

    ggml_set_op_params_i32(result, 0, (int32_t) order);

    result->op   = GGML_OP_ARGSORT;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_top_k

struct ggml_tensor * ggml_top_k(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        int                   k) {
    GGML_ASSERT(a->ne[0] >= k);

    struct ggml_tensor * result = ggml_argsort(ctx, a, GGML_SORT_ORDER_DESC);

    result = ggml_view_4d(ctx, result,
                k, result->ne[1], result->ne[2], result->ne[3],
                   result->nb[1], result->nb[2], result->nb[3],
                0);

    return result;
}
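
// illustrative example: with a of shape [n_vocab, n_tokens],
// ggml_top_k(ctx, a, 40) returns an I32 view of shape [40, n_tokens] holding,
// per row, the indices of the 40 largest values — a full descending argsort
// truncated to its first k columns.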

// ggml_flash_attn

struct ggml_tensor * ggml_flash_attn(
        struct ggml_context * ctx,
        struct ggml_tensor  * q,
        struct ggml_tensor  * k,
        struct ggml_tensor  * v,
        bool                  masked) {
    GGML_ASSERT(ggml_can_mul_mat(k, q));
    // TODO: check if vT can be multiplied by (k*qT)

    bool is_node = false;

    if (q->grad || k->grad || v->grad) {
        is_node = true;
    }

    //struct ggml_tensor * result = ggml_dup_tensor(ctx, q);
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, GGML_MAX_DIMS, q->ne);

    int32_t t = masked ? 1 : 0;
    ggml_set_op_params(result, &t, sizeof(t));

    result->op   = GGML_OP_FLASH_ATTN;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = q;
    result->src[1] = k;
    result->src[2] = v;

    return result;
}

// ggml_flash_attn_ext

struct ggml_tensor * ggml_flash_attn_ext(
        struct ggml_context * ctx,
        struct ggml_tensor  * q,
        struct ggml_tensor  * k,
        struct ggml_tensor  * v,
        struct ggml_tensor  * mask,
        float                 scale,
        float                 max_bias) {
    GGML_ASSERT(ggml_can_mul_mat(k, q));
    // TODO: check if vT can be multiplied by (k*qT)

    if (mask) {
        GGML_ASSERT(ggml_is_contiguous(mask));
        GGML_ASSERT(mask->ne[2] == 1);
        GGML_ASSERT(mask->ne[3] == 1);
        GGML_ASSERT(mask->ne[1] >= GGML_PAD(q->ne[1], GGML_KQ_MASK_PAD) &&
                "the Flash-Attention kernel requires the mask to be padded to GGML_KQ_MASK_PAD and at least n_queries big");
        //GGML_ASSERT(ggml_can_repeat_rows(mask, qk));
    }

    if (max_bias > 0.0f) {
        GGML_ASSERT(mask);
    }

    bool is_node = false;

    if (q->grad || k->grad || v->grad) {
        is_node = true;
    }

    // permute(0, 2, 1, 3)
    int64_t ne[4] = { q->ne[0], q->ne[2], q->ne[1], q->ne[3] };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);

    float params[] = { scale, max_bias };
    ggml_set_op_params(result, params, sizeof(params));

    result->op   = GGML_OP_FLASH_ATTN_EXT;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = q;
    result->src[1] = k;
    result->src[2] = v;
    result->src[3] = mask;

    return result;
}

void ggml_flash_attn_ext_set_prec(
        struct ggml_tensor * a,
        enum ggml_prec       prec) {
    GGML_ASSERT(a->op == GGML_OP_FLASH_ATTN_EXT);

    const int32_t prec_i32 = (int32_t) prec;

    ggml_set_op_params_i32(a, 2, prec_i32); // scale is on first pos, max_bias on second
}
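
// illustrative usage sketch (q, k, v, kq_mask and kq_scale are hypothetical
// names): the mask must be padded so that mask->ne[1] >= GGML_PAD(q->ne[1],
// GGML_KQ_MASK_PAD), per the assert above, and the result comes back with the
// head and token dims swapped relative to q (the permute(0, 2, 1, 3) noted above):
//
//   struct ggml_tensor * cur = ggml_flash_attn_ext(ctx, q, k, v, kq_mask, kq_scale, 0.0f);
//   ggml_flash_attn_ext_set_prec(cur, GGML_PREC_F32); // optional: accumulate in F32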

// ggml_flash_ff

struct ggml_tensor * ggml_flash_ff(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b0,
        struct ggml_tensor  * b1,
        struct ggml_tensor  * c0,
        struct ggml_tensor  * c1) {
    GGML_ASSERT(ggml_can_mul_mat(b0, a));
    // TODO: more checks

    bool is_node = false;

    if (a->grad || b0->grad || b1->grad || c0->grad || c1->grad) {
        is_node = true;
    }

    //struct ggml_tensor * result = ggml_dup_tensor(ctx, a);
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, GGML_MAX_DIMS, a->ne);

    result->op   = GGML_OP_FLASH_FF;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b0;
    result->src[2] = b1;
    result->src[3] = c0;
    result->src[4] = c1;

    return result;
}

// ggml_flash_attn_back

struct ggml_tensor * ggml_flash_attn_back(
        struct ggml_context * ctx,
        struct ggml_tensor  * q,
        struct ggml_tensor  * k,
        struct ggml_tensor  * v,
        struct ggml_tensor  * d,
        bool                  masked) {
    GGML_ASSERT(ggml_can_mul_mat(k, q));
    // TODO: check if vT can be multiplied by (k*qT)

    // d shape [D,N,ne2,ne3]
    // q shape [D,N,ne2,ne3]
    // k shape [D,M,kvne2,ne3]
    // v shape [M,D,kvne2,ne3]

    const int64_t     D = q->ne[0];
    const int64_t     N = q->ne[1];
    const int64_t     M = k->ne[1];
    const int64_t   ne2 = q->ne[2];
    const int64_t   ne3 = q->ne[3];
    const int64_t kvne2 = k->ne[2];

    GGML_ASSERT(k->ne[0] == D);
    GGML_ASSERT(v->ne[0] == M);
    GGML_ASSERT(v->ne[1] == D);
    GGML_ASSERT(d->ne[0] == D);
    GGML_ASSERT(d->ne[1] == N);
    GGML_ASSERT(k->ne[2] == kvne2);
    GGML_ASSERT(k->ne[3] == ne3);
    GGML_ASSERT(v->ne[2] == kvne2);
    GGML_ASSERT(v->ne[3] == ne3);
    GGML_ASSERT(d->ne[2] == ne2);
    GGML_ASSERT(d->ne[3] == ne3);

    GGML_ASSERT(ne2 % kvne2 == 0);

    bool is_node = false;

    if (q->grad || k->grad || v->grad) {
        // when using this operation (in backwards pass) these grads are set.
        // we don't want to create (big) grad of our result, so is_node is false.
        is_node = false;
    }

    // store gradients of q, k and v as continuous tensors concatenated in result.
    // note: v and gradv are actually transposed, i.e. v->ne[0] != D.
    const int64_t elem_q = ggml_nelements(q);
    const int64_t elem_k = ggml_nelements(k);
    const int64_t elem_v = ggml_nelements(v);

    enum ggml_type result_type = GGML_TYPE_F32;
    GGML_ASSERT(ggml_blck_size(result_type) == 1);
    const size_t tsize = ggml_type_size(result_type);

    const size_t offs_q = 0;
    const size_t offs_k = offs_q + GGML_PAD(elem_q * tsize, GGML_MEM_ALIGN);
    const size_t offs_v = offs_k + GGML_PAD(elem_k * tsize, GGML_MEM_ALIGN);
    const size_t end    = offs_v + GGML_PAD(elem_v * tsize, GGML_MEM_ALIGN);

    const size_t nelements = (end + tsize - 1)/tsize;

    struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nelements);

    int32_t masked_i = masked ? 1 : 0;
    ggml_set_op_params(result, &masked_i, sizeof(masked_i));

    result->op   = GGML_OP_FLASH_ATTN_BACK;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = q;
    result->src[1] = k;
    result->src[2] = v;
    result->src[3] = d;

    return result;
}

// ggml_ssm_conv

struct ggml_tensor * ggml_ssm_conv(
        struct ggml_context * ctx,
        struct ggml_tensor  * s,
        struct ggml_tensor  * x,
        struct ggml_tensor  * c,
        struct ggml_tensor  * sq) {
    GGML_ASSERT(ggml_is_3d(s));
    GGML_ASSERT(ggml_is_matrix(x));
    GGML_ASSERT(ggml_is_matrix(c));
    GGML_ASSERT(ggml_is_matrix(sq));
    GGML_ASSERT(sq->type == GGML_TYPE_I32);

    const int64_t d_conv   = c->ne[0];
    const int64_t d_inner  = c->ne[1];
    const int64_t n_tokens = x->ne[1];
    const int64_t n_kv     = s->ne[2];

    GGML_ASSERT( s->ne[0] == d_conv - 1);
    GGML_ASSERT( s->ne[1] == d_inner);
    GGML_ASSERT( x->ne[0] == d_inner);
    GGML_ASSERT(sq->ne[0] == n_kv);
    GGML_ASSERT(sq->ne[1] == n_tokens);

    bool is_node = false;

    if (s->grad || x->grad || c->grad || sq->grad) {
        GGML_ASSERT(false); // TODO: implement
        is_node = true;
    }

    // 2-in-1 concatenated x and conv_states, {d_inner, n_tokens} with {d_conv, d_inner, n_kv}
    struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, (d_inner*n_tokens) + (d_conv*d_inner*n_kv));

    result->op   = GGML_OP_SSM_CONV;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = s;
    result->src[1] = x;
    result->src[2] = c;
    result->src[3] = sq;

    return result;
}

// ggml_ssm_scan

struct ggml_tensor * ggml_ssm_scan(
        struct ggml_context * ctx,
        struct ggml_tensor  * s,
        struct ggml_tensor  * x,
        struct ggml_tensor  * dt,
        struct ggml_tensor  * A,
        struct ggml_tensor  * B,
        struct ggml_tensor  * C,
        struct ggml_tensor  * sq) {
    GGML_ASSERT(ggml_is_contiguous(s));
    GGML_ASSERT(ggml_is_contiguous(x));
    GGML_ASSERT(ggml_is_contiguous(dt));
    GGML_ASSERT(ggml_is_contiguous(A));
    GGML_ASSERT(sq->type == GGML_TYPE_I32);
    GGML_ASSERT(B->nb[0] == ggml_type_size(B->type));
    GGML_ASSERT(C->nb[0] == ggml_type_size(C->type));
    GGML_ASSERT(ggml_are_same_shape(x, dt));

    {
        const int64_t d_state  = s->ne[0];
        const int64_t d_inner  = s->ne[1];
        const int64_t n_tokens = x->ne[1];

        GGML_ASSERT(x->ne[0] == d_inner);
        GGML_ASSERT(A->ne[0] == d_state);
        GGML_ASSERT(A->ne[1] == d_inner);
        GGML_ASSERT(B->ne[0] == d_state);
        GGML_ASSERT(B->ne[1] == n_tokens);
        GGML_ASSERT(C->ne[0] == d_state);
        GGML_ASSERT(C->ne[1] == n_tokens);
    }

    bool is_node = false;

    if (s->grad || x->grad || dt->grad || A->grad || B->grad || C->grad || sq->grad) {
        GGML_ASSERT(false); // TODO: implement
        is_node = true;
    }

    // 2-in-1 concatenated y and ssm_states, {d_inner, n_tokens} with {d_state, d_inner, n_kv}
    struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, ggml_nelements(x) + ggml_nelements(s));

    result->op   = GGML_OP_SSM_SCAN;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = s;
    result->src[1] = x;
    result->src[2] = dt;
    result->src[3] = A;
    result->src[4] = B;
    result->src[5] = C;
    result->src[6] = sq;

    return result;
}

// ggml_win_part

struct ggml_tensor * ggml_win_part(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        int                   w) {
    GGML_ASSERT(a->ne[3] == 1);
    GGML_ASSERT(a->type == GGML_TYPE_F32);

    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    // padding
    const int px = (w - a->ne[1]%w)%w;
    const int py = (w - a->ne[2]%w)%w;

    const int npx = (px + a->ne[1])/w;
    const int npy = (py + a->ne[2])/w;
    const int np  = npx*npy;

    const int64_t ne[4] = { a->ne[0], w, w, np, };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);

    int32_t params[] = { npx, npy, w };
    ggml_set_op_params(result, params, sizeof(params));

    result->op   = GGML_OP_WIN_PART;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_win_unpart

struct ggml_tensor * ggml_win_unpart(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        int                   w0,
        int                   h0,
        int                   w) {
    GGML_ASSERT(a->type == GGML_TYPE_F32);

    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    const int64_t ne[4] = { a->ne[0], w0, h0, 1, };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 3, ne);

    int32_t params[] = { w };
    ggml_set_op_params(result, params, sizeof(params));

    result->op   = GGML_OP_WIN_UNPART;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_get_rel_pos

struct ggml_tensor * ggml_get_rel_pos(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        int                   qh,
        int                   kh) {
    GGML_ASSERT(qh == kh);
    GGML_ASSERT(2*MAX(qh, kh) - 1 == a->ne[1]);

    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    const int64_t ne[4] = { a->ne[0], kh, qh, 1, };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F16, 3, ne);

    result->op   = GGML_OP_GET_REL_POS;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_add_rel_pos

static struct ggml_tensor * ggml_add_rel_pos_impl(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * pw,
        struct ggml_tensor  * ph,
        bool                  inplace) {
    GGML_ASSERT(ggml_are_same_shape(pw, ph));
    GGML_ASSERT(ggml_is_contiguous(a));
    GGML_ASSERT(ggml_is_contiguous(pw));
    GGML_ASSERT(ggml_is_contiguous(ph));
    GGML_ASSERT(ph->type == GGML_TYPE_F32);
    GGML_ASSERT(pw->type == GGML_TYPE_F32);
    GGML_ASSERT(pw->ne[3] == a->ne[2]);
    GGML_ASSERT(pw->ne[0]*pw->ne[0] == a->ne[0]);
    GGML_ASSERT(pw->ne[1]*pw->ne[2] == a->ne[1]);

    bool is_node = false;

    if (!inplace && (a->grad || pw->grad || ph->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
    ggml_set_op_params_i32(result, 0, inplace ? 1 : 0);

    result->op   = GGML_OP_ADD_REL_POS;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = pw;
    result->src[2] = ph;

    return result;
}

struct ggml_tensor * ggml_add_rel_pos(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * pw,
        struct ggml_tensor  * ph) {
    return ggml_add_rel_pos_impl(ctx, a, pw, ph, false);
}

struct ggml_tensor * ggml_add_rel_pos_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * pw,
        struct ggml_tensor  * ph) {
    return ggml_add_rel_pos_impl(ctx, a, pw, ph, true);
}

// ggml_unary

static struct ggml_tensor * ggml_unary_impl(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        enum ggml_unary_op    op,
        bool                  inplace) {
    bool is_node = false;

    if (!inplace && (a->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    ggml_set_op_params_i32(result, 0, (int32_t) op);

    result->op   = GGML_OP_UNARY;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_unary(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        enum ggml_unary_op    op) {
    return ggml_unary_impl(ctx, a, op, false);
}

struct ggml_tensor * ggml_unary_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        enum ggml_unary_op    op) {
    return ggml_unary_impl(ctx, a, op, true);
}

// ggml_map_unary

static struct ggml_tensor * ggml_map_unary_impl_f32(
        struct ggml_context        * ctx,
        struct ggml_tensor         * a,
        const  ggml_unary_op_f32_t   fun,
        bool                         inplace) {
    bool is_node = false;

    if (!inplace && a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    ggml_set_op_params(result, (const void *) &fun, sizeof(fun));

    result->op   = GGML_OP_MAP_UNARY;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_map_unary_f32(
        struct ggml_context        * ctx,
        struct ggml_tensor         * a,
        const  ggml_unary_op_f32_t   fun) {
    return ggml_map_unary_impl_f32(ctx, a, fun, false);
}

struct ggml_tensor * ggml_map_unary_inplace_f32(
        struct ggml_context        * ctx,
        struct ggml_tensor         * a,
        const  ggml_unary_op_f32_t   fun) {
    return ggml_map_unary_impl_f32(ctx, a, fun, true);
}

// ggml_map_binary

static struct ggml_tensor * ggml_map_binary_impl_f32(
        struct ggml_context         * ctx,
        struct ggml_tensor          * a,
        struct ggml_tensor          * b,
        const  ggml_binary_op_f32_t   fun,
        bool                          inplace) {
    GGML_ASSERT(ggml_are_same_shape(a, b));

    bool is_node = false;

    if (!inplace && (a->grad || b->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    ggml_set_op_params(result, (const void *) &fun, sizeof(fun));

    result->op   = GGML_OP_MAP_BINARY;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

struct ggml_tensor * ggml_map_binary_f32(
        struct ggml_context         * ctx,
        struct ggml_tensor          * a,
        struct ggml_tensor          * b,
        const  ggml_binary_op_f32_t   fun) {
    return ggml_map_binary_impl_f32(ctx, a, b, fun, false);
}

struct ggml_tensor * ggml_map_binary_inplace_f32(
        struct ggml_context         * ctx,
        struct ggml_tensor          * a,
        struct ggml_tensor          * b,
        const  ggml_binary_op_f32_t   fun) {
    return ggml_map_binary_impl_f32(ctx, a, b, fun, true);
}

// ggml_map_custom1_f32

static struct ggml_tensor * ggml_map_custom1_impl_f32(
        struct ggml_context          * ctx,
        struct ggml_tensor           * a,
        const  ggml_custom1_op_f32_t   fun,
        bool                           inplace) {
    bool is_node = false;

    if (!inplace && a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    ggml_set_op_params(result, (const void *) &fun, sizeof(fun));

    result->op   = GGML_OP_MAP_CUSTOM1_F32;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_map_custom1_f32(
        struct ggml_context          * ctx,
        struct ggml_tensor           * a,
        const  ggml_custom1_op_f32_t   fun) {
    return ggml_map_custom1_impl_f32(ctx, a, fun, false);
}

struct ggml_tensor * ggml_map_custom1_inplace_f32(
        struct ggml_context          * ctx,
        struct ggml_tensor           * a,
        const  ggml_custom1_op_f32_t   fun) {
    return ggml_map_custom1_impl_f32(ctx, a, fun, true);
}

// ggml_map_custom2_f32

static struct ggml_tensor * ggml_map_custom2_impl_f32(
        struct ggml_context          * ctx,
        struct ggml_tensor           * a,
        struct ggml_tensor           * b,
        const  ggml_custom2_op_f32_t   fun,
        bool                           inplace) {
    bool is_node = false;

    if (!inplace && (a->grad || b->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    ggml_set_op_params(result, (const void *) &fun, sizeof(fun));

    result->op   = GGML_OP_MAP_CUSTOM2_F32;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

struct ggml_tensor * ggml_map_custom2_f32(
        struct ggml_context          * ctx,
        struct ggml_tensor           * a,
        struct ggml_tensor           * b,
        const  ggml_custom2_op_f32_t   fun) {
    return ggml_map_custom2_impl_f32(ctx, a, b, fun, false);
}

struct ggml_tensor * ggml_map_custom2_inplace_f32(
        struct ggml_context          * ctx,
        struct ggml_tensor           * a,
        struct ggml_tensor           * b,
        const  ggml_custom2_op_f32_t   fun) {
    return ggml_map_custom2_impl_f32(ctx, a, b, fun, true);
}

// ggml_map_custom3_f32

static struct ggml_tensor * ggml_map_custom3_impl_f32(
        struct ggml_context          * ctx,
        struct ggml_tensor           * a,
        struct ggml_tensor           * b,
        struct ggml_tensor           * c,
        const  ggml_custom3_op_f32_t   fun,
        bool                           inplace) {
    bool is_node = false;

    if (!inplace && (a->grad || b->grad || c->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    ggml_set_op_params(result, (const void *) &fun, sizeof(fun));

    result->op   = GGML_OP_MAP_CUSTOM3_F32;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;
    result->src[2] = c;

    return result;
}

struct ggml_tensor * ggml_map_custom3_f32(
        struct ggml_context          * ctx,
        struct ggml_tensor           * a,
        struct ggml_tensor           * b,
        struct ggml_tensor           * c,
        const  ggml_custom3_op_f32_t   fun) {
    return ggml_map_custom3_impl_f32(ctx, a, b, c, fun, false);
}

struct ggml_tensor * ggml_map_custom3_inplace_f32(
        struct ggml_context          * ctx,
        struct ggml_tensor           * a,
        struct ggml_tensor           * b,
        struct ggml_tensor           * c,
        const  ggml_custom3_op_f32_t   fun) {
    return ggml_map_custom3_impl_f32(ctx, a, b, c, fun, true);
}

// ggml_map_custom1

struct ggml_map_custom1_op_params {
    ggml_custom1_op_t  fun;
    int                n_tasks;
    void             * userdata;
};

static struct ggml_tensor * ggml_map_custom1_impl(
        struct ggml_context      * ctx,
        struct ggml_tensor       * a,
        const  ggml_custom1_op_t   fun,
        int                        n_tasks,
        void                     * userdata,
        bool                       inplace) {
    GGML_ASSERT(n_tasks == GGML_N_TASKS_MAX || n_tasks > 0);

    bool is_node = false;

    if (!inplace && a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    struct ggml_map_custom1_op_params params = {
        /*.fun      =*/ fun,
        /*.n_tasks  =*/ n_tasks,
        /*.userdata =*/ userdata
    };
    ggml_set_op_params(result, (const void *) &params, sizeof(params));

    result->op   = GGML_OP_MAP_CUSTOM1;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_map_custom1(
        struct ggml_context      * ctx,
        struct ggml_tensor       * a,
        const  ggml_custom1_op_t   fun,
        int                        n_tasks,
        void                     * userdata) {
    return ggml_map_custom1_impl(ctx, a, fun, n_tasks, userdata, false);
}

struct ggml_tensor * ggml_map_custom1_inplace(
        struct ggml_context      * ctx,
        struct ggml_tensor       * a,
        const  ggml_custom1_op_t   fun,
        int                        n_tasks,
        void                     * userdata) {
    return ggml_map_custom1_impl(ctx, a, fun, n_tasks, userdata, true);
}
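
// illustrative usage sketch (my_op is a hypothetical callback): the function
// receives its thread index ith out of nth threads and is expected to slice
// the work itself, e.g. by rows; passing GGML_N_TASKS_MAX requests all threads:
//
//   static void my_op(struct ggml_tensor * dst, const struct ggml_tensor * a,
//                     int ith, int nth, void * userdata) {
//       // process rows ith, ith + nth, ith + 2*nth, ... writing into dst
//   }
//
//   struct ggml_tensor * y = ggml_map_custom1(ctx, x, my_op, GGML_N_TASKS_MAX, NULL);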

// ggml_map_custom2

struct ggml_map_custom2_op_params {
    ggml_custom2_op_t  fun;
    int                n_tasks;
    void             * userdata;
};

static struct ggml_tensor * ggml_map_custom2_impl(
        struct ggml_context      * ctx,
        struct ggml_tensor       * a,
        struct ggml_tensor       * b,
        const  ggml_custom2_op_t   fun,
        int                        n_tasks,
        void                     * userdata,
        bool                       inplace) {
    GGML_ASSERT(n_tasks == GGML_N_TASKS_MAX || n_tasks > 0);

    bool is_node = false;

    if (!inplace && (a->grad || b->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    struct ggml_map_custom2_op_params params = {
        /*.fun      =*/ fun,
        /*.n_tasks  =*/ n_tasks,
        /*.userdata =*/ userdata
    };
    ggml_set_op_params(result, (const void *) &params, sizeof(params));

    result->op   = GGML_OP_MAP_CUSTOM2;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

struct ggml_tensor * ggml_map_custom2(
        struct ggml_context      * ctx,
        struct ggml_tensor       * a,
        struct ggml_tensor       * b,
        const  ggml_custom2_op_t   fun,
        int                        n_tasks,
        void                     * userdata) {
    return ggml_map_custom2_impl(ctx, a, b, fun, n_tasks, userdata, false);
}

struct ggml_tensor * ggml_map_custom2_inplace(
        struct ggml_context      * ctx,
        struct ggml_tensor       * a,
        struct ggml_tensor       * b,
        const  ggml_custom2_op_t   fun,
        int                        n_tasks,
        void                     * userdata) {
    return ggml_map_custom2_impl(ctx, a, b, fun, n_tasks, userdata, true);
}

// ggml_map_custom3

struct ggml_map_custom3_op_params {
    ggml_custom3_op_t  fun;
    int                n_tasks;
    void             * userdata;
};

static struct ggml_tensor * ggml_map_custom3_impl(
        struct ggml_context      * ctx,
        struct ggml_tensor       * a,
        struct ggml_tensor       * b,
        struct ggml_tensor       * c,
        const  ggml_custom3_op_t   fun,
        int                        n_tasks,
        void                     * userdata,
        bool                       inplace) {
    GGML_ASSERT(n_tasks == GGML_N_TASKS_MAX || n_tasks > 0);

    bool is_node = false;

    if (!inplace && (a->grad || b->grad || c->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    struct ggml_map_custom3_op_params params = {
        /*.fun      =*/ fun,
        /*.n_tasks  =*/ n_tasks,
        /*.userdata =*/ userdata
    };
    ggml_set_op_params(result, (const void *) &params, sizeof(params));

    result->op   = GGML_OP_MAP_CUSTOM3;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;
    result->src[2] = c;

    return result;
}

struct ggml_tensor * ggml_map_custom3(
        struct ggml_context      * ctx,
        struct ggml_tensor       * a,
        struct ggml_tensor       * b,
        struct ggml_tensor       * c,
        const  ggml_custom3_op_t   fun,
        int                        n_tasks,
        void                     * userdata) {
    return ggml_map_custom3_impl(ctx, a, b, c, fun, n_tasks, userdata, false);
}

struct ggml_tensor * ggml_map_custom3_inplace(
        struct ggml_context      * ctx,
        struct ggml_tensor       * a,
        struct ggml_tensor       * b,
        struct ggml_tensor       * c,
        const  ggml_custom3_op_t   fun,
        int                        n_tasks,
        void                     * userdata) {
    return ggml_map_custom3_impl(ctx, a, b, c, fun, n_tasks, userdata, true);
}

// ggml_cross_entropy_loss

struct ggml_tensor * ggml_cross_entropy_loss(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b) {
    GGML_ASSERT(ggml_are_same_shape(a, b));

    bool is_node = false;

    if (a->grad || b->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = ggml_new_tensor_1d(ctx, a->type, 1);

    result->op   = GGML_OP_CROSS_ENTROPY_LOSS;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}
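
// illustrative usage sketch (logits and labels are hypothetical names): both
// inputs must have the same shape, and the loss comes back as a single scalar:
//
//   struct ggml_tensor * loss = ggml_cross_entropy_loss(ctx, logits, labels);
//   // loss has 1 element; read it out after computing the graph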

// ggml_cross_entropy_loss_back

struct ggml_tensor * ggml_cross_entropy_loss_back(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        struct ggml_tensor  * c) {
    GGML_ASSERT(ggml_are_same_shape(a, b));
    GGML_ASSERT(ggml_is_scalar(c));

    struct ggml_tensor * result = ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_CROSS_ENTROPY_LOSS_BACK;
    result->grad = NULL;
    result->src[0] = a;
    result->src[1] = b;
    result->src[2] = c;

    return result;
}

////////////////////////////////////////////////////////////////////////////////

void ggml_set_param(
        struct ggml_context * ctx,
        struct ggml_tensor  * tensor) {
    tensor->flags |= GGML_TENSOR_FLAG_PARAM;

    GGML_ASSERT(tensor->grad == NULL);
    tensor->grad = ggml_dup_tensor(ctx, tensor);
    ggml_format_name(tensor->grad, "%s (grad)", tensor->name);
}
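
// illustrative usage sketch: marking a tensor as a parameter is what makes the
// autodiff machinery allocate and track a gradient for it, so it must happen
// before the backward graph is built:
//
//   struct ggml_tensor * w = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_in, n_out);
//   ggml_set_param(ctx, w); // sets GGML_TENSOR_FLAG_PARAM and allocates w->grad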

// ggml_compute_forward_dup

static void ggml_compute_forward_dup_same_cont(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));
    GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));
    GGML_ASSERT(src0->type == dst->type);

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    const size_t nb00 = src0->nb[0];
    const size_t nb0  = dst->nb[0];

    const int ith = params->ith; // thread index
    const int nth = params->nth; // number of threads

    // parallelize by elements
    const int ne = ggml_nelements(dst);
    const int dr = (ne + nth - 1) / nth;
    const int ie0 = dr * ith;
    const int ie1 = MIN(ie0 + dr, ne);

    if (ie0 < ie1) {
        memcpy(
            ((char *)  dst->data + ie0*nb0),
            ((char *) src0->data + ie0*nb00),
            (ie1 - ie0) * ggml_type_size(src0->type));
    }
}
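
// worked example of the element split above: with ne = 1000 elements and
// nth = 4 threads, dr = (1000 + 3)/4 = 250, so thread ith = 2 copies the
// half-open element range [500, 750); the MIN() clamps the last thread when
// ne is not a multiple of nth.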

static void ggml_compute_forward_dup_f16(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    GGML_TENSOR_UNARY_OP_LOCALS

    const int ith = params->ith; // thread index
    const int nth = params->nth; // number of threads

    if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst) && src0->type == dst->type) {
        ggml_compute_forward_dup_same_cont(params, dst);
        return;
    }

    // parallelize by rows
    const int nr = ne01;
    // number of rows per thread
    const int dr = (nr + nth - 1) / nth;
    // row range for this thread
    const int ir0 = dr * ith;
    const int ir1 = MIN(ir0 + dr, nr);

    if (src0->type == dst->type &&
        ne00 == ne0 &&
        nb00 == ggml_type_size(src0->type) && nb0 == ggml_type_size(dst->type)) {
        // copy by rows
        const size_t rs = ne00*nb00;
        for (int64_t i03 = 0; i03 < ne03; i03++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
                for (int64_t i01 = ir0; i01 < ir1; i01++) {
                    memcpy(
                        ((char *)  dst->data + i01*nb1  + i02*nb2  + i03*nb3),
                        ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03),
                        rs);
                }
            }
        }
        return;
    }

    // TODO: add more special-case implementations for tensor shapes/strides that can benefit from memcpy

    if (ggml_is_contiguous(dst)) {
        if (nb00 == sizeof(ggml_fp16_t)) {
            if (dst->type == GGML_TYPE_F16) {
                size_t id = 0;
                const size_t rs = ne00 * nb00;
                char * dst_ptr = (char *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += rs * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            const char * src0_ptr = (char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03;
                            memcpy(dst_ptr + id, src0_ptr, rs);
                            id += rs;
                        }
                        id += rs * (ne01 - ir1);
                    }
                }
            } else if (dst->type == GGML_TYPE_F32) {
                size_t id = 0;
                float * dst_ptr = (float *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += ne00 * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
                            for (int i00 = 0; i00 < ne00; i00++) {
                                dst_ptr[id] = GGML_FP16_TO_FP32(src0_ptr[i00]);
                                id++;
                            }
                        }
                        id += ne00 * (ne01 - ir1);
                    }
                }
            } else if (type_traits[dst->type].from_float) {
                ggml_from_float_t const quantize_row_q = type_traits[dst->type].from_float;
                float * src0_f32 = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith;

                size_t id = 0;
                size_t rs = nb0 * (ne00 / ggml_blck_size(dst->type));
                char * dst_ptr = (char *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += rs * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);

                            for (int i00 = 0; i00 < ne00; i00++) {
                                src0_f32[i00] = GGML_FP16_TO_FP32(src0_ptr[i00]);
                            }

                            quantize_row_q(src0_f32, dst_ptr + id, ne00);
                            id += rs;
                        }
                        id += rs * (ne01 - ir1);
                    }
                }
            } else {
                GGML_ASSERT(false); // TODO: implement
            }
        } else {
            //printf("%s: this is not optimal - fix me\n", __func__);

            if (dst->type == GGML_TYPE_F32) {
                size_t id = 0;
                float * dst_ptr = (float *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += ne00 * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            for (int i00 = 0; i00 < ne00; i00++) {
                                const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);

                                dst_ptr[id] = GGML_FP16_TO_FP32(*src0_ptr);
                                id++;
                            }
                        }
                        id += ne00 * (ne01 - ir1);
                    }
                }
            } else if (dst->type == GGML_TYPE_F16) {
                size_t id = 0;
                ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += ne00 * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            for (int i00 = 0; i00 < ne00; i00++) {
                                const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);

                                dst_ptr[id] = *src0_ptr;
                                id++;
                            }
                        }
                        id += ne00 * (ne01 - ir1);
                    }
                }
            } else {
                GGML_ASSERT(false); // TODO: implement
            }
        }
        return;
    }

    // dst counters
    int64_t i10 = 0;
    int64_t i11 = 0;
    int64_t i12 = 0;
    int64_t i13 = 0;

    if (dst->type == GGML_TYPE_F16) {
        for (int64_t i03 = 0; i03 < ne03; i03++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
                i10 += ne00 * ir0;
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
                for (int64_t i01 = ir0; i01 < ir1; i01++) {
                    for (int64_t i00 = 0; i00 < ne00; i00++) {
                        const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
                              char * dst_ptr  = ((char *)  dst->data + i10*nb0  + i11*nb1  + i12*nb2  + i13*nb3);

                        memcpy(dst_ptr, src0_ptr, sizeof(ggml_fp16_t));

                        if (++i10 == ne00) {
                            i10 = 0;
                            if (++i11 == ne01) {
                                i11 = 0;
                                if (++i12 == ne02) {
                                    i12 = 0;
                                    if (++i13 == ne03) {
                                        i13 = 0;
                                    }
                                }
                            }
                        }
                    }
                }
                i10 += ne00 * (ne01 - ir1);
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
            }
        }
    } else if (dst->type == GGML_TYPE_F32) {
        for (int64_t i03 = 0; i03 < ne03; i03++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
                i10 += ne00 * ir0;
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
                for (int64_t i01 = ir0; i01 < ir1; i01++) {
                    for (int64_t i00 = 0; i00 < ne00; i00++) {
                        const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
                              char * dst_ptr  = ((char *)  dst->data + i10*nb0  + i11*nb1  + i12*nb2  + i13*nb3);

                        *(float *) dst_ptr = GGML_FP16_TO_FP32(*(const ggml_fp16_t *) src0_ptr);

                        if (++i10 == ne0) {
                            i10 = 0;
                            if (++i11 == ne1) {
                                i11 = 0;
                                if (++i12 == ne2) {
                                    i12 = 0;
                                    if (++i13 == ne3) {
                                        i13 = 0;
                                    }
                                }
                            }
                        }
                    }
                }
                i10 += ne00 * (ne01 - ir1);
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
            }
        }
    } else {
        GGML_ASSERT(false); // TODO: implement
    }
}
static void ggml_compute_forward_dup_bf16(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    GGML_TENSOR_UNARY_OP_LOCALS

    const int ith = params->ith; // thread index
    const int nth = params->nth; // number of threads

    if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst) && src0->type == dst->type) {
        ggml_compute_forward_dup_same_cont(params, dst);
        return;
    }

    // parallelize by rows
    const int nr = ne01;
    // number of rows per thread
    const int dr = (nr + nth - 1) / nth;
    // row range for this thread
    const int ir0 = dr * ith;
    const int ir1 = MIN(ir0 + dr, nr);

    if (src0->type == dst->type &&
        ne00 == ne0 &&
        nb00 == ggml_type_size(src0->type) && nb0 == ggml_type_size(dst->type)) {
        // copy by rows
        const size_t rs = ne00*nb00;
        for (int64_t i03 = 0; i03 < ne03; i03++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
                for (int64_t i01 = ir0; i01 < ir1; i01++) {
                    memcpy(
                        ((char *)  dst->data + i01*nb1  + i02*nb2  + i03*nb3),
                        ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03),
                        rs);
                }
            }
        }
        return;
    }

    // TODO: add more special-case implementations for tensor shapes/strides that can benefit from memcpy

    if (ggml_is_contiguous(dst)) {
        if (nb00 == sizeof(ggml_bf16_t)) {
            if (dst->type == GGML_TYPE_BF16) {
                size_t id = 0;
                const size_t rs = ne00 * nb00;
                char * dst_ptr = (char *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += rs * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            const char * src0_ptr = (char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03;
                            memcpy(dst_ptr + id, src0_ptr, rs);
                            id += rs;
                        }
                        id += rs * (ne01 - ir1);
                    }
                }
            } else if (dst->type == GGML_TYPE_F16) {
                size_t id = 0;
                ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += ne00 * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            const ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
                            for (int i00 = 0; i00 < ne00; i00++) {
                                dst_ptr[id] = GGML_FP32_TO_FP16(GGML_BF16_TO_FP32(src0_ptr[i00]));
                                id++;
                            }
                        }
                        id += ne00 * (ne01 - ir1);
                    }
                }
            } else if (dst->type == GGML_TYPE_F32) {
                size_t id = 0;
                float * dst_ptr = (float *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += ne00 * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            const ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
                            for (int i00 = 0; i00 < ne00; i00++) {
                                dst_ptr[id] = GGML_BF16_TO_FP32(src0_ptr[i00]);
                                id++;
                            }
                        }
                        id += ne00 * (ne01 - ir1);
                    }
                }
            } else if (type_traits[dst->type].from_float) {
                ggml_from_float_t const quantize_row_q = type_traits[dst->type].from_float;
                float * src0_f32 = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith;

                size_t id = 0;
                size_t rs = nb0 * (ne00 / ggml_blck_size(dst->type));
                char * dst_ptr = (char *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += rs * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            const ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);

                            for (int i00 = 0; i00 < ne00; i00++) {
                                src0_f32[i00] = GGML_BF16_TO_FP32(src0_ptr[i00]);
                            }

                            quantize_row_q(src0_f32, dst_ptr + id, ne00);
                            id += rs;
                        }
                        id += rs * (ne01 - ir1);
                    }
                }
            } else {
                GGML_ASSERT(false); // TODO: implement
            }
        } else {
            //printf("%s: this is not optimal - fix me\n", __func__);

            if (dst->type == GGML_TYPE_F32) {
                size_t id = 0;
                float * dst_ptr = (float *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += ne00 * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            for (int i00 = 0; i00 < ne00; i00++) {
                                const ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);

                                dst_ptr[id] = GGML_BF16_TO_FP32(*src0_ptr);
                                id++;
                            }
                        }
                        id += ne00 * (ne01 - ir1);
                    }
                }
            } else if (dst->type == GGML_TYPE_BF16) {
                size_t id = 0;
                ggml_bf16_t * dst_ptr = (ggml_bf16_t *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += ne00 * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            for (int i00 = 0; i00 < ne00; i00++) {
                                const ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);

                                dst_ptr[id] = *src0_ptr;
                                id++;
                            }
                        }
                        id += ne00 * (ne01 - ir1);
                    }
                }
            } else if (dst->type == GGML_TYPE_F16) {
                size_t id = 0;
                ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += ne00 * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            for (int i00 = 0; i00 < ne00; i00++) {
                                const ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);

                                dst_ptr[id] = GGML_FP32_TO_FP16(GGML_BF16_TO_FP32(*src0_ptr));
                                id++;
                            }
                        }
                        id += ne00 * (ne01 - ir1);
                    }
                }
            } else {
                GGML_ASSERT(false); // TODO: implement
            }
        }
        return;
    }

    // dst counters
    int64_t i10 = 0;
    int64_t i11 = 0;
    int64_t i12 = 0;
    int64_t i13 = 0;

    if (dst->type == GGML_TYPE_BF16) {
        for (int64_t i03 = 0; i03 < ne03; i03++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
                i10 += ne00 * ir0;
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
                for (int64_t i01 = ir0; i01 < ir1; i01++) {
                    for (int64_t i00 = 0; i00 < ne00; i00++) {
                        const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
                              char * dst_ptr  = ((char *)  dst->data + i10*nb0  + i11*nb1  + i12*nb2  + i13*nb3);

                        memcpy(dst_ptr, src0_ptr, sizeof(ggml_bf16_t));

                        if (++i10 == ne0) {
                            i10 = 0;
                            if (++i11 == ne1) {
                                i11 = 0;
                                if (++i12 == ne2) {
                                    i12 = 0;
                                    if (++i13 == ne3) {
                                        i13 = 0;
                                    }
                                }
                            }
                        }
                    }
                }
                i10 += ne00 * (ne01 - ir1);
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
            }
        }
    } else if (dst->type == GGML_TYPE_F16) {
        for (int64_t i03 = 0; i03 < ne03; i03++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
                i10 += ne00 * ir0;
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
                for (int64_t i01 = ir0; i01 < ir1; i01++) {
                    for (int64_t i00 = 0; i00 < ne00; i00++) {
                        const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
                              char * dst_ptr  = ((char *)  dst->data + i10*nb0  + i11*nb1  + i12*nb2  + i13*nb3);

                        *(ggml_fp16_t *) dst_ptr = GGML_FP32_TO_FP16(GGML_BF16_TO_FP32(*(const ggml_bf16_t *) src0_ptr));

                        if (++i10 == ne0) {
                            i10 = 0;
                            if (++i11 == ne1) {
                                i11 = 0;
                                if (++i12 == ne2) {
                                    i12 = 0;
                                    if (++i13 == ne3) {
                                        i13 = 0;
                                    }
                                }
                            }
                        }
                    }
                }
                i10 += ne00 * (ne01 - ir1);
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
            }
        }
    } else if (dst->type == GGML_TYPE_F32) {
        for (int64_t i03 = 0; i03 < ne03; i03++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
                i10 += ne00 * ir0;
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
                for (int64_t i01 = ir0; i01 < ir1; i01++) {
                    for (int64_t i00 = 0; i00 < ne00; i00++) {
                        const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
                              char * dst_ptr  = ((char *)  dst->data + i10*nb0  + i11*nb1  + i12*nb2  + i13*nb3);

                        *(float *) dst_ptr = GGML_BF16_TO_FP32(*(const ggml_bf16_t *) src0_ptr);

                        if (++i10 == ne0) {
                            i10 = 0;
                            if (++i11 == ne1) {
                                i11 = 0;
                                if (++i12 == ne2) {
                                    i12 = 0;
                                    if (++i13 == ne3) {
                                        i13 = 0;
                                    }
                                }
                            }
                        }
                    }
                }
                i10 += ne00 * (ne01 - ir1);
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
            }
        }
    } else {
        GGML_ASSERT(false); // TODO: implement
    }
}
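
// F32 source variant: the contiguous fast path can quantize rows directly with
// from_float (no temporary conversion buffer is needed), and the element-wise
// fallbacks convert to F16/BF16 on the fly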
static void ggml_compute_forward_dup_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    GGML_TENSOR_UNARY_OP_LOCALS

    const int ith = params->ith; // thread index
    const int nth = params->nth; // number of threads

    if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst) && src0->type == dst->type) {
        ggml_compute_forward_dup_same_cont(params, dst);
        return;
    }

    // parallelize by rows
    const int nr = ne01;
    // number of rows per thread
    const int dr = (nr + nth - 1) / nth;
    // row range for this thread
    const int ir0 = dr * ith;
    const int ir1 = MIN(ir0 + dr, nr);

    if (src0->type == dst->type &&
        ne00 == ne0 &&
        nb00 == ggml_type_size(src0->type) && nb0 == ggml_type_size(dst->type)) {
        // copy by rows
        const size_t rs = ne00*nb00;
        for (int64_t i03 = 0; i03 < ne03; i03++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
                for (int64_t i01 = ir0; i01 < ir1; i01++) {
                    memcpy(
                        ((char *)  dst->data + i01*nb1  + i02*nb2  + i03*nb3),
                        ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03),
                        rs);
                }
            }
        }
        return;
    }

    if (ggml_is_contiguous(dst)) {
        // TODO: simplify
        if (nb00 == sizeof(float)) {
            if (dst->type == GGML_TYPE_F32) {
                size_t id = 0;
                const size_t rs = ne00 * nb00;
                char * dst_ptr = (char *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += rs * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            const char * src0_ptr = (char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03;
                            memcpy(dst_ptr + id, src0_ptr, rs);
                            id += rs;
                        }
                        id += rs * (ne01 - ir1);
                    }
                }
            } else if (type_traits[dst->type].from_float) {
                ggml_from_float_t const quantize_row_q = type_traits[dst->type].from_float;

                size_t id = 0;
                size_t rs = nb0 * (ne00 / ggml_blck_size(dst->type));
                char * dst_ptr = (char *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += rs * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            const float * src0_ptr = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
                            quantize_row_q(src0_ptr, dst_ptr + id, ne00);
                            id += rs;
                        }
                        id += rs * (ne01 - ir1);
                    }
                }
            } else {
                GGML_ASSERT(false); // TODO: implement
            }
        } else {
            //printf("%s: this is not optimal - fix me\n", __func__);

            if (dst->type == GGML_TYPE_F32) {
                size_t id = 0;
                float * dst_ptr = (float *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += ne00 * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            for (int i00 = 0; i00 < ne00; i00++) {
                                const float * src0_ptr = (float *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);

                                dst_ptr[id] = *src0_ptr;
                                id++;
                            }
                        }
                        id += ne00 * (ne01 - ir1);
                    }
                }
            } else if (dst->type == GGML_TYPE_F16) {
                size_t id = 0;
                ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += ne00 * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            for (int i00 = 0; i00 < ne00; i00++) {
                                const float * src0_ptr = (float *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);

                                dst_ptr[id] = GGML_FP32_TO_FP16(*src0_ptr);
                                id++;
                            }
                        }
                        id += ne00 * (ne01 - ir1);
                    }
                }
            } else if (dst->type == GGML_TYPE_BF16) {
                size_t id = 0;
                ggml_bf16_t * dst_ptr = (ggml_bf16_t *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += ne00 * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            for (int i00 = 0; i00 < ne00; i00++) {
                                const float * src0_ptr = (float *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);

                                dst_ptr[id] = GGML_FP32_TO_BF16(*src0_ptr);
                                id++;
                            }
                        }
                        id += ne00 * (ne01 - ir1);
                    }
                }
            } else {
                GGML_ASSERT(false); // TODO: implement
            }
        }

        return;
    }

    // dst counters
    int64_t i10 = 0;
    int64_t i11 = 0;
    int64_t i12 = 0;
    int64_t i13 = 0;

    if (dst->type == GGML_TYPE_F32) {
        for (int64_t i03 = 0; i03 < ne03; i03++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
                i10 += ne00 * ir0;
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
                for (int64_t i01 = ir0; i01 < ir1; i01++) {
                    for (int64_t i00 = 0; i00 < ne00; i00++) {
                        const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
                              char * dst_ptr  = ((char *)  dst->data + i10*nb0  + i11*nb1  + i12*nb2  + i13*nb3);

                        memcpy(dst_ptr, src0_ptr, sizeof(float));

                        if (++i10 == ne0) {
                            i10 = 0;
                            if (++i11 == ne1) {
                                i11 = 0;
                                if (++i12 == ne2) {
                                    i12 = 0;
                                    if (++i13 == ne3) {
                                        i13 = 0;
                                    }
                                }
                            }
                        }
                    }
                }
                i10 += ne00 * (ne01 - ir1);
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
            }
        }
    } else if (dst->type == GGML_TYPE_F16) {
        for (int64_t i03 = 0; i03 < ne03; i03++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
                i10 += ne00 * ir0;
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
                for (int64_t i01 = ir0; i01 < ir1; i01++) {
                    for (int64_t i00 = 0; i00 < ne00; i00++) {
                        const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
                              char * dst_ptr  = ((char *)  dst->data + i10*nb0  + i11*nb1  + i12*nb2  + i13*nb3);

                        *(ggml_fp16_t *) dst_ptr = GGML_FP32_TO_FP16(*(const float *) src0_ptr);

                        if (++i10 == ne0) {
                            i10 = 0;
                            if (++i11 == ne1) {
                                i11 = 0;
                                if (++i12 == ne2) {
                                    i12 = 0;
                                    if (++i13 == ne3) {
                                        i13 = 0;
                                    }
                                }
                            }
                        }
                    }
                }
                i10 += ne00 * (ne01 - ir1);
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
            }
        }
    } else if (dst->type == GGML_TYPE_BF16) {
        for (int64_t i03 = 0; i03 < ne03; i03++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
                i10 += ne00 * ir0;
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
                for (int64_t i01 = ir0; i01 < ir1; i01++) {
                    for (int64_t i00 = 0; i00 < ne00; i00++) {
                        const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
                              char * dst_ptr  = ((char *)  dst->data + i10*nb0  + i11*nb1  + i12*nb2  + i13*nb3);

                        *(ggml_bf16_t *) dst_ptr = GGML_FP32_TO_BF16(*(const float *) src0_ptr);

                        if (++i10 == ne0) {
                            i10 = 0;
                            if (++i11 == ne1) {
                                i11 = 0;
                                if (++i12 == ne2) {
                                    i12 = 0;
                                    if (++i13 == ne3) {
                                        i13 = 0;
                                    }
                                }
                            }
                        }
                    }
                }
                i10 += ne00 * (ne01 - ir1);
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
            }
        }
    } else {
        GGML_ASSERT(false); // TODO: implement
    }
}
// A simplified version of ggml_compute_forward_dup that doesn't do float upcasting, and just plain old memcpy.
static void ggml_compute_forward_dup_bytes(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));
    GGML_ASSERT(src0->type == dst->type);

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst)) {
        ggml_compute_forward_dup_same_cont(params, dst);
        return;
    }

    GGML_TENSOR_UNARY_OP_LOCALS;

    const size_t type_size = ggml_type_size(src0->type);
    const int ith = params->ith; // thread index
    const int nth = params->nth; // number of threads

    // parallelize by rows
    const int nr = ne01;
    // number of rows per thread
    const int dr = (nr + nth - 1) / nth;
    // row range for this thread
    const int ir0 = dr * ith;
    const int ir1 = MIN(ir0 + dr, nr);

    if (src0->type == dst->type &&
        ne00 == ne0 &&
        nb00 == type_size && nb0 == type_size) {
        // copy by rows
        const size_t rs = ne00 * type_size;
        for (int64_t i03 = 0; i03 < ne03; i03++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
                for (int64_t i01 = ir0; i01 < ir1; i01++) {
                    memcpy(
                        ((char *)  dst->data + i01*nb1  + i02*nb2  + i03*nb3),
                        ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03),
                        rs);
                }
            }
        }
        return;
    }

    if (ggml_is_contiguous(dst)) {
        size_t id = 0;
        char * dst_ptr = (char *) dst->data;
        const size_t rs = ne00 * type_size;

        if (nb00 == type_size) {
            // src0 is contiguous on first dimension, copy by rows
            for (int64_t i03 = 0; i03 < ne03; i03++) {
                for (int64_t i02 = 0; i02 < ne02; i02++) {
                    id += rs * ir0;
                    for (int64_t i01 = ir0; i01 < ir1; i01++) {
                        const char * src0_ptr = (char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03;
                        memcpy(dst_ptr + id, src0_ptr, rs);
                        id += rs;
                    }
                    id += rs * (ne01 - ir1);
                }
            }
        } else {
            //printf("%s: this is not optimal - fix me\n", __func__);

            for (int64_t i03 = 0; i03 < ne03; i03++) {
                for (int64_t i02 = 0; i02 < ne02; i02++) {
                    id += rs * ir0;
                    for (int64_t i01 = ir0; i01 < ir1; i01++) {
                        for (int64_t i00 = 0; i00 < ne00; i00++) {
                            const char * src0_ptr = (char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03;
                            memcpy(dst_ptr + id, src0_ptr, type_size);

                            id += type_size;
                        }
                    }
                    id += rs * (ne01 - ir1);
                }
            }
        }

        return;
    }

    // dst counters
    int64_t i10 = 0;
    int64_t i11 = 0;
    int64_t i12 = 0;
    int64_t i13 = 0;

    for (int64_t i03 = 0; i03 < ne03; i03++) {
        for (int64_t i02 = 0; i02 < ne02; i02++) {
            i10 += ne00 * ir0;
            while (i10 >= ne0) {
                i10 -= ne0;
                if (++i11 == ne1) {
                    i11 = 0;
                    if (++i12 == ne2) {
                        i12 = 0;
                        if (++i13 == ne3) {
                            i13 = 0;
                        }
                    }
                }
            }
            for (int64_t i01 = ir0; i01 < ir1; i01++) {
                for (int64_t i00 = 0; i00 < ne00; i00++) {
                    const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
                          char * dst_ptr  = ((char *)  dst->data + i10*nb0  + i11*nb1  + i12*nb2  + i13*nb3);

                    memcpy(dst_ptr, src0_ptr, type_size);

                    if (++i10 == ne0) {
                        i10 = 0;
                        if (++i11 == ne1) {
                            i11 = 0;
                            if (++i12 == ne2) {
                                i12 = 0;
                                if (++i13 == ne3) {
                                    i13 = 0;
                                }
                            }
                        }
                    }
                }
            }
            i10 += ne00 * (ne01 - ir1);
            while (i10 >= ne0) {
                i10 -= ne0;
                if (++i11 == ne1) {
                    i11 = 0;
                    if (++i12 == ne2) {
                        i12 = 0;
                        if (++i13 == ne3) {
                            i13 = 0;
                        }
                    }
                }
            }
        }
    }
}
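
// dup dispatch: a same-type copy never needs float conversion, so it is routed
// to the byte-based kernel regardless of layout; mixed-type copies dispatch on
// the source type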
static void ggml_compute_forward_dup(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    if (src0->type == dst->type) {
        ggml_compute_forward_dup_bytes(params, dst);
        return;
    }

    switch (src0->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_dup_f16(params, dst);
            } break;
        case GGML_TYPE_BF16:
            {
                ggml_compute_forward_dup_bf16(params, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_dup_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_add
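
// src1 may be broadcast across src0: ggml_can_repeat(src1, src0) is asserted,
// and the loops below index src1 with i % ne1x so a src1 row is reused along
// every dimension where its extent is 1 (nr0 = ne00/ne10 repeats within a row)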
static void ggml_compute_forward_add_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];
    const struct ggml_tensor * src1 = dst->src[1];

    GGML_ASSERT(ggml_can_repeat(src1, src0) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

#ifdef GGML_USE_CLBLAST
    if (src1->backend == GGML_BACKEND_TYPE_GPU) {
        // TODO: OpenCL kernel support full broadcast
        GGML_ASSERT(ggml_can_repeat_rows(src1, src0));
        if (ith == 0) {
            ggml_cl_add(src0, src1, dst);
        }
        return;
    }
#endif

    const int nr = ggml_nrows(src0);

    GGML_TENSOR_BINARY_OP_LOCALS

    GGML_ASSERT( nb0 == sizeof(float));
    GGML_ASSERT(nb00 == sizeof(float));

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    if (nb10 == sizeof(float)) {
        for (int ir = ir0; ir < ir1; ++ir) {
            // src1 is broadcastable across src0 and dst in i1, i2, i3
            const int64_t i03 = ir/(ne02*ne01);
            const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
            const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);

            const int64_t i13 = i03 % ne13;
            const int64_t i12 = i02 % ne12;
            const int64_t i11 = i01 % ne11;
            const int64_t nr0 = ne00 / ne10;

            float * dst_ptr  = (float *) ((char *) dst->data  + i03*nb3  + i02*nb2  + i01*nb1 );
            float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);
            float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11);

            for (int64_t r = 0; r < nr0; ++r) {
#ifdef GGML_USE_ACCELERATE
                vDSP_vadd(src0_ptr + r*ne10, 1, src1_ptr, 1, dst_ptr + r*ne10, 1, ne10);
#else
                ggml_vec_add_f32(ne10, dst_ptr + r*ne10, src0_ptr + r*ne10, src1_ptr);
#endif
            }
        }
    } else {
        // src1 is not contiguous
        for (int ir = ir0; ir < ir1; ++ir) {
            // src1 is broadcastable across src0 and dst in i1, i2, i3
            const int64_t i03 = ir/(ne02*ne01);
            const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
            const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);

            const int64_t i13 = i03 % ne13;
            const int64_t i12 = i02 % ne12;
            const int64_t i11 = i01 % ne11;

            float * dst_ptr  = (float *) ((char *) dst->data  + i03*nb3  + i02*nb2  + i01*nb1 );
            float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);

            for (int64_t i0 = 0; i0 < ne0; ++i0) {
                const int64_t i10 = i0 % ne10;
                float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + i10*nb10);

                dst_ptr[i0] = src0_ptr[i0] + *src1_ptr;
            }
        }
    }
}
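
// mixed-precision add: src0 is F16 and src1 is F32; the sum is computed in F32
// and stored as either F16 or F32 depending on dst->type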
static void ggml_compute_forward_add_f16_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];
    const struct ggml_tensor * src1 = dst->src[1];

    GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(src0);

    GGML_TENSOR_BINARY_OP_LOCALS

    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);

    if (dst->type == GGML_TYPE_F32) {
        GGML_ASSERT( nb0 == sizeof(float));
    }
    else {
        GGML_ASSERT(dst->type == GGML_TYPE_F16);
        GGML_ASSERT( nb0 == sizeof(ggml_fp16_t));
    }

    GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    if (nb10 == sizeof(float)) {
        if (dst->type == GGML_TYPE_F16) {
            for (int ir = ir0; ir < ir1; ++ir) {
                // src0, src1 and dst are same shape => same indices
                const int i3 = ir/(ne2*ne1);
                const int i2 = (ir - i3*ne2*ne1)/ne1;
                const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

                ggml_fp16_t * dst_ptr  = (ggml_fp16_t *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1);
                ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
                float       * src1_ptr = (float *)       ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11);

                for (int i = 0; i < ne0; i++) {
                    dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + src1_ptr[i]);
                }
            }
        } else {
            for (int ir = ir0; ir < ir1; ++ir) {
                // src0, src1 and dst are same shape => same indices
                const int i3 = ir/(ne2*ne1);
                const int i2 = (ir - i3*ne2*ne1)/ne1;
                const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

                float       * dst_ptr  = (float *)       ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1);
                ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
                float       * src1_ptr = (float *)       ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11);

                for (int i = 0; i < ne0; i++) {
                    dst_ptr[i] = GGML_FP16_TO_FP32(src0_ptr[i]) + src1_ptr[i];
                }
            }
        }
    }
    else {
        // src1 is not contiguous
        GGML_ASSERT(false);
    }
}
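
// BF16 analogue of ggml_compute_forward_add_f16_f32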
static void ggml_compute_forward_add_bf16_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];
    const struct ggml_tensor * src1 = dst->src[1];

    GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(src0);

    GGML_TENSOR_BINARY_OP_LOCALS

    GGML_ASSERT(src0->type == GGML_TYPE_BF16);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);

    if (dst->type == GGML_TYPE_F32) {
        GGML_ASSERT( nb0 == sizeof(float));
    }
    else {
        GGML_ASSERT(dst->type == GGML_TYPE_BF16);
        GGML_ASSERT( nb0 == sizeof(ggml_bf16_t));
    }

    GGML_ASSERT(nb00 == sizeof(ggml_bf16_t));

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    if (nb10 == sizeof(float)) {
        if (dst->type == GGML_TYPE_BF16) {
            for (int ir = ir0; ir < ir1; ++ir) {
                // src0, src1 and dst are same shape => same indices
                const int i3 = ir/(ne2*ne1);
                const int i2 = (ir - i3*ne2*ne1)/ne1;
                const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

                ggml_bf16_t * dst_ptr  = (ggml_bf16_t *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1);
                ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
                float       * src1_ptr = (float *)       ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11);

                for (int i = 0; i < ne0; i++) {
                    dst_ptr[i] = GGML_FP32_TO_BF16(GGML_BF16_TO_FP32(src0_ptr[i]) + src1_ptr[i]);
                }
            }
        } else {
            for (int ir = ir0; ir < ir1; ++ir) {
                // src0, src1 and dst are same shape => same indices
                const int i3 = ir/(ne2*ne1);
                const int i2 = (ir - i3*ne2*ne1)/ne1;
                const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

                float       * dst_ptr  = (float *)       ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1);
                ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
                float       * src1_ptr = (float *)       ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11);

                for (int i = 0; i < ne0; i++) {
                    dst_ptr[i] = GGML_BF16_TO_FP32(src0_ptr[i]) + src1_ptr[i];
                }
            }
        }
    }
    else {
        // src1 is not contiguous
        GGML_ASSERT(false);
    }
}
static void ggml_compute_forward_add_f16_f16(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];
    const struct ggml_tensor * src1 = dst->src[1];

    GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(src0);

    GGML_TENSOR_BINARY_OP_LOCALS

    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F16);
    GGML_ASSERT(dst->type  == GGML_TYPE_F16);

    GGML_ASSERT( nb0 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    if (nb10 == sizeof(ggml_fp16_t)) {
        for (int ir = ir0; ir < ir1; ++ir) {
            // src0, src1 and dst are same shape => same indices
            const int i3 = ir/(ne2*ne1);
            const int i2 = (ir - i3*ne2*ne1)/ne1;
            const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

            ggml_fp16_t * dst_ptr  = (ggml_fp16_t *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1);
            ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
            ggml_fp16_t * src1_ptr = (ggml_fp16_t *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11);

            for (int i = 0; i < ne0; i++) {
                dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + GGML_FP16_TO_FP32(src1_ptr[i]));
            }
        }
    }
    else {
        // src1 is not contiguous
        GGML_ASSERT(false);
    }
}
static void ggml_compute_forward_add_bf16_bf16(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];
    const struct ggml_tensor * src1 = dst->src[1];

    GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(src0);

    GGML_TENSOR_BINARY_OP_LOCALS

    GGML_ASSERT(src0->type == GGML_TYPE_BF16);
    GGML_ASSERT(src1->type == GGML_TYPE_BF16);
    GGML_ASSERT(dst->type  == GGML_TYPE_BF16);

    GGML_ASSERT( nb0 == sizeof(ggml_bf16_t));
    GGML_ASSERT(nb00 == sizeof(ggml_bf16_t));

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    if (nb10 == sizeof(ggml_bf16_t)) {
        for (int ir = ir0; ir < ir1; ++ir) {
            // src0, src1 and dst are same shape => same indices
            const int i3 = ir/(ne2*ne1);
            const int i2 = (ir - i3*ne2*ne1)/ne1;
            const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

            ggml_bf16_t * dst_ptr  = (ggml_bf16_t *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1);
            ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
            ggml_bf16_t * src1_ptr = (ggml_bf16_t *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11);

            for (int i = 0; i < ne0; i++) {
                dst_ptr[i] = GGML_FP32_TO_BF16(GGML_BF16_TO_FP32(src0_ptr[i]) + GGML_BF16_TO_FP32(src1_ptr[i]));
            }
        }
    }
    else {
        // src1 is not contiguous
        GGML_ASSERT(false);
    }
}
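
// quantized add: each src0 row is dequantized into a per-thread F32 scratch
// buffer (params->wdata), src1 is accumulated into it, and the row is
// re-quantized into dst (or copied as F32 when dst has no from_float)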
static void ggml_compute_forward_add_q_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];
    const struct ggml_tensor * src1 = dst->src[1];

    GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    const int nr = ggml_nrows(src0);

    GGML_TENSOR_BINARY_OP_LOCALS

    const int ith = params->ith;
    const int nth = params->nth;

    const enum ggml_type type  = src0->type;
    const enum ggml_type dtype = dst->type;
    ggml_to_float_t   const dequantize_row_q = type_traits[type].to_float;
    ggml_from_float_t const quantize_row_q   = type_traits[dtype].from_float;

    // we don't support permuted src0 or src1
    GGML_ASSERT(nb00 == ggml_type_size(type));
    GGML_ASSERT(nb10 == sizeof(float));

    // dst cannot be transposed or permuted
    GGML_ASSERT(nb0 <= nb1);
    GGML_ASSERT(nb1 <= nb2);
    GGML_ASSERT(nb2 <= nb3);

    GGML_ASSERT(ggml_is_quantized(src0->type));
    GGML_ASSERT(src1->type == GGML_TYPE_F32);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    float * wdata = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith;

    for (int ir = ir0; ir < ir1; ++ir) {
        // src0 indices
        const int i03 = ir/(ne02*ne01);
        const int i02 = (ir - i03*ne02*ne01)/ne01;
        const int i01 = (ir - i03*ne02*ne01 - i02*ne01);

        // src1 and dst are same shape as src0 => same indices
        const int i13 = i03;
        const int i12 = i02;
        const int i11 = i01;

        const int i3 = i03;
        const int i2 = i02;
        const int i1 = i01;

        void  * src0_row = (void  *) ((char *) src0->data + (i01*nb01 + i02*nb02 + i03*nb03));
        float * src1_row = (float *) ((char *) src1->data + (i11*nb11 + i12*nb12 + i13*nb13));
        void  * dst_row  = (void  *) ((char *)  dst->data + ( i1*nb1  +  i2*nb2  +  i3*nb3));

        assert(ne00 % 32 == 0);

        // unquantize row from src0 to temp buffer
        dequantize_row_q(src0_row, wdata, ne00);
        // add src1
        ggml_vec_acc_f32(ne00, wdata, src1_row);
        // quantize row to dst
        if (quantize_row_q != NULL) {
            quantize_row_q(wdata, dst_row, ne00);
        } else {
            memcpy(dst_row, wdata, ne0*nb0);
        }
    }
}
static void ggml_compute_forward_add(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];
    const struct ggml_tensor * src1 = dst->src[1];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                if (src1->type == GGML_TYPE_F32) {
                    ggml_compute_forward_add_f32(params, dst);
                }
                else {
                    GGML_ASSERT(false);
                }
            } break;
        case GGML_TYPE_F16:
            {
                if (src1->type == GGML_TYPE_F16) {
                    ggml_compute_forward_add_f16_f16(params, dst);
                }
                else if (src1->type == GGML_TYPE_F32) {
                    ggml_compute_forward_add_f16_f32(params, dst);
                }
                else {
                    GGML_ASSERT(false);
                }
            } break;
        case GGML_TYPE_BF16:
            {
                if (src1->type == GGML_TYPE_BF16) {
                    ggml_compute_forward_add_bf16_bf16(params, dst);
                }
                else if (src1->type == GGML_TYPE_F32) {
                    ggml_compute_forward_add_bf16_f32(params, dst);
                }
                else {
                    GGML_ASSERT(false);
                }
            } break;
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
        case GGML_TYPE_IQ2_XXS:
        case GGML_TYPE_IQ2_XS:
        case GGML_TYPE_IQ3_XXS:
        case GGML_TYPE_IQ1_S:
        case GGML_TYPE_IQ1_M:
        case GGML_TYPE_IQ4_NL:
        case GGML_TYPE_IQ4_XS:
        case GGML_TYPE_IQ3_S:
        case GGML_TYPE_IQ2_S:
            {
                ggml_compute_forward_add_q_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_add1
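
// add1 adds a single scalar (src1, asserted by ggml_is_scalar) to every
// element of src0; the per-type variants mirror those of ggml_compute_forward_add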
static void ggml_compute_forward_add1_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];
    const struct ggml_tensor * src1 = dst->src[1];

    GGML_ASSERT(ggml_are_same_shape(src0, dst));
    GGML_ASSERT(ggml_is_scalar(src1));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(src0);

    GGML_TENSOR_UNARY_OP_LOCALS

    GGML_ASSERT( nb0 == sizeof(float));
    GGML_ASSERT(nb00 == sizeof(float));

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int ir = ir0; ir < ir1; ++ir) {
        // src0 and dst are same shape => same indices
        const int i3 = ir/(ne2*ne1);
        const int i2 = (ir - i3*ne2*ne1)/ne1;
        const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

#ifdef GGML_USE_ACCELERATE
        UNUSED(ggml_vec_add1_f32);

        vDSP_vadd(
                (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01), 1,
                (float *) ((char *) src1->data), 0,
                (float *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1 ), 1,
                ne0);
#else
        ggml_vec_add1_f32(ne0,
                (float *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1 ),
                (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01),
               *(float *) src1->data);
#endif
    }
}
static void ggml_compute_forward_add1_f16_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];
    const struct ggml_tensor * src1 = dst->src[1];

    GGML_ASSERT(ggml_are_same_shape(src0, dst));
    GGML_ASSERT(ggml_is_scalar(src1));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    // scalar to add
    const float v = *(float *) src1->data;

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(src0);

    GGML_TENSOR_UNARY_OP_LOCALS

    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT(dst->type  == GGML_TYPE_F16);

    GGML_ASSERT( nb0 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int ir = ir0; ir < ir1; ++ir) {
        // src0 and dst are same shape => same indices
        const int i3 = ir/(ne2*ne1);
        const int i2 = (ir - i3*ne2*ne1)/ne1;
        const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

        ggml_fp16_t * dst_ptr  = (ggml_fp16_t *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1 );
        ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
        for (int i = 0; i < ne0; i++) {
            dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + v);
        }
    }
}
static void ggml_compute_forward_add1_f16_f16(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];
    const struct ggml_tensor * src1 = dst->src[1];

    GGML_ASSERT(ggml_are_same_shape(src0, dst));
    GGML_ASSERT(ggml_is_scalar(src1));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    // scalar to add
    const float v = GGML_FP16_TO_FP32(*(ggml_fp16_t *) src1->data);

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(src0);

    GGML_TENSOR_UNARY_OP_LOCALS

    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F16);
    GGML_ASSERT(dst->type  == GGML_TYPE_F16);

    GGML_ASSERT( nb0 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int ir = ir0; ir < ir1; ++ir) {
        // src0 and dst are same shape => same indices
        const int i3 = ir/(ne2*ne1);
        const int i2 = (ir - i3*ne2*ne1)/ne1;
        const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

        ggml_fp16_t * dst_ptr  = (ggml_fp16_t *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1 );
        ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
        for (int i = 0; i < ne0; i++) {
            dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + v);
        }
    }
}
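
// the per-thread scratch row used below is offset by CACHE_LINE_SIZE_F32
// floats per thread, presumably so neighbouring threads do not write to the
// same cache line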
static void ggml_compute_forward_add1_q_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];
    const struct ggml_tensor * src1 = dst->src[1];

    GGML_ASSERT(ggml_are_same_shape(src0, dst));
    GGML_ASSERT(ggml_is_scalar(src1));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    // scalar to add
    const float v = *(float *) src1->data;

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(src0);

    GGML_TENSOR_UNARY_OP_LOCALS

    const enum ggml_type type = src0->type;
    ggml_to_float_t   const dequantize_row_q = type_traits[type].to_float;
    ggml_from_float_t const quantize_row_q   = type_traits[type].from_float;

    // we don't support permuted src0
    GGML_ASSERT(nb00 == ggml_type_size(type));

    // dst cannot be transposed or permuted
    GGML_ASSERT(nb0 <= nb1);
    GGML_ASSERT(nb1 <= nb2);
    GGML_ASSERT(nb2 <= nb3);

    GGML_ASSERT(ggml_is_quantized(src0->type));
    GGML_ASSERT(dst->type == src0->type);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    float * wdata = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32) * ith;

    for (int ir = ir0; ir < ir1; ++ir) {
        // src0 and dst are same shape => same indices
        const int i3 = ir/(ne2*ne1);
        const int i2 = (ir - i3*ne2*ne1)/ne1;
        const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

        void * src0_row = (void *) ((char *) src0->data + (i1*nb01 + i2*nb02 + i3*nb03));
        void * dst_row  = (void *) ((char *)  dst->data + (i1*nb1  + i2*nb2  + i3*nb3 ));

        assert(ne0 % 32 == 0);

        // unquantize row from src0 to temp buffer
        dequantize_row_q(src0_row, wdata, ne0);
        // add src1
        ggml_vec_acc1_f32(ne0, wdata, v);
        // quantize row to dst
        quantize_row_q(wdata, dst_row, ne0);
    }
}
static void ggml_compute_forward_add1_bf16_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];
    const struct ggml_tensor * src1 = dst->src[1];

    GGML_ASSERT(ggml_are_same_shape(src0, dst));
    GGML_ASSERT(ggml_is_scalar(src1));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    // scalar to add
    const float v = *(float *) src1->data;

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(src0);

    GGML_TENSOR_UNARY_OP_LOCALS

    GGML_ASSERT(src0->type == GGML_TYPE_BF16);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT(dst->type  == GGML_TYPE_BF16);

    GGML_ASSERT( nb0 == sizeof(ggml_bf16_t));
    GGML_ASSERT(nb00 == sizeof(ggml_bf16_t));

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int ir = ir0; ir < ir1; ++ir) {
        // src0 and dst are same shape => same indices
        const int i3 = ir/(ne2*ne1);
        const int i2 = (ir - i3*ne2*ne1)/ne1;
        const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

        ggml_bf16_t * dst_ptr  = (ggml_bf16_t *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1 );
        ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
        for (int i = 0; i < ne0; i++) {
            dst_ptr[i] = GGML_FP32_TO_BF16(GGML_BF16_TO_FP32(src0_ptr[i]) + v);
        }
    }
}
static void ggml_compute_forward_add1_bf16_bf16(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];
    const struct ggml_tensor * src1 = dst->src[1];

    GGML_ASSERT(ggml_are_same_shape(src0, dst));
    GGML_ASSERT(ggml_is_scalar(src1));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    // scalar to add
    const float v = GGML_BF16_TO_FP32(*(ggml_bf16_t *) src1->data);

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(src0);

    GGML_TENSOR_UNARY_OP_LOCALS

    GGML_ASSERT(src0->type == GGML_TYPE_BF16);
    GGML_ASSERT(src1->type == GGML_TYPE_BF16);
    GGML_ASSERT(dst->type  == GGML_TYPE_BF16);

    GGML_ASSERT( nb0 == sizeof(ggml_bf16_t));
    GGML_ASSERT(nb00 == sizeof(ggml_bf16_t));

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int ir = ir0; ir < ir1; ++ir) {
        // src0 and dst are same shape => same indices
        const int i3 = ir/(ne2*ne1);
        const int i2 = (ir - i3*ne2*ne1)/ne1;
        const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

        ggml_bf16_t * dst_ptr  = (ggml_bf16_t *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1 );
        ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
        for (int i = 0; i < ne0; i++) {
            dst_ptr[i] = GGML_FP32_TO_BF16(GGML_BF16_TO_FP32(src0_ptr[i]) + v);
        }
    }
}
static void ggml_compute_forward_add1(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];
    const struct ggml_tensor * src1 = dst->src[1];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_add1_f32(params, dst);
            } break;
        case GGML_TYPE_F16:
            {
                if (src1->type == GGML_TYPE_F16) {
                    ggml_compute_forward_add1_f16_f16(params, dst);
                }
                else if (src1->type == GGML_TYPE_F32) {
                    ggml_compute_forward_add1_f16_f32(params, dst);
                }
                else {
                    GGML_ASSERT(false);
                }
            } break;
        case GGML_TYPE_BF16:
            {
                if (src1->type == GGML_TYPE_BF16) {
                    ggml_compute_forward_add1_bf16_bf16(params, dst);
                }
                else if (src1->type == GGML_TYPE_F32) {
                    ggml_compute_forward_add1_bf16_f32(params, dst);
                }
                else {
                    GGML_ASSERT(false);
                }
            } break;
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q8_1:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
        case GGML_TYPE_IQ2_XXS:
        case GGML_TYPE_IQ2_XS:
        case GGML_TYPE_IQ3_XXS:
        case GGML_TYPE_IQ1_S:
        case GGML_TYPE_IQ1_M:
        case GGML_TYPE_IQ4_NL:
        case GGML_TYPE_IQ4_XS:
        case GGML_TYPE_IQ3_S:
        case GGML_TYPE_IQ2_S:
            {
                ggml_compute_forward_add1_q_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_acc
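
// acc computes dst = src0, then adds src1 into a sub-view of dst described by
// op_params: [nb1, nb2, nb3, byte offset, inplace flag]; the initial copy of
// src0 runs in the INIT phase so it is finished before any thread accumulates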
  7797. static void ggml_compute_forward_acc_f32(
  7798. const struct ggml_compute_params * params,
  7799. struct ggml_tensor * dst) {
  7800. const struct ggml_tensor * src0 = dst->src[0];
  7801. const struct ggml_tensor * src1 = dst->src[1];
  7802. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  7803. GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));
  7804. // view src0 and dst with these strides and data offset inbytes during acc
  7805. // nb0 is implicitly element_size because src0 and dst are contiguous
  7806. size_t nb1 = ((int32_t *) dst->op_params)[0];
  7807. size_t nb2 = ((int32_t *) dst->op_params)[1];
  7808. size_t nb3 = ((int32_t *) dst->op_params)[2];
  7809. size_t offset = ((int32_t *) dst->op_params)[3];
  7810. bool inplace = (bool) ((int32_t *) dst->op_params)[4];
  7811. if (!inplace && (params->type == GGML_TASK_TYPE_INIT)) {
  7812. if (params->ith != 0) {
  7813. return;
  7814. }
  7815. // memcpy needs to be synchronized across threads to avoid race conditions.
  7816. // => do it in INIT phase
  7817. memcpy(
  7818. ((char *) dst->data),
  7819. ((char *) src0->data),
  7820. ggml_nbytes(dst));
  7821. }
  7822. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  7823. return;
  7824. }
  7825. const int ith = params->ith;
  7826. const int nth = params->nth;
  7827. const int nr = ggml_nrows(src1);
  7828. const int nc = src1->ne[0];
  7829. GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne)
  7830. GGML_TENSOR_LOCALS(size_t, nb1, src1, nb)
  7831. // src0 and dst as viewed during acc
  7832. const size_t nb0 = ggml_element_size(src0);
  7833. const size_t nb00 = nb0;
  7834. const size_t nb01 = nb1;
  7835. const size_t nb02 = nb2;
  7836. const size_t nb03 = nb3;
  7837. GGML_ASSERT(offset + (ne10 == 0 ? 0 : ne10-1)*nb0 + (ne11 == 0 ? 0 : ne11-1)*nb1 + (ne12 == 0 ? 0 : ne12-1)*nb2 + (ne13 == 0 ? 0 : ne13-1)*nb3 < ggml_nbytes(dst));
  7838. GGML_ASSERT(offset + (ne10 == 0 ? 0 : ne10-1)*nb00 + (ne11 == 0 ? 0 : ne11-1)*nb01 + (ne12 == 0 ? 0 : ne12-1)*nb02 + (ne13 == 0 ? 0 : ne13-1)*nb03 < ggml_nbytes(src0));
  7839. GGML_ASSERT(nb10 == sizeof(float));
  7840. // rows per thread
  7841. const int dr = (nr + nth - 1)/nth;
  7842. // row range for this thread
  7843. const int ir0 = dr*ith;
  7844. const int ir1 = MIN(ir0 + dr, nr);
  7845. for (int ir = ir0; ir < ir1; ++ir) {
  7846. // src0 and dst are viewed with shape of src1 and offset
  7847. // => same indices
  7848. const int i3 = ir/(ne12*ne11);
  7849. const int i2 = (ir - i3*ne12*ne11)/ne11;
  7850. const int i1 = (ir - i3*ne12*ne11 - i2*ne11);
  7851. #ifdef GGML_USE_ACCELERATE
  7852. vDSP_vadd(
  7853. (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + offset), 1,
  7854. (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11), 1,
  7855. (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + offset), 1, nc);
  7856. #else
  7857. ggml_vec_add_f32(nc,
  7858. (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + offset),
  7859. (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + offset),
  7860. (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11));
  7861. #endif
  7862. }
  7863. }

static void ggml_compute_forward_acc(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_acc_f32(params, dst);
            } break;
        case GGML_TYPE_F16:
        case GGML_TYPE_BF16:
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q8_1:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
        case GGML_TYPE_IQ2_XXS:
        case GGML_TYPE_IQ2_XS:
        case GGML_TYPE_IQ3_XXS:
        case GGML_TYPE_IQ1_S:
        case GGML_TYPE_IQ1_M:
        case GGML_TYPE_IQ4_NL:
        case GGML_TYPE_IQ4_XS:
        case GGML_TYPE_IQ3_S:
        case GGML_TYPE_IQ2_S:
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
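
// illustrative sketch (editor addition): how this op is normally reached from
// the public API, assuming a valid context `ctx` and f32 tensors `a` and `b`
// (ggml_acc is declared in ggml.h):
//
//     // view a with byte strides nb1/nb2/nb3 at byte offset `offset`,
//     // then add b into that view; the result has a's shape
//     struct ggml_tensor * r = ggml_acc(ctx, a, b, nb1, nb2, nb3, offset);
//
// the five op_params decoded at the top of ggml_compute_forward_acc_f32 are
// exactly these strides, the offset and the inplace flag.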

// ggml_compute_forward_sub

static void ggml_compute_forward_sub_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];
    const struct ggml_tensor * src1 = dst->src[1];

    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    const int nr = ggml_nrows(src0);

    GGML_TENSOR_BINARY_OP_LOCALS

    GGML_ASSERT( nb0 == sizeof(float));
    GGML_ASSERT(nb00 == sizeof(float));

    if (nb10 == sizeof(float)) {
        for (int ir = 0; ir < nr; ++ir) {
            // src0, src1 and dst are same shape => same indices
            const int i3 = ir/(ne2*ne1);
            const int i2 = (ir - i3*ne2*ne1)/ne1;
            const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

#ifdef GGML_USE_ACCELERATE
            vDSP_vsub(
                    (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11), 1,
                    (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01), 1,
                    (float *) ((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1 ), 1,
                    ne0);
#else
            ggml_vec_sub_f32(ne0,
                    (float *) ((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1 ),
                    (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01),
                    (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11));
#endif
        }
    } else {
        // src1 is not contiguous
        for (int ir = 0; ir < nr; ++ir) {
            // src0, src1 and dst are same shape => same indices
            const int i3 = ir/(ne2*ne1);
            const int i2 = (ir - i3*ne2*ne1)/ne1;
            const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

            float * dst_ptr  = (float *) ((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1 );
            float * src0_ptr = (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);

            for (int i0 = 0; i0 < ne0; i0++) {
                float * src1_ptr = (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11 + i0*nb10);

                dst_ptr[i0] = src0_ptr[i0] - *src1_ptr;
            }
        }
    }
}

static void ggml_compute_forward_sub(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_sub_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
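
// illustrative note (editor addition): the i3/i2/i1 recovery used by these
// binary ops inverts the flat row index
//
//     ir = i3*(ne2*ne1) + i2*ne1 + i1
//
// e.g. with ne1 = 4, ne2 = 3 and ir = 9: i3 = 9/12 = 0, i2 = 9/4 = 2,
// i1 = 9 - 2*4 = 1, so row 9 is (i1 = 1, i2 = 2, i3 = 0).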

// ggml_compute_forward_mul

static void ggml_compute_forward_mul_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];
    const struct ggml_tensor * src1 = dst->src[1];

    GGML_ASSERT(ggml_can_repeat(src1, src0) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

#if defined(GGML_USE_CLBLAST)
    if (src1->backend == GGML_BACKEND_TYPE_GPU) {
        // TODO: OpenCL kernel support full broadcast
        GGML_ASSERT(ggml_can_repeat_rows(src1, src0));
        if (ith == 0) {
            ggml_cl_mul(src0, src1, dst);
        }
        return;
    }
#endif

    const int64_t nr = ggml_nrows(src0);

    GGML_TENSOR_BINARY_OP_LOCALS

    GGML_ASSERT( nb0 == sizeof(float));
    GGML_ASSERT(nb00 == sizeof(float));

    if (nb10 == sizeof(float)) {
        for (int64_t ir = ith; ir < nr; ir += nth) {
            // src0 and dst are same shape => same indices
            const int64_t i03 = ir/(ne02*ne01);
            const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
            const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);

            const int64_t i13 = i03 % ne13;
            const int64_t i12 = i02 % ne12;
            const int64_t i11 = i01 % ne11;
            const int64_t nr0 = ne00 / ne10;

            float * dst_ptr  = (float *) ((char *)  dst->data + i03*nb3  + i02*nb2  + i01*nb1 );
            float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);
            float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11);

            for (int64_t r = 0; r < nr0; ++r) {
#ifdef GGML_USE_ACCELERATE
                UNUSED(ggml_vec_mul_f32);

                vDSP_vmul(src0_ptr + r*ne10, 1, src1_ptr, 1, dst_ptr + r*ne10, 1, ne10);
#else
                ggml_vec_mul_f32(ne10, dst_ptr + r*ne10, src0_ptr + r*ne10, src1_ptr);
#endif
            }
        }
    } else {
        // src1 is not contiguous
        for (int64_t ir = ith; ir < nr; ir += nth) {
            // src0 and dst are same shape => same indices
            // src1 is broadcastable across src0 and dst in i1, i2, i3
            const int64_t i03 = ir/(ne02*ne01);
            const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
            const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);

            const int64_t i13 = i03 % ne13;
            const int64_t i12 = i02 % ne12;
            const int64_t i11 = i01 % ne11;

            float * dst_ptr  = (float *) ((char *)  dst->data + i03*nb3  + i02*nb2  + i01*nb1 );
            float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);

            for (int64_t i0 = 0; i0 < ne00; ++i0) {
                const int64_t i10 = i0 % ne10;
                float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + i10*nb10);

                dst_ptr[i0] = src0_ptr[i0] * (*src1_ptr);
            }
        }
    }
}

static void ggml_compute_forward_mul(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];
    const struct ggml_tensor * src1 = dst->src[1];

    GGML_ASSERT(src1->type == GGML_TYPE_F32 && "only f32 src1 supported for now");

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_mul_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
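
// illustrative note (editor addition): broadcasting here has two parts - the
// outer dims wrap with modulo (i13 = i03 % ne13, ...) while the innermost dim
// repeats the src1 row nr0 = ne00/ne10 times. Assuming src0->ne = {8,4,2,1}
// and src1->ne = {4,1,2,1}:
//
//     nr0 = 8/4 = 2  -> each 4-float src1 row is applied twice per src0 row
//     i11 = i01 % 1  -> all rows within a matrix share src1 row 0
//     i12 = i02 % 2  -> the two matrices are matched one-to-one
//
// ggml_can_repeat(src1, src0) guarantees the ratios divide exactly.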

// ggml_compute_forward_div

static void ggml_compute_forward_div_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];
    const struct ggml_tensor * src1 = dst->src[1];

    GGML_ASSERT(ggml_can_repeat(src1, src0) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int64_t nr = ggml_nrows(src0);

    GGML_TENSOR_BINARY_OP_LOCALS

    GGML_ASSERT( nb0 == sizeof(float));
    GGML_ASSERT(nb00 == sizeof(float));

    if (nb10 == sizeof(float)) {
        for (int64_t ir = ith; ir < nr; ir += nth) {
            // src0 and dst are same shape => same indices
            const int64_t i03 = ir/(ne02*ne01);
            const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
            const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);

            const int64_t i13 = i03 % ne13;
            const int64_t i12 = i02 % ne12;
            const int64_t i11 = i01 % ne11;
            const int64_t nr0 = ne00 / ne10;

            float * dst_ptr  = (float *) ((char *)  dst->data + i03*nb3  + i02*nb2  + i01*nb1 );
            float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);
            float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11);

            for (int64_t r = 0; r < nr0; ++r) {
#ifdef GGML_USE_ACCELERATE
                UNUSED(ggml_vec_div_f32);

                // vDSP_vdiv computes C = A/B and takes the divisor B first,
                // hence src1_ptr (the divisor) precedes src0_ptr here
                vDSP_vdiv(src1_ptr, 1, src0_ptr + r*ne10, 1, dst_ptr + r*ne10, 1, ne10);
#else
                ggml_vec_div_f32(ne10, dst_ptr + r*ne10, src0_ptr + r*ne10, src1_ptr);
#endif
            }
        }
    } else {
        // src1 is not contiguous
        for (int64_t ir = ith; ir < nr; ir += nth) {
            // src0 and dst are same shape => same indices
            // src1 is broadcastable across src0 and dst in i1, i2, i3
            const int64_t i03 = ir/(ne02*ne01);
            const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
            const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);

            const int64_t i13 = i03 % ne13;
            const int64_t i12 = i02 % ne12;
            const int64_t i11 = i01 % ne11;

            float * dst_ptr  = (float *) ((char *)  dst->data + i03*nb3  + i02*nb2  + i01*nb1 );
            float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);

            for (int64_t i0 = 0; i0 < ne00; ++i0) {
                const int64_t i10 = i0 % ne10;
                float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + i10*nb10);

                dst_ptr[i0] = src0_ptr[i0] / (*src1_ptr);
            }
        }
    }
}

static void ggml_compute_forward_div(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_div_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_sqr

static void ggml_compute_forward_sqr_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert( dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_sqr_f32(nc,
                (float *) ((char *)  dst->data + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

static void ggml_compute_forward_sqr(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_sqr_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_sqrt

static void ggml_compute_forward_sqrt_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert( dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_sqrt_f32(nc,
                (float *) ((char *)  dst->data + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

static void ggml_compute_forward_sqrt(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_sqrt_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_log

static void ggml_compute_forward_log_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    GGML_ASSERT(params->ith == 0);
    GGML_ASSERT(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    GGML_ASSERT( dst->nb[0] == sizeof(float));
    GGML_ASSERT(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_log_f32(nc,
                (float *) ((char *)  dst->data + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

static void ggml_compute_forward_log(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_log_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_sum

static void ggml_compute_forward_sum_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    assert(params->ith == 0);
    assert(ggml_is_scalar(dst));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    assert(src0->nb[0] == sizeof(float));

    GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne)
    GGML_TENSOR_LOCALS(size_t,  nb0, src0, nb)

    ggml_float sum     = 0;
    ggml_float row_sum = 0;

    for (int64_t i03 = 0; i03 < ne03; i03++) {
        for (int64_t i02 = 0; i02 < ne02; i02++) {
            for (int64_t i01 = 0; i01 < ne01; i01++) {
                ggml_vec_sum_f32_ggf(ne00,
                        &row_sum,
                        (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03));
                sum += row_sum;
            }
        }
    }

    ((float *) dst->data)[0] = sum;
}

static void ggml_compute_forward_sum_f16(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    assert(params->ith == 0);
    assert(ggml_is_scalar(dst));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    assert(src0->nb[0] == sizeof(ggml_fp16_t));

    GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne)
    GGML_TENSOR_LOCALS(size_t,  nb0, src0, nb)

    float sum     = 0;
    float row_sum = 0;

    for (int64_t i03 = 0; i03 < ne03; i03++) {
        for (int64_t i02 = 0; i02 < ne02; i02++) {
            for (int64_t i01 = 0; i01 < ne01; i01++) {
                ggml_vec_sum_f16_ggf(ne00,
                        &row_sum,
                        (ggml_fp16_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03));
                sum += row_sum;
            }
        }
    }

    ((ggml_fp16_t *) dst->data)[0] = GGML_FP32_TO_FP16(sum);
}

static void ggml_compute_forward_sum_bf16(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    assert(params->ith == 0);
    assert(ggml_is_scalar(dst));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    assert(src0->nb[0] == sizeof(ggml_bf16_t));

    GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne)
    GGML_TENSOR_LOCALS(size_t,  nb0, src0, nb)

    float sum     = 0;
    float row_sum = 0;

    for (int64_t i03 = 0; i03 < ne03; i03++) {
        for (int64_t i02 = 0; i02 < ne02; i02++) {
            for (int64_t i01 = 0; i01 < ne01; i01++) {
                ggml_vec_sum_bf16_ggf(ne00,
                        &row_sum,
                        (ggml_bf16_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03));
                sum += row_sum;
            }
        }
    }

    ((ggml_bf16_t *) dst->data)[0] = GGML_FP32_TO_BF16(sum);
}

static void ggml_compute_forward_sum(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_sum_f32(params, dst);
            } break;
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_sum_f16(params, dst);
            } break;
        case GGML_TYPE_BF16:
            {
                ggml_compute_forward_sum_bf16(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
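
// illustrative note (editor addition): the f32 path accumulates in ggml_float
// (a double in this file) because plain f32 summation stalls once the running
// total is large; in IEEE-754 single precision
//
//     16777216.0f + 1.0f == 16777216.0f   // 2^24 + 1 is not representable
//
// so a long sum of small values would plateau in f32, while the wider
// accumulator keeps exact integer precision up to 2^53.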

// ggml_compute_forward_sum_rows

static void ggml_compute_forward_sum_rows_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    GGML_ASSERT(params->ith == 0);

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    GGML_ASSERT(src0->nb[0] == sizeof(float));
    GGML_ASSERT( dst->nb[0] == sizeof(float));

    GGML_TENSOR_UNARY_OP_LOCALS

    GGML_ASSERT(ne0 == 1);
    GGML_ASSERT(ne1 == ne01);
    GGML_ASSERT(ne2 == ne02);
    GGML_ASSERT(ne3 == ne03);

    for (int64_t i3 = 0; i3 < ne03; i3++) {
        for (int64_t i2 = 0; i2 < ne02; i2++) {
            for (int64_t i1 = 0; i1 < ne01; i1++) {
                float * src_row = (float *) ((char *) src0->data + i1*nb01 + i2*nb02 + i3*nb03);
                float * dst_row = (float *) ((char *)  dst->data + i1*nb1  + i2*nb2  + i3*nb3);
                float row_sum = 0;
                ggml_vec_sum_f32(ne00, &row_sum, src_row);
                dst_row[0] = row_sum;
            }
        }
    }
}

static void ggml_compute_forward_sum_rows(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_sum_rows_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_mean

static void ggml_compute_forward_mean_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    assert(params->ith == 0);

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    assert(src0->nb[0] == sizeof(float));

    GGML_TENSOR_UNARY_OP_LOCALS

    assert(ne0 == 1);
    assert(ne1 == ne01);
    assert(ne2 == ne02);
    assert(ne3 == ne03);

    UNUSED(ne0);
    UNUSED(ne1);
    UNUSED(ne2);
    UNUSED(ne3);

    for (int64_t i03 = 0; i03 < ne03; i03++) {
        for (int64_t i02 = 0; i02 < ne02; i02++) {
            for (int64_t i01 = 0; i01 < ne01; i01++) {
                ggml_vec_sum_f32(ne00,
                        (float *) ((char *)  dst->data + i01*nb1  + i02*nb2  + i03*nb3),
                        (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03));

                *(float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3) /= (float) ne00;
            }
        }
    }
}

static void ggml_compute_forward_mean(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_mean_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_argmax

static void ggml_compute_forward_argmax_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    assert(params->ith == 0);

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    assert(src0->nb[0] == sizeof(float));
    // dst holds int32 indices; the assert holds because sizeof(int32_t) == sizeof(float)
    assert( dst->nb[0] == sizeof(float));

    const int64_t ne00 = src0->ne[0];
    const int64_t ne01 = src0->ne[1];

    const size_t nb01 = src0->nb[1];
    const size_t nb0  =  dst->nb[0];

    for (int64_t i1 = 0; i1 < ne01; i1++) {
        float   * src  = (float   *) ((char *) src0->data + i1*nb01);
        int32_t * dst_ = (int32_t *) ((char *)  dst->data + i1*nb0);
        int v = 0;
        ggml_vec_argmax_f32(ne00, &v, src);
        dst_[0] = v;
    }
}

static void ggml_compute_forward_argmax(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_argmax_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
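
// illustrative note (editor addition): argmax reduces along dim 0, writing one
// int32 index per row. For a src0 row {0.1f, 2.5f, -1.0f, 2.4f},
// ggml_vec_argmax_f32 leaves v == 1 (the position of the maximum 2.5f), which
// is then stored into the corresponding i32 dst slot.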

// ggml_compute_forward_repeat

static void ggml_compute_forward_repeat_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    GGML_ASSERT(params->ith == 0);
    GGML_ASSERT(ggml_can_repeat(src0, dst));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    GGML_TENSOR_UNARY_OP_LOCALS

    // guaranteed to be an integer due to the check in ggml_can_repeat
    const int nr0 = (int)(ne0/ne00);
    const int nr1 = (int)(ne1/ne01);
    const int nr2 = (int)(ne2/ne02);
    const int nr3 = (int)(ne3/ne03);

    // TODO: support for transposed / permuted tensors
    GGML_ASSERT(nb0  == sizeof(float));
    GGML_ASSERT(nb00 == sizeof(float));

    // TODO: maybe this is not optimal?
    for (int i3 = 0; i3 < nr3; i3++) {
        for (int k3 = 0; k3 < ne03; k3++) {
            for (int i2 = 0; i2 < nr2; i2++) {
                for (int k2 = 0; k2 < ne02; k2++) {
                    for (int i1 = 0; i1 < nr1; i1++) {
                        for (int k1 = 0; k1 < ne01; k1++) {
                            for (int i0 = 0; i0 < nr0; i0++) {
                                ggml_vec_cpy_f32(ne00,
                                        (float *) ((char *)  dst->data + (i3*ne03 + k3)*nb3  + (i2*ne02 + k2)*nb2  + (i1*ne01 + k1)*nb1  + (i0*ne00)*nb0),
                                        (float *) ((char *) src0->data + (          k3)*nb03 + (          k2)*nb02 + (          k1)*nb01));
                            }
                        }
                    }
                }
            }
        }
    }
}

static void ggml_compute_forward_repeat_f16(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    GGML_ASSERT(params->ith == 0);
    GGML_ASSERT(ggml_can_repeat(src0, dst));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    GGML_TENSOR_UNARY_OP_LOCALS

    // guaranteed to be an integer due to the check in ggml_can_repeat
    const int nr0 = (int)(ne0/ne00);
    const int nr1 = (int)(ne1/ne01);
    const int nr2 = (int)(ne2/ne02);
    const int nr3 = (int)(ne3/ne03);

    // TODO: support for transposed / permuted tensors
    GGML_ASSERT(nb0  == sizeof(ggml_fp16_t));
    GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));

    // TODO: maybe this is not optimal?
    for (int i3 = 0; i3 < nr3; i3++) {
        for (int k3 = 0; k3 < ne03; k3++) {
            for (int i2 = 0; i2 < nr2; i2++) {
                for (int k2 = 0; k2 < ne02; k2++) {
                    for (int i1 = 0; i1 < nr1; i1++) {
                        for (int k1 = 0; k1 < ne01; k1++) {
                            for (int i0 = 0; i0 < nr0; i0++) {
                                ggml_fp16_t * y = (ggml_fp16_t *) ((char *)  dst->data + (i3*ne03 + k3)*nb3  + (i2*ne02 + k2)*nb2  + (i1*ne01 + k1)*nb1  + (i0*ne00)*nb0);
                                ggml_fp16_t * x = (ggml_fp16_t *) ((char *) src0->data + (          k3)*nb03 + (          k2)*nb02 + (          k1)*nb01);
                                // ggml_vec_cpy_f16(ne00, y, x)
                                for (int i = 0; i < ne00; ++i) {
                                    y[i] = x[i];
                                }
                            }
                        }
                    }
                }
            }
        }
    }
}

static void ggml_compute_forward_repeat(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F16:
        case GGML_TYPE_BF16:
        case GGML_TYPE_I16:
            {
                ggml_compute_forward_repeat_f16(params, dst);
            } break;
        case GGML_TYPE_F32:
        case GGML_TYPE_I32:
            {
                ggml_compute_forward_repeat_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
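
// illustrative note (editor addition): repeat tiles src0 into dst and the nr*
// factors count copies per dim. Assuming src0->ne = {2,3,1,1} and
// dst->ne = {4,6,1,1}:
//
//     nr0 = 4/2 = 2, nr1 = 6/3 = 2, nr2 = nr3 = 1
//
// each 2-float source row is copied twice along dim 0 and the 3-row block
// twice along dim 1; ggml_can_repeat has already checked that every ratio
// divides exactly.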

// ggml_compute_forward_repeat_back

static void ggml_compute_forward_repeat_back_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    GGML_ASSERT(params->ith == 0);
    GGML_ASSERT(ggml_can_repeat(dst, src0));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    GGML_TENSOR_UNARY_OP_LOCALS

    // guaranteed to be an integer due to the check in ggml_can_repeat
    const int nr0 = (int)(ne00/ne0);
    const int nr1 = (int)(ne01/ne1);
    const int nr2 = (int)(ne02/ne2);
    const int nr3 = (int)(ne03/ne3);

    // TODO: support for transposed / permuted tensors
    GGML_ASSERT(nb0  == sizeof(float));
    GGML_ASSERT(nb00 == sizeof(float));

    if (ggml_is_contiguous(dst)) {
        ggml_vec_set_f32(ne0*ne1*ne2*ne3, dst->data, 0);
    } else {
        for (int k3 = 0; k3 < ne3; k3++) {
            for (int k2 = 0; k2 < ne2; k2++) {
                for (int k1 = 0; k1 < ne1; k1++) {
                    ggml_vec_set_f32(ne0,
                            (float *) ((char *) dst->data + k1*nb1 + k2*nb2 + k3*nb3),
                            0);
                }
            }
        }
    }

    // TODO: maybe this is not optimal?
    for (int i3 = 0; i3 < nr3; i3++) {
        for (int k3 = 0; k3 < ne3; k3++) {
            for (int i2 = 0; i2 < nr2; i2++) {
                for (int k2 = 0; k2 < ne2; k2++) {
                    for (int i1 = 0; i1 < nr1; i1++) {
                        for (int k1 = 0; k1 < ne1; k1++) {
                            for (int i0 = 0; i0 < nr0; i0++) {
                                ggml_vec_acc_f32(ne0,
                                        (float *) ((char *)  dst->data + (         k3)*nb3  + (         k2)*nb2  + (         k1)*nb1),
                                        (float *) ((char *) src0->data + (i3*ne3 + k3)*nb03 + (i2*ne2 + k2)*nb02 + (i1*ne1 + k1)*nb01 + (i0*ne0)*nb00));
                            }
                        }
                    }
                }
            }
        }
    }
}

static void ggml_compute_forward_repeat_back(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_repeat_back_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
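
// illustrative note (editor addition): repeat_back is the adjoint of repeat -
// where the forward op copied one source element into nr0*nr1*nr2*nr3
// destination slots, the backward op must sum those slots back into a single
// element, which is why dst is zeroed first and ggml_vec_acc_f32 (+=) is used
// instead of a copy.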

// ggml_compute_forward_concat

static void ggml_compute_forward_concat_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];
    const struct ggml_tensor * src1 = dst->src[1];

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    GGML_ASSERT(src0->nb[0] == sizeof(float));

    const int ith = params->ith;
    const int nth = params->nth;

    GGML_TENSOR_BINARY_OP_LOCALS

    // TODO: support for transposed / permuted tensors
    GGML_ASSERT(nb0  == sizeof(float));
    GGML_ASSERT(nb00 == sizeof(float));
    GGML_ASSERT(nb10 == sizeof(float));

    for (int i3 = 0; i3 < ne3; i3++) {
        for (int i2 = ith; i2 < ne2; i2 += nth) {
            if (i2 < ne02) { // src0
                for (int i1 = 0; i1 < ne1; i1++) {
                    for (int i0 = 0; i0 < ne0; i0++) {
                        const float * x = (float *) ((char *) src0->data + i0*nb00 + i1*nb01 + i2*nb02 + i3*nb03);

                        float * y = (float *) ((char *) dst->data + i0*nb0 + i1*nb1 + i2*nb2 + i3*nb3);
                        *y = *x;
                    }
                }
            } else { // src1
                for (int i1 = 0; i1 < ne1; i1++) {
                    for (int i0 = 0; i0 < ne0; i0++) {
                        const float * x = (float *) ((char *) src1->data + i0*nb10 + i1*nb11 + (i2 - ne02)*nb12 + i3*nb13);

                        float * y = (float *) ((char *) dst->data + i0*nb0 + i1*nb1 + i2*nb2 + i3*nb3);
                        *y = *x;
                    }
                }
            }
        }
    }
}

static void ggml_compute_forward_concat(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
        case GGML_TYPE_I32:
            {
                ggml_compute_forward_concat_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
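
// illustrative note (editor addition): this kernel concatenates along dim 2 -
// slices with i2 < ne02 are copied from src0, the rest from src1 at
// (i2 - ne02). E.g. concatenating src0 of ne = {4,2,3,1} with src1 of
// ne = {4,2,5,1} gives dst of ne = {4,2,8,1}, slices 0..2 from src0 and
// 3..7 from src1.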

// ggml_compute_forward_abs

static void ggml_compute_forward_abs_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert( dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_abs_f32(nc,
                (float *) ((char *)  dst->data + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

static void ggml_compute_forward_abs(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_abs_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_sgn

static void ggml_compute_forward_sgn_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert( dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_sgn_f32(nc,
                (float *) ((char *)  dst->data + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

static void ggml_compute_forward_sgn(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_sgn_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_neg

static void ggml_compute_forward_neg_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert( dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_neg_f32(nc,
                (float *) ((char *)  dst->data + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

static void ggml_compute_forward_neg(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_neg_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_step

static void ggml_compute_forward_step_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert( dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_step_f32(nc,
                (float *) ((char *)  dst->data + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

static void ggml_compute_forward_step(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_step_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_tanh

static void ggml_compute_forward_tanh_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert( dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_tanh_f32(nc,
                (float *) ((char *)  dst->data + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

static void ggml_compute_forward_tanh(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_tanh_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_elu

static void ggml_compute_forward_elu_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert( dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_elu_f32(nc,
                (float *) ((char *)  dst->data + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

static void ggml_compute_forward_elu(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_elu_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_relu

static void ggml_compute_forward_relu_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert( dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_relu_f32(nc,
                (float *) ((char *)  dst->data + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

static void ggml_compute_forward_relu(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_relu_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_sigmoid

static void ggml_compute_forward_sigmoid_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert( dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_sigmoid_f32(nc,
                (float *) ((char *)  dst->data + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

static void ggml_compute_forward_sigmoid(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_sigmoid_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_gelu

static void ggml_compute_forward_gelu_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    GGML_ASSERT(ggml_is_contiguous_except_dim_1(src0));
    GGML_ASSERT(ggml_is_contiguous_except_dim_1(dst));
    GGML_ASSERT(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int nc = src0->ne[0];
    const int nr = ggml_nrows(src0);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        ggml_vec_gelu_f32(nc,
                (float *) ((char *)  dst->data + i1*( dst->nb[1])),
                (float *) ((char *) src0->data + i1*(src0->nb[1])));

#ifndef NDEBUG
        for (int k = 0; k < nc; k++) {
            const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k];
            UNUSED(x);
            assert(!isnan(x));
            assert(!isinf(x));
        }
#endif
    }
}

static void ggml_compute_forward_gelu(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_gelu_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
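
// illustrative note (editor addition): ggml_vec_gelu_f32 evaluates the usual
// tanh approximation of GELU (in some builds via a precomputed f16 lookup
// table defined earlier in this file):
//
//     gelu(x) ~= 0.5*x*(1 + tanh(sqrt(2/pi)*(x + 0.044715*x^3)))
//
// the NDEBUG block after each row is a cheap sanity check that no NaN/inf
// escaped the approximation.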

// ggml_compute_forward_gelu_quick

static void ggml_compute_forward_gelu_quick_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    GGML_ASSERT(ggml_is_contiguous_except_dim_1(src0));
    GGML_ASSERT(ggml_is_contiguous_except_dim_1(dst));
    GGML_ASSERT(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int nc = src0->ne[0];
    const int nr = ggml_nrows(src0);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        ggml_vec_gelu_quick_f32(nc,
                (float *) ((char *)  dst->data + i1*( dst->nb[1])),
                (float *) ((char *) src0->data + i1*(src0->nb[1])));

#ifndef NDEBUG
        for (int k = 0; k < nc; k++) {
            const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k];
            UNUSED(x);
            assert(!isnan(x));
            assert(!isinf(x));
        }
#endif
    }
}

static void ggml_compute_forward_gelu_quick(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_gelu_quick_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_silu

static void ggml_compute_forward_silu_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    GGML_ASSERT(ggml_is_contiguous_except_dim_1(src0));
    GGML_ASSERT(ggml_is_contiguous_except_dim_1(dst));
    GGML_ASSERT(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int nc = src0->ne[0];
    const int nr = ggml_nrows(src0);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        ggml_vec_silu_f32(nc,
                (float *) ((char *)  dst->data + i1*( dst->nb[1])),
                (float *) ((char *) src0->data + i1*(src0->nb[1])));

#ifndef NDEBUG
        for (int k = 0; k < nc; k++) {
            const float x = ((float *) ((char *) dst->data + i1*(dst->nb[1])))[k];
            UNUSED(x);
            assert(!isnan(x));
            assert(!isinf(x));
        }
#endif
    }
}

static void ggml_compute_forward_silu(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_silu_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
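
// illustrative note (editor addition): SiLU (a.k.a. swish) is
//
//     silu(x) = x*sigmoid(x) = x/(1 + exp(-x))
//
// and its derivative, needed by silu_back further below, follows from the
// product rule:
//
//     silu'(x) = sigmoid(x)*(1 + x*(1 - sigmoid(x)))
//
// so the backward kernel only needs x and the incoming gradient.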

// ggml_compute_forward_leaky_relu

static void ggml_compute_forward_leaky_relu_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    float negative_slope;
    memcpy(&negative_slope, dst->op_params, sizeof(float));

    assert( dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_leaky_relu_f32(nc,
                (float *) ((char *)  dst->data + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])), negative_slope);
    }
}

static void ggml_compute_forward_leaky_relu(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_leaky_relu_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_silu_back

static void ggml_compute_forward_silu_back_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];
    const struct ggml_tensor * grad = dst->src[1];

    GGML_ASSERT(ggml_is_contiguous_except_dim_1(grad));
    GGML_ASSERT(ggml_is_contiguous_except_dim_1(src0));
    GGML_ASSERT(ggml_is_contiguous_except_dim_1(dst));
    GGML_ASSERT(ggml_are_same_shape(src0, dst));
    GGML_ASSERT(ggml_are_same_shape(src0, grad));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int nc = src0->ne[0];
    const int nr = ggml_nrows(src0);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        ggml_vec_silu_backward_f32(nc,
                (float *) ((char *)  dst->data + i1*( dst->nb[1])),
                (float *) ((char *) src0->data + i1*(src0->nb[1])),
                (float *) ((char *) grad->data + i1*(grad->nb[1])));

#ifndef NDEBUG
        for (int k = 0; k < nc; k++) {
            const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k];
            UNUSED(x);
            assert(!isnan(x));
            assert(!isinf(x));
        }
#endif
    }
}

static void ggml_compute_forward_silu_back(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_silu_back_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_hardswish

static void ggml_compute_forward_hardswish_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert( dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_hardswish_f32(nc,
                (float *) ((char *)  dst->data + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

static void ggml_compute_forward_hardswish(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_hardswish_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_hardsigmoid

static void ggml_compute_forward_hardsigmoid_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert( dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_hardsigmoid_f32(nc,
                (float *) ((char *)  dst->data + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

static void ggml_compute_forward_hardsigmoid(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_hardsigmoid_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_norm

static void ggml_compute_forward_norm_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    GGML_ASSERT(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    GGML_ASSERT(src0->nb[0] == sizeof(float));

    const int ith = params->ith;
    const int nth = params->nth;

    GGML_TENSOR_UNARY_OP_LOCALS

    float eps;
    memcpy(&eps, dst->op_params, sizeof(float));

    GGML_ASSERT(eps > 0.0f);

    // TODO: optimize
    for (int64_t i03 = 0; i03 < ne03; i03++) {
        for (int64_t i02 = 0; i02 < ne02; i02++) {
            for (int64_t i01 = ith; i01 < ne01; i01 += nth) {
                const float * x = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);

                ggml_float sum = 0.0;
                for (int64_t i00 = 0; i00 < ne00; i00++) {
                    sum += (ggml_float)x[i00];
                }

                float mean = sum/ne00;

                float * y = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3);

                ggml_float sum2 = 0.0;
                for (int64_t i00 = 0; i00 < ne00; i00++) {
                    float v = x[i00] - mean;
                    y[i00] = v;
                    sum2 += (ggml_float)(v*v);
                }

                float variance = sum2/ne00;
                const float scale = 1.0f/sqrtf(variance + eps);

                ggml_vec_scale_f32(ne00, y, scale);
            }
        }
    }
}

static void ggml_compute_forward_norm(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_norm_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
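
// illustrative note (editor addition): the kernel above is LayerNorm without
// the affine scale/bias, applied per row along dim 0:
//
//     mean = (1/N) * sum_i x_i
//     var  = (1/N) * sum_i (x_i - mean)^2
//     y_i  = (x_i - mean) / sqrt(var + eps)
//
// any learned gamma/beta is applied by separate mul/add nodes in the graph.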

// ggml_compute_forward_rms_norm

static void ggml_compute_forward_rms_norm_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    GGML_ASSERT(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    GGML_ASSERT(src0->nb[0] == sizeof(float));

    const int ith = params->ith;
    const int nth = params->nth;

    GGML_TENSOR_UNARY_OP_LOCALS

    float eps;
    memcpy(&eps, dst->op_params, sizeof(float));

    GGML_ASSERT(eps > 0.0f);

    // TODO: optimize
    for (int64_t i03 = 0; i03 < ne03; i03++) {
        for (int64_t i02 = 0; i02 < ne02; i02++) {
            for (int64_t i01 = ith; i01 < ne01; i01 += nth) {
                const float * x = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);

                ggml_float sum = 0.0;
                for (int64_t i00 = 0; i00 < ne00; i00++) {
                    sum += (ggml_float)(x[i00] * x[i00]);
                }

                const float mean = sum/ne00;

                float * y = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3);

                memcpy(y, x, ne00 * sizeof(float));
                // for (int i00 = 0; i00 < ne00; i00++) {
                //     y[i00] = x[i00];
                // }

                const float scale = 1.0f/sqrtf(mean + eps);

                ggml_vec_scale_f32(ne00, y, scale);
            }
        }
    }
}

static void ggml_compute_forward_rms_norm(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_rms_norm_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
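
// illustrative note (editor addition): RMSNorm drops the mean subtraction of
// LayerNorm and normalizes by the root mean square alone:
//
//     rms = sqrt((1/N) * sum_i x_i^2 + eps)
//     y_i = x_i / rms
//
// which is exactly the sum of squares and 1.0f/sqrtf(mean + eps) scale
// computed above.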

static void ggml_compute_forward_rms_norm_back_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];
    const struct ggml_tensor * src1 = dst->src[1];

    GGML_ASSERT(ggml_are_same_shape(src0, dst) && ggml_are_same_shape(src0, src1));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    GGML_ASSERT(src0->nb[0] == sizeof(float));

    const int ith = params->ith;
    const int nth = params->nth;

    GGML_TENSOR_BINARY_OP_LOCALS

    float eps;
    memcpy(&eps, dst->op_params, sizeof(float));

    // TODO: optimize
    for (int64_t i03 = 0; i03 < ne03; i03++) {
        for (int64_t i02 = 0; i02 < ne02; i02++) {
            for (int64_t i01 = ith; i01 < ne01; i01 += nth) {
                // src1 is same shape as src0 => same indices
                const int64_t i11 = i01;
                const int64_t i12 = i02;
                const int64_t i13 = i03;

                const float * x  = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
                const float * dz = (float *) ((char *) src1->data + i11*nb11 + i12*nb12 + i13*nb13);

                ggml_float sum_xx  = 0.0;
                ggml_float sum_xdz = 0.0;

                for (int64_t i00 = 0; i00 < ne00; i00++) {
                    sum_xx  += (ggml_float)(x[i00] * x[i00]);
                    sum_xdz += (ggml_float)(x[i00] * dz[i00]);
                }

                //const float mean     = (float)(sum_xx)/ne00;
                const float mean_eps = (float)(sum_xx)/ne00 + eps;
                const float sum_eps  = (float)(sum_xx) + eps*ne00;
                //const float mean_xdz = (float)(sum_xdz)/ne00;
                // we could cache rms from forward pass to improve performance.
                // to do this implement ggml_rms and compose ggml_rms_norm using ggml_rms.
                //const float rms = sqrtf(mean_eps);
                const float rrms = 1.0f / sqrtf(mean_eps);
                //const float scale = -rrms/(ne00 * mean_eps); // -1/(n*rms**3)

                {
                    // z = rms_norm(x)
                    //
                    // rms_norm(src0) =
                    //     scale(
                    //         src0,
                    //         div(
                    //             1,
                    //             sqrt(
                    //                 add(
                    //                     scale(
                    //                         sum(
                    //                             sqr(
                    //                                 src0)),
                    //                         (1.0/N)),
                    //                     eps))));
                    //
                    // postorder:
                    // ## op    args         grad
                    // 00 param src0         grad[#00]
                    // 01 const 1
                    // 02 sqr   (#00)        grad[#02]
                    // 03 sum   (#02)        grad[#03]
                    // 04 const 1/N
                    // 05 scale (#03, #04)   grad[#05]
                    // 06 const eps
                    // 07 add   (#05, #06)   grad[#07]
                    // 08 sqrt  (#07)        grad[#08]
                    // 09 div   (#01,#08)    grad[#09]
                    // 10 scale (#00,#09)    grad[#10]
                    //
                    // backward pass, given grad[#10]
                    // #10: scale
                    // grad[#00] += scale(grad[#10],#09)
                    // grad[#09] += sum(mul(grad[#10],#00))
                    // #09: div
                    // grad[#08] += neg(mul(grad[#09], div(#09,#08)))
                    // #08: sqrt
                    // grad[#07] += mul(grad[#08], div(0.5, #08))
                    // #07: add
                    // grad[#05] += grad[#07]
                    // #05: scale
                    // grad[#03] += scale(grad[#05],#04)
                    // #03: sum
                    // grad[#02] += repeat(grad[#03], #02)
                    // #02: sqr
                    // grad[#00] += scale(mul(#00, grad[#02]), 2.0)
                    //
                    // substitute and simplify:
                    // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, grad[#02]), 2.0)
                    // grad[#02] = repeat(grad[#03], #02)
                    // grad[#02] = repeat(scale(grad[#05],#04), #02)
                    // grad[#02] = repeat(scale(grad[#07],#04), #02)
                    // grad[#02] = repeat(scale(mul(grad[#08], div(0.5, #08)),#04), #02)
                    // grad[#02] = repeat(scale(mul(neg(mul(grad[#09], div(#09,#08))), div(0.5, #08)),#04), #02)
                    // grad[#02] = repeat(scale(mul(neg(mul(sum(mul(grad[#10],#00)), div(#09,#08))), div(0.5, #08)),#04), #02)
                    // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(#09,#08) * div(0.5, #08) * (1/N)), #02)
                    // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(div(#01,#08),#08) * div(0.5, #08) * (1/N)), #02)
                    // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(1,#08*#08) * div(0.5, #08) * (1/N)), #02)
                    // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N)), #02)
                    // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, grad[#02]), 2.0)
                    // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, repeat(-(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N)), #02)), 2.0)
                    // grad[#00] = scale(grad(#10), #09) + scale(scale(#00, -(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N))), 2.0)
                    // grad[#00] = scale(grad(#10), #09) + scale(#00, -(sum(mul(grad[#10],#00)) * div(1,#07) * div(1,#08) * (1/N)))
                    // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(1,#07*#08) * (-1/N))
                    // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(1,mean_eps*rms) * (-1/N))
                    // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*mean_eps))
                    // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*(sum_xx/N+eps)))
                    // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*sum_xx+rms*N*eps))
                    // grad[#00] = scale(dz, rrms) + scale(x, sum(mul(dz,x)) * div(-1,rms*N*mean_eps))
  9486. // grad[#00] = scale(dz, rrms) + scale(x, sum_xdz * div(-1,rms*N*mean_eps))
  9487. // a = b*c + d*e
  9488. // a = b*c*f/f + d*e*f/f
  9489. // a = (b*c*f + d*e*f)*(1/f)
  9490. // a = (b*c*(1/c) + d*e*(1/c))*(1/(1/c))
  9491. // a = (b + d*e/c)*c
  9492. // b = dz, c = rrms, d = x, e = sum_xdz * div(-1,rms*N*mean_eps)
  9493. // a = (dz + x*sum_xdz * div(-1,rms*N*mean_eps)/rrms)*rrms
  9494. // a = (dz + x*sum_xdz * div(-1,rms*N*mean_eps)*rms)*rrms
  9495. // a = (dz + x*sum_xdz * div(-rms,rms*N*mean_eps))*rrms
  9496. // a = (dz + x*sum_xdz * div(-1,N*mean_eps))*rrms
  9497. // a = (dz + x*div(-sum_xdz,N*mean_eps))*rrms
  9498. // a = (dz + x*div(-mean_xdz,mean_eps))*rrms
  9499. // grad[#00] = scale(dz + scale(x, div(-mean_xdz,mean_eps)),rrms)
  9500. // grad[#00] = scale(dz + scale(x, -mean_xdz/mean_eps),rrms)
  9501. // dx = scale(dz + scale(x, -mean_xdz/mean_eps),rrms)
  9502. }
  9503. // dx = scale(dz + scale(x, -mean_xdz/mean_eps),rrms)
  9504. // post-order:
  9505. // dx := x
  9506. // dx := scale(dx,-mean_xdz/mean_eps)
  9507. // dx := add(dx, dz)
  9508. // dx := scale(dx, rrms)
  9509. float * dx = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3);
  9510. ggml_vec_cpy_f32 (ne00, dx, x);
  9511. // ggml_vec_scale_f32(ne00, dx, -mean_xdz/mean_eps);
  9512. ggml_vec_scale_f32(ne00, dx, (float)(-sum_xdz)/sum_eps);
  9513. ggml_vec_acc_f32 (ne00, dx, dz);
  9514. ggml_vec_scale_f32(ne00, dx, rrms);
  9515. }
  9516. }
  9517. }
  9518. }
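
// illustrative check (commented out, not part of the build): the closed form above,
// dx = (dz + x * (-mean_xdz/mean_eps)) * rrms, can be sanity-checked per element against
// a numerical derivative of the forward pass. a sketch, assuming the hypothetical
// rms_norm_row_ref() helper outlined after the forward kernel, with L = sum(dz .* z):
//
//     static float rms_norm_back_fd(int n, float * xp, float * xm, float * zp, float * zm,
//                                   const float * x, const float * dz, int i, float eps) {
//         const float h = 1e-3f;
//         for (int k = 0; k < n; k++) { xp[k] = x[k]; xm[k] = x[k]; }
//         xp[i] += h; xm[i] -= h;
//         rms_norm_row_ref(n, zp, xp, eps);
//         rms_norm_row_ref(n, zm, xm, eps);
//         float g = 0.0f;
//         for (int k = 0; k < n; k++) g += dz[k]*(zp[k] - zm[k])/(2.0f*h);
//         return g; // should be close to dx[i] computed by the kernel
//     }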

static void ggml_compute_forward_rms_norm_back(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_rms_norm_back_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_group_norm

static void ggml_compute_forward_group_norm_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    GGML_ASSERT(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    GGML_ASSERT(src0->nb[0] == sizeof(float));

    const int ith = params->ith;
    const int nth = params->nth;

    GGML_TENSOR_UNARY_OP_LOCALS

    const float eps = 1e-6f; // TODO: make this a parameter

    // TODO: optimize
    int n_channels = src0->ne[2];
    int n_groups = dst->op_params[0];
    int n_channels_per_group = (n_channels + n_groups - 1) / n_groups;
    for (int i = ith; i < n_groups; i += nth) {
        int start = i * n_channels_per_group;
        int end = start + n_channels_per_group;
        if (end > n_channels) {
            end = n_channels;
        }
        int step = end - start;

        for (int64_t i03 = 0; i03 < ne03; i03++) {
            ggml_float sum = 0.0;
            for (int64_t i02 = start; i02 < end; i02++) {
                for (int64_t i01 = 0; i01 < ne01; i01++) {
                    const float * x = (float *)((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03);

                    ggml_float sumr = 0.0;
                    for (int64_t i00 = 0; i00 < ne00; i00++) {
                        sumr += (ggml_float)x[i00];
                    }
                    sum += sumr;
                }
            }
            const float mean = sum / (ne00 * ne01 * step);

            ggml_float sum2 = 0.0;
            for (int64_t i02 = start; i02 < end; i02++) {
                for (int64_t i01 = 0; i01 < ne01; i01++) {
                    const float * x = (float *)((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03);

                    float * y = (float *)((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3);

                    ggml_float sumr = 0.0;
                    for (int64_t i00 = 0; i00 < ne00; i00++) {
                        float v = x[i00] - mean;
                        y[i00] = v;
                        sumr += (ggml_float)(v * v);
                    }
                    sum2 += sumr;
                }
            }
            const float variance = sum2 / (ne00 * ne01 * step);
            const float scale = 1.0f / sqrtf(variance + eps);

            for (int64_t i02 = start; i02 < end; i02++) {
                for (int64_t i01 = 0; i01 < ne01; i01++) {
                    float * y = (float *)((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3);
                    ggml_vec_scale_f32(ne00, y, scale);
                }
            }
        }
    }
}
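
// illustrative example (not part of the kernel): with n_channels = 7 and n_groups = 3,
// n_channels_per_group = ceil(7/3) = 3, so the groups cover channels [0,3), [3,6) and
// [6,7); the last group is clamped to n_channels and normalizes over step = 1 channel.
// each group is normalized over the ne00*ne01*step elements of one i03 slice:
//   y = (x - mean) / sqrt(variance + eps)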

static void ggml_compute_forward_group_norm(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_group_norm_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_mul_mat

#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
// helper function to determine if it is better to use BLAS or not
// for large matrices, BLAS is faster
static bool ggml_compute_forward_mul_mat_use_blas(struct ggml_tensor * dst) {
    const struct ggml_tensor * src0 = dst->src[0];
    const struct ggml_tensor * src1 = dst->src[1];

    //const int64_t ne00 = src0->ne[0];
    //const int64_t ne01 = src0->ne[1];

    const int64_t ne10 = src1->ne[0];

    const int64_t ne0 = dst->ne[0];
    const int64_t ne1 = dst->ne[1];

    // NOTE: with GGML_OP_MUL_MAT_ID we don't want to go through the BLAS branch because it will dequantize (to_float)
    //       all the experts for each batch element and the processing would become incredibly slow
    // TODO: find the optimal values for these
    if (dst->op != GGML_OP_MUL_MAT_ID &&
        ggml_is_contiguous(src0) &&
        ggml_is_contiguous(src1) &&
        //src0->type == GGML_TYPE_F32 &&
        src1->type == GGML_TYPE_F32 &&
        (ne0 >= 32 && ne1 >= 32 && ne10 >= 32)) {

        /*printf("BLAS: %d %d %d %d %d\n", ne0, ne1, ne10, ne00, ne01);*/
        return true;
    }

    return false;
}
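
// illustrative example (not part of the helper): a 16x16 * 16x16 product stays on the
// ggml kernels because ne0, ne1 and ne10 are all below the 32-element threshold, while a
// contiguous f32 product with ne0 = 4096, ne1 = 512, ne10 = 4096 takes the BLAS path,
// where the sgemm setup cost is amortized over enough work to pay off.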

#endif

static void ggml_compute_forward_mul_mat(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];
    const struct ggml_tensor * src1 = dst->src[1];

    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    GGML_TENSOR_BINARY_OP_LOCALS

    const int ith = params->ith;
    const int nth = params->nth;

    const enum ggml_type type = src0->type;

    const bool src1_cont = ggml_is_contiguous(src1);

    ggml_vec_dot_t    const vec_dot               = type_traits[type].vec_dot;
    enum ggml_type    const vec_dot_type          = type_traits[type].vec_dot_type;
    ggml_from_float_t const from_float_to_vec_dot = type_traits[vec_dot_type].from_float;
    int64_t           const vec_dot_num_rows      = type_traits[type].nrows;

    GGML_ASSERT(ne0 == ne01);
    GGML_ASSERT(ne1 == ne11);
    GGML_ASSERT(ne2 == ne12);
    GGML_ASSERT(ne3 == ne13);

    // we don't support permuted src0 or src1
    GGML_ASSERT(nb00 == ggml_type_size(type));
    GGML_ASSERT(nb10 == ggml_type_size(src1->type));

    // dst cannot be transposed or permuted
    GGML_ASSERT(nb0 == sizeof(float));
    GGML_ASSERT(nb0 <= nb1);
    GGML_ASSERT(nb1 <= nb2);
    GGML_ASSERT(nb2 <= nb3);

    // broadcast factors
    const int64_t r2 = ne12/ne02;
    const int64_t r3 = ne13/ne03;
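
    // illustrative example (not part of the kernel): in grouped-query attention src0 can
    // hold ne02 = 8 KV heads while src1 holds ne12 = 32 query heads, giving r2 = 4; each
    // src0 matrix i02 is then reused for the 4 src1 slices with i12/r2 == i02 (and
    // likewise i03 = i13/r3 along the batch dimension).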

    // nb01 >= nb00 - src0 is not transposed
    //   compute by src0 rows

#if defined(GGML_USE_CLBLAST)
    if (ggml_cl_can_mul_mat(src0, src1, dst)) {
        if (params->ith == 0 && params->type == GGML_TASK_TYPE_COMPUTE) {
            ggml_cl_mul_mat(src0, src1, dst, params->wdata, params->wsize);
        }
        return;
    }
#endif

#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
    if (ggml_compute_forward_mul_mat_use_blas(dst)) {
        const int64_t ne_plane      = ne01*ne00;
        const size_t  desired_wsize = ne13*ne12*ne_plane*sizeof(float);
        UNUSED(desired_wsize);

        if (params->type == GGML_TASK_TYPE_INIT) {
            if (type != GGML_TYPE_F32) {
                assert(params->wsize >= desired_wsize);
                // parallelize by src0 rows
                for (int64_t i13 = 0; i13 < ne13; i13++) {
                    for (int64_t i12 = 0; i12 < ne12; i12++) {
                        // broadcast src0 into src1 across 2nd,3rd dimension
                        const int64_t i03 = i13/r3;
                        const int64_t i02 = i12/r2;

                        const void * x = (char *) src0->data + i02*nb02 + i03*nb03;
                        float * const wdata = (float *) params->wdata + i13*ne12*ne_plane + i12*ne_plane;
                        ggml_to_float_t const to_float = type_traits[type].to_float;

                        for (int64_t i01 = ith; i01 < ne01; i01 += nth) {
                            to_float((const char *) x + i01*nb01, wdata + i01*ne00, ne00);
                        }
                    }
                }
            }
            return;
        }

        if (params->type == GGML_TASK_TYPE_FINALIZE) {
            return;
        }

        // perform sgemm, parallelization controlled by blas lib
        if (ith != 0) {
            return;
        }

        //const int64_t tgemm0 = ggml_perf_time_us();
        for (int64_t i13 = 0; i13 < ne13; i13++) {
            for (int64_t i12 = 0; i12 < ne12; i12++) {
                const int64_t i03 = i13/r3;
                const int64_t i02 = i12/r2;

                const void  * x = (char *)            src0->data + i02*nb02 + i03*nb03;
                const float * y = (float *) ((char *) src1->data + i12*nb12 + i13*nb13);
                      float * d = (float *) ((char *)  dst->data + i12*nb2  + i13*nb3);

                if (type != GGML_TYPE_F32) {
                    x = (float *) params->wdata + i13*ne12*ne_plane + i12*ne_plane;
                }

                cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans,
                            ne1, ne01, ne10,
                            1.0f,    y, ne10,
                                     x, ne00,
                            0.0f,    d, ne01);
            }
        }
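
        // illustrative shape check (not executed): with row-major sgemm, the call above
        // computes C(m,n) = A(m,k) * B(k,n)^T, i.e.
        //   d[ne1 x ne01] = y[ne1 x ne10] * x[ne01 x ne00]^T
        // which is valid because ne10 == ne00 (the shared K dimension) and dst is
        // ne0 x ne1 with ne0 == ne01.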

        //printf("cblas_sgemm = %.3f ms, %lld flops\n", (ggml_perf_time_us() - tgemm0)/1000.0, ne13*ne12*ne1*ne01*ne10*2);
        //printf("CBLAS = %f ms, %d x %d x %d x %d\n", (ggml_perf_time_us() - t0)/1000.0, ne0, ne1, ne2, ne3);

        return;
    }
#endif

#if GGML_USE_LLAMAFILE
    if (src1_cont) {
        for (int64_t i13 = 0; i13 < ne13; i13++)
            for (int64_t i12 = 0; i12 < ne12; i12++)
                if (!llamafile_sgemm(ne01, ne11, ne00/ggml_blck_size(src0->type),
                                     (const char *)src0->data + i12/r2*nb02 + i13/r3*nb03,
                                     nb01/ggml_type_size(src0->type),
                                     (const char *)src1->data + i12*nb12 + i13*nb13,
                                     nb11/ggml_type_size(src1->type),
                                     (char *)dst->data + i12*nb2 + i13*nb3,
                                     nb1/ggml_type_size(dst->type),
                                     ith, nth,
                                     params->type,
                                     src0->type,
                                     src1->type,
                                     dst->type))
                    goto UseGgmlGemm1;
        return;
    }
UseGgmlGemm1:;
#endif

    if (params->type == GGML_TASK_TYPE_INIT) {
        if (ith != 0) {
            return;
        }
        if (src1->type != vec_dot_type) {
            char * wdata = params->wdata;
            const size_t row_size = ggml_row_size(vec_dot_type, ne10);

            assert(params->wsize >= ne11*ne12*ne13*row_size);
            GGML_ASSERT(src1->type == GGML_TYPE_F32);

            for (int64_t i13 = 0; i13 < ne13; ++i13) {
                for (int64_t i12 = 0; i12 < ne12; ++i12) {
                    for (int64_t i11 = 0; i11 < ne11; ++i11) {
                        from_float_to_vec_dot((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11), (void *) wdata, ne10);
                        wdata += row_size;
                    }
                }
            }
        }

        return;
    }

    if (params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    const void * wdata    = (src1->type == vec_dot_type) ? src1->data : params->wdata;
    const size_t row_size = ggml_row_size(vec_dot_type, ne10);

#if GGML_USE_LLAMAFILE
    if (src1->type != vec_dot_type) {
        for (int64_t i13 = 0; i13 < ne13; i13++)
            for (int64_t i12 = 0; i12 < ne12; i12++)
                if (!llamafile_sgemm(ne01, ne11, ne00/ggml_blck_size(src0->type),
                                     (const char *)src0->data + i12/r2*nb02 + i13/r3*nb03,
                                     nb01/ggml_type_size(src0->type),
                                     (const char *)wdata + (i12*ne11 + i13*ne12*ne11)*row_size,
                                     row_size/ggml_type_size(vec_dot_type),
                                     (char *)dst->data + i12*nb2 + i13*nb3,
                                     nb1/ggml_type_size(dst->type),
                                     ith, nth,
                                     params->type,
                                     src0->type,
                                     vec_dot_type,
                                     dst->type))
                    goto UseGgmlGemm2;
        return;
    }
UseGgmlGemm2:;
#endif

    const int64_t nr0 = ne01;          // src0 rows
    const int64_t nr1 = ne1*ne12*ne13; // src1 rows

    //printf("nr0 = %lld, nr1 = %lld\n", nr0, nr1);

    // distribute the thread work across the inner or outer loop based on which one is larger

    const int64_t nth0 = nr0 > nr1 ? nth : 1; // parallelize by src0 rows
    const int64_t nth1 = nr0 > nr1 ? 1 : nth; // parallelize by src1 rows

    const int64_t ith0 = ith % nth0;
    const int64_t ith1 = ith / nth0;

    const int64_t dr0 = (nr0 + nth0 - 1)/nth0;
    const int64_t dr1 = (nr1 + nth1 - 1)/nth1;

    const int64_t ir010 = dr0*ith0;
    const int64_t ir011 = MIN(ir010 + dr0, nr0);

    const int64_t ir110 = dr1*ith1;
    const int64_t ir111 = MIN(ir110 + dr1, nr1);
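
    // illustrative example (not part of the kernel): with nth = 4 threads, nr0 = 4096
    // src0 rows and nr1 = 8 src1 rows, nr0 > nr1 gives nth0 = 4, nth1 = 1; thread ith = 2
    // gets ith0 = 2, ith1 = 0 and dr0 = 1024, so it processes src0 rows [2048, 3072)
    // against all 8 src1 rows. if nr1 dominated instead, the split would run along src1.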

    //printf("ir010 = %6lld, ir011 = %6lld, ir110 = %6lld, ir111 = %6lld\n", ir010, ir011, ir110, ir111);

    // threads with no work simply yield (not sure if it helps)
    if (ir010 >= ir011 || ir110 >= ir111) {
        sched_yield();
        return;
    }

    assert(ne12 % ne02 == 0);
    assert(ne13 % ne03 == 0);

    // block-tiling attempt
    const int64_t blck_0 = 16;
    const int64_t blck_1 = 16;

    // dot kernels can handle 1 row and col at a time, but mmla kernels can process 2 rows and cols
    int64_t nrc = vec_dot_num_rows;
    // TODO: currently the mmla kernels support only even numbered rows/cols.
    // this check can be removed once they are extended to support odd numbered rows/cols too
    if ((nr0 % 2 != 0) || (ne11 % 2 != 0)) {
        nrc = 1;
    }
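
    // reading the loops below: with nrc = 2 a single vec_dot call produces a 2x2 tile of
    // results (two consecutive src0 rows against two consecutive src1 columns), which is
    // why tmp is laid out as two 16-float strips (stride 16 per column) and both ir0 and
    // ir1 advance by nrc. with nrc = 1 it degenerates to the usual one-dot-per-call path.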

    const size_t src1_col_stride = src1_cont || src1->type != vec_dot_type ? row_size : nb11;

    // attempt to reduce false-sharing (does not seem to make a difference)
    // 16 * 2, accounting for mmla kernels
    float tmp[32];

    for (int64_t iir1 = ir110; iir1 < ir111; iir1 += blck_1) {
        for (int64_t iir0 = ir010; iir0 < ir011; iir0 += blck_0) {
            for (int64_t ir1 = iir1; ir1 < iir1 + blck_1 && ir1 < ir111; ir1 += nrc) {
                const int64_t i13 = (ir1/(ne12*ne1));
                const int64_t i12 = (ir1 - i13*ne12*ne1)/ne1;
                const int64_t i11 = (ir1 - i13*ne12*ne1 - i12*ne1);

                // broadcast src0 into src1
                const int64_t i03 = i13/r3;
                const int64_t i02 = i12/r2;

                const int64_t i1 = i11;
                const int64_t i2 = i12;
                const int64_t i3 = i13;

                const char * src0_row = (const char *) src0->data + (0 + i02*nb02 + i03*nb03);

                // desc: when src1 is not a contiguous memory block we have to calculate the offset using the strides
                //       if it is, then we have either copied the data to params->wdata and made it contiguous or we are using
                //       the original src1 data pointer, so we should index using the indices directly
                // TODO: this is a bit of a hack, we should probably have a better way to handle this
                const char * src1_col = (const char *) wdata +
                    (src1_cont || src1->type != vec_dot_type
                     ? (i11      + i12*ne11 + i13*ne12*ne11)*row_size
                     : (i11*nb11 + i12*nb12 + i13*nb13));
                float * dst_col = (float *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3));

                //for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir011; ++ir0) {
                //    vec_dot(ne00, &dst_col[ir0], src0_row + ir0*nb01, src1_col);
                //}

                for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir011; ir0 += nrc) {
                    vec_dot(ne00, &tmp[ir0 - iir0], (nrc>1 ? 16 : 0), src0_row + ir0*nb01, (nrc>1 ? nb01 : 0), src1_col, (nrc>1 ? src1_col_stride : 0), nrc);
                }

                for (int cn = 0; cn < nrc; ++cn) {
                    memcpy(&dst_col[iir0 + cn*nb1/nb0], tmp + (cn*16), (MIN(iir0 + blck_0, ir011) - iir0)*sizeof(float));
                }
            }
        }
    }
}

// ggml_compute_forward_mul_mat_id

static void ggml_compute_forward_mul_mat_id(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];
    const struct ggml_tensor * src1 = dst->src[1];
    const struct ggml_tensor * ids = dst->src[2];

    GGML_TENSOR_BINARY_OP_LOCALS

    const int ith = params->ith;
    const int nth = params->nth;

    const enum ggml_type type = src0->type;

    const bool src1_cont = ggml_is_contiguous(src1);

    ggml_vec_dot_t    const vec_dot               = type_traits[type].vec_dot;
    enum ggml_type    const vec_dot_type          = type_traits[type].vec_dot_type;
    ggml_from_float_t const from_float_to_vec_dot = type_traits[vec_dot_type].from_float;

    // we don't support permuted src0 or src1
    GGML_ASSERT(nb00 == ggml_type_size(type));
    GGML_ASSERT(nb10 == ggml_type_size(src1->type));

    // dst cannot be transposed or permuted
    GGML_ASSERT(nb0 == sizeof(float));
    GGML_ASSERT(nb0 <= nb1);
    GGML_ASSERT(nb1 <= nb2);
    GGML_ASSERT(nb2 <= nb3);

    // row groups
    const int n_ids = ids->ne[0]; // n_expert_used
    const int n_as  = ne02;       // n_expert

    char * wdata_src1_end = (src1->type == vec_dot_type) ?
            (char *) params->wdata :
            (char *) params->wdata + GGML_PAD(ggml_row_size(vec_dot_type, ggml_nelements(src1)), sizeof(int64_t));

    struct mmid_row_mapping {
        int32_t i1;
        int32_t i2;
    };

    int64_t * matrix_row_counts = (int64_t *) (wdata_src1_end); // [n_as]
    struct mmid_row_mapping * matrix_rows = (struct mmid_row_mapping *)(matrix_row_counts + n_as); // [n_as][ne11]

    if (params->type == GGML_TASK_TYPE_INIT) {
        if (ith != 0) {
            return;
        }
        char * wdata = params->wdata;
        if (src1->type != vec_dot_type) {
            const size_t row_size = ggml_row_size(vec_dot_type, ne10);

            assert(params->wsize >= ne11*ne12*ne13*row_size);
            assert(src1->type == GGML_TYPE_F32);

            for (int64_t i13 = 0; i13 < ne13; ++i13) {
                for (int64_t i12 = 0; i12 < ne12; ++i12) {
                    for (int64_t i11 = 0; i11 < ne11; ++i11) {
                        from_float_to_vec_dot((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11), (void *) wdata, ne10);
                        wdata += row_size;
                    }
                }
            }
        }

        // initialize matrix_row_counts
        memset(matrix_row_counts, 0, n_as*sizeof(int64_t));

#define MMID_MATRIX_ROW(row_id, i1) matrix_rows[(row_id)*ne12 + (i1)]

        // group rows by src0 matrix
        for (int64_t iid1 = 0; iid1 < ids->ne[1]; ++iid1) {
            for (int id = 0; id < n_ids; ++id) {
                const int32_t i02 = *(const int32_t *) ((const char *) ids->data + iid1*ids->nb[1] + id*ids->nb[0]);

                assert(i02 >= 0 && i02 < n_as);

                MMID_MATRIX_ROW(i02, matrix_row_counts[i02]) = (struct mmid_row_mapping) {id, iid1};
                matrix_row_counts[i02] += 1;
            }
        }
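
        // illustrative example (not part of the kernel): with n_as = 4 experts,
        // n_ids = 2 experts per token and ids = {{2, 0}, {1, 2}} for two tokens,
        // the grouping above yields
        //   matrix_row_counts = {1, 1, 2, 0}
        //   rows of expert 0: {id=1, iid1=0}
        //   rows of expert 1: {id=0, iid1=1}
        //   rows of expert 2: {id=0, iid1=0}, {id=1, iid1=1}
        // so each expert later multiplies only the src1 rows routed to it.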

        return;
    }

    if (params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    // compute each matrix multiplication in sequence
    for (int cur_a = 0; cur_a < n_as; ++cur_a) {
        const int64_t cne1 = matrix_row_counts[cur_a];

        if (cne1 == 0) {
            continue;
        }

        const char * src0_cur = (const char *) src0->data + cur_a*nb02;

        const void * wdata    = (src1->type == vec_dot_type) ? src1->data : params->wdata;
        const size_t row_size = ggml_row_size(vec_dot_type, ne10);

        const int64_t nr0 = ne01; // src0 rows
        const int64_t nr1 = cne1; // src1 rows

        // distribute the thread work across the inner or outer loop based on which one is larger

        const int64_t nth0 = nr0 > nr1 ? nth : 1; // parallelize by src0 rows
        const int64_t nth1 = nr0 > nr1 ? 1 : nth; // parallelize by src1 rows

        const int64_t ith0 = ith % nth0;
        const int64_t ith1 = ith / nth0;

        const int64_t dr0 = (nr0 + nth0 - 1)/nth0;
        const int64_t dr1 = (nr1 + nth1 - 1)/nth1;

        const int64_t ir010 = dr0*ith0;
        const int64_t ir011 = MIN(ir010 + dr0, nr0);

        const int64_t ir110 = dr1*ith1;
        const int64_t ir111 = MIN(ir110 + dr1, nr1);

        // threads with no work simply yield (not sure if it helps)
        //if (ir010 >= ir011 || ir110 >= ir111) {
        //    sched_yield();
        //    continue;
        //}

        // block-tiling attempt
        const int64_t blck_0 = 16;
        const int64_t blck_1 = 16;

        // attempt to reduce false-sharing (does not seem to make a difference)
        float tmp[16];

        for (int64_t iir1 = ir110; iir1 < ir111; iir1 += blck_1) {
            for (int64_t iir0 = ir010; iir0 < ir011; iir0 += blck_0) {
                for (int64_t ir1 = iir1; ir1 < iir1 + blck_1 && ir1 < ir111; ++ir1) {
                    const int64_t _i12 = ir1; // logical row index for this expert

                    struct mmid_row_mapping row_mapping = MMID_MATRIX_ROW(cur_a, _i12);
                    const int id       = row_mapping.i1; // selected expert index

                    const int64_t  i11 = id % ne11;
                    const int64_t  i12 = row_mapping.i2; // row index in src1

                    const int64_t  i1 = id;  // selected expert index
                    const int64_t  i2 = i12; // row

                    // desc: when src1 is not a contiguous memory block we have to calculate the offset using the strides
                    //       if it is, then we have either copied the data to params->wdata and made it contiguous or we are using
                    //       the original src1 data pointer, so we should index using the indices directly
                    // TODO: this is a bit of a hack, we should probably have a better way to handle this
                    const char * src1_col = (const char *) wdata +
                        (src1_cont || src1->type != vec_dot_type
                         ? (i11      + i12*ne11)*row_size
                         : (i11*nb11 + i12*nb12));

                    float * dst_col = (float *) ((char *) dst->data + (i1*nb1 + i2*nb2));

                    //for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir011; ++ir0) {
                    //    vec_dot(ne00, &dst_col[ir0], src0_row + ir0*nb01, src1_col);
                    //}

                    for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir011; ++ir0) {
                        vec_dot(ne00, &tmp[ir0 - iir0], 0, src0_cur + ir0*nb01, 0, src1_col, 0, 1);
                    }

                    memcpy(&dst_col[iir0], tmp, (MIN(iir0 + blck_0, ir011) - iir0)*sizeof(float));
                }
            }
        }
    }

#undef MMID_MATRIX_ROW
}

// ggml_compute_forward_out_prod

static void ggml_compute_forward_out_prod_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];
    const struct ggml_tensor * src1 = dst->src[1];

    // int64_t t0 = ggml_perf_time_us();
    // UNUSED(t0);

    GGML_TENSOR_BINARY_OP_LOCALS

    const int ith = params->ith;
    const int nth = params->nth;

    GGML_ASSERT(ne0  == ne00);
    GGML_ASSERT(ne1  == ne10);
    GGML_ASSERT(ne2  == ne02);
    GGML_ASSERT(ne02 == ne12);
    GGML_ASSERT(ne3  == ne13);
    GGML_ASSERT(ne03 == ne13);

    // we don't support permuted src0 or src1
    GGML_ASSERT(nb00 == sizeof(float));

    // dst cannot be transposed or permuted
    GGML_ASSERT(nb0 == sizeof(float));
    // GGML_ASSERT(nb0 <= nb1);
    // GGML_ASSERT(nb1 <= nb2);
    // GGML_ASSERT(nb2 <= nb3);

    // nb01 >= nb00 - src0 is not transposed
    //   compute by src0 rows

    // TODO: #if defined(GGML_USE_CLBLAST)

#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
    bool use_blas = ggml_is_matrix(src0) &&
        ggml_is_matrix(src1) &&
        ggml_is_contiguous(src0) &&
        (ggml_is_contiguous(src1) || ggml_is_transposed(src1));
#endif

    if (params->type == GGML_TASK_TYPE_INIT) {
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) // gemm beta will zero dst
        if (use_blas) {
            return;
        }
#endif
        if (ith != 0) {
            return;
        }
        ggml_vec_set_f32(ne0*ne1*ne2*ne3, dst->data, 0);
        return;
    }

    if (params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
    if (use_blas) {
        if (params->ith != 0) { // All threads other than the first do no work.
            return;
        }
        // Arguments to ggml_compute_forward_out_prod (expressed as major,minor)
        // src0: (k,n)
        // src1: (k,m)
        // dst:  (m,n)
        //
        // Arguments to sgemm (see https://github.com/Reference-LAPACK/lapack/blob/master/BLAS/SRC/sgemm.f)
        // Also expressed as (major,minor)
        // a: (m,k): so src1 transposed
        // b: (k,n): so src0
        // c: (m,n)
        //
        // However, if ggml_is_transposed(src1) is true, then
        // src1->data already contains a transposed version, so sgemm mustn't
        // transpose it further.

        int n = src0->ne[0];
        int k = src0->ne[1];
        int m = src1->ne[0];

        int transposeA, lda;

        if (!ggml_is_transposed(src1)) {
            transposeA = CblasTrans;
            lda = m;
        } else {
            transposeA = CblasNoTrans;
            lda = k;
        }

        float * a = (float *) ((char *) src1->data);
        float * b = (float *) ((char *) src0->data);
        float * c = (float *) ((char *) dst->data);

        cblas_sgemm(CblasRowMajor, transposeA, CblasNoTrans, m, n, k, 1.0, a, lda, b, n, 0.0, c, n);
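
        // illustrative example (not executed): for src0 = k x n = 512 x 4096 (k = 512 rows
        // of n = 4096 floats) and src1 = k x m = 512 x 32 (m = 32), the call computes
        //   c[m x n] = a[k x m]^T * b[k x n]
        // i.e. dst[i1][i0] = sum_i01 src1[i01][i1] * src0[i01][i0], which matches the
        // scalar loop below the BLAS branch.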

        return;
    }
#endif

    // dst[:,:,:,:] = 0
    // for i2,i3:
    //   for i1:
    //     for i01:
    //       for i0:
    //         dst[i0,i1,i2,i3] += src0[i0,i01,i2,i3] * src1[i1,i01,i2,i3]

    // parallelize by last three dimensions

    // total rows in dst
    const int64_t nr = ne1*ne2*ne3;

    // rows per thread
    const int64_t dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int64_t ir0 = dr*ith;
    const int64_t ir1 = MIN(ir0 + dr, nr);

    // block-tiling attempt
    const int64_t blck_0 = MAX(GGML_VEC_MAD_UNROLL, 32);
    const int64_t blck_1 = 16;

    for (int64_t bir = ir0; bir < ir1; bir += blck_1) {
        const int64_t bir1 = MIN(bir + blck_1, ir1);
        for (int64_t bi01 = 0; bi01 < ne01; bi01 += blck_0) {
            const int64_t bne01 = MIN(bi01 + blck_0, ne01);
            for (int64_t ir = bir; ir < bir1; ++ir) {
                // dst indices
                const int64_t i3 = ir/(ne2*ne1);
                const int64_t i2 = (ir - i3*ne2*ne1)/ne1;
                const int64_t i1 = (ir - i3*ne2*ne1 - i2*ne1);

                const int64_t i02 = i2;
                const int64_t i03 = i3;

                //const int64_t i10 = i1;
                const int64_t i12 = i2;
                const int64_t i13 = i3;

#if GGML_VEC_MAD_UNROLL > 2
                const int64_t bne01_unroll = bne01 - (bne01 % GGML_VEC_MAD_UNROLL);
                for (int64_t i01 = bi01; i01 < bne01_unroll; i01 += GGML_VEC_MAD_UNROLL) {
                    const int64_t i11 = i01;

                    float * s0 = (float *) ((char *) src0->data + (          i01*nb01 + i02*nb02 + i03*nb03));
                    float * s1 = (float *) ((char *) src1->data + (i1*nb10 + i11*nb11 + i12*nb12 + i13*nb13));
                    float * d  = (float *) ((char *)  dst->data + (          i1*nb1   + i2*nb2   + i3*nb3));

                    ggml_vec_mad_f32_unroll(ne0, nb01, nb11, d, s0, s1);
                }
                for (int64_t i01 = bne01_unroll; i01 < bne01; ++i01) {
                    const int64_t i11 = i01;

                    float * s0 = (float *) ((char *) src0->data + (          i01*nb01 + i02*nb02 + i03*nb03));
                    float * s1 = (float *) ((char *) src1->data + (i1*nb10 + i11*nb11 + i12*nb12 + i13*nb13));
                    float * d  = (float *) ((char *)  dst->data + (          i1*nb1   + i2*nb2   + i3*nb3));

                    ggml_vec_mad_f32(ne0, d, s0, *s1);
                }
#else
                for (int64_t i01 = bi01; i01 < bne01; ++i01) {
                    const int64_t i11 = i01;

                    float * s0 = (float *) ((char *) src0->data + (          i01*nb01 + i02*nb02 + i03*nb03));
                    float * s1 = (float *) ((char *) src1->data + (i1*nb10 + i11*nb11 + i12*nb12 + i13*nb13));
                    float * d  = (float *) ((char *)  dst->data + (          i1*nb1   + i2*nb2   + i3*nb3));

                    ggml_vec_mad_f32(ne0, d, s0, *s1);
                }
#endif
            }
        }
    }

    //int64_t t1 = ggml_perf_time_us();
    //static int64_t acc = 0;
    //acc += t1 - t0;
    //if (t1 - t0 > 10) {
    //    printf("\n");
    //    printf("ne00 = %5d, ne01 = %5d, ne02 = %5d, ne03 = %5d\n", ne00, ne01, ne02, ne03);
    //    printf("nb00 = %5d, nb01 = %5d, nb02 = %5d, nb03 = %5d\n", nb00, nb01, nb02, nb03);
    //    printf("ne10 = %5d, ne11 = %5d, ne12 = %5d, ne13 = %5d\n", ne10, ne11, ne12, ne13);
    //    printf("nb10 = %5d, nb11 = %5d, nb12 = %5d, nb13 = %5d\n", nb10, nb11, nb12, nb13);
    //    printf("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX task %d/%d: %d us, acc = %d\n", ith, nth, (int) (t1 - t0), (int) acc);
    //}
}
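
// illustrative reference (commented out, not part of the build): the kernel above is an
// axpy-based formulation of the pseudocode in the comment; a minimal scalar version for
// 2-D tensors, assuming contiguous f32 data, would be:
//
//     for (int64_t i1 = 0; i1 < ne1; i1++)          // dst rows (src1 columns)
//         for (int64_t i01 = 0; i01 < ne01; i01++)  // shared reduction dimension
//             for (int64_t i0 = 0; i0 < ne0; i0++)  // dst columns (src0 columns)
//                 dst[i1*ne0 + i0] += src0[i01*ne00 + i0] * src1[i01*ne10 + i1];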

static void ggml_compute_forward_out_prod_q_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];
    const struct ggml_tensor * src1 = dst->src[1];

    // int64_t t0 = ggml_perf_time_us();
    // UNUSED(t0);

    GGML_TENSOR_BINARY_OP_LOCALS;

    const int ith = params->ith;
    const int nth = params->nth;

    const enum ggml_type type = src0->type;
    ggml_to_float_t const dequantize_row_q = type_traits[type].to_float;

    GGML_ASSERT(ne02 == ne12);
    GGML_ASSERT(ne03 == ne13);
    GGML_ASSERT(ne2  == ne12);
    GGML_ASSERT(ne3  == ne13);

    // we don't support permuted src0 dim0
    GGML_ASSERT(nb00 == ggml_type_size(type));

    // dst dim0 cannot be transposed or permuted
    GGML_ASSERT(nb0 == sizeof(float));
    // GGML_ASSERT(nb0 <= nb1);
    // GGML_ASSERT(nb1 <= nb2);
    // GGML_ASSERT(nb2 <= nb3);

    GGML_ASSERT(ne0 == ne00);
    GGML_ASSERT(ne1 == ne10);
    GGML_ASSERT(ne2 == ne02);
    GGML_ASSERT(ne3 == ne03);

    // nb01 >= nb00 - src0 is not transposed
    //   compute by src0 rows

    // TODO: #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) || defined(GGML_USE_CLBLAST)

    if (params->type == GGML_TASK_TYPE_INIT) {
        if (ith != 0) {
            return;
        }
        ggml_vec_set_f32(ne0*ne1*ne2*ne3, dst->data, 0);
        return;
    }

    if (params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    // parallelize by last three dimensions

    // total rows in dst
    const int64_t nr = ne1*ne2*ne3;

    // rows per thread
    const int64_t dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int64_t ir0 = dr*ith;
    const int64_t ir1 = MIN(ir0 + dr, nr);

    // dst[:,:,:,:] = 0
    // for i2,i3:
    //   for i1:
    //     for i01:
    //       for i0:
    //         dst[i0,i1,i2,i3] += src0[i0,i01,i2,i3] * src1[i1,i01,i2,i3]

    float * wdata = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32) * ith;
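
    // note on the scratch layout: each thread dequantizes one src0 row at a time into its
    // own slice of params->wdata; the slices are ne0 floats long and padded by
    // CACHE_LINE_SIZE_F32 floats so neighbouring threads never write to the same cache
    // line (the same false-sharing concern as the tmp buffers in mul_mat above).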

    for (int64_t ir = ir0; ir < ir1; ++ir) {
        // dst indices
        const int64_t i3 = ir/(ne2*ne1);
        const int64_t i2 = (ir - i3*ne2*ne1)/ne1;
        const int64_t i1 = (ir - i3*ne2*ne1 - i2*ne1);

        const int64_t i02 = i2;
        const int64_t i03 = i3;

        //const int64_t i10 = i1;
        const int64_t i12 = i2;
        const int64_t i13 = i3;

        for (int64_t i01 = 0; i01 < ne01; ++i01) {
            const int64_t i11 = i01;

            float * s0 = (float *) ((char *) src0->data + (          i01*nb01 + i02*nb02 + i03*nb03));
            float * s1 = (float *) ((char *) src1->data + (i1*nb10 + i11*nb11 + i12*nb12 + i13*nb13));
            float * d  = (float *) ((char *)  dst->data + (          i1*nb1   + i2*nb2   + i3*nb3));

            dequantize_row_q(s0, wdata, ne0);
            ggml_vec_mad_f32(ne0, d, wdata, *s1);
        }
    }

    //int64_t t1 = ggml_perf_time_us();
    //static int64_t acc = 0;
    //acc += t1 - t0;
    //if (t1 - t0 > 10) {
    //    printf("\n");
    //    printf("ne00 = %5d, ne01 = %5d, ne02 = %5d, ne03 = %5d\n", ne00, ne01, ne02, ne03);
    //    printf("nb00 = %5d, nb01 = %5d, nb02 = %5d, nb03 = %5d\n", nb00, nb01, nb02, nb03);
    //    printf("ne10 = %5d, ne11 = %5d, ne12 = %5d, ne13 = %5d\n", ne10, ne11, ne12, ne13);
    //    printf("nb10 = %5d, nb11 = %5d, nb12 = %5d, nb13 = %5d\n", nb10, nb11, nb12, nb13);
    //    printf("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX task %d/%d: %d us, acc = %d\n", ith, nth, (int) (t1 - t0), (int) acc);
    //}
}

static void ggml_compute_forward_out_prod(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
        case GGML_TYPE_IQ2_XXS:
        case GGML_TYPE_IQ2_XS:
        case GGML_TYPE_IQ3_XXS:
        case GGML_TYPE_IQ1_S:
        case GGML_TYPE_IQ1_M:
        case GGML_TYPE_IQ4_NL:
        case GGML_TYPE_IQ4_XS:
        case GGML_TYPE_IQ3_S:
        case GGML_TYPE_IQ2_S:
            {
                ggml_compute_forward_out_prod_q_f32(params, dst);
            } break;
        case GGML_TYPE_F16:
            {
                GGML_ASSERT(false); // todo
                // ggml_compute_forward_out_prod_f16_f32(params, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_out_prod_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_scale

static void ggml_compute_forward_scale_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    GGML_ASSERT(ggml_is_contiguous(src0));
    GGML_ASSERT(ggml_is_contiguous(dst));
    GGML_ASSERT(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    // scale factor
    float v;
    memcpy(&v, dst->op_params, sizeof(float));

    const int ith = params->ith;
    const int nth = params->nth;

    const int nc = src0->ne[0];
    const int nr = ggml_nrows(src0);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    const size_t nb01 = src0->nb[1];
    const size_t nb1  = dst->nb[1];

    for (int i1 = ir0; i1 < ir1; i1++) {
        if (dst->data != src0->data) {
            // src0 is same shape as dst => same indices
            memcpy((char *)dst->data + i1*nb1, (char *)src0->data + i1*nb01, nc * sizeof(float));
        }
        ggml_vec_scale_f32(nc, (float *) ((char *) dst->data + i1*nb1), v);
    }
}
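
// illustrative example (not part of the kernel): the scale factor is stored in op_params
// by ggml_scale(), so for dst = ggml_scale(ctx, a, 0.5f) each row is copied (when not
// in-place) and multiplied by v = 0.5f; with nr = 100 rows and nth = 4 threads, each
// thread handles dr = 25 consecutive rows.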

static void ggml_compute_forward_scale(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_scale_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_set

static void ggml_compute_forward_set_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];
    const struct ggml_tensor * src1 = dst->src[1];

    GGML_ASSERT(ggml_are_same_shape(src0, dst));
    GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));

    // view src0 and dst with these strides and data offset inbytes during set
    // nb0 is implicitly element_size because src0 and dst are contiguous
    size_t nb1     = ((int32_t *) dst->op_params)[0];
    size_t nb2     = ((int32_t *) dst->op_params)[1];
    size_t nb3     = ((int32_t *) dst->op_params)[2];
    size_t offset  = ((int32_t *) dst->op_params)[3];
    bool   inplace = (bool) ((int32_t *) dst->op_params)[4];

    if (!inplace && (params->type == GGML_TASK_TYPE_INIT)) {
        if (params->ith != 0) {
            return;
        }
        // memcpy needs to be synchronized across threads to avoid race conditions.
        // => do it in INIT phase
        memcpy(
            ((char *)  dst->data),
            ((char *) src0->data),
            ggml_nbytes(dst));
    }

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(src1);
    const int nc = src1->ne[0];

    GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne)
    GGML_TENSOR_LOCALS(size_t,  nb1, src1, nb)

    // src0 and dst as viewed during set
    const size_t nb0 = ggml_element_size(src0);

    const int im0 = (ne10 == 0 ? 0 : ne10-1);
    const int im1 = (ne11 == 0 ? 0 : ne11-1);
    const int im2 = (ne12 == 0 ? 0 : ne12-1);
    const int im3 = (ne13 == 0 ? 0 : ne13-1);

    GGML_ASSERT(offset + im0*nb0 + im1*nb1 + im2*nb2 + im3*nb3 <= ggml_nbytes(dst));

    GGML_ASSERT(nb10 == sizeof(float));

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int ir = ir0; ir < ir1; ++ir) {
        // src0 and dst are viewed with shape of src1 and offset
        // => same indices
        const int i3 = ir/(ne12*ne11);
        const int i2 = (ir - i3*ne12*ne11)/ne11;
        const int i1 = (ir - i3*ne12*ne11 - i2*ne11);

        ggml_vec_cpy_f32(nc,
                (float *) ((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + offset),
                (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11));
    }
}
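
// illustrative example (not part of the kernel): ggml_set() overwrites a view of dst with
// src1. e.g. placing a 4x2 f32 block at element offset (2,1) of a 16x8 dst would use
// nb1 = 16*sizeof(float) and offset = (1*16 + 2)*sizeof(float); row i1 of src1 then lands
// at dst->data + offset + i1*nb1, exactly the address computed in the loop above.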

static void ggml_compute_forward_set(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_set_f32(params, dst);
            } break;
        case GGML_TYPE_F16:
        case GGML_TYPE_BF16:
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q8_1:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
        case GGML_TYPE_IQ2_XXS:
        case GGML_TYPE_IQ2_XS:
        case GGML_TYPE_IQ3_XXS:
        case GGML_TYPE_IQ1_S:
        case GGML_TYPE_IQ1_M:
        case GGML_TYPE_IQ4_NL:
        case GGML_TYPE_IQ4_XS:
        case GGML_TYPE_IQ3_S:
        case GGML_TYPE_IQ2_S:
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_cpy

static void ggml_compute_forward_cpy(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {
    ggml_compute_forward_dup(params, dst);
}

// ggml_compute_forward_cont

static void ggml_compute_forward_cont(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {
    ggml_compute_forward_dup(params, dst);
}

// ggml_compute_forward_reshape

static void ggml_compute_forward_reshape(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {
    // NOP
    UNUSED(params);
    UNUSED(dst);
}

// ggml_compute_forward_view

static void ggml_compute_forward_view(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * dst) {
    // NOP
    UNUSED(params);
    UNUSED(dst);
}

// ggml_compute_forward_permute

static void ggml_compute_forward_permute(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * dst) {
    // NOP
    UNUSED(params);
    UNUSED(dst);
}

// ggml_compute_forward_transpose

static void ggml_compute_forward_transpose(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * dst) {
    // NOP
    UNUSED(params);
    UNUSED(dst);
}

// ggml_compute_forward_get_rows

static void ggml_compute_forward_get_rows_q(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];
    const struct ggml_tensor * src1 = dst->src[1];

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    GGML_TENSOR_BINARY_OP_LOCALS

    const int64_t nc = ne00;
    const int64_t nr = ggml_nelements(src1);

    const enum ggml_type type = src0->type;
    ggml_to_float_t const dequantize_row_q = type_traits[type].to_float;

    assert(ne0  == nc);
    assert(ne02 == ne11);
    assert(nb00 == ggml_type_size(type));
    assert(ggml_nrows(dst) == nr);

    const int ith = params->ith;
    const int nth = params->nth;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int64_t i = ir0; i < ir1; ++i) {
        const int64_t i12 = i/(ne11*ne10);
        const int64_t i11 = (i - i12*ne11*ne10)/ne10;
        const int64_t i10 = (i - i12*ne11*ne10 - i11*ne10);
        const int64_t i01 = *(int32_t *) ((char *) src1->data + i10*nb10 + i11*nb11 + i12*nb12);

        dequantize_row_q(
                (const void *) ((char *) src0->data + i01*nb01 + i11*nb02 + i12*nb03),
                     (float *) ((char *)  dst->data + i10*nb1  + i11*nb2  + i12*nb3), nc);
    }
}
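
// illustrative example (not part of the kernel): get_rows gathers whole rows of src0 by
// the int32 indices stored in src1. the flat index i is decomposed mixed-radix: with
// ne10 = 3 indices per row and ne11 = 2 rows, i = 5 gives i12 = 0, i11 = 1, i10 = 2, so
// the row id is read from src1 at (2,1,0) and the dequantized src0 row is written to the
// matching dst row. the f16/bf16/f32 variants below differ only in the per-row copy.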

static void ggml_compute_forward_get_rows_f16(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];
    const struct ggml_tensor * src1 = dst->src[1];

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    GGML_TENSOR_BINARY_OP_LOCALS

    const int64_t nc = ne00;
    const int64_t nr = ggml_nelements(src1);

    assert(ne0  == nc);
    assert(ne02 == ne11);
    assert(nb00 == sizeof(ggml_fp16_t));
    assert(ggml_nrows(dst) == nr);

    const int ith = params->ith;
    const int nth = params->nth;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int64_t i = ir0; i < ir1; ++i) {
        const int64_t i12 = i/(ne11*ne10);
        const int64_t i11 = (i - i12*ne11*ne10)/ne10;
        const int64_t i10 = (i - i12*ne11*ne10 - i11*ne10);
        const int64_t i01 = *(int32_t *) ((char *) src1->data + i10*nb10 + i11*nb11 + i12*nb12);

        ggml_fp16_to_fp32_row(
                (const void *) ((char *) src0->data + i01*nb01 + i11*nb02 + i12*nb03),
                     (float *) ((char *)  dst->data + i10*nb1  + i11*nb2  + i12*nb3), nc);
    }
}

static void ggml_compute_forward_get_rows_bf16(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];
    const struct ggml_tensor * src1 = dst->src[1];

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    GGML_TENSOR_BINARY_OP_LOCALS

    const int64_t nc = ne00;
    const int64_t nr = ggml_nelements(src1);

    assert(ne0  == nc);
    assert(ne02 == ne11);
    assert(nb00 == sizeof(ggml_bf16_t));
    assert(ggml_nrows(dst) == nr);

    const int ith = params->ith;
    const int nth = params->nth;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int64_t i = ir0; i < ir1; ++i) {
        const int64_t i12 = i/(ne11*ne10);
        const int64_t i11 = (i - i12*ne11*ne10)/ne10;
        const int64_t i10 = (i - i12*ne11*ne10 - i11*ne10);
        const int64_t i01 = *(int32_t *) ((char *) src1->data + i10*nb10 + i11*nb11 + i12*nb12);

        ggml_bf16_to_fp32_row(
                (const void *) ((char *) src0->data + i01*nb01 + i11*nb02 + i12*nb03),
                     (float *) ((char *)  dst->data + i10*nb1  + i11*nb2  + i12*nb3), nc);
    }
}

static void ggml_compute_forward_get_rows_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];
    const struct ggml_tensor * src1 = dst->src[1];

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    GGML_TENSOR_BINARY_OP_LOCALS

    const int64_t nc = ne00;
    const int64_t nr = ggml_nelements(src1);

    assert(ne0  == nc);
    assert(ne02 == ne11);
    assert(nb00 == sizeof(float));
    assert(ggml_nrows(dst) == nr);

    const int ith = params->ith;
    const int nth = params->nth;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int64_t i = ir0; i < ir1; ++i) {
        const int64_t i12 = i/(ne11*ne10);
        const int64_t i11 = (i - i12*ne11*ne10)/ne10;
        const int64_t i10 = (i - i12*ne11*ne10 - i11*ne10);
        const int64_t i01 = *(int32_t *) ((char *) src1->data + i10*nb10 + i11*nb11 + i12*nb12);

        ggml_vec_cpy_f32(nc,
                (float *) ((char *)  dst->data + i10*nb1  + i11*nb2  + i12*nb3),
                (float *) ((char *) src0->data + i01*nb01 + i11*nb02 + i12*nb03));
    }
}

static void ggml_compute_forward_get_rows(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q8_1:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
        case GGML_TYPE_IQ2_XXS:
        case GGML_TYPE_IQ2_XS:
        case GGML_TYPE_IQ3_XXS:
        case GGML_TYPE_IQ1_S:
        case GGML_TYPE_IQ1_M:
        case GGML_TYPE_IQ4_NL:
        case GGML_TYPE_IQ4_XS:
        case GGML_TYPE_IQ3_S:
        case GGML_TYPE_IQ2_S:
            {
                ggml_compute_forward_get_rows_q(params, dst);
            } break;
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_get_rows_f16(params, dst);
            } break;
        case GGML_TYPE_BF16:
            {
                ggml_compute_forward_get_rows_bf16(params, dst);
            } break;
        case GGML_TYPE_F32:
        case GGML_TYPE_I32:
            {
                ggml_compute_forward_get_rows_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }

    //static bool first = true;
    //printf("ne0 = %d, ne1 = %d, ne2 = %d\n", dst->ne[0], dst->ne[1], dst->ne[2]);
    //if (first) {
    //    first = false;
    //} else {
    //    for (int k = 0; k < dst->ne[1]; ++k) {
    //        for (int j = 0; j < dst->ne[0]/16; ++j) {
    //            for (int i = 0; i < 16; ++i) {
    //                printf("%8.4f ", ((float *) dst->data)[k*dst->ne[0] + j*16 + i]);
    //            }
    //            printf("\n");
    //        }
    //        printf("\n");
    //    }
    //    printf("\n");
    //    exit(0);
    //}
}

// ggml_compute_forward_get_rows_back

static void ggml_compute_forward_get_rows_back_f32_f16(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];
    const struct ggml_tensor * src1 = dst->src[1];

    GGML_ASSERT(params->ith == 0);
    GGML_ASSERT(ggml_is_contiguous(dst));

    // ggml_compute_forward_dup_same_cont(params, opt0, dst);

    if (params->type == GGML_TASK_TYPE_INIT) {
        if (params->ith != 0) {
            return;
        }
        memset(dst->data, 0, ggml_nbytes(dst));
    }

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    const int nc = src0->ne[0];
    const int nr = ggml_nelements(src1);

    GGML_ASSERT( dst->ne[0] == nc);
    GGML_ASSERT(src0->nb[0] == sizeof(ggml_fp16_t));

    for (int i = 0; i < nr; ++i) {
        const int r = ((int32_t *) src1->data)[i];

        for (int j = 0; j < nc; ++j) {
            ggml_fp16_t v = ((ggml_fp16_t *) ((char *) src0->data + i*src0->nb[1]))[j];
            ((float *) ((char *) dst->data + r*dst->nb[1]))[j] += GGML_FP16_TO_FP32(v);
        }
    }
}

static void ggml_compute_forward_get_rows_back_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];
    const struct ggml_tensor * src1 = dst->src[1];

    GGML_ASSERT(params->ith == 0);
    GGML_ASSERT(ggml_is_contiguous(dst));

    // ggml_compute_forward_dup_same_cont(params, opt0, dst);

    if (params->type == GGML_TASK_TYPE_INIT) {
        if (params->ith != 0) {
            return;
        }
        memset(dst->data, 0, ggml_nbytes(dst));
    }

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    const int nc = src0->ne[0];
    const int nr = ggml_nelements(src1);

    GGML_ASSERT( dst->ne[0] == nc);
    GGML_ASSERT(src0->nb[0] == sizeof(float));

    for (int i = 0; i < nr; ++i) {
        const int r = ((int32_t *) src1->data)[i];

        ggml_vec_add_f32(nc,
                (float *) ((char *)  dst->data + r*dst->nb[1]),
                (float *) ((char *)  dst->data + r*dst->nb[1]),
                (float *) ((char *) src0->data + i*src0->nb[1]));
    }
}
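
// illustrative example (not part of the kernel): get_rows_back scatter-adds rows of src0
// (the incoming gradient) into dst at the row ids used by the forward get_rows. duplicate
// ids accumulate: with ids = {3, 0, 3}, gradient rows 0 and 2 are both added into dst row
// 3, which is why dst is zeroed in INIT and updated with += rather than a plain copy.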
  10733. static void ggml_compute_forward_get_rows_back(
  10734. const struct ggml_compute_params * params,
  10735. struct ggml_tensor * dst) {
  10736. const struct ggml_tensor * src0 = dst->src[0];
  10737. switch (src0->type) {
  10738. case GGML_TYPE_F16:
  10739. {
  10740. ggml_compute_forward_get_rows_back_f32_f16(params, dst);
  10741. } break;
  10742. case GGML_TYPE_F32:
  10743. {
  10744. ggml_compute_forward_get_rows_back_f32(params, dst);
  10745. } break;
  10746. default:
  10747. {
  10748. GGML_ASSERT(false);
  10749. } break;
  10750. }
  10751. //static bool first = true;
  10752. //printf("ne0 = %d, ne1 = %d, ne2 = %d\n", dst->ne[0], dst->ne[1], dst->ne[2]);
  10753. //if (first) {
  10754. // first = false;
  10755. //} else {
  10756. // for (int k = 0; k < dst->ne[1]; ++k) {
  10757. // for (int j = 0; j < dst->ne[0]/16; ++j) {
  10758. // for (int i = 0; i < 16; ++i) {
  10759. // printf("%8.4f ", ((float *) dst->data)[k*dst->ne[0] + j*16 + i]);
  10760. // }
  10761. // printf("\n");
  10762. // }
  10763. // printf("\n");
  10764. // }
  10765. // printf("\n");
  10766. // exit(0);
  10767. //}
  10768. }
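
// The backward pass above is a scatter-add: row i of the incoming gradient is
// accumulated into row src1[i] of dst, and rows that were gathered more than
// once sum all of their contributions. A minimal standalone sketch of that
// semantics on plain row-major buffers -- illustrative only, not part of ggml,
// all names hypothetical:

static inline void get_rows_back_ref(
        int nc, int nr,               // columns, number of gathered rows
        const float   * grad,         // [nr, nc] gradient w.r.t. the gathered rows
        const int32_t * rows,         // [nr]     row indices used by get_rows
        float * dst) {                // [*, nc]  gradient w.r.t. the table, pre-zeroed
    for (int i = 0; i < nr; ++i) {
        const int r = rows[i];
        for (int j = 0; j < nc; ++j) {
            dst[r*nc + j] += grad[i*nc + j];
        }
    }
}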
// ggml_compute_forward_diag

static void ggml_compute_forward_diag_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    GGML_ASSERT(params->ith == 0);

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    // TODO: handle transposed/permuted matrices

    GGML_TENSOR_UNARY_OP_LOCALS

    GGML_ASSERT(ne00 == ne0);
    GGML_ASSERT(ne00 == ne1);
    GGML_ASSERT(ne01 == 1);
    GGML_ASSERT(ne02 == ne2);
    GGML_ASSERT(ne03 == ne3);

    GGML_ASSERT(nb00 == sizeof(float));
    GGML_ASSERT(nb0  == sizeof(float));

    for (int i3 = 0; i3 < ne3; i3++) {
        for (int i2 = 0; i2 < ne2; i2++) {
            for (int i1 = 0; i1 < ne1; i1++) {
                float * d = (float *)((char *)  dst->data + i3*nb3  + i2*nb2 + i1*nb1);
                float * s = (float *)((char *) src0->data + i3*nb03 + i2*nb02);
                for (int i0 = 0; i0 < i1; i0++) {
                    d[i0] = 0;
                }
                d[i1] = s[i1];
                for (int i0 = i1+1; i0 < ne0; i0++) {
                    d[i0] = 0;
                }
            }
        }
    }
}

static void ggml_compute_forward_diag(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_diag_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_diag_mask_inf

static void ggml_compute_forward_diag_mask_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst,
        const float value) {

    const struct ggml_tensor * src0 = dst->src[0];

    const int ith = params->ith;
    const int nth = params->nth;

    const int  n_past  = ((int32_t *) dst->op_params)[0];
    const bool inplace = src0->data == dst->data;

    GGML_ASSERT(n_past >= 0);

    if (!inplace && (params->type == GGML_TASK_TYPE_INIT)) {
        if (ith != 0) {
            return;
        }
        // memcpy needs to be synchronized across threads to avoid race conditions.
        // => do it in INIT phase
        GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));
        GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));
        memcpy(
            ((char *)  dst->data),
            ((char *) src0->data),
            ggml_nbytes(dst));
    }

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    // TODO: handle transposed/permuted matrices

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];
    const int nr = src0->ne[1];
    const int nz = n/nr;

    GGML_ASSERT( dst->nb[0] == sizeof(float));
    GGML_ASSERT(src0->nb[0] == sizeof(float));

    for (int k = 0; k < nz; k++) {
        for (int j = ith; j < nr; j += nth) {
            for (int i = n_past; i < nc; i++) {
                if (i > n_past + j) {
                    *(float *)((char *) dst->data + k*dst->nb[2] + j*dst->nb[1] + i*dst->nb[0]) = value;
                }
            }
        }
    }
}

static void ggml_compute_forward_diag_mask_inf(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_diag_mask_f32(params, dst, -INFINITY);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

static void ggml_compute_forward_diag_mask_zero(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_diag_mask_f32(params, dst, 0);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
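
// The masking loop above implements causal attention masking: entry (j, i)
// survives iff i <= n_past + j, i.e. token j may attend to at most the first
// n_past + j + 1 positions; everything to the right of that shifted diagonal
// is overwritten with `value` (-INFINITY for the _inf variant, 0 for _zero).
// A minimal sketch of the same rule on a plain row-major matrix -- illustrative
// only, not part of ggml:

static inline void diag_mask_ref(int nr, int nc, int n_past, float value, float * x) {
    // x is [nr, nc], row-major; mask everything strictly above the shifted diagonal
    for (int j = 0; j < nr; j++) {
        for (int i = n_past + j + 1; i < nc; i++) {
            x[j*nc + i] = value;
        }
    }
}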
// ggml_compute_forward_soft_max

static void ggml_compute_forward_soft_max_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];
    const struct ggml_tensor * src1 = dst->src[1];

    assert(ggml_is_contiguous(dst));
    assert(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    float scale    = 1.0f;
    float max_bias = 0.0f;

    memcpy(&scale,    (float *) dst->op_params + 0, sizeof(float));
    memcpy(&max_bias, (float *) dst->op_params + 1, sizeof(float));

    // TODO: handle transposed/permuted matrices

    const int ith = params->ith;
    const int nth = params->nth;

    GGML_TENSOR_UNARY_OP_LOCALS

    //const int64_t ne11 = src1 ? src1->ne[1] : 1;

    // TODO: is this supposed to be ceil instead of floor?
    //       https://huggingface.co/mosaicml/mpt-7b/blob/main/attention.py#L370
    const uint32_t n_head      = ne02;
    const uint32_t n_head_log2 = 1u << (uint32_t) floor(log2(n_head));

    const float m0 = powf(2.0f, -(max_bias       ) / n_head_log2);
    const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);

    const int nc = src0->ne[0];
    const int nr = ggml_nrows(src0);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    float * wp = (float *) params->wdata + (nc + CACHE_LINE_SIZE_F32) * ith;

    const bool use_f16 = (src1 && src1->type == GGML_TYPE_F16);

    for (int i1 = ir0; i1 < ir1; i1++) {
        // ALiBi
        const uint32_t h = (i1/ne01)%ne02; // head
        const float slope = (max_bias > 0.0f) ? h < n_head_log2 ? powf(m0, h + 1) : powf(m1, 2*(h - n_head_log2) + 1) : 1.0f;

        float * sp = (float *)((char *) src0->data + i1*src0->nb[1]);
        float * dp = (float *)((char *)  dst->data +  i1*dst->nb[1]);

        // broadcast the mask across rows
        ggml_fp16_t * mp_f16 = src1 ? (ggml_fp16_t *)((char *) src1->data) + (i1%ne01)*ne00 : NULL;
        float       * mp_f32 = src1 ? (float       *)((char *) src1->data) + (i1%ne01)*ne00 : NULL;

        ggml_vec_cpy_f32  (nc, wp, sp);
        ggml_vec_scale_f32(nc, wp, scale);
        if (mp_f32) {
            if (use_f16) {
                for (int i = 0; i < nc; ++i) {
                    wp[i] += slope*GGML_FP16_TO_FP32(mp_f16[i]);
                }
            } else {
                for (int i = 0; i < nc; ++i) {
                    wp[i] += slope*mp_f32[i];
                }
            }
        }

#ifndef NDEBUG
        for (int i = 0; i < nc; ++i) {
            //printf("p[%d] = %f\n", i, p[i]);
            assert(!isnan(wp[i]));
        }
#endif

        float max = -INFINITY;
        ggml_vec_max_f32(nc, &max, wp);

        ggml_float sum = 0.0;

        uint16_t scvt;
        for (int i = 0; i < nc; i++) {
            if (wp[i] == -INFINITY) {
                dp[i] = 0.0f;
            } else {
                // const float val = (wp[i] == -INFINITY) ? 0.0 : exp(wp[i] - max);
                ggml_fp16_t s = GGML_FP32_TO_FP16(wp[i] - max);
                memcpy(&scvt, &s, sizeof(scvt));
                const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt]);
                sum += (ggml_float)val;
                dp[i] = val;
            }
        }

        assert(sum > 0.0);

        sum = 1.0/sum;
        ggml_vec_scale_f32(nc, dp, sum);

#ifndef NDEBUG
        for (int i = 0; i < nc; ++i) {
            assert(!isnan(dp[i]));
            assert(!isinf(dp[i]));
        }
#endif
    }
}
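
// The per-head slope above follows the original ALiBi recipe: with n_head_log2
// the largest power of two <= n_head, head h (0-based) gets slope
// m0^(h+1) = 2^(-max_bias*(h+1)/n_head_log2), and any heads beyond n_head_log2
// fall back to odd powers of m1. A standalone sketch of just that formula --
// illustrative only, not part of ggml:

static inline float alibi_slope_ref(uint32_t h, uint32_t n_head, float max_bias) {
    const uint32_t n_head_log2 = 1u << (uint32_t) floor(log2(n_head));

    const float m0 = powf(2.0f, -(max_bias       ) / n_head_log2);
    const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);

    // first n_head_log2 heads: m0^(h+1); remaining heads: m1^(2*(h-n_head_log2)+1)
    return h < n_head_log2 ? powf(m0, h + 1) : powf(m1, 2*(h - n_head_log2) + 1);
}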
static void ggml_compute_forward_soft_max(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_soft_max_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_soft_max_back

static void ggml_compute_forward_soft_max_back_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];
    const struct ggml_tensor * src1 = dst->src[1];

    GGML_ASSERT(ggml_is_contiguous(src0));
    GGML_ASSERT(ggml_is_contiguous(src1));
    GGML_ASSERT(ggml_is_contiguous(dst));
    GGML_ASSERT(ggml_are_same_shape(src0, dst));
    GGML_ASSERT(ggml_are_same_shape(src1, dst));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    // TODO: handle transposed/permuted matrices

    const int ith = params->ith;
    const int nth = params->nth;

    const int nc = src0->ne[0];
    const int nr = ggml_nrows(src0);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        float * dy = (float *)((char *) src0->data + i1*src0->nb[1]);
        float * y  = (float *)((char *) src1->data + i1*src1->nb[1]);
        float * dx = (float *)((char *)  dst->data +  i1*dst->nb[1]);

#ifndef NDEBUG
        for (int i = 0; i < nc; ++i) {
            //printf("p[%d] = %f\n", i, p[i]);
            assert(!isnan(dy[i]));
            assert(!isnan(y[i]));
        }
#endif
        // Jii = yi - yi*yi
        // Jij = -yi*yj
        // J = diag(y)-y.T*y
        // dx = J * dy
        // dxk = sum_i(Jki * dyi)
        // dxk = sum_i(-yk*yi * dyi) - (-yk*yk)*dyk + (yk - yk*yk)*dyk
        // dxk = sum_i(-yk*yi * dyi) + yk*yk*dyk + yk*dyk - yk*yk*dyk
        // dxk = sum_i(-yk*yi * dyi) + yk*dyk
        // dxk = -yk * sum_i(yi * dyi) + yk*dyk
        // dxk = -yk * dot(y, dy) + yk*dyk
        // dxk = yk * (- dot(y, dy) + dyk)
        // dxk = yk * (dyk - dot(y, dy))
        //
        // post-order:
        // dot_y_dy := dot(y, dy)
        // dx := dy
        // dx := dx - dot_y_dy
        // dx := dx * y

        // linear runtime, no additional memory
        float dot_y_dy = 0;
        ggml_vec_dot_f32 (nc, &dot_y_dy, 0, y, 0, dy, 0, 1);
        ggml_vec_cpy_f32 (nc, dx, dy);
        ggml_vec_acc1_f32(nc, dx, -dot_y_dy);
        ggml_vec_mul_f32 (nc, dx, dx, y);

#ifndef NDEBUG
        for (int i = 0; i < nc; ++i) {
            assert(!isnan(dx[i]));
            assert(!isinf(dx[i]));
        }
#endif
    }
}
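
// The derivation above boils the full Jacobian-vector product down to
// dx_k = y_k * (dy_k - dot(y, dy)). A naive single-row reference of that rule,
// handy for checking the vectorized version -- illustrative only, not part of
// ggml:

static inline void soft_max_back_row_ref(int n, const float * y, const float * dy, float * dx) {
    float dot_y_dy = 0.0f;
    for (int i = 0; i < n; i++) {
        dot_y_dy += y[i]*dy[i];
    }
    for (int i = 0; i < n; i++) {
        dx[i] = y[i]*(dy[i] - dot_y_dy);
    }
}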
static void ggml_compute_forward_soft_max_back(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_soft_max_back_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_clamp

static void ggml_compute_forward_clamp_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    assert(params->ith == 0);

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    float min;
    float max;
    memcpy(&min, (float *) dst->op_params + 0, sizeof(float));
    memcpy(&max, (float *) dst->op_params + 1, sizeof(float));

    const int ith = params->ith;
    const int nth = params->nth;

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    const size_t nb00 = src0->nb[0];
    const size_t nb01 = src0->nb[1];

    const size_t nb0 = dst->nb[0];
    const size_t nb1 = dst->nb[1];

    GGML_ASSERT( nb0 == sizeof(float));
    GGML_ASSERT(nb00 == sizeof(float));

    for (int j = ith; j < n; j += nth) {
        float * dst_ptr  = (float *) ((char *)  dst->data + j*nb1);
        float * src0_ptr = (float *) ((char *) src0->data + j*nb01);

        for (int i = 0; i < nc; i++) {
            dst_ptr[i] = MAX(MIN(src0_ptr[i], max), min);
        }
    }
}

static void ggml_compute_forward_clamp(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_clamp_f32(params, dst);
            } break;
        case GGML_TYPE_F16:
        case GGML_TYPE_BF16:
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q8_1:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
        case GGML_TYPE_IQ2_XXS:
        case GGML_TYPE_IQ2_XS:
        case GGML_TYPE_IQ3_XXS:
        case GGML_TYPE_IQ1_S:
        case GGML_TYPE_IQ1_M:
        case GGML_TYPE_IQ4_NL:
        case GGML_TYPE_IQ4_XS:
        case GGML_TYPE_IQ3_S:
        case GGML_TYPE_IQ2_S:
        case GGML_TYPE_Q8_K:
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_I64:
        case GGML_TYPE_F64:
        case GGML_TYPE_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_rope

static float rope_yarn_ramp(const float low, const float high, const int i0) {
    const float y = (i0 / 2 - low) / MAX(0.001f, high - low);
    return 1 - MIN(1, MAX(0, y));
}

// YaRN algorithm based on LlamaYaRNScaledRotaryEmbedding.py from https://github.com/jquesnelle/yarn
// MIT licensed. Copyright (c) 2023 Jeffrey Quesnelle and Bowen Peng.
static void rope_yarn(
    float theta_extrap, float freq_scale, float corr_dims[2], int64_t i0, float ext_factor, float mscale,
    float * cos_theta, float * sin_theta
) {
    // Get n-d rotational scaling corrected for extrapolation
    float theta_interp = freq_scale * theta_extrap;
    float theta = theta_interp;
    if (ext_factor != 0.0f) {
        float ramp_mix = rope_yarn_ramp(corr_dims[0], corr_dims[1], i0) * ext_factor;
        theta = theta_interp * (1 - ramp_mix) + theta_extrap * ramp_mix;

        // Get n-d magnitude scaling corrected for interpolation
        mscale *= 1.0f + 0.1f * logf(1.0f / freq_scale);
    }
    *cos_theta = cosf(theta) * mscale;
    *sin_theta = sinf(theta) * mscale;
}

// Apparently solving `n_rot = 2pi * x * base^((2 * max_pos_emb) / n_dims)` for x, we get
// `corr_dim(n_rot) = n_dims * log(max_pos_emb / (n_rot * 2pi)) / (2 * log(base))`
static float ggml_rope_yarn_corr_dim(int n_dims, int n_orig_ctx, float n_rot, float base) {
    return n_dims * logf(n_orig_ctx / (n_rot * 2 * (float)M_PI)) / (2 * logf(base));
}

static void ggml_rope_cache_init(
     float theta_base, float freq_scale, float corr_dims[2], int64_t ne0, float ext_factor, float mscale,
     float * cache, float sin_sign, float theta_scale
) {
    float theta = theta_base;
    for (int64_t i0 = 0; i0 < ne0; i0 += 2) {
        rope_yarn(
            theta, freq_scale, corr_dims, i0, ext_factor, mscale, &cache[i0 + 0], &cache[i0 + 1]
        );
        cache[i0 + 1] *= sin_sign;

        theta *= theta_scale;
    }
}

GGML_CALL void ggml_rope_yarn_corr_dims(
    int n_dims, int n_orig_ctx, float freq_base, float beta_fast, float beta_slow, float dims[2]
) {
    // start and end correction dims
    float start = floorf(ggml_rope_yarn_corr_dim(n_dims, n_orig_ctx, beta_fast, freq_base));
    float end   =  ceilf(ggml_rope_yarn_corr_dim(n_dims, n_orig_ctx, beta_slow, freq_base));
    dims[0] = MAX(0, start);
    dims[1] = MIN(n_dims - 1, end);
}
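
// Usage sketch with illustrative values (not part of ggml): for a LLaMA-style
// setup with 128 rotary dims, an original context of 4096, base 10000 and the
// common beta_fast/beta_slow of 32/1, the correction range works out to
// roughly [20, 46] -- dimensions below dims[0] extrapolate, dimensions above
// dims[1] interpolate, and rope_yarn_ramp() blends the two in between.

static inline void rope_yarn_corr_dims_example(void) {
    float corr_dims[2];
    ggml_rope_yarn_corr_dims(128, 4096, 10000.0f, 32.0f, 1.0f, corr_dims);
    printf("YaRN correction dims: [%.1f, %.1f]\n", corr_dims[0], corr_dims[1]);
}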
static void ggml_compute_forward_rope_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst,
        const bool forward) {

    const struct ggml_tensor * src0 = dst->src[0];
    const struct ggml_tensor * src1 = dst->src[1];

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow;

    // these two only relevant for xPos RoPE:
    float xpos_base;
    bool  xpos_down;

    //const int n_past     = ((int32_t *) dst->op_params)[0];
    const int n_dims     = ((int32_t *) dst->op_params)[1];
    const int mode       = ((int32_t *) dst->op_params)[2];
    const int n_ctx      = ((int32_t *) dst->op_params)[3];
    const int n_orig_ctx = ((int32_t *) dst->op_params)[4];

    memcpy(&freq_base,   (int32_t *) dst->op_params +  5, sizeof(float));
    memcpy(&freq_scale,  (int32_t *) dst->op_params +  6, sizeof(float));
    memcpy(&ext_factor,  (int32_t *) dst->op_params +  7, sizeof(float));
    memcpy(&attn_factor, (int32_t *) dst->op_params +  8, sizeof(float));
    memcpy(&beta_fast,   (int32_t *) dst->op_params +  9, sizeof(float));
    memcpy(&beta_slow,   (int32_t *) dst->op_params + 10, sizeof(float));
    memcpy(&xpos_base,   (int32_t *) dst->op_params + 11, sizeof(float));
    memcpy(&xpos_down,   (int32_t *) dst->op_params + 12, sizeof(bool));

    GGML_TENSOR_UNARY_OP_LOCALS

    //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
    //printf("n_past = %d, ne2 = %d\n", n_past, ne2);

    GGML_ASSERT(nb00 == sizeof(float));

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(dst);

    GGML_ASSERT(n_dims <= ne0);
    GGML_ASSERT(n_dims % 2 == 0);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    // row index used to determine which thread to use
    int ir = 0;

    const float theta_scale = powf(freq_base, -2.0f/n_dims);
    const float inv_ndims   = -1.f/n_dims;
    float corr_dims[2];
    ggml_rope_yarn_corr_dims(n_dims, n_orig_ctx, freq_base, beta_fast, beta_slow, corr_dims);

    const bool is_neox = mode & 2;
    const bool is_glm  = mode & 4;

    // backward process uses inverse rotation by cos and sin.
    // cos and sin build a rotation matrix, where the inverse is the transpose.
    // this essentially just switches the sign of sin.
    const float sin_sign = forward ? 1.0f : -1.0f;

    const int32_t * pos = (const int32_t *) src1->data;

    for (int64_t i3 = 0; i3 < ne3; i3++) {
        for (int64_t i2 = 0; i2 < ne2; i2++) {
            const int64_t p = pos[i2];

            float * cache = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32)*ith;
            if (!is_glm && !is_neox) { // TODO: cache sin/cos for glm, neox
                ggml_rope_cache_init(p, freq_scale, corr_dims, ne0, ext_factor, attn_factor, cache, sin_sign, theta_scale);
            }

            for (int64_t i1 = 0; i1 < ne1; i1++) {
                if (ir++ < ir0) continue;
                if (ir   > ir1) break;

                float theta_base = (float)p;

                if (is_glm) {
                    theta_base = MIN(p, n_ctx - 2);
                    float block_theta = MAX(p - (n_ctx - 2), 0);
                    for (int64_t i0 = 0; i0 < ne0 / 4; i0++) {
                        const float cos_theta = cosf(theta_base);
                        const float sin_theta = sinf(theta_base) * sin_sign;
                        const float cos_block_theta = cosf(block_theta);
                        const float sin_block_theta = sinf(block_theta) * sin_sign;

                        theta_base  *= theta_scale;
                        block_theta *= theta_scale;

                        const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
                              float * dst_data  = (float *)((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + i0*nb0);

                        const float x0 = src[0];
                        const float x1 = src[n_dims/2];
                        const float x2 = src[n_dims];
                        const float x3 = src[n_dims/2*3];

                        dst_data[0]          = x0*cos_theta - x1*sin_theta;
                        dst_data[n_dims/2]   = x0*sin_theta + x1*cos_theta;
                        dst_data[n_dims]     = x2*cos_block_theta - x3*sin_block_theta;
                        dst_data[n_dims/2*3] = x2*sin_block_theta + x3*cos_block_theta;
                    }
                } else if (!is_neox) {
                    for (int64_t i0 = 0; i0 < ne0; i0 += 2) {
                        const float cos_theta = cache[i0 + 0];
                        const float sin_theta = cache[i0 + 1];

                        // zeta scaling for xPos only:
                        float zeta = xpos_base != 0.0f ? powf((i0 + 0.4f * ne0) / (1.4f * ne0), p / xpos_base) : 1.0f;
                        if (xpos_down) zeta = 1.0f / zeta;

                        const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
                              float * dst_data  = (float *)((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + i0*nb0);

                        const float x0 = src[0];
                        const float x1 = src[1];

                        dst_data[0] = x0*cos_theta*zeta - x1*sin_theta*zeta;
                        dst_data[1] = x0*sin_theta*zeta + x1*cos_theta*zeta;
                    }
                } else {
                    // TODO: this might be wrong for ne0 != n_dims - need double check
                    //       it seems we have to rope just the first n_dims elements and do nothing with the rest
                    //       ref: https://github.com/ml-explore/mlx/blob/dc2edc762c797e3b8de50b1dad4dc0a131691033/benchmarks/python/llama_jax_bench.py#L11-L26
                    theta_base *= freq_scale;
                    for (int64_t ic = 0; ic < ne0; ic += 2) {
                        if (ic < n_dims) {
                            const int64_t ib = 0;

                            // simplified from `(ib * n_dims + ic) * inv_ndims`
                            float cur_rot = inv_ndims * ic - ib;

                            float cos_theta, sin_theta;
                            rope_yarn(
                                theta_base, freq_scale, corr_dims, cur_rot, ext_factor, attn_factor,
                                &cos_theta, &sin_theta
                            );
                            sin_theta *= sin_sign;

                            theta_base *= theta_scale;

                            const int64_t i0 = ib*n_dims + ic/2;

                            const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
                                  float * dst_data  = (float *)((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + i0*nb0);

                            const float x0 = src[0];
                            const float x1 = src[n_dims/2];

                            dst_data[0]        = x0*cos_theta - x1*sin_theta;
                            dst_data[n_dims/2] = x0*sin_theta + x1*cos_theta;
                        } else {
                            const int64_t i0 = ic;

                            const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
                                  float * dst_data  = (float *)((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + i0*nb0);

                            dst_data[0] = src[0];
                            dst_data[1] = src[1];
                        }
                    }
                }
            }
        }
    }
}

static void ggml_compute_forward_rope_f16(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst,
        const bool forward) {

    const struct ggml_tensor * src0 = dst->src[0];
    const struct ggml_tensor * src1 = dst->src[1];

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow;

    //const int n_past     = ((int32_t *) dst->op_params)[0];
    const int n_dims     = ((int32_t *) dst->op_params)[1];
    const int mode       = ((int32_t *) dst->op_params)[2];
    const int n_ctx      = ((int32_t *) dst->op_params)[3];
    const int n_orig_ctx = ((int32_t *) dst->op_params)[4];

    memcpy(&freq_base,   (int32_t *) dst->op_params +  5, sizeof(float));
    memcpy(&freq_scale,  (int32_t *) dst->op_params +  6, sizeof(float));
    memcpy(&ext_factor,  (int32_t *) dst->op_params +  7, sizeof(float));
    memcpy(&attn_factor, (int32_t *) dst->op_params +  8, sizeof(float));
    memcpy(&beta_fast,   (int32_t *) dst->op_params +  9, sizeof(float));
    memcpy(&beta_slow,   (int32_t *) dst->op_params + 10, sizeof(float));

    GGML_TENSOR_UNARY_OP_LOCALS

    //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
    //printf("n_past = %d, ne2 = %d\n", n_past, ne2);

    GGML_ASSERT(nb0 == sizeof(ggml_fp16_t));

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(dst);

    GGML_ASSERT(n_dims <= ne0);
    GGML_ASSERT(n_dims % 2 == 0);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    // row index used to determine which thread to use
    int ir = 0;

    const float theta_scale = powf(freq_base, -2.0f/n_dims);
    const float inv_ndims   = -1.f/n_dims;
    float corr_dims[2];
    ggml_rope_yarn_corr_dims(n_dims, n_orig_ctx, freq_base, beta_fast, beta_slow, corr_dims);

    const bool is_neox = mode & 2;
    const bool is_glm  = mode & 4;

    // backward process uses inverse rotation by cos and sin.
    // cos and sin build a rotation matrix, where the inverse is the transpose.
    // this essentially just switches the sign of sin.
    const float sin_sign = forward ? 1.0f : -1.0f;

    const int32_t * pos = (const int32_t *) src1->data;

    for (int64_t i3 = 0; i3 < ne3; i3++) {
        for (int64_t i2 = 0; i2 < ne2; i2++) {
            const int64_t p = pos[i2];

            float * cache = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32)*ith;
            if (!is_glm && !is_neox) { // TODO: cache sin/cos for glm, neox
                ggml_rope_cache_init(p, freq_scale, corr_dims, ne0, ext_factor, attn_factor, cache, sin_sign, theta_scale);
            }

            for (int64_t i1 = 0; i1 < ne1; i1++) {
                if (ir++ < ir0) continue;
                if (ir   > ir1) break;

                float theta_base = (float)p;

                if (is_glm) {
                    theta_base = MIN(p, n_ctx - 2);
                    float block_theta = MAX(p - (n_ctx - 2), 0);
                    for (int64_t i0 = 0; i0 < ne0 / 4; i0++) {
                        const float cos_theta = cosf(theta_base);
                        const float sin_theta = sinf(theta_base) * sin_sign;
                        const float cos_block_theta = cosf(block_theta);
                        const float sin_block_theta = sinf(block_theta) * sin_sign;

                        theta_base  *= theta_scale;
                        block_theta *= theta_scale;

                        const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
                              ggml_fp16_t * dst_data  = (ggml_fp16_t *)((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + i0*nb0);

                        const float x0 = GGML_FP16_TO_FP32(src[0]);
                        const float x1 = GGML_FP16_TO_FP32(src[n_dims/2]);
                        const float x2 = GGML_FP16_TO_FP32(src[n_dims]);
                        const float x3 = GGML_FP16_TO_FP32(src[n_dims/2*3]);

                        dst_data[0]          = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta);
                        dst_data[n_dims/2]   = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta);
                        dst_data[n_dims]     = GGML_FP32_TO_FP16(x2*cos_block_theta - x3*sin_block_theta);
                        dst_data[n_dims/2*3] = GGML_FP32_TO_FP16(x2*sin_block_theta + x3*cos_block_theta);
                    }
                } else if (!is_neox) {
                    for (int64_t i0 = 0; i0 < ne0; i0 += 2) {
                        const float cos_theta = cache[i0 + 0];
                        const float sin_theta = cache[i0 + 1];

                        const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
                              ggml_fp16_t * dst_data  = (ggml_fp16_t *)((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + i0*nb0);

                        const float x0 = GGML_FP16_TO_FP32(src[0]);
                        const float x1 = GGML_FP16_TO_FP32(src[1]);

                        dst_data[0] = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta);
                        dst_data[1] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta);
                    }
                } else {
                    // TODO: this might be wrong for ne0 != n_dims - need double check
                    //       it seems we have to rope just the first n_dims elements and do nothing with the rest
                    //       ref: https://github.com/ml-explore/mlx/blob/dc2edc762c797e3b8de50b1dad4dc0a131691033/benchmarks/python/llama_jax_bench.py#L11-L26
                    theta_base *= freq_scale;
                    for (int64_t ic = 0; ic < ne0; ic += 2) {
                        if (ic < n_dims) {
                            const int64_t ib = 0;

                            // simplified from `(ib * n_dims + ic) * inv_ndims`
                            float cur_rot = inv_ndims * ic - ib;

                            float cos_theta, sin_theta;
                            rope_yarn(
                                theta_base, freq_scale, corr_dims, cur_rot, ext_factor, attn_factor,
                                &cos_theta, &sin_theta
                            );
                            sin_theta *= sin_sign;

                            theta_base *= theta_scale;

                            const int64_t i0 = ib*n_dims + ic/2;

                            const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
                                  ggml_fp16_t * dst_data  = (ggml_fp16_t *)((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + i0*nb0);

                            const float x0 = GGML_FP16_TO_FP32(src[0]);
                            const float x1 = GGML_FP16_TO_FP32(src[n_dims/2]);

                            dst_data[0]        = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta);
                            dst_data[n_dims/2] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta);
                        } else {
                            const int64_t i0 = ic;

                            const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
                                  ggml_fp16_t * dst_data  = (ggml_fp16_t *)((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + i0*nb0);

                            dst_data[0] = src[0];
                            dst_data[1] = src[1];
                        }
                    }
                }
            }
        }
    }
}

static void ggml_compute_forward_rope(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_rope_f16(params, dst, true);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_rope_f32(params, dst, true);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_rope_back

static void ggml_compute_forward_rope_back(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_rope_f16(params, dst, false);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_rope_f32(params, dst, false);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
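
// The forward/backward pair above differs only in sin_sign: rotating a pair by
// theta and then by -theta is the identity, because the 2x2 rotation matrix
// built from (cos, sin) has its inverse equal to its transpose, built from
// (cos, -sin). A minimal sketch of that round trip for one pair of elements,
// ignoring the YaRN magnitude scale mscale -- illustrative only, not part of
// ggml:

static inline void rope_rotate_pair_ref(float cos_theta, float sin_theta, float * x0, float * x1) {
    const float v0 = *x0;
    const float v1 = *x1;
    *x0 = v0*cos_theta - v1*sin_theta;
    *x1 = v0*sin_theta + v1*cos_theta;
}

// rope_rotate_pair_ref(c,  s, &x0, &x1);  // forward rotation
// rope_rotate_pair_ref(c, -s, &x0, &x1);  // backward: recovers the original pair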
// ggml_compute_forward_conv_transpose_1d

static void ggml_compute_forward_conv_transpose_1d_f16_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];
    const struct ggml_tensor * src1 = dst->src[1];

    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    GGML_TENSOR_BINARY_OP_LOCALS

    const int ith = params->ith;
    const int nth = params->nth;

    const int nk = ne00*ne01*ne02;

    GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nb10 == sizeof(float));

    if (params->type == GGML_TASK_TYPE_INIT) {
        if (ith != 0) {
            return;
        }
        memset(params->wdata, 0, params->wsize);

        // permute kernel data (src0) from (K x Cout x Cin) to (Cin x K x Cout)
        {
            ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;

            for (int64_t i02 = 0; i02 < ne02; i02++) {
                for (int64_t i01 = 0; i01 < ne01; i01++) {
                    const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i02*nb02 + i01*nb01);
                    ggml_fp16_t * dst_data = wdata + i01*ne00*ne02;
                    for (int64_t i00 = 0; i00 < ne00; i00++) {
                        dst_data[i00*ne02 + i02] = src[i00];
                    }
                }
            }
        }

        // permute source data (src1) from (L x Cin) to (Cin x L)
        {
            ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + nk;
            ggml_fp16_t * dst_data = wdata;

            for (int64_t i11 = 0; i11 < ne11; i11++) {
                const float * const src = (float *)((char *) src1->data + i11*nb11);
                for (int64_t i10 = 0; i10 < ne10; i10++) {
                    dst_data[i10*ne11 + i11] = GGML_FP32_TO_FP16(src[i10]);
                }
            }
        }

        // need to zero dst since we are accumulating into it
        memset(dst->data, 0, ggml_nbytes(dst));

        return;
    }

    if (params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    const int32_t s0 = ((const int32_t*)(dst->op_params))[0];

    // total rows in dst
    const int nr = ne1;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    ggml_fp16_t * const wdata     = (ggml_fp16_t *) params->wdata + 0;
    ggml_fp16_t * const wdata_src = wdata + nk;

    for (int i1 = ir0; i1 < ir1; i1++) {
        float * dst_data = (float *)((char *) dst->data + i1*nb1);
        ggml_fp16_t * wdata_kernel = wdata + i1*ne02*ne00;
        for (int i10 = 0; i10 < ne10; i10++) {
            const int i1n = i10*ne11;
            for (int i00 = 0; i00 < ne00; i00++) {
                float v = 0;
                ggml_vec_dot_f16(ne02, &v, 0,
                        (ggml_fp16_t *)    wdata_src + i1n, 0,
                        (ggml_fp16_t *) wdata_kernel + i00*ne02, 0, 1);
                dst_data[i10*s0 + i00] += v;
            }
        }
    }
}

static void ggml_compute_forward_conv_transpose_1d_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];
    const struct ggml_tensor * src1 = dst->src[1];

    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    GGML_TENSOR_BINARY_OP_LOCALS

    const int ith = params->ith;
    const int nth = params->nth;

    const int nk = ne00*ne01*ne02;

    GGML_ASSERT(nb00 == sizeof(float));
    GGML_ASSERT(nb10 == sizeof(float));

    if (params->type == GGML_TASK_TYPE_INIT) {
        if (ith != 0) {
            return;
        }
        memset(params->wdata, 0, params->wsize);

        // prepare kernel data (src0) from (K x Cout x Cin) to (Cin x K x Cout)
        {
            float * const wdata = (float *) params->wdata + 0;

            for (int64_t i02 = 0; i02 < ne02; i02++) {
                for (int64_t i01 = 0; i01 < ne01; i01++) {
                    const float * const src = (float *)((char *) src0->data + i02*nb02 + i01*nb01);
                    float * dst_data = wdata + i01*ne00*ne02;
                    for (int64_t i00 = 0; i00 < ne00; i00++) {
                        dst_data[i00*ne02 + i02] = src[i00];
                    }
                }
            }
        }

        // prepare source data (src1)
        {
            float * const wdata = (float *) params->wdata + nk;
            float * dst_data = wdata;

            for (int64_t i11 = 0; i11 < ne11; i11++) {
                const float * const src = (float *)((char *) src1->data + i11*nb11);
                for (int64_t i10 = 0; i10 < ne10; i10++) {
                    dst_data[i10*ne11 + i11] = src[i10];
                }
            }
        }

        // need to zero dst since we are accumulating into it
        memset(dst->data, 0, ggml_nbytes(dst));

        return;
    }

    if (params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    const int32_t s0 = ((const int32_t*)(dst->op_params))[0];

    // total rows in dst
    const int nr = ne1;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    float * const wdata     = (float *) params->wdata + 0;
    float * const wdata_src = wdata + nk;

    for (int i1 = ir0; i1 < ir1; i1++) {
        float * dst_data = (float *)((char *) dst->data + i1*nb1);
        float * wdata_kernel = wdata + i1*ne02*ne00;
        for (int i10 = 0; i10 < ne10; i10++) {
            const int i1n = i10*ne11;
            for (int i00 = 0; i00 < ne00; i00++) {
                float v = 0;
                ggml_vec_dot_f32(ne02, &v, 0,
                        wdata_src + i1n, 0,
                        wdata_kernel + i00*ne02, 0, 1);
                dst_data[i10*s0 + i00] += v;
            }
        }
    }
}

static void ggml_compute_forward_conv_transpose_1d(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_conv_transpose_1d_f16_f32(params, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_conv_transpose_1d_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
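
// In the transposed 1-d convolution above, each input position i10 scatters a
// length-ne00 kernel window into dst starting at offset i10*s0 (that is the
// `dst_data[i10*s0 + i00] += v` line), so the expected output length is
// (ne10 - 1)*s0 + ne00, assuming no extra output padding. A small helper
// expressing that relation -- illustrative only, not part of ggml:

static inline int64_t conv_transpose_1d_out_len_ref(int64_t n_in, int64_t n_kernel, int64_t stride) {
    // highest written index is (n_in - 1)*stride + n_kernel - 1
    return (n_in - 1)*stride + n_kernel;
}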
// src0: kernel [OC, IC, KH, KW]
// src1: image [N, IC, IH, IW]
// dst:  result [N, OH, OW, IC*KH*KW]
static void ggml_compute_forward_im2col_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];
    const struct ggml_tensor * src1 = dst->src[1];

    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    GGML_TENSOR_BINARY_OP_LOCALS;

    const int32_t s0 = ((const int32_t *)(dst->op_params))[0];
    const int32_t s1 = ((const int32_t *)(dst->op_params))[1];
    const int32_t p0 = ((const int32_t *)(dst->op_params))[2];
    const int32_t p1 = ((const int32_t *)(dst->op_params))[3];
    const int32_t d0 = ((const int32_t *)(dst->op_params))[4];
    const int32_t d1 = ((const int32_t *)(dst->op_params))[5];
    const bool is_2D = ((const int32_t *)(dst->op_params))[6] == 1;

    const int ith = params->ith;
    const int nth = params->nth;

    const int64_t N  = is_2D ? ne13 : ne12;
    const int64_t IC = is_2D ? ne12 : ne11;
    const int64_t IH = is_2D ? ne11 : 1;
    const int64_t IW = ne10;

    const int64_t KH = is_2D ? ne01 : 1;
    const int64_t KW = ne00;

    const int64_t OH = is_2D ? ne2 : 1;
    const int64_t OW = ne1;

    int ofs0 = is_2D ? nb13 : nb12;
    int ofs1 = is_2D ? nb12 : nb11;

    GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nb10 == sizeof(float));

    if (params->type == GGML_TASK_TYPE_INIT) {
        return;
    }

    if (params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    // im2col: [N, IC, IH, IW] => [N, OH, OW, IC*KH*KW]
    {
        float * const wdata = (float *) dst->data;

        for (int64_t in = 0; in < N; in++) {
            for (int64_t ioh = 0; ioh < OH; ioh++) { // 1
                for (int64_t iow = 0; iow < OW; iow++) {
                    for (int64_t iic = ith; iic < IC; iic += nth) {

                        // micro kernel
                        float * dst_data = wdata + (in*OH*OW + ioh*OW + iow)*(IC*KH*KW); // [IC, KH, KW]
                        const float * const src_data = (float *)((char *) src1->data + in*ofs0 + iic*ofs1); // [IH, IW]

                        for (int64_t ikh = 0; ikh < KH; ikh++) {  // 1
                            for (int64_t ikw = 0; ikw < KW; ikw++) {
                                const int64_t iiw = iow*s0 + ikw*d0 - p0;
                                const int64_t iih = ioh*s1 + ikh*d1 - p1;

                                if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) {
                                    dst_data[iic*(KH*KW) + ikh*KW + ikw] = 0;
                                } else {
                                    dst_data[iic*(KH*KW) + ikh*KW + ikw] = (src_data[iih*IW + iiw]);
                                }
                            }
                        }
                    }
                }
            }
        }
    }
}

// src0: kernel [OC, IC, KH, KW]
// src1: image [N, IC, IH, IW]
// dst:  result [N, OH, OW, IC*KH*KW]
static void ggml_compute_forward_im2col_f16(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];
    const struct ggml_tensor * src1 = dst->src[1];

    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F16);

    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    GGML_TENSOR_BINARY_OP_LOCALS;

    const int32_t s0 = ((const int32_t *)(dst->op_params))[0];
    const int32_t s1 = ((const int32_t *)(dst->op_params))[1];
    const int32_t p0 = ((const int32_t *)(dst->op_params))[2];
    const int32_t p1 = ((const int32_t *)(dst->op_params))[3];
    const int32_t d0 = ((const int32_t *)(dst->op_params))[4];
    const int32_t d1 = ((const int32_t *)(dst->op_params))[5];
    const bool is_2D = ((const int32_t *)(dst->op_params))[6] == 1;

    const int ith = params->ith;
    const int nth = params->nth;

    const int64_t N  = is_2D ? ne13 : ne12;
    const int64_t IC = is_2D ? ne12 : ne11;
    const int64_t IH = is_2D ? ne11 : 1;
    const int64_t IW = ne10;

    const int64_t KH = is_2D ? ne01 : 1;
    const int64_t KW = ne00;

    const int64_t OH = is_2D ? ne2 : 1;
    const int64_t OW = ne1;

    int ofs0 = is_2D ? nb13 : nb12;
    int ofs1 = is_2D ? nb12 : nb11;

    GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nb10 == sizeof(float));

    if (params->type == GGML_TASK_TYPE_INIT) {
        return;
    }

    if (params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    // im2col: [N, IC, IH, IW] => [N, OH, OW, IC*KH*KW]
    {
        ggml_fp16_t * const wdata = (ggml_fp16_t *) dst->data;

        for (int64_t in = 0; in < N; in++) {
            for (int64_t ioh = 0; ioh < OH; ioh++) { // 1
                for (int64_t iow = 0; iow < OW; iow++) {
                    for (int64_t iic = ith; iic < IC; iic += nth) {

                        // micro kernel
                        ggml_fp16_t * dst_data = wdata + (in*OH*OW + ioh*OW + iow)*(IC*KH*KW); // [IC, KH, KW]
                        const float * const src_data = (float *)((char *) src1->data + in*ofs0 + iic*ofs1); // [IH, IW]

                        for (int64_t ikh = 0; ikh < KH; ikh++) {  // 1
                            for (int64_t ikw = 0; ikw < KW; ikw++) {
                                const int64_t iiw = iow*s0 + ikw*d0 - p0;
                                const int64_t iih = ioh*s1 + ikh*d1 - p1;

                                if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) {
                                    dst_data[iic*(KH*KW) + ikh*KW + ikw] = 0;
                                } else {
                                    dst_data[iic*(KH*KW) + ikh*KW + ikw] = GGML_FP32_TO_FP16(src_data[iih*IW + iiw]);
                                }
                            }
                        }
                    }
                }
            }
        }
    }
}

static void ggml_compute_forward_im2col(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    switch (dst->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_im2col_f16(params, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_im2col_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
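
// im2col exists to turn convolution into matrix multiplication: after the
// unfold above, every output pixel owns one row of IC*KH*KW gathered input
// values, and convolving with OC kernels reduces to a plain
// [OC, IC*KH*KW] x [IC*KH*KW, N*OH*OW] matmul (which ggml then does with its
// regular mul_mat op). A naive reference of that second step -- illustrative
// only, not part of ggml:

static inline void conv_via_im2col_ref(
        int64_t n_px,          // N*OH*OW: number of output pixels
        int64_t k,             // IC*KH*KW: unfolded patch size
        int64_t oc,            // number of output channels / kernels
        const float * cols,    // [n_px, k] the im2col output
        const float * kernel,  // [oc, k]   flattened kernels
        float * out) {         // [n_px, oc]
    for (int64_t p = 0; p < n_px; p++) {
        for (int64_t c = 0; c < oc; c++) {
            float acc = 0.0f;
            for (int64_t i = 0; i < k; i++) {
                acc += cols[p*k + i] * kernel[c*k + i];
            }
            out[p*oc + c] = acc;
        }
    }
}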
// ggml_compute_forward_conv_transpose_2d

static void ggml_compute_forward_conv_transpose_2d(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];
    const struct ggml_tensor * src1 = dst->src[1];

    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    GGML_TENSOR_BINARY_OP_LOCALS

    const int ith = params->ith;
    const int nth = params->nth;

    const int nk = ne00*ne01*ne02*ne03;

    GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nb10 == sizeof(float));

    if (params->type == GGML_TASK_TYPE_INIT) {
        if (ith != 0) {
            return;
        }
        memset(params->wdata, 0, params->wsize);

        // permute kernel data (src0) from (Kw x Kh x Cout x Cin) to (Cin x Kw x Kh x Cout)
        {
            ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;

            for (int64_t i03 = 0; i03 < ne03; i03++) {
                for (int64_t i02 = 0; i02 < ne02; i02++) {
                    const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i03*nb03 + i02*nb02);
                    ggml_fp16_t * dst_data = wdata + i02*ne01*ne00*ne03;
                    for (int64_t i01 = 0; i01 < ne01; i01++) {
                        for (int64_t i00 = 0; i00 < ne00; i00++) {
                            dst_data[i01*ne00*ne03 + i00*ne03 + i03] = src[i01 * ne00 + i00];
                        }
                    }
                }
            }
        }

        // permute source data (src1) from (Sw x Sh x Cin) to (Cin x Sw x Sh)
        {
            ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + nk;
            for (int i12 = 0; i12 < ne12; i12++) {
                for (int i11 = 0; i11 < ne11; i11++) {
                    const float * const src = (float *)((char *) src1->data + i12*nb12 + i11*nb11);
                    ggml_fp16_t * dst_data = wdata + i11*ne10*ne12;
                    for (int i10 = 0; i10 < ne10; i10++) {
                        dst_data[i10*ne12 + i12] = GGML_FP32_TO_FP16(src[i10]);
                    }
                }
            }
        }

        memset(dst->data, 0, ggml_nbytes(dst));

        return;
    }

    if (params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    const int32_t stride = ggml_get_op_params_i32(dst, 0);

    // total patches in dst
    const int np = ne2;

    // patches per thread
    const int dp = (np + nth - 1)/nth;

    // patch range for this thread
    const int ip0 = dp*ith;
    const int ip1 = MIN(ip0 + dp, np);

    ggml_fp16_t * const wdata     = (ggml_fp16_t *) params->wdata + 0;
    ggml_fp16_t * const wdata_src = wdata + nk;

    for (int i2 = ip0; i2 < ip1; i2++) { // Cout
        float * dst_data = (float *)((char *) dst->data + i2*nb2);
        ggml_fp16_t * wdata_kernel = wdata + i2*ne01*ne00*ne03;
        for (int i11 = 0; i11 < ne11; i11++) {
            for (int i10 = 0; i10 < ne10; i10++) {
                const int i1n = i11*ne10*ne12 + i10*ne12;
                for (int i01 = 0; i01 < ne01; i01++) {
                    for (int i00 = 0; i00 < ne00; i00++) {
                        float v = 0;
                        ggml_vec_dot_f16(ne03, &v, 0,
                                wdata_src + i1n, 0,
                                wdata_kernel + i01*ne00*ne03 + i00*ne03, 0, 1);
                        dst_data[(i11*stride + i01)*ne0 + i10*stride + i00] += v;
                    }
                }
            }
        }
    }
}

// ggml_compute_forward_pool_1d_sk_p0

static void ggml_compute_forward_pool_1d_sk_p0(
        const struct ggml_compute_params * params,
        const enum ggml_op_pool op,
        const int k,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src = dst->src[0];

    assert(src->type == GGML_TYPE_F32);
    assert(params->ith == 0);

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    const char * cdata = (const char *)src->data;
    const char * const data_end = cdata + ggml_nbytes(src);
    float * drow = (float *)dst->data;

    const int64_t rs = dst->ne[0];

    while (cdata < data_end) {
        const float * const srow = (const float *)cdata;

        int j = 0;

        for (int64_t i = 0; i < rs; ++i) {
            switch (op) {
                case GGML_OP_POOL_AVG:   drow[i] = 0;        break;
                case GGML_OP_POOL_MAX:   drow[i] = -FLT_MAX; break;
                case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
            }
            for (int ki = 0; ki < k; ++ki) {
                switch (op) {
                    case GGML_OP_POOL_AVG:                         drow[i] += srow[j]; break;
                    case GGML_OP_POOL_MAX:   if (srow[j] > drow[i]) drow[i]  = srow[j]; break;
                    case GGML_OP_POOL_COUNT:                       GGML_ASSERT(false); break;
                }
                ++j;
            }
            switch (op) {
                case GGML_OP_POOL_AVG:         drow[i] /= k; break;
                case GGML_OP_POOL_MAX:                       break;
                case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
            }
        }

        cdata += src->nb[1];
        drow  += rs;
    }
}

// ggml_compute_forward_pool_1d

static void ggml_compute_forward_pool_1d(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const int32_t * opts = (const int32_t *)dst->op_params;
    enum ggml_op_pool op = opts[0];
    const int k0 = opts[1];
    const int s0 = opts[2];
    const int p0 = opts[3];
    GGML_ASSERT(p0 == 0); // padding not supported
    GGML_ASSERT(k0 == s0); // only s = k supported

    ggml_compute_forward_pool_1d_sk_p0(params, op, k0, dst);
}

// ggml_compute_forward_pool_2d

static void ggml_compute_forward_pool_2d(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src = dst->src[0];

    GGML_ASSERT(src->type == GGML_TYPE_F32);
    GGML_ASSERT(params->ith == 0);

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    const int32_t * opts = (const int32_t *)dst->op_params;
    enum ggml_op_pool op = opts[0];
    const int k0 = opts[1];
    const int k1 = opts[2];
    const int s0 = opts[3];
    const int s1 = opts[4];
    const int p0 = opts[5];
    const int p1 = opts[6];

    const char * cdata = (const char*)src->data;
    const char * const data_end = cdata + ggml_nbytes(src);

    const int64_t px = dst->ne[0];
    const int64_t py = dst->ne[1];
    const int64_t pa = px * py;

    float * dplane = (float *)dst->data;

    const int ka = k0 * k1;
    const int offset0 = -p0;
    const int offset1 = -p1;

    while (cdata < data_end) {
        for (int oy = 0; oy < py; ++oy) {
            float * const drow = dplane + oy * px;
            for (int ox = 0; ox < px; ++ox) {
                float * const out = drow + ox;
                switch (op) {
                    case GGML_OP_POOL_AVG:     *out = 0;        break;
                    case GGML_OP_POOL_MAX:     *out = -FLT_MAX; break;
                    case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
                }

                const int ix = offset0 + ox * s0;
                const int iy = offset1 + oy * s1;

                for (int ky = 0; ky < k1; ++ky) {
                    if (iy + ky < 0 || iy + ky >= src->ne[1]) continue;
                    const float * const srow = (const float *)(cdata + src->nb[1] * (iy + ky));
                    for (int kx = 0; kx < k0; ++kx) {
                        int j = ix + kx;
                        if (j < 0 || j >= src->ne[0]) continue;
                        switch (op) {
                            case GGML_OP_POOL_AVG:                     *out += srow[j]; break;
                            case GGML_OP_POOL_MAX: if (srow[j] > *out)  *out  = srow[j]; break;
                            case GGML_OP_POOL_COUNT:                GGML_ASSERT(false); break;
                        }
                    }
                }
                switch (op) {
                    case GGML_OP_POOL_AVG:           *out /= ka; break;
                    case GGML_OP_POOL_MAX:                       break;
                    case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
                }
            }
        }

        cdata  += src->nb[2];
        dplane += pa;
    }
}
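
// Both pooling paths share one pattern: each output cell scans a k-sized
// window at stride s (offset by -p for padding), accumulating a running sum
// (AVG, divided by k at the end) or a running max (MAX); padded positions
// simply contribute nothing. A 1-d reference with explicit stride and padding,
// slightly more general than the s == k, p == 0 case enforced above --
// illustrative only, not part of ggml, and assuming op is AVG or MAX:

static inline void pool_1d_ref(
        enum ggml_op_pool op, int k, int s, int p,
        int n_in, const float * src, int n_out, float * dst) {
    for (int o = 0; o < n_out; o++) {
        float acc = (op == GGML_OP_POOL_MAX) ? -FLT_MAX : 0.0f;
        for (int ki = 0; ki < k; ki++) {
            const int j = o*s + ki - p;
            if (j < 0 || j >= n_in) continue; // padded positions contribute nothing
            acc = (op == GGML_OP_POOL_MAX) ? MAX(acc, src[j]) : acc + src[j];
        }
        dst[o] = (op == GGML_OP_POOL_AVG) ? acc/k : acc;
    }
}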
  12038. // ggml_compute_forward_upscale
  12039. static void ggml_compute_forward_upscale_f32(
  12040. const struct ggml_compute_params * params,
  12041. struct ggml_tensor * dst) {
  12042. const struct ggml_tensor * src0 = dst->src[0];
  12043. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  12044. return;
  12045. }
  12046. GGML_ASSERT(src0->nb[0] == sizeof(float));
  12047. const int ith = params->ith;
  12048. const int nth = params->nth;
  12049. GGML_TENSOR_UNARY_OP_LOCALS
  12050. const int scale_factor = dst->op_params[0];
  12051. // TODO: optimize
  12052. for (int64_t i3 = 0; i3 < ne3; i3++) {
  12053. const int64_t i03 = i3;
  12054. for (int64_t i2 = ith; i2 < ne2; i2 += nth) {
  12055. const int64_t i02 = i2;
  12056. for (int64_t i1 = 0; i1 < ne1; i1++) {
  12057. const int64_t i01 = i1 / scale_factor;
  12058. for (int64_t i0 = 0; i0 < ne0; i0++) {
  12059. const int64_t i00 = i0 / scale_factor;
  12060. const float * x = (float *)((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  12061. float * y = (float *)((char *) dst->data + i0*nb0 + i1*nb1 + i2*nb2 + i3*nb3);
  12062. *y = *x;
  12063. }
  12064. }
  12065. }
  12066. }
  12067. }
  12068. static void ggml_compute_forward_upscale(
  12069. const struct ggml_compute_params * params,
  12070. struct ggml_tensor * dst) {
  12071. const struct ggml_tensor * src0 = dst->src[0];
  12072. switch (src0->type) {
  12073. case GGML_TYPE_F32:
  12074. {
  12075. ggml_compute_forward_upscale_f32(params, dst);
  12076. } break;
  12077. default:
  12078. {
  12079. GGML_ASSERT(false);
  12080. } break;
  12081. }
  12082. }
  12083. // ggml_compute_forward_pad
  12084. static void ggml_compute_forward_pad_f32(
  12085. const struct ggml_compute_params * params,
  12086. struct ggml_tensor * dst) {
  12087. const struct ggml_tensor * src0 = dst->src[0];
  12088. if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
  12089. return;
  12090. }
  12091. GGML_ASSERT(src0->nb[0] == sizeof(float));
  12092. GGML_ASSERT( dst->nb[0] == sizeof(float));
  12093. const int ith = params->ith;
  12094. const int nth = params->nth;
  12095. GGML_TENSOR_UNARY_OP_LOCALS
  12096. float * dst_ptr = (float *) dst->data;
  12097. // TODO: optimize
  12098. for (int64_t i2 = 0; i2 < ne2; ++i2) {
  12099. for (int64_t i1 = ith; i1 < ne1; i1 += nth) {
  12100. for (int64_t i0 = 0; i0 < ne0; ++i0) {
  12101. for (int64_t i3 = 0; i3 < ne3; ++i3) {
  12102. const int64_t dst_idx = i3*(ne0*ne1*ne2) + i2*(ne0*ne1) + i1*ne0 + i0;
  12103. const float * src_ptr = (const float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  12104. if (i0 < ne00 && i1 < ne01 && i2 < ne02 && i3 < ne03) {
  12105. dst_ptr[dst_idx] = *src_ptr;
  12106. } else {
  12107. dst_ptr[dst_idx] = 0;
  12108. }
  12109. }
  12110. }
  12111. }
  12112. }
  12113. }
  12114. static void ggml_compute_forward_pad(
  12115. const struct ggml_compute_params * params,
  12116. struct ggml_tensor * dst) {
  12117. const struct ggml_tensor * src0 = dst->src[0];
  12118. switch (src0->type) {
  12119. case GGML_TYPE_F32:
  12120. {
  12121. ggml_compute_forward_pad_f32(params, dst);
  12122. } break;
  12123. default:
  12124. {
  12125. GGML_ASSERT(false);
  12126. } break;
  12127. }
  12128. }
// ggml_compute_forward_arange

static void ggml_compute_forward_arange_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    GGML_ASSERT(dst->nb[0] == sizeof(float));

    const int ith = params->ith;
    const int nth = params->nth;

    const float start = ggml_get_op_params_f32(dst, 0);
    const float stop  = ggml_get_op_params_f32(dst, 1);
    const float step  = ggml_get_op_params_f32(dst, 2);
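    // fills dst with the half-open range [start, stop) in increments of step,
    // e.g. start=0, stop=5, step=1 yields [0, 1, 2, 3, 4]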
    const int64_t steps = (int64_t) ceilf((stop - start) / step);

    GGML_ASSERT(ggml_nelements(dst) == steps);

    for (int64_t i = ith; i < steps; i += nth) {
        float value = start + step * i;
        ((float *)dst->data)[i] = value;
    }
}

static void ggml_compute_forward_arange(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    switch (dst->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_arange_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
static void ggml_compute_forward_timestep_embedding_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    const struct ggml_tensor * src0 = dst->src[0];

    GGML_ASSERT(src0->nb[0] == sizeof(float));

    const int ith = params->ith;
    const int nth = params->nth;

    GGML_TENSOR_UNARY_OP_LOCALS

    const int dim = ggml_get_op_params_i32(dst, 0);
    const int max_period = ggml_get_op_params_i32(dst, 1);

    int half = dim / 2;
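    // sinusoidal timestep embedding (the construction used by diffusion models):
    //   freq_j = exp(-ln(max_period) * j / half)   for j in [0, half)
    //   embed[j]        = cos(timestep * freq_j)
    //   embed[j + half] = sin(timestep * freq_j)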
    for (int64_t i = 0; i < ne00; i++) {
        float * embed_data = (float *)((char *) dst->data + i*nb1);
        for (int64_t j = ith; j < half; j += nth) {
            float timestep = ((float *)src0->data)[i];
            float freq = (float)expf(-logf(max_period) * j / half);
            float arg = timestep * freq;
            embed_data[j] = cosf(arg);
            embed_data[j + half] = sinf(arg);
        }
        if (dim % 2 != 0 && ith == 0) {
            embed_data[dim] = 0.f;
        }
    }
}

static void ggml_compute_forward_timestep_embedding(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_timestep_embedding_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_argsort

static void ggml_compute_forward_argsort_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    GGML_TENSOR_UNARY_OP_LOCALS

    GGML_ASSERT(nb0 == sizeof(float));

    const int ith = params->ith;
    const int nth = params->nth;

    const int64_t nr = ggml_nrows(src0);

    enum ggml_sort_order order = (enum ggml_sort_order) ggml_get_op_params_i32(dst, 0);
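    // per-row argsort: dst holds the permutation of indices that sorts the
    // row, e.g. the row [3.0, 1.0, 2.0] in ascending order yields [1, 2, 0]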
    for (int64_t i = ith; i < nr; i += nth) {
        int32_t * dst_data = (int32_t *)((char *) dst->data + i*nb1);
        const float * src_data = (float *)((char *) src0->data + i*nb01);

        for (int64_t j = 0; j < ne0; j++) {
            dst_data[j] = j;
        }

        // C doesn't have a functional sort, so we do a bubble sort instead
        for (int64_t j = 0; j < ne0; j++) {
            for (int64_t k = j + 1; k < ne0; k++) {
                if ((order == GGML_SORT_ORDER_ASC  && src_data[dst_data[j]] > src_data[dst_data[k]]) ||
                    (order == GGML_SORT_ORDER_DESC && src_data[dst_data[j]] < src_data[dst_data[k]])) {
                    int32_t tmp = dst_data[j];
                    dst_data[j] = dst_data[k];
                    dst_data[k] = tmp;
                }
            }
        }
    }
}

static void ggml_compute_forward_argsort(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_argsort_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_flash_attn

static void ggml_compute_forward_flash_attn_f32(
        const struct ggml_compute_params * params,
        const bool masked,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * q = dst->src[0];
    const struct ggml_tensor * k = dst->src[1];
    const struct ggml_tensor * v = dst->src[2];

    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    GGML_TENSOR_LOCALS(int64_t, neq, q,   ne)
    GGML_TENSOR_LOCALS(size_t,  nbq, q,   nb)
    GGML_TENSOR_LOCALS(int64_t, nek, k,   ne)
    GGML_TENSOR_LOCALS(size_t,  nbk, k,   nb)
    GGML_TENSOR_LOCALS(int64_t, nev, v,   ne)
    GGML_TENSOR_LOCALS(size_t,  nbv, v,   nb)
    GGML_TENSOR_LOCALS(int64_t, ne,  dst, ne)
    GGML_TENSOR_LOCALS(size_t,  nb,  dst, nb)

    const int ith = params->ith;
    const int nth = params->nth;

    const int64_t D = neq0;
    const int64_t N = neq1;
    const int64_t P = nek1 - N;
    const int64_t M = P + N;

    const int Mup = ggml_up(M, GGML_SOFT_MAX_UNROLL);

    GGML_ASSERT(ne0 == D);
    GGML_ASSERT(ne1 == N);
    GGML_ASSERT(P >= 0);

    GGML_ASSERT(nbq0 == sizeof(float));
    GGML_ASSERT(nbk0 == sizeof(float));
    GGML_ASSERT(nbv0 == sizeof(float));

    GGML_ASSERT(neq0 == D);
    GGML_ASSERT(nek0 == D);
    GGML_ASSERT(nev1 == D);

    GGML_ASSERT(neq1 == N);
    GGML_ASSERT(nek1 == N + P);
    GGML_ASSERT(nev1 == D);

    // dst cannot be transposed or permuted
    GGML_ASSERT(nb0 == sizeof(float));
    GGML_ASSERT(nb0 <= nb1);
    GGML_ASSERT(nb1 <= nb2);
    GGML_ASSERT(nb2 <= nb3);

    if (params->type == GGML_TASK_TYPE_INIT) {
        return;
    }

    if (params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    // parallelize by q rows using ggml_vec_dot_f32

    // total rows in q
    const int nr = neq1*neq2*neq3;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    const float scale = 1.0f/sqrtf(D);
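    // per q row this computes standard scaled dot-product attention:
    //   S   = softmax(q . K^T / sqrt(D))   (causally masked when `masked`)
    //   dst = S @ V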
    //printf("P=%d N=%d D=%d ir0=%d ir1=%d scale = %f\n", P, N, D, ir0, ir1, scale);

    for (int ir = ir0; ir < ir1; ++ir) {
        // q indices
        const int iq3 = ir/(neq2*neq1);
        const int iq2 = (ir - iq3*neq2*neq1)/neq1;
        const int iq1 = (ir - iq3*neq2*neq1 - iq2*neq1);

        float * S = (float *) params->wdata + ith*(Mup + CACHE_LINE_SIZE_F32);

        for (int i = M; i < Mup; ++i) {
            S[i] = -INFINITY;
        }

        const int64_t masked_begin = masked ? (P + iq1 + 1) : M;
        for (int64_t ic = 0; ic < masked_begin; ++ic) {
            // k indices
            const int ik3 = iq3;
            const int ik2 = iq2 % nek2;
            const int ik1 = ic;

            // S indices
            const int i1 = ik1;

            ggml_vec_dot_f32(neq0,
                    S + i1, 0,
                    (float *) ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)), 0,
                    (float *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)), 0, 1);
        }

        // scale
        ggml_vec_scale_f32(masked_begin, S, scale);

        for (int64_t i = masked_begin; i < M; i++) {
            S[i] = -INFINITY;
        }

        // softmax
        // exclude known -INF S[..] values from max and loop
        // don't forget to set their S values to zero
        {
            float max = -INFINITY;
            ggml_vec_max_f32(masked_begin, &max, S);

            ggml_float sum = 0.0;
            {
#ifdef GGML_SOFT_MAX_ACCELERATE
                max = -max;
                vDSP_vsadd(S, 1, &max, S, 1, Mup);
                vvexpf(S, S, &Mup);
                ggml_vec_sum_f32(Mup, &sum, S);
#else
                uint16_t   scvt[GGML_SOFT_MAX_UNROLL]; UNUSED(scvt);
                ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 };

                for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) {
                    if (i >= masked_begin) {
                        break;
                    }
                    float * SS = S + i;

                    for (int j = 0; j < GGML_SOFT_MAX_UNROLL; ++j) {
                        if (i + j >= masked_begin) {
                            break;
                        } else if (SS[j] == -INFINITY) {
                            SS[j] = 0.0f;
                        } else {
#ifndef GGML_FLASH_ATTN_EXP_FP16
                            const float val = expf(SS[j] - max);
#else
                            ggml_fp16_t s = GGML_FP32_TO_FP16(SS[j] - max);
                            memcpy(&scvt[j], &s, sizeof(uint16_t));
                            const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt[j]]);
#endif
                            sump[j] += (ggml_float)val;
                            SS[j] = val;
                        }
                    }
                }

                for (int i = 0; i < GGML_SOFT_MAX_UNROLL; i++) {
                    sum += sump[i];
                }
#endif
            }

            assert(sum > 0.0);

            sum = 1.0/sum;
            ggml_vec_scale_f32(masked_begin, S, sum);

#ifndef NDEBUG
            for (int i = 0; i < masked_begin; ++i) {
                assert(!isnan(S[i]));
                assert(!isinf(S[i]));
            }
#endif
        }

        for (int64_t ic = 0; ic < nev1; ++ic) {
            // dst indices
            const int i1 = iq1;
            const int i2 = iq2;
            const int i3 = iq3;

            // v indices
            const int iv2 = iq2 % nev2;
            const int iv3 = iq3;

            ggml_vec_dot_f32(masked_begin,
                    (float *) ((char *) dst->data + (ic*nb0 + i1*nb1  + i2*nb2   + i3*nb3)), 0,
                    (float *) ((char *) v->data   + (        ic*nbv1 + iv2*nbv2 + iv3*nbv3)), 0,
                    S, 0, 1);
        }
    }
}
static void ggml_compute_forward_flash_attn_f16(
        const struct ggml_compute_params * params,
        const bool masked,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * q = dst->src[0];
    const struct ggml_tensor * k = dst->src[1];
    const struct ggml_tensor * v = dst->src[2];

    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    GGML_TENSOR_LOCALS(int64_t, neq, q,   ne)
    GGML_TENSOR_LOCALS(size_t,  nbq, q,   nb)
    GGML_TENSOR_LOCALS(int64_t, nek, k,   ne)
    GGML_TENSOR_LOCALS(size_t,  nbk, k,   nb)
    GGML_TENSOR_LOCALS(int64_t, nev, v,   ne)
    GGML_TENSOR_LOCALS(size_t,  nbv, v,   nb)
    GGML_TENSOR_LOCALS(int64_t, ne,  dst, ne)
    GGML_TENSOR_LOCALS(size_t,  nb,  dst, nb)

    const int ith = params->ith;
    const int nth = params->nth;

    const int64_t D = neq0;
    const int64_t N = neq1;
    const int64_t P = nek1 - N;
    const int64_t M = P + N;

    const int Mup = ggml_up(M, GGML_SOFT_MAX_UNROLL);

    GGML_ASSERT(ne0 == D);
    GGML_ASSERT(ne1 == N);
    GGML_ASSERT(P >= 0);

    GGML_ASSERT(nbq0 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nbk0 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nbv0 == sizeof(ggml_fp16_t));

    GGML_ASSERT(neq0 == D);
    GGML_ASSERT(nek0 == D);
    GGML_ASSERT(nev1 == D);

    GGML_ASSERT(neq1 == N);
    GGML_ASSERT(nek1 == N + P);
    GGML_ASSERT(nev1 == D);

    // dst cannot be transposed or permuted
    GGML_ASSERT(nb0 == sizeof(float));
    GGML_ASSERT(nb0 <= nb1);
    GGML_ASSERT(nb1 <= nb2);
    GGML_ASSERT(nb2 <= nb3);

    if (params->type == GGML_TASK_TYPE_INIT) {
        return;
    }

    if (params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    // parallelize by q rows using ggml_vec_dot_f16

    // total rows in q
    const int nr = neq1*neq2*neq3;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    const float scale = 1.0f/sqrtf(D);

    //printf("P=%d N=%d D=%d ir0=%d ir1=%d scale = %f\n", P, N, D, ir0, ir1, scale);
    for (int ir = ir0; ir < ir1; ++ir) {
        // q indices
        const int iq3 = ir/(neq2*neq1);
        const int iq2 = (ir - iq3*neq2*neq1)/neq1;
        const int iq1 = (ir - iq3*neq2*neq1 - iq2*neq1);

        float * S = (float *) params->wdata + ith*(2*Mup + CACHE_LINE_SIZE_F32);

        for (int i = M; i < Mup; ++i) {
            S[i] = -INFINITY;
        }

        if (GGML_VEC_DOT_UNROLL > 2 || nek1 % GGML_VEC_DOT_UNROLL != 0) {
            for (int64_t ic = 0; ic < nek1; ++ic) {
                // k indices
                const int ik3 = iq3;
                const int ik2 = iq2 % nek2;
                const int ik1 = ic;

                // S indices
                const int i1 = ik1;

                ggml_vec_dot_f16(neq0,
                        S + i1, 0,
                        (ggml_fp16_t *) ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)), 0,
                        (ggml_fp16_t *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)), 0, 1);
            }
        } else {
            for (int64_t ic = 0; ic < nek1; ic += GGML_VEC_DOT_UNROLL) {
                // k indices
                const int ik3 = iq3;
                const int ik2 = iq2 % nek2;
                const int ik1 = ic;

                // S indices
                const int i1 = ik1;

                ggml_vec_dot_f16_unroll(neq0, nbk1,
                        S + i1,
                        ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)),
                        (ggml_fp16_t *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)));
            }
        }

        // scale
        ggml_vec_scale_f32(nek1, S, scale);

        if (masked) {
            for (int64_t i = P; i < M; i++) {
                if (i > P + iq1) {
                    S[i] = -INFINITY;
                }
            }
        }

        // softmax
        // todo: exclude known -INF S[..] values from max and loop, assuming their results to be zero.
        //       don't forget to set their S values to zero
        {
            float max = -INFINITY;
            ggml_vec_max_f32(M, &max, S);

            ggml_float sum = 0.0;
            {
#ifdef GGML_SOFT_MAX_ACCELERATE
                max = -max;
                vDSP_vsadd(S, 1, &max, S, 1, Mup);
                vvexpf(S, S, &Mup);
                ggml_vec_sum_f32(Mup, &sum, S);
#else
                uint16_t   scvt[GGML_SOFT_MAX_UNROLL];
                ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 };

                for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) {
                    float * SS = S + i;

                    for (int j = 0; j < GGML_SOFT_MAX_UNROLL; ++j) {
                        if (SS[j] == -INFINITY) {
                            SS[j] = 0.0f;
                        } else {
                            ggml_fp16_t s = GGML_FP32_TO_FP16(SS[j] - max);
                            memcpy(&scvt[j], &s, sizeof(uint16_t));
                            const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt[j]]);
                            sump[j] += (ggml_float)val;
                            SS[j] = val;
                        }
                    }
                }

                for (int i = 0; i < GGML_SOFT_MAX_UNROLL; i++) {
                    sum += sump[i];
                }
#endif
            }

            assert(sum > 0.0);

            sum = 1.0/sum;
            ggml_vec_scale_f32(M, S, sum);

#ifndef NDEBUG
            for (int i = 0; i < M; ++i) {
                assert(!isnan(S[i]));
                assert(!isinf(S[i]));
            }
#endif
        }

        ggml_fp16_t * S16 = (ggml_fp16_t *) ((float *) params->wdata + ith*(2*Mup + CACHE_LINE_SIZE_F32) + Mup);

        for (int64_t i = 0; i < M; i++) {
            S16[i] = GGML_FP32_TO_FP16(S[i]);
        }

        // todo: exclude known zero S[..] values from dot (reducing nev0 and increasing begin of v and S16).
        if (GGML_VEC_DOT_UNROLL == 1 || (nev1 % GGML_VEC_DOT_UNROLL != 0)) {
            for (int64_t ic = 0; ic < nev1; ++ic) {
                // dst indices
                const int i1 = iq1;
                const int i2 = iq2;
                const int i3 = iq3;

                // v indices
                const int iv2 = iq2 % nev2;
                const int iv3 = iq3;

                ggml_vec_dot_f16(nev0,
                        (float *)       ((char *) dst->data + (ic*nb0 + i1*nb1  + i2*nb2   + i3*nb3)), 0,
                        (ggml_fp16_t *) ((char *) v->data   + (        ic*nbv1 + iv2*nbv2 + iv3*nbv3)), 0,
                        S16, 0, 1);
            }
        } else {
            for (int64_t ic = 0; ic < nev1; ic += GGML_VEC_DOT_UNROLL) {
                // dst indices
                const int i1 = iq1;
                const int i2 = iq2;
                const int i3 = iq3;

                // v indices
                const int iv2 = iq2 % nev2;
                const int iv3 = iq3;

                ggml_vec_dot_f16_unroll(nev0, nbv1,
                        (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)),
                        ((char *) v->data + (ic*nbv1 + iv2*nbv2 + iv3*nbv3)),
                        S16);
            }
        }
    }
}
static void ggml_compute_forward_flash_attn(
        const struct ggml_compute_params * params,
        const bool masked,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * q = dst->src[0];

    switch (q->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_flash_attn_f16(params, masked, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_flash_attn_f32(params, masked, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_flash_attn_ext

static void ggml_compute_forward_flash_attn_ext_f16(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * q,
        const struct ggml_tensor * k,
        const struct ggml_tensor * v,
        const struct ggml_tensor * mask,
        struct ggml_tensor * dst) {

    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    GGML_TENSOR_LOCALS(int64_t, neq, q,   ne)
    GGML_TENSOR_LOCALS(size_t,  nbq, q,   nb)
    GGML_TENSOR_LOCALS(int64_t, nek, k,   ne)
    GGML_TENSOR_LOCALS(size_t,  nbk, k,   nb)
    GGML_TENSOR_LOCALS(int64_t, nev, v,   ne)
    GGML_TENSOR_LOCALS(size_t,  nbv, v,   nb)
    GGML_TENSOR_LOCALS(int64_t, ne,  dst, ne)
    GGML_TENSOR_LOCALS(size_t,  nb,  dst, nb)

    const int ith = params->ith;
    const int nth = params->nth;

    const int64_t D = neq0;
    const int64_t N = neq1;

    GGML_ASSERT(ne0 == D);
    GGML_ASSERT(ne2 == N);

    GGML_ASSERT(nbq0 == sizeof(float));
    GGML_ASSERT(nbk0 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nbv0 == sizeof(ggml_fp16_t));

    GGML_ASSERT(neq0 == D);
    GGML_ASSERT(nek0 == D);
    GGML_ASSERT(nev0 == D);

    GGML_ASSERT(neq1 == N);
    GGML_ASSERT(nev0 == D);

    // dst cannot be transposed or permuted
    GGML_ASSERT(nb0 == sizeof(float));
    GGML_ASSERT(nb0 <= nb1);
    GGML_ASSERT(nb1 <= nb2);
    GGML_ASSERT(nb2 <= nb3);

    // broadcast factors
    const int64_t rk2 = neq2/nek2;
    const int64_t rk3 = neq3/nek3;

    const int64_t rv2 = neq2/nev2;
    const int64_t rv3 = neq3/nev3;

    if (params->type == GGML_TASK_TYPE_INIT) {
        return;
    }

    if (params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    // parallelize by q rows using ggml_vec_dot_f16

    // total rows in q
    const int nr = neq1*neq2*neq3;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    float scale    = 1.0f;
    float max_bias = 0.0f;

    memcpy(&scale,    (float *) dst->op_params + 0, sizeof(float));
    memcpy(&max_bias, (float *) dst->op_params + 1, sizeof(float));

    const uint32_t n_head      = neq2;
    const uint32_t n_head_log2 = 1u << (uint32_t) floor(log2(n_head));

    const float m0 = powf(2.0f, -(max_bias       ) / n_head_log2);
    const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);
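    // ALiBi-style slopes: head h gets slope m0^(h+1) when h < n_head_log2,
    // and m1^(2*(h - n_head_log2) + 1) otherwise; max_bias == 0 disables the
    // positional bias entirely (slope 1.0, mask applied as-is)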
    // loop over n_batch and n_head
    for (int ir = ir0; ir < ir1; ++ir) {
        // q indices
        const int iq3 = ir/(neq2*neq1);
        const int iq2 = (ir - iq3*neq2*neq1)/neq1;
        const int iq1 = (ir - iq3*neq2*neq1 - iq2*neq1);

        const uint32_t h = iq2; // head
        const float slope = (max_bias > 0.0f) ? h < n_head_log2 ? powf(m0, h + 1) : powf(m1, 2*(h - n_head_log2) + 1) : 1.0f;

        float S = 0.0f;
        float M = -INFINITY;

        float       * V32 = (float *) params->wdata + ith*(2*D + CACHE_LINE_SIZE_F32);
        ggml_fp16_t * Q16 = (ggml_fp16_t *) (V32); // reuse memory
        ggml_fp16_t * V16 = (ggml_fp16_t *) (V32 + D);

        memset(V16, 0, D*sizeof(ggml_fp16_t));

        const ggml_fp16_t * mp = mask ? (ggml_fp16_t *)((char *) mask->data + iq1*mask->nb[1]) : NULL;

        // k indices
        const int ik3 = iq3 / rk3;
        const int ik2 = iq2 / rk2;

        // v indices
        const int iv3 = iq3 / rv3;
        const int iv2 = iq2 / rv2;

        // online softmax / attention
        // loop over n_kv and n_head_kv
        // ref: https://arxiv.org/pdf/2112.05682.pdf
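        // streaming update, one KV position at a time:
        //   M   tracks the running maximum score,
        //   S   tracks the running sum of exp(score - M),
        //   V16 accumulates exp(score - M) * v;
        // when a new maximum arrives, both accumulators are rescaled by
        // exp(M_old - M_new) instead of revisiting earlier positions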
        for (int64_t ic = 0; ic < nek1; ++ic) {
            const float mv = mp ? slope*GGML_FP16_TO_FP32(mp[ic]) : 0.0f;
            if (mv == -INFINITY) {
                continue;
            }

            float s;

            // convert Q to F16 in V32
            {
                const float * pq = (const float *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3));

                for (int64_t d = 0; d < D; ++d) {
                    Q16[d] = GGML_FP32_TO_FP16(pq[d]);
                }
            }

            ggml_vec_dot_f16(D,
                    &s, 0,
                    (ggml_fp16_t *) ((char *) k->data + (ic*nbk1 + ik2*nbk2 + ik3*nbk3)), 0,
                    Q16, 0, 1);

            s = s*scale + mv;

            const float Mold = M;

            float ms = 1.0f;
            float vs = 1.0f;

            if (s > M) {
                M = s;
                ms = expf(Mold - M);

                // V = V*expf(Mold - M)
                ggml_vec_scale_f16(D, V16, ms);
            } else {
                vs = expf(s - M);
            }

            const ggml_fp16_t * v16 = (const ggml_fp16_t *) ((char *) v->data + (ic*nbv1 + iv2*nbv2 + iv3*nbv3));

            // V += v*expf(s - M)
            ggml_vec_mad_f16(D, V16, v16, vs);

            S = S*ms + vs;
        }

        // V /= S
        for (int64_t d = 0; d < D; ++d) {
            V32[d] = GGML_FP16_TO_FP32(V16[d])/S;
        }

        // dst indices
        const int i1 = iq1;
        const int i2 = iq2;
        const int i3 = iq3;

        // original
        //memcpy((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3), V, nev0*sizeof(float));

        // permute(0, 2, 1, 3)
        memcpy((char *) dst->data + (i3*ne2*ne1 + i2 + i1*ne1)*nb1, V32, nb1);
    }
}
static void ggml_compute_forward_flash_attn_ext(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * q,
        const struct ggml_tensor * k,
        const struct ggml_tensor * v,
        const struct ggml_tensor * mask,
        struct ggml_tensor * dst) {
    switch (dst->op_params[2]) {
        case GGML_PREC_DEFAULT:
        case GGML_PREC_F32:
            {
                // uses F32 accumulators
                ggml_compute_forward_flash_attn_ext_f16(params, q, k, v, mask, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_flash_ff

static void ggml_compute_forward_flash_ff_f16(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * a  = dst->src[0]; // F16
    const struct ggml_tensor * b0 = dst->src[1]; // F16 fc_w
    const struct ggml_tensor * b1 = dst->src[2]; // F32 fc_b
    const struct ggml_tensor * c0 = dst->src[3]; // F16 proj_w
    const struct ggml_tensor * c1 = dst->src[4]; // F32 proj_b

    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    GGML_TENSOR_LOCALS(int64_t, nea,  a,   ne)
    GGML_TENSOR_LOCALS(size_t,  nba,  a,   nb)
    GGML_TENSOR_LOCALS(int64_t, neb0, b0,  ne)
    GGML_TENSOR_LOCALS(size_t,  nbb0, b0,  nb)
    GGML_TENSOR_LOCALS(int64_t, neb1, b1,  ne)
    GGML_TENSOR_LOCALS(size_t,  nbb1, b1,  nb)
    GGML_TENSOR_LOCALS(int64_t, nec0, c0,  ne)
    GGML_TENSOR_LOCALS(size_t,  nbc0, c0,  nb)
    GGML_TENSOR_LOCALS(int64_t, nec1, c1,  ne)
    GGML_TENSOR_LOCALS(size_t,  nbc1, c1,  nb)
    GGML_TENSOR_LOCALS(int64_t, ne,   dst, ne)
    GGML_TENSOR_LOCALS(size_t,  nb,   dst, nb)

    const int ith = params->ith;
    const int nth = params->nth;

    const int64_t D = nea0;
    //const int64_t N = nea1;
    const int64_t M = neb01;
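    // two-layer feed-forward with GELU, computed one row of a at a time:
    //   dst = c0 . gelu(b0 . a + b1) + c1
    // (b0/b1 are the fc weight/bias, c0/c1 the projection weight/bias)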
    GGML_ASSERT(ne0 == nea0);
    GGML_ASSERT(ne1 == nea1);
    GGML_ASSERT(ne2 == nea2);

    GGML_ASSERT(nba0  == sizeof(ggml_fp16_t));
    GGML_ASSERT(nbb00 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nbb10 == sizeof(float));
    GGML_ASSERT(nbc00 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nbc10 == sizeof(float));

    GGML_ASSERT(neb00 == D);
    GGML_ASSERT(neb01 == M);
    GGML_ASSERT(neb10 == M);
    GGML_ASSERT(neb11 == 1);

    GGML_ASSERT(nec00 == M);
    GGML_ASSERT(nec01 == D);
    GGML_ASSERT(nec10 == D);
    GGML_ASSERT(nec11 == 1);

    // dst cannot be transposed or permuted
    GGML_ASSERT(nb0 == sizeof(float));
    GGML_ASSERT(nb0 <= nb1);
    GGML_ASSERT(nb1 <= nb2);
    GGML_ASSERT(nb2 <= nb3);

    if (params->type == GGML_TASK_TYPE_INIT) {
        return;
    }

    if (params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    // parallelize by a rows using ggml_vec_dot_f16

    // total rows in a
    const int nr = nea1*nea2*nea3;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);
    for (int ir = ir0; ir < ir1; ++ir) {
        // a indices
        const int ia3 = ir/(nea2*nea1);
        const int ia2 = (ir - ia3*nea2*nea1)/nea1;
        const int ia1 = (ir - ia3*nea2*nea1 - ia2*nea1);

        float * S = (float *) params->wdata + ith*(2*M + CACHE_LINE_SIZE_F32);

        for (int64_t ic = 0; ic < neb01; ++ic) {
            // b0 indices
            const int ib03 = ia3;
            const int ib02 = ia2;
            const int ib01 = ic;

            // S indices
            const int i1 = ib01;

            ggml_vec_dot_f16(nea0,
                    S + i1, 0,
                    (ggml_fp16_t *) ((char *) b0->data + (ib01*nbb01 + ib02*nbb02 + ib03*nbb03)), 0,
                    (ggml_fp16_t *) ((char *)  a->data + ( ia1*nba1  +  ia2*nba2  +  ia3*nba3)), 0, 1);
        }

        ggml_vec_add_f32(neb01, S, S, (float *) b1->data);
        //ggml_vec_gelu_f32(neb01, S, S);

        ggml_fp16_t * S16 = (ggml_fp16_t *) ((float *) params->wdata + ith*(2*M + CACHE_LINE_SIZE_F32) + M);

        for (int64_t i = 0; i < M; i++) {
            S16[i] = GGML_FP32_TO_FP16(S[i]);
        }

        ggml_vec_gelu_f16(neb01, S16, S16);

        {
            // dst indices
            const int i1 = ia1;
            const int i2 = ia2;
            const int i3 = ia3;

            for (int64_t ic = 0; ic < nec01; ++ic) {
                ggml_vec_dot_f16(neb01,
                        (float *)       ((char *) dst->data + (ic*nb0 + i1*nb1   + i2*nb2   + i3*nb3)), 0,
                        (ggml_fp16_t *) ((char *) c0->data  + (        ic*nbc01 + i2*nbc02 + i3*nbc03)), 0,
                        S16, 0, 1);
            }

            ggml_vec_add_f32(nec01,
                    (float *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3)),
                    (float *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3)),
                    (float *) c1->data);
        }
    }
}
static void ggml_compute_forward_flash_ff(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * b0 = dst->src[1];

    switch (b0->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_flash_ff_f16(params, dst);
            } break;
        case GGML_TYPE_F32:
            {
                GGML_ASSERT(false); // TODO
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_flash_attn_back

static void ggml_compute_forward_flash_attn_back_f32(
        const struct ggml_compute_params * params,
        const bool masked,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * q = dst->src[0];
    const struct ggml_tensor * k = dst->src[1];
    const struct ggml_tensor * v = dst->src[2];
    const struct ggml_tensor * d = dst->src[3];

    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    GGML_TENSOR_LOCALS(int64_t, neq, q,   ne)
    GGML_TENSOR_LOCALS(size_t,  nbq, q,   nb)
    GGML_TENSOR_LOCALS(int64_t, nek, k,   ne)
    GGML_TENSOR_LOCALS(size_t,  nbk, k,   nb)
    GGML_TENSOR_LOCALS(int64_t, nev, v,   ne)
    GGML_TENSOR_LOCALS(size_t,  nbv, v,   nb)
    GGML_TENSOR_LOCALS(int64_t, ned, d,   ne)
    GGML_TENSOR_LOCALS(size_t,  nbd, d,   nb)
    GGML_TENSOR_LOCALS(int64_t, ne,  dst, ne)
    GGML_TENSOR_LOCALS(size_t,  nb,  dst, nb)

    const int ith = params->ith;
    const int nth = params->nth;

    const int64_t D = neq0;
    const int64_t N = neq1;
    const int64_t P = nek1 - N;
    const int64_t M = P + N;

    const int Mup  = ggml_up(M, GGML_SOFT_MAX_UNROLL);
    const int mxDM = MAX(D, Mup);

    // GGML_ASSERT(ne0 == D);
    // GGML_ASSERT(ne1 == N);
    GGML_ASSERT(P >= 0);

    GGML_ASSERT(nbq0 == sizeof(float));
    GGML_ASSERT(nbk0 == sizeof(float));
    GGML_ASSERT(nbv0 == sizeof(float));

    GGML_ASSERT(neq0 == D);
    GGML_ASSERT(nek0 == D);
    GGML_ASSERT(nev1 == D);
    GGML_ASSERT(ned0 == D);

    GGML_ASSERT(neq1 == N);
    GGML_ASSERT(nek1 == N + P);
    GGML_ASSERT(nev1 == D);
    GGML_ASSERT(ned1 == N);

    // dst cannot be transposed or permuted
    GGML_ASSERT(nb0 == sizeof(float));
    GGML_ASSERT(nb0 <= nb1);
    GGML_ASSERT(nb1 <= nb2);
    GGML_ASSERT(nb2 <= nb3);

    if (params->type == GGML_TASK_TYPE_INIT) {
        if (ith == 0) {
            memset(dst->data, 0, nb0*ne0*ne1*ne2*ne3);
        }
        return;
    }

    if (params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    const int64_t elem_q = ggml_nelements(q);
    const int64_t elem_k = ggml_nelements(k);

    enum ggml_type result_type = dst->type;
    GGML_ASSERT(ggml_blck_size(result_type) == 1);
    const size_t tsize = ggml_type_size(result_type);

    const size_t offs_q = 0;
    const size_t offs_k = offs_q + GGML_PAD(elem_q * tsize, GGML_MEM_ALIGN);
    const size_t offs_v = offs_k + GGML_PAD(elem_k * tsize, GGML_MEM_ALIGN);

    void * grad_q = (char *) dst->data;
    void * grad_k = (char *) dst->data + offs_k;
    void * grad_v = (char *) dst->data + offs_v;

    const size_t nbgq1 = nb0*neq0;
    const size_t nbgq2 = nb0*neq0*neq1;
    const size_t nbgq3 = nb0*neq0*neq1*neq2;

    const size_t nbgk1 = nb0*nek0;
    const size_t nbgk2 = nb0*nek0*nek1;
    const size_t nbgk3 = nb0*nek0*nek1*neq2;

    const size_t nbgv1 = nb0*nev0;
    const size_t nbgv2 = nb0*nev0*nev1;
    const size_t nbgv3 = nb0*nev0*nev1*neq2;

    // parallelize by k rows using ggml_vec_dot_f32

    // total rows in k
    const int nr = nek2*nek3;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    const float scale = 1.0f/sqrtf(D);

    //printf("P=%d N=%d D=%d ir0=%d ir1=%d scale = %f\n", P, N, D, ir0, ir1, scale);
    // how often k2 (and v2) is repeated in q2
    int nrep = neq2/nek2;

    for (int ir = ir0; ir < ir1; ++ir) {
        // k indices
        const int ik3 = ir/(nek2);
        const int ik2 = ir - ik3*nek2;

        const int iq3 = ik3;
        const int id3 = ik3;
        const int iv3 = ik3;
        const int iv2 = ik2;

        for (int irep = 0; irep < nrep; ++irep) {
            const int iq2 = ik2 + irep*nek2;
            const int id2 = iq2;

            // (ik2 + irep*nek2) % nek2 == ik2
            for (int iq1 = 0; iq1 < neq1; ++iq1) {
                const int id1 = iq1;

                // not sure about CACHE_LINE_SIZE_F32..
                // - maybe it must not be multiplied by 2 and excluded from .. in SM 1*(..) offset?
                float * S  = (float *) params->wdata + ith*2*(mxDM + CACHE_LINE_SIZE_F32) + 0*(mxDM + CACHE_LINE_SIZE_F32);
                float * SM = (float *) params->wdata + ith*2*(mxDM + CACHE_LINE_SIZE_F32) + 1*(mxDM + CACHE_LINE_SIZE_F32);

                for (int i = M; i < Mup; ++i) {
                    S[i] = -INFINITY;
                }

                const int64_t masked_begin = masked ? (P + iq1 + 1) : M;
                for (int64_t ic = 0; ic < masked_begin; ++ic) {
                    // k indices
                    const int ik1 = ic;

                    // S indices
                    const int i1 = ik1;

                    ggml_vec_dot_f32(neq0,
                            S + i1, 0,
                            (float *) ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)), 0,
                            (float *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)), 0, 1);
                }

                // scale
                ggml_vec_scale_f32(masked_begin, S, scale);

                for (int64_t i = masked_begin; i < M; i++) {
                    S[i] = -INFINITY;
                }

                // softmax
                // exclude known -INF S[..] values from max and loop
                // don't forget to set their SM values to zero
                {
                    float max = -INFINITY;
                    ggml_vec_max_f32(masked_begin, &max, S);

                    ggml_float sum = 0.0;
                    {
#ifdef GGML_SOFT_MAX_ACCELERATE
                        max = -max;
                        vDSP_vsadd(SM, 1, &max, SM, 1, Mup);
                        vvexpf(SM, SM, &Mup);
                        ggml_vec_sum_f32(Mup, &sum, SM);
#else
                        uint16_t   scvt[GGML_SOFT_MAX_UNROLL]; UNUSED(scvt);
                        ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 };

                        for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) {
                            if (i >= masked_begin) {
                                break;
                            }
                            float * SR = S  + i;
                            float * SW = SM + i;

                            for (int j = 0; j < GGML_SOFT_MAX_UNROLL; ++j) {
                                if (i + j >= masked_begin) {
                                    break;
                                } else if (SR[j] == -INFINITY) {
                                    SW[j] = 0.0f;
                                } else {
#ifndef GGML_FLASH_ATTN_EXP_FP16
                                    const float val = expf(SR[j] - max);
#else
                                    ggml_fp16_t s = GGML_FP32_TO_FP16(SR[j] - max);
                                    memcpy(&scvt[j], &s, sizeof(uint16_t));
                                    const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt[j]]);
#endif
                                    sump[j] += (ggml_float)val;
                                    SW[j] = val;
                                }
                            }
                        }

                        for (int i = 0; i < GGML_SOFT_MAX_UNROLL; i++) {
                            sum += sump[i];
                        }
#endif
                    }

                    assert(sum > 0.0);

                    sum = 1.0/sum;
                    ggml_vec_scale_f32(masked_begin, SM, sum);
                }
                // step-by-step explanation
                {
                    // forward-process                   shape      grads from backward process
                    // parallel_for ik2,ik3:
                    //  for irep:
                    //   iq2 = ik2 + irep*nek2
                    //   k[:D,:M,:,:]                    [D,M,:,:]  grad[k][:D,:M,ik2,ik3]  += grad[kcur]
                    //   q[:D,:N,:,:]                    [D,N,:,:]  grad[q][:D,iq1,iq2,iq3] += grad[qcur]
                    //   v[:M,:D,:,:]                    [M,D,:,:]  grad[v][:M,:D,iv2,iv3]  += grad[vcur]
                    //   for iq1:
                    //    kcur  = k[:D,:M,ik2,ik3]       [D,M,1,1]  grad[kcur] = grad[S1].T @ qcur
                    //    qcur  = q[:D,iq1,iq2,iq3]      [D,1,1,1]  grad[qcur] = grad[S1]   @ kcur
                    //    vcur  = v[:M,:D,iv2,iv3]       [M,D,1,1]  grad[vcur] = grad[S5].T @ S4
                    //    S0    = -Inf                   [D,1,1,1]
                    //   ~S1[i] = dot(kcur[:D,i], qcur)
                    //    S1    = qcur @ kcur.T          [M,1,1,1]  grad[S1]   = grad[S2] * scale
                    //    S2    = S1 * scale             [M,1,1,1]  grad[S2]   = diag_mask_zero(grad[S3], P)
                    //    S3    = diag_mask_inf(S2, P)   [M,1,1,1]  grad[S3]   = S4 * (grad[S4] - dot(S4, grad[S4]))
                    //    S4    = softmax(S3)            [M,1,1,1]  grad[S4]   = grad[S5] @ vcur
                    //   ~S5[i] = dot(vcur[:,i], S4)
                    //    S5    = S4 @ vcur.T            [D,1,1,1]  grad[S5]   = d[:D,id1,id2,id3]
                    //   ~dst[i,iq1,iq2,iq3]  = S5[i]             ^
                    //    dst[:D,iq1,iq2,iq3] = S5                | grad[dst[:D,iq1,iq2,iq3]] = d[:D,id1,id2,id3]
                    // dst                              backward-/ grad[dst]                 = d
                    //
                    // output gradients with their dependencies:
                    //
                    // grad[kcur] = grad[S1].T @ qcur
                    // grad[S1]   = diag_mask_zero(grad[S3], P) * scale
                    // grad[S3]   = S4 * (grad[S4] - dot(S4, grad[S4]))
                    // grad[S4]   = grad[S5] @ vcur
                    // grad[S4]   = d[:D,id1,id2,id3] @ vcur
                    // grad[qcur] = grad[S1] @ kcur
                    // grad[vcur] = grad[S5].T @ S4
                    // grad[vcur] = d[:D,id1,id2,id3].T @ S4
                    //
                    // in post-order:
                    //
                    // S1         = qcur @ kcur.T
                    // S2         = S1 * scale
                    // S3         = diag_mask_inf(S2, P)
                    // S4         = softmax(S3)
                    // grad[S4]   = d[:D,id1,id2,id3] @ vcur
                    // grad[S3]   = S4 * (grad[S4] - dot(S4, grad[S4]))
                    // grad[S1]   = diag_mask_zero(grad[S3], P) * scale
                    // grad[qcur] = grad[S1] @ kcur
                    // grad[kcur] = grad[S1].T @ qcur
                    // grad[vcur] = d[:D,id1,id2,id3].T @ S4
                    //
                    // using less variables (SM=S4):
                    //
                    // S             = diag_mask_inf(qcur @ kcur.T * scale, P)
                    // SM            = softmax(S)
                    // S             = d[:D,iq1,iq2,iq3] @ vcur
                    // dot_SM_gradSM = dot(SM, S)
                    // S             = SM * (S - dot(SM, S))
                    // S             = diag_mask_zero(S, P) * scale
                    //
                    // grad[q][:D,iq1,iq2,iq3] += S   @ kcur
                    // grad[k][:D,:M,ik2,ik3]  += S.T @ qcur
                    // grad[v][:M,:D,iv2,iv3]  += d[:D,id1,id2,id3].T @ SM
                }
                // S = gradSM = d[:D,id1,id2,id3] @ vcur[:,:,iv2,iv3]
                // S = d[:D,id1,id2,id3] @ vcur[:,:,iv2,iv3]
                // for ic:
                //   S[:M] += vcur[:M,ic,iv2,iv3] * d[ic,id1,id2,id3]
                // exclude known future zero S[..] values from operation
                ggml_vec_set_f32(masked_begin, S, 0);
                for (int64_t ic = 0; ic < D; ++ic) {
                    ggml_vec_mad_f32(masked_begin,
                            S,
                             (float *) ((char *) v->data + (          ic*nbv1 + iv2*nbv2 + iv3*nbv3)),
                            *(float *) ((char *) d->data + (ic*nbd0 + id1*nbd1 + id2*nbd2 + id3*nbd3)));
                }

                // S = SM * (S - dot(SM, S))
                float dot_SM_gradSM = 0;
                ggml_vec_dot_f32 (masked_begin, &dot_SM_gradSM, 0, SM, 0, S, 0, 1);
                ggml_vec_acc1_f32(M, S, -dot_SM_gradSM);
                ggml_vec_mul_f32 (masked_begin, S, S, SM);

                // S = diag_mask_zero(S, P) * scale
                // already done by above ggml_vec_set_f32

                // exclude known zero S[..] values from operation
                ggml_vec_scale_f32(masked_begin, S, scale);

                // S    shape [M,1]
                // SM   shape [M,1]
                // kcur shape [D,M]
                // qcur shape [D,1]
                // vcur shape [M,D]

                // grad[q][:D,iq1,iq2,iq3] += S @ kcur
                // grad[q][:D,iq1,iq2,iq3] += shape[M,1] @ shape[D,M]
                // for ic:
                //  grad[q][:D,iq1,iq2,iq3] += S[ic] * kcur[:D,ic,ik2,ik3]
                // exclude known zero S[..] values from loop
                for (int64_t ic = 0; ic < masked_begin; ++ic) {
                    ggml_vec_mad_f32(D,
                            (float *) ((char *) grad_q  + (iq1*nbgq1 + iq2*nbgq2 + iq3*nbgq3)),
                            (float *) ((char *) k->data + (ic*nbk1   + ik2*nbk2  + ik3*nbk3)),
                            S[ic]);
                }

                // grad[k][:D,:M,iq2,iq3] += S.T @ qcur
                // for ic:
                //  grad[k][:D,ic,iq2,iq3] += S.T[0,ic] * qcur[:D,0]
                //  grad[k][:D,ic,iq2,iq3] += S[ic]     * qcur[:D,0]
                // exclude known zero S[..] values from loop
                for (int64_t ic = 0; ic < masked_begin; ++ic) {
                    ggml_vec_mad_f32(D,
                            (float *) ((char *) grad_k  + (ic*nbgk1 + ik2*nbgk2 + ik3*nbgk3)),
                            (float *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2  + iq3*nbq3)),
                            S[ic]);
                }

                // grad[v][:M,:D,iv2,iv3] += d[:D,id1,id2,id3].T @ SM
                // for ic:
                //  grad[v][:M,ic,iv2,iv3] += d[:D,id1,id2,id3].T[0,ic] * SM[:M]
                //  grad[v][:M,ic,iv2,iv3] += d[ic,id1,id2,id3]         * SM[:M]
                // exclude known zero SM[..] values from mad
                for (int64_t ic = 0; ic < D; ++ic) {
                    ggml_vec_mad_f32(masked_begin,
                             (float *) ((char *) grad_v  + (ic*nbgv1 + iv2*nbgv2 + iv3*nbgv3)),
                            SM,
                            *(float *) ((char *) d->data + (ic*nbd0 + id1*nbd1 + id2*nbd2 + id3*nbd3)));
                }
            }
        }
    }
}
static void ggml_compute_forward_flash_attn_back(
        const struct ggml_compute_params * params,
        const bool masked,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * q = dst->src[0];

    switch (q->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_flash_attn_back_f32(params, masked, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_ssm_conv

static void ggml_compute_forward_ssm_conv_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {
    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    const struct ggml_tensor * src0 = dst->src[0]; // conv_state
    const struct ggml_tensor * src1 = dst->src[1]; // x
    const struct ggml_tensor * src2 = dst->src[2]; // conv1d.weight
    const struct ggml_tensor * src3 = dst->src[3]; // state_seq

    const int ith = params->ith;
    const int nth = params->nth;

    const int nc   = src2->ne[0]; // d_conv
    const int nr   = src0->ne[1]; // d_inner
    const int n_t  = src1->ne[1]; // n_tokens
    const int n_kv = src0->ne[2]; // max number of sequences in the batch
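    // causal depthwise 1d convolution over a rolling window: for each token
    // the per-channel state is shifted left by one, x is appended as the last
    // column, and the output is the row-wise dot product with conv1d.weight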
    GGML_ASSERT((nr*n_t) + (nc*nr*n_kv) == ggml_nelements(dst));
    GGML_ASSERT(src0->nb[0] == sizeof(float));
    GGML_ASSERT(src1->nb[0] == sizeof(float));
    GGML_ASSERT(src2->nb[0] == sizeof(float));
    GGML_ASSERT(src3->nb[0] == sizeof(int32_t));
    GGML_ASSERT(src0->nb[1] == src0->ne[0]*sizeof(float));
    // for use with the destination state offset between sequences
    GGML_ASSERT(src2->nb[2] == src2->ne[1]*src2->ne[0]*sizeof(float));

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);
    const int ir  = ir1 - ir0;

    if (n_kv > 1) {
        // multiple sequences means it's hard to know when it's the first time a state is read,
        // so copy them all over to the destination, just to be sure.
        for (int i3 = 0; i3 < n_kv; ++i3) {
            float * s0 = (float *) ((char *) src0->data + ir0*(src0->nb[1]) + i3*(src0->nb[2]));
            float * s  = (float *) ((char *)  dst->data + ir0*(src2->nb[1]) + i3*(src2->nb[2]) + nr*n_t*sizeof(float));
            // can't use memcpy because of d_conv vs d_conv - 1
            for (int i1 = 0; i1 < ir; ++i1) {
                for (int i0 = 0; i0 < nc - 1; ++i0) {
                    // copy s0 to last (d_conv - 1) columns of s
                    s[1 + i0 + i1*nc] = s0[i0 + i1*(nc - 1)];
                }
            }
        }
    }
    for (int i2 = 0; i2 < n_t; ++i2) {
        int32_t * sq = (int32_t *) ((char *) src3->data +  i2*(src3->nb[1])); // {n_kv, n_tokens}
        float *   x  = (float *)   ((char *)  dst->data + ir0*sizeof(float) + i2*(nr*sizeof(float))); // {d_inner, n_tokens}
        float *   s  = (float *)   ((char *)  dst->data + ir0*(src2->nb[1]) + sq[0]*(src2->nb[2]) + nr*n_t*sizeof(float)); // {d_conv, d_inner, n_kv}
        float *   s0; // {d_conv - 1, d_inner, n_kv}
        float *   x0 = (float *)   ((char *) src1->data + ir0*(src1->nb[0]) + i2*(src1->nb[1])); // {d_inner, n_tokens}
        float *   c  = (float *)   ((char *) src2->data + ir0*(src2->nb[1])); // {d_conv, d_inner}
        int ne0s0;

        GGML_ASSERT(0 <= sq[0] && sq[0] < n_kv);

        // avoid needing to copy the state for the first token
        if (i2 == 0) {
            s0 = (float *) ((char *) src0->data + ir0*(src0->nb[1]) + sq[0]*(src0->nb[2])); // {d_conv - 1, d_inner, n_kv}
            ne0s0 = src0->ne[0];
        } else {
            // the source is the last (d_conv - 1) columns of the destination
            s0 = s + 1;
            ne0s0 = nc;
        }

        // d_inner
        for (int i1 = 0; i1 < ir; ++i1) {
            // shift state left
            for (int i0 = 0; i0 < nc - 1; ++i0) {
                s[i0 + i1*nc] = s0[i0 + i1*ne0s0];
            }
            // insert x on the last column
            s[(nc - 1) + i1*nc] = x0[i1];
        }

        // handle copies when there are multiple output states
        for (int i3 = 1; i3 < n_kv; ++i3) {
            int32_t seq = sq[i3];
            if (0 <= seq && seq < n_kv) {
                float * s1 = s + (seq - sq[0])*nc*nr;
                memcpy(s1, s, nc*ir*sizeof(float));
            } else {
                // stop at negative or too big seq_ids
                break;
            }
        }

        // it seems a little faster when this is separate from the state shift
        for (int i1 = 0; i1 < ir; ++i1) {
            // rowwise dot product
            float sumf = 0.0f;
            for (int i0 = 0; i0 < nc; ++i0) {
                int i = i0 + i1*nc;
                sumf += s[i] * c[i];
            }
            x[i1] = sumf;
        }
    }
}
static void ggml_compute_forward_ssm_conv(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {
    switch (dst->src[0]->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_ssm_conv_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_ssm_scan

static void ggml_compute_forward_ssm_scan_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {
    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    const struct ggml_tensor * src0 = dst->src[0]; // s
    const struct ggml_tensor * src1 = dst->src[1]; // x
    const struct ggml_tensor * src2 = dst->src[2]; // dt
    const struct ggml_tensor * src3 = dst->src[3]; // A
    const struct ggml_tensor * src4 = dst->src[4]; // B
    const struct ggml_tensor * src5 = dst->src[5]; // C
    const struct ggml_tensor * src6 = dst->src[6]; // sq

    const int ith = params->ith;
    const int nth = params->nth;

    const int64_t nc   = src0->ne[0]; // d_state
    const int64_t nr   = src0->ne[1]; // d_inner
    const int64_t n_t  = src1->ne[1]; // number of tokens in the batch
    const int64_t n_kv = src0->ne[2]; // max number of sequences in the batch
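    // selective state-space recurrence (cf. the Mamba reference linked below),
    // applied per channel and per token:
    //   dt' = softplus(dt)
    //   s   = s * exp(dt' * A) + B * (x * dt')
    //   y   = sum over d_state of (s * C)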
    GGML_ASSERT(ggml_nelements(src1) + ggml_nelements(src0) == ggml_nelements(dst));
    GGML_ASSERT(src0->nb[0] == sizeof(float));
    GGML_ASSERT(src1->nb[0] == sizeof(float));
    GGML_ASSERT(src2->nb[0] == sizeof(float));
    GGML_ASSERT(src3->nb[0] == sizeof(float));
    GGML_ASSERT(src4->nb[0] == sizeof(float));
    GGML_ASSERT(src5->nb[0] == sizeof(float));
    // required for the dot product between s and C, and when copying the states
    GGML_ASSERT(src0->nb[1] == src0->ne[0]*sizeof(float));
    // required for per-sequence offsets for states
    GGML_ASSERT(src0->nb[2] == src0->ne[0]*src0->ne[1]*sizeof(float));
    // required to get correct offset for state destination (i.e. src1->nb[2])
    GGML_ASSERT(src1->nb[2] == src1->ne[0]*src1->ne[1]*sizeof(float));

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);
    const int ir  = ir1 - ir0;

    if (n_kv > 1) {
        // it's hard to know if the source states have already been copied
        // when there are multiple, so copy them already.
        for (int i3 = 0; i3 < n_kv; ++i3) {
            float * s0 = (float *) ((char *) src0->data + ir0*(src0->nb[1]) + i3*(src0->nb[2]));
            float * s  = (float *) ((char *)  dst->data + ir0*(src0->nb[1]) + i3*(src0->nb[2]) + src1->nb[2]);
            memcpy(s, s0, nc*ir*sizeof(float));
        }
    }
    for (int i2 = 0; i2 < n_t; ++i2) {
        int32_t * sq = (int32_t *) ((char *) src6->data +  i2*(src6->nb[1])); // {n_kv, n_tokens}
        float *   y  = (float *)   ((char *)  dst->data + ir0*(src1->nb[0]) + i2*(src1->nb[1])); // {d_inner, n_tokens}
        float *   s  = (float *)   ((char *)  dst->data + ir0*(src0->nb[1]) + sq[0]*(src0->nb[2]) + src1->nb[2]); // {d_state, d_inner, n_kv}
        float *   s0;
        float *   x  = (float *)   ((char *) src1->data + ir0*(src1->nb[0]) + i2*(src1->nb[1])); // {d_inner, n_tokens}
        float *   dt = (float *)   ((char *) src2->data + ir0*(src2->nb[0]) + i2*(src2->nb[1])); // {d_inner, n_tokens}
        float *   A  = (float *)   ((char *) src3->data + ir0*(src3->nb[1])); // {d_state, d_inner}
        float *   B  = (float *)   ((char *) src4->data +  i2*(src4->nb[1])); // {d_state, n_tokens}
        float *   C  = (float *)   ((char *) src5->data +  i2*(src5->nb[1])); // {d_state, n_tokens}

        GGML_ASSERT(0 <= sq[0] && sq[0] < n_kv);

        // avoid needing to copy the state for the first token
        if (i2 == 0) {
            s0 = (float *) ((char *) src0->data + ir0*(src0->nb[1]) + sq[0]*(src0->nb[2])); // {d_state, d_inner, n_kv}
        } else {
            // otherwise the source is the same as the destination
            s0 = s;
        }

        // d_inner
        for (int i1 = 0; i1 < ir; ++i1) {
            // ref: https://github.com/state-spaces/mamba/blob/34076d664838588a3c97727b263478ab9f621a07/mamba_ssm/ops/triton/selective_state_update.py#L78
            float dt_soft_plus = dt[i1] <= 20.0f ? log1pf(expf(dt[i1])) : dt[i1];
            float x_dt = x[i1] * dt_soft_plus;
            float sumf = 0.0f;
            // d_state
            for (int i0 = 0; i0 < nc; ++i0) {
                int i = i0 + i1*nc;
                // state = prev_state * dA + dB * x
                float state = (s0[i] * expf(dt_soft_plus * A[i])) + (B[i0] * x_dt);
                // y = rowwise_dotprod(state, C)
                sumf += state * C[i0];
                s[i] = state;
            }
            y[i1] = sumf;
        }

        // handle copies when there are multiple output states
        for (int i3 = 1; i3 < n_kv; ++i3) {
            int32_t seq = sq[i3];
            if (0 <= seq && seq < n_kv) {
                float * s1 = s + (seq - sq[0])*nc*nr;
                memcpy(s1, s, nc*ir*sizeof(float));
            } else {
                // stop at negative or too big seq_ids
                break;
            }
        }
    }
}
static void ggml_compute_forward_ssm_scan(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {
    switch (dst->src[0]->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_ssm_scan_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_win_part

static void ggml_compute_forward_win_part_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne)
    GGML_TENSOR_LOCALS(int64_t, ne,  dst,  ne)

    const int32_t nep0 = ((const int32_t *)(dst->op_params))[0];
    const int32_t nep1 = ((const int32_t *)(dst->op_params))[1];
    const int32_t w    = ((const int32_t *)(dst->op_params))[2];
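    // partitions the input into nep0 x nep1 non-overlapping w x w windows
    // (one window per dst batch index i3), zero-filling positions where a
    // window extends past the input's extent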
    assert(ne00 == ne0);
    assert(ne3  == nep0*nep1);

    // TODO: optimize / multi-thread
    for (int py = 0; py < nep1; ++py) {
        for (int px = 0; px < nep0; ++px) {
            const int64_t i3 = py*nep0 + px;
            for (int64_t i2 = 0; i2 < ne2; ++i2) {
                for (int64_t i1 = 0; i1 < ne1; ++i1) {
                    for (int64_t i0 = 0; i0 < ne0; ++i0) {
                        const int64_t i02 = py*w + i2;
                        const int64_t i01 = px*w + i1;
                        const int64_t i00 = i0;

                        const int64_t i = i3*ne2*ne1*ne0 + i2*ne1*ne0    + i1*ne0   + i0;
                        const int64_t j =                  i02*ne01*ne00 + i01*ne00 + i00;

                        if (py*w + i2 >= ne02 || px*w + i1 >= ne01) {
                            ((float *) dst->data)[i] = 0.0f;
                        } else {
                            ((float *) dst->data)[i] = ((float *) src0->data)[j];
                        }
                    }
                }
            }
        }
    }
}
static void ggml_compute_forward_win_part(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_win_part_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_win_unpart

static void ggml_compute_forward_win_unpart_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne)
    GGML_TENSOR_LOCALS(int64_t, ne,  dst,  ne)

    const int32_t w = ((const int32_t *)(dst->op_params))[0];
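    // inverse of win_part: gathers the w x w windows back into a single
    // contiguous tensor, discarding the zero padding added when partitioning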
    // padding
    const int px = (w - ne1%w)%w;
    //const int py = (w - ne2%w)%w;

    const int npx = (px + ne1)/w;
    //const int npy = (py + ne2)/w;

    assert(ne0 == ne00);

    // TODO: optimize / multi-thread
    for (int64_t i2 = 0; i2 < ne2; ++i2) {
        for (int64_t i1 = 0; i1 < ne1; ++i1) {
            for (int64_t i0 = 0; i0 < ne0; ++i0) {
                const int ip2 = i2/w;
                const int ip1 = i1/w;

                const int64_t i02 = i2%w;
                const int64_t i01 = i1%w;
                const int64_t i00 = i0;

                const int64_t i = (ip2*npx + ip1)*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00 + i00;
                const int64_t j =                                   i2*ne1*ne0   +  i1*ne0  + i0;

                ((float *) dst->data)[j] = ((float *) src0->data)[i];
            }
        }
    }
}
static void ggml_compute_forward_win_unpart(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_win_unpart_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_unary

static void ggml_compute_forward_unary(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const enum ggml_unary_op op = ggml_get_unary_op(dst);

    switch (op) {
        case GGML_UNARY_OP_ABS:
            {
                ggml_compute_forward_abs(params, dst);
            } break;
        case GGML_UNARY_OP_SGN:
            {
                ggml_compute_forward_sgn(params, dst);
            } break;
        case GGML_UNARY_OP_NEG:
            {
                ggml_compute_forward_neg(params, dst);
            } break;
        case GGML_UNARY_OP_STEP:
            {
                ggml_compute_forward_step(params, dst);
            } break;
        case GGML_UNARY_OP_TANH:
            {
                ggml_compute_forward_tanh(params, dst);
            } break;
        case GGML_UNARY_OP_ELU:
            {
                ggml_compute_forward_elu(params, dst);
            } break;
        case GGML_UNARY_OP_RELU:
            {
                ggml_compute_forward_relu(params, dst);
            } break;
        case GGML_UNARY_OP_SIGMOID:
            {
                ggml_compute_forward_sigmoid(params, dst);
            } break;
        case GGML_UNARY_OP_GELU:
            {
                ggml_compute_forward_gelu(params, dst);
            } break;
        case GGML_UNARY_OP_GELU_QUICK:
            {
                ggml_compute_forward_gelu_quick(params, dst);
            } break;
        case GGML_UNARY_OP_SILU:
            {
                ggml_compute_forward_silu(params, dst);
            } break;
        case GGML_UNARY_OP_HARDSWISH:
            {
                ggml_compute_forward_hardswish(params, dst);
            } break;
        case GGML_UNARY_OP_HARDSIGMOID:
            {
                ggml_compute_forward_hardsigmoid(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
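// look up decomposed relative positional embeddings (as in SAM's image
// encoder): for each (i1, i2) pair, row (w - i1 - 1) + i2 of src0 is copied
// into dst, i.e. one embedding row per relative offset between positions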
// ggml_compute_forward_get_rel_pos

static void ggml_compute_forward_get_rel_pos_f16(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    // ref: https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/modeling/image_encoder.py#L292-L322

    GGML_TENSOR_UNARY_OP_LOCALS

    const int64_t w = ne1;

    ggml_fp16_t * src0_data = (ggml_fp16_t *) src0->data;
    ggml_fp16_t * dst_data  = (ggml_fp16_t *) dst->data;

    for (int64_t i2 = 0; i2 < ne2; ++i2) {
        for (int64_t i1 = 0; i1 < ne1; ++i1) {
            const int64_t pos = (w - i1 - 1) + i2;
            for (int64_t i0 = 0; i0 < ne0; ++i0) {
                dst_data[i2*ne1*ne0 + i1*ne0 + i0] = src0_data[pos*ne00 + i0];
            }
        }
    }
}
static void ggml_compute_forward_get_rel_pos(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F16:
        case GGML_TYPE_BF16:
            {
                ggml_compute_forward_get_rel_pos_f16(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
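// add_rel_pos adds the precomputed relative position scores src1/src2 into the
// attention map: each source element is broadcast along one spatial axis of
// dst (see the jdh/jdw strides below); work is split across threads along
// ne13, and the inplace flag in op_params decides whether dst is first seeded
// with a copy of src0 during INIT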
// ggml_compute_forward_add_rel_pos

static void ggml_compute_forward_add_rel_pos_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];
    const struct ggml_tensor * src1 = dst->src[1];
    const struct ggml_tensor * src2 = dst->src[2];

    const bool inplace = (bool) ((int32_t *) dst->op_params)[0];
    if (!inplace && params->type == GGML_TASK_TYPE_INIT) {
        if (params->ith != 0) {
            return;
        }
        memcpy((char *) dst->data, (char *) src0->data, ggml_nbytes(dst));
        return;
    }
    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    // ref: https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/modeling/image_encoder.py#L357-L359

    float * src1_data = (float *) src1->data;
    float * src2_data = (float *) src2->data;
    float * dst_data  = (float *) dst->data;

    const int64_t ne10 = src1->ne[0];
    const int64_t ne11 = src1->ne[1];
    const int64_t ne12 = src1->ne[2];
    const int64_t ne13 = src1->ne[3];

    const int ith = params->ith;
    const int nth = params->nth;

    // total patches in dst
    const int np = ne13;

    // patches per thread
    const int dp = (np + nth - 1)/nth;

    // patch range for this thread
    const int ip0 = dp*ith;
    const int ip1 = MIN(ip0 + dp, np);

    for (int64_t i13 = ip0; i13 < ip1; ++i13) {
        for (int64_t i12 = 0; i12 < ne12; ++i12) {
            for (int64_t i11 = 0; i11 < ne11; ++i11) {
                const int64_t jp1 = i13*ne12*ne11*ne10 + i12*ne11*ne10 + i11*ne10;
                for (int64_t i10 = 0; i10 < ne10; ++i10) {
                    const int64_t jp0 = jp1 + i10;
                    const float src1_e = src1_data[jp0];
                    const float src2_e = src2_data[jp0];

                    const int64_t jdh = jp0 * ne10;
                    const int64_t jdw = jdh - (ne10 - 1) * i10;

                    for (int64_t j = 0; j < ne10; ++j) {
                        dst_data[jdh + j     ] += src2_e;
                        dst_data[jdw + j*ne10] += src1_e;
                    }
                }
            }
        }
    }
}
static void ggml_compute_forward_add_rel_pos(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_add_rel_pos_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
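// the (deprecated) map_* API: a user-provided f32 function pointer is stored
// verbatim in op_params and applied row by row on a single thread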
// ggml_compute_forward_map_unary

static void ggml_compute_forward_map_unary_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst,
        const ggml_unary_op_f32_t fun) {

    const struct ggml_tensor * src0 = dst->src[0];

    GGML_ASSERT(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert( dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        fun(nc,
                (float *) ((char *) dst->data  + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}
static void ggml_compute_forward_map_unary(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst,
        const ggml_unary_op_f32_t fun) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_map_unary_f32(params, dst, fun);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_map_binary

static void ggml_compute_forward_map_binary_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst,
        const ggml_binary_op_f32_t fun) {

    const struct ggml_tensor * src0 = dst->src[0];
    const struct ggml_tensor * src1 = dst->src[1];

    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert( dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));
    assert(src1->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        fun(nc,
                (float *) ((char *) dst->data  + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])),
                (float *) ((char *) src1->data + i*(src1->nb[1])));
    }
}
static void ggml_compute_forward_map_binary(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst,
        const ggml_binary_op_f32_t fun) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_map_binary_f32(params, dst, fun);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_map_custom1

static void ggml_compute_forward_map_custom1_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst,
        const ggml_custom1_op_f32_t fun) {

    const struct ggml_tensor * a = dst->src[0];

    assert(params->ith == 0);

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    fun(dst, a);
}
// ggml_compute_forward_map_custom2

static void ggml_compute_forward_map_custom2_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst,
        const ggml_custom2_op_f32_t fun) {

    const struct ggml_tensor * a = dst->src[0];
    const struct ggml_tensor * b = dst->src[1];

    assert(params->ith == 0);

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    fun(dst, a, b);
}
// ggml_compute_forward_map_custom3

static void ggml_compute_forward_map_custom3_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst,
        const ggml_custom3_op_f32_t fun) {

    const struct ggml_tensor * a = dst->src[0];
    const struct ggml_tensor * b = dst->src[1];
    const struct ggml_tensor * c = dst->src[2];

    assert(params->ith == 0);

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    fun(dst, a, b, c);
}
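// the current map_custom{1,2,3} API: a struct ggml_map_custom*_op_params
// (function pointer, n_tasks, userdata) is stored in op_params, and the
// callback receives ith/nth so it can parallelize itself across threads.
//
// sketch of hypothetical user code (assuming the public ggml_map_custom1
// builder with signature ggml_map_custom1(ctx, a, fun, n_tasks, userdata)):
//
//   static void my_op(struct ggml_tensor * dst, const struct ggml_tensor * a,
//                     int ith, int nth, void * userdata) {
//       // each thread processes rows i with i % nth == ith ...
//   }
//
//   struct ggml_tensor * out = ggml_map_custom1(ctx, a, my_op, GGML_N_TASKS_MAX, NULL);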
// ggml_compute_forward_map_custom1

static void ggml_compute_forward_map_custom1(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * a = dst->src[0];

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    struct ggml_map_custom1_op_params p;
    memcpy(&p, dst->op_params, sizeof(p));

    p.fun(dst, a, params->ith, params->nth, p.userdata);
}
// ggml_compute_forward_map_custom2

static void ggml_compute_forward_map_custom2(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * a = dst->src[0];
    const struct ggml_tensor * b = dst->src[1];

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    struct ggml_map_custom2_op_params p;
    memcpy(&p, dst->op_params, sizeof(p));

    p.fun(dst, a, b, params->ith, params->nth, p.userdata);
}
// ggml_compute_forward_map_custom3

static void ggml_compute_forward_map_custom3(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * a = dst->src[0];
    const struct ggml_tensor * b = dst->src[1];
    const struct ggml_tensor * c = dst->src[2];

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    struct ggml_map_custom3_op_params p;
    memcpy(&p, dst->op_params, sizeof(p));

    p.fun(dst, a, b, c, params->ith, params->nth, p.userdata);
}
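// cross entropy loss: rows of src0 are logits, rows of src1 are target
// probabilities; during COMPUTE each thread reduces its rows into its slot
// sums[ith] in wdata (using a max-subtracted softmax for numerical stability
// and an eps rescale to avoid log(0)), and during FINALIZE thread 0 combines
// the partial sums and applies the -1/nr scaling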
// ggml_compute_forward_cross_entropy_loss

static void ggml_compute_forward_cross_entropy_loss_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];
    const struct ggml_tensor * src1 = dst->src[1];

    GGML_ASSERT(ggml_is_contiguous(src0));
    GGML_ASSERT(ggml_is_contiguous(src1));
    GGML_ASSERT(ggml_is_scalar(dst));
    GGML_ASSERT(ggml_are_same_shape(src0, src1));

    const int ith = params->ith;
    const int nth = params->nth;

    float * sums = (float *) params->wdata;

    // TODO: handle transposed/permuted matrices
    const int nc = src0->ne[0];
    const int nr = ggml_nrows(src0);

    GGML_ASSERT(params->wsize >= sizeof(float) * (nth + nth * nc));

    if (params->type == GGML_TASK_TYPE_INIT) {
        if (ith == 0) {
            memset(sums, 0, sizeof(float) * (nth + nth * nc));
        }
        return;
    }

    if (params->type == GGML_TASK_TYPE_FINALIZE) {
        if (ith == 0) {
            float * dp = (float *) dst->data;
            ggml_vec_sum_f32(nth, dp, sums);
            dp[0] *= -1.0f / (float) nr;
        }
        return;
    }

    const double eps = 1e-9;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        float * s0 = (float *)((char *) src0->data + i1*src0->nb[1]);
        float * s1 = (float *)((char *) src1->data + i1*src1->nb[1]);
        float * st = ((float *) params->wdata) + nth + ith*nc;

#ifndef NDEBUG
        for (int i = 0; i < nc; ++i) {
            //printf("p[%d] = %f\n", i, p[i]);
            assert(!isnan(s0[i]));
            assert(!isnan(s1[i]));
        }
#endif
        // soft_max
        ggml_float sum = 0.0;
        {
            float max = -INFINITY;
            ggml_vec_max_f32(nc, &max, s0);

            uint16_t scvt; UNUSED(scvt);
            for (int i = 0; i < nc; i++) {
                if (s0[i] == -INFINITY) {
                    st[i] = 0.0f;
                } else {
#ifndef GGML_CROSS_ENTROPY_EXP_FP16
                    const float s = s0[i] - max;
                    const float val = expf(s);
#else
                    ggml_fp16_t s = GGML_FP32_TO_FP16(s0[i] - max);
                    memcpy(&scvt, &s, sizeof(scvt));
                    const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt]);
#endif
                    sum += (ggml_float)val;
                    st[i] = val;
                }
            }

            assert(sum > 0.0);
            // sum = 1.0/sum;
        }
        // avoid log(0) by rescaling from [0..1] to [eps..1]
        sum = (1.0 - eps) / sum;
        ggml_vec_scale_f32(nc, st, sum);
        ggml_vec_add1_f32(nc, st, st, eps);
        ggml_vec_log_f32(nc, st, st);
        ggml_vec_mul_f32(nc, st, st, s1);

        float st_sum = 0;
        ggml_vec_sum_f32(nc, &st_sum, st);
        sums[ith] += st_sum;

#ifndef NDEBUG
        for (int i = 0; i < nc; ++i) {
            assert(!isnan(st[i]));
            assert(!isinf(st[i]));
        }
#endif
    }
}
static void ggml_compute_forward_cross_entropy_loss(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_cross_entropy_loss_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
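// backward pass of the cross entropy loss: for each row the gradient is
// (softmax(src0) - src1) * grad(loss) / nr, computed with the same
// max-subtraction and eps rescaling of the softmax as in the forward pass;
// opt0 carries the incoming scalar gradient of the loss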
// ggml_compute_forward_cross_entropy_loss_back

static void ggml_compute_forward_cross_entropy_loss_back_f32(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];
    const struct ggml_tensor * src1 = dst->src[1];
    const struct ggml_tensor * opt0 = dst->src[2];

    GGML_ASSERT(ggml_is_contiguous(dst));
    GGML_ASSERT(ggml_is_contiguous(src0));
    GGML_ASSERT(ggml_is_contiguous(src1));
    GGML_ASSERT(ggml_is_contiguous(opt0));
    GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));

    const int64_t ith = params->ith;
    const int64_t nth = params->nth;

    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
        return;
    }

    const double eps = 1e-9;

    // TODO: handle transposed/permuted matrices
    const int64_t nc = src0->ne[0];
    const int64_t nr = ggml_nrows(src0);

    // rows per thread
    const int64_t dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int64_t ir0 = dr*ith;
    const int64_t ir1 = MIN(ir0 + dr, nr);

    float * d = (float *) opt0->data;

    for (int64_t i1 = ir0; i1 < ir1; i1++) {
        float * ds0 = (float *)((char *) dst->data  + i1*dst->nb[1]);
        float * s0  = (float *)((char *) src0->data + i1*src0->nb[1]);
        float * s1  = (float *)((char *) src1->data + i1*src1->nb[1]);

#ifndef NDEBUG
        for (int i = 0; i < nc; ++i) {
            //printf("p[%d] = %f\n", i, p[i]);
            assert(!isnan(s0[i]));
            assert(!isnan(s1[i]));
        }
#endif

        // soft_max
        ggml_float sum = 0.0;
        {
            float max = -INFINITY;
            ggml_vec_max_f32(nc, &max, s0);

            uint16_t scvt; UNUSED(scvt);
            for (int i = 0; i < nc; i++) {
                if (s0[i] == -INFINITY) {
                    ds0[i] = 0.0f;
                } else {
#ifndef GGML_CROSS_ENTROPY_EXP_FP16
                    const float s = s0[i] - max;
                    const float val = expf(s);
#else
                    ggml_fp16_t s = GGML_FP32_TO_FP16(s0[i] - max);
                    memcpy(&scvt, &s, sizeof(scvt));
                    const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt]);
#endif
                    sum += (ggml_float)val;
                    ds0[i] = val;
                }
            }

            assert(sum > 0.0);
            sum = (1.0 - eps)/sum;
        }

        // grad(src0) = (softmax(src0) - src1) * grad(cross_entropy_loss(src0, src1)) / nr
        ggml_vec_scale_f32(nc, ds0, sum);
        ggml_vec_add1_f32(nc, ds0, ds0, eps);
        ggml_vec_sub_f32(nc, ds0, ds0, s1);
        ggml_vec_scale_f32(nc, ds0, d[0] / (float) nr);

#ifndef NDEBUG
        for (int i = 0; i < nc; ++i) {
            assert(!isnan(ds0[i]));
            assert(!isinf(ds0[i]));
        }
#endif
    }
}
static void ggml_compute_forward_cross_entropy_loss_back(
        const struct ggml_compute_params * params,
        struct ggml_tensor * dst) {

    const struct ggml_tensor * src0 = dst->src[0];

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_cross_entropy_loss_back_f32(params, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
/////////////////////////////////
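// central forward dispatcher: routes a graph node to the compute kernel that
// matches its opcode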
static void ggml_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor) {
    GGML_ASSERT(params);

    if (tensor->op == GGML_OP_NONE || ggml_is_empty(tensor)) {
        return;
    }

    switch (tensor->op) {
        case GGML_OP_DUP:
            {
                ggml_compute_forward_dup(params, tensor);
            } break;
        case GGML_OP_ADD:
            {
                ggml_compute_forward_add(params, tensor);
            } break;
        case GGML_OP_ADD1:
            {
                ggml_compute_forward_add1(params, tensor);
            } break;
        case GGML_OP_ACC:
            {
                ggml_compute_forward_acc(params, tensor);
            } break;
        case GGML_OP_SUB:
            {
                ggml_compute_forward_sub(params, tensor);
            } break;
        case GGML_OP_MUL:
            {
                ggml_compute_forward_mul(params, tensor);
            } break;
        case GGML_OP_DIV:
            {
                ggml_compute_forward_div(params, tensor);
            } break;
        case GGML_OP_SQR:
            {
                ggml_compute_forward_sqr(params, tensor);
            } break;
        case GGML_OP_SQRT:
            {
                ggml_compute_forward_sqrt(params, tensor);
            } break;
        case GGML_OP_LOG:
            {
                ggml_compute_forward_log(params, tensor);
            } break;
        case GGML_OP_SUM:
            {
                ggml_compute_forward_sum(params, tensor);
            } break;
        case GGML_OP_SUM_ROWS:
            {
                ggml_compute_forward_sum_rows(params, tensor);
            } break;
        case GGML_OP_MEAN:
            {
                ggml_compute_forward_mean(params, tensor);
            } break;
        case GGML_OP_ARGMAX:
            {
                ggml_compute_forward_argmax(params, tensor);
            } break;
        case GGML_OP_REPEAT:
            {
                ggml_compute_forward_repeat(params, tensor);
            } break;
        case GGML_OP_REPEAT_BACK:
            {
                ggml_compute_forward_repeat_back(params, tensor);
            } break;
        case GGML_OP_CONCAT:
            {
                ggml_compute_forward_concat(params, tensor);
            } break;
        case GGML_OP_SILU_BACK:
            {
                ggml_compute_forward_silu_back(params, tensor);
            } break;
        case GGML_OP_NORM:
            {
                ggml_compute_forward_norm(params, tensor);
            } break;
        case GGML_OP_RMS_NORM:
            {
                ggml_compute_forward_rms_norm(params, tensor);
            } break;
        case GGML_OP_RMS_NORM_BACK:
            {
                ggml_compute_forward_rms_norm_back(params, tensor);
            } break;
        case GGML_OP_GROUP_NORM:
            {
                ggml_compute_forward_group_norm(params, tensor);
            } break;
        case GGML_OP_MUL_MAT:
            {
                ggml_compute_forward_mul_mat(params, tensor);
            } break;
        case GGML_OP_MUL_MAT_ID:
            {
                ggml_compute_forward_mul_mat_id(params, tensor);
            } break;
        case GGML_OP_OUT_PROD:
            {
                ggml_compute_forward_out_prod(params, tensor);
            } break;
        case GGML_OP_SCALE:
            {
                ggml_compute_forward_scale(params, tensor);
            } break;
        case GGML_OP_SET:
            {
                ggml_compute_forward_set(params, tensor);
            } break;
        case GGML_OP_CPY:
            {
                ggml_compute_forward_cpy(params, tensor);
            } break;
        case GGML_OP_CONT:
            {
                ggml_compute_forward_cont(params, tensor);
            } break;
        case GGML_OP_RESHAPE:
            {
                ggml_compute_forward_reshape(params, tensor);
            } break;
        case GGML_OP_VIEW:
            {
                ggml_compute_forward_view(params, tensor);
            } break;
        case GGML_OP_PERMUTE:
            {
                ggml_compute_forward_permute(params, tensor);
            } break;
        case GGML_OP_TRANSPOSE:
            {
                ggml_compute_forward_transpose(params, tensor);
            } break;
        case GGML_OP_GET_ROWS:
            {
                ggml_compute_forward_get_rows(params, tensor);
            } break;
        case GGML_OP_GET_ROWS_BACK:
            {
                ggml_compute_forward_get_rows_back(params, tensor);
            } break;
        case GGML_OP_DIAG:
            {
                ggml_compute_forward_diag(params, tensor);
            } break;
        case GGML_OP_DIAG_MASK_INF:
            {
                ggml_compute_forward_diag_mask_inf(params, tensor);
            } break;
        case GGML_OP_DIAG_MASK_ZERO:
            {
                ggml_compute_forward_diag_mask_zero(params, tensor);
            } break;
        case GGML_OP_SOFT_MAX:
            {
                ggml_compute_forward_soft_max(params, tensor);
            } break;
        case GGML_OP_SOFT_MAX_BACK:
            {
                ggml_compute_forward_soft_max_back(params, tensor);
            } break;
        case GGML_OP_ROPE:
            {
                ggml_compute_forward_rope(params, tensor);
            } break;
        case GGML_OP_ROPE_BACK:
            {
                ggml_compute_forward_rope_back(params, tensor);
            } break;
        case GGML_OP_CLAMP:
            {
                ggml_compute_forward_clamp(params, tensor);
            } break;
        case GGML_OP_CONV_TRANSPOSE_1D:
            {
                ggml_compute_forward_conv_transpose_1d(params, tensor);
            } break;
        case GGML_OP_IM2COL:
            {
                ggml_compute_forward_im2col(params, tensor);
            } break;
        case GGML_OP_CONV_TRANSPOSE_2D:
            {
                ggml_compute_forward_conv_transpose_2d(params, tensor);
            } break;
        case GGML_OP_POOL_1D:
            {
                ggml_compute_forward_pool_1d(params, tensor);
            } break;
        case GGML_OP_POOL_2D:
            {
                ggml_compute_forward_pool_2d(params, tensor);
            } break;
        case GGML_OP_UPSCALE:
            {
                ggml_compute_forward_upscale(params, tensor);
            } break;
        case GGML_OP_PAD:
            {
                ggml_compute_forward_pad(params, tensor);
            } break;
        case GGML_OP_ARANGE:
            {
                ggml_compute_forward_arange(params, tensor);
            } break;
        case GGML_OP_TIMESTEP_EMBEDDING:
            {
                ggml_compute_forward_timestep_embedding(params, tensor);
            } break;
        case GGML_OP_ARGSORT:
            {
                ggml_compute_forward_argsort(params, tensor);
            } break;
        case GGML_OP_LEAKY_RELU:
            {
                ggml_compute_forward_leaky_relu(params, tensor);
            } break;
        case GGML_OP_FLASH_ATTN:
            {
                const int32_t t = ggml_get_op_params_i32(tensor, 0);
                GGML_ASSERT(t == 0 || t == 1);
                const bool masked = t != 0;
                ggml_compute_forward_flash_attn(params, masked, tensor);
            } break;
        case GGML_OP_FLASH_ATTN_EXT:
            {
                ggml_compute_forward_flash_attn_ext(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor->src[3], tensor);
            } break;
        case GGML_OP_FLASH_FF:
            {
                ggml_compute_forward_flash_ff(params, tensor);
            } break;
        case GGML_OP_FLASH_ATTN_BACK:
            {
                int32_t t = ggml_get_op_params_i32(tensor, 0);
                GGML_ASSERT(t == 0 || t == 1);
                bool masked = t != 0;
                ggml_compute_forward_flash_attn_back(params, masked, tensor);
            } break;
        case GGML_OP_SSM_CONV:
            {
                ggml_compute_forward_ssm_conv(params, tensor);
            } break;
        case GGML_OP_SSM_SCAN:
            {
                ggml_compute_forward_ssm_scan(params, tensor);
            } break;
        case GGML_OP_WIN_PART:
            {
                ggml_compute_forward_win_part(params, tensor);
            } break;
        case GGML_OP_WIN_UNPART:
            {
                ggml_compute_forward_win_unpart(params, tensor);
            } break;
        case GGML_OP_UNARY:
            {
                ggml_compute_forward_unary(params, tensor);
            } break;
        case GGML_OP_GET_REL_POS:
            {
                ggml_compute_forward_get_rel_pos(params, tensor);
            } break;
        case GGML_OP_ADD_REL_POS:
            {
                ggml_compute_forward_add_rel_pos(params, tensor);
            } break;
        case GGML_OP_MAP_UNARY:
            {
                ggml_unary_op_f32_t fun;
                memcpy(&fun, tensor->op_params, sizeof(fun));
                ggml_compute_forward_map_unary(params, tensor, fun);
            }
            break;
        case GGML_OP_MAP_BINARY:
            {
                ggml_binary_op_f32_t fun;
                memcpy(&fun, tensor->op_params, sizeof(fun));
                ggml_compute_forward_map_binary(params, tensor, fun);
            }
            break;
        case GGML_OP_MAP_CUSTOM1_F32:
            {
                ggml_custom1_op_f32_t fun;
                memcpy(&fun, tensor->op_params, sizeof(fun));
                ggml_compute_forward_map_custom1_f32(params, tensor, fun);
            }
            break;
        case GGML_OP_MAP_CUSTOM2_F32:
            {
                ggml_custom2_op_f32_t fun;
                memcpy(&fun, tensor->op_params, sizeof(fun));
                ggml_compute_forward_map_custom2_f32(params, tensor, fun);
            }
            break;
        case GGML_OP_MAP_CUSTOM3_F32:
            {
                ggml_custom3_op_f32_t fun;
                memcpy(&fun, tensor->op_params, sizeof(fun));
                ggml_compute_forward_map_custom3_f32(params, tensor, fun);
            }
            break;
        case GGML_OP_MAP_CUSTOM1:
            {
                ggml_compute_forward_map_custom1(params, tensor);
            }
            break;
        case GGML_OP_MAP_CUSTOM2:
            {
                ggml_compute_forward_map_custom2(params, tensor);
            }
            break;
        case GGML_OP_MAP_CUSTOM3:
            {
                ggml_compute_forward_map_custom3(params, tensor);
            }
            break;
        case GGML_OP_CROSS_ENTROPY_LOSS:
            {
                ggml_compute_forward_cross_entropy_loss(params, tensor);
            }
            break;
        case GGML_OP_CROSS_ENTROPY_LOSS_BACK:
            {
                ggml_compute_forward_cross_entropy_loss_back(params, tensor);
            }
            break;
        case GGML_OP_NONE:
            {
                // nop
            } break;
        case GGML_OP_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }
}
////////////////////////////////////////////////////////////////////////////////
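// fixed-size open-addressing hash set keyed by tensor pointer: the table is
// sized to a prime to reduce clustering, collisions are resolved by linear
// probing, and a lookup fails with GGML_HASHTABLE_FULL after a full cycle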
static size_t ggml_hash_size(size_t min_sz) {
    // next primes after powers of two
    static const size_t primes[] = {
        2, 3, 5, 11, 17, 37, 67, 131, 257, 521, 1031,
        2053, 4099, 8209, 16411, 32771, 65537, 131101,
        262147, 524309, 1048583, 2097169, 4194319, 8388617,
        16777259, 33554467, 67108879, 134217757, 268435459,
        536870923, 1073741827, 2147483659
    };
    static const size_t n_primes = sizeof(primes)/sizeof(primes[0]);

    // find the smallest prime that is larger or equal to min_sz
    size_t l = 0;
    size_t r = n_primes;
    while (l < r) {
        size_t m = (l + r)/2;
        if (primes[m] < min_sz) {
            l = m + 1;
        } else {
            r = m;
        }
    }
    size_t sz = l < n_primes ? primes[l] : min_sz | 1;
    return sz;
}
static size_t ggml_hash(const void * p) {
    return (size_t)p;
}

size_t ggml_hash_find(const struct ggml_hash_set hash_set, struct ggml_tensor * key) {
    size_t h = ggml_hash(key) % hash_set.size;

    // linear probing
    size_t i = h;
    while (hash_set.keys[i] != NULL && hash_set.keys[i] != key) {
        i = (i + 1) % hash_set.size;
        if (i == h) {
            // visited all hash table entries -> not found
            return GGML_HASHTABLE_FULL;
        }
    }
    return i;
}

bool ggml_hash_contains(struct ggml_hash_set hash_set, struct ggml_tensor * key) {
    size_t i = ggml_hash_find(hash_set, key);
    return i != GGML_HASHTABLE_FULL && hash_set.keys[i] == key;
}

size_t ggml_hash_insert(struct ggml_hash_set hash_set, struct ggml_tensor * key) {
    size_t i = ggml_hash_find(hash_set, key);

    GGML_ASSERT(i != GGML_HASHTABLE_FULL);

    if (hash_set.keys[i] == key) {
        return GGML_HASHTABLE_ALREADY_EXISTS;
    }

    // insert
    GGML_ASSERT(hash_set.keys[i] == NULL);
    hash_set.keys[i] = key;
    return i;
}

size_t ggml_hash_find_or_insert(struct ggml_hash_set hash_set, struct ggml_tensor * key) {
    size_t i = ggml_hash_find(hash_set, key);

    GGML_ASSERT(i != GGML_HASHTABLE_FULL);

    hash_set.keys[i] = key;
    return i;
}

struct ggml_hash_set ggml_hash_set_new(size_t size) {
    size = ggml_hash_size(size);
    struct ggml_hash_set result;
    result.size = size;
    result.keys = GGML_MALLOC(sizeof(struct ggml_tensor *) * size);
    memset(result.keys, 0, sizeof(struct ggml_tensor *) * size);
    return result;
}

static void ggml_hash_set_free(struct ggml_hash_set hash_set) {
    GGML_FREE(hash_set.keys);
}
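// hash_map pairs the key set with a parallel array of values, giving a
// tensor -> tensor mapping (used below to map original graph nodes to their
// recomputed clones)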
struct hash_map {
    struct ggml_hash_set set;
    struct ggml_tensor ** vals;
};

static struct hash_map * ggml_new_hash_map(size_t size) {
    struct hash_map * result = GGML_MALLOC(sizeof(struct hash_map));
    result->set = ggml_hash_set_new(size);
    result->vals = GGML_MALLOC(sizeof(struct ggml_tensor *) * result->set.size);
    memset(result->vals, 0, sizeof(struct ggml_tensor *) * result->set.size);
    return result;
}

static void ggml_hash_map_free(struct hash_map * map) {
    ggml_hash_set_free(map->set);
    GGML_FREE(map->vals);
    GGML_FREE(map);
}
// gradient checkpointing
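// rather than keeping every intermediate activation alive for the backward
// pass, only the tensors passed as checkpoints are kept; every other node
// needed by the backward graph is recomputed (cloned) from the checkpoints,
// trading extra compute for lower peak memory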
static struct ggml_tensor * ggml_recompute_graph_node(
        struct ggml_context * ctx,
        struct ggml_cgraph  * graph,
        struct hash_map     * replacements,
        struct ggml_tensor  * node) {

    if (node == NULL) {
        return NULL;
    }

    if (node->flags & GGML_TENSOR_FLAG_PARAM) {
        return node;
    }

    if (!ggml_hash_contains(graph->visited_hash_table, node)) {
        return node;
    }

    int count_children = 0;
    for (int k = 0; k < GGML_MAX_SRC; ++k) {
        if (node->src[k]) {
            ++count_children;
        }
    }

    if (count_children == 0) {
        return node;
    }

    size_t i = ggml_hash_find(replacements->set, node);
    GGML_ASSERT(i != GGML_HASHTABLE_FULL); // assert that not full
    if (replacements->set.keys[i] == node) {
        return replacements->vals[i];
    }

    struct ggml_tensor * clone = ggml_new_tensor(ctx, node->type, GGML_MAX_DIMS, node->ne);

    // insert clone into replacements
    GGML_ASSERT(replacements->set.keys[i] == NULL); // assert that we don't overwrite
    replacements->set.keys[i] = node;
    replacements->vals[i] = clone;

    clone->op    = node->op;
    clone->grad  = node->grad;
    clone->flags = node->flags;
    clone->extra = node->extra;
    for (int k = 0; k < GGML_MAX_DIMS; ++k) {
        clone->nb[k] = node->nb[k];
    }
    for (int k = 0; k < GGML_MAX_SRC; ++k) {
        clone->src[k] = ggml_recompute_graph_node(ctx, graph, replacements, node->src[k]);
    }
    if (node->view_src != NULL) {
        clone->data = (node->view_src->data == NULL)
                        ? NULL // view_src not yet allocated
                        : (char *) node->view_src->data // view_src already allocated
                                 + node->view_offs;
        clone->view_src  = node->view_src;
        clone->view_offs = node->view_offs;
    }

    GGML_ASSERT(sizeof(node->op_params) == sizeof(int32_t) * (GGML_MAX_OP_PARAMS / sizeof(int32_t)));
    GGML_ASSERT(sizeof(node->name)      == GGML_MAX_NAME);
    memcpy(clone->op_params, node->op_params, sizeof(node->op_params));
    ggml_format_name(clone, "%s (clone)", ggml_get_name(node));

    return clone;
}
void ggml_build_backward_gradient_checkpointing(
        struct ggml_context   * ctx,
        struct ggml_cgraph    * gf,
        struct ggml_cgraph    * gb,
        struct ggml_cgraph    * gb_tmp,
        struct ggml_tensor  * * checkpoints,
        int                     n_checkpoints) {
    ggml_graph_cpy(gf, gb_tmp);
    ggml_build_backward_expand(ctx, gf, gb_tmp, true);

    if (n_checkpoints <= 0) {
        ggml_graph_cpy(gb_tmp, gb);
        return;
    }

    struct hash_map * replacements = ggml_new_hash_map(gf->n_nodes + gf->n_leafs + n_checkpoints);

    // insert checkpoints in replacements
    for (int i = 0; i < n_checkpoints; ++i) {
        size_t k = ggml_hash_find(replacements->set, checkpoints[i]);
        GGML_ASSERT(k != GGML_HASHTABLE_FULL); // assert that not full
        GGML_ASSERT(replacements->set.keys[k] == NULL); // assert that we don't overwrite
        replacements->set.keys[k] = checkpoints[i];
        replacements->vals[k]     = checkpoints[i];
    }

    ggml_graph_cpy(gf, gb);
    // rewrite gb_tmp->nodes[gf->n_nodes:gb_tmp->n_nodes],
    // replacing references to gb_tmp->nodes[0:gf->n_nodes] ( == gf->nodes[0:gf->n_nodes]),
    // by recomputing them from checkpoints
    for (int i = gf->n_nodes; i < gb_tmp->n_nodes; ++i) {
        struct ggml_tensor * node = gb_tmp->nodes[i];
        for (int k = 0; k < GGML_MAX_SRC; ++k) {
            // insert new tensors recomputing src, reusing already made replacements,
            // remember replacements: remember new tensors with mapping from corresponding gf nodes
            // recurse for input tensors,
            // unless (i.e. terminating when) input tensors are replacements (like checkpoints)
            node->src[k] = ggml_recompute_graph_node(ctx, gf, replacements, node->src[k]);
        }
        // insert rewritten backward node with replacements made into resulting backward graph gb
        ggml_build_forward_expand(gb, node);
    }

    ggml_hash_map_free(replacements);
}
// helpers to update a gradient tensor, handling the case where the accumulator a is still the initial zero-valued gradient (tracked in zero_table)
static struct ggml_tensor * ggml_add_or_set(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, struct ggml_hash_set zero_table) {
    if (ggml_hash_contains(zero_table, a)) {
        return b;
    } else {
        return ggml_add_impl(ctx, a, b, false);
    }
}

static struct ggml_tensor * ggml_acc_or_set(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, size_t nb1, size_t nb2, size_t nb3, size_t offset, struct ggml_hash_set zero_table) {
    if (ggml_hash_contains(zero_table, a)) {
        struct ggml_tensor * a_zero = ggml_scale(ctx, a, 0.0f);
        return ggml_acc_impl(ctx, a_zero, b, nb1, nb2, nb3, offset, false);
    } else {
        return ggml_acc_impl(ctx, a, b, nb1, nb2, nb3, offset, false);
    }
}

static struct ggml_tensor * ggml_add1_or_set(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, struct ggml_hash_set zero_table) {
    if (ggml_hash_contains(zero_table, a)) {
        return ggml_repeat(ctx, b, a);
    } else {
        return ggml_add1_impl(ctx, a, b, false);
    }
}

static struct ggml_tensor * ggml_sub_or_set(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, struct ggml_hash_set zero_table) {
    if (ggml_hash_contains(zero_table, a)) {
        return ggml_neg(ctx, b);
    } else {
        return ggml_sub_impl(ctx, a, b, false);
    }
}
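// accumulate the gradients of tensor's sources according to the chain rule;
// zero_table marks gradient tensors that are still zero-valued, so the
// *_or_set helpers above can replace them outright instead of adding to them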
static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor * tensor, struct ggml_hash_set zero_table) {
    struct ggml_tensor * src0 = tensor->src[0];
    struct ggml_tensor * src1 = tensor->src[1];

    switch (tensor->op) {
        case GGML_OP_DUP:
            {
                if (src0->grad) {
                    src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
                }
            } break;
        case GGML_OP_ADD:
            {
                if (src0->grad) {
                    src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
                }
                if (src1->grad) {
                    src1->grad = ggml_add_or_set(ctx, src1->grad, tensor->grad, zero_table);
                }
            } break;
        case GGML_OP_ADD1:
            {
                if (src0->grad) {
                    src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
                }
                if (src1->grad) {
                    src1->grad = ggml_add_or_set(ctx,
                        src1->grad,
                        ggml_mean(ctx, tensor->grad), // TODO: should probably be sum instead of mean
                        zero_table);
                }
            } break;
        case GGML_OP_ACC:
            {
                if (src0->grad) {
                    src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
                }
                if (src1->grad) {
                    const size_t nb1    = ((int32_t *) tensor->op_params)[0];
                    const size_t nb2    = ((int32_t *) tensor->op_params)[1];
                    const size_t nb3    = ((int32_t *) tensor->op_params)[2];
                    const size_t offset = ((int32_t *) tensor->op_params)[3];

                    struct ggml_tensor * tensor_grad_view = ggml_view_4d(ctx,
                        tensor->grad,
                        src1->grad->ne[0],
                        src1->grad->ne[1],
                        src1->grad->ne[2],
                        src1->grad->ne[3],
                        nb1, nb2, nb3, offset);

                    src1->grad =
                        ggml_add_or_set(ctx,
                            src1->grad,
                            ggml_reshape(ctx,
                                ggml_cont(ctx, tensor_grad_view),
                                src1->grad),
                            zero_table);
                }
            } break;
        case GGML_OP_SUB:
            {
                if (src0->grad) {
                    src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
                }
                if (src1->grad) {
                    src1->grad = ggml_sub_or_set(ctx, src1->grad, tensor->grad, zero_table);
                }
            } break;
        case GGML_OP_MUL:
            {
                if (src0->grad) {
                    src0->grad =
                        ggml_add_or_set(ctx,
                            src0->grad,
                            ggml_mul(ctx, src1, tensor->grad),
                            zero_table);
                }
                if (src1->grad) {
                    src1->grad =
                        ggml_add_or_set(ctx,
                            src1->grad,
                            ggml_mul(ctx, src0, tensor->grad),
                            zero_table);
                }
            } break;
        case GGML_OP_DIV:
            {
                if (src0->grad) {
                    src0->grad =
                        ggml_add_or_set(ctx,
                            src0->grad,
                            ggml_div(ctx, tensor->grad, src1),
                            zero_table);
                }
                if (src1->grad) {
                    src1->grad =
                        ggml_sub_or_set(ctx,
                            src1->grad,
                            ggml_mul(ctx,
                                tensor->grad,
                                ggml_div(ctx, tensor, src1)),
                            zero_table);
                }
            } break;
        case GGML_OP_SQR:
            {
                if (src0->grad) {
                    src0->grad =
                        ggml_add_or_set(ctx,
                            src0->grad,
                            ggml_scale(ctx,
                                ggml_mul(ctx, src0, tensor->grad),
                                2.0f),
                            zero_table);
                }
            } break;
        case GGML_OP_SQRT:
            {
                if (src0->grad) {
                    src0->grad =
                        ggml_add_or_set(ctx,
                            src0->grad,
                            ggml_scale(ctx,
                                ggml_div(ctx,
                                    tensor->grad,
                                    tensor),
                                0.5f),
                            zero_table);
                }
            } break;
        case GGML_OP_LOG:
            {
                if (src0->grad) {
                    src0->grad =
                        ggml_add_or_set(ctx,
                            src0->grad,
                            ggml_div(ctx,
                                tensor->grad,
                                src0),
                            zero_table);
                }
            } break;
        case GGML_OP_SUM:
            {
                if (src0->grad) {
                    src0->grad =
                        ggml_add1_or_set(ctx,
                            src0->grad,
                            tensor->grad,
                            zero_table);
                }
            } break;
        case GGML_OP_SUM_ROWS:
            {
                if (src0->grad) {
                    src0->grad =
                        ggml_add_or_set(ctx,
                            src0->grad,
                            ggml_repeat(ctx,
                                tensor->grad,
                                src0->grad),
                            zero_table);
                }
            } break;
        case GGML_OP_MEAN:
        case GGML_OP_ARGMAX:
            {
                GGML_ASSERT(false); // TODO: implement
            } break;
        case GGML_OP_REPEAT:
            {
                // necessary for llama
                if (src0->grad) {
                    src0->grad = ggml_add_or_set(ctx,
                        src0->grad,
                        ggml_repeat_back(ctx, tensor->grad, src0->grad),
                        zero_table);
                }
            } break;
        case GGML_OP_REPEAT_BACK:
            {
                if (src0->grad) {
                    // TODO: test this
                    src0->grad = ggml_add_or_set(ctx,
                        src0->grad,
                        ggml_repeat(ctx, tensor->grad, src0->grad),
                        zero_table);
                }
            } break;
        case GGML_OP_CONCAT:
            {
                GGML_ASSERT(false); // TODO: implement
            } break;
        case GGML_OP_SILU_BACK:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_NORM:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_RMS_NORM:
            {
                // necessary for llama
                if (src0->grad) {
                    float eps;
                    memcpy(&eps, tensor->op_params, sizeof(float));

                    src0->grad = ggml_add_or_set(ctx,
                        src0->grad,
                        ggml_rms_norm_back(ctx, src0, tensor->grad, eps),
                        zero_table);
                }
            } break;
        case GGML_OP_RMS_NORM_BACK:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_GROUP_NORM:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_MUL_MAT:
            {
                // https://cs231n.github.io/optimization-2/#staged
                // # forward pass
                // s0 = np.random.randn(5, 10)
                // s1 = np.random.randn(10, 3)
                // t = s0.dot(s1)

                // # now suppose we had the gradient on t from above in the circuit
                // dt = np.random.randn(*t.shape) # same shape as t
                // ds0 = dt.dot(s1.T) #.T gives the transpose of the matrix
                // ds1 = t.T.dot(dt)

                // tensor.shape [m,p,qq,rr]
                // src0.shape   [n,m,q1,r1]
                // src1.shape   [n,p,qq,rr]

                // necessary for llama
                if (src0->grad) {
                    struct ggml_tensor * s1_tg =
                        ggml_out_prod(ctx, // [n,m,qq,rr]
                            src1,          // [n,p,qq,rr]
                            tensor->grad); // [m,p,qq,rr]
                    const int64_t qq = s1_tg->ne[2];
                    const int64_t rr = s1_tg->ne[3];
                    const int64_t q1 = src0->ne[2];
                    const int64_t r1 = src0->ne[3];
                    const bool ne2_broadcasted = qq > q1;
                    const bool ne3_broadcasted = rr > r1;
                    if (ne2_broadcasted || ne3_broadcasted) {
                        // sum broadcast repetitions of s1_tg into shape of src0
                        s1_tg = ggml_repeat_back(ctx, s1_tg, src0);
                    }
                    src0->grad =
                        ggml_add_or_set(ctx,
                            src0->grad, // [n,m,q1,r1]
                            s1_tg,      // [n,m,q1,r1]
                            zero_table);
                }
                if (src1->grad) {
                    src1->grad =
                        ggml_add_or_set(ctx,
                            src1->grad, // [n,p,qq,rr]
                            // ggml_mul_mat(ctx,                   // [n,p,qq,rr]
                            //     ggml_cont(ctx,                  // [m,n,q1,r1]
                            //         ggml_transpose(ctx, src0)), // [m,n,q1,r1]
                            //     tensor->grad),                  // [m,p,qq,rr]

                            // // when src0 is bigger than tensor->grad (this is mostly the case in llama),
                            // // avoid transpose of src0, rather transpose smaller tensor->grad
                            // // and then use ggml_out_prod
                            ggml_out_prod(ctx,      // [n,p,qq,rr]
                                src0,               // [n,m,q1,r1]
                                ggml_transpose(ctx, // [p,m,qq,rr]
                                    tensor->grad)), // [m,p,qq,rr]
                            zero_table);
                }
            } break;
        case GGML_OP_MUL_MAT_ID:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_OUT_PROD:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_SCALE:
            {
                // necessary for llama
                if (src0->grad) {
                    float s;
                    memcpy(&s, tensor->op_params, sizeof(float));

                    src0->grad =
                        ggml_add_or_set(ctx,
                            src0->grad,
                            ggml_scale_impl(ctx, tensor->grad, s, false),
                            zero_table);
                }
            } break;
        case GGML_OP_SET:
            {
                const size_t nb1    = ((int32_t *) tensor->op_params)[0];
                const size_t nb2    = ((int32_t *) tensor->op_params)[1];
                const size_t nb3    = ((int32_t *) tensor->op_params)[2];
                const size_t offset = ((int32_t *) tensor->op_params)[3];

                struct ggml_tensor * tensor_grad_view = NULL;

                if (src0->grad || src1->grad) {
                    GGML_ASSERT(src0->type == tensor->type);
                    GGML_ASSERT(tensor->grad->type == tensor->type);
                    GGML_ASSERT(tensor->grad->type == src1->grad->type);

                    tensor_grad_view = ggml_view_4d(ctx,
                        tensor->grad,
                        src1->grad->ne[0],
                        src1->grad->ne[1],
                        src1->grad->ne[2],
                        src1->grad->ne[3],
                        nb1, nb2, nb3, offset);
                }

                if (src0->grad) {
                    src0->grad = ggml_add_or_set(ctx,
                        src0->grad,
                        ggml_acc_impl(ctx,
                            tensor->grad,
                            ggml_neg(ctx, tensor_grad_view),
                            nb1, nb2, nb3, offset, false),
                        zero_table);
                }

                if (src1->grad) {
                    src1->grad =
                        ggml_add_or_set(ctx,
                            src1->grad,
                            ggml_reshape(ctx,
                                ggml_cont(ctx, tensor_grad_view),
                                src1->grad),
                            zero_table);
                }
            } break;
        case GGML_OP_CPY:
            {
                // necessary for llama
                // cpy overwrites value of src1 by src0 and returns view(src1)
                // the overwriting is mathematically equivalent to:
                // tensor = src0 * 1 + src1 * 0
                if (src0->grad) {
                    // dsrc0 = dtensor * 1
                    src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
                }
                if (src1->grad) {
                    // dsrc1 = dtensor * 0 -> noop
                }
            } break;
        case GGML_OP_CONT:
            {
                // same as cpy
                if (src0->grad) {
                    GGML_ASSERT(ggml_is_contiguous(src0->grad));
                    GGML_ASSERT(ggml_is_contiguous(tensor->grad));
                    src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
                }
            } break;
        case GGML_OP_RESHAPE:
            {
                // necessary for llama
                if (src0->grad) {
                    src0->grad =
                        ggml_add_or_set(ctx, src0->grad,
                            ggml_reshape(ctx,
                                ggml_is_contiguous(tensor->grad)
                                    ? tensor->grad
                                    : ggml_cont(ctx, tensor->grad),
                                src0->grad),
                            zero_table);
                }
            } break;
        case GGML_OP_VIEW:
            {
                // necessary for llama
                if (src0->grad) {
                    size_t offset;

                    memcpy(&offset, tensor->op_params, sizeof(offset));

                    size_t nb1 = tensor->nb[1];
                    size_t nb2 = tensor->nb[2];
                    size_t nb3 = tensor->nb[3];

                    if (src0->type != src0->grad->type) {
                        // gradient is typically F32, but src0 could be other type
                        size_t ng = ggml_element_size(src0->grad);
                        size_t n0 = ggml_element_size(src0);
                        GGML_ASSERT(offset % n0 == 0);
                        GGML_ASSERT(nb1 % n0 == 0);
                        GGML_ASSERT(nb2 % n0 == 0);
                        GGML_ASSERT(nb3 % n0 == 0);
                        offset = (offset / n0) * ng;
                        nb1 = (nb1 / n0) * ng;
                        nb2 = (nb2 / n0) * ng;
                        nb3 = (nb3 / n0) * ng;
                    }

                    src0->grad = ggml_acc_or_set(ctx, src0->grad, tensor->grad, nb1, nb2, nb3, offset, zero_table);
                }
            } break;
        case GGML_OP_PERMUTE:
            {
                // necessary for llama
                if (src0->grad) {
                    int32_t * axes = (int32_t *) tensor->op_params;
                    int axis0 = axes[0] & 0x3;
                    int axis1 = axes[1] & 0x3;
                    int axis2 = axes[2] & 0x3;
                    int axis3 = axes[3] & 0x3;
                    int axes_backward[4] = {0,0,0,0};
                    axes_backward[axis0] = 0;
                    axes_backward[axis1] = 1;
                    axes_backward[axis2] = 2;
                    axes_backward[axis3] = 3;
                    src0->grad =
                        ggml_add_or_set(ctx, src0->grad,
                            ggml_permute(ctx,
                                tensor->grad,
                                axes_backward[0],
                                axes_backward[1],
                                axes_backward[2],
                                axes_backward[3]),
                            zero_table);
                }
            } break;
        case GGML_OP_TRANSPOSE:
            {
                // necessary for llama
                if (src0->grad) {
                    src0->grad =
                        ggml_add_or_set(ctx, src0->grad,
                            ggml_transpose(ctx, tensor->grad),
                            zero_table);
                }
            } break;
        case GGML_OP_GET_ROWS:
            {
                // necessary for llama (only for tokenizer)
                if (src0->grad) {
                    src0->grad =
                        ggml_add_or_set(ctx, src0->grad,
                            // last ggml_get_rows_back argument src0->grad is only
                            // necessary to setup correct output shape
                            ggml_get_rows_back(ctx, tensor->grad, src1, src0->grad),
                            zero_table);
                }
                if (src1->grad) {
                    // noop
                }
            } break;
        case GGML_OP_GET_ROWS_BACK:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_DIAG:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_DIAG_MASK_INF:
            {
                // necessary for llama
                if (src0->grad) {
                    const int n_past = ((int32_t *) tensor->op_params)[0];
                    src0->grad =
                        ggml_add_or_set(ctx, src0->grad,
                            /* ggml_diag_mask_inf_impl() shouldn't be here */
                            /* ref:  https://github.com/ggerganov/llama.cpp/pull/4203#discussion_r1412377992 */
                            ggml_diag_mask_zero_impl(ctx, tensor->grad, n_past, false),
                            zero_table);
                }
            } break;
        case GGML_OP_DIAG_MASK_ZERO:
            {
                // necessary for llama
                if (src0->grad) {
                    const int n_past = ((int32_t *) tensor->op_params)[0];
                    src0->grad =
                        ggml_add_or_set(ctx, src0->grad,
                            ggml_diag_mask_zero_impl(ctx, tensor->grad, n_past, false),
                            zero_table);
                }
            } break;
        case GGML_OP_SOFT_MAX:
            {
                // necessary for llama
                if (src0->grad) {
                    src0->grad =
                        ggml_add_or_set(ctx, src0->grad,
                            ggml_soft_max_back(ctx, tensor->grad, tensor),
                            zero_table);
                }
            } break;
        case GGML_OP_SOFT_MAX_BACK:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
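        // rope: the forward parameters are recovered from op_params and the
        // gradient is rotated back with ggml_rope_back (and vice versa for
        // GGML_OP_ROPE_BACK, which applies the forward rotation)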
        case GGML_OP_ROPE:
            {
                // necessary for llama
                if (src0->grad) {
                    //const int n_past = ((int32_t *) tensor->op_params)[0];
                    const int n_dims     = ((int32_t *) tensor->op_params)[1];
                    const int mode       = ((int32_t *) tensor->op_params)[2];
                    const int n_ctx      = ((int32_t *) tensor->op_params)[3];
                    const int n_orig_ctx = ((int32_t *) tensor->op_params)[4];
                    float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow, xpos_base, xpos_down;

                    memcpy(&freq_base,   (int32_t *) tensor->op_params +  5, sizeof(float));
                    memcpy(&freq_scale,  (int32_t *) tensor->op_params +  6, sizeof(float));
                    memcpy(&ext_factor,  (int32_t *) tensor->op_params +  7, sizeof(float));
                    memcpy(&attn_factor, (int32_t *) tensor->op_params +  8, sizeof(float));
                    memcpy(&beta_fast,   (int32_t *) tensor->op_params +  9, sizeof(float));
                    memcpy(&beta_slow,   (int32_t *) tensor->op_params + 10, sizeof(float));
                    memcpy(&xpos_base,   (int32_t *) tensor->op_params + 11, sizeof(float));
                    memcpy(&xpos_down,   (int32_t *) tensor->op_params + 12, sizeof(bool));

                    src0->grad = ggml_add_or_set(ctx,
                        src0->grad,
                        ggml_rope_back(ctx,
                            tensor->grad,
                            src1,
                            n_dims,
                            mode,
                            n_ctx,
                            n_orig_ctx,
                            freq_base,
                            freq_scale,
                            ext_factor,
                            attn_factor,
                            beta_fast,
                            beta_slow,
                            xpos_base,
                            xpos_down),
                        zero_table);
                }
            } break;
        case GGML_OP_ROPE_BACK:
            {
                if (src0->grad) {
                    //const int n_past = ((int32_t *) tensor->op_params)[0];
                    const int n_dims     = ((int32_t *) tensor->op_params)[1];
                    const int mode       = ((int32_t *) tensor->op_params)[2];
                    const int n_ctx      = ((int32_t *) tensor->op_params)[3];
                    const int n_orig_ctx = ((int32_t *) tensor->op_params)[4];
                    float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow, xpos_base, xpos_down;

                    memcpy(&freq_base,   (int32_t *) tensor->op_params +  5, sizeof(float));
                    memcpy(&freq_scale,  (int32_t *) tensor->op_params +  6, sizeof(float));
                    memcpy(&ext_factor,  (int32_t *) tensor->op_params +  7, sizeof(float));
                    memcpy(&attn_factor, (int32_t *) tensor->op_params +  8, sizeof(float));
                    memcpy(&beta_fast,   (int32_t *) tensor->op_params +  9, sizeof(float));
                    memcpy(&beta_slow,   (int32_t *) tensor->op_params + 10, sizeof(float));
                    memcpy(&xpos_base,   (int32_t *) tensor->op_params + 11, sizeof(float));
                    memcpy(&xpos_down,   (int32_t *) tensor->op_params + 12, sizeof(bool));

                    src0->grad = ggml_add_or_set(ctx,
                        src0->grad,
                        ggml_rope_impl(ctx,
                            tensor->grad,
                            src1,
                            n_dims,
                            mode,
                            n_ctx,
                            n_orig_ctx,
                            freq_base,
                            freq_scale,
                            ext_factor,
                            attn_factor,
                            beta_fast,
                            beta_slow,
                            xpos_base,
                            xpos_down,
                            false),
                        zero_table);
                }
            } break;
        case GGML_OP_CLAMP:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_CONV_TRANSPOSE_1D:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_IM2COL:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_CONV_TRANSPOSE_2D:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_POOL_1D:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_POOL_2D:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_UPSCALE:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_PAD:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_ARANGE:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_TIMESTEP_EMBEDDING:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_ARGSORT:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_LEAKY_RELU:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
  15226. case GGML_OP_FLASH_ATTN:
  15227. case GGML_OP_FLASH_ATTN_EXT:
  15228. {
  15229. struct ggml_tensor * flash_grad = NULL;
  15230. if (src0->grad || src1->grad || tensor->src[2]->grad) {
  15231. int32_t t = ggml_get_op_params_i32(tensor, 0);
  15232. GGML_ASSERT(t == 0 || t == 1);
  15233. bool masked = t != 0;
  15234. flash_grad =
  15235. ggml_flash_attn_back(ctx,
  15236. src0,
  15237. src1,
  15238. tensor->src[2],
  15239. tensor->grad,
  15240. masked);
  15241. }
  15242. struct ggml_tensor * src2 = tensor->src[2];
  15243. const int64_t elem_q = ggml_nelements(src0);
  15244. const int64_t elem_k = ggml_nelements(src1);
  15245. const int64_t elem_v = ggml_nelements(src2);
  15246. enum ggml_type result_type = flash_grad->type;
  15247. GGML_ASSERT(ggml_blck_size(result_type) == 1);
  15248. const size_t tsize = ggml_type_size(result_type);
  15249. const size_t offs_q = 0;
  15250. const size_t offs_k = offs_q + GGML_PAD(elem_q * tsize, GGML_MEM_ALIGN);
  15251. const size_t offs_v = offs_k + GGML_PAD(elem_k * tsize, GGML_MEM_ALIGN);
  15252. if (src0->grad) {
  15253. struct ggml_tensor * view_q = ggml_view_1d(ctx, flash_grad, elem_q, offs_q);
  15254. struct ggml_tensor * grad_q = ggml_reshape(ctx, view_q, src0);
  15255. src0->grad = ggml_add_or_set(ctx,
  15256. src0->grad,
  15257. grad_q,
  15258. zero_table);
  15259. }
  15260. if (src1->grad) {
  15261. struct ggml_tensor * view_k = ggml_view_1d(ctx, flash_grad, elem_k, offs_k);
  15262. struct ggml_tensor * grad_k = ggml_reshape(ctx, view_k, src1);
  15263. src1->grad = ggml_add_or_set(ctx,
  15264. src1->grad,
  15265. grad_k,
  15266. zero_table);
  15267. }
  15268. if (src2->grad) {
  15269. struct ggml_tensor * view_v = ggml_view_1d(ctx, flash_grad, elem_v, offs_v);
  15270. struct ggml_tensor * grad_v = ggml_reshape(ctx, view_v, src2);
  15271. src2->grad = ggml_add_or_set(ctx,
  15272. src2->grad,
  15273. grad_v,
  15274. zero_table);
  15275. }
  15276. } break;
  15277. case GGML_OP_FLASH_FF:
  15278. {
  15279. GGML_ASSERT(false); // not supported
  15280. } break;
  15281. case GGML_OP_FLASH_ATTN_BACK:
  15282. {
  15283. GGML_ASSERT(false); // not supported
  15284. } break;
  15285. case GGML_OP_SSM_CONV:
  15286. case GGML_OP_SSM_SCAN:
  15287. {
  15288. GGML_ASSERT(false); // TODO: not implemented
  15289. } break;
  15290. case GGML_OP_WIN_PART:
  15291. case GGML_OP_WIN_UNPART:
  15292. case GGML_OP_UNARY:
  15293. {
  15294. switch (ggml_get_unary_op(tensor)) {
  15295. case GGML_UNARY_OP_ABS:
  15296. {
  15297. if (src0->grad) {
  15298. src0->grad =
  15299. ggml_add_or_set(ctx,
  15300. src0->grad,
  15301. ggml_mul(ctx,
  15302. ggml_sgn(ctx, src0),
  15303. tensor->grad),
  15304. zero_table);
  15305. }
  15306. } break;
  15307. case GGML_UNARY_OP_SGN:
  15308. {
  15309. if (src0->grad) {
  15310. // noop
  15311. }
  15312. } break;
  15313. case GGML_UNARY_OP_NEG:
  15314. {
  15315. if (src0->grad) {
  15316. src0->grad = ggml_sub_or_set(ctx, src0->grad, tensor->grad, zero_table);
  15317. }
  15318. } break;
  15319. case GGML_UNARY_OP_STEP:
  15320. {
  15321. if (src0->grad) {
  15322. // noop
  15323. }
  15324. } break;
  15325. case GGML_UNARY_OP_TANH:
  15326. {
  15327. GGML_ASSERT(false); // TODO: not implemented
  15328. } break;
  15329. case GGML_UNARY_OP_ELU:
  15330. {
  15331. GGML_ASSERT(false); // TODO: not implemented
  15332. } break;
  15333. case GGML_UNARY_OP_RELU:
  15334. {
  15335. if (src0->grad) {
  15336. src0->grad = ggml_add_or_set(ctx,
  15337. src0->grad,
  15338. ggml_mul(ctx,
  15339. ggml_step(ctx, src0),
  15340. tensor->grad),
  15341. zero_table);
  15342. }
  15343. } break;
  15344. case GGML_UNARY_OP_SIGMOID:
  15345. {
  15346. GGML_ASSERT(false); // TODO: not implemented
  15347. } break;
  15348. case GGML_UNARY_OP_GELU:
  15349. {
  15350. GGML_ASSERT(false); // TODO: not implemented
  15351. } break;
  15352. case GGML_UNARY_OP_GELU_QUICK:
  15353. {
  15354. GGML_ASSERT(false); // TODO: not implemented
  15355. } break;
  15356. case GGML_UNARY_OP_SILU:
  15357. {
  15358. // necessary for llama
  15359. if (src0->grad) {
  15360. src0->grad = ggml_add_or_set(ctx,
  15361. src0->grad,
  15362. ggml_silu_back(ctx, src0, tensor->grad),
  15363. zero_table);
  15364. }
  15365. } break;
  15366. default:
  15367. GGML_ASSERT(false);
  15368. }
  15369. } break;
  15370. case GGML_OP_GET_REL_POS:
  15371. case GGML_OP_ADD_REL_POS:
  15372. case GGML_OP_MAP_UNARY:
  15373. case GGML_OP_MAP_BINARY:
  15374. case GGML_OP_MAP_CUSTOM1_F32:
  15375. case GGML_OP_MAP_CUSTOM2_F32:
  15376. case GGML_OP_MAP_CUSTOM3_F32:
  15377. case GGML_OP_MAP_CUSTOM1:
  15378. case GGML_OP_MAP_CUSTOM2:
  15379. case GGML_OP_MAP_CUSTOM3:
  15380. {
  15381. GGML_ASSERT(false); // not supported
  15382. } break;
  15383. case GGML_OP_CROSS_ENTROPY_LOSS:
  15384. {
  15385. if (src0->grad) {
  15386. src0->grad = ggml_add_or_set(ctx,
  15387. src0->grad,
  15388. ggml_cross_entropy_loss_back(ctx,
  15389. src0,
  15390. src1,
  15391. tensor->grad),
  15392. zero_table);
  15393. }
  15394. } break;
  15395. case GGML_OP_CROSS_ENTROPY_LOSS_BACK:
  15396. {
  15397. GGML_ASSERT(false); // not supported
  15398. } break;
  15399. case GGML_OP_NONE:
  15400. {
  15401. // nop
  15402. } break;
  15403. case GGML_OP_COUNT:
  15404. {
  15405. GGML_ASSERT(false);
  15406. } break;
  15407. }
  15408. for (int i = 0; i < GGML_MAX_SRC; ++i) {
  15409. if (tensor->src[i] && tensor->src[i]->grad) {
  15410. GGML_ASSERT(ggml_are_same_shape(tensor->src[i], tensor->src[i]->grad));
  15411. }
  15412. }
  15413. }
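
// topological sort of the graph by recursion: a post-order DFS over the src[]
// edges that appends constant (leaf) tensors to cgraph->leafs and operation
// results to cgraph->nodes, using the visited hash table to skip tensors
// that were already inserted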
static void ggml_visit_parents(struct ggml_cgraph * cgraph, struct ggml_tensor * node) {
    if (node->grad == NULL) {
        // this usually happens when we generate intermediate nodes from constants in the backward pass
        // it can also happen during forward pass, if the user performs computations with constants
        if (node->op != GGML_OP_NONE) {
            //GGML_PRINT_DEBUG("%s: warning: node %p has no grad, but op %d\n", __func__, (void *) node, node->op);
        }
    }

    // check if already visited
    if (ggml_hash_insert(cgraph->visited_hash_table, node) == GGML_HASHTABLE_ALREADY_EXISTS) {
        return;
    }

    for (int i = 0; i < GGML_MAX_SRC; ++i) {
        const int k =
            (cgraph->order == GGML_CGRAPH_EVAL_ORDER_LEFT_TO_RIGHT) ? i :
            (cgraph->order == GGML_CGRAPH_EVAL_ORDER_RIGHT_TO_LEFT) ? (GGML_MAX_SRC-1-i) :
            /* unknown order, just fall back to using i */ i;
        if (node->src[k]) {
            ggml_visit_parents(cgraph, node->src[k]);
        }
    }

    if (node->op == GGML_OP_NONE && node->grad == NULL) {
        // reached a leaf node, not part of the gradient graph (e.g. a constant)
        GGML_ASSERT(cgraph->n_leafs < cgraph->size);

        if (strlen(node->name) == 0) {
            ggml_format_name(node, "leaf_%d", cgraph->n_leafs);
        }

        cgraph->leafs[cgraph->n_leafs] = node;
        cgraph->n_leafs++;
    } else {
        GGML_ASSERT(cgraph->n_nodes < cgraph->size);

        if (strlen(node->name) == 0) {
            ggml_format_name(node, "node_%d", cgraph->n_nodes);
        }

        cgraph->nodes[cgraph->n_nodes] = node;
        if (cgraph->grads) {
            cgraph->grads[cgraph->n_nodes] = node->grad;
        }
        cgraph->n_nodes++;
    }
}

static void ggml_build_forward_impl(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor, bool expand) {
    if (!expand) {
        // TODO: this branch isn't accessible anymore, maybe move this to ggml_build_forward_expand
        ggml_graph_clear(cgraph);
    }

    const int n0 = cgraph->n_nodes;
    UNUSED(n0);

    ggml_visit_parents(cgraph, tensor);

    const int n_new = cgraph->n_nodes - n0;
    GGML_PRINT_DEBUG("%s: visited %d new nodes\n", __func__, n_new);

    if (n_new > 0) {
        // the last added node should always be the starting point
        GGML_ASSERT(cgraph->nodes[cgraph->n_nodes - 1] == tensor);
    }
}

void ggml_build_forward_expand(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor) {
    ggml_build_forward_impl(cgraph, tensor, true);
}
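
// build the backward graph gb from the forward graph gf:
// gradients that start out as zero are remembered in zero_table so that
// ggml_add_or_set / ggml_sub_or_set can overwrite them instead of
// accumulating into them, then ggml_compute_backward is invoked for every
// node in reverse topological order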
void ggml_build_backward_expand(struct ggml_context * ctx, struct ggml_cgraph * gf, struct ggml_cgraph * gb, bool keep) {
    GGML_ASSERT(gf->n_nodes > 0);

    // if we are keeping the gradient graph, we have to detach the gradient nodes from the original graph
    if (keep) {
        for (int i = 0; i < gf->n_nodes; i++) {
            struct ggml_tensor * node = gf->nodes[i];

            if (node->grad) {
                node->grad = ggml_dup_tensor(ctx, node);
                gf->grads[i] = node->grad;
            }
        }
    }

    // remember original gradients which start with zero values
    struct ggml_hash_set zero_table = ggml_hash_set_new(gf->size);
    for (int i = 0; i < gf->n_nodes; i++) {
        if (gf->grads[i]) {
            ggml_hash_insert(zero_table, gf->grads[i]);
        }
    }

    for (int i = gf->n_nodes - 1; i >= 0; i--) {
        struct ggml_tensor * node = gf->nodes[i];

        // inplace operations to add gradients are not created by ggml_compute_backward
        // use allocator to automatically make inplace operations
        if (node->grad) {
            ggml_compute_backward(ctx, node, zero_table);
        }
    }

    for (int i = 0; i < gf->n_nodes; i++) {
        struct ggml_tensor * node = gf->nodes[i];

        if (node->flags & GGML_TENSOR_FLAG_PARAM) {
            GGML_PRINT_DEBUG("%s: found root node %p\n", __func__, (void *) node);
            ggml_build_forward_expand(gb, node->grad);
        }
    }

    ggml_hash_set_free(zero_table);
}
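
// a ggml_cgraph and all of its arrays live in one contiguous allocation:
// [struct ggml_cgraph][nodes][leafs][hash keys][grads (optional)]
// the helpers below compute the size of that allocation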
static size_t ggml_graph_nbytes(size_t size, bool grads) {
    size_t nbytes = sizeof(struct ggml_cgraph);
    nbytes += size * sizeof(struct ggml_tensor *) * 2; // leafs + nodes
    if (grads) {
        nbytes += size * sizeof(struct ggml_tensor *); // grads
    }
    nbytes += ggml_hash_size(size * 2) * sizeof(struct ggml_tensor *); // hash set
    return nbytes;
}

size_t ggml_graph_overhead_custom(size_t size, bool grads) {
    return GGML_OBJECT_SIZE + GGML_PAD(ggml_graph_nbytes(size, grads), GGML_MEM_ALIGN);
}

size_t ggml_graph_overhead(void) {
    return ggml_graph_overhead_custom(GGML_DEFAULT_GRAPH_SIZE, false);
}

struct ggml_cgraph * ggml_new_graph_custom(struct ggml_context * ctx, size_t size, bool grads) {
    const size_t obj_size = ggml_graph_nbytes(size, grads);
    struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_TYPE_GRAPH, obj_size);
    struct ggml_cgraph * cgraph = (struct ggml_cgraph *) ((char *) ctx->mem_buffer + obj->offs);

    struct ggml_tensor ** data_start = (struct ggml_tensor **) (cgraph + 1);

    size_t hash_size = ggml_hash_size(size * 2);
    struct ggml_tensor ** nodes_ptr     = data_start;
    struct ggml_tensor ** leafs_ptr     = nodes_ptr + size;
    struct ggml_tensor ** hash_keys_ptr = leafs_ptr + size;
    struct ggml_tensor ** grads_ptr     = grads ? hash_keys_ptr + hash_size : NULL;

    // check that we allocated the correct amount of memory
    assert(obj_size == (size_t) (
        (grads ? (char *)(grads_ptr + size) : (char *)(hash_keys_ptr + hash_size)) - (char *)cgraph));

    memset(hash_keys_ptr, 0, hash_size * sizeof(struct ggml_tensor *));

    *cgraph = (struct ggml_cgraph) {
        /*.size         =*/ size,
        /*.n_nodes      =*/ 0,
        /*.n_leafs      =*/ 0,
        /*.nodes        =*/ nodes_ptr,
        /*.grads        =*/ grads_ptr,
        /*.leafs        =*/ leafs_ptr,
        /*.hash_table   =*/ { hash_size, hash_keys_ptr },
        /*.order        =*/ GGML_CGRAPH_EVAL_ORDER_LEFT_TO_RIGHT,
        /*.perf_runs    =*/ 0,
        /*.perf_cycles  =*/ 0,
        /*.perf_time_us =*/ 0,
    };

    return cgraph;
}

struct ggml_cgraph * ggml_new_graph(struct ggml_context * ctx) {
    return ggml_new_graph_custom(ctx, GGML_DEFAULT_GRAPH_SIZE, false);
}

struct ggml_cgraph ggml_graph_view(struct ggml_cgraph * cgraph0, int i0, int i1) {
    struct ggml_cgraph cgraph = {
        /*.size         =*/ 0,
        /*.n_nodes      =*/ i1 - i0,
        /*.n_leafs      =*/ 0,
        /*.nodes        =*/ cgraph0->nodes + i0,
        /*.grads        =*/ cgraph0->grads ? cgraph0->grads + i0 : NULL,
        /*.leafs        =*/ NULL,
        /*.hash_table   =*/ { 0, NULL },
        /*.order        =*/ cgraph0->order,
        /*.perf_runs    =*/ 0,
        /*.perf_cycles  =*/ 0,
        /*.perf_time_us =*/ 0,
    };

    return cgraph;
}

void ggml_graph_cpy(struct ggml_cgraph * src, struct ggml_cgraph * dst) {
    GGML_ASSERT(dst->size >= src->n_leafs);
    GGML_ASSERT(dst->size >= src->n_nodes);
    GGML_ASSERT(dst->visited_hash_table.size >= src->visited_hash_table.size);

    dst->n_leafs = src->n_leafs;
    dst->n_nodes = src->n_nodes;
    dst->order   = src->order;

    for (int i = 0; i < src->n_leafs; ++i) {
        dst->leafs[i] = src->leafs[i];
    }

    for (int i = 0; i < src->n_nodes; ++i) {
        dst->nodes[i] = src->nodes[i];
    }

    if (src->grads) {
        GGML_ASSERT(dst->grads != NULL);
        for (int i = 0; i < src->n_nodes; ++i) {
            dst->grads[i] = src->grads[i];
        }
    }

    for (size_t i = 0; i < src->visited_hash_table.size; ++i) {
        if (src->visited_hash_table.keys[i]) {
            ggml_hash_insert(dst->visited_hash_table, src->visited_hash_table.keys[i]);
        }
    }
}

struct ggml_cgraph * ggml_graph_dup(struct ggml_context * ctx, struct ggml_cgraph * cgraph) {
    struct ggml_cgraph * result = ggml_new_graph_custom(ctx, cgraph->size, cgraph->grads != NULL);
    ggml_graph_cpy(cgraph, result);
    return result;
}

void ggml_graph_reset(struct ggml_cgraph * cgraph) {
    GGML_ASSERT(cgraph->grads != NULL);

    for (int i = 0; i < cgraph->n_nodes; i++) {
        struct ggml_tensor * grad = cgraph->grads[i];

        if (grad) {
            ggml_set_zero(grad);
        }
    }
}

void ggml_graph_clear(struct ggml_cgraph * cgraph) {
    cgraph->n_leafs = 0;
    cgraph->n_nodes = 0;
    memset(cgraph->visited_hash_table.keys, 0, cgraph->visited_hash_table.size * sizeof(struct ggml_tensor *));
}
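
// rough sketch of how the graph functions above fit together for training
// (illustrative only - the tensor f and the sizes are made up, the calls are
// the ones defined in this file):
//
//   struct ggml_cgraph * gf = ggml_new_graph_custom(ctx, GGML_DEFAULT_GRAPH_SIZE, /*grads =*/ true);
//   struct ggml_cgraph * gb = ggml_graph_dup(ctx, gf);
//
//   ggml_build_forward_expand (gf, f);                 // record the ops that produce f
//   ggml_build_backward_expand(ctx, gf, gb, false);    // append the gradient ops to gb
//
//   ggml_graph_reset(gf);                              // zero all gradients
//   ggml_set_f32(f->grad, 1.0f);                       // seed d(f)/d(f) = 1
//   ggml_graph_compute_with_ctx(ctx, gb, n_threads);   // forward + backward pass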
//
// thread data
//
// synchronization is done via busy loops
// I tried using spin locks, but not sure how to use them correctly - the things I tried were slower than busy loops
//

#ifdef __APPLE__

//#include <os/lock.h>
//
//typedef os_unfair_lock ggml_lock_t;
//
//#define ggml_lock_init(x)    UNUSED(x)
//#define ggml_lock_destroy(x) UNUSED(x)
//#define ggml_lock_lock       os_unfair_lock_lock
//#define ggml_lock_unlock     os_unfair_lock_unlock
//
//#define GGML_LOCK_INITIALIZER OS_UNFAIR_LOCK_INIT

typedef int ggml_lock_t;

#define ggml_lock_init(x)    UNUSED(x)
#define ggml_lock_destroy(x) UNUSED(x)
#define ggml_lock_lock(x)    UNUSED(x)
#define ggml_lock_unlock(x)  UNUSED(x)

#define GGML_LOCK_INITIALIZER 0

typedef pthread_t ggml_thread_t;

#define ggml_thread_create pthread_create
#define ggml_thread_join   pthread_join

#else

//typedef pthread_spinlock_t ggml_lock_t;

//#define ggml_lock_init(x) pthread_spin_init(x, PTHREAD_PROCESS_PRIVATE)
//#define ggml_lock_destroy pthread_spin_destroy
//#define ggml_lock_lock    pthread_spin_lock
//#define ggml_lock_unlock  pthread_spin_unlock

typedef int ggml_lock_t;

#define ggml_lock_init(x)    UNUSED(x)
#define ggml_lock_destroy(x) UNUSED(x)
#if defined(__x86_64__) || (defined(_MSC_VER) && defined(_M_AMD64))
#define ggml_lock_lock(x)    _mm_pause()
#else
#define ggml_lock_lock(x)    UNUSED(x)
#endif
#define ggml_lock_unlock(x)  UNUSED(x)

#define GGML_LOCK_INITIALIZER 0

typedef pthread_t ggml_thread_t;

#define ggml_thread_create pthread_create
#define ggml_thread_join   pthread_join

#endif

// Android's libc implementation "bionic" does not support setting affinity
#if defined(__gnu_linux__)
static void set_numa_thread_affinity(int thread_n) {
    if (!ggml_is_numa()) {
        return;
    }

    int node_num;
    int rv;
    size_t setsize = CPU_ALLOC_SIZE(g_state.numa.total_cpus);

    switch(g_state.numa.numa_strategy) {
        case GGML_NUMA_STRATEGY_DISTRIBUTE:
            // distribute threads across the NUMA nodes round-robin
            node_num = thread_n % g_state.numa.n_nodes;
            break;
        case GGML_NUMA_STRATEGY_ISOLATE:
            // run thread on current_node
            node_num = g_state.numa.current_node;
            break;
        case GGML_NUMA_STRATEGY_NUMACTL:
            // use the cpuset that numactl gave us
            rv = pthread_setaffinity_np(pthread_self(), setsize, &g_state.numa.cpuset);
            if (rv) {
                fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n", strerror(rv));
            }
            return;
        default:
            return;
    }

    struct ggml_numa_node * node = &g_state.numa.nodes[node_num];

    cpu_set_t * cpus = CPU_ALLOC(g_state.numa.total_cpus);
    CPU_ZERO_S(setsize, cpus);
    for (size_t i = 0; i < node->n_cpus; ++i) {
        CPU_SET_S(node->cpus[i], setsize, cpus);
    }

    rv = pthread_setaffinity_np(pthread_self(), setsize, cpus);
    if (rv) {
        fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n", strerror(rv));
    }

    CPU_FREE(cpus);
}

static void clear_numa_thread_affinity(void) {
    if (!ggml_is_numa()) {
        return;
    }

    size_t setsize = CPU_ALLOC_SIZE(g_state.numa.total_cpus);

    cpu_set_t * cpus = CPU_ALLOC(g_state.numa.total_cpus);
    CPU_ZERO_S(setsize, cpus);
    for (unsigned i = 0; i < g_state.numa.total_cpus; ++i) {
        CPU_SET_S(i, setsize, cpus);
    }

    int rv = pthread_setaffinity_np(pthread_self(), setsize, cpus);
    if (rv) {
        fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n", strerror(rv));
    }

    CPU_FREE(cpus);
}
#else
// TODO: Windows etc.
// (the linux implementation may also work on BSD, someone should test)
static void set_numa_thread_affinity(int thread_n) { UNUSED(thread_n); }
static void clear_numa_thread_affinity(void) {}
#endif
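
// state shared by all worker threads during ggml_graph_compute:
// n_active counts the threads still working on the current phase, node_n is
// the index of the graph node being processed and node_task is the current
// task phase (INIT / COMPUTE / FINALIZE); all three are plain atomics that
// the workers spin on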
struct ggml_compute_state_shared {
    const struct ggml_cgraph * cgraph;
    const struct ggml_cplan  * cplan;

    int64_t perf_node_start_cycles;
    int64_t perf_node_start_time_us;

    const int n_threads;

    // synchronization primitives
    atomic_int n_active;  // num active threads
    atomic_int node_n;    // active graph node
    atomic_int node_task; // active graph node task phase

    ggml_abort_callback abort_callback; // abort ggml_graph_compute when true
    void * abort_callback_data;
};

struct ggml_compute_state {
    ggml_thread_t thrd;
    int ith;
    struct ggml_compute_state_shared * shared;
    enum ggml_status ec;
};

static void ggml_graph_compute_perf_stats_node(struct ggml_tensor * node, const struct ggml_compute_state_shared * st) {
    int64_t cycles_cur  = ggml_perf_cycles()  - st->perf_node_start_cycles;
    int64_t time_us_cur = ggml_perf_time_us() - st->perf_node_start_time_us;

    node->perf_runs++;
    node->perf_cycles  += cycles_cur;
    node->perf_time_us += time_us_cur;
}

static int ggml_get_n_tasks(struct ggml_tensor * node, int n_threads, int n_cur_threads) {
    int n_tasks = 0;

    if (ggml_is_empty(node)) {
        // no need to multi-thread a no-op
        n_tasks = 1;
        return n_tasks;
    }

    switch (node->op) {
        case GGML_OP_CPY:
        case GGML_OP_DUP:
        case GGML_OP_ADD:
        case GGML_OP_ADD1:
        case GGML_OP_ACC:
            {
                n_tasks = n_threads;
            } break;
        case GGML_OP_SUB:
        case GGML_OP_SQR:
        case GGML_OP_SQRT:
        case GGML_OP_LOG:
        case GGML_OP_SUM:
        case GGML_OP_SUM_ROWS:
        case GGML_OP_MEAN:
        case GGML_OP_ARGMAX:
        case GGML_OP_REPEAT:
        case GGML_OP_REPEAT_BACK:
        case GGML_OP_LEAKY_RELU:
            {
                n_tasks = 1;
            } break;
        case GGML_OP_UNARY:
            switch (ggml_get_unary_op(node)) {
                case GGML_UNARY_OP_ABS:
                case GGML_UNARY_OP_SGN:
                case GGML_UNARY_OP_NEG:
                case GGML_UNARY_OP_STEP:
                case GGML_UNARY_OP_TANH:
                case GGML_UNARY_OP_ELU:
                case GGML_UNARY_OP_RELU:
                case GGML_UNARY_OP_SIGMOID:
                case GGML_UNARY_OP_HARDSWISH:   // TODO: optimize for multiple threads
                case GGML_UNARY_OP_HARDSIGMOID: // TODO: optimize for multiple threads
                    {
                        n_tasks = 1;
                    } break;
                case GGML_UNARY_OP_GELU:
                case GGML_UNARY_OP_GELU_QUICK:
                case GGML_UNARY_OP_SILU:
                    {
                        n_tasks = n_threads;
                    } break;
                default:
                    GGML_ASSERT(false);
            }
            break;
        case GGML_OP_SILU_BACK:
        case GGML_OP_MUL:
        case GGML_OP_DIV:
        case GGML_OP_NORM:
        case GGML_OP_RMS_NORM:
        case GGML_OP_RMS_NORM_BACK:
        case GGML_OP_GROUP_NORM:
        case GGML_OP_CONCAT:
            {
                n_tasks = n_threads;
            } break;
        case GGML_OP_MUL_MAT:
            {
                n_tasks = n_threads;

                // TODO: use different scheduling for different matrix sizes
                //const int nr0 = ggml_nrows(node->src[0]);
                //const int nr1 = ggml_nrows(node->src[1]);

                //n_tasks = MIN(n_threads, MAX(1, nr0/128));
                //printf("nr0 = %8d, nr1 = %8d, nr0*nr1 = %8d, n_tasks%d\n", nr0, nr1, nr0*nr1, n_tasks);
            } break;
        case GGML_OP_MUL_MAT_ID:
            {
                n_tasks = n_threads;
            } break;
        case GGML_OP_OUT_PROD:
            {
                n_tasks = n_threads;
            } break;
        case GGML_OP_GET_ROWS:
            {
                // FIXME: the cost of launching additional threads decreases performance with GPU offloading
                //n_tasks = MIN(n_threads, ggml_nelements(node->src[1]));
                n_tasks = MIN(n_cur_threads, ggml_nelements(node->src[1]));
            } break;
        case GGML_OP_SCALE:
        case GGML_OP_SET:
        case GGML_OP_CONT:
        case GGML_OP_RESHAPE:
        case GGML_OP_VIEW:
        case GGML_OP_PERMUTE:
        case GGML_OP_TRANSPOSE:
        case GGML_OP_GET_ROWS_BACK:
        case GGML_OP_DIAG:
            {
                n_tasks = 1;
            } break;
        case GGML_OP_DIAG_MASK_ZERO:
        case GGML_OP_DIAG_MASK_INF:
        case GGML_OP_SOFT_MAX_BACK:
        case GGML_OP_ROPE:
        case GGML_OP_ROPE_BACK:
        case GGML_OP_ADD_REL_POS:
            {
                n_tasks = n_threads;
            } break;
        case GGML_OP_CLAMP:
            {
                n_tasks = 1; //TODO
            } break;
        case GGML_OP_SOFT_MAX:
            {
                n_tasks = MIN(n_threads, ggml_nrows(node->src[0]));
            } break;
        case GGML_OP_CONV_TRANSPOSE_1D:
            {
                n_tasks = n_threads;
            } break;
        case GGML_OP_IM2COL:
            {
                n_tasks = n_threads;
            } break;
        case GGML_OP_CONV_TRANSPOSE_2D:
            {
                n_tasks = n_threads;
            } break;
        case GGML_OP_POOL_1D:
        case GGML_OP_POOL_2D:
            {
                n_tasks = 1;
            } break;
        case GGML_OP_UPSCALE:
            {
                n_tasks = n_threads;
            } break;
        case GGML_OP_PAD:
            {
                n_tasks = n_threads;
            } break;
        case GGML_OP_ARANGE:
            {
                n_tasks = n_threads;
            } break;
        case GGML_OP_TIMESTEP_EMBEDDING:
            {
                n_tasks = n_threads;
            } break;
        case GGML_OP_ARGSORT:
            {
                n_tasks = n_threads;
            } break;
        case GGML_OP_FLASH_ATTN:
        case GGML_OP_FLASH_ATTN_EXT:
            {
                n_tasks = n_threads;
            } break;
        case GGML_OP_FLASH_FF:
            {
                n_tasks = n_threads;
            } break;
        case GGML_OP_FLASH_ATTN_BACK:
            {
                n_tasks = n_threads;
            } break;
        case GGML_OP_SSM_CONV:
        case GGML_OP_SSM_SCAN:
            {
                n_tasks = n_threads;
            } break;
        case GGML_OP_WIN_PART:
        case GGML_OP_WIN_UNPART:
        case GGML_OP_GET_REL_POS:
        case GGML_OP_MAP_UNARY:
        case GGML_OP_MAP_BINARY:
        case GGML_OP_MAP_CUSTOM1_F32:
        case GGML_OP_MAP_CUSTOM2_F32:
        case GGML_OP_MAP_CUSTOM3_F32:
            {
                n_tasks = 1;
            } break;
        case GGML_OP_MAP_CUSTOM1:
            {
                struct ggml_map_custom1_op_params p;
                memcpy(&p, node->op_params, sizeof(p));
                if (p.n_tasks == GGML_N_TASKS_MAX) {
                    n_tasks = n_threads;
                } else {
                    n_tasks = MIN(p.n_tasks, n_threads);
                }
            } break;
        case GGML_OP_MAP_CUSTOM2:
            {
                struct ggml_map_custom2_op_params p;
                memcpy(&p, node->op_params, sizeof(p));
                if (p.n_tasks == GGML_N_TASKS_MAX) {
                    n_tasks = n_threads;
                } else {
                    n_tasks = MIN(p.n_tasks, n_threads);
                }
            } break;
        case GGML_OP_MAP_CUSTOM3:
            {
                struct ggml_map_custom3_op_params p;
                memcpy(&p, node->op_params, sizeof(p));
                if (p.n_tasks == GGML_N_TASKS_MAX) {
                    n_tasks = n_threads;
                } else {
                    n_tasks = MIN(p.n_tasks, n_threads);
                }
            } break;
        case GGML_OP_CROSS_ENTROPY_LOSS:
            {
                n_tasks = n_threads;
            } break;
        case GGML_OP_CROSS_ENTROPY_LOSS_BACK:
            {
                n_tasks = n_threads;
            } break;
        case GGML_OP_NONE:
            {
                n_tasks = 1;
            } break;
        case GGML_OP_COUNT:
            {
                GGML_ASSERT(false);
            } break;
        default:
            {
                fprintf(stderr, "%s: op not implemented: ", __func__);
                if (node->op < GGML_OP_COUNT) {
                    fprintf(stderr, "%s\n", ggml_op_name(node->op));
                } else {
                    fprintf(stderr, "%d\n", node->op);
                }
                GGML_ASSERT(false);
            } break;
    }

    assert(n_tasks > 0);

    return n_tasks;
}
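
// busy-wait until the shared node index / task phase changes;
// do_yield trades wake-up latency for lower CPU contention via sched_yield()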
static void ggml_graph_compute_thread_sync_node(int * node_n, struct ggml_compute_state * state, const bool do_yield) {
    // wait for other threads to finish
    const int last_node_n = * node_n;

    while (true) {
        if (do_yield) {
            sched_yield();
        }

        * node_n = atomic_load(&state->shared->node_n);
        if (* node_n != last_node_n) break;
    }
}

static void ggml_graph_compute_thread_sync_task(int * task_phase, struct ggml_compute_state * state, const bool do_yield) {
    // wait for other threads to finish
    const int last_task_phase = * task_phase;

    while (true) {
        if (do_yield) {
            sched_yield();
        }

        * task_phase = atomic_load(&state->shared->node_task);
        if (* task_phase != last_task_phase) break;
    }
}
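
// main body of every worker thread (thread 0 included): the last thread to
// finish a phase becomes the coordinator - it finalizes the previous node,
// executes trivial single-task nodes inline, and publishes the next node and
// task phase for the other threads to pick up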
static thread_ret_t ggml_graph_compute_thread(void * data) {
    struct ggml_compute_state * state = (struct ggml_compute_state *) data;

    const struct ggml_cgraph * cgraph = state->shared->cgraph;
    const struct ggml_cplan  * cplan  = state->shared->cplan;

    const int n_threads = state->shared->n_threads;

    set_numa_thread_affinity(state->ith);

    int node_n     = -1;
    int task_phase = GGML_TASK_TYPE_FINALIZE;

    while (true) {
        if (cplan->abort_callback && cplan->abort_callback(cplan->abort_callback_data)) {
            state->shared->node_n += 1;
            state->ec = GGML_STATUS_ABORTED;
            return 0;
        }

        if (atomic_fetch_sub(&state->shared->n_active, 1) == 1) {
            // all other threads are finished and spinning
            // do finalize and init here so we don't have to synchronize again
            struct ggml_compute_params params = {
                /*.type  =*/ GGML_TASK_TYPE_FINALIZE,
                /*.ith   =*/ 0,
                /*.nth   =*/ 0,
                /*.wsize =*/ cplan->work_size,
                /*.wdata =*/ cplan->work_data,
            };

            if (node_n != -1) {
                /* FINALIZE */
                struct ggml_tensor * node = cgraph->nodes[node_n];
                if (GGML_OP_HAS_FINALIZE[node->op]) {
                    params.nth = ggml_get_n_tasks(node, n_threads, state->shared->n_threads);
                    ggml_compute_forward(&params, node);
                }
                ggml_graph_compute_perf_stats_node(node, state->shared);
            }

            // distribute new work or execute it directly if only one task is needed
            while (++node_n < cgraph->n_nodes) {
                GGML_PRINT_DEBUG_5("%s: %d/%d\n", __func__, node_n, cgraph->n_nodes);
                struct ggml_tensor * node = cgraph->nodes[node_n];
                const int n_tasks = ggml_get_n_tasks(node, n_threads, state->shared->n_threads);

                state->shared->perf_node_start_cycles  = ggml_perf_cycles();
                state->shared->perf_node_start_time_us = ggml_perf_time_us();

                params.nth = n_tasks;

                if (n_tasks == 1) {
                    /* INIT */
                    if (GGML_OP_HAS_INIT[node->op]) {
                        params.type = GGML_TASK_TYPE_INIT;
                        ggml_compute_forward(&params, node);
                    }

                    // TODO: maybe push node_n to the atomic but if other threads see n_tasks is 1,
                    //       they do something more efficient than spinning (?)
                    params.type = GGML_TASK_TYPE_COMPUTE;
                    ggml_compute_forward(&params, node);

                    if (GGML_OP_HAS_FINALIZE[node->op]) {
                        params.type = GGML_TASK_TYPE_FINALIZE;
                        ggml_compute_forward(&params, node);
                    }

                    ggml_graph_compute_perf_stats_node(node, state->shared);
                } else {
                    break;
                }

                if (cplan->abort_callback && cplan->abort_callback(cplan->abort_callback_data)) {
                    break;
                }
            }

            task_phase = GGML_TASK_TYPE_INIT;
            atomic_store(&state->shared->n_active,  n_threads);
            atomic_store(&state->shared->node_n,    node_n);
            atomic_store(&state->shared->node_task, task_phase);
        } else {
            ggml_graph_compute_thread_sync_node(&node_n,     state, false);
            ggml_graph_compute_thread_sync_task(&task_phase, state, false);
        }

        // check if we should stop
        if (node_n >= cgraph->n_nodes) break;

        /* INIT & COMPUTE */
        struct ggml_tensor * node = cgraph->nodes[node_n];
        const int n_tasks = ggml_get_n_tasks(node, n_threads, state->shared->n_threads);

        struct ggml_compute_params params = {
            /*.type  =*/ GGML_TASK_TYPE_INIT,
            /*.ith   =*/ state->ith,
            /*.nth   =*/ n_tasks,
            /*.wsize =*/ cplan->work_size,
            /*.wdata =*/ cplan->work_data,
        };

        if (state->ith < n_tasks) {
            if (GGML_OP_HAS_INIT[node->op]) {
                ggml_compute_forward(&params, node);
            }
        }

        if (atomic_fetch_sub(&state->shared->n_active, 1) == 1) {
            task_phase = GGML_TASK_TYPE_COMPUTE;
            atomic_store(&state->shared->n_active,  n_threads);
            atomic_store(&state->shared->node_task, task_phase);
        }
        else {
            // TODO: this sched_yield can have significant impact on the performance - either positive or negative
            //       depending on the workload and the operating system.
            //       since it is not clear what is the best approach, it should potentially become user-configurable
            //       ref: https://github.com/ggerganov/ggml/issues/291
            // UPD:  adding the do_yield flag seems to resolve the issue universally
            const bool do_yield = node_n < 0 || cgraph->nodes[node_n]->op == GGML_OP_MUL_MAT;
            ggml_graph_compute_thread_sync_task(&task_phase, state, do_yield);
        }

        if (state->ith < n_tasks) {
            params.type = GGML_TASK_TYPE_COMPUTE;
            ggml_compute_forward(&params, node);
        }

        if (atomic_fetch_sub(&state->shared->n_active, 1) == 1) {
            task_phase = GGML_TASK_TYPE_FINALIZE;
            atomic_store(&state->shared->n_active,  n_threads);
            atomic_store(&state->shared->node_task, task_phase);
        }
        else {
            ggml_graph_compute_thread_sync_task(&task_phase, state, false);
        }
    }

    return 0;
}
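
// prepare a compute plan for the graph: pick the number of threads and
// estimate the size of the scratch ("work") buffer that the operations need;
// the caller must allocate cplan.work_data before calling ggml_graph_compute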
struct ggml_cplan ggml_graph_plan(const struct ggml_cgraph * cgraph, int n_threads) {
    if (n_threads <= 0) {
        n_threads = GGML_DEFAULT_N_THREADS;
    }

    size_t work_size = 0;

    struct ggml_cplan cplan;
    memset(&cplan, 0, sizeof(struct ggml_cplan));

    int max_tasks = 1;

    // thread scheduling for the different operations + work buffer size estimation
    for (int i = 0; i < cgraph->n_nodes; i++) {
        struct ggml_tensor * node = cgraph->nodes[i];

        const int n_tasks = ggml_get_n_tasks(node, n_threads, 1);

        max_tasks = MAX(max_tasks, n_tasks);

        size_t cur = 0;

        switch (node->op) {
            case GGML_OP_CPY:
            case GGML_OP_DUP:
                {
                    if (ggml_is_quantized(node->type) ||
                        // F16 -> BF16 and BF16 -> F16 copies go through intermediate F32
                        (node->src[0]->type == GGML_TYPE_F16  && node->src[1] && node->src[1]->type == GGML_TYPE_BF16) ||
                        (node->src[0]->type == GGML_TYPE_BF16 && node->src[1] && node->src[1]->type == GGML_TYPE_F16)) {
                        cur = ggml_type_size(GGML_TYPE_F32) * node->ne[0] * n_tasks;
                    }
                } break;
            case GGML_OP_ADD:
            case GGML_OP_ADD1:
                {
                    if (ggml_is_quantized(node->src[0]->type)) {
                        cur = ggml_type_size(GGML_TYPE_F32) * node->src[0]->ne[0] * n_tasks;
                    }
                } break;
            case GGML_OP_ACC:
                {
                    if (ggml_is_quantized(node->src[0]->type)) {
                        cur = ggml_type_size(GGML_TYPE_F32) * node->src[1]->ne[0] * n_tasks;
                    }
                } break;
            case GGML_OP_MUL_MAT:
                {
                    const enum ggml_type vec_dot_type = type_traits[node->src[0]->type].vec_dot_type;

#if defined(GGML_USE_CLBLAST)
                    if (ggml_cl_can_mul_mat(node->src[0], node->src[1], node)) {
                        cur = ggml_cl_mul_mat_get_wsize(node->src[0], node->src[1], node);
                    } else
#endif
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
                    if (ggml_compute_forward_mul_mat_use_blas(node)) {
                        if (node->src[0]->type != GGML_TYPE_F32) {
                            // here we need memory for fully dequantized matrix from src0
                            // take into account that src0 can be broadcasted into src1[2,3]
                            cur = ggml_type_size(GGML_TYPE_F32)
                                * node->src[0]->ne[0]*node->src[0]->ne[1]
                                * node->src[1]->ne[2]*node->src[1]->ne[3];
                        }
                    } else
#endif
                    if (node->src[1]->type != vec_dot_type) {
                        cur = ggml_row_size(vec_dot_type, ggml_nelements(node->src[1]));
                    }
                } break;
            case GGML_OP_MUL_MAT_ID:
                {
                    cur = 0;
                    const struct ggml_tensor * src0 = node->src[0];
                    const struct ggml_tensor * src1 = node->src[1];
                    const enum ggml_type vec_dot_type = type_traits[src0->type].vec_dot_type;
                    if (src1->type != vec_dot_type) {
                        cur += ggml_row_size(vec_dot_type, ggml_nelements(src1));
                    }
                    const int n_as = src0->ne[2];
                    cur += GGML_PAD(cur, sizeof(int64_t));       // align
                    cur += n_as * sizeof(int64_t);               // matrix_row_counts
                    cur += n_as * src1->ne[2] * sizeof(int64_t); // matrix_rows
                } break;
            case GGML_OP_OUT_PROD:
                {
                    if (ggml_is_quantized(node->src[0]->type)) {
                        cur = ggml_type_size(GGML_TYPE_F32) * node->src[0]->ne[0] * n_tasks;
                    }
                } break;
            case GGML_OP_SOFT_MAX:
            case GGML_OP_ROPE:
                {
                    cur = ggml_type_size(GGML_TYPE_F32) * node->ne[0] * n_tasks;
                } break;
            case GGML_OP_CONV_TRANSPOSE_1D:
                {
                    GGML_ASSERT(node->src[0]->ne[3] == 1);
                    GGML_ASSERT(node->src[1]->ne[2] == 1);
                    GGML_ASSERT(node->src[1]->ne[3] == 1);

                    const int64_t ne00 = node->src[0]->ne[0]; // K
                    const int64_t ne01 = node->src[0]->ne[1]; // Cout
                    const int64_t ne02 = node->src[0]->ne[2]; // Cin

                    const int64_t ne10 = node->src[1]->ne[0]; // L
                    const int64_t ne11 = node->src[1]->ne[1]; // Cin

                    if ((node->src[0]->type == GGML_TYPE_F16 ||
                         node->src[0]->type == GGML_TYPE_BF16) &&
                        node->src[1]->type == GGML_TYPE_F32) {
                        cur += sizeof(ggml_fp16_t)*ne00*ne01*ne02;
                        cur += sizeof(ggml_fp16_t)*ne10*ne11;
                    } else if (node->src[0]->type == GGML_TYPE_F32 &&
                               node->src[1]->type == GGML_TYPE_F32) {
                        cur += sizeof(float)*ne00*ne01*ne02;
                        cur += sizeof(float)*ne10*ne11;
                    } else {
                        GGML_ASSERT(false);
                    }
                } break;
            case GGML_OP_CONV_TRANSPOSE_2D:
                {
                    const int64_t ne00 = node->src[0]->ne[0]; // W
                    const int64_t ne01 = node->src[0]->ne[1]; // H
                    const int64_t ne02 = node->src[0]->ne[2]; // Channels Out
                    const int64_t ne03 = node->src[0]->ne[3]; // Channels In

                    const int64_t ne10 = node->src[1]->ne[0]; // W
                    const int64_t ne11 = node->src[1]->ne[1]; // H
                    const int64_t ne12 = node->src[1]->ne[2]; // Channels In

                    cur += sizeof(ggml_fp16_t)*ne00*ne01*ne02*ne03;
                    cur += sizeof(ggml_fp16_t)*ne10*ne11*ne12;
                } break;
            case GGML_OP_FLASH_ATTN:
                {
                    const int64_t ne11 = ggml_up(node->src[1]->ne[1], GGML_SOFT_MAX_UNROLL);

                    if (node->src[1]->type == GGML_TYPE_F32) {
                        cur  = sizeof(float)*ne11*n_tasks; // TODO: this can become (n_tasks-1)
                        cur += sizeof(float)*ne11*n_tasks; // this is overestimated by x2
                    } else if (node->src[1]->type == GGML_TYPE_F16) {
                        cur  = sizeof(float)*ne11*n_tasks; // TODO: this can become (n_tasks-1)
                        cur += sizeof(float)*ne11*n_tasks; // this is overestimated by x2
                    } else if (node->src[1]->type == GGML_TYPE_BF16) {
                        cur  = sizeof(float)*ne11*n_tasks; // TODO: this can become (n_tasks-1)
                        cur += sizeof(float)*ne11*n_tasks; // this is overestimated by x2
                    }
                } break;
            case GGML_OP_FLASH_ATTN_EXT:
                {
                    const int64_t ne00 = node->src[0]->ne[0]; // D

                    cur = 2*sizeof(float)*ne00*n_tasks; // 2x head size
                } break;
            case GGML_OP_FLASH_FF:
                {
                    if (node->src[1]->type == GGML_TYPE_F32) {
                        cur  = sizeof(float)*node->src[1]->ne[1]*n_tasks; // TODO: this can become (n_tasks-1)
                        cur += sizeof(float)*node->src[1]->ne[1]*n_tasks; // this is overestimated by x2
                    } else if (node->src[1]->type == GGML_TYPE_F16) {
                        cur  = sizeof(float)*node->src[1]->ne[1]*n_tasks; // TODO: this can become (n_tasks-1)
                        cur += sizeof(float)*node->src[1]->ne[1]*n_tasks; // this is overestimated by x2
                    } else if (node->src[1]->type == GGML_TYPE_BF16) {
                        cur  = sizeof(float)*node->src[1]->ne[1]*n_tasks; // TODO: this can become (n_tasks-1)
                        cur += sizeof(float)*node->src[1]->ne[1]*n_tasks; // this is overestimated by x2
                    }
                } break;
            case GGML_OP_FLASH_ATTN_BACK:
                {
                    const int64_t    D = node->src[0]->ne[0];
                    const int64_t ne11 = ggml_up(node->src[1]->ne[1], GGML_SOFT_MAX_UNROLL);
                    const int64_t mxDn = MAX(D, ne11) * 2; // *2 because of S and SM in ggml_compute_forward_flash_attn_back

                    if (node->src[1]->type == GGML_TYPE_F32) {
                        cur  = sizeof(float)*mxDn*n_tasks; // TODO: this can become (n_tasks-1)
                        cur += sizeof(float)*mxDn*n_tasks; // this is overestimated by x2
                    } else if (node->src[1]->type == GGML_TYPE_F16) {
                        cur  = sizeof(float)*mxDn*n_tasks; // TODO: this can become (n_tasks-1)
                        cur += sizeof(float)*mxDn*n_tasks; // this is overestimated by x2
                    } else if (node->src[1]->type == GGML_TYPE_BF16) {
                        cur  = sizeof(float)*mxDn*n_tasks; // TODO: this can become (n_tasks-1)
                        cur += sizeof(float)*mxDn*n_tasks; // this is overestimated by x2
                    }
                } break;
            case GGML_OP_CROSS_ENTROPY_LOSS:
                {
                    cur = ggml_type_size(node->type)*(n_tasks + node->src[0]->ne[0]*n_tasks);
                } break;
            case GGML_OP_COUNT:
                {
                    GGML_ASSERT(false);
                } break;
            default:
                break;
        }

        work_size = MAX(work_size, cur);
    }

    if (work_size > 0) {
        work_size += CACHE_LINE_SIZE*(n_threads - 1);
    }

    cplan.n_threads = MIN(max_tasks, n_threads);
    cplan.work_size = work_size;
    cplan.work_data = NULL;

    return cplan;
}
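
// execute the graph with the given plan: thread 0 is the calling thread, the
// remaining cplan->n_threads - 1 workers are spawned here and joined after
// the graph has been processed (or aborted via the abort callback)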
enum ggml_status ggml_graph_compute(struct ggml_cgraph * cgraph, struct ggml_cplan * cplan) {
    {
        GGML_ASSERT(cplan);
        GGML_ASSERT(cplan->n_threads > 0);

        if (cplan->work_size > 0) {
            GGML_ASSERT(cplan->work_data);
        }
    }

    const int n_threads = cplan->n_threads;

    struct ggml_compute_state_shared state_shared = {
        /*.cgraph                  =*/ cgraph,
        /*.cplan                   =*/ cplan,
        /*.perf_node_start_cycles  =*/ 0,
        /*.perf_node_start_time_us =*/ 0,
        /*.n_threads               =*/ n_threads,
        /*.n_active                =*/ n_threads,
        /*.node_n                  =*/ -1,
        /*.node_task               =*/ GGML_TASK_TYPE_FINALIZE,
        /*.abort_callback          =*/ NULL,
        /*.abort_callback_data     =*/ NULL,
    };
    struct ggml_compute_state * workers = alloca(sizeof(struct ggml_compute_state)*n_threads);

    // create thread pool
    if (n_threads > 1) {
        for (int j = 1; j < n_threads; ++j) {
            workers[j] = (struct ggml_compute_state) {
                .thrd   = 0,
                .ith    = j,
                .shared = &state_shared,
                .ec     = GGML_STATUS_SUCCESS,
            };

            const int rc = ggml_thread_create(&workers[j].thrd, NULL, ggml_graph_compute_thread, &workers[j]);
            GGML_ASSERT(rc == 0);
            UNUSED(rc);
        }
    }

    workers[0].ith    = 0;
    workers[0].shared = &state_shared;
    workers[0].ec     = GGML_STATUS_SUCCESS;

    const int64_t perf_start_cycles  = ggml_perf_cycles();
    const int64_t perf_start_time_us = ggml_perf_time_us();

    // this is a work thread too
    ggml_graph_compute_thread(&workers[0]);
    enum ggml_status compute_status = workers[0].ec;

    // don't leave affinity set on the main thread
    clear_numa_thread_affinity();

    // join or kill thread pool
    if (n_threads > 1) {
        for (int j = 1; j < n_threads; j++) {
            const int rc = ggml_thread_join(workers[j].thrd, NULL);
            GGML_ASSERT(rc == 0);
            if (workers[j].ec != GGML_STATUS_SUCCESS)
                compute_status = workers[j].ec;
        }
    }

    // performance stats (graph)
    {
        int64_t perf_cycles_cur  = ggml_perf_cycles()  - perf_start_cycles;
        int64_t perf_time_us_cur = ggml_perf_time_us() - perf_start_time_us;

        cgraph->perf_runs++;
        cgraph->perf_cycles  += perf_cycles_cur;
        cgraph->perf_time_us += perf_time_us_cur;

        GGML_PRINT_DEBUG("%s: perf (%d) - cpu = %.3f / %.3f ms, wall = %.3f / %.3f ms\n",
                __func__, cgraph->perf_runs,
                (double) perf_cycles_cur      / (double) ggml_cycles_per_ms(),
                (double) cgraph->perf_cycles  / (double) ggml_cycles_per_ms() / (double) cgraph->perf_runs,
                (double) perf_time_us_cur     / 1000.0,
                (double) cgraph->perf_time_us / 1000.0 / cgraph->perf_runs);
    }

    return compute_status;
}

enum ggml_status ggml_graph_compute_with_ctx(struct ggml_context * ctx, struct ggml_cgraph * cgraph, int n_threads) {
    struct ggml_cplan cplan = ggml_graph_plan(cgraph, n_threads);

    struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_TYPE_WORK_BUFFER, cplan.work_size);

    cplan.work_data = (uint8_t *)ctx->mem_buffer + obj->offs;

    return ggml_graph_compute(cgraph, &cplan);
}
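
// minimal end-to-end sketch of an inference-only use of the API above
// (illustrative only - the tensors and sizes are made up):
//
//   struct ggml_init_params ip = { .mem_size = 16*1024*1024, .mem_buffer = NULL, .no_alloc = false };
//   struct ggml_context * ctx = ggml_init(ip);
//
//   struct ggml_tensor * a = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 8);
//   struct ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 8);
//   struct ggml_tensor * c = ggml_add(ctx, a, b);
//
//   struct ggml_cgraph * gf = ggml_new_graph(ctx);
//   ggml_build_forward_expand(gf, c);
//   ggml_graph_compute_with_ctx(ctx, gf, /*n_threads =*/ 4);
//
//   ggml_free(ctx);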
struct ggml_tensor * ggml_graph_get_tensor(struct ggml_cgraph * cgraph, const char * name) {
    for (int i = 0; i < cgraph->n_leafs; i++) {
        struct ggml_tensor * leaf = cgraph->leafs[i];

        if (strcmp(leaf->name, name) == 0) {
            return leaf;
        }
    }

    for (int i = 0; i < cgraph->n_nodes; i++) {
        struct ggml_tensor * node = cgraph->nodes[i];

        if (strcmp(node->name, name) == 0) {
            return node;
        }
    }

    return NULL;
}
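
// export the graph to a file: first a human-readable dump on stdout, then a
// binary file with a small header (magic, version, counts, eval size)
// followed by every leaf and node tensor together with its data and the
// indices of its source arguments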
static void ggml_graph_export_leaf(const struct ggml_tensor * tensor, FILE * fout) {
    const int64_t * ne = tensor->ne;
    const size_t  * nb = tensor->nb;

    fprintf(fout, "%-6s %-12s %8d %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %16zu %16zu %16zu %16zu %16p %32s\n",
            ggml_type_name(tensor->type),
            ggml_op_name  (tensor->op),
            ggml_n_dims(tensor),
            ne[0], ne[1], ne[2], ne[3],
            nb[0], nb[1], nb[2], nb[3],
            tensor->data,
            tensor->name);
}

static void ggml_graph_export_node(const struct ggml_tensor * tensor, const char * arg, FILE * fout) {
    const int64_t * ne = tensor->ne;
    const size_t  * nb = tensor->nb;

    fprintf(fout, "%-6s %-6s %-12s %8d %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %16zu %16zu %16zu %16zu %16p %32s\n",
            arg,
            ggml_type_name(tensor->type),
            ggml_op_name  (tensor->op),
            ggml_n_dims(tensor),
            ne[0], ne[1], ne[2], ne[3],
            nb[0], nb[1], nb[2], nb[3],
            tensor->data,
            tensor->name);
}

void ggml_graph_export(const struct ggml_cgraph * cgraph, const char * fname) {
    uint64_t size_eval = 0;

    // compute size of intermediate results
    // TODO: does not take into account scratch buffers !!!!
    for (int i = 0; i < cgraph->n_nodes; ++i) {
        size_eval += ggml_nbytes_pad(cgraph->nodes[i]);
    }

    // print
    {
        FILE * fout = stdout;

        fprintf(fout, "\n");
        fprintf(fout, "%-16s %8x\n", "magic",   GGML_FILE_MAGIC);
        fprintf(fout, "%-16s %8d\n", "version", GGML_FILE_VERSION);
        fprintf(fout, "%-16s %8d\n", "leafs",   cgraph->n_leafs);
        fprintf(fout, "%-16s %8d\n", "nodes",   cgraph->n_nodes);
        fprintf(fout, "%-16s %" PRIu64 "\n", "eval", size_eval);

        // header
        fprintf(fout, "\n");
        fprintf(fout, "%-6s %-12s %8s %8s %8s %8s %8s %16s %16s %16s %16s %16s %16s\n",
                "TYPE", "OP", "NDIMS", "NE0", "NE1", "NE2", "NE3", "NB0", "NB1", "NB2", "NB3", "DATA", "NAME");

        for (int i = 0; i < cgraph->n_leafs; ++i) {
            ggml_graph_export_leaf(cgraph->leafs[i], fout);

            GGML_ASSERT(cgraph->leafs[i]->op     == GGML_OP_NONE);
            GGML_ASSERT(cgraph->leafs[i]->src[0] == NULL);
            GGML_ASSERT(cgraph->leafs[i]->src[1] == NULL);
        }

        // header
        fprintf(fout, "\n");
        fprintf(fout, "%-6s %-6s %-12s %8s %8s %8s %8s %8s %16s %16s %16s %16s %8s %16s %16s\n",
                "ARG", "TYPE", "OP", "NDIMS", "NE0", "NE1", "NE2", "NE3", "NB0", "NB1", "NB2", "NB3", "NTASKS", "DATA", "NAME");

        for (int i = 0; i < cgraph->n_nodes; ++i) {
            ggml_graph_export_node(cgraph->nodes[i], "DST", fout);

            for (int j = 0; j < GGML_MAX_SRC; ++j) {
                if (cgraph->nodes[i]->src[j]) {
                    ggml_graph_export_node(cgraph->nodes[i]->src[j], "SRC", fout);
                }
            }

            fprintf(fout, "\n");
        }

        fprintf(fout, "\n");
    }

    // write binary data
    {
        FILE * fout = ggml_fopen(fname, "wb");

        if (!fout) {
            fprintf(stderr, "%s: failed to open %s\n", __func__, fname);
            return;
        }

        // header
        {
            const uint32_t magic   = GGML_FILE_MAGIC;
            const uint32_t version = GGML_FILE_VERSION;
            const uint32_t n_leafs = cgraph->n_leafs;
            const uint32_t n_nodes = cgraph->n_nodes;

            fwrite(&magic,     sizeof(uint32_t), 1, fout);
            fwrite(&version,   sizeof(uint32_t), 1, fout);
            fwrite(&n_leafs,   sizeof(uint32_t), 1, fout);
            fwrite(&n_nodes,   sizeof(uint32_t), 1, fout);
            fwrite(&size_eval, sizeof(uint64_t), 1, fout);
        }

        // leafs
        {
            for (int i = 0; i < cgraph->n_leafs; ++i) {
                const struct ggml_tensor * tensor = cgraph->leafs[i];

                const uint32_t type = tensor->type;
                const uint32_t op   = tensor->op;

                fwrite(&type, sizeof(uint32_t), 1, fout);
                fwrite(&op,   sizeof(uint32_t), 1, fout);

                for (int j = 0; j < GGML_MAX_DIMS; ++j) {
                    const uint64_t ne = tensor->ne[j];
                    const uint64_t nb = tensor->nb[j];

                    fwrite(&ne, sizeof(uint64_t), 1, fout);
                    fwrite(&nb, sizeof(uint64_t), 1, fout);
                }

                fwrite(tensor->name,      sizeof(char), GGML_MAX_NAME,      fout);
                fwrite(tensor->op_params, sizeof(char), GGML_MAX_OP_PARAMS, fout);

                // dump the data
                // TODO: pad this to 32 byte boundary
                {
                    const size_t size = ggml_nbytes(tensor);
                    fwrite(tensor->data, sizeof(char), size, fout);
                }
            }
        }

        // nodes
        {
            for (int i = 0; i < cgraph->n_nodes; ++i) {
                const struct ggml_tensor * tensor = cgraph->nodes[i];

                const uint32_t type = tensor->type;
                const uint32_t op   = tensor->op;

                fwrite(&type, sizeof(uint32_t), 1, fout);
                fwrite(&op,   sizeof(uint32_t), 1, fout);

                for (int j = 0; j < GGML_MAX_DIMS; ++j) {
                    const uint64_t ne = tensor->ne[j];
                    const uint64_t nb = tensor->nb[j];

                    fwrite(&ne, sizeof(uint64_t), 1, fout);
                    fwrite(&nb, sizeof(uint64_t), 1, fout);
                }

                fwrite(tensor->name,      sizeof(char), GGML_MAX_NAME,      fout);
                fwrite(tensor->op_params, sizeof(char), GGML_MAX_OP_PARAMS, fout);

                // output the op arguments
                {
                    struct ggml_tensor * args[GGML_MAX_SRC] = { NULL };

                    for (int j = 0; j < GGML_MAX_SRC; ++j) {
                        args[j] = tensor->src[j];
                    }

                    for (int j = 0; j < GGML_MAX_SRC; ++j) {
                        if (args[j]) {
                            int32_t idx = -1;

                            // check if leaf
                            {
                                for (int k = 0; k < cgraph->n_leafs; ++k) {
                                    if (args[j] == cgraph->leafs[k]) {
                                        idx = k;
                                        break;
                                    }
                                }
                            }

                            // check if node
                            if (idx == -1) {
                                for (int k = 0; k < cgraph->n_nodes; ++k) {
                                    if (args[j] == cgraph->nodes[k]) {
                                        idx = cgraph->n_leafs + k;
                                        break;
                                    }
                                }
                            }

                            if (idx == -1) {
                                fprintf(stderr, "%s: failed to find tensor, arg = %d, node = %d\n", __func__, j, i);
                                fclose(fout);
                                return;
                            }

                            fwrite(&idx, sizeof(int32_t), 1, fout);
                        } else {
                            const int32_t nul = -1;

                            fwrite(&nul, sizeof(int32_t), 1, fout);
                        }
                    }
                }
            }
        }

        fclose(fout);
    }
}
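
// import a graph written by ggml_graph_export: the whole file is read into
// *ctx_data as a single I8 tensor and the leaf/node tensors are reconstructed
// in *ctx_eval, with leaf data pointing directly into the file buffer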
  16584. struct ggml_cgraph * ggml_graph_import(const char * fname, struct ggml_context ** ctx_data, struct ggml_context ** ctx_eval) {
  16585. assert(*ctx_data == NULL);
  16586. assert(*ctx_eval == NULL);
  16587. struct ggml_cgraph * result = NULL;
  16588. struct ggml_tensor * data = NULL;
  16589. // read file into data
  16590. {
  16591. FILE * fin = ggml_fopen(fname, "rb");
  16592. if (!fin) {
  16593. fprintf(stderr, "%s: failed to open %s\n", __func__, fname);
  16594. return result;
  16595. }
  16596. size_t fsize = 0;
  16597. fseek(fin, 0, SEEK_END);
  16598. fsize = ftell(fin);
  16599. fseek(fin, 0, SEEK_SET);
  16600. // create the data context
  16601. {
  16602. const size_t overhead = 1*ggml_tensor_overhead();
  16603. struct ggml_init_params params = {
  16604. .mem_size = fsize + overhead,
  16605. .mem_buffer = NULL,
  16606. .no_alloc = false,
  16607. };
  16608. *ctx_data = ggml_init(params);
  16609. if (!*ctx_data) {
  16610. fprintf(stderr, "%s: failed to create ggml context\n", __func__);
  16611. fclose(fin);
  16612. return result;
  16613. }
  16614. }
  16615. data = ggml_new_tensor_1d(*ctx_data, GGML_TYPE_I8, fsize);
  16616. {
  16617. const size_t ret = fread(data->data, sizeof(char), fsize, fin);
  16618. if (ret != fsize) {
  16619. fprintf(stderr, "%s: failed to read %s\n", __func__, fname);
  16620. fclose(fin);
  16621. return result;
  16622. }
  16623. }
  16624. fclose(fin);
  16625. }
  16626. // populate result
    {
        char * ptr = (char *) data->data;

        const uint32_t magic = *(const uint32_t *) ptr; ptr += sizeof(magic);

        if (magic != GGML_FILE_MAGIC) {
            fprintf(stderr, "%s: invalid magic number, got %08x\n", __func__, magic);
            return result;
        }

        const uint32_t version = *(const uint32_t *) ptr; ptr += sizeof(version);

        if (version != GGML_FILE_VERSION) {
            fprintf(stderr, "%s: invalid version number\n", __func__);
            return result;
        }

        const uint32_t n_leafs   = *(const uint32_t *) ptr; ptr += sizeof(n_leafs);
        const uint32_t n_nodes   = *(const uint32_t *) ptr; ptr += sizeof(n_nodes);
        const uint64_t size_eval = *(const uint64_t *) ptr; ptr += sizeof(size_eval);

        const int graph_size = MAX(n_leafs, n_nodes);

        // create the data context
        {
            const size_t overhead = (n_leafs + n_nodes)*ggml_tensor_overhead() + ggml_graph_overhead_custom(graph_size, false);

            struct ggml_init_params params = {
                .mem_size   = size_eval + overhead,
                .mem_buffer = NULL,
                .no_alloc   = true,
            };

            *ctx_eval = ggml_init(params);

            if (!*ctx_eval) {
                fprintf(stderr, "%s: failed to create ggml context\n", __func__);
                return result;
            }
        }

        result = ggml_new_graph_custom(*ctx_eval, graph_size, false);

        result->n_leafs = n_leafs;
        result->n_nodes = n_nodes;

        // leafs
        {
            uint32_t type;
            uint32_t op;

            for (uint32_t i = 0; i < n_leafs; ++i) {
                type = *(const uint32_t *) ptr; ptr += sizeof(type);
                op   = *(const uint32_t *) ptr; ptr += sizeof(op);

                int64_t ne[GGML_MAX_DIMS];
                size_t  nb[GGML_MAX_DIMS];

                for (int j = 0; j < GGML_MAX_DIMS; ++j) {
                    uint64_t ne_cur;
                    uint64_t nb_cur;

                    ne_cur = *(const uint64_t *) ptr; ptr += sizeof(ne_cur);
                    nb_cur = *(const uint64_t *) ptr; ptr += sizeof(nb_cur);

                    ne[j] = ne_cur;
                    nb[j] = nb_cur;
                }

                struct ggml_tensor * tensor = ggml_new_tensor(*ctx_eval, (enum ggml_type) type, GGML_MAX_DIMS, ne);

                tensor->op = (enum ggml_op) op;

                memcpy(tensor->name,      ptr, GGML_MAX_NAME);      ptr += GGML_MAX_NAME;
                memcpy(tensor->op_params, ptr, GGML_MAX_OP_PARAMS); ptr += GGML_MAX_OP_PARAMS;

                tensor->data = (void *) ptr;

                for (int j = 0; j < GGML_MAX_DIMS; ++j) {
                    tensor->nb[j] = nb[j];
                }

                result->leafs[i] = tensor;

                ptr += ggml_nbytes(tensor);

                fprintf(stderr, "%s: loaded leaf %u: '%16s', %9zu bytes\n", __func__, i, tensor->name, ggml_nbytes(tensor));
            }
        }

        ggml_set_no_alloc(*ctx_eval, false);

        // nodes
        {
            uint32_t type;
            uint32_t op;

            for (uint32_t i = 0; i < n_nodes; ++i) {
                type = *(const uint32_t *) ptr; ptr += sizeof(type);
                op   = *(const uint32_t *) ptr; ptr += sizeof(op);

                enum ggml_op eop = (enum ggml_op) op;

                int64_t ne[GGML_MAX_DIMS];
                size_t  nb[GGML_MAX_DIMS];

                for (int j = 0; j < GGML_MAX_DIMS; ++j) {
                    uint64_t ne_cur;
                    uint64_t nb_cur;

                    ne_cur = *(const uint64_t *) ptr; ptr += sizeof(ne_cur);
                    nb_cur = *(const uint64_t *) ptr; ptr += sizeof(nb_cur);

                    ne[j] = ne_cur;
                    nb[j] = nb_cur;
                }

                const char * ptr_name      = ptr; ptr += GGML_MAX_NAME;
                const char * ptr_op_params = ptr; ptr += GGML_MAX_OP_PARAMS;

                const int32_t * ptr_arg_idx = (const int32_t *) ptr; ptr += GGML_MAX_SRC*sizeof(int32_t);

                struct ggml_tensor * args[GGML_MAX_SRC] = { NULL };

                // parse args
                for (int j = 0; j < GGML_MAX_SRC; ++j) {
                    const int32_t arg_idx = ptr_arg_idx[j];

                    if (arg_idx == -1) {
                        continue;
                    }

                    if (arg_idx < result->n_leafs) {
                        args[j] = result->leafs[arg_idx];
                    } else {
                        args[j] = result->nodes[arg_idx - result->n_leafs];
                    }
                }

                // create the tensor
                // "view" operations are handled differently
                // TODO: handle inplace ops - currently a copy is always made

                struct ggml_tensor * tensor = NULL;

                switch (eop) {
                    // TODO: implement other view ops
                    case GGML_OP_RESHAPE:
                        {
                            tensor = ggml_reshape_4d(*ctx_eval, args[0], ne[0], ne[1], ne[2], ne[3]);
                        } break;
                    case GGML_OP_VIEW:
                        {
                            tensor = ggml_view_4d(*ctx_eval, args[0], ne[0], ne[1], ne[2], ne[3], 0, 0, 0, 0);

                            size_t offs;
                            memcpy(&offs, ptr_op_params, sizeof(offs));

                            tensor->data = ((char *) tensor->data) + offs;
                        } break;
                    case GGML_OP_TRANSPOSE:
                        {
                            tensor = ggml_transpose(*ctx_eval, args[0]);
                        } break;
                    case GGML_OP_PERMUTE:
                        {
                            tensor = ggml_view_4d(*ctx_eval, args[0], ne[0], ne[1], ne[2], ne[3], 0, 0, 0, 0);
                        } break;
                    default:
                        {
                            tensor = ggml_new_tensor(*ctx_eval, (enum ggml_type) type, GGML_MAX_DIMS, ne);

                            tensor->op = eop;
                        } break;
                }

                memcpy(tensor->name,      ptr_name,      GGML_MAX_NAME);
                memcpy(tensor->op_params, ptr_op_params, GGML_MAX_OP_PARAMS);

                for (int j = 0; j < GGML_MAX_DIMS; ++j) {
                    tensor->nb[j] = nb[j];
                }

                for (int j = 0; j < GGML_MAX_SRC; ++j) {
                    tensor->src[j] = args[j];
                }

                result->nodes[i] = tensor;

                fprintf(stderr, "%s: loaded node %u: '%16s', %9zu bytes\n", __func__, i, tensor->name, ggml_nbytes(tensor));
            }
        }
    }

    return result;
}

void ggml_graph_print(const struct ggml_cgraph * cgraph) {
    int64_t perf_total_per_op_us[GGML_OP_COUNT] = {0};

    GGML_PRINT("=== GRAPH ===\n");

    GGML_PRINT("n_nodes = %d\n", cgraph->n_nodes);
    for (int i = 0; i < cgraph->n_nodes; i++) {
        struct ggml_tensor * node = cgraph->nodes[i];

        perf_total_per_op_us[node->op] += MAX(1, node->perf_time_us);

        GGML_PRINT(" - %3d: [ %5" PRId64 ", %5" PRId64 ", %5" PRId64 "] %16s %s (%3d) cpu = %7.3f / %7.3f ms, wall = %7.3f / %7.3f ms\n",
                i,
                node->ne[0], node->ne[1], node->ne[2],
                ggml_op_name(node->op), (node->flags & GGML_TENSOR_FLAG_PARAM) ? "x" : node->grad ? "g" : " ", node->perf_runs,
                (double) node->perf_cycles  / (double) ggml_cycles_per_ms(),
                (double) node->perf_cycles  / (double) ggml_cycles_per_ms() / (double) node->perf_runs,
                (double) node->perf_time_us / 1000.0,
                (double) node->perf_time_us / 1000.0 / node->perf_runs);
    }

    GGML_PRINT("n_leafs = %d\n", cgraph->n_leafs);
    for (int i = 0; i < cgraph->n_leafs; i++) {
        struct ggml_tensor * node = cgraph->leafs[i];

        GGML_PRINT(" - %3d: [ %5" PRId64 ", %5" PRId64 "] %8s %16s\n",
                i,
                node->ne[0], node->ne[1],
                ggml_op_name(node->op),
                ggml_get_name(node));
    }

    for (int i = 0; i < GGML_OP_COUNT; i++) {
        if (perf_total_per_op_us[i] == 0) {
            continue;
        }

        GGML_PRINT("perf_total_per_op_us[%16s] = %7.3f ms\n", ggml_op_name(i), (double) perf_total_per_op_us[i] / 1000.0);
    }

    GGML_PRINT("========================================\n");
}

// check if node is part of the graph
static bool ggml_graph_find(const struct ggml_cgraph * cgraph, const struct ggml_tensor * node) {
    if (cgraph == NULL) {
        return true;
    }

    for (int i = 0; i < cgraph->n_nodes; i++) {
        if (cgraph->nodes[i] == node) {
            return true;
        }
    }

    return false;
}

static struct ggml_tensor * ggml_graph_get_parent(const struct ggml_cgraph * cgraph, const struct ggml_tensor * node) {
    for (int i = 0; i < cgraph->n_nodes; i++) {
        struct ggml_tensor * parent = cgraph->nodes[i];

        if (parent->grad == node) {
            return parent;
        }
    }

    return NULL;
}

static void ggml_graph_dump_dot_node_edge(FILE * fp, const struct ggml_cgraph * gb, struct ggml_tensor * node, struct ggml_tensor * parent, const char * label) {
    struct ggml_tensor * gparent  = ggml_graph_get_parent(gb, node);
    struct ggml_tensor * gparent0 = ggml_graph_get_parent(gb, parent);

    fprintf(fp, " \"%p\":%s -> \"%p\":%s [ arrowhead = %s; style = %s; label = \"%s\"; ]\n",
            gparent0 ? (void *) gparent0 : (void *) parent,
            gparent0 ? "g" : "x",
            gparent ? (void *) gparent : (void *) node,
            gparent ? "g" : "x",
            gparent ? "empty" : "vee",
            gparent ? "dashed" : "solid",
            label);
}

static void ggml_graph_dump_dot_leaf_edge(FILE * fp, struct ggml_tensor * node, struct ggml_tensor * parent, const char * label) {
    fprintf(fp, " \"%p\":%s -> \"%p\":%s [ label = \"%s\"; ]\n",
            (void *) parent, "x",
            (void *) node, "x",
            label);
}

void ggml_graph_dump_dot(const struct ggml_cgraph * gb, const struct ggml_cgraph * gf, const char * filename) {
    char color[16];

    FILE * fp = ggml_fopen(filename, "w");
    GGML_ASSERT(fp);

    fprintf(fp, "digraph G {\n");
    fprintf(fp, " newrank = true;\n");
    fprintf(fp, " rankdir = LR;\n");

    for (int i = 0; i < gb->n_nodes; i++) {
        struct ggml_tensor * node = gb->nodes[i];

        if (ggml_graph_get_parent(gb, node) != NULL) {
            continue;
        }

        if (node->flags & GGML_TENSOR_FLAG_PARAM) {
            snprintf(color, sizeof(color), "yellow");
        } else if (node->grad) {
            if (ggml_graph_find(gf, node)) {
                snprintf(color, sizeof(color), "green");
            } else {
                snprintf(color, sizeof(color), "lightblue");
            }
        } else {
            snprintf(color, sizeof(color), "white");
        }

        fprintf(fp, " \"%p\" [ "
                    "style = filled; fillcolor = %s; shape = record; "
                    "label=\"",
                (void *) node, color);

        if (strlen(node->name) > 0) {
            fprintf(fp, "%s (%s)|", node->name, ggml_type_name(node->type));
        } else {
            fprintf(fp, "(%s)|", ggml_type_name(node->type));
        }

        if (ggml_is_matrix(node)) {
            fprintf(fp, "%d [%" PRId64 ", %" PRId64 "] | <x>%s", i, node->ne[0], node->ne[1], ggml_op_symbol(node->op));
        } else {
            fprintf(fp, "%d [%" PRId64 ", %" PRId64 ", %" PRId64 "] | <x>%s", i, node->ne[0], node->ne[1], node->ne[2], ggml_op_symbol(node->op));
        }

        if (node->grad) {
            fprintf(fp, " | <g>%s\"; ]\n", ggml_op_symbol(node->grad->op));
        } else {
            fprintf(fp, "\"; ]\n");
        }
    }

    for (int i = 0; i < gb->n_leafs; i++) {
        struct ggml_tensor * node = gb->leafs[i];

        snprintf(color, sizeof(color), "pink");

        fprintf(fp, " \"%p\" [ "
                    "style = filled; fillcolor = %s; shape = record; "
                    "label=\"<x>",
                (void *) node, color);

        if (strlen(node->name) > 0) {
            fprintf(fp, "%s (%s)|", node->name, ggml_type_name(node->type));
        } else {
            fprintf(fp, "(%s)|", ggml_type_name(node->type));
        }

        fprintf(fp, "CONST %d [%" PRId64 ", %" PRId64 "]", i, node->ne[0], node->ne[1]);
        if (ggml_nelements(node) < 5) {
            fprintf(fp, " | (");
            for (int j = 0; j < ggml_nelements(node); j++) {
                if (node->type == GGML_TYPE_I8 || node->type == GGML_TYPE_I16 || node->type == GGML_TYPE_I32) {
                    fprintf(fp, "%d", ggml_get_i32_1d(node, j));
                }
                else if (node->type == GGML_TYPE_F32 ||
                         node->type == GGML_TYPE_F16 ||
                         node->type == GGML_TYPE_BF16) {
                    fprintf(fp, "%.1e", (double)ggml_get_f32_1d(node, j));
                }
                else {
                    fprintf(fp, "#");
                }
                if (j < ggml_nelements(node) - 1) {
                    fprintf(fp, ", ");
                }
            }
            fprintf(fp, ")");
        }
        fprintf(fp, "\"; ]\n");
    }

    for (int i = 0; i < gb->n_nodes; i++) {
        struct ggml_tensor * node = gb->nodes[i];

        for (int j = 0; j < GGML_MAX_SRC; j++) {
            if (node->src[j]) {
                char label[16];
                snprintf(label, sizeof(label), "src %d", j);
                ggml_graph_dump_dot_node_edge(fp, gb, node, node->src[j], label);
            }
        }
    }

    for (int i = 0; i < gb->n_leafs; i++) {
        struct ggml_tensor * node = gb->leafs[i];

        for (int j = 0; j < GGML_MAX_SRC; j++) {
            if (node->src[j]) {
                char label[16];
                snprintf(label, sizeof(label), "src %d", j);
                ggml_graph_dump_dot_leaf_edge(fp, node, node->src[j], label);
            }
        }
    }

    fprintf(fp, "}\n");

    fclose(fp);

    GGML_PRINT("%s: dot -Tpng %s -o %s.png && open %s.png\n", __func__, filename, filename, filename);
}

////////////////////////////////////////////////////////////////////////////////

static void ggml_opt_set_params(int np, struct ggml_tensor * const ps[], const float * x) {
    int i = 0;
    for (int p = 0; p < np; ++p) {
        const int64_t ne = ggml_nelements(ps[p]);
        // TODO: add function to set tensor from array
        for (int64_t j = 0; j < ne; ++j) {
            ggml_set_f32_1d(ps[p], j, x[i++]);
        }
    }
}

static void ggml_opt_get_params(int np, struct ggml_tensor * const ps[], float * x) {
    int i = 0;
    for (int p = 0; p < np; ++p) {
        const int64_t ne = ggml_nelements(ps[p]);
        // TODO: add function to get all elements at once
        for (int64_t j = 0; j < ne; ++j) {
            x[i++] = ggml_get_f32_1d(ps[p], j);
        }
    }
}

static void ggml_opt_get_grad(int np, struct ggml_tensor * const ps[], float * g) {
    int64_t i = 0;
    for (int p = 0; p < np; ++p) {
        const int64_t ne = ggml_nelements(ps[p]);
        // TODO: add function to get all elements at once
        for (int64_t j = 0; j < ne; ++j) {
            g[i++] = ggml_get_f32_1d(ps[p]->grad, j);
        }
    }
}

static void ggml_opt_acc_grad(int np, struct ggml_tensor * const ps[], float * g, float scale) {
    int64_t i = 0;
    for (int p = 0; p < np; ++p) {
        const int64_t ne = ggml_nelements(ps[p]);
        // TODO: add function to get all elements at once
        for (int64_t j = 0; j < ne; ++j) {
            g[i++] += ggml_get_f32_1d(ps[p]->grad, j) * scale;
        }
    }
}

//
// Using AdamW - ref: https://arxiv.org/pdf/1711.05101v3.pdf
//
// (Original Adam - ref: https://arxiv.org/pdf/1412.6980.pdf)
//
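// the parameter update performed in the loop below, written out as in the
// paper (a summary of the code, not an alternative implementation; note that
// `decay` here is params.adam.decay scaled by alpha, that everything is
// additionally scaled by the schedule `sched`, and that weight decay is only
// applied to tensors with at least `decay_min_ndim` dimensions):
//
//   m  = beta1*m + (1 - beta1)*g
//   v  = beta2*v + (1 - beta2)*g^2
//   mh = m/(1 - beta1^t)                 // bias-corrected first moment
//   vh = v/(1 - beta2^t)                 // bias-corrected second moment
//   x  = x*(1 - sched*decay) - sched*alpha*mh/(sqrt(vh) + eps)
//
// the decay term is decoupled from the gradient step, which is what
// distinguishes AdamW from the original Adam
//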
static enum ggml_opt_result ggml_opt_adam(
        struct ggml_context * ctx,
        struct ggml_opt_context * opt,
        struct ggml_opt_params params,
        struct ggml_tensor * f,
        struct ggml_cgraph * gf,
        struct ggml_cgraph * gb,
        ggml_opt_callback callback,
        void * callback_data) {
    GGML_ASSERT(ggml_is_scalar(f));

    // these will store the parameters we want to optimize
    struct ggml_tensor * ps[GGML_MAX_PARAMS];

    int np = 0;
    int64_t nx = 0;
    for (int i = 0; i < gf->n_nodes; ++i) {
        if (gf->nodes[i]->flags & GGML_TENSOR_FLAG_PARAM) {
            GGML_PRINT_DEBUG("found param %d: grad->op = %d\n", np, gf->nodes[i]->grad->op);

            GGML_ASSERT(np < GGML_MAX_PARAMS);

            ps[np++] = gf->nodes[i];
            nx += ggml_nelements(gf->nodes[i]);
        }
    }

    if ((opt->params.type != params.type) || (opt->nx != nx) || (opt->params.past != params.past)) {
        int iter = opt->iter;
        ggml_opt_init(opt->ctx, opt, params, nx);
        opt->iter = iter;
    }

    // constants
    float sched = params.adam.sched;
    const float alpha = params.adam.alpha;
    const float decay = params.adam.decay * alpha;
    const float beta1 = params.adam.beta1;
    const float beta2 = params.adam.beta2;
    const float eps   = params.adam.eps;
    const float gclip = params.adam.gclip;
    const int decay_min_ndim = params.adam.decay_min_ndim;
    const int n_accum = MAX(1, params.n_gradient_accumulation);
    const float accum_norm = 1.0f / (float) n_accum;

    float * g = opt->adam.g->data; // gradients
    float * m = opt->adam.m->data; // first moment
    float * v = opt->adam.v->data; // second moment

    float * pf = params.past > 0 ? opt->adam.pf->data : NULL; // past function values

    struct ggml_cplan cplan = ggml_graph_plan(gb, params.n_threads);
    struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_TYPE_WORK_BUFFER, cplan.work_size);
    cplan.work_data = (uint8_t *)ctx->mem_buffer + obj->offs;

    bool cancel = false;

    // compute the function value
    float fx = 0;
    ggml_set_zero(opt->adam.g);
    for (int accum_step = 0; accum_step < n_accum; ++accum_step) {
        if (callback) {
            callback(callback_data, accum_step, &sched, &cancel);
            if (cancel) {
                return GGML_OPT_RESULT_CANCEL;
            }
        }
        // ggml_graph_reset  (gf);
        ggml_set_f32      (f->grad, 1.0f);
        ggml_graph_compute(gb, &cplan);
        ggml_opt_acc_grad(np, ps, g, accum_norm);
        fx += ggml_get_f32_1d(f, 0);
    }
    fx *= accum_norm;

    opt->adam.fx_prev = fx;
    opt->adam.fx_best = opt->adam.fx_prev;
    if (pf) {
        pf[opt->iter % params.past] = opt->adam.fx_prev;
    }

    opt->loss_before = opt->adam.fx_prev;
    opt->loss_after  = opt->adam.fx_prev;

    // initialize
    if (opt->just_initialized) {
        opt->adam.n_no_improvement = 0;
        opt->just_initialized = false;
    }

    float * fx_best = &opt->adam.fx_best;
    float * fx_prev = &opt->adam.fx_prev;
    int * n_no_improvement = &opt->adam.n_no_improvement;

    int iter0 = opt->iter;

    // run the optimizer
    for (int t = 0; t < params.adam.n_iter; ++t) {
        opt->iter = iter0 + t + 1;
        GGML_PRINT_DEBUG  ("=== iter %d ===\n", t);

        GGML_PRINT_DEBUG  ("f      = %10.6f\n", ggml_get_f32_1d(f, 0));
        GGML_PRINT_DEBUG_5("df/dx0 = %10.6f\n", ggml_get_f32_1d(ps[0]->grad, 0));
        GGML_PRINT_DEBUG_5("df/dx1 = %10.6f\n", ggml_get_f32_1d(ps[1]->grad, 0));

        for (int i = 0; i < np; ++i) {
            GGML_PRINT_DEBUG("param %d: %10.6f, g = %10.6f\n", i,
                    ggml_get_f32_1d(ps[i], 0), ggml_get_f32_1d(ps[i]->grad, 0));
        }

        const int64_t t_start_wall = ggml_time_us();
        const int64_t t_start_cpu  = ggml_cycles();
        UNUSED(t_start_wall);
        UNUSED(t_start_cpu);

        {
            float gnorm = 1.0f;
            if (gclip > 0.0f) {
                // gradient clipping
                ggml_float sum = 0.0;
                for (int64_t i = 0; i < nx; ++i) {
                    sum += (ggml_float)(g[i]*g[i]);
                }
                ggml_float norm = sqrt(sum);
                if (norm > (ggml_float) gclip) {
                    gnorm = (float) ((ggml_float) gclip / norm);
                }
            }
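            // fold the step size and the bias corrections into two scalars so
            // the inner loop below needs only one multiply per moment:
            //   mh = m*beta1h = sched*alpha * m/(1 - beta1^t)
            //   vh = v*beta2h =               v/(1 - beta2^t)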
            const float beta1h = alpha*sched/(1.0f - powf(beta1, opt->iter));
            const float beta2h =        1.0f/(1.0f - powf(beta2, opt->iter));

            int64_t i = 0;
            for (int p = 0; p < np; ++p) {
                const int64_t ne = ggml_nelements(ps[p]);
                const float p_decay = ((ggml_n_dims(ps[p]) >= decay_min_ndim) ? decay : 0.0f) * sched;
                for (int64_t j = 0; j < ne; ++j) {
                    float x  = ggml_get_f32_1d(ps[p], j);
                    float g_ = g[i]*gnorm;
                    m[i] = m[i]*beta1 +    g_*(1.0f - beta1);
                    v[i] = v[i]*beta2 + g_*g_*(1.0f - beta2);
                    float mh = m[i]*beta1h;
                    float vh = v[i]*beta2h;
                    vh = sqrtf(vh) + eps;
                    x  = x*(1.0f - p_decay) - mh/vh;
                    ggml_set_f32_1d(ps[p], j, x);
                    ++i;
                }
            }
        }

        fx = 0;
        ggml_set_zero(opt->adam.g);
        for (int accum_step = 0; accum_step < n_accum; ++accum_step) {
            if (callback) {
                callback(callback_data, accum_step, &sched, &cancel);
                if (cancel) {
                    return GGML_OPT_RESULT_CANCEL;
                }
            }
            // ggml_graph_reset  (gf);
            ggml_set_f32      (f->grad, 1.0f);
            ggml_graph_compute(gb, &cplan);
            ggml_opt_acc_grad(np, ps, g, accum_norm);
            fx += ggml_get_f32_1d(f, 0);
        }
        fx *= accum_norm;

        opt->loss_after = fx;

        // check convergence
        if (fabsf(fx - fx_prev[0])/fx < params.adam.eps_f) {
            GGML_PRINT_DEBUG("converged\n");

            return GGML_OPT_RESULT_OK;
        }

        // delta-based convergence test
        if (pf != NULL) {
            // need at least params.past iterations to start checking for convergence
            if (params.past <= iter0 + t) {
                const float rate = (pf[(iter0 + t)%params.past] - fx)/fx;

                if (fabsf(rate) < params.delta) {
                    return GGML_OPT_RESULT_OK;
                }
            }

            pf[(iter0 + t)%params.past] = fx;
        }

        // check for improvement
        if (params.max_no_improvement > 0) {
            if (fx_best[0] > fx) {
                fx_best[0] = fx;
                n_no_improvement[0] = 0;
            } else {
                ++n_no_improvement[0];

                if (n_no_improvement[0] >= params.max_no_improvement) {
                    return GGML_OPT_RESULT_OK;
                }
            }
        }

        fx_prev[0] = fx;

        {
            const int64_t t_end_cpu = ggml_cycles();
            GGML_PRINT_DEBUG("time iter: %5.3f s\n", ((float)(t_end_cpu - t_start_cpu))/CLOCKS_PER_SEC);
            UNUSED(t_end_cpu);

            const int64_t t_end_wall = ggml_time_us();
            GGML_PRINT_DEBUG("wall time iter: %5.3f s\n", (t_end_wall - t_start_wall)/1e6);
            UNUSED(t_end_wall);
        }
    }

    return GGML_OPT_RESULT_DID_NOT_CONVERGE;
}

//
// L-BFGS
//
// the L-BFGS implementation below is based on the following implementation:
//
// https://github.com/chokkan/liblbfgs
//
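// rough shape of one iteration of ggml_opt_lbfgs below:
//   1. backtracking line search along the current direction d
//      (see linesearch_backtracking)
//   2. convergence tests: ||g||/||x||, the delta-based test and the
//      max_no_improvement counter
//   3. push (s, y) = (x - xp, g - gp) into a ring buffer of the last m
//      correction pairs and rebuild d with the standard two-loop recursion
//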
struct ggml_lbfgs_iteration_data {
    float alpha;
    float ys;
    float * s;
    float * y;
};

static enum ggml_opt_result linesearch_backtracking(
        const struct ggml_opt_params * params,
        int nx,
        float * x,
        float * fx,
        float * g,
        float * d,
        float * step,
        const float * xp,
        struct ggml_tensor * f,
        struct ggml_cgraph * gb,
        struct ggml_cplan * cplan,
        const int np,
        struct ggml_tensor * ps[],
        bool * cancel,
        ggml_opt_callback callback,
        void * callback_data) {
    int count = 0;

    float width  = 0.0f;
    float dg     = 0.0f;
    float finit  = 0.0f;
    float dginit = 0.0f;
    float dgtest = 0.0f;

    const float dec = 0.5f;
    const float inc = 2.1f;

    const int n_accum = MAX(1, params->n_gradient_accumulation);
    const float accum_norm = 1.0f / (float) n_accum;

    if (*step <= 0.f) {
        return GGML_LINESEARCH_INVALID_PARAMETERS;
    }

    // compute the initial gradient in the search direction
    ggml_vec_dot_f32(nx, &dginit, 0, g, 0, d, 0, 1);

    // make sure that d points to a descent direction
    if (0 < dginit) {
        return GGML_LINESEARCH_FAIL;
    }

    // initialize local variables
    finit  = *fx;
    dgtest = params->lbfgs.ftol*dginit;
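    // backtracking loop: try x = xp + step*d, then shrink (dec) or grow (inc)
    // the step until the sufficient-decrease (Armijo) condition
    // f(x) <= finit + step*dgtest holds and, depending on the configured
    // linesearch type, the (strong) Wolfe curvature condition on g.d as well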
    while (true) {
        ggml_vec_cpy_f32(nx, x, xp);
        ggml_vec_mad_f32(nx, x, d, *step);

        // evaluate the function and gradient values
        {
            ggml_opt_set_params(np, ps, x);

            *fx = 0;
            memset(g, 0, sizeof(float)*nx);
            for (int accum_step = 0; accum_step < n_accum; ++accum_step) {
                if (callback) {
                    // L-BFGS does not support learning rate -> ignore learning schedule
                    float sched = 0;
                    callback(callback_data, accum_step, &sched, cancel);
                    if (*cancel) {
                        return GGML_OPT_RESULT_CANCEL;
                    }
                }
                // ggml_graph_reset  (gf);
                ggml_set_f32      (f->grad, 1.0f);
                ggml_graph_compute(gb, cplan);
                ggml_opt_acc_grad(np, ps, g, accum_norm);
                *fx += ggml_get_f32_1d(f, 0);
            }
            *fx *= accum_norm;
        }

        ++count;

        if (*fx > finit + (*step)*dgtest) {
            width = dec;
        } else {
            // Armijo condition is satisfied
            if (params->lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_ARMIJO) {
                return count;
            }

            ggml_vec_dot_f32(nx, &dg, 0, g, 0, d, 0, 1);

            // check the Wolfe condition
            if (dg < params->lbfgs.wolfe * dginit) {
                width = inc;
            } else {
                if (params->lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_WOLFE) {
                    // regular Wolfe conditions
                    return count;
                }

                if (dg > -params->lbfgs.wolfe*dginit) {
                    width = dec;
                } else {
                    // strong Wolfe condition (GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE)
                    return count;
                }
            }
        }

        if (*step < params->lbfgs.min_step) {
            return GGML_LINESEARCH_MINIMUM_STEP;
        }
        if (*step > params->lbfgs.max_step) {
            return GGML_LINESEARCH_MAXIMUM_STEP;
        }
        if (params->lbfgs.max_linesearch <= count) {
            return GGML_LINESEARCH_MAXIMUM_ITERATIONS;
        }

        (*step) *= width;
    }

    GGML_ASSERT(false && "line search failed");

    return GGML_LINESEARCH_FAIL;
}

static enum ggml_opt_result ggml_opt_lbfgs(
        struct ggml_context * ctx,
        struct ggml_opt_context * opt,
        struct ggml_opt_params params,
        struct ggml_tensor * f,
        struct ggml_cgraph * gf,
        struct ggml_cgraph * gb,
        ggml_opt_callback callback,
        void * callback_data) {
    if (params.lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_WOLFE ||
        params.lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE) {
        if (params.lbfgs.wolfe <= params.lbfgs.ftol || 1.f <= params.lbfgs.wolfe) {
            return GGML_OPT_RESULT_INVALID_WOLFE;
        }
    }

    const int m = params.lbfgs.m;

    // these will store the parameters we want to optimize
    struct ggml_tensor * ps[GGML_MAX_PARAMS];

    int np = 0;
    int nx = 0;
    for (int i = 0; i < gf->n_nodes; ++i) {
        if (gf->nodes[i]->flags & GGML_TENSOR_FLAG_PARAM) {
            GGML_PRINT_DEBUG("found param %d: grad->op = %d\n", np, gf->nodes[i]->grad->op);

            GGML_ASSERT(np < GGML_MAX_PARAMS);

            ps[np++] = gf->nodes[i];
            nx += ggml_nelements(gf->nodes[i]);
        }
    }

    if ((opt->params.type != params.type) || (opt->nx != nx) || (opt->params.past != params.past) || (opt->params.lbfgs.m != params.lbfgs.m)) {
        int iter = opt->iter;
        ggml_opt_init(ctx, opt, params, nx);
        opt->iter = iter;
    }

    struct ggml_cplan cplan = ggml_graph_plan(gb, params.n_threads);
    struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_TYPE_WORK_BUFFER, cplan.work_size);
    cplan.work_data = (uint8_t *)ctx->mem_buffer + obj->offs;

    float * x  = opt->lbfgs.x->data;  // current parameters
    float * xp = opt->lbfgs.xp->data; // previous parameters
    float * g  = opt->lbfgs.g->data;  // current gradient
    float * gp = opt->lbfgs.gp->data; // previous gradient
    float * d  = opt->lbfgs.d->data;  // search direction

    float * pf = params.past > 0 ? opt->lbfgs.pf->data : NULL; // past function values

    const int n_accum = MAX(1, params.n_gradient_accumulation);
    const float accum_norm = 1.0f / (float) n_accum;

    float fx    = 0.0f; // cost function value
    float xnorm = 0.0f; // ||x||
    float gnorm = 0.0f; // ||g||

    // initialize x from the graph nodes
    ggml_opt_get_params(np, ps, x);

    // the L-BFGS memory
    float * lm_alpha = opt->lbfgs.lmal->data;
    float * lm_ys    = opt->lbfgs.lmys->data;
    float * lm_s     = opt->lbfgs.lms->data;
    float * lm_y     = opt->lbfgs.lmy->data;

    bool cancel = false;

    // evaluate the function value and its gradient
    {
        ggml_opt_set_params(np, ps, x);

        fx = 0;
        memset(g, 0, sizeof(float)*nx);
        for (int accum_step = 0; accum_step < n_accum; ++accum_step) {
            if (callback) {
                // L-BFGS does not support learning rate -> ignore learning schedule
                float sched = 0;
                callback(callback_data, accum_step, &sched, &cancel);
                if (cancel) {
                    return GGML_OPT_RESULT_CANCEL;
                }
            }
            // ggml_graph_reset  (gf);
            ggml_set_f32      (f->grad, 1.0f);
            ggml_graph_compute(gb, &cplan);
            ggml_opt_acc_grad(np, ps, g, accum_norm);
            fx += ggml_get_f32_1d(f, 0);
        }
        fx *= accum_norm;

        opt->loss_before = fx;
        opt->loss_after  = fx;
    }

    // search direction = -gradient
    ggml_vec_neg_f32(nx, d, g);

    // ||x||, ||g||
    ggml_vec_norm_f32(nx, &xnorm, x);
    ggml_vec_norm_f32(nx, &gnorm, g);

    if (xnorm < 1.0f) {
        xnorm = 1.0f;
    }

    // already optimized
    if (gnorm/xnorm <= params.lbfgs.eps) {
        return GGML_OPT_RESULT_OK;
    }

    if (opt->just_initialized) {
        if (pf) {
            pf[0] = fx;
        }
        opt->lbfgs.fx_best = fx;

        // initial step
        ggml_vec_norm_inv_f32(nx, &opt->lbfgs.step, d);
        opt->lbfgs.j   = 0;
        opt->lbfgs.k   = 1;
        opt->lbfgs.end = 0;
        opt->lbfgs.n_no_improvement = 0;
        opt->just_initialized = false;
    }

    float * fx_best = &opt->lbfgs.fx_best;
    float * step    = &opt->lbfgs.step;
    int * j         = &opt->lbfgs.j;
    int * k         = &opt->lbfgs.k;
    int * end       = &opt->lbfgs.end;
    int * n_no_improvement = &opt->lbfgs.n_no_improvement;

    int ls    = 0;
    int bound = 0;

    float ys   = 0.0f;
    float yy   = 0.0f;
    float beta = 0.0f;

    int it = 0;

    while (true) {
        // store the current position and gradient vectors
        ggml_vec_cpy_f32(nx, xp, x);
        ggml_vec_cpy_f32(nx, gp, g);

        // TODO: instead of passing &cancel here, use the return code of the linesearch
        //       to determine if the optimization should be cancelled
        //       this is a simple change, but not doing this atm, since I don't have a nice
        //       way to test and don't want to break something with so many changes lined up
        ls = linesearch_backtracking(&params, nx, x, &fx, g, d, step, xp, f, gb, &cplan, np, ps, &cancel, callback, callback_data);
        if (cancel) {
            return GGML_OPT_RESULT_CANCEL;
        }

        if (ls < 0) {
            // linesearch failed - go back to the previous point and return
            ggml_vec_cpy_f32(nx, x, xp);
            ggml_vec_cpy_f32(nx, g, gp);

            return ls;
        }

        opt->loss_after = fx;

        ggml_vec_norm_f32(nx, &xnorm, x);
        ggml_vec_norm_f32(nx, &gnorm, g);

        GGML_PRINT_DEBUG("f = %10.6f\n", ggml_get_f32_1d(f, 0));

        if (xnorm < 1.0f) {
            xnorm = 1.0f;
        }
        if (gnorm/xnorm <= params.lbfgs.eps) {
            // converged
            return GGML_OPT_RESULT_OK;
        }

        // delta-based convergence test
        if (pf != NULL) {
            // need at least params.past iterations to start checking for convergence
            if (params.past <= k[0]) {
                const float rate = (pf[k[0]%params.past] - fx)/fx;

                if (fabsf(rate) < params.delta) {
                    return GGML_OPT_RESULT_OK;
                }
            }

            pf[k[0]%params.past] = fx;
        }

        // check for improvement
        if (params.max_no_improvement > 0) {
            if (fx < fx_best[0]) {
                fx_best[0] = fx;
                n_no_improvement[0] = 0;
            } else {
                n_no_improvement[0]++;

                if (n_no_improvement[0] >= params.max_no_improvement) {
                    return GGML_OPT_RESULT_OK;
                }
            }
        }

        if (params.lbfgs.n_iter != 0 && params.lbfgs.n_iter < it + 1) {
            // reached the maximum number of iterations
            return GGML_OPT_RESULT_DID_NOT_CONVERGE;
        }

        // update vectors s and y:
        //   s_{k+1} = x_{k+1} - x_{k} = \step * d_{k}.
        //   y_{k+1} = g_{k+1} - g_{k}.
        //
        ggml_vec_sub_f32(nx, &lm_s[end[0]*nx], x, xp);
        ggml_vec_sub_f32(nx, &lm_y[end[0]*nx], g, gp);

        // compute scalars ys and yy:
        //   ys = y^t \cdot s    -> 1 / \rho.
        //   yy = y^t \cdot y.
        //
        ggml_vec_dot_f32(nx, &ys, 0, &lm_y[end[0]*nx], 0, &lm_s[end[0]*nx], 0, 1);
        ggml_vec_dot_f32(nx, &yy, 0, &lm_y[end[0]*nx], 0, &lm_y[end[0]*nx], 0, 1);

        lm_ys[end[0]] = ys;

        // find new search direction
        //   ref: https://en.wikipedia.org/wiki/Limited-memory_BFGS
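        //
        // two-loop recursion: starting from d = -g, walk the stored pairs
        // backwards applying d -= alpha_j*y_j, scale by ys/yy (the implicit
        // initial Hessian estimate), then walk forwards applying
        // d += (alpha_j - beta_j)*s_j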
        bound = (m <= k[0]) ? m : k[0];
        k[0]++;
        it++;
        end[0] = (end[0] + 1)%m;

        // initialize search direction with -g
        ggml_vec_neg_f32(nx, d, g);

        j[0] = end[0];
        for (int i = 0; i < bound; ++i) {
            j[0] = (j[0] + m - 1) % m;
            // \alpha_{j} = \rho_{j} s^{t}_{j} \cdot q_{k+1}
            ggml_vec_dot_f32(nx, &lm_alpha[j[0]], 0, &lm_s[j[0]*nx], 0, d, 0, 1);
            lm_alpha[j[0]] /= lm_ys[j[0]];
            // q_{i} = q_{i+1} - \alpha_{i} y_{i}
            ggml_vec_mad_f32(nx, d, &lm_y[j[0]*nx], -lm_alpha[j[0]]);
        }

        ggml_vec_scale_f32(nx, d, ys/yy);

        for (int i = 0; i < bound; ++i) {
            // \beta_{j} = \rho_{j} y^t_{j} \cdot \gamma_{i}
            ggml_vec_dot_f32(nx, &beta, 0, &lm_y[j[0]*nx], 0, d, 0, 1);
            beta /= lm_ys[j[0]];
            // \gamma_{i+1} = \gamma_{i} + (\alpha_{j} - \beta_{j}) s_{j}
            ggml_vec_mad_f32(nx, d, &lm_s[j[0]*nx], lm_alpha[j[0]] - beta);
            j[0] = (j[0] + 1)%m;
        }

        step[0] = 1.0;
    }

    GGML_ASSERT(false && "lbfgs failed");

    return GGML_OPT_RESULT_DID_NOT_CONVERGE;
}

struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type) {
    struct ggml_opt_params result;

    switch (type) {
        case GGML_OPT_TYPE_ADAM:
            {
                result = (struct ggml_opt_params) {
                    .type       = GGML_OPT_TYPE_ADAM,
                    .graph_size = GGML_DEFAULT_GRAPH_SIZE,
                    .n_threads  = 1, // FIXME: GGML_DEFAULT_N_THREADS ?
                    .past       = 0,
                    .delta      = 1e-5f,

                    .max_no_improvement = 100,

                    .print_forward_graph  = true,
                    .print_backward_graph = true,

                    .n_gradient_accumulation = 1,

                    .adam = {
                        .n_iter = 10000,
                        .sched  = 1.000f,
                        .decay  = 0.0f,
                        .decay_min_ndim = 2,
                        .alpha  = 0.001f,
                        .beta1  = 0.9f,
                        .beta2  = 0.999f,
                        .eps    = 1e-8f,
                        .eps_f  = 1e-5f,
                        .eps_g  = 1e-3f,
                        .gclip  = 0.0f,
                    },
                };
            } break;
        case GGML_OPT_TYPE_LBFGS:
            {
                result = (struct ggml_opt_params) {
                    .type       = GGML_OPT_TYPE_LBFGS,
                    .graph_size = GGML_DEFAULT_GRAPH_SIZE,
                    .n_threads  = 1,
                    .past       = 0,
                    .delta      = 1e-5f,

                    .max_no_improvement = 0,

                    .print_forward_graph  = true,
                    .print_backward_graph = true,

                    .n_gradient_accumulation = 1,

                    .lbfgs = {
                        .m              = 6,
                        .n_iter         = 100,
                        .max_linesearch = 20,

                        .eps      = 1e-5f,
                        .ftol     = 1e-4f,
                        .wolfe    = 0.9f,
                        .min_step = 1e-20f,
                        .max_step = 1e+20f,

                        .linesearch = GGML_LINESEARCH_DEFAULT,
                    },
                };
            } break;
    }

    return result;
}

GGML_API void ggml_opt_init(
        struct ggml_context * ctx,
        struct ggml_opt_context * opt,
        struct ggml_opt_params params,
        int64_t nx) {
    opt->ctx = ctx;
    opt->params = params;
    opt->iter = 0;
    opt->nx = nx;
    opt->just_initialized = true;
    if (opt->ctx == NULL) {
        struct ggml_init_params ctx_opt_params;
        if (opt->params.type == GGML_OPT_TYPE_ADAM) {
            ctx_opt_params.mem_size = GGML_MEM_ALIGN*3 + ggml_tensor_overhead()*3 + ggml_type_size(GGML_TYPE_F32)*nx*3;
            if (opt->params.past > 0) {
                ctx_opt_params.mem_size += GGML_MEM_ALIGN + ggml_tensor_overhead() + ggml_type_size(GGML_TYPE_F32)*opt->params.past;
            }
        } else if (opt->params.type == GGML_OPT_TYPE_LBFGS) {
            ctx_opt_params.mem_size = GGML_MEM_ALIGN*9 + ggml_tensor_overhead()*9 + ggml_type_size(GGML_TYPE_F32)*(nx*5 + opt->params.lbfgs.m*2 + nx*opt->params.lbfgs.m*2);
            if (opt->params.past > 0) {
                ctx_opt_params.mem_size += GGML_MEM_ALIGN + ggml_tensor_overhead() + ggml_type_size(GGML_TYPE_F32)*opt->params.past;
            }
        }
        ctx_opt_params.mem_buffer = NULL;
        ctx_opt_params.no_alloc   = false;

        opt->ctx = ggml_init(ctx_opt_params);
    }
    switch (opt->params.type) {
        case GGML_OPT_TYPE_ADAM:
            {
                opt->adam.g  = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
                opt->adam.m  = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
                opt->adam.v  = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
                opt->adam.pf = params.past > 0
                    ? ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, params.past)
                    : NULL;
                ggml_set_zero(opt->adam.m);
                ggml_set_zero(opt->adam.v);
                if (opt->adam.pf) {
                    ggml_set_zero(opt->adam.pf);
                }
            } break;
        case GGML_OPT_TYPE_LBFGS:
            {
                opt->lbfgs.x  = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
                opt->lbfgs.xp = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
                opt->lbfgs.g  = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
                opt->lbfgs.gp = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
                opt->lbfgs.d  = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
                opt->lbfgs.pf = params.past > 0
                    ? ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, params.past)
                    : NULL;
                opt->lbfgs.lmal = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, params.lbfgs.m);
                opt->lbfgs.lmys = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, params.lbfgs.m);
                opt->lbfgs.lms  = ggml_new_tensor_2d(opt->ctx, GGML_TYPE_F32, nx, params.lbfgs.m);
                opt->lbfgs.lmy  = ggml_new_tensor_2d(opt->ctx, GGML_TYPE_F32, nx, params.lbfgs.m);
                ggml_set_zero(opt->lbfgs.x);
                ggml_set_zero(opt->lbfgs.xp);
                ggml_set_zero(opt->lbfgs.g);
                ggml_set_zero(opt->lbfgs.gp);
                ggml_set_zero(opt->lbfgs.d);
                if (opt->lbfgs.pf) {
                    ggml_set_zero(opt->lbfgs.pf);
                }
                ggml_set_zero(opt->lbfgs.lmal);
                ggml_set_zero(opt->lbfgs.lmys);
                ggml_set_zero(opt->lbfgs.lms);
                ggml_set_zero(opt->lbfgs.lmy);
            } break;
    }
}

enum ggml_opt_result ggml_opt(
        struct ggml_context * ctx,
        struct ggml_opt_params params,
        struct ggml_tensor * f) {
    bool free_ctx = false;
    if (ctx == NULL) {
        struct ggml_init_params params_ctx = {
            .mem_size   = 16*1024*1024,
            .mem_buffer = NULL,
            .no_alloc   = false,
        };

        ctx = ggml_init(params_ctx);
        if (ctx == NULL) {
            return GGML_OPT_RESULT_NO_CONTEXT;
        }

        free_ctx = true;
    }

    enum ggml_opt_result result = GGML_OPT_RESULT_OK;

    struct ggml_opt_context * opt = (struct ggml_opt_context *) alloca(sizeof(struct ggml_opt_context));

    ggml_opt_init(ctx, opt, params, 0);
    result = ggml_opt_resume(ctx, opt, f);

    if (free_ctx) {
        ggml_free(ctx);
    }

    return result;
}
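
// minimal usage sketch for ggml_opt() above (illustrative only; error handling
// and tensor initialization omitted):
//
//   struct ggml_tensor * x = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
//   ggml_set_param(ctx, x);
//   struct ggml_tensor * f = ggml_mul(ctx, x, x); // minimize f(x) = x^2
//   enum ggml_opt_result res = ggml_opt(ctx, ggml_opt_default_params(GGML_OPT_TYPE_ADAM), f);
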
enum ggml_opt_result ggml_opt_resume(
        struct ggml_context * ctx,
        struct ggml_opt_context * opt,
        struct ggml_tensor * f) {
    // build forward + backward compute graphs
    struct ggml_cgraph * gf = ggml_new_graph_custom(ctx, opt->params.graph_size, true);
    ggml_build_forward_expand(gf, f);

    struct ggml_cgraph * gb = ggml_graph_dup(ctx, gf);
    ggml_build_backward_expand(ctx, gf, gb, true);

    return ggml_opt_resume_g(ctx, opt, f, gf, gb, NULL, NULL);
}

enum ggml_opt_result ggml_opt_resume_g(
        struct ggml_context * ctx,
        struct ggml_opt_context * opt,
        struct ggml_tensor * f,
        struct ggml_cgraph * gf,
        struct ggml_cgraph * gb,
        ggml_opt_callback callback,
        void * callback_data) {
    // build forward + backward compute graphs
    enum ggml_opt_result result = GGML_OPT_RESULT_OK;

    switch (opt->params.type) {
        case GGML_OPT_TYPE_ADAM:
            {
                result = ggml_opt_adam(ctx, opt, opt->params, f, gf, gb, callback, callback_data);
            } break;
        case GGML_OPT_TYPE_LBFGS:
            {
                result = ggml_opt_lbfgs(ctx, opt, opt->params, f, gf, gb, callback, callback_data);
            } break;
    }

    if (opt->params.print_forward_graph) {
        ggml_graph_print   (gf);
        ggml_graph_dump_dot(gf, NULL, "opt-forward.dot");
    }

    if (opt->params.print_backward_graph) {
        ggml_graph_print   (gb);
        ggml_graph_dump_dot(gb, gf, "opt-backward.dot");
    }

    return result;
}

////////////////////////////////////////////////////////////////////////////////

void ggml_set_input(struct ggml_tensor * tensor) {
    tensor->flags |= GGML_TENSOR_FLAG_INPUT;
}

void ggml_set_output(struct ggml_tensor * tensor) {
    tensor->flags |= GGML_TENSOR_FLAG_OUTPUT;
}

////////////////////////////////////////////////////////////////////////////////

void ggml_quantize_init(enum ggml_type type) {
    ggml_critical_section_start();

    switch (type) {
        case GGML_TYPE_IQ2_XXS:
        case GGML_TYPE_IQ2_XS:
        case GGML_TYPE_IQ2_S:
        case GGML_TYPE_IQ1_S:
        case GGML_TYPE_IQ1_M:   iq2xs_init_impl(type); break;
        case GGML_TYPE_IQ3_XXS: iq3xs_init_impl(256); break;
        case GGML_TYPE_IQ3_S:   iq3xs_init_impl(512); break;
        default: // nothing
            break;
    }

    ggml_critical_section_end();
}

void ggml_quantize_free(void) {
    ggml_critical_section_start();

    iq2xs_free_impl(GGML_TYPE_IQ2_XXS);
    iq2xs_free_impl(GGML_TYPE_IQ2_XS);
    iq2xs_free_impl(GGML_TYPE_IQ1_S);
    iq3xs_free_impl(256);

    ggml_critical_section_end();
}

bool ggml_quantize_requires_imatrix(enum ggml_type type) {
    return
        type == GGML_TYPE_IQ2_XXS ||
        type == GGML_TYPE_IQ2_XS  ||
        type == GGML_TYPE_IQ1_S;//  ||
        //type == GGML_TYPE_IQ1_M;
}

size_t ggml_quantize_chunk(
        enum ggml_type type,
        const float * src,
        void * dst,
        int64_t start,
        int64_t nrows,
        int64_t n_per_row,
        const float * imatrix) {
    const int64_t n = (int64_t) nrows * n_per_row;

    if (ggml_quantize_requires_imatrix(type)) {
        GGML_ASSERT(imatrix != NULL);
    }

    GGML_ASSERT(start % type_traits[type].blck_size == 0);
    GGML_ASSERT(start % n_per_row == 0);

    ggml_quantize_init(type); // this is noop if already initialized
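    // the asserts above guarantee that `start` falls on a row boundary, so the
    // destination offset can be expressed in whole quantized rows; e.g. for
    // Q8_0 (32-element blocks of 34 bytes) a row of 4096 floats occupies
    // 4096/32 * 34 = 4352 bytes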
    const size_t start_row = start / n_per_row;
    const size_t row_size  = ggml_row_size(type, n_per_row);

    size_t result = 0;

    switch (type) {
        case GGML_TYPE_Q4_0:    result = quantize_q4_0(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
        case GGML_TYPE_Q4_1:    result = quantize_q4_1(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
        case GGML_TYPE_Q5_0:    result = quantize_q5_0(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
        case GGML_TYPE_Q5_1:    result = quantize_q5_1(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
        case GGML_TYPE_Q8_0:    result = quantize_q8_0(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
        case GGML_TYPE_Q2_K:    result = quantize_q2_K(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
        case GGML_TYPE_Q3_K:    result = quantize_q3_K(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
        case GGML_TYPE_Q4_K:    result = quantize_q4_K(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
        case GGML_TYPE_Q5_K:    result = quantize_q5_K(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
        case GGML_TYPE_Q6_K:    result = quantize_q6_K(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
        case GGML_TYPE_IQ2_XXS: result = quantize_iq2_xxs(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
        case GGML_TYPE_IQ2_XS:  result = quantize_iq2_xs (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
        case GGML_TYPE_IQ3_XXS: result = quantize_iq3_xxs(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
        case GGML_TYPE_IQ3_S:   result = quantize_iq3_s  (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
        case GGML_TYPE_IQ2_S:   result = quantize_iq2_s  (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
        case GGML_TYPE_IQ1_S:   result = quantize_iq1_s  (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
        case GGML_TYPE_IQ1_M:   result = quantize_iq1_m  (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
        case GGML_TYPE_IQ4_NL:  result = quantize_iq4_nl (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
#if QK_K == 64
        case GGML_TYPE_IQ4_XS:  result = quantize_iq4_nl (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
#else
        case GGML_TYPE_IQ4_XS:  result = quantize_iq4_xs (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
#endif
        case GGML_TYPE_F16:
            {
                size_t elemsize = sizeof(ggml_fp16_t);
                ggml_fp32_to_fp16_row(src + start, (ggml_fp16_t *)dst + start, n);
                result = n * elemsize;
            } break;
        case GGML_TYPE_BF16:
            {
                size_t elemsize = sizeof(ggml_bf16_t);
                ggml_fp32_to_bf16_row(src + start, (ggml_bf16_t *)dst + start, n);
                result = n * elemsize;
            } break;
        case GGML_TYPE_F32:
            {
                size_t elemsize = sizeof(float);
                result = n * elemsize;
                memcpy((uint8_t *)dst + start * elemsize, src + start, result);
            } break;
        default:
            assert(false);
    }

    GGML_ASSERT(result == nrows * row_size);

    return result;
}

////////////////////////////////////////////////////////////////////////////////

struct gguf_str {
    uint64_t n;  // GGUFv2
    char * data;
};

static const size_t GGUF_TYPE_SIZE[GGUF_TYPE_COUNT] = {
    [GGUF_TYPE_UINT8]   = sizeof(uint8_t),
    [GGUF_TYPE_INT8]    = sizeof(int8_t),
    [GGUF_TYPE_UINT16]  = sizeof(uint16_t),
    [GGUF_TYPE_INT16]   = sizeof(int16_t),
    [GGUF_TYPE_UINT32]  = sizeof(uint32_t),
    [GGUF_TYPE_INT32]   = sizeof(int32_t),
    [GGUF_TYPE_FLOAT32] = sizeof(float),
    [GGUF_TYPE_BOOL]    = sizeof(bool),
    [GGUF_TYPE_STRING]  = sizeof(struct gguf_str),
    [GGUF_TYPE_UINT64]  = sizeof(uint64_t),
    [GGUF_TYPE_INT64]   = sizeof(int64_t),
    [GGUF_TYPE_FLOAT64] = sizeof(double),
    [GGUF_TYPE_ARRAY]   = 0, // undefined
};
static_assert(GGUF_TYPE_COUNT == 13, "GGUF_TYPE_COUNT != 13");

static const char * GGUF_TYPE_NAME[GGUF_TYPE_COUNT] = {
    [GGUF_TYPE_UINT8]   = "u8",
    [GGUF_TYPE_INT8]    = "i8",
    [GGUF_TYPE_UINT16]  = "u16",
    [GGUF_TYPE_INT16]   = "i16",
    [GGUF_TYPE_UINT32]  = "u32",
    [GGUF_TYPE_INT32]   = "i32",
    [GGUF_TYPE_FLOAT32] = "f32",
    [GGUF_TYPE_BOOL]    = "bool",
    [GGUF_TYPE_STRING]  = "str",
    [GGUF_TYPE_ARRAY]   = "arr",
    [GGUF_TYPE_UINT64]  = "u64",
    [GGUF_TYPE_INT64]   = "i64",
    [GGUF_TYPE_FLOAT64] = "f64",
};
static_assert(GGUF_TYPE_COUNT == 13, "GGUF_TYPE_COUNT != 13");

union gguf_value {
    uint8_t  uint8;
    int8_t   int8;
    uint16_t uint16;
    int16_t  int16;
    uint32_t uint32;
    int32_t  int32;
    float    float32;
    uint64_t uint64;
    int64_t  int64;
    double   float64;
    bool     bool_;

    struct gguf_str str;

    struct {
        enum gguf_type type;

        uint64_t n;  // GGUFv2
        void * data;
    } arr;
};

struct gguf_kv {
    struct gguf_str key;

    enum  gguf_type  type;
    union gguf_value value;
};

struct gguf_header {
    char magic[4];

    uint32_t version;
    uint64_t n_tensors; // GGUFv2
    uint64_t n_kv;      // GGUFv2
};
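
// on-disk layout, as read by gguf_init_from_file() below: the header above,
// then n_kv key-value pairs, n_tensors tensor infos, padding up to
// `alignment`, and finally the tensor data itself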
  17874. struct gguf_tensor_info {
  17875. struct gguf_str name;
  17876. uint32_t n_dims;
  17877. uint64_t ne[GGML_MAX_DIMS];
  17878. enum ggml_type type;
  17879. uint64_t offset; // offset from start of `data`, must be a multiple of `ALIGNMENT`
  17880. // for writing API
  17881. const void * data;
  17882. size_t size;
  17883. };
  17884. struct gguf_context {
  17885. struct gguf_header header;
  17886. struct gguf_kv * kv;
  17887. struct gguf_tensor_info * infos;
  17888. size_t alignment;
  17889. size_t offset; // offset of `data` from beginning of file
  17890. size_t size; // size of `data` in bytes
  17891. //uint8_t * padding;
  17892. void * data;
  17893. };
  17894. static size_t gguf_type_size(enum gguf_type type) {
  17895. GGML_ASSERT(0 <= type && type < GGUF_TYPE_COUNT);
  17896. return GGUF_TYPE_SIZE[type];
  17897. }
  17898. static void gguf_tensor_info_sanitize(struct gguf_tensor_info * info) {
  17899. GGML_ASSERT(info->n_dims <= GGML_MAX_DIMS);
  17900. GGML_ASSERT(0 <= info->type && info->type < GGML_TYPE_COUNT);
  17901. for (uint32_t i = 0; i < info->n_dims; ++i) {
  17902. GGML_ASSERT(info->ne[i] > 0);
  17903. }
  17904. // prevent overflow for total number of elements
  17905. GGML_ASSERT(INT64_MAX/info->ne[1] > info->ne[0]);
  17906. GGML_ASSERT(INT64_MAX/info->ne[2] > info->ne[0]*info->ne[1]);
  17907. GGML_ASSERT(INT64_MAX/info->ne[3] > info->ne[0]*info->ne[1]*info->ne[2]);
  17908. }
  17909. static bool gguf_fread_el(FILE * file, void * dst, size_t size, size_t * offset) {
  17910. const size_t n = fread(dst, 1, size, file);
  17911. *offset += n;
  17912. return n == size;
  17913. }
  17914. static bool gguf_fread_str(FILE * file, struct gguf_str * p, size_t * offset) {
  17915. p->n = 0;
  17916. p->data = NULL;
  17917. bool ok = true;
  17918. ok = ok && gguf_fread_el(file, &p->n, sizeof(p->n), offset);
  17919. // early exit if string length is invalid, prevents from integer overflow
  17920. if (p->n == SIZE_MAX) {
  17921. fprintf(stderr, "%s: invalid string length (%" PRIu64 ")\n", __func__, p->n);
  17922. return false;
  17923. }
  17924. p->data = GGML_CALLOC(p->n + 1, 1);
  17925. ok = ok && gguf_fread_el(file, p->data, p->n, offset);
  17926. return ok;
  17927. }
  17928. static void gguf_free_kv(struct gguf_kv * kv) {
  17929. if (kv->key.data) {
  17930. GGML_FREE(kv->key.data);
  17931. }
  17932. if (kv->type == GGUF_TYPE_STRING) {
  17933. if (kv->value.str.data) {
  17934. GGML_FREE(kv->value.str.data);
  17935. }
  17936. }
  17937. if (kv->type == GGUF_TYPE_ARRAY) {
  17938. if (kv->value.arr.data) {
  17939. if (kv->value.arr.type == GGUF_TYPE_STRING) {
  17940. for (uint64_t j = 0; j < kv->value.arr.n; ++j) {
  17941. struct gguf_str * str = &((struct gguf_str *) kv->value.arr.data)[j];
  17942. if (str->data) {
  17943. GGML_FREE(str->data);
  17944. }
  17945. }
  17946. }
  17947. GGML_FREE(kv->value.arr.data);
  17948. }
  17949. }
  17950. }
  17951. struct gguf_context * gguf_init_empty(void) {
  17952. struct gguf_context * ctx = GGML_CALLOC(1, sizeof(struct gguf_context));
  17953. memcpy(ctx->header.magic, GGUF_MAGIC, sizeof(ctx->header.magic));
  17954. ctx->header.version = GGUF_VERSION;
  17955. ctx->header.n_tensors = 0;
  17956. ctx->header.n_kv = 0;
  17957. ctx->kv = NULL;
  17958. ctx->infos = NULL;
  17959. ctx->alignment = GGUF_DEFAULT_ALIGNMENT;
  17960. ctx->offset = 0;
  17961. ctx->size = 0;
  17962. ctx->data = NULL;
  17963. return ctx;
  17964. }
  17965. struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_params params) {
  17966. FILE * file = ggml_fopen(fname, "rb");
  17967. if (!file) {
  17968. return NULL;
  17969. }
  17970. // offset from start of file
  17971. size_t offset = 0;
  17972. char magic[4];
  17973. // check the magic before making allocations
  17974. {
  17975. gguf_fread_el(file, &magic, sizeof(magic), &offset);
  17976. for (uint32_t i = 0; i < sizeof(magic); i++) {
  17977. if (magic[i] != GGUF_MAGIC[i]) {
  17978. fprintf(stderr, "%s: invalid magic characters '%c%c%c%c'\n", __func__, magic[0], magic[1], magic[2], magic[3]);
  17979. fclose(file);
  17980. return NULL;
  17981. }
  17982. }
  17983. }
  17984. bool ok = true;
  17985. struct gguf_context * ctx = GGML_CALLOC(1, sizeof(struct gguf_context));
  17986. // read the header
  17987. {
  17988. strncpy(ctx->header.magic, magic, 4);
  17989. ctx->kv = NULL;
  17990. ctx->infos = NULL;
  17991. ctx->data = NULL;
  17992. ok = ok && gguf_fread_el(file, &ctx->header.version, sizeof(ctx->header.version), &offset);
  17993. ok = ok && gguf_fread_el(file, &ctx->header.n_tensors, sizeof(ctx->header.n_tensors), &offset);
  17994. ok = ok && gguf_fread_el(file, &ctx->header.n_kv, sizeof(ctx->header.n_kv), &offset);
  17995. if (ctx->header.version == 1) {
  17996. fprintf(stderr, "%s: GGUFv1 is no longer supported. please use a more up-to-date version\n", __func__);
  17997. fclose(file);
  17998. gguf_free(ctx);
  17999. return NULL;
  18000. }
  18001. // sanity-checks to prevent from integer/buffer overflows
  18002. ok = ok && (ctx->header.n_tensors < (SIZE_MAX/2)/sizeof(struct gguf_tensor_info));
  18003. ok = ok && (ctx->header.n_tensors < (SIZE_MAX/2)/ggml_tensor_overhead());
  18004. ok = ok && (ctx->header.n_kv < (SIZE_MAX/2)/sizeof(struct gguf_kv));
  18005. if (!ok) {
  18006. fprintf(stderr, "%s: failed to read header\n", __func__);
  18007. fclose(file);
  18008. gguf_free(ctx);
  18009. return NULL;
  18010. }
  18011. }
    // read the kv pairs
    {
        const uint64_t n_kv = ctx->header.n_kv;

        // header.n_kv will hold the actual number of pairs that were successfully read in the loop below
        ctx->header.n_kv = 0;
        ctx->kv = GGML_CALLOC(n_kv, sizeof(struct gguf_kv));

        for (uint64_t i = 0; i < n_kv; ++i) {
            struct gguf_kv * kv = &ctx->kv[i];

            //fprintf(stderr, "%s: reading kv %d\n", __func__, i);

            ok = ok && gguf_fread_str(file, &kv->key,                    &offset);
            ok = ok && gguf_fread_el (file, &kv->type, sizeof(kv->type), &offset);

            //fprintf(stderr, "%s: reading kv with key %s\n", __func__, kv->key.data);

            switch (kv->type) {
                case GGUF_TYPE_UINT8:   ok = ok && gguf_fread_el (file, &kv->value.uint8,   sizeof(kv->value.uint8),   &offset); break;
                case GGUF_TYPE_INT8:    ok = ok && gguf_fread_el (file, &kv->value.int8,    sizeof(kv->value.int8),    &offset); break;
                case GGUF_TYPE_UINT16:  ok = ok && gguf_fread_el (file, &kv->value.uint16,  sizeof(kv->value.uint16),  &offset); break;
                case GGUF_TYPE_INT16:   ok = ok && gguf_fread_el (file, &kv->value.int16,   sizeof(kv->value.int16),   &offset); break;
                case GGUF_TYPE_UINT32:  ok = ok && gguf_fread_el (file, &kv->value.uint32,  sizeof(kv->value.uint32),  &offset); break;
                case GGUF_TYPE_INT32:   ok = ok && gguf_fread_el (file, &kv->value.int32,   sizeof(kv->value.int32),   &offset); break;
                case GGUF_TYPE_FLOAT32: ok = ok && gguf_fread_el (file, &kv->value.float32, sizeof(kv->value.float32), &offset); break;
                case GGUF_TYPE_UINT64:  ok = ok && gguf_fread_el (file, &kv->value.uint64,  sizeof(kv->value.uint64),  &offset); break;
                case GGUF_TYPE_INT64:   ok = ok && gguf_fread_el (file, &kv->value.int64,   sizeof(kv->value.int64),   &offset); break;
                case GGUF_TYPE_FLOAT64: ok = ok && gguf_fread_el (file, &kv->value.float64, sizeof(kv->value.float64), &offset); break;
                case GGUF_TYPE_BOOL:    ok = ok && gguf_fread_el (file, &kv->value.bool_,   sizeof(kv->value.bool_),   &offset); break;
                case GGUF_TYPE_STRING:  ok = ok && gguf_fread_str(file, &kv->value.str,                                &offset); break;
                case GGUF_TYPE_ARRAY:
                    {
                        ok = ok && gguf_fread_el(file, &kv->value.arr.type, sizeof(kv->value.arr.type), &offset);
                        ok = ok && gguf_fread_el(file, &kv->value.arr.n,    sizeof(kv->value.arr.n),    &offset);

                        switch (kv->value.arr.type) {
                            case GGUF_TYPE_UINT8:
                            case GGUF_TYPE_INT8:
                            case GGUF_TYPE_UINT16:
                            case GGUF_TYPE_INT16:
                            case GGUF_TYPE_UINT32:
                            case GGUF_TYPE_INT32:
                            case GGUF_TYPE_FLOAT32:
                            case GGUF_TYPE_UINT64:
                            case GGUF_TYPE_INT64:
                            case GGUF_TYPE_FLOAT64:
                            case GGUF_TYPE_BOOL:
                                {
                                    // prevent integer overflow in the allocation below
                                    if (kv->value.arr.n >= SIZE_MAX/gguf_type_size(kv->value.arr.type)) {
                                        fprintf(stderr, "%s: array size is too large (%" PRIu64 ")\n", __func__, kv->value.arr.n);
                                        fclose(file);
                                        gguf_free(ctx);
                                        return NULL;
                                    }

                                    kv->value.arr.data = GGML_CALLOC(kv->value.arr.n, gguf_type_size(kv->value.arr.type));

                                    ok = ok && gguf_fread_el(file, kv->value.arr.data, kv->value.arr.n * gguf_type_size(kv->value.arr.type), &offset);
                                } break;
                            case GGUF_TYPE_STRING:
                                {
                                    // prevent integer overflow in the allocation below
                                    if (kv->value.arr.n >= SIZE_MAX/sizeof(struct gguf_str)) {
                                        fprintf(stderr, "%s: array size is too large (%" PRIu64 ")\n", __func__, kv->value.arr.n);
                                        fclose(file);
                                        gguf_free(ctx);
                                        return NULL;
                                    }

                                    kv->value.arr.data = GGML_CALLOC(kv->value.arr.n, sizeof(struct gguf_str));

                                    for (uint64_t j = 0; j < kv->value.arr.n; ++j) {
                                        ok = ok && gguf_fread_str(file, &((struct gguf_str *) kv->value.arr.data)[j], &offset);
                                    }
                                } break;
                            case GGUF_TYPE_ARRAY:
                            default: GGML_ASSERT(false && "invalid type"); break;
                        }
                    } break;
                default: GGML_ASSERT(false && "invalid type");
            }

            if (!ok) {
                break;
            }

            ctx->header.n_kv++;
        }

        if (!ok) {
            fprintf(stderr, "%s: failed to read key-value pairs\n", __func__);
            fclose(file);
            gguf_free(ctx);
            return NULL;
        }
    }
    // read the tensor infos
    if (ctx->header.n_tensors > 0) {
        ctx->infos = GGML_CALLOC(ctx->header.n_tensors, sizeof(struct gguf_tensor_info));

        for (uint64_t i = 0; i < ctx->header.n_tensors; ++i) {
            struct gguf_tensor_info * info = &ctx->infos[i];

            for (int j = 0; j < GGML_MAX_DIMS; ++j) {
                info->ne[j] = 1;
            }

            ok = ok && gguf_fread_str(file, &info->name,                          &offset);
            ok = ok && gguf_fread_el (file, &info->n_dims, sizeof(info->n_dims),  &offset);

            ok = ok && (info->n_dims <= GGML_MAX_DIMS);

            for (uint32_t j = 0; j < info->n_dims; ++j) {
                ok = ok && gguf_fread_el(file, &info->ne[j], sizeof(info->ne[j]), &offset);
            }

            ok = ok && gguf_fread_el (file, &info->type,   sizeof(info->type),    &offset);
            ok = ok && gguf_fread_el (file, &info->offset, sizeof(info->offset),  &offset);

            // TODO: return an error instead of crashing with GGML_ASSERT
            gguf_tensor_info_sanitize(info);

            // make sure there are no duplicated tensor names
            for (uint64_t j = 0; j < i; ++j) {
                if (strcmp(info->name.data, ctx->infos[j].name.data) == 0) {
                    fprintf(stderr, "%s: duplicated tensor name %s\n", __func__, info->name.data);
                    ok = false;
                }
            }

            if (!ok) {
                fprintf(stderr, "%s: failed to read tensor info\n", __func__);
                fclose(file);
                gguf_free(ctx);
                return NULL;
            }
        }
    }
    ctx->alignment = GGUF_DEFAULT_ALIGNMENT;

    int alignment_idx = gguf_find_key(ctx, "general.alignment");
    if (alignment_idx != -1) {
        ctx->alignment = gguf_get_val_u32(ctx, alignment_idx);
    }

    // we require the data section to be aligned, so take into account any padding
    {
        const size_t offset_pad = offset % ctx->alignment;

        if (offset_pad != 0) {
            offset += ctx->alignment - offset_pad;
            fseek(file, offset, SEEK_SET);
        }
    }

    // store the current file offset - this is where the data section starts
    ctx->offset = offset;

    // compute the total size of the data section, taking into account the alignment
    {
        ctx->size = 0;
        for (uint64_t i = 0; i < ctx->header.n_tensors; ++i) {
            struct gguf_tensor_info * info = &ctx->infos[i];

            const int64_t ne =
                (int64_t) info->ne[0] *
                (int64_t) info->ne[1] *
                (int64_t) info->ne[2] *
                (int64_t) info->ne[3];

            if (ne % ggml_blck_size(info->type) != 0) {
                fprintf(stderr, "%s: tensor '%s' of type %d (%s) number of elements (%" PRId64 ") is not a multiple of block size (%d)\n",
                        __func__, info->name.data, (int)info->type, ggml_type_name(info->type), ne, ggml_blck_size(info->type));
                fclose(file);
                gguf_free(ctx);
                return NULL;
            }

            const size_t size_cur = ggml_row_size(info->type, ne);

            ctx->size += GGML_PAD(size_cur, ctx->alignment);
        }
    }
    // load the tensor data only if requested
    if (params.ctx != NULL) {
        // if no_alloc was requested in the init params, we create "empty" tensors and do not read the binary blob
        // otherwise, we load the binary blob into the created ggml_context as well, and point the "data" members of
        // the ggml_tensor structs to the appropriate locations in the binary blob

        // compute the exact size needed for the new ggml_context
        const size_t mem_size =
            params.no_alloc ?
            (ctx->header.n_tensors    )*ggml_tensor_overhead() :
            (ctx->header.n_tensors + 1)*ggml_tensor_overhead() + ctx->size;

        struct ggml_init_params pdata = {
            .mem_size   = mem_size,
            .mem_buffer = NULL,
            .no_alloc   = params.no_alloc,
        };

        *params.ctx = ggml_init(pdata);

        struct ggml_context * ctx_data = *params.ctx;

        struct ggml_tensor * data = NULL;

        if (!params.no_alloc) {
            data = ggml_new_tensor_1d(ctx_data, GGML_TYPE_I8, ctx->size);

            ok = ok && data != NULL;

            // read the binary blob with the tensor data
            ok = ok && gguf_fread_el(file, data->data, ctx->size, &offset);

            if (!ok) {
                fprintf(stderr, "%s: failed to read tensor data\n", __func__);
                fclose(file);
                ggml_free(ctx_data);
                gguf_free(ctx);
                return NULL;
            }

            ctx->data = data->data;
        }

        ggml_set_no_alloc(ctx_data, true);

        // create the tensors
        for (uint64_t i = 0; i < ctx->header.n_tensors; ++i) {
            const int64_t ne[GGML_MAX_DIMS] = {
                ctx->infos[i].ne[0],
                ctx->infos[i].ne[1],
                ctx->infos[i].ne[2],
                ctx->infos[i].ne[3],
            };

            struct ggml_tensor * cur = ggml_new_tensor(ctx_data, ctx->infos[i].type, ctx->infos[i].n_dims, ne);

            ok = ok && cur != NULL;

            if (!ok) {
                break;
            }

            ggml_set_name(cur, ctx->infos[i].name.data);

            // point the data member to the appropriate location in the binary blob using the tensor infos
            if (!params.no_alloc) {
                //cur->data = (char *) data->data + ctx->infos[i].offset - ctx->offset; // offset from start of file
                cur->data = (char *) data->data + ctx->infos[i].offset; // offset from data
            }
        }

        if (!ok) {
            fprintf(stderr, "%s: failed to read the tensor data\n", __func__);
            fclose(file);
            ggml_free(ctx_data);
            gguf_free(ctx);
            return NULL;
        }

        ggml_set_no_alloc(ctx_data, params.no_alloc);
    }

    fclose(file);

    return ctx;
}
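
// Example (illustrative sketch, not part of ggml): typical read-and-free use of
// the loader above; "model.gguf" is a placeholder path.
//
//     struct ggml_context * ctx_data = NULL;
//     struct gguf_init_params params = {
//         /*.no_alloc =*/ false,
//         /*.ctx      =*/ &ctx_data,
//     };
//     struct gguf_context * gctx = gguf_init_from_file("model.gguf", params);
//     if (gctx) {
//         // ... inspect kv pairs and tensors here ...
//         gguf_free(gctx);
//         ggml_free(ctx_data);
//     }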
void gguf_free(struct gguf_context * ctx) {
    if (ctx == NULL) {
        return;
    }

    if (ctx->kv) {
        // free string memory - not great..
        for (uint64_t i = 0; i < ctx->header.n_kv; ++i) {
            gguf_free_kv(&ctx->kv[i]);
        }

        GGML_FREE(ctx->kv);
    }

    if (ctx->infos) {
        for (uint64_t i = 0; i < ctx->header.n_tensors; ++i) {
            struct gguf_tensor_info * info = &ctx->infos[i];

            if (info->name.data) {
                GGML_FREE(info->name.data);
            }
        }

        GGML_FREE(ctx->infos);
    }

    GGML_FREE(ctx);
}
const char * gguf_type_name(enum gguf_type type) {
    return GGUF_TYPE_NAME[type];
}

int gguf_get_version(const struct gguf_context * ctx) {
    return ctx->header.version;
}

size_t gguf_get_alignment(const struct gguf_context * ctx) {
    return ctx->alignment;
}

size_t gguf_get_data_offset(const struct gguf_context * ctx) {
    return ctx->offset;
}

void * gguf_get_data(const struct gguf_context * ctx) {
    return ctx->data;
}

int gguf_get_n_kv(const struct gguf_context * ctx) {
    return ctx->header.n_kv;
}

int gguf_find_key(const struct gguf_context * ctx, const char * key) {
    // return -1 if key not found
    int keyfound = -1;

    const int n_kv = gguf_get_n_kv(ctx);

    for (int i = 0; i < n_kv; ++i) {
        if (strcmp(key, gguf_get_key(ctx, i)) == 0) {
            keyfound = i;
            break;
        }
    }

    return keyfound;
}
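
// Example (illustrative sketch): looking up a typed value by key; the caller is
// expected to know (or check) the stored type before calling a typed getter.
//
//     const int idx = gguf_find_key(gctx, "general.alignment");
//     if (idx >= 0 && gguf_get_kv_type(gctx, idx) == GGUF_TYPE_UINT32) {
//         const uint32_t alignment = gguf_get_val_u32(gctx, idx);
//     }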
const char * gguf_get_key(const struct gguf_context * ctx, int key_id) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    return ctx->kv[key_id].key.data;
}

enum gguf_type gguf_get_kv_type(const struct gguf_context * ctx, int key_id) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    return ctx->kv[key_id].type;
}

enum gguf_type gguf_get_arr_type(const struct gguf_context * ctx, int key_id) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_ARRAY);
    return ctx->kv[key_id].value.arr.type;
}

const void * gguf_get_arr_data(const struct gguf_context * ctx, int key_id) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_ARRAY);
    return ctx->kv[key_id].value.arr.data;
}

const char * gguf_get_arr_str(const struct gguf_context * ctx, int key_id, int i) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_ARRAY);
    struct gguf_kv * kv = &ctx->kv[key_id];
    struct gguf_str * str = &((struct gguf_str *) kv->value.arr.data)[i];
    return str->data;
}

int gguf_get_arr_n(const struct gguf_context * ctx, int key_id) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_ARRAY);
    return ctx->kv[key_id].value.arr.n;
}
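
// Example (illustrative sketch): iterating a string array such as a tokenizer
// vocabulary; "tokenizer.ggml.tokens" is a conventional key, used here only as
// an illustration.
//
//     const int idx = gguf_find_key(gctx, "tokenizer.ggml.tokens");
//     if (idx >= 0 && gguf_get_arr_type(gctx, idx) == GGUF_TYPE_STRING) {
//         const int n = gguf_get_arr_n(gctx, idx);
//         for (int i = 0; i < n; ++i) {
//             printf("%s\n", gguf_get_arr_str(gctx, idx, i));
//         }
//     }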
uint8_t gguf_get_val_u8(const struct gguf_context * ctx, int key_id) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_UINT8);
    return ctx->kv[key_id].value.uint8;
}

int8_t gguf_get_val_i8(const struct gguf_context * ctx, int key_id) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_INT8);
    return ctx->kv[key_id].value.int8;
}

uint16_t gguf_get_val_u16(const struct gguf_context * ctx, int key_id) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_UINT16);
    return ctx->kv[key_id].value.uint16;
}

int16_t gguf_get_val_i16(const struct gguf_context * ctx, int key_id) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_INT16);
    return ctx->kv[key_id].value.int16;
}

uint32_t gguf_get_val_u32(const struct gguf_context * ctx, int key_id) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_UINT32);
    return ctx->kv[key_id].value.uint32;
}

int32_t gguf_get_val_i32(const struct gguf_context * ctx, int key_id) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_INT32);
    return ctx->kv[key_id].value.int32;
}

float gguf_get_val_f32(const struct gguf_context * ctx, int key_id) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_FLOAT32);
    return ctx->kv[key_id].value.float32;
}

uint64_t gguf_get_val_u64(const struct gguf_context * ctx, int key_id) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_UINT64);
    return ctx->kv[key_id].value.uint64;
}

int64_t gguf_get_val_i64(const struct gguf_context * ctx, int key_id) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_INT64);
    return ctx->kv[key_id].value.int64;
}

double gguf_get_val_f64(const struct gguf_context * ctx, int key_id) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_FLOAT64);
    return ctx->kv[key_id].value.float64;
}

bool gguf_get_val_bool(const struct gguf_context * ctx, int key_id) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_BOOL);
    return ctx->kv[key_id].value.bool_;
}

const char * gguf_get_val_str(const struct gguf_context * ctx, int key_id) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_STRING);
    return ctx->kv[key_id].value.str.data;
}

const void * gguf_get_val_data(const struct gguf_context * ctx, int key_id) {
    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
    GGML_ASSERT(ctx->kv[key_id].type != GGUF_TYPE_ARRAY);
    GGML_ASSERT(ctx->kv[key_id].type != GGUF_TYPE_STRING);
    return &ctx->kv[key_id].value;
}
int gguf_get_n_tensors(const struct gguf_context * ctx) {
    return ctx->header.n_tensors;
}

int gguf_find_tensor(const struct gguf_context * ctx, const char * name) {
    // return -1 if tensor not found
    int tensorfound = -1;

    const int n_tensors = gguf_get_n_tensors(ctx);

    for (int i = 0; i < n_tensors; ++i) {
        if (strcmp(name, gguf_get_tensor_name(ctx, i)) == 0) {
            tensorfound = i;
            break;
        }
    }

    return tensorfound;
}
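
// Example (illustrative sketch): locating a tensor's payload in the file. The
// per-tensor offset is relative to the start of the data section, so the
// absolute file position is data_offset + tensor_offset; "output.weight" is a
// placeholder name.
//
//     const int i = gguf_find_tensor(gctx, "output.weight");
//     if (i >= 0) {
//         const size_t file_pos = gguf_get_data_offset(gctx) + gguf_get_tensor_offset(gctx, i);
//     }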
size_t gguf_get_tensor_offset(const struct gguf_context * ctx, int i) {
    return ctx->infos[i].offset;
}

char * gguf_get_tensor_name(const struct gguf_context * ctx, int i) {
    return ctx->infos[i].name.data;
}

enum ggml_type gguf_get_tensor_type(const struct gguf_context * ctx, int i) {
    return ctx->infos[i].type;
}

// returns the index
static int gguf_get_or_add_key(struct gguf_context * ctx, const char * key) {
    const int idx = gguf_find_key(ctx, key);
    if (idx >= 0) {
        return idx;
    }

    const int n_kv = gguf_get_n_kv(ctx);

    ctx->kv = realloc(ctx->kv, (n_kv + 1) * sizeof(struct gguf_kv));
    ctx->kv[n_kv].key.n    = strlen(key);
    ctx->kv[n_kv].key.data = strdup(key);
    ctx->header.n_kv++;

    return n_kv;
}

void gguf_remove_key(struct gguf_context * ctx, const char * key) {
    const int idx = gguf_find_key(ctx, key);
    if (idx >= 0) {
        const int n_kv = gguf_get_n_kv(ctx);
        gguf_free_kv(&ctx->kv[idx]);
        for (int i = idx; i < n_kv-1; ++i) {
            ctx->kv[i] = ctx->kv[i+1];
        }
        ctx->kv = realloc(ctx->kv, (n_kv - 1) * sizeof(struct gguf_kv));
        ctx->header.n_kv--;
    }
}
void gguf_set_val_u8(struct gguf_context * ctx, const char * key, uint8_t val) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type        = GGUF_TYPE_UINT8;
    ctx->kv[idx].value.uint8 = val;
}

void gguf_set_val_i8(struct gguf_context * ctx, const char * key, int8_t val) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type       = GGUF_TYPE_INT8;
    ctx->kv[idx].value.int8 = val;
}

void gguf_set_val_u16(struct gguf_context * ctx, const char * key, uint16_t val) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type         = GGUF_TYPE_UINT16;
    ctx->kv[idx].value.uint16 = val;
}

void gguf_set_val_i16(struct gguf_context * ctx, const char * key, int16_t val) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type        = GGUF_TYPE_INT16;
    ctx->kv[idx].value.int16 = val;
}

void gguf_set_val_u32(struct gguf_context * ctx, const char * key, uint32_t val) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type         = GGUF_TYPE_UINT32;
    ctx->kv[idx].value.uint32 = val;
}

void gguf_set_val_i32(struct gguf_context * ctx, const char * key, int32_t val) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type        = GGUF_TYPE_INT32;
    ctx->kv[idx].value.int32 = val;
}

void gguf_set_val_f32(struct gguf_context * ctx, const char * key, float val) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type          = GGUF_TYPE_FLOAT32;
    ctx->kv[idx].value.float32 = val;
}

void gguf_set_val_u64(struct gguf_context * ctx, const char * key, uint64_t val) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type         = GGUF_TYPE_UINT64;
    ctx->kv[idx].value.uint64 = val;
}

void gguf_set_val_i64(struct gguf_context * ctx, const char * key, int64_t val) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type        = GGUF_TYPE_INT64;
    ctx->kv[idx].value.int64 = val;
}

void gguf_set_val_f64(struct gguf_context * ctx, const char * key, double val) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type          = GGUF_TYPE_FLOAT64;
    ctx->kv[idx].value.float64 = val;
}

void gguf_set_val_bool(struct gguf_context * ctx, const char * key, bool val) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type        = GGUF_TYPE_BOOL;
    ctx->kv[idx].value.bool_ = val;
}

void gguf_set_val_str(struct gguf_context * ctx, const char * key, const char * val) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type           = GGUF_TYPE_STRING;
    ctx->kv[idx].value.str.n    = strlen(val);
    ctx->kv[idx].value.str.data = strdup(val);
}
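
// Example (illustrative sketch): writer-side metadata. The keys are
// placeholders, gctx_out is assumed to come from gguf_init_empty(), and
// gguf_set_val_* overwrites the value if the key already exists.
//
//     struct gguf_context * gctx_out = gguf_init_empty();
//     gguf_set_val_str (gctx_out, "general.architecture", "llama");
//     gguf_set_val_u32 (gctx_out, "general.alignment", 32);
//     gguf_set_val_bool(gctx_out, "example.flag", true);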
void gguf_set_arr_data(struct gguf_context * ctx, const char * key, enum gguf_type type, const void * data, int n) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type           = GGUF_TYPE_ARRAY;
    ctx->kv[idx].value.arr.type = type;
    ctx->kv[idx].value.arr.n    = n;
    ctx->kv[idx].value.arr.data = GGML_CALLOC(n, gguf_type_size(type));
    memcpy(ctx->kv[idx].value.arr.data, data, n*gguf_type_size(type));
}

void gguf_set_arr_str(struct gguf_context * ctx, const char * key, const char ** data, int n) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type           = GGUF_TYPE_ARRAY;
    ctx->kv[idx].value.arr.type = GGUF_TYPE_STRING;
    ctx->kv[idx].value.arr.n    = n;
    ctx->kv[idx].value.arr.data = GGML_CALLOC(n, sizeof(struct gguf_str));
    for (int i = 0; i < n; i++) {
        struct gguf_str * str = &((struct gguf_str *)ctx->kv[idx].value.arr.data)[i];
        str->n    = strlen(data[i]);
        str->data = strdup(data[i]);
    }
}
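
// Example (illustrative sketch): storing arrays with the setters above; the
// data is copied, so the caller keeps ownership of the source buffers.
//
//     const int32_t vals[3] = { 1, 2, 3 };
//     gguf_set_arr_data(gctx_out, "example.ints", GGUF_TYPE_INT32, vals, 3);
//
//     const char * names[2] = { "foo", "bar" };
//     gguf_set_arr_str(gctx_out, "example.strings", names, 2);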
// set or add KV pairs from another context
void gguf_set_kv(struct gguf_context * ctx, struct gguf_context * src) {
    for (uint32_t i = 0; i < src->header.n_kv; i++) {
        switch (src->kv[i].type) {
            case GGUF_TYPE_UINT8:   gguf_set_val_u8  (ctx, src->kv[i].key.data, src->kv[i].value.uint8);    break;
            case GGUF_TYPE_INT8:    gguf_set_val_i8  (ctx, src->kv[i].key.data, src->kv[i].value.int8);     break;
            case GGUF_TYPE_UINT16:  gguf_set_val_u16 (ctx, src->kv[i].key.data, src->kv[i].value.uint16);   break;
            case GGUF_TYPE_INT16:   gguf_set_val_i16 (ctx, src->kv[i].key.data, src->kv[i].value.int16);    break;
            case GGUF_TYPE_UINT32:  gguf_set_val_u32 (ctx, src->kv[i].key.data, src->kv[i].value.uint32);   break;
            case GGUF_TYPE_INT32:   gguf_set_val_i32 (ctx, src->kv[i].key.data, src->kv[i].value.int32);    break;
            case GGUF_TYPE_FLOAT32: gguf_set_val_f32 (ctx, src->kv[i].key.data, src->kv[i].value.float32);  break;
            case GGUF_TYPE_UINT64:  gguf_set_val_u64 (ctx, src->kv[i].key.data, src->kv[i].value.uint64);   break;
            case GGUF_TYPE_INT64:   gguf_set_val_i64 (ctx, src->kv[i].key.data, src->kv[i].value.int64);    break;
            case GGUF_TYPE_FLOAT64: gguf_set_val_f64 (ctx, src->kv[i].key.data, src->kv[i].value.float64);  break;
            case GGUF_TYPE_BOOL:    gguf_set_val_bool(ctx, src->kv[i].key.data, src->kv[i].value.bool_);    break;
            case GGUF_TYPE_STRING:  gguf_set_val_str (ctx, src->kv[i].key.data, src->kv[i].value.str.data); break;
            case GGUF_TYPE_ARRAY:
                {
                    if (src->kv[i].value.arr.type == GGUF_TYPE_STRING) {
                        const char ** data = GGML_CALLOC(src->kv[i].value.arr.n, sizeof(char *));
                        for (uint32_t j = 0; j < src->kv[i].value.arr.n; j++) {
                            data[j] = ((struct gguf_str *)src->kv[i].value.arr.data)[j].data;
                        }
                        gguf_set_arr_str(ctx, src->kv[i].key.data, data, src->kv[i].value.arr.n);
                        GGML_FREE((void *)data);
                    } else if (src->kv[i].value.arr.type == GGUF_TYPE_ARRAY) {
                        GGML_ASSERT(false && "nested arrays not supported");
                    } else {
                        gguf_set_arr_data(ctx, src->kv[i].key.data, src->kv[i].value.arr.type, src->kv[i].value.arr.data, src->kv[i].value.arr.n);
                    }
                } break;
            default: GGML_ASSERT(false && "invalid type"); break;
        }
    }
}
void gguf_add_tensor(
             struct gguf_context * ctx,
        const struct ggml_tensor * tensor) {
    if (gguf_find_tensor(ctx, tensor->name) != -1) {
        GGML_ASSERT(false && "duplicated tensor name");
    }

    const int idx = ctx->header.n_tensors;
    ctx->infos = realloc(ctx->infos, (idx + 1)*sizeof(struct gguf_tensor_info));

    ctx->infos[idx].name.n    = strlen(tensor->name);
    ctx->infos[idx].name.data = strdup(tensor->name);

    for (int i = 0; i < GGML_MAX_DIMS; ++i) {
        ctx->infos[idx].ne[i] = 1;
    }

    ctx->infos[idx].n_dims = ggml_n_dims(tensor);
    for (uint32_t i = 0; i < ctx->infos[idx].n_dims; i++) {
        ctx->infos[idx].ne[i] = tensor->ne[i];
    }

    ctx->infos[idx].type   = tensor->type;
    ctx->infos[idx].offset = 0;
    ctx->infos[idx].data   = tensor->data;
    ctx->infos[idx].size   = ggml_nbytes(tensor);

    if (ctx->header.n_tensors > 0) {
        ctx->infos[idx].offset = ctx->infos[idx - 1].offset + GGML_PAD(ctx->infos[idx - 1].size, ctx->alignment);
    }

    ctx->header.n_tensors++;
}
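
// Example (illustrative sketch): registering a ggml tensor with a writer
// context; ggml_new_tensor_2d is assumed from the ggml API, and the name and
// shape are placeholders. The offset is derived from the tensors added before.
//
//     struct ggml_tensor * t = ggml_new_tensor_2d(ctx_data, GGML_TYPE_F32, 4096, 4096);
//     ggml_set_name(t, "example.weight");
//     gguf_add_tensor(gctx_out, t);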
void gguf_set_tensor_type(struct gguf_context * ctx, const char * name, enum ggml_type type) {
    const int idx = gguf_find_tensor(ctx, name);
    if (idx < 0) {
        GGML_ASSERT(false && "tensor not found");
    }

    ctx->infos[idx].type = type;
}

void gguf_set_tensor_data(struct gguf_context * ctx, const char * name, const void * data, size_t size) {
    const int idx = gguf_find_tensor(ctx, name);
    if (idx < 0) {
        GGML_ASSERT(false && "tensor not found");
    }

    ctx->infos[idx].data = data;
    ctx->infos[idx].size = size;

    // update offsets
    for (uint32_t i = idx + 1; i < ctx->header.n_tensors; ++i) {
        ctx->infos[i].offset = ctx->infos[i - 1].offset + GGML_PAD(ctx->infos[i - 1].size, ctx->alignment);
    }
}
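
// Example (illustrative sketch): swapping in externally quantized data for a
// previously added tensor; quantized_buf and quantized_size are placeholders.
// The offsets of all subsequent tensors are recomputed by gguf_set_tensor_data
// above.
//
//     gguf_set_tensor_type(gctx_out, "example.weight", GGML_TYPE_Q8_0);
//     gguf_set_tensor_data(gctx_out, "example.weight", quantized_buf, quantized_size);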
//static void gguf_fwrite_str(FILE * file, const struct gguf_str * val) {
//    fwrite(&val->n,   sizeof(val->n),    1, file);
//    fwrite(val->data, sizeof(char), val->n, file);
//}
//
//static void gguf_fwrite_el(FILE * file, const void * val, size_t size) {
//    fwrite(val, sizeof(char), size, file);
//}

struct gguf_buf {
    void * data;
    size_t size;
    size_t offset;
};

static struct gguf_buf gguf_buf_init(size_t size) {
    struct gguf_buf buf = {
        /*buf.data   =*/ size == 0 ? NULL : GGML_CALLOC(1, size),
        /*buf.size   =*/ size,
        /*buf.offset =*/ 0,
    };

    return buf;
}

static void gguf_buf_free(struct gguf_buf buf) {
    if (buf.data) {
        GGML_FREE(buf.data);
    }
}

static void gguf_buf_grow(struct gguf_buf * buf, size_t size) {
    if (buf->offset + size > buf->size) {
        buf->size = 1.5*(buf->offset + size);
        if (buf->data) {
            buf->data = realloc(buf->data, buf->size);
        }
    }
}
static void gguf_bwrite_str(struct gguf_buf * buf, const struct gguf_str * val) {
    gguf_buf_grow(buf, sizeof(val->n) + val->n);

    // when buf->data is NULL, only the offset is advanced (size-only pass, see gguf_get_meta_size)
    if (buf->data) {
        memcpy((char *) buf->data + buf->offset, &val->n, sizeof(val->n));
    }
    buf->offset += sizeof(val->n);

    if (buf->data) {
        memcpy((char *) buf->data + buf->offset, val->data, val->n);
    }
    buf->offset += val->n;
}

static void gguf_bwrite_el(struct gguf_buf * buf, const void * val, size_t el_size) {
    gguf_buf_grow(buf, el_size);

    if (buf->data) {
        memcpy((char *) buf->data + buf->offset, val, el_size);
    }
    buf->offset += el_size;
}
static void gguf_write_to_buf(const struct gguf_context * ctx, struct gguf_buf * buf, bool only_meta) {
    // write header
    gguf_bwrite_el(buf, &ctx->header.magic,     sizeof(ctx->header.magic));
    gguf_bwrite_el(buf, &ctx->header.version,   sizeof(ctx->header.version));
    gguf_bwrite_el(buf, &ctx->header.n_tensors, sizeof(ctx->header.n_tensors));
    gguf_bwrite_el(buf, &ctx->header.n_kv,      sizeof(ctx->header.n_kv));

    // write key-value pairs
    for (uint32_t i = 0; i < ctx->header.n_kv; ++i) {
        struct gguf_kv * kv = &ctx->kv[i];

        gguf_bwrite_str(buf, &kv->key);
        gguf_bwrite_el (buf, &kv->type, sizeof(kv->type));

        switch (kv->type) {
            case GGUF_TYPE_UINT8:   gguf_bwrite_el (buf, &kv->value.uint8,   sizeof(kv->value.uint8)  ); break;
            case GGUF_TYPE_INT8:    gguf_bwrite_el (buf, &kv->value.int8,    sizeof(kv->value.int8)   ); break;
            case GGUF_TYPE_UINT16:  gguf_bwrite_el (buf, &kv->value.uint16,  sizeof(kv->value.uint16) ); break;
            case GGUF_TYPE_INT16:   gguf_bwrite_el (buf, &kv->value.int16,   sizeof(kv->value.int16)  ); break;
            case GGUF_TYPE_UINT32:  gguf_bwrite_el (buf, &kv->value.uint32,  sizeof(kv->value.uint32) ); break;
            case GGUF_TYPE_INT32:   gguf_bwrite_el (buf, &kv->value.int32,   sizeof(kv->value.int32)  ); break;
            case GGUF_TYPE_FLOAT32: gguf_bwrite_el (buf, &kv->value.float32, sizeof(kv->value.float32)); break;
            case GGUF_TYPE_UINT64:  gguf_bwrite_el (buf, &kv->value.uint64,  sizeof(kv->value.uint64) ); break;
            case GGUF_TYPE_INT64:   gguf_bwrite_el (buf, &kv->value.int64,   sizeof(kv->value.int64)  ); break;
            case GGUF_TYPE_FLOAT64: gguf_bwrite_el (buf, &kv->value.float64, sizeof(kv->value.float64)); break;
            case GGUF_TYPE_BOOL:    gguf_bwrite_el (buf, &kv->value.bool_,   sizeof(kv->value.bool_)  ); break;
            case GGUF_TYPE_STRING:  gguf_bwrite_str(buf, &kv->value.str                               ); break;
            case GGUF_TYPE_ARRAY:
                {
                    gguf_bwrite_el(buf, &kv->value.arr.type, sizeof(kv->value.arr.type));
                    gguf_bwrite_el(buf, &kv->value.arr.n,    sizeof(kv->value.arr.n)   );

                    switch (kv->value.arr.type) {
                        case GGUF_TYPE_UINT8:
                        case GGUF_TYPE_INT8:
                        case GGUF_TYPE_UINT16:
                        case GGUF_TYPE_INT16:
                        case GGUF_TYPE_UINT32:
                        case GGUF_TYPE_INT32:
                        case GGUF_TYPE_FLOAT32:
                        case GGUF_TYPE_UINT64:
                        case GGUF_TYPE_INT64:
                        case GGUF_TYPE_FLOAT64:
                        case GGUF_TYPE_BOOL:
                            {
                                gguf_bwrite_el(buf, kv->value.arr.data, kv->value.arr.n * gguf_type_size(kv->value.arr.type));
                            } break;
                        case GGUF_TYPE_STRING:
                            {
                                for (uint32_t j = 0; j < kv->value.arr.n; ++j) {
                                    gguf_bwrite_str(buf, &((struct gguf_str *) kv->value.arr.data)[j]);
                                }
                            } break;
                        case GGUF_TYPE_ARRAY:
                        default: GGML_ASSERT(false && "invalid type"); break;
                    }
                } break;
            default: GGML_ASSERT(false && "invalid type");
        }
    }

    // write tensor infos
    for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) {
        struct gguf_tensor_info * info = &ctx->infos[i];

        gguf_bwrite_str(buf, &info->name);
        gguf_bwrite_el (buf, &info->n_dims, sizeof(info->n_dims));
        for (uint32_t j = 0; j < info->n_dims; ++j) {
            gguf_bwrite_el(buf, &info->ne[j], sizeof(info->ne[j]));
        }
        gguf_bwrite_el(buf, &info->type,   sizeof(info->type));
        gguf_bwrite_el(buf, &info->offset, sizeof(info->offset));
    }

    // we require the data section to be aligned, so take into account any padding
    {
        const size_t offset     = buf->offset;
        const size_t offset_pad = GGML_PAD(offset, ctx->alignment);

        if (offset_pad != offset) {
            uint8_t pad = 0;
            for (size_t i = 0; i < offset_pad - offset; ++i) {
                gguf_bwrite_el(buf, &pad, sizeof(pad));
            }
        }
    }

    if (only_meta) {
        return;
    }

    size_t offset = 0;

    // write tensor data
    for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) {
        struct gguf_tensor_info * info = &ctx->infos[i];

        const size_t size     = info->size;
        const size_t size_pad = GGML_PAD(size, ctx->alignment);

        gguf_bwrite_el(buf, info->data, size);

        if (size_pad != size) {
            uint8_t pad = 0;
            for (size_t j = 0; j < size_pad - size; ++j) {
                gguf_bwrite_el(buf, &pad, sizeof(pad));
            }
        }

        GGML_ASSERT(offset == info->offset);

        offset += size_pad;
    }
}
void gguf_write_to_file(const struct gguf_context * ctx, const char * fname, bool only_meta) {
    FILE * file = ggml_fopen(fname, "wb");
    if (!file) {
        GGML_ASSERT(false && "failed to open file for writing");
    }

    struct gguf_buf buf = gguf_buf_init(16*1024);

    gguf_write_to_buf(ctx, &buf, only_meta);

    fwrite(buf.data, 1, buf.offset, file);

    gguf_buf_free(buf);

    fclose(file);
}
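
// Example (illustrative sketch): serializing a writer context; "out.gguf" is a
// placeholder path, and only_meta = false writes both the metadata and the
// tensor data.
//
//     gguf_write_to_file(gctx_out, "out.gguf", false);
//     gguf_free(gctx_out);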
size_t gguf_get_meta_size(const struct gguf_context * ctx) {
    // no allocs - only compute size
    struct gguf_buf buf = gguf_buf_init(0);

    gguf_write_to_buf(ctx, &buf, true);

    return buf.offset;
}

void gguf_get_meta_data(const struct gguf_context * ctx, void * data) {
    struct gguf_buf buf = gguf_buf_init(16*1024);

    gguf_write_to_buf(ctx, &buf, true);

    memcpy(data, buf.data, buf.offset);

    gguf_buf_free(buf);
}
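
// Example (illustrative sketch): the two-call pattern for capturing the
// serialized metadata in a caller-owned buffer.
//
//     const size_t meta_size = gguf_get_meta_size(gctx_out);
//     void * meta = malloc(meta_size);
//     gguf_get_meta_data(gctx_out, meta);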
////////////////////////////////////////////////////////////////////////////////

int ggml_cpu_has_avx(void) {
#if defined(__AVX__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_avx_vnni(void) {
#if defined(__AVXVNNI__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_avx2(void) {
#if defined(__AVX2__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_avx512(void) {
#if defined(__AVX512F__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_avx512_vbmi(void) {
#if defined(__AVX512VBMI__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_avx512_vnni(void) {
#if defined(__AVX512VNNI__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_fma(void) {
#if defined(__FMA__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_neon(void) {
#if defined(__ARM_NEON)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_arm_fma(void) {
#if defined(__ARM_FEATURE_FMA)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_metal(void) {
#if defined(GGML_USE_METAL)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_f16c(void) {
#if defined(__F16C__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_fp16_va(void) {
#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_wasm_simd(void) {
#if defined(__wasm_simd128__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_blas(void) {
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) || defined(GGML_USE_CUDA) || defined(GGML_USE_VULKAN) || defined(GGML_USE_CLBLAST) || defined(GGML_USE_SYCL)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_cuda(void) {
#if defined(GGML_USE_CUDA)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_clblast(void) {
#if defined(GGML_USE_CLBLAST)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_vulkan(void) {
#if defined(GGML_USE_VULKAN)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_kompute(void) {
#if defined(GGML_USE_KOMPUTE)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_sycl(void) {
#if defined(GGML_USE_SYCL)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_gpublas(void) {
    return ggml_cpu_has_cuda() || ggml_cpu_has_clblast() || ggml_cpu_has_vulkan() || ggml_cpu_has_kompute() ||
           ggml_cpu_has_sycl();
}

int ggml_cpu_has_sse3(void) {
#if defined(__SSE3__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_ssse3(void) {
#if defined(__SSSE3__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_vsx(void) {
#if defined(__POWER9_VECTOR__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_matmul_int8(void) {
#if defined(__ARM_FEATURE_MATMUL_INT8)
    return 1;
#else
    return 0;
#endif
}
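
// Example (illustrative sketch): reporting a few of the compile-time feature
// flags above, similar in spirit to the system-info banner that downstream
// projects print at startup.
//
//     printf("AVX = %d | AVX2 = %d | NEON = %d | CUDA = %d\n",
//            ggml_cpu_has_avx(), ggml_cpu_has_avx2(), ggml_cpu_has_neon(), ggml_cpu_has_cuda());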
////////////////////////////////////////////////////////////////////////////////