ggml.c 673 KB

430914310143111431214313143141431514316143171431814319143201432114322143231432414325143261432714328143291433014331143321433314334143351433614337143381433914340143411434214343143441434514346143471434814349143501435114352143531435414355143561435714358143591436014361143621436314364143651436614367143681436914370143711437214373143741437514376143771437814379143801438114382143831438414385143861438714388143891439014391143921439314394143951439614397143981439914400144011440214403144041440514406144071440814409144101441114412144131441414415144161441714418144191442014421144221442314424144251442614427144281442914430144311443214433144341443514436144371443814439144401444114442144431444414445144461444714448144491445014451144521445314454144551445614457144581445914460144611446214463144641446514466144671446814469144701447114472144731447414475144761447714478144791448014481144821448314484144851448614487144881448914490144911449214493144941449514496144971449814499145001450114502145031450414505145061450714508145091451014511145121451314514145151451614517145181451914520145211452214523145241452514526145271452814529145301453114532145331453414535145361453714538145391454014541145421454314544145451454614547145481454914550145511455214553145541455514556145571455814559145601456114562145631456414565145661456714568145691457014571145721457314574145751457614577145781457914580145811458214583145841458514586145871458814589145901459114592145931459414595145961459714598145991460014601146021460314604146051460614607146081460914610146111461214613146141461514616146171461814619146201462114622146231462414625146261462714628146291463014631146321463314634146351463614637146381463914640146411464214643146441464514646146471464814649146501465114652146531465414655146561465714658146591466014661146621466314664146651466614667146681466914670146711467214673146741467514676146771467814679146801468114682146831468414685146861468714688146891469014691146921469314694146951469614697146981469914700147011470214703147041470514706147071470814709147101471114712147131471414715147161471714718147191472014721147221472314724147251472614727147281472914730147311473214733147341473514736147371473814739147401474114742147431474414745147461474714748147491475014751147521475314754147551475614757147581475914760147611476214763147641476514766147671476814769147701477114772147731477414775147761477714778147791478014781147821478314784147851478614787147881478914790147911479214793147941479514796147971479814799148001480114802148031480414805148061480714808148091481014811148121481314814148151481614817148181481914820148211482214823148241482514826148271482814829148301483114832148331483414835148361483714838148391484014841148421484314844148451484614847148481484914850148511485214853148541485514856148571485814859148601486114862148631486414865148661486714868148691487014871148721487314874148751487614877148781487914880148811488214883148841488514886148871488814889148901489114892148931489414895148961489714898148991490014901149021490314904149051490614907149081490914910149111491214913149141491514916149171491814919149201492114922149231492414925149261492714928149291493014931149321493314934149351493614937149381493914940149411494214943149441494514946149471494814949149501495114952149531495414955149561495714958149591496014961149621496314964149651496614967149681496914970149711497214973149741497514976149771497814979149801498114982149831498414985149861498714988149891499014991149921499314994149951499614997149981499915000150011500215003150041500515006150071500815009150101501115012150131501415015150161501715018150191
502015021150221502315024150251502615027150281502915030150311503215033150341503515036150371503815039150401504115042150431504415045150461504715048150491505015051150521505315054150551505615057150581505915060150611506215063150641506515066150671506815069150701507115072150731507415075150761507715078150791508015081150821508315084150851508615087150881508915090150911509215093150941509515096150971509815099151001510115102151031510415105151061510715108151091511015111151121511315114151151511615117151181511915120151211512215123151241512515126151271512815129151301513115132151331513415135151361513715138151391514015141151421514315144151451514615147151481514915150151511515215153151541515515156151571515815159151601516115162151631516415165151661516715168151691517015171151721517315174151751517615177151781517915180151811518215183151841518515186151871518815189151901519115192151931519415195151961519715198151991520015201152021520315204152051520615207152081520915210152111521215213152141521515216152171521815219152201522115222152231522415225152261522715228152291523015231152321523315234152351523615237152381523915240152411524215243152441524515246152471524815249152501525115252152531525415255152561525715258152591526015261152621526315264152651526615267152681526915270152711527215273152741527515276152771527815279152801528115282152831528415285152861528715288152891529015291152921529315294152951529615297152981529915300153011530215303153041530515306153071530815309153101531115312153131531415315153161531715318153191532015321153221532315324153251532615327153281532915330153311533215333153341533515336153371533815339153401534115342153431534415345153461534715348153491535015351153521535315354153551535615357153581535915360153611536215363153641536515366153671536815369153701537115372153731537415375153761537715378153791538015381153821538315384153851538615387153881538915390153911539215393153941539515396153971539815399154001540115402154031540415405154061540715408154091541015411154121541315414154151541615417154181541915420154211542215423154241542515426154271542815429154301543115432154331543415435154361543715438154391544015441154421544315444154451544615447154481544915450154511545215453154541545515456154571545815459154601546115462154631546415465154661546715468154691547015471154721547315474154751547615477154781547915480154811548215483154841548515486154871548815489154901549115492154931549415495154961549715498154991550015501155021550315504155051550615507155081550915510155111551215513155141551515516155171551815519155201552115522155231552415525155261552715528155291553015531155321553315534155351553615537155381553915540155411554215543155441554515546155471554815549155501555115552155531555415555155561555715558155591556015561155621556315564155651556615567155681556915570155711557215573155741557515576155771557815579155801558115582155831558415585155861558715588155891559015591155921559315594155951559615597155981559915600156011560215603156041560515606156071560815609156101561115612156131561415615156161561715618156191562015621156221562315624156251562615627156281562915630156311563215633156341563515636156371563815639156401564115642156431564415645156461564715648156491565015651156521565315654156551565615657156581565915660156611566215663156641566515666156671566815669156701567115672156731567415675156761567715678156791568015681156821568315684156851568615687156881568915690156911569215693156941569515696156971569815699157001570115702157031570415705157061570715708157091571015711157121571315714157151571615717157181571915720157211572215723157241572515726157271572815729157301
573115732157331573415735157361573715738157391574015741157421574315744157451574615747157481574915750157511575215753157541575515756157571575815759157601576115762157631576415765157661576715768157691577015771157721577315774157751577615777157781577915780157811578215783157841578515786157871578815789157901579115792157931579415795157961579715798157991580015801158021580315804158051580615807158081580915810158111581215813158141581515816158171581815819158201582115822158231582415825158261582715828158291583015831158321583315834158351583615837158381583915840158411584215843158441584515846158471584815849158501585115852158531585415855158561585715858158591586015861158621586315864158651586615867158681586915870158711587215873158741587515876158771587815879158801588115882158831588415885158861588715888158891589015891158921589315894158951589615897158981589915900159011590215903159041590515906159071590815909159101591115912159131591415915159161591715918159191592015921159221592315924159251592615927159281592915930159311593215933159341593515936159371593815939159401594115942159431594415945159461594715948159491595015951159521595315954159551595615957159581595915960159611596215963159641596515966159671596815969159701597115972159731597415975159761597715978159791598015981159821598315984159851598615987159881598915990159911599215993159941599515996159971599815999160001600116002160031600416005160061600716008160091601016011160121601316014160151601616017160181601916020160211602216023160241602516026160271602816029160301603116032160331603416035160361603716038160391604016041160421604316044160451604616047160481604916050160511605216053160541605516056160571605816059160601606116062160631606416065160661606716068160691607016071160721607316074160751607616077160781607916080160811608216083160841608516086160871608816089160901609116092160931609416095160961609716098160991610016101161021610316104161051610616107161081610916110161111611216113161141611516116161171611816119161201612116122161231612416125161261612716128161291613016131161321613316134161351613616137161381613916140161411614216143161441614516146161471614816149161501615116152161531615416155161561615716158161591616016161161621616316164161651616616167161681616916170161711617216173161741617516176161771617816179161801618116182161831618416185161861618716188161891619016191161921619316194161951619616197161981619916200162011620216203162041620516206162071620816209162101621116212162131621416215162161621716218162191622016221162221622316224162251622616227162281622916230162311623216233162341623516236162371623816239162401624116242162431624416245162461624716248162491625016251162521625316254162551625616257162581625916260162611626216263162641626516266162671626816269162701627116272162731627416275162761627716278162791628016281162821628316284162851628616287162881628916290162911629216293162941629516296162971629816299163001630116302163031630416305163061630716308163091631016311163121631316314163151631616317163181631916320163211632216323163241632516326163271632816329163301633116332163331633416335163361633716338163391634016341163421634316344163451634616347163481634916350163511635216353163541635516356163571635816359163601636116362163631636416365163661636716368163691637016371163721637316374163751637616377163781637916380163811638216383163841638516386163871638816389163901639116392163931639416395163961639716398163991640016401164021640316404164051640616407164081640916410164111641216413164141641516416164171641816419164201642116422164231642416425164261642716428164291643016431164321643316434164351643616437164381643916440164411
644216443164441644516446164471644816449164501645116452164531645416455164561645716458164591646016461164621646316464164651646616467164681646916470164711647216473164741647516476164771647816479164801648116482164831648416485164861648716488164891649016491164921649316494164951649616497164981649916500165011650216503165041650516506165071650816509165101651116512165131651416515165161651716518165191652016521165221652316524165251652616527165281652916530165311653216533165341653516536165371653816539165401654116542165431654416545165461654716548165491655016551165521655316554165551655616557165581655916560165611656216563165641656516566165671656816569165701657116572165731657416575165761657716578165791658016581165821658316584165851658616587165881658916590165911659216593165941659516596165971659816599166001660116602166031660416605166061660716608166091661016611166121661316614166151661616617166181661916620166211662216623166241662516626166271662816629166301663116632166331663416635166361663716638166391664016641166421664316644166451664616647166481664916650166511665216653166541665516656166571665816659166601666116662166631666416665166661666716668166691667016671166721667316674166751667616677166781667916680166811668216683166841668516686166871668816689166901669116692166931669416695166961669716698166991670016701167021670316704167051670616707167081670916710167111671216713167141671516716167171671816719167201672116722167231672416725167261672716728167291673016731167321673316734167351673616737167381673916740167411674216743167441674516746167471674816749167501675116752167531675416755167561675716758167591676016761167621676316764167651676616767167681676916770167711677216773167741677516776167771677816779167801678116782167831678416785167861678716788167891679016791167921679316794167951679616797167981679916800168011680216803168041680516806168071680816809168101681116812168131681416815168161681716818168191682016821168221682316824168251682616827168281682916830168311683216833168341683516836168371683816839168401684116842168431684416845168461684716848168491685016851168521685316854168551685616857168581685916860168611686216863168641686516866168671686816869168701687116872168731687416875168761687716878168791688016881168821688316884168851688616887168881688916890168911689216893168941689516896168971689816899169001690116902169031690416905169061690716908169091691016911169121691316914169151691616917169181691916920169211692216923169241692516926169271692816929169301693116932169331693416935169361693716938169391694016941169421694316944169451694616947169481694916950169511695216953169541695516956169571695816959169601696116962169631696416965169661696716968169691697016971169721697316974169751697616977169781697916980169811698216983169841698516986169871698816989169901699116992169931699416995169961699716998169991700017001170021700317004170051700617007170081700917010170111701217013170141701517016170171701817019170201702117022170231702417025170261702717028170291703017031170321703317034170351703617037170381703917040170411704217043170441704517046170471704817049170501705117052170531705417055170561705717058170591706017061170621706317064170651706617067170681706917070170711707217073170741707517076170771707817079170801708117082170831708417085170861708717088170891709017091170921709317094170951709617097170981709917100171011710217103171041710517106171071710817109171101711117112171131711417115171161711717118171191712017121171221712317124171251712617127171281712917130171311713217133171341713517136171371713817139171401714117142171431714417145171461714717148171491715017151171521
715317154171551715617157171581715917160171611716217163171641716517166171671716817169171701717117172171731717417175171761717717178171791718017181171821718317184171851718617187171881718917190171911719217193171941719517196171971719817199172001720117202172031720417205172061720717208172091721017211172121721317214172151721617217172181721917220172211722217223172241722517226172271722817229172301723117232172331723417235172361723717238172391724017241172421724317244172451724617247172481724917250172511725217253172541725517256172571725817259172601726117262172631726417265172661726717268172691727017271172721727317274172751727617277172781727917280172811728217283172841728517286172871728817289172901729117292172931729417295172961729717298172991730017301173021730317304173051730617307173081730917310173111731217313173141731517316173171731817319173201732117322173231732417325173261732717328173291733017331173321733317334173351733617337173381733917340173411734217343173441734517346173471734817349173501735117352173531735417355173561735717358173591736017361173621736317364173651736617367173681736917370173711737217373173741737517376173771737817379173801738117382173831738417385173861738717388173891739017391173921739317394173951739617397173981739917400174011740217403174041740517406174071740817409174101741117412174131741417415174161741717418174191742017421174221742317424174251742617427174281742917430174311743217433174341743517436174371743817439174401744117442174431744417445174461744717448174491745017451174521745317454174551745617457174581745917460174611746217463174641746517466174671746817469174701747117472174731747417475174761747717478174791748017481174821748317484174851748617487174881748917490174911749217493174941749517496174971749817499175001750117502175031750417505175061750717508175091751017511175121751317514175151751617517175181751917520175211752217523175241752517526175271752817529175301753117532175331753417535175361753717538175391754017541175421754317544175451754617547175481754917550175511755217553175541755517556175571755817559175601756117562175631756417565175661756717568175691757017571175721757317574175751757617577175781757917580175811758217583175841758517586175871758817589175901759117592175931759417595175961759717598175991760017601176021760317604176051760617607176081760917610176111761217613176141761517616176171761817619176201762117622176231762417625176261762717628176291763017631176321763317634176351763617637176381763917640176411764217643176441764517646176471764817649176501765117652176531765417655176561765717658176591766017661176621766317664176651766617667176681766917670176711767217673176741767517676176771767817679176801768117682176831768417685176861768717688176891769017691176921769317694176951769617697176981769917700177011770217703177041770517706177071770817709177101771117712177131771417715177161771717718177191772017721177221772317724177251772617727177281772917730177311773217733177341773517736177371773817739177401774117742177431774417745177461774717748177491775017751177521775317754177551775617757177581775917760177611776217763177641776517766177671776817769177701777117772177731777417775177761777717778177791778017781177821778317784177851778617787177881778917790177911779217793177941779517796177971779817799178001780117802178031780417805178061780717808178091781017811178121781317814178151781617817178181781917820178211782217823178241782517826178271782817829178301783117832178331783417835178361783717838178391784017841178421784317844178451784617847178481784917850178511785217853178541785517856178571785817859178601786117862178631
786417865178661786717868178691787017871178721787317874178751787617877178781787917880178811788217883178841788517886178871788817889178901789117892178931789417895178961789717898178991790017901179021790317904179051790617907179081790917910179111791217913179141791517916179171791817919179201792117922179231792417925179261792717928179291793017931179321793317934179351793617937179381793917940179411794217943179441794517946179471794817949179501795117952179531795417955179561795717958179591796017961179621796317964179651796617967179681796917970179711797217973179741797517976179771797817979179801798117982179831798417985179861798717988179891799017991179921799317994179951799617997179981799918000180011800218003180041800518006180071800818009180101801118012180131801418015180161801718018180191802018021180221802318024180251802618027180281802918030180311803218033180341803518036180371803818039180401804118042180431804418045180461804718048180491805018051180521805318054180551805618057180581805918060180611806218063180641806518066180671806818069180701807118072180731807418075180761807718078180791808018081180821808318084180851808618087180881808918090180911809218093180941809518096180971809818099181001810118102181031810418105181061810718108181091811018111181121811318114181151811618117181181811918120181211812218123181241812518126181271812818129181301813118132181331813418135181361813718138181391814018141181421814318144181451814618147181481814918150181511815218153181541815518156181571815818159181601816118162181631816418165181661816718168181691817018171181721817318174181751817618177181781817918180181811818218183181841818518186181871818818189181901819118192181931819418195181961819718198181991820018201182021820318204182051820618207182081820918210182111821218213182141821518216182171821818219182201822118222182231822418225182261822718228182291823018231182321823318234182351823618237182381823918240182411824218243182441824518246182471824818249182501825118252182531825418255182561825718258182591826018261182621826318264182651826618267182681826918270182711827218273182741827518276182771827818279182801828118282182831828418285182861828718288182891829018291182921829318294182951829618297182981829918300183011830218303183041830518306183071830818309183101831118312183131831418315183161831718318183191832018321183221832318324183251832618327183281832918330183311833218333183341833518336183371833818339183401834118342183431834418345183461834718348183491835018351183521835318354183551835618357183581835918360183611836218363183641836518366183671836818369183701837118372183731837418375183761837718378183791838018381183821838318384183851838618387183881838918390183911839218393183941839518396183971839818399184001840118402184031840418405184061840718408184091841018411184121841318414184151841618417184181841918420184211842218423184241842518426184271842818429184301843118432184331843418435184361843718438184391844018441184421844318444184451844618447184481844918450184511845218453184541845518456184571845818459184601846118462184631846418465184661846718468184691847018471184721847318474184751847618477184781847918480184811848218483184841848518486184871848818489184901849118492184931849418495184961849718498184991850018501185021850318504185051850618507185081850918510185111851218513185141851518516185171851818519185201852118522185231852418525185261852718528185291853018531185321853318534185351853618537185381853918540185411854218543185441854518546185471854818549185501855118552185531855418555185561855718558185591856018561185621856318564185651856618567185681856918570185711857218573185741
857518576185771857818579185801858118582185831858418585185861858718588185891859018591185921859318594185951859618597185981859918600186011860218603186041860518606186071860818609186101861118612186131861418615186161861718618186191862018621186221862318624186251862618627186281862918630186311863218633186341863518636186371863818639186401864118642186431864418645186461864718648186491865018651186521865318654186551865618657186581865918660186611866218663186641866518666186671866818669186701867118672186731867418675186761867718678186791868018681186821868318684186851868618687186881868918690186911869218693186941869518696186971869818699187001870118702187031870418705187061870718708187091871018711187121871318714187151871618717187181871918720187211872218723187241872518726187271872818729187301873118732187331873418735187361873718738187391874018741187421874318744187451874618747187481874918750187511875218753187541875518756187571875818759187601876118762187631876418765187661876718768187691877018771187721877318774187751877618777187781877918780187811878218783187841878518786187871878818789187901879118792187931879418795187961879718798187991880018801188021880318804188051880618807188081880918810188111881218813188141881518816188171881818819188201882118822188231882418825188261882718828188291883018831188321883318834188351883618837188381883918840188411884218843188441884518846188471884818849188501885118852188531885418855188561885718858188591886018861188621886318864188651886618867188681886918870188711887218873188741887518876188771887818879188801888118882188831888418885188861888718888188891889018891188921889318894188951889618897188981889918900189011890218903189041890518906189071890818909189101891118912189131891418915189161891718918189191892018921189221892318924189251892618927189281892918930189311893218933189341893518936189371893818939189401894118942189431894418945189461894718948189491895018951189521895318954189551895618957189581895918960189611896218963189641896518966189671896818969189701897118972189731897418975189761897718978189791898018981189821898318984189851898618987189881898918990189911899218993189941899518996189971899818999190001900119002190031900419005190061900719008190091901019011190121901319014190151901619017190181901919020190211902219023190241902519026190271902819029190301903119032190331903419035190361903719038190391904019041190421904319044190451904619047190481904919050190511905219053190541905519056190571905819059190601906119062190631906419065190661906719068190691907019071190721907319074190751907619077190781907919080190811908219083190841908519086190871908819089190901909119092190931909419095190961909719098190991910019101191021910319104191051910619107191081910919110191111911219113191141911519116191171911819119191201912119122191231912419125191261912719128191291913019131191321913319134191351913619137191381913919140191411914219143191441914519146191471914819149191501915119152191531915419155191561915719158191591916019161191621916319164191651916619167191681916919170191711917219173191741917519176191771917819179191801918119182191831918419185191861918719188191891919019191191921919319194191951919619197191981919919200192011920219203192041920519206192071920819209192101921119212192131921419215192161921719218192191922019221192221922319224192251922619227192281922919230192311923219233192341923519236192371923819239192401924119242192431924419245192461924719248192491925019251192521925319254192551925619257192581925919260192611926219263192641926519266192671926819269192701927119272192731927419275192761927719278192791928019281192821928319284192851
928619287192881928919290192911929219293192941929519296192971929819299193001930119302193031930419305193061930719308193091931019311193121931319314193151931619317193181931919320193211932219323193241932519326193271932819329193301933119332193331933419335193361933719338193391934019341193421934319344193451934619347193481934919350193511935219353193541935519356193571935819359193601936119362193631936419365193661936719368193691937019371193721937319374193751937619377193781937919380193811938219383193841938519386193871938819389193901939119392193931939419395193961939719398193991940019401194021940319404194051940619407194081940919410194111941219413194141941519416194171941819419194201942119422194231942419425194261942719428194291943019431194321943319434194351943619437194381943919440194411944219443194441944519446194471944819449194501945119452194531945419455194561945719458194591946019461194621946319464194651946619467194681946919470194711947219473194741947519476194771947819479194801948119482194831948419485194861948719488194891949019491194921949319494194951949619497194981949919500195011950219503195041950519506195071950819509195101951119512195131951419515195161951719518195191952019521195221952319524195251952619527195281952919530195311953219533195341953519536195371953819539195401954119542195431954419545195461954719548195491955019551195521955319554195551955619557195581955919560195611956219563195641956519566195671956819569195701957119572195731957419575195761957719578195791958019581195821958319584195851958619587195881958919590195911959219593195941959519596195971959819599196001960119602196031960419605196061960719608196091961019611196121961319614196151961619617196181961919620196211962219623196241962519626196271962819629196301963119632196331963419635196361963719638196391964019641196421964319644196451964619647196481964919650196511965219653196541965519656196571965819659196601966119662196631966419665196661966719668196691967019671196721967319674196751967619677196781967919680196811968219683196841968519686196871968819689196901969119692196931969419695196961969719698196991970019701197021970319704197051970619707197081970919710197111971219713197141971519716197171971819719197201972119722197231972419725197261972719728197291973019731197321973319734197351973619737197381973919740197411974219743197441974519746197471974819749197501975119752197531975419755197561975719758197591976019761197621976319764197651976619767197681976919770197711977219773197741977519776197771977819779197801978119782197831978419785197861978719788197891979019791197921979319794197951979619797197981979919800198011980219803198041980519806198071980819809198101981119812198131981419815198161981719818198191982019821198221982319824198251982619827198281982919830198311983219833198341983519836198371983819839198401984119842198431984419845198461984719848198491985019851198521985319854198551985619857198581985919860198611986219863198641986519866198671986819869198701987119872198731987419875198761987719878198791988019881198821988319884198851988619887198881988919890198911989219893198941989519896198971989819899199001990119902199031990419905199061990719908199091991019911199121991319914199151991619917199181991919920199211992219923199241992519926199271992819929199301993119932199331993419935199361993719938199391994019941199421994319944199451994619947199481994919950199511995219953199541995519956199571995819959199601996119962199631996419965199661996719968199691997019971199721997319974199751997619977199781997919980199811998219983199841998519986199871998819989199901999119992199931999419995199961
999719998199992000020001200022000320004200052000620007200082000920010200112001220013200142001520016200172001820019200202002120022200232002420025200262002720028200292003020031200322003320034200352003620037200382003920040200412004220043200442004520046200472004820049200502005120052200532005420055200562005720058200592006020061200622006320064200652006620067200682006920070200712007220073200742007520076200772007820079200802008120082200832008420085200862008720088200892009020091200922009320094200952009620097200982009920100201012010220103201042010520106201072010820109201102011120112201132011420115201162011720118201192012020121201222012320124201252012620127201282012920130201312013220133201342013520136201372013820139201402014120142201432014420145201462014720148201492015020151201522015320154201552015620157201582015920160201612016220163201642016520166201672016820169201702017120172201732017420175201762017720178201792018020181201822018320184201852018620187201882018920190201912019220193201942019520196201972019820199202002020120202202032020420205202062020720208202092021020211202122021320214202152021620217202182021920220202212022220223202242022520226202272022820229202302023120232202332023420235202362023720238202392024020241202422024320244202452024620247202482024920250202512025220253202542025520256202572025820259202602026120262202632026420265202662026720268202692027020271202722027320274202752027620277202782027920280202812028220283202842028520286202872028820289202902029120292202932029420295202962029720298202992030020301203022030320304203052030620307203082030920310203112031220313203142031520316203172031820319203202032120322203232032420325203262032720328203292033020331203322033320334203352033620337203382033920340203412034220343203442034520346203472034820349203502035120352203532035420355203562035720358203592036020361203622036320364203652036620367203682036920370203712037220373203742037520376203772037820379203802038120382203832038420385203862038720388203892039020391203922039320394203952039620397203982039920400204012040220403204042040520406204072040820409204102041120412204132041420415204162041720418204192042020421204222042320424204252042620427204282042920430204312043220433204342043520436204372043820439204402044120442204432044420445204462044720448204492045020451204522045320454204552045620457204582045920460204612046220463204642046520466204672046820469204702047120472204732047420475204762047720478204792048020481204822048320484204852048620487204882048920490204912049220493204942049520496204972049820499205002050120502205032050420505205062050720508205092051020511205122051320514205152051620517205182051920520205212052220523205242052520526205272052820529205302053120532205332053420535205362053720538205392054020541205422054320544205452054620547205482054920550205512055220553205542055520556205572055820559205602056120562205632056420565205662056720568205692057020571205722057320574205752057620577205782057920580205812058220583205842058520586205872058820589205902059120592205932059420595205962059720598205992060020601206022060320604206052060620607206082060920610206112061220613206142061520616206172061820619206202062120622206232062420625206262062720628206292063020631206322063320634206352063620637206382063920640206412064220643206442064520646206472064820649206502065120652206532065420655206562065720658206592066020661206622066320664206652066620667206682066920670206712067220673206742067520676206772067820679206802068120682206832068420685206862068720688206892069020691206922069320694206952069620697206982069920700207012070220703207042070520706207072
070820709207102071120712207132071420715207162071720718207192072020721207222072320724207252072620727207282072920730207312073220733207342073520736207372073820739207402074120742207432074420745207462074720748207492075020751207522075320754207552075620757207582075920760207612076220763207642076520766207672076820769207702077120772207732077420775207762077720778207792078020781207822078320784207852078620787207882078920790207912079220793207942079520796207972079820799208002080120802208032080420805208062080720808208092081020811208122081320814208152081620817208182081920820208212082220823208242082520826208272082820829208302083120832208332083420835208362083720838208392084020841208422084320844208452084620847208482084920850208512085220853208542085520856208572085820859208602086120862
#define _CRT_SECURE_NO_DEPRECATE // Disables ridiculous "unsafe" warnings on Windows
#define _USE_MATH_DEFINES // For M_PI on MSVC

#include "ggml-impl.h"
#include "ggml-quants.h"

#if defined(_MSC_VER) || defined(__MINGW32__)
#include <malloc.h> // using malloc.h with MSC/MINGW
#elif !defined(__FreeBSD__) && !defined(__NetBSD__) && !defined(__OpenBSD__)
#include <alloca.h>
#endif

#include <assert.h>
#include <errno.h>
#include <time.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>
#include <float.h>
#include <limits.h>
#include <stdarg.h>
#include <signal.h>
#if defined(__gnu_linux__)
#include <syscall.h>
#endif

#ifdef GGML_USE_METAL
#include <unistd.h>
#endif

#if defined(_MSC_VER)
// disable "possible loss of data" to avoid hundreds of casts
// we should just be careful :)
#pragma warning(disable: 4244 4267)

// disable POSIX deprecation warnings
// these functions are never going away, anyway
#pragma warning(disable: 4996)
#endif

#if defined(_WIN32)

#include <windows.h>

typedef volatile LONG atomic_int;
typedef atomic_int atomic_bool;

static void atomic_store(atomic_int * ptr, LONG val) {
    InterlockedExchange(ptr, val);
}
static LONG atomic_load(atomic_int * ptr) {
    return InterlockedCompareExchange(ptr, 0, 0);
}
static LONG atomic_fetch_add(atomic_int * ptr, LONG inc) {
    return InterlockedExchangeAdd(ptr, inc);
}
static LONG atomic_fetch_sub(atomic_int * ptr, LONG dec) {
    return atomic_fetch_add(ptr, -(dec));
}

typedef HANDLE pthread_t;

typedef DWORD thread_ret_t;
static int pthread_create(pthread_t * out, void * unused, thread_ret_t(*func)(void *), void * arg) {
    (void) unused;
    HANDLE handle = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE) func, arg, 0, NULL);
    if (handle == NULL)
    {
        return EAGAIN;
    }

    *out = handle;
    return 0;
}

static int pthread_join(pthread_t thread, void * unused) {
    (void) unused;
    int ret = (int) WaitForSingleObject(thread, INFINITE);
    CloseHandle(thread);
    return ret;
}

static int sched_yield (void) {
    Sleep (0);
    return 0;
}
#else
#include <pthread.h>
#include <stdatomic.h>

typedef void * thread_ret_t;

#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

#endif
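
// Illustrative sketch only: thanks to the shims above, the threading code later in
// this file is written once against the POSIX-style names and compiles unchanged on
// Windows. Roughly (thread_fn and arg are placeholders, not symbols from this file):
//
//     atomic_int n_ready = 0;
//     pthread_t  worker;
//     pthread_create(&worker, NULL, thread_fn, arg);   // CreateThread on Windows
//     atomic_fetch_add(&n_ready, 1);                   // InterlockedExchangeAdd on Windows
//     pthread_join(worker, NULL);                      // WaitForSingleObject + CloseHandle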
#ifdef GGML_USE_CPU_HBM
#include <hbwmalloc.h>
#endif

#if defined(__APPLE__)
#include <TargetConditionals.h>
#endif

#if (defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)) && \
    (!defined(TARGET_OS_TV) && !defined(TARGET_OS_WATCH))

#include <sys/wait.h>

void ggml_print_backtrace(void) {
    /*
    #include <execinfo.h>
    #include <dlfcn.h>

    void * trace[100];

    int nptrs = backtrace(trace, sizeof(trace)/sizeof(trace[0]));

    backtrace_symbols_fd(trace, nptrs, STDERR_FILENO);
    */

    // backtrace_symbols does not show line numbers, use gdb instead
    char attach[32];
    snprintf(attach, sizeof(attach), "attach %d", getpid());
    int pid = fork();
    if (pid == 0) {
        execlp("gdb", "gdb", "--batch",
            "-ex", "set style enabled on",
            "-ex", attach,
            "-ex", "bt -frame-info source-and-location",
            "-ex", "detach",
            "-ex", "quit",
            (char *) NULL);
    } else {
        waitpid(pid, NULL, 0);
    }
}
#else
void ggml_print_backtrace(void) {
    // platform not supported
}
#endif
/*#define GGML_PERF*/
#define GGML_DEBUG 0
#define GGML_GELU_FP16
#define GGML_GELU_QUICK_FP16
#define GGML_SILU_FP16
// #define GGML_CROSS_ENTROPY_EXP_FP16
// #define GGML_FLASH_ATTN_EXP_FP16

#define GGML_SOFT_MAX_UNROLL 4
#define GGML_VEC_DOT_UNROLL  2
#define GGML_VEC_MAD_UNROLL  32

//
// logging
//

#if (GGML_DEBUG >= 1)
#define GGML_PRINT_DEBUG(...) printf(__VA_ARGS__)
#else
#define GGML_PRINT_DEBUG(...)
#endif

#if (GGML_DEBUG >= 5)
#define GGML_PRINT_DEBUG_5(...) printf(__VA_ARGS__)
#else
#define GGML_PRINT_DEBUG_5(...)
#endif

#if (GGML_DEBUG >= 10)
#define GGML_PRINT_DEBUG_10(...) printf(__VA_ARGS__)
#else
#define GGML_PRINT_DEBUG_10(...)
#endif

#define GGML_PRINT(...) printf(__VA_ARGS__)

//
// end of logging block
//

#ifdef GGML_USE_ACCELERATE
// uncomment to use vDSP for soft max computation
// note: not sure if it is actually faster
//#define GGML_SOFT_MAX_ACCELERATE
#endif

#if defined(_MSC_VER) || defined(__MINGW32__)
#define GGML_ALIGNED_MALLOC(size) _aligned_malloc(size, GGML_MEM_ALIGN)
#define GGML_ALIGNED_FREE(ptr)    _aligned_free(ptr)
#else
inline static void * ggml_aligned_malloc(size_t size) {
    if (size == 0) {
        GGML_PRINT("WARNING: Behavior may be unexpected when allocating 0 bytes for ggml_aligned_malloc!\n");
        return NULL;
    }
    void * aligned_memory = NULL;
#ifdef GGML_USE_CPU_HBM
    int result = hbw_posix_memalign(&aligned_memory, 16, size);
#elif GGML_USE_METAL
    int result = posix_memalign(&aligned_memory, sysconf(_SC_PAGESIZE), size);
#else
    int result = posix_memalign(&aligned_memory, GGML_MEM_ALIGN, size);
#endif
    if (result != 0) {
        // Handle allocation failure
        const char *error_desc = "unknown allocation error";
        switch (result) {
            case EINVAL:
                error_desc = "invalid alignment value";
                break;
            case ENOMEM:
                error_desc = "insufficient memory";
                break;
        }
        GGML_PRINT("%s: %s (attempted to allocate %6.2f MB)\n", __func__, error_desc, size/(1024.0*1024.0));
        GGML_ASSERT(false);
        return NULL;
    }
    return aligned_memory;
}
#define GGML_ALIGNED_MALLOC(size) ggml_aligned_malloc(size)
#ifdef GGML_USE_CPU_HBM
#define GGML_ALIGNED_FREE(ptr)    if(NULL != ptr) hbw_free(ptr)
#else
#define GGML_ALIGNED_FREE(ptr)    free(ptr)
#endif
#endif
inline static void * ggml_malloc(size_t size) {
    if (size == 0) {
        GGML_PRINT("WARNING: Behavior may be unexpected when allocating 0 bytes for ggml_malloc!\n");
        return NULL;
    }
    void * result = malloc(size);
    if (result == NULL) {
        GGML_PRINT("%s: failed to allocate %6.2f MB\n", __func__, size/(1024.0*1024.0));
        GGML_ASSERT(false);
    }
    return result;
}

// calloc
inline static void * ggml_calloc(size_t num, size_t size) {
    if (num == 0 || size == 0) {
        GGML_PRINT("WARNING: Behavior may be unexpected when allocating 0 bytes for ggml_calloc!\n");
        return NULL;
    }
    void * result = calloc(num, size);
    if (result == NULL) {
        // report the total requested size (num*size), not just the element size
        GGML_PRINT("%s: failed to allocate %6.2f MB\n", __func__, (num*size)/(1024.0*1024.0));
        GGML_ASSERT(false);
    }
    return result;
}
#define GGML_MALLOC(size)      ggml_malloc(size)
#define GGML_CALLOC(num, size) ggml_calloc(num, size)

#define GGML_FREE(ptr) free(ptr)

#define UNUSED GGML_UNUSED
#define SWAP(x, y, T) do { T SWAP = x; x = y; y = SWAP; } while (0)

#if defined(GGML_USE_ACCELERATE)
#include <Accelerate/Accelerate.h>
#if defined(GGML_USE_CLBLAST) // allow usage of CLBlast alongside Accelerate functions
#include "ggml-opencl.h"
#elif defined(GGML_USE_VULKAN)
#include "ggml-vulkan.h"
#endif
#elif defined(GGML_USE_OPENBLAS)
#if defined(GGML_BLAS_USE_MKL)
#include <mkl.h>
#else
#include <cblas.h>
#endif
#elif defined(GGML_USE_CUBLAS)
#include "ggml-cuda.h"
#elif defined(GGML_USE_CLBLAST)
#include "ggml-opencl.h"
#elif defined(GGML_USE_VULKAN)
#include "ggml-vulkan.h"
#elif defined(GGML_USE_SYCL)
#include "ggml-sycl.h"
#endif

// floating point type used to accumulate sums
typedef double ggml_float;

#undef MIN
#undef MAX

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

//
// global data
//

// precomputed gelu table for f16 (128 KB)
static ggml_fp16_t ggml_table_gelu_f16[1 << 16];

// precomputed quick gelu table for f16 (128 KB)
static ggml_fp16_t ggml_table_gelu_quick_f16[1 << 16];

// precomputed silu table for f16 (128 KB)
static ggml_fp16_t ggml_table_silu_f16[1 << 16];

// precomputed exp table for f16 (128 KB)
static ggml_fp16_t ggml_table_exp_f16[1 << 16];

// precomputed f32 table for f16 (256 KB) (ggml-impl.h)
float ggml_table_f32_f16[1 << 16];
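
// Illustrative sketch only (disabled, example_gelu_via_table is a placeholder name):
// the 2^16-entry tables above are populated at init time and are indexed directly by
// the raw bit pattern of an f16 value, so an activation becomes a single table lookup.
// The real lookups live in the vector ops further down; roughly along these lines:
#if 0
static float example_gelu_via_table(float x) {
    ggml_fp16_t h = GGML_FP32_TO_FP16(x);   // convert the input to f16
    uint16_t    t;
    memcpy(&t, &h, sizeof(t));              // reinterpret the f16 bits as a table index
    return GGML_FP16_TO_FP32(ggml_table_gelu_f16[t]);
}
#endif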
// note: do not use these inside ggml.c
// these are meant to be used via the ggml.h API
float ggml_fp16_to_fp32(ggml_fp16_t x) {
    return (float) GGML_FP16_TO_FP32(x);
}

ggml_fp16_t ggml_fp32_to_fp16(float x) {
    return GGML_FP32_TO_FP16(x);
}

void ggml_fp16_to_fp32_row(const ggml_fp16_t * x, float * y, int n) {
    for (int i = 0; i < n; i++) {
        y[i] = GGML_FP16_TO_FP32(x[i]);
    }
}

void ggml_fp32_to_fp16_row(const float * x, ggml_fp16_t * y, int n) {
    int i = 0;
#if defined(__F16C__)
    for (; i + 7 < n; i += 8) {
        __m256 x_vec = _mm256_loadu_ps(x + i);
        __m128i y_vec = _mm256_cvtps_ph(x_vec, _MM_FROUND_TO_NEAREST_INT);
        _mm_storeu_si128((__m128i *)(y + i), y_vec);
    }
    for(; i + 3 < n; i += 4) {
        __m128 x_vec = _mm_loadu_ps(x + i);
        __m128i y_vec = _mm_cvtps_ph(x_vec, _MM_FROUND_TO_NEAREST_INT);
        _mm_storel_epi64((__m128i *)(y + i), y_vec);
    }
#endif
    for (; i < n; i++) {
        y[i] = GGML_FP32_TO_FP16(x[i]);
    }
}
//
// timing
//

#if defined(_MSC_VER) || defined(__MINGW32__)
static int64_t timer_freq, timer_start;
void ggml_time_init(void) {
    LARGE_INTEGER t;
    QueryPerformanceFrequency(&t);
    timer_freq = t.QuadPart;

    // The multiplication by 1000 or 1000000 below can cause an overflow if timer_freq
    // and the uptime is high enough.
    // We subtract the program start time to reduce the likelihood of that happening.
    QueryPerformanceCounter(&t);
    timer_start = t.QuadPart;
}
int64_t ggml_time_ms(void) {
    LARGE_INTEGER t;
    QueryPerformanceCounter(&t);
    return ((t.QuadPart-timer_start) * 1000) / timer_freq;
}
int64_t ggml_time_us(void) {
    LARGE_INTEGER t;
    QueryPerformanceCounter(&t);
    return ((t.QuadPart-timer_start) * 1000000) / timer_freq;
}
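
// Worked example of the overflow concern mentioned above (illustrative, assuming a
// typical 10 MHz QueryPerformanceCounter frequency): t.QuadPart * 1000000 exceeds
// INT64_MAX (~9.2e18) once t.QuadPart > ~9.2e12, i.e. after roughly
// 9.2e12 / 1e7 s ~= 10.7 days of uptime. Subtracting timer_start keeps the value
// being multiplied proportional to the lifetime of the process rather than the
// uptime of the machine.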
  326. #else
  327. void ggml_time_init(void) {}
  328. int64_t ggml_time_ms(void) {
  329. struct timespec ts;
  330. clock_gettime(CLOCK_MONOTONIC, &ts);
  331. return (int64_t)ts.tv_sec*1000 + (int64_t)ts.tv_nsec/1000000;
  332. }
  333. int64_t ggml_time_us(void) {
  334. struct timespec ts;
  335. clock_gettime(CLOCK_MONOTONIC, &ts);
  336. return (int64_t)ts.tv_sec*1000000 + (int64_t)ts.tv_nsec/1000;
  337. }
  338. #endif
  339. int64_t ggml_cycles(void) {
  340. return clock();
  341. }
  342. int64_t ggml_cycles_per_ms(void) {
  343. return CLOCKS_PER_SEC/1000;
  344. }
  345. #ifdef GGML_PERF
  346. #define ggml_perf_time_ms() ggml_time_ms()
  347. #define ggml_perf_time_us() ggml_time_us()
  348. #define ggml_perf_cycles() ggml_cycles()
  349. #define ggml_perf_cycles_per_ms() ggml_cycles_per_ms()
  350. #else
  351. #define ggml_perf_time_ms() 0
  352. #define ggml_perf_time_us() 0
  353. #define ggml_perf_cycles() 0
  354. #define ggml_perf_cycles_per_ms() 0
  355. #endif
  356. //
  357. // cache line
  358. //
  359. #if defined(__cpp_lib_hardware_interference_size)
  360. #define CACHE_LINE_SIZE hardware_destructive_interference_size
  361. #else
  362. #if defined(__POWER9_VECTOR__)
  363. #define CACHE_LINE_SIZE 128
  364. #else
  365. #define CACHE_LINE_SIZE 64
  366. #endif
  367. #endif
  368. static const size_t CACHE_LINE_SIZE_F32 = CACHE_LINE_SIZE/sizeof(float);
  369. static void ggml_vec_dot_f32(int n, float * restrict s, size_t bs, const float * restrict x, size_t bx, const float * restrict y, size_t by, int nrc);
  370. static void ggml_vec_dot_f16(int n, float * restrict s, size_t bs, ggml_fp16_t * restrict x, size_t bx, ggml_fp16_t * restrict y, size_t by, int nrc);
  371. static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = {
  372. [GGML_TYPE_I8] = {
  373. .type_name = "i8",
  374. .blck_size = 1,
  375. .type_size = sizeof(int8_t),
  376. .is_quantized = false,
  377. },
  378. [GGML_TYPE_I16] = {
  379. .type_name = "i16",
  380. .blck_size = 1,
  381. .type_size = sizeof(int16_t),
  382. .is_quantized = false,
  383. },
  384. [GGML_TYPE_I32] = {
  385. .type_name = "i32",
  386. .blck_size = 1,
  387. .type_size = sizeof(int32_t),
  388. .is_quantized = false,
  389. },
  390. [GGML_TYPE_F32] = {
  391. .type_name = "f32",
  392. .blck_size = 1,
  393. .type_size = sizeof(float),
  394. .is_quantized = false,
  395. .vec_dot = (ggml_vec_dot_t) ggml_vec_dot_f32,
  396. .vec_dot_type = GGML_TYPE_F32,
  397. .nrows = 1,
  398. },
  399. [GGML_TYPE_F16] = {
  400. .type_name = "f16",
  401. .blck_size = 1,
  402. .type_size = sizeof(ggml_fp16_t),
  403. .is_quantized = false,
  404. .to_float = (ggml_to_float_t) ggml_fp16_to_fp32_row,
  405. .from_float = (ggml_from_float_t) ggml_fp32_to_fp16_row,
  406. .from_float_reference = (ggml_from_float_t) ggml_fp32_to_fp16_row,
  407. .vec_dot = (ggml_vec_dot_t) ggml_vec_dot_f16,
  408. .vec_dot_type = GGML_TYPE_F16,
  409. .nrows = 1,
  410. },
  411. [GGML_TYPE_Q4_0] = {
  412. .type_name = "q4_0",
  413. .blck_size = QK4_0,
  414. .type_size = sizeof(block_q4_0),
  415. .is_quantized = true,
  416. .to_float = (ggml_to_float_t) dequantize_row_q4_0,
  417. .from_float = quantize_row_q4_0,
  418. .from_float_reference = (ggml_from_float_t) quantize_row_q4_0_reference,
  419. .vec_dot = ggml_vec_dot_q4_0_q8_0,
  420. .vec_dot_type = GGML_TYPE_Q8_0,
  421. #if defined (__ARM_FEATURE_MATMUL_INT8)
  422. .nrows = 2,
  423. #else
  424. .nrows = 1,
  425. #endif
  426. },
  427. [GGML_TYPE_Q4_1] = {
  428. .type_name = "q4_1",
  429. .blck_size = QK4_1,
  430. .type_size = sizeof(block_q4_1),
  431. .is_quantized = true,
  432. .to_float = (ggml_to_float_t) dequantize_row_q4_1,
  433. .from_float = quantize_row_q4_1,
  434. .from_float_reference = (ggml_from_float_t) quantize_row_q4_1_reference,
  435. .vec_dot = ggml_vec_dot_q4_1_q8_1,
  436. .vec_dot_type = GGML_TYPE_Q8_1,
  437. #if defined (__ARM_FEATURE_MATMUL_INT8)
  438. .nrows = 2,
  439. #else
  440. .nrows = 1,
  441. #endif
  442. },
  443. [4] = { // GGML_TYPE_Q4_2
  444. .type_name = "DEPRECATED",
  445. .blck_size = 0,
  446. .type_size = 0,
  447. .is_quantized = false,
  448. .to_float = NULL,
  449. .from_float = NULL,
  450. .from_float_reference = NULL,
  451. .vec_dot = NULL,
  452. .vec_dot_type = GGML_TYPE_COUNT,
  453. .nrows = 1,
  454. },
  455. [5] = { // GGML_TYPE_Q4_3
  456. .type_name = "DEPRECATED",
  457. .blck_size = 0,
  458. .type_size = 0,
  459. .is_quantized = false,
  460. .to_float = NULL,
  461. .from_float = NULL,
  462. .from_float_reference = NULL,
  463. .vec_dot = NULL,
  464. .vec_dot_type = GGML_TYPE_COUNT,
  465. .nrows = 1,
  466. },
  467. [GGML_TYPE_Q5_0] = {
  468. .type_name = "q5_0",
  469. .blck_size = QK5_0,
  470. .type_size = sizeof(block_q5_0),
  471. .is_quantized = true,
  472. .to_float = (ggml_to_float_t) dequantize_row_q5_0,
  473. .from_float = quantize_row_q5_0,
  474. .from_float_reference = (ggml_from_float_t) quantize_row_q5_0_reference,
  475. .vec_dot = ggml_vec_dot_q5_0_q8_0,
  476. .vec_dot_type = GGML_TYPE_Q8_0,
  477. .nrows = 1,
  478. },
  479. [GGML_TYPE_Q5_1] = {
  480. .type_name = "q5_1",
  481. .blck_size = QK5_1,
  482. .type_size = sizeof(block_q5_1),
  483. .is_quantized = true,
  484. .to_float = (ggml_to_float_t) dequantize_row_q5_1,
  485. .from_float = quantize_row_q5_1,
  486. .from_float_reference = (ggml_from_float_t) quantize_row_q5_1_reference,
  487. .vec_dot = ggml_vec_dot_q5_1_q8_1,
  488. .vec_dot_type = GGML_TYPE_Q8_1,
  489. .nrows = 1,
  490. },
  491. [GGML_TYPE_Q8_0] = {
  492. .type_name = "q8_0",
  493. .blck_size = QK8_0,
  494. .type_size = sizeof(block_q8_0),
  495. .is_quantized = true,
  496. .to_float = (ggml_to_float_t) dequantize_row_q8_0,
  497. .from_float = quantize_row_q8_0,
  498. .from_float_reference = (ggml_from_float_t) quantize_row_q8_0_reference,
  499. .vec_dot = ggml_vec_dot_q8_0_q8_0,
  500. .vec_dot_type = GGML_TYPE_Q8_0,
  501. #if defined (__ARM_FEATURE_MATMUL_INT8)
  502. .nrows = 2,
  503. #else
  504. .nrows = 1,
  505. #endif
  506. },
  507. [GGML_TYPE_Q8_1] = {
  508. .type_name = "q8_1",
  509. .blck_size = QK8_1,
  510. .type_size = sizeof(block_q8_1),
  511. .is_quantized = true,
  512. .from_float = quantize_row_q8_1,
  513. .from_float_reference = (ggml_from_float_t) quantize_row_q8_1_reference,
  514. .vec_dot_type = GGML_TYPE_Q8_1,
  515. .nrows = 1,
  516. },
  517. [GGML_TYPE_Q2_K] = {
  518. .type_name = "q2_K",
  519. .blck_size = QK_K,
  520. .type_size = sizeof(block_q2_K),
  521. .is_quantized = true,
  522. .to_float = (ggml_to_float_t) dequantize_row_q2_K,
  523. .from_float = quantize_row_q2_K,
  524. .from_float_reference = (ggml_from_float_t) quantize_row_q2_K_reference,
  525. .vec_dot = ggml_vec_dot_q2_K_q8_K,
  526. .vec_dot_type = GGML_TYPE_Q8_K,
  527. .nrows = 1,
  528. },
  529. [GGML_TYPE_Q3_K] = {
  530. .type_name = "q3_K",
  531. .blck_size = QK_K,
  532. .type_size = sizeof(block_q3_K),
  533. .is_quantized = true,
  534. .to_float = (ggml_to_float_t) dequantize_row_q3_K,
  535. .from_float = quantize_row_q3_K,
  536. .from_float_reference = (ggml_from_float_t) quantize_row_q3_K_reference,
  537. .vec_dot = ggml_vec_dot_q3_K_q8_K,
  538. .vec_dot_type = GGML_TYPE_Q8_K,
  539. .nrows = 1,
  540. },
  541. [GGML_TYPE_Q4_K] = {
  542. .type_name = "q4_K",
  543. .blck_size = QK_K,
  544. .type_size = sizeof(block_q4_K),
  545. .is_quantized = true,
  546. .to_float = (ggml_to_float_t) dequantize_row_q4_K,
  547. .from_float = quantize_row_q4_K,
  548. .from_float_reference = (ggml_from_float_t) quantize_row_q4_K_reference,
  549. .vec_dot = ggml_vec_dot_q4_K_q8_K,
  550. .vec_dot_type = GGML_TYPE_Q8_K,
  551. .nrows = 1,
  552. },
  553. [GGML_TYPE_Q5_K] = {
  554. .type_name = "q5_K",
  555. .blck_size = QK_K,
  556. .type_size = sizeof(block_q5_K),
  557. .is_quantized = true,
  558. .to_float = (ggml_to_float_t) dequantize_row_q5_K,
  559. .from_float = quantize_row_q5_K,
  560. .from_float_reference = (ggml_from_float_t) quantize_row_q5_K_reference,
  561. .vec_dot = ggml_vec_dot_q5_K_q8_K,
  562. .vec_dot_type = GGML_TYPE_Q8_K,
  563. .nrows = 1,
  564. },
  565. [GGML_TYPE_Q6_K] = {
  566. .type_name = "q6_K",
  567. .blck_size = QK_K,
  568. .type_size = sizeof(block_q6_K),
  569. .is_quantized = true,
  570. .to_float = (ggml_to_float_t) dequantize_row_q6_K,
  571. .from_float = quantize_row_q6_K,
  572. .from_float_reference = (ggml_from_float_t) quantize_row_q6_K_reference,
  573. .vec_dot = ggml_vec_dot_q6_K_q8_K,
  574. .vec_dot_type = GGML_TYPE_Q8_K,
  575. .nrows = 1,
  576. },
  577. [GGML_TYPE_IQ2_XXS] = {
  578. .type_name = "iq2_xxs",
  579. .blck_size = QK_K,
  580. .type_size = sizeof(block_iq2_xxs),
  581. .is_quantized = true,
  582. .to_float = (ggml_to_float_t) dequantize_row_iq2_xxs,
  583. .from_float = NULL,
  584. .from_float_reference = NULL,
  585. .vec_dot = ggml_vec_dot_iq2_xxs_q8_K,
  586. .vec_dot_type = GGML_TYPE_Q8_K,
  587. .nrows = 1,
  588. },
  589. [GGML_TYPE_IQ2_XS] = {
  590. .type_name = "iq2_xs",
  591. .blck_size = QK_K,
  592. .type_size = sizeof(block_iq2_xs),
  593. .is_quantized = true,
  594. .to_float = (ggml_to_float_t) dequantize_row_iq2_xs,
  595. .from_float = NULL,
  596. .from_float_reference = NULL,
  597. .vec_dot = ggml_vec_dot_iq2_xs_q8_K,
  598. .vec_dot_type = GGML_TYPE_Q8_K,
  599. .nrows = 1,
  600. },
  601. [GGML_TYPE_IQ3_XXS] = {
  602. .type_name = "iq3_xxs",
  603. .blck_size = QK_K,
  604. .type_size = sizeof(block_iq3_xxs),
  605. .is_quantized = true,
  606. .to_float = (ggml_to_float_t) dequantize_row_iq3_xxs,
  607. .from_float = quantize_row_iq3_xxs,
  608. .from_float_reference = (ggml_from_float_t)quantize_row_iq3_xxs_reference,
  609. .vec_dot = ggml_vec_dot_iq3_xxs_q8_K,
  610. .vec_dot_type = GGML_TYPE_Q8_K,
  611. .nrows = 1,
  612. },
  613. [GGML_TYPE_IQ1_S] = {
  614. .type_name = "iq1_s",
  615. .blck_size = QK_K,
  616. .type_size = sizeof(block_iq1_s),
  617. .is_quantized = true,
  618. .to_float = (ggml_to_float_t) dequantize_row_iq1_s,
  619. .from_float = NULL,
  620. .from_float_reference = NULL,
  621. .vec_dot = ggml_vec_dot_iq1_s_q8_K,
  622. .vec_dot_type = GGML_TYPE_Q8_K,
  623. .nrows = 1,
  624. },
  625. [GGML_TYPE_IQ4_NL] = {
  626. .type_name = "iq4_nl",
  627. .blck_size = QK4_NL,
  628. .type_size = sizeof(block_iq4_nl),
  629. .is_quantized = true,
  630. .to_float = (ggml_to_float_t) dequantize_row_iq4_nl,
  631. .from_float = quantize_row_iq4_nl,
  632. .from_float_reference = (ggml_from_float_t)quantize_row_iq4_nl_reference,
  633. .vec_dot = ggml_vec_dot_iq4_nl_q8_0,
  634. .vec_dot_type = GGML_TYPE_Q8_0,
  635. .nrows = 1,
  636. },
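// note (informative comment only): GGML_TYPE_Q8_K is the activation format used
// as .vec_dot_type by the k-quant and IQ dot products above, not a weight
// storage type, so its entry below only needs from_float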
  637. [GGML_TYPE_Q8_K] = {
  638. .type_name = "q8_K",
  639. .blck_size = QK_K,
  640. .type_size = sizeof(block_q8_K),
  641. .is_quantized = true,
  642. .from_float = quantize_row_q8_K,
  643. }
  644. };
  645. // For internal test use
  646. ggml_type_traits_t ggml_internal_get_type_traits(enum ggml_type type) {
  647. GGML_ASSERT(type < GGML_TYPE_COUNT);
  648. return type_traits[type];
  649. }
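// Illustrative sketch (informative comment only, not part of ggml): how test
// code might use the traits table to round-trip a row of floats through a
// quantized type. The helper name is hypothetical; n must be a multiple of
// the block size.
//
//   static void example_roundtrip_q8_0(const float * src, float * dst, int n) {
//       const ggml_type_traits_t tt = ggml_internal_get_type_traits(GGML_TYPE_Q8_0);
//       assert(n % tt.blck_size == 0);
//       void * buf = malloc((n/tt.blck_size)*tt.type_size); // one row, see ggml_row_size()
//       tt.from_float(src, buf, n); // quantize
//       tt.to_float (buf, dst, n); // dequantize
//       free(buf);
//   }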
  650. //
  651. // simd mappings
  652. //
  653. #if defined(__ARM_NEON)
  654. #if !defined(__aarch64__)
655. // compatibility shim: vaddvq_f32 is only available on AArch64, so provide a scalar fallback for 32-bit ARM
  656. inline static float vaddvq_f32(float32x4_t v) {
  657. return vgetq_lane_f32(v, 0) + vgetq_lane_f32(v, 1) + vgetq_lane_f32(v, 2) + vgetq_lane_f32(v, 3);
  658. }
  659. #endif
  660. #endif
  661. // we define a common set of C macros which map to specific intrinsics based on the current architecture
  662. // we then implement the fundamental computation operations below using only these macros
663. // adding support for new architectures requires defining the corresponding SIMD macros
  664. //
  665. // GGML_F32_STEP / GGML_F16_STEP
  666. // number of elements to process in a single step
  667. //
  668. // GGML_F32_EPR / GGML_F16_EPR
  669. // number of elements to fit in a single register
  670. //
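// Illustrative sketch (informative comment only): the shape of a loop written
// against these macros, mirroring ggml_vec_dot_f32() below. With NEON,
// GGML_F32_STEP = 16 and GGML_F32_EPR = 4, so each step touches
// GGML_F32_ARR = 4 registers of 4 floats:
//
//   GGML_F32_VEC acc[GGML_F32_ARR] = { GGML_F32_VEC_ZERO };
//   const int np = (n & ~(GGML_F32_STEP - 1)); // largest multiple of STEP <= n
//   for (int i = 0; i < np; i += GGML_F32_STEP) {
//       for (int j = 0; j < GGML_F32_ARR; j++) {
//           acc[j] = GGML_F32_VEC_ADD(acc[j], GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR));
//       }
//   }
//   float sum;
//   GGML_F32_VEC_REDUCE(sum, acc);            // horizontal reduction of all accumulators
//   for (int i = np; i < n; ++i) sum += x[i]; // scalar tail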
  671. #if defined(__ARM_NEON) && defined(__ARM_FEATURE_FMA)
  672. #define GGML_SIMD
  673. // F32 NEON
  674. #define GGML_F32_STEP 16
  675. #define GGML_F32_EPR 4
  676. #define GGML_F32x4 float32x4_t
  677. #define GGML_F32x4_ZERO vdupq_n_f32(0.0f)
  678. #define GGML_F32x4_SET1(x) vdupq_n_f32(x)
  679. #define GGML_F32x4_LOAD vld1q_f32
  680. #define GGML_F32x4_STORE vst1q_f32
  681. #define GGML_F32x4_FMA(a, b, c) vfmaq_f32(a, b, c)
  682. #define GGML_F32x4_ADD vaddq_f32
  683. #define GGML_F32x4_MUL vmulq_f32
  684. #define GGML_F32x4_REDUCE_ONE(x) vaddvq_f32(x)
  685. #define GGML_F32x4_REDUCE(res, x) \
  686. { \
  687. int offset = GGML_F32_ARR >> 1; \
  688. for (int i = 0; i < offset; ++i) { \
  689. x[i] = vaddq_f32(x[i], x[offset+i]); \
  690. } \
  691. offset >>= 1; \
  692. for (int i = 0; i < offset; ++i) { \
  693. x[i] = vaddq_f32(x[i], x[offset+i]); \
  694. } \
  695. offset >>= 1; \
  696. for (int i = 0; i < offset; ++i) { \
  697. x[i] = vaddq_f32(x[i], x[offset+i]); \
  698. } \
  699. res = GGML_F32x4_REDUCE_ONE(x[0]); \
  700. }
  701. #define GGML_F32_VEC GGML_F32x4
  702. #define GGML_F32_VEC_ZERO GGML_F32x4_ZERO
  703. #define GGML_F32_VEC_SET1 GGML_F32x4_SET1
  704. #define GGML_F32_VEC_LOAD GGML_F32x4_LOAD
  705. #define GGML_F32_VEC_STORE GGML_F32x4_STORE
  706. #define GGML_F32_VEC_FMA GGML_F32x4_FMA
  707. #define GGML_F32_VEC_ADD GGML_F32x4_ADD
  708. #define GGML_F32_VEC_MUL GGML_F32x4_MUL
  709. #define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE
  710. // F16 NEON
  711. #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
  712. #define GGML_F16_STEP 32
  713. #define GGML_F16_EPR 8
  714. #define GGML_F16x8 float16x8_t
  715. #define GGML_F16x8_ZERO vdupq_n_f16(0.0f)
  716. #define GGML_F16x8_SET1(x) vdupq_n_f16(x)
  717. #define GGML_F16x8_LOAD vld1q_f16
  718. #define GGML_F16x8_STORE vst1q_f16
  719. #define GGML_F16x8_FMA(a, b, c) vfmaq_f16(a, b, c)
  720. #define GGML_F16x8_ADD vaddq_f16
  721. #define GGML_F16x8_MUL vmulq_f16
  722. #define GGML_F16x8_REDUCE(res, x) \
  723. do { \
  724. int offset = GGML_F16_ARR >> 1; \
  725. for (int i = 0; i < offset; ++i) { \
  726. x[i] = vaddq_f16(x[i], x[offset+i]); \
  727. } \
  728. offset >>= 1; \
  729. for (int i = 0; i < offset; ++i) { \
  730. x[i] = vaddq_f16(x[i], x[offset+i]); \
  731. } \
  732. offset >>= 1; \
  733. for (int i = 0; i < offset; ++i) { \
  734. x[i] = vaddq_f16(x[i], x[offset+i]); \
  735. } \
  736. const float32x4_t t0 = vcvt_f32_f16(vget_low_f16 (x[0])); \
  737. const float32x4_t t1 = vcvt_f32_f16(vget_high_f16(x[0])); \
  738. res = (ggml_float) vaddvq_f32(vaddq_f32(t0, t1)); \
  739. } while (0)
  740. #define GGML_F16_VEC GGML_F16x8
  741. #define GGML_F16_VEC_ZERO GGML_F16x8_ZERO
  742. #define GGML_F16_VEC_SET1 GGML_F16x8_SET1
  743. #define GGML_F16_VEC_LOAD(p, i) GGML_F16x8_LOAD(p)
  744. #define GGML_F16_VEC_STORE(p, r, i) GGML_F16x8_STORE(p, r[i])
  745. #define GGML_F16_VEC_FMA GGML_F16x8_FMA
  746. #define GGML_F16_VEC_ADD GGML_F16x8_ADD
  747. #define GGML_F16_VEC_MUL GGML_F16x8_MUL
  748. #define GGML_F16_VEC_REDUCE GGML_F16x8_REDUCE
  749. #else
  750. // if FP16 vector arithmetic is not supported, we use FP32 instead
  751. // and take advantage of the vcvt_ functions to convert to/from FP16
  752. #define GGML_F16_STEP 16
  753. #define GGML_F16_EPR 4
  754. #define GGML_F32Cx4 float32x4_t
  755. #define GGML_F32Cx4_ZERO vdupq_n_f32(0.0f)
  756. #define GGML_F32Cx4_SET1(x) vdupq_n_f32(x)
  757. #define GGML_F32Cx4_LOAD(x) vcvt_f32_f16(vld1_f16(x))
  758. #define GGML_F32Cx4_STORE(x, y) vst1_f16(x, vcvt_f16_f32(y))
  759. #define GGML_F32Cx4_FMA(a, b, c) vfmaq_f32(a, b, c)
  760. #define GGML_F32Cx4_ADD vaddq_f32
  761. #define GGML_F32Cx4_MUL vmulq_f32
  762. #define GGML_F32Cx4_REDUCE GGML_F32x4_REDUCE
  763. #define GGML_F16_VEC GGML_F32Cx4
  764. #define GGML_F16_VEC_ZERO GGML_F32Cx4_ZERO
  765. #define GGML_F16_VEC_SET1 GGML_F32Cx4_SET1
  766. #define GGML_F16_VEC_LOAD(p, i) GGML_F32Cx4_LOAD(p)
  767. #define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx4_STORE(p, r[i])
  768. #define GGML_F16_VEC_FMA GGML_F32Cx4_FMA
  769. #define GGML_F16_VEC_ADD GGML_F32Cx4_ADD
  770. #define GGML_F16_VEC_MUL GGML_F32Cx4_MUL
  771. #define GGML_F16_VEC_REDUCE GGML_F32Cx4_REDUCE
  772. #endif
  773. #elif defined(__AVX__)
  774. #define GGML_SIMD
  775. // F32 AVX
  776. #define GGML_F32_STEP 32
  777. #define GGML_F32_EPR 8
  778. #define GGML_F32x8 __m256
  779. #define GGML_F32x8_ZERO _mm256_setzero_ps()
  780. #define GGML_F32x8_SET1(x) _mm256_set1_ps(x)
  781. #define GGML_F32x8_LOAD _mm256_loadu_ps
  782. #define GGML_F32x8_STORE _mm256_storeu_ps
  783. #if defined(__FMA__)
  784. #define GGML_F32x8_FMA(a, b, c) _mm256_fmadd_ps(b, c, a)
  785. #else
  786. #define GGML_F32x8_FMA(a, b, c) _mm256_add_ps(_mm256_mul_ps(b, c), a)
  787. #endif
  788. #define GGML_F32x8_ADD _mm256_add_ps
  789. #define GGML_F32x8_MUL _mm256_mul_ps
  790. #define GGML_F32x8_REDUCE(res, x) \
  791. do { \
  792. int offset = GGML_F32_ARR >> 1; \
  793. for (int i = 0; i < offset; ++i) { \
  794. x[i] = _mm256_add_ps(x[i], x[offset+i]); \
  795. } \
  796. offset >>= 1; \
  797. for (int i = 0; i < offset; ++i) { \
  798. x[i] = _mm256_add_ps(x[i], x[offset+i]); \
  799. } \
  800. offset >>= 1; \
  801. for (int i = 0; i < offset; ++i) { \
  802. x[i] = _mm256_add_ps(x[i], x[offset+i]); \
  803. } \
  804. const __m128 t0 = _mm_add_ps(_mm256_castps256_ps128(x[0]), \
  805. _mm256_extractf128_ps(x[0], 1)); \
  806. const __m128 t1 = _mm_hadd_ps(t0, t0); \
  807. res = (ggml_float) _mm_cvtss_f32(_mm_hadd_ps(t1, t1)); \
  808. } while (0)
809. // TODO: is this optimal?
  810. #define GGML_F32_VEC GGML_F32x8
  811. #define GGML_F32_VEC_ZERO GGML_F32x8_ZERO
  812. #define GGML_F32_VEC_SET1 GGML_F32x8_SET1
  813. #define GGML_F32_VEC_LOAD GGML_F32x8_LOAD
  814. #define GGML_F32_VEC_STORE GGML_F32x8_STORE
  815. #define GGML_F32_VEC_FMA GGML_F32x8_FMA
  816. #define GGML_F32_VEC_ADD GGML_F32x8_ADD
  817. #define GGML_F32_VEC_MUL GGML_F32x8_MUL
  818. #define GGML_F32_VEC_REDUCE GGML_F32x8_REDUCE
  819. // F16 AVX
  820. #define GGML_F16_STEP 32
  821. #define GGML_F16_EPR 8
  822. // F16 arithmetic is not supported by AVX, so we use F32 instead
  823. #define GGML_F32Cx8 __m256
  824. #define GGML_F32Cx8_ZERO _mm256_setzero_ps()
  825. #define GGML_F32Cx8_SET1(x) _mm256_set1_ps(x)
  826. #if defined(__F16C__)
  827. // the _mm256_cvt intrinsics require F16C
  828. #define GGML_F32Cx8_LOAD(x) _mm256_cvtph_ps(_mm_loadu_si128((__m128i *)(x)))
  829. #define GGML_F32Cx8_STORE(x, y) _mm_storeu_si128((__m128i *)(x), _mm256_cvtps_ph(y, 0))
  830. #else
  831. static inline __m256 __avx_f32cx8_load(ggml_fp16_t *x) {
  832. float tmp[8];
  833. for (int i = 0; i < 8; i++) {
  834. tmp[i] = GGML_FP16_TO_FP32(x[i]);
  835. }
  836. return _mm256_loadu_ps(tmp);
  837. }
  838. static inline void __avx_f32cx8_store(ggml_fp16_t *x, __m256 y) {
  839. float arr[8];
  840. _mm256_storeu_ps(arr, y);
  841. for (int i = 0; i < 8; i++)
  842. x[i] = GGML_FP32_TO_FP16(arr[i]);
  843. }
  844. #define GGML_F32Cx8_LOAD(x) __avx_f32cx8_load(x)
  845. #define GGML_F32Cx8_STORE(x, y) __avx_f32cx8_store(x, y)
  846. #endif
  847. #define GGML_F32Cx8_FMA GGML_F32x8_FMA
  848. #define GGML_F32Cx8_ADD _mm256_add_ps
  849. #define GGML_F32Cx8_MUL _mm256_mul_ps
  850. #define GGML_F32Cx8_REDUCE GGML_F32x8_REDUCE
  851. #define GGML_F16_VEC GGML_F32Cx8
  852. #define GGML_F16_VEC_ZERO GGML_F32Cx8_ZERO
  853. #define GGML_F16_VEC_SET1 GGML_F32Cx8_SET1
  854. #define GGML_F16_VEC_LOAD(p, i) GGML_F32Cx8_LOAD(p)
  855. #define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx8_STORE(p, r[i])
  856. #define GGML_F16_VEC_FMA GGML_F32Cx8_FMA
  857. #define GGML_F16_VEC_ADD GGML_F32Cx8_ADD
  858. #define GGML_F16_VEC_MUL GGML_F32Cx8_MUL
  859. #define GGML_F16_VEC_REDUCE GGML_F32Cx8_REDUCE
  860. #elif defined(__POWER9_VECTOR__)
  861. #define GGML_SIMD
  862. // F32 POWER9
  863. #define GGML_F32_STEP 32
  864. #define GGML_F32_EPR 4
  865. #define GGML_F32x4 vector float
  866. #define GGML_F32x4_ZERO 0.0f
  867. #define GGML_F32x4_SET1 vec_splats
  868. #define GGML_F32x4_LOAD(p) vec_xl(0, p)
  869. #define GGML_F32x4_STORE(p, r) vec_xst(r, 0, p)
  870. #define GGML_F32x4_FMA(a, b, c) vec_madd(b, c, a)
  871. #define GGML_F32x4_ADD vec_add
  872. #define GGML_F32x4_MUL vec_mul
  873. #define GGML_F32x4_REDUCE(res, x) \
  874. { \
  875. int offset = GGML_F32_ARR >> 1; \
  876. for (int i = 0; i < offset; ++i) { \
  877. x[i] = vec_add(x[i], x[offset+i]); \
  878. } \
  879. offset >>= 1; \
  880. for (int i = 0; i < offset; ++i) { \
  881. x[i] = vec_add(x[i], x[offset+i]); \
  882. } \
  883. offset >>= 1; \
  884. for (int i = 0; i < offset; ++i) { \
  885. x[i] = vec_add(x[i], x[offset+i]); \
  886. } \
  887. res = vec_extract(x[0], 0) + \
  888. vec_extract(x[0], 1) + \
  889. vec_extract(x[0], 2) + \
  890. vec_extract(x[0], 3); \
  891. }
  892. #define GGML_F32_VEC GGML_F32x4
  893. #define GGML_F32_VEC_ZERO GGML_F32x4_ZERO
  894. #define GGML_F32_VEC_SET1 GGML_F32x4_SET1
  895. #define GGML_F32_VEC_LOAD GGML_F32x4_LOAD
  896. #define GGML_F32_VEC_STORE GGML_F32x4_STORE
  897. #define GGML_F32_VEC_FMA GGML_F32x4_FMA
  898. #define GGML_F32_VEC_ADD GGML_F32x4_ADD
  899. #define GGML_F32_VEC_MUL GGML_F32x4_MUL
  900. #define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE
  901. // F16 POWER9
  902. #define GGML_F16_STEP GGML_F32_STEP
  903. #define GGML_F16_EPR GGML_F32_EPR
  904. #define GGML_F16_VEC GGML_F32x4
  905. #define GGML_F16_VEC_ZERO GGML_F32x4_ZERO
  906. #define GGML_F16_VEC_SET1 GGML_F32x4_SET1
  907. #define GGML_F16_VEC_FMA GGML_F32x4_FMA
  908. #define GGML_F16_VEC_REDUCE GGML_F32x4_REDUCE
  909. // Use vec_xl, not vec_ld, in case the load address is not aligned.
  910. #define GGML_F16_VEC_LOAD(p, i) (i & 0x1) ? \
  911. vec_extract_fp32_from_shorth(vec_xl(0, p - GGML_F16_EPR)) : \
  912. vec_extract_fp32_from_shortl(vec_xl(0, p))
  913. #define GGML_ENDIAN_BYTE(i) ((unsigned char *)&(uint16_t){1})[i]
  914. #define GGML_F16_VEC_STORE(p, r, i) \
  915. if (i & 0x1) \
  916. vec_xst(vec_pack_to_short_fp32(r[i - GGML_ENDIAN_BYTE(1)], \
  917. r[i - GGML_ENDIAN_BYTE(0)]), \
  918. 0, p - GGML_F16_EPR)
  919. #elif defined(__wasm_simd128__)
  920. #define GGML_SIMD
  921. // F32 WASM
  922. #define GGML_F32_STEP 16
  923. #define GGML_F32_EPR 4
  924. #define GGML_F32x4 v128_t
  925. #define GGML_F32x4_ZERO wasm_f32x4_splat(0.0f)
  926. #define GGML_F32x4_SET1(x) wasm_f32x4_splat(x)
  927. #define GGML_F32x4_LOAD wasm_v128_load
  928. #define GGML_F32x4_STORE wasm_v128_store
  929. #define GGML_F32x4_FMA(a, b, c) wasm_f32x4_add(wasm_f32x4_mul(b, c), a)
  930. #define GGML_F32x4_ADD wasm_f32x4_add
  931. #define GGML_F32x4_MUL wasm_f32x4_mul
  932. #define GGML_F32x4_REDUCE(res, x) \
  933. { \
  934. int offset = GGML_F32_ARR >> 1; \
  935. for (int i = 0; i < offset; ++i) { \
  936. x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
  937. } \
  938. offset >>= 1; \
  939. for (int i = 0; i < offset; ++i) { \
  940. x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
  941. } \
  942. offset >>= 1; \
  943. for (int i = 0; i < offset; ++i) { \
  944. x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
  945. } \
  946. res = wasm_f32x4_extract_lane(x[0], 0) + \
  947. wasm_f32x4_extract_lane(x[0], 1) + \
  948. wasm_f32x4_extract_lane(x[0], 2) + \
  949. wasm_f32x4_extract_lane(x[0], 3); \
  950. }
  951. #define GGML_F32_VEC GGML_F32x4
  952. #define GGML_F32_VEC_ZERO GGML_F32x4_ZERO
  953. #define GGML_F32_VEC_SET1 GGML_F32x4_SET1
  954. #define GGML_F32_VEC_LOAD GGML_F32x4_LOAD
  955. #define GGML_F32_VEC_STORE GGML_F32x4_STORE
  956. #define GGML_F32_VEC_FMA GGML_F32x4_FMA
  957. #define GGML_F32_VEC_ADD GGML_F32x4_ADD
  958. #define GGML_F32_VEC_MUL GGML_F32x4_MUL
  959. #define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE
  960. // F16 WASM
  961. #define GGML_F16_STEP 16
  962. #define GGML_F16_EPR 4
  963. inline static v128_t __wasm_f16x4_load(const ggml_fp16_t * p) {
  964. float tmp[4];
  965. tmp[0] = GGML_FP16_TO_FP32(p[0]);
  966. tmp[1] = GGML_FP16_TO_FP32(p[1]);
  967. tmp[2] = GGML_FP16_TO_FP32(p[2]);
  968. tmp[3] = GGML_FP16_TO_FP32(p[3]);
  969. return wasm_v128_load(tmp);
  970. }
  971. inline static void __wasm_f16x4_store(ggml_fp16_t * p, v128_t x) {
  972. float tmp[4];
  973. wasm_v128_store(tmp, x);
  974. p[0] = GGML_FP32_TO_FP16(tmp[0]);
  975. p[1] = GGML_FP32_TO_FP16(tmp[1]);
  976. p[2] = GGML_FP32_TO_FP16(tmp[2]);
  977. p[3] = GGML_FP32_TO_FP16(tmp[3]);
  978. }
  979. #define GGML_F16x4 v128_t
  980. #define GGML_F16x4_ZERO wasm_f32x4_splat(0.0f)
  981. #define GGML_F16x4_SET1(x) wasm_f32x4_splat(x)
  982. #define GGML_F16x4_LOAD(x) __wasm_f16x4_load(x)
  983. #define GGML_F16x4_STORE(x, y) __wasm_f16x4_store(x, y)
  984. #define GGML_F16x4_FMA GGML_F32x4_FMA
  985. #define GGML_F16x4_ADD wasm_f32x4_add
  986. #define GGML_F16x4_MUL wasm_f32x4_mul
  987. #define GGML_F16x4_REDUCE(res, x) \
  988. { \
  989. int offset = GGML_F16_ARR >> 1; \
  990. for (int i = 0; i < offset; ++i) { \
  991. x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
  992. } \
  993. offset >>= 1; \
  994. for (int i = 0; i < offset; ++i) { \
  995. x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
  996. } \
  997. offset >>= 1; \
  998. for (int i = 0; i < offset; ++i) { \
  999. x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
  1000. } \
  1001. res = wasm_f32x4_extract_lane(x[0], 0) + \
  1002. wasm_f32x4_extract_lane(x[0], 1) + \
  1003. wasm_f32x4_extract_lane(x[0], 2) + \
  1004. wasm_f32x4_extract_lane(x[0], 3); \
  1005. }
  1006. #define GGML_F16_VEC GGML_F16x4
  1007. #define GGML_F16_VEC_ZERO GGML_F16x4_ZERO
  1008. #define GGML_F16_VEC_SET1 GGML_F16x4_SET1
  1009. #define GGML_F16_VEC_LOAD(p, i) GGML_F16x4_LOAD(p)
  1010. #define GGML_F16_VEC_STORE(p, r, i) GGML_F16x4_STORE(p, r[i])
  1011. #define GGML_F16_VEC_FMA GGML_F16x4_FMA
  1012. #define GGML_F16_VEC_ADD GGML_F16x4_ADD
  1013. #define GGML_F16_VEC_MUL GGML_F16x4_MUL
  1014. #define GGML_F16_VEC_REDUCE GGML_F16x4_REDUCE
  1015. #elif defined(__SSE3__)
  1016. #define GGML_SIMD
  1017. // F32 SSE
  1018. #define GGML_F32_STEP 32
  1019. #define GGML_F32_EPR 4
  1020. #define GGML_F32x4 __m128
  1021. #define GGML_F32x4_ZERO _mm_setzero_ps()
  1022. #define GGML_F32x4_SET1(x) _mm_set1_ps(x)
  1023. #define GGML_F32x4_LOAD _mm_loadu_ps
  1024. #define GGML_F32x4_STORE _mm_storeu_ps
  1025. #if defined(__FMA__)
  1026. // TODO: Does this work?
  1027. #define GGML_F32x4_FMA(a, b, c) _mm_fmadd_ps(b, c, a)
  1028. #else
  1029. #define GGML_F32x4_FMA(a, b, c) _mm_add_ps(_mm_mul_ps(b, c), a)
  1030. #endif
  1031. #define GGML_F32x4_ADD _mm_add_ps
  1032. #define GGML_F32x4_MUL _mm_mul_ps
  1033. #define GGML_F32x4_REDUCE(res, x) \
  1034. { \
  1035. int offset = GGML_F32_ARR >> 1; \
  1036. for (int i = 0; i < offset; ++i) { \
  1037. x[i] = _mm_add_ps(x[i], x[offset+i]); \
  1038. } \
  1039. offset >>= 1; \
  1040. for (int i = 0; i < offset; ++i) { \
  1041. x[i] = _mm_add_ps(x[i], x[offset+i]); \
  1042. } \
  1043. offset >>= 1; \
  1044. for (int i = 0; i < offset; ++i) { \
  1045. x[i] = _mm_add_ps(x[i], x[offset+i]); \
  1046. } \
  1047. const __m128 t0 = _mm_hadd_ps(x[0], x[0]); \
  1048. res = (ggml_float) _mm_cvtss_f32(_mm_hadd_ps(t0, t0)); \
  1049. }
1050. // TODO: is this optimal?
  1051. #define GGML_F32_VEC GGML_F32x4
  1052. #define GGML_F32_VEC_ZERO GGML_F32x4_ZERO
  1053. #define GGML_F32_VEC_SET1 GGML_F32x4_SET1
  1054. #define GGML_F32_VEC_LOAD GGML_F32x4_LOAD
  1055. #define GGML_F32_VEC_STORE GGML_F32x4_STORE
  1056. #define GGML_F32_VEC_FMA GGML_F32x4_FMA
  1057. #define GGML_F32_VEC_ADD GGML_F32x4_ADD
  1058. #define GGML_F32_VEC_MUL GGML_F32x4_MUL
  1059. #define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE
  1060. // F16 SSE
  1061. #define GGML_F16_STEP 32
  1062. #define GGML_F16_EPR 4
  1063. static inline __m128 __sse_f16x4_load(ggml_fp16_t *x) {
  1064. float tmp[4];
  1065. tmp[0] = GGML_FP16_TO_FP32(x[0]);
  1066. tmp[1] = GGML_FP16_TO_FP32(x[1]);
  1067. tmp[2] = GGML_FP16_TO_FP32(x[2]);
  1068. tmp[3] = GGML_FP16_TO_FP32(x[3]);
  1069. return _mm_loadu_ps(tmp);
  1070. }
  1071. static inline void __sse_f16x4_store(ggml_fp16_t *x, __m128 y) {
  1072. float arr[4];
  1073. _mm_storeu_ps(arr, y);
  1074. x[0] = GGML_FP32_TO_FP16(arr[0]);
  1075. x[1] = GGML_FP32_TO_FP16(arr[1]);
  1076. x[2] = GGML_FP32_TO_FP16(arr[2]);
  1077. x[3] = GGML_FP32_TO_FP16(arr[3]);
  1078. }
  1079. #define GGML_F32Cx4 __m128
  1080. #define GGML_F32Cx4_ZERO _mm_setzero_ps()
  1081. #define GGML_F32Cx4_SET1(x) _mm_set1_ps(x)
  1082. #define GGML_F32Cx4_LOAD(x) __sse_f16x4_load(x)
  1083. #define GGML_F32Cx4_STORE(x, y) __sse_f16x4_store(x, y)
  1084. #define GGML_F32Cx4_FMA GGML_F32x4_FMA
  1085. #define GGML_F32Cx4_ADD _mm_add_ps
  1086. #define GGML_F32Cx4_MUL _mm_mul_ps
  1087. #define GGML_F32Cx4_REDUCE GGML_F32x4_REDUCE
  1088. #define GGML_F16_VEC GGML_F32Cx4
  1089. #define GGML_F16_VEC_ZERO GGML_F32Cx4_ZERO
  1090. #define GGML_F16_VEC_SET1 GGML_F32Cx4_SET1
  1091. #define GGML_F16_VEC_LOAD(p, i) GGML_F32Cx4_LOAD(p)
  1092. #define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx4_STORE(p, r[i])
  1093. #define GGML_F16_VEC_FMA GGML_F32Cx4_FMA
  1094. #define GGML_F16_VEC_ADD GGML_F32Cx4_ADD
  1095. #define GGML_F16_VEC_MUL GGML_F32Cx4_MUL
  1096. #define GGML_F16_VEC_REDUCE GGML_F32Cx4_REDUCE
  1097. #endif
  1098. // GGML_F32_ARR / GGML_F16_ARR
  1099. // number of registers to use per step
  1100. #ifdef GGML_SIMD
  1101. #define GGML_F32_ARR (GGML_F32_STEP/GGML_F32_EPR)
  1102. #define GGML_F16_ARR (GGML_F16_STEP/GGML_F16_EPR)
  1103. #endif
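// worked example (informative comment only): on AVX GGML_F32_ARR = 32/8 = 4
// (four 256-bit accumulators per step); on SSE and POWER9 it is 32/4 = 8.
// the three pairwise fold stages in the *_REDUCE macros above cover both
// cases (8 -> 4 -> 2 -> 1; with 4 accumulators the last stage is a no-op)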
  1104. //
  1105. // fundamental operations
  1106. //
  1107. inline static void ggml_vec_set_i8(const int n, int8_t * x, const int8_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
  1108. inline static void ggml_vec_set_i16(const int n, int16_t * x, const int16_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
  1109. inline static void ggml_vec_set_i32(const int n, int32_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
  1110. inline static void ggml_vec_set_f16(const int n, ggml_fp16_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
  1111. inline static void ggml_vec_add_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i] + y[i]; }
  1112. inline static void ggml_vec_add1_f32(const int n, float * z, const float * x, const float v) { for (int i = 0; i < n; ++i) z[i] = x[i] + v; }
  1113. inline static void ggml_vec_acc_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] += x[i]; }
  1114. inline static void ggml_vec_acc1_f32(const int n, float * y, const float v) { for (int i = 0; i < n; ++i) y[i] += v; }
  1115. inline static void ggml_vec_sub_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i] - y[i]; }
  1116. inline static void ggml_vec_set_f32 (const int n, float * x, const float v) { for (int i = 0; i < n; ++i) x[i] = v; }
  1117. inline static void ggml_vec_cpy_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i]; }
  1118. inline static void ggml_vec_neg_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = -x[i]; }
  1119. inline static void ggml_vec_mul_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]*y[i]; }
  1120. inline static void ggml_vec_div_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]/y[i]; }
  1121. static void ggml_vec_dot_f32(int n, float * restrict s, size_t bs, const float * restrict x, size_t bx, const float * restrict y, size_t by, int nrc) {
  1122. assert(nrc == 1);
  1123. UNUSED(nrc);
  1124. UNUSED(bx);
  1125. UNUSED(by);
  1126. UNUSED(bs);
  1127. #ifdef GGML_SIMD
  1128. float sumf = 0.0f;
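// np is n rounded down to a multiple of GGML_F32_STEP (STEP is a power of two,
// so masking with ~(STEP - 1) clears the low bits); the remainder is handled
// by the scalar "leftovers" loop below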
  1129. const int np = (n & ~(GGML_F32_STEP - 1));
  1130. GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO };
  1131. GGML_F32_VEC ax[GGML_F32_ARR];
  1132. GGML_F32_VEC ay[GGML_F32_ARR];
  1133. for (int i = 0; i < np; i += GGML_F32_STEP) {
  1134. for (int j = 0; j < GGML_F32_ARR; j++) {
  1135. ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR);
  1136. ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
  1137. sum[j] = GGML_F32_VEC_FMA(sum[j], ax[j], ay[j]);
  1138. }
  1139. }
  1140. // reduce sum0..sum3 to sum0
  1141. GGML_F32_VEC_REDUCE(sumf, sum);
  1142. // leftovers
  1143. for (int i = np; i < n; ++i) {
  1144. sumf += x[i]*y[i];
  1145. }
  1146. #else
  1147. // scalar
  1148. ggml_float sumf = 0.0;
  1149. for (int i = 0; i < n; ++i) {
  1150. sumf += (ggml_float)(x[i]*y[i]);
  1151. }
  1152. #endif
  1153. *s = sumf;
  1154. }
  1155. static void ggml_vec_dot_f16(int n, float * restrict s, size_t bs, ggml_fp16_t * restrict x, size_t bx, ggml_fp16_t * restrict y, size_t by, int nrc) {
  1156. assert(nrc == 1);
  1157. UNUSED(nrc);
  1158. UNUSED(bx);
  1159. UNUSED(by);
  1160. UNUSED(bs);
  1161. ggml_float sumf = 0.0;
  1162. #if defined(GGML_SIMD)
  1163. const int np = (n & ~(GGML_F16_STEP - 1));
  1164. GGML_F16_VEC sum[GGML_F16_ARR] = { GGML_F16_VEC_ZERO };
  1165. GGML_F16_VEC ax[GGML_F16_ARR];
  1166. GGML_F16_VEC ay[GGML_F16_ARR];
  1167. for (int i = 0; i < np; i += GGML_F16_STEP) {
  1168. for (int j = 0; j < GGML_F16_ARR; j++) {
  1169. ax[j] = GGML_F16_VEC_LOAD(x + i + j*GGML_F16_EPR, j);
  1170. ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j);
  1171. sum[j] = GGML_F16_VEC_FMA(sum[j], ax[j], ay[j]);
  1172. }
  1173. }
  1174. // reduce sum0..sum3 to sum0
  1175. GGML_F16_VEC_REDUCE(sumf, sum);
  1176. // leftovers
  1177. for (int i = np; i < n; ++i) {
  1178. sumf += (ggml_float)(GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i]));
  1179. }
  1180. #else
  1181. for (int i = 0; i < n; ++i) {
  1182. sumf += (ggml_float)(GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i]));
  1183. }
  1184. #endif
  1185. *s = sumf;
  1186. }
  1187. // compute GGML_VEC_DOT_UNROLL dot products at once
  1188. // xs - x row stride in bytes
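// informative comment only: xv points at the first of GGML_VEC_DOT_UNROLL rows
// and row i starts at (char *) xv + i*xs; for a contiguous F16 matrix xs would
// be ne[0]*sizeof(ggml_fp16_t), and on return s[i] = dot(row i, y)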
  1189. inline static void ggml_vec_dot_f16_unroll(const int n, const int xs, float * restrict s, void * restrict xv, ggml_fp16_t * restrict y) {
  1190. ggml_float sumf[GGML_VEC_DOT_UNROLL] = { 0.0 };
  1191. ggml_fp16_t * restrict x[GGML_VEC_DOT_UNROLL];
  1192. for (int i = 0; i < GGML_VEC_DOT_UNROLL; ++i) {
  1193. x[i] = (ggml_fp16_t *) ((char *) xv + i*xs);
  1194. }
  1195. #if defined(GGML_SIMD)
  1196. const int np = (n & ~(GGML_F16_STEP - 1));
  1197. GGML_F16_VEC sum[GGML_VEC_DOT_UNROLL][GGML_F16_ARR] = { { GGML_F16_VEC_ZERO } };
  1198. GGML_F16_VEC ax[GGML_F16_ARR];
  1199. GGML_F16_VEC ay[GGML_F16_ARR];
  1200. for (int i = 0; i < np; i += GGML_F16_STEP) {
  1201. for (int j = 0; j < GGML_F16_ARR; j++) {
  1202. ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j);
  1203. for (int k = 0; k < GGML_VEC_DOT_UNROLL; ++k) {
  1204. ax[j] = GGML_F16_VEC_LOAD(x[k] + i + j*GGML_F16_EPR, j);
  1205. sum[k][j] = GGML_F16_VEC_FMA(sum[k][j], ax[j], ay[j]);
  1206. }
  1207. }
  1208. }
  1209. // reduce sum0..sum3 to sum0
  1210. for (int k = 0; k < GGML_VEC_DOT_UNROLL; ++k) {
  1211. GGML_F16_VEC_REDUCE(sumf[k], sum[k]);
  1212. }
  1213. // leftovers
  1214. for (int i = np; i < n; ++i) {
  1215. for (int j = 0; j < GGML_VEC_DOT_UNROLL; ++j) {
  1216. sumf[j] += (ggml_float)(GGML_FP16_TO_FP32(x[j][i])*GGML_FP16_TO_FP32(y[i]));
  1217. }
  1218. }
  1219. #else
  1220. for (int i = 0; i < n; ++i) {
  1221. for (int j = 0; j < GGML_VEC_DOT_UNROLL; ++j) {
  1222. sumf[j] += (ggml_float)(GGML_FP16_TO_FP32(x[j][i])*GGML_FP16_TO_FP32(y[i]));
  1223. }
  1224. }
  1225. #endif
  1226. for (int i = 0; i < GGML_VEC_DOT_UNROLL; ++i) {
  1227. s[i] = sumf[i];
  1228. }
  1229. }
  1230. inline static void ggml_vec_mad_f32(const int n, float * restrict y, const float * restrict x, const float v) {
  1231. #if defined(GGML_SIMD)
  1232. const int np = (n & ~(GGML_F32_STEP - 1));
  1233. GGML_F32_VEC vx = GGML_F32_VEC_SET1(v);
  1234. GGML_F32_VEC ax[GGML_F32_ARR];
  1235. GGML_F32_VEC ay[GGML_F32_ARR];
  1236. for (int i = 0; i < np; i += GGML_F32_STEP) {
  1237. for (int j = 0; j < GGML_F32_ARR; j++) {
  1238. ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR);
  1239. ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
  1240. ay[j] = GGML_F32_VEC_FMA(ay[j], ax[j], vx);
  1241. GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]);
  1242. }
  1243. }
  1244. // leftovers
  1245. for (int i = np; i < n; ++i) {
  1246. y[i] += x[i]*v;
  1247. }
  1248. #else
  1249. // scalar
  1250. for (int i = 0; i < n; ++i) {
  1251. y[i] += x[i]*v;
  1252. }
  1253. #endif
  1254. }
  1255. // xs and vs are byte strides of x and v
  1256. inline static void ggml_vec_mad_f32_unroll(const int n, const int xs, const int vs, float * restrict y, const float * restrict xv, const float * restrict vv) {
  1257. const float * restrict x[GGML_VEC_MAD_UNROLL];
  1258. const float * restrict v[GGML_VEC_MAD_UNROLL];
  1259. for (int i = 0; i < GGML_VEC_MAD_UNROLL; ++i) {
  1260. x[i] = (const float *) ((const char *) xv + i*xs);
  1261. v[i] = (const float *) ((const char *) vv + i*vs);
  1262. }
  1263. #if defined(GGML_SIMD)
  1264. const int np = (n & ~(GGML_F32_STEP - 1));
  1265. GGML_F32_VEC vx[GGML_VEC_MAD_UNROLL];
  1266. for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) {
  1267. vx[k] = GGML_F32_VEC_SET1(v[k][0]);
  1268. }
  1269. GGML_F32_VEC ax[GGML_VEC_MAD_UNROLL][GGML_F32_ARR];
  1270. GGML_F32_VEC ay[GGML_F32_ARR];
  1271. for (int i = 0; i < np; i += GGML_F32_STEP) {
  1272. for (int j = 0; j < GGML_F32_ARR; j++) {
  1273. ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
  1274. for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) {
  1275. ax[k][j] = GGML_F32_VEC_LOAD(x[k] + i + j*GGML_F32_EPR);
  1276. ay[j] = GGML_F32_VEC_FMA(ay[j], ax[k][j], vx[k]);
  1277. }
  1278. GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]);
  1279. }
  1280. }
  1281. // leftovers
  1282. for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) {
  1283. for (int i = np; i < n; ++i) {
  1284. y[i] += x[k][i]*v[k][0];
  1285. }
  1286. }
  1287. #else
  1288. // scalar
  1289. for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) {
  1290. for (int i = 0; i < n; ++i) {
  1291. y[i] += x[k][i]*v[k][0];
  1292. }
  1293. }
  1294. #endif
  1295. }
  1296. //inline static void ggml_vec_scale_f32(const int n, float * y, const float v) { for (int i = 0; i < n; ++i) y[i] *= v; }
  1297. inline static void ggml_vec_scale_f32(const int n, float * y, const float v) {
  1298. #if defined(GGML_USE_ACCELERATE)
  1299. vDSP_vsmul(y, 1, &v, y, 1, n);
  1300. #elif defined(GGML_SIMD)
  1301. const int np = (n & ~(GGML_F32_STEP - 1));
  1302. GGML_F32_VEC vx = GGML_F32_VEC_SET1(v);
  1303. GGML_F32_VEC ay[GGML_F32_ARR];
  1304. for (int i = 0; i < np; i += GGML_F32_STEP) {
  1305. for (int j = 0; j < GGML_F32_ARR; j++) {
  1306. ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
  1307. ay[j] = GGML_F32_VEC_MUL(ay[j], vx);
  1308. GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]);
  1309. }
  1310. }
  1311. // leftovers
  1312. for (int i = np; i < n; ++i) {
  1313. y[i] *= v;
  1314. }
  1315. #else
  1316. // scalar
  1317. for (int i = 0; i < n; ++i) {
  1318. y[i] *= v;
  1319. }
  1320. #endif
  1321. }
  1322. inline static void ggml_vec_norm_f32 (const int n, float * s, const float * x) { ggml_vec_dot_f32(n, s, 0, x, 0, x, 0, 1); *s = sqrtf(*s); }
  1323. inline static void ggml_vec_sqr_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i]*x[i]; }
  1324. inline static void ggml_vec_sqrt_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = sqrtf(x[i]); }
  1325. inline static void ggml_vec_log_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = logf(x[i]); }
  1326. inline static void ggml_vec_abs_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = fabsf(x[i]); }
  1327. inline static void ggml_vec_sgn_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 1.f : ((x[i] < 0.f) ? -1.f : 0.f); }
  1328. inline static void ggml_vec_step_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 1.f : 0.f; }
  1329. inline static void ggml_vec_tanh_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = tanhf(x[i]); }
  1330. inline static void ggml_vec_elu_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? x[i] : expf(x[i])-1; }
  1331. inline static void ggml_vec_relu_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? x[i] : 0.f; }
  1332. inline static void ggml_vec_leaky_relu_f32 (const int n, float * y, const float * x, const float ns) { for (int i = 0; i < n; ++i) y[i] = ((x[i] > 0.f) ? x[i] : 0.f) + ns * ((x[i] < 0.0f) ? x[i] : 0.f); }
  1333. // TODO: optimize performance
  1334. inline static void ggml_vec_hardswish_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i] * fminf(1.0f, fmaxf(0.0f, (x[i] + 3.0f) / 6.0f)); }
  1335. inline static void ggml_vec_hardsigmoid_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = fminf(1.0f, fmaxf(0.0f, (x[i] + 3.0f) / 6.0f)); }
  1336. static const float GELU_COEF_A = 0.044715f;
  1337. static const float GELU_QUICK_COEF = -1.702f;
  1338. static const float SQRT_2_OVER_PI = 0.79788456080286535587989211986876f;
  1339. inline static float ggml_gelu_f32(float x) {
  1340. return 0.5f*x*(1.0f + tanhf(SQRT_2_OVER_PI*x*(1.0f + GELU_COEF_A*x*x)));
  1341. }
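// i.e. the tanh approximation of GELU (informative comment only):
//   gelu(x) ≈ 0.5*x*(1 + tanh(sqrt(2/pi)*(x + 0.044715*x^3)))
// with SQRT_2_OVER_PI = sqrt(2/pi) and GELU_COEF_A = 0.044715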
  1342. inline static void ggml_vec_gelu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
  1343. const uint16_t * i16 = (const uint16_t *) x;
  1344. for (int i = 0; i < n; ++i) {
  1345. y[i] = ggml_table_gelu_f16[i16[i]];
  1346. }
  1347. }
  1348. #ifdef GGML_GELU_FP16
  1349. inline static void ggml_vec_gelu_f32(const int n, float * y, const float * x) {
  1350. uint16_t t;
  1351. for (int i = 0; i < n; ++i) {
  1352. ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
  1353. memcpy(&t, &fp16, sizeof(uint16_t));
  1354. y[i] = GGML_FP16_TO_FP32(ggml_table_gelu_f16[t]);
  1355. }
  1356. }
  1357. #else
  1358. inline static void ggml_vec_gelu_f32(const int n, float * y, const float * x) {
  1359. for (int i = 0; i < n; ++i) {
  1360. y[i] = ggml_gelu_f32(x[i]);
  1361. }
  1362. }
  1363. #endif
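// informative comment only: the *_FP16 variants above trade accuracy for speed
// by rounding the input to f16 and using its raw bit pattern as an index into a
// 65536-entry lookup table, e.g.:
//
//   ggml_fp16_t h = GGML_FP32_TO_FP16(x[i]);  // round input to f16
//   uint16_t    t; memcpy(&t, &h, sizeof(t)); // reinterpret the bits as an index
//   y[i] = GGML_FP16_TO_FP32(ggml_table_gelu_f16[t]);
//
// the tables are filled once in ggml_init() for all 2^16 possible f16 values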
  1364. inline static float ggml_gelu_quick_f32(float x) {
  1365. return x*(1.0f/(1.0f+expf(GELU_QUICK_COEF*x)));
  1366. }
  1367. //inline static void ggml_vec_gelu_quick_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
  1368. // const uint16_t * i16 = (const uint16_t *) x;
  1369. // for (int i = 0; i < n; ++i) {
  1370. // y[i] = ggml_table_gelu_quick_f16[i16[i]];
  1371. // }
  1372. //}
  1373. #ifdef GGML_GELU_QUICK_FP16
  1374. inline static void ggml_vec_gelu_quick_f32(const int n, float * y, const float * x) {
  1375. uint16_t t;
  1376. for (int i = 0; i < n; ++i) {
  1377. ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
  1378. memcpy(&t, &fp16, sizeof(uint16_t));
  1379. y[i] = GGML_FP16_TO_FP32(ggml_table_gelu_quick_f16[t]);
  1380. }
  1381. }
  1382. #else
  1383. inline static void ggml_vec_gelu_quick_f32(const int n, float * y, const float * x) {
  1384. for (int i = 0; i < n; ++i) {
  1385. y[i] = ggml_gelu_quick_f32(x[i]);
  1386. }
  1387. }
  1388. #endif
  1389. // Sigmoid Linear Unit (SiLU) function
  1390. inline static float ggml_silu_f32(float x) {
  1391. return x/(1.0f + expf(-x));
  1392. }
  1393. //inline static void ggml_vec_silu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
  1394. // const uint16_t * i16 = (const uint16_t *) x;
  1395. // for (int i = 0; i < n; ++i) {
  1396. // y[i] = ggml_table_silu_f16[i16[i]];
  1397. // }
  1398. //}
  1399. #ifdef GGML_SILU_FP16
  1400. inline static void ggml_vec_silu_f32(const int n, float * y, const float * x) {
  1401. uint16_t t;
  1402. for (int i = 0; i < n; ++i) {
  1403. ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
  1404. memcpy(&t, &fp16, sizeof(uint16_t));
  1405. y[i] = GGML_FP16_TO_FP32(ggml_table_silu_f16[t]);
  1406. }
  1407. }
  1408. #else
  1409. inline static void ggml_vec_silu_f32(const int n, float * y, const float * x) {
  1410. for (int i = 0; i < n; ++i) {
  1411. y[i] = ggml_silu_f32(x[i]);
  1412. }
  1413. }
  1414. #endif
  1415. inline static float ggml_silu_backward_f32(float x, float dy) {
  1416. const float s = 1.0f/(1.0f + expf(-x));
  1417. return dy*s*(1.0f + x*(1.0f - s));
  1418. }
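// derivation (informative comment only): with s = sigmoid(x) = 1/(1 + exp(-x)),
//   silu(x)     = x*s
//   d silu / dx = s + x*s*(1 - s) = s*(1 + x*(1 - s))
// so the backward pass returns dy * s * (1 + x*(1 - s)), as computed above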
  1419. #ifdef GGML_SILU_FP16
  1420. inline static void ggml_vec_silu_backward_f32(const int n, float * dx, const float * x, const float * dy) {
  1421. for (int i = 0; i < n; ++i) {
1422. // the forward pass computed silu on the f16-rounded value of x[i], not on x[i] itself,
1423. // so take the derivative at that f16-rounded value:
  1424. ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
  1425. float usedx = GGML_FP16_TO_FP32(fp16);
  1426. dx[i] = ggml_silu_backward_f32(usedx, dy[i]);
  1427. }
  1428. }
  1429. #else
  1430. inline static void ggml_vec_silu_backward_f32(const int n, float * dx, const float * x, const float * dy) {
  1431. for (int i = 0; i < n; ++i) {
  1432. dx[i] = ggml_silu_backward_f32(x[i], dy[i]);
  1433. }
  1434. }
  1435. #endif
  1436. inline static void ggml_vec_sum_f32(const int n, float * s, const float * x) {
  1437. #ifndef GGML_USE_ACCELERATE
  1438. ggml_float sum = 0.0;
  1439. for (int i = 0; i < n; ++i) {
  1440. sum += (ggml_float)x[i];
  1441. }
  1442. *s = sum;
  1443. #else
  1444. vDSP_sve(x, 1, s, n);
  1445. #endif
  1446. }
  1447. inline static void ggml_vec_sum_f32_ggf(const int n, ggml_float * s, const float * x) {
  1448. ggml_float sum = 0.0;
  1449. for (int i = 0; i < n; ++i) {
  1450. sum += (ggml_float)x[i];
  1451. }
  1452. *s = sum;
  1453. }
  1454. inline static void ggml_vec_sum_f16_ggf(const int n, float * s, const ggml_fp16_t * x) {
  1455. float sum = 0.0f;
  1456. for (int i = 0; i < n; ++i) {
  1457. sum += GGML_FP16_TO_FP32(x[i]);
  1458. }
  1459. *s = sum;
  1460. }
  1461. inline static void ggml_vec_max_f32(const int n, float * s, const float * x) {
  1462. #ifndef GGML_USE_ACCELERATE
  1463. float max = -INFINITY;
  1464. for (int i = 0; i < n; ++i) {
  1465. max = MAX(max, x[i]);
  1466. }
  1467. *s = max;
  1468. #else
  1469. vDSP_maxv(x, 1, s, n);
  1470. #endif
  1471. }
  1472. inline static void ggml_vec_norm_inv_f32(const int n, float * s, const float * x) {
  1473. ggml_vec_norm_f32(n, s, x);
  1474. *s = 1.f/(*s);
  1475. }
  1476. inline static void ggml_vec_argmax_f32(const int n, int * s, const float * x) {
  1477. float max = -INFINITY;
  1478. int idx = 0;
  1479. for (int i = 0; i < n; ++i) {
  1480. max = MAX(max, x[i]);
  1481. if (max == x[i]) { idx = i; }
  1482. }
  1483. *s = idx;
  1484. }
  1485. //
  1486. // data types
  1487. //
  1488. static const char * GGML_OP_NAME[GGML_OP_COUNT] = {
  1489. "NONE",
  1490. "DUP",
  1491. "ADD",
  1492. "ADD1",
  1493. "ACC",
  1494. "SUB",
  1495. "MUL",
  1496. "DIV",
  1497. "SQR",
  1498. "SQRT",
  1499. "LOG",
  1500. "SUM",
  1501. "SUM_ROWS",
  1502. "MEAN",
  1503. "ARGMAX",
  1504. "REPEAT",
  1505. "REPEAT_BACK",
  1506. "CONCAT",
  1507. "SILU_BACK",
  1508. "NORM",
  1509. "RMS_NORM",
  1510. "RMS_NORM_BACK",
  1511. "GROUP_NORM",
  1512. "MUL_MAT",
  1513. "MUL_MAT_ID",
  1514. "OUT_PROD",
  1515. "SCALE",
  1516. "SET",
  1517. "CPY",
  1518. "CONT",
  1519. "RESHAPE",
  1520. "VIEW",
  1521. "PERMUTE",
  1522. "TRANSPOSE",
  1523. "GET_ROWS",
  1524. "GET_ROWS_BACK",
  1525. "DIAG",
  1526. "DIAG_MASK_INF",
  1527. "DIAG_MASK_ZERO",
  1528. "SOFT_MAX",
  1529. "SOFT_MAX_BACK",
  1530. "ROPE",
  1531. "ROPE_BACK",
  1532. "ALIBI",
  1533. "CLAMP",
  1534. "CONV_TRANSPOSE_1D",
  1535. "IM2COL",
  1536. "CONV_TRANSPOSE_2D",
  1537. "POOL_1D",
  1538. "POOL_2D",
  1539. "UPSCALE",
  1540. "PAD",
  1541. "ARGSORT",
  1542. "LEAKY_RELU",
  1543. "FLASH_ATTN",
  1544. "FLASH_FF",
  1545. "FLASH_ATTN_BACK",
  1546. "WIN_PART",
  1547. "WIN_UNPART",
  1548. "GET_REL_POS",
  1549. "ADD_REL_POS",
  1550. "UNARY",
  1551. "MAP_UNARY",
  1552. "MAP_BINARY",
  1553. "MAP_CUSTOM1_F32",
  1554. "MAP_CUSTOM2_F32",
  1555. "MAP_CUSTOM3_F32",
  1556. "MAP_CUSTOM1",
  1557. "MAP_CUSTOM2",
  1558. "MAP_CUSTOM3",
  1559. "CROSS_ENTROPY_LOSS",
  1560. "CROSS_ENTROPY_LOSS_BACK",
  1561. };
  1562. static_assert(GGML_OP_COUNT == 72, "GGML_OP_COUNT != 72");
  1563. static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = {
  1564. "none",
  1565. "x",
  1566. "x+y",
  1567. "x+y",
  1568. "view(x,nb,offset)+=y->x",
  1569. "x-y",
  1570. "x*y",
  1571. "x/y",
  1572. "x^2",
  1573. "√x",
  1574. "log(x)",
  1575. "Σx",
  1576. "Σx_k",
  1577. "Σx/n",
  1578. "argmax(x)",
  1579. "repeat(x)",
  1580. "repeat_back(x)",
  1581. "concat(x, y)",
  1582. "silu_back(x)",
  1583. "norm(x)",
  1584. "rms_norm(x)",
  1585. "rms_norm_back(x)",
  1586. "group_norm(x)",
  1587. "X*Y",
  1588. "X[i]*Y",
  1589. "X*Y",
  1590. "x*v",
  1591. "y-\\>view(x)",
  1592. "x-\\>y",
  1593. "cont(x)",
  1594. "reshape(x)",
  1595. "view(x)",
  1596. "permute(x)",
  1597. "transpose(x)",
  1598. "get_rows(x)",
  1599. "get_rows_back(x)",
  1600. "diag(x)",
  1601. "diag_mask_inf(x)",
  1602. "diag_mask_zero(x)",
  1603. "soft_max(x)",
  1604. "soft_max_back(x)",
  1605. "rope(x)",
  1606. "rope_back(x)",
  1607. "alibi(x)",
  1608. "clamp(x)",
  1609. "conv_transpose_1d(x)",
  1610. "im2col(x)",
  1611. "conv_transpose_2d(x)",
  1612. "pool_1d(x)",
  1613. "pool_2d(x)",
  1614. "upscale(x)",
  1615. "pad(x)",
  1616. "argsort(x)",
  1617. "leaky_relu(x)",
  1618. "flash_attn(x)",
  1619. "flash_ff(x)",
  1620. "flash_attn_back(x)",
  1621. "win_part(x)",
  1622. "win_unpart(x)",
  1623. "get_rel_pos(x)",
  1624. "add_rel_pos(x)",
  1625. "unary(x)",
  1626. "f(x)",
  1627. "f(x,y)",
  1628. "custom_f32(x)",
  1629. "custom_f32(x,y)",
  1630. "custom_f32(x,y,z)",
  1631. "custom(x)",
  1632. "custom(x,y)",
  1633. "custom(x,y,z)",
  1634. "cross_entropy_loss(x,y)",
  1635. "cross_entropy_loss_back(x,y)",
  1636. };
  1637. static_assert(GGML_OP_COUNT == 72, "GGML_OP_COUNT != 72");
  1638. static_assert(GGML_OP_POOL_COUNT == 2, "GGML_OP_POOL_COUNT != 2");
  1639. static const char * GGML_UNARY_OP_NAME[GGML_UNARY_OP_COUNT] = {
  1640. "ABS",
  1641. "SGN",
  1642. "NEG",
  1643. "STEP",
  1644. "TANH",
  1645. "ELU",
  1646. "RELU",
  1647. "GELU",
  1648. "GELU_QUICK",
  1649. "SILU",
  1650. "HARDSWISH",
  1651. "HARDSIGMOID",
  1652. };
  1653. static_assert(GGML_UNARY_OP_COUNT == 12, "GGML_UNARY_OP_COUNT != 12");
  1654. static_assert(sizeof(struct ggml_object)%GGML_MEM_ALIGN == 0, "ggml_object size must be a multiple of GGML_MEM_ALIGN");
  1655. static_assert(sizeof(struct ggml_tensor)%GGML_MEM_ALIGN == 0, "ggml_tensor size must be a multiple of GGML_MEM_ALIGN");
  1656. // WARN:
1657. // Misconfiguration can lead to problems that are hard to reason about:
1658. // * At best the op crashes or produces nonsense output.
1659. // * At worst the output is subtly wrong and hard to notice.
1660. //
1661. // An op has to enable INIT or FINALIZE when any of its branches needs that pass.
1662. // Take care with compile options (e.g., GGML_USE_xxx).
  1663. static bool GGML_OP_HAS_INIT [GGML_OP_COUNT] = { 0 };
  1664. static bool GGML_OP_HAS_FINALIZE[GGML_OP_COUNT] = { 0 };
  1665. static void ggml_setup_op_has_task_pass(void) {
  1666. { // INIT
  1667. bool * p = GGML_OP_HAS_INIT;
  1668. p[GGML_OP_ACC ] = true;
  1669. p[GGML_OP_MUL_MAT ] = true;
  1670. p[GGML_OP_MUL_MAT_ID ] = true;
  1671. p[GGML_OP_OUT_PROD ] = true;
  1672. p[GGML_OP_SET ] = true;
  1673. p[GGML_OP_GET_ROWS_BACK ] = true;
  1674. p[GGML_OP_DIAG_MASK_INF ] = true;
  1675. p[GGML_OP_DIAG_MASK_ZERO ] = true;
  1676. p[GGML_OP_CONV_TRANSPOSE_1D ] = true;
  1677. p[GGML_OP_CONV_TRANSPOSE_2D ] = true;
  1678. p[GGML_OP_FLASH_ATTN_BACK ] = true;
  1679. p[GGML_OP_CROSS_ENTROPY_LOSS ] = true;
  1680. p[GGML_OP_ADD_REL_POS ] = true;
  1681. }
  1682. { // FINALIZE
  1683. bool * p = GGML_OP_HAS_FINALIZE;
  1684. p[GGML_OP_CROSS_ENTROPY_LOSS ] = true;
  1685. }
  1686. }
  1687. //
  1688. // ggml context
  1689. //
  1690. struct ggml_context {
  1691. size_t mem_size;
  1692. void * mem_buffer;
  1693. bool mem_buffer_owned;
  1694. bool no_alloc;
  1695. bool no_alloc_save; // this is used to save the no_alloc state when using scratch buffers
  1696. int n_objects;
  1697. struct ggml_object * objects_begin;
  1698. struct ggml_object * objects_end;
  1699. struct ggml_scratch scratch;
  1700. struct ggml_scratch scratch_save;
  1701. };
  1702. struct ggml_context_container {
  1703. bool used;
  1704. struct ggml_context context;
  1705. };
  1706. //
  1707. // NUMA support
  1708. //
  1709. #define GGML_NUMA_MAX_NODES 8
  1710. #define GGML_NUMA_MAX_CPUS 512
  1711. struct ggml_numa_node {
  1712. uint32_t cpus[GGML_NUMA_MAX_CPUS]; // hardware threads on this node
  1713. uint32_t n_cpus;
  1714. };
  1715. struct ggml_numa_nodes {
  1716. enum ggml_numa_strategy numa_strategy;
  1717. struct ggml_numa_node nodes[GGML_NUMA_MAX_NODES];
  1718. uint32_t n_nodes;
  1719. uint32_t total_cpus; // hardware threads on system
1720. uint32_t current_node; // node on which the main process is executing
  1721. #if defined(__gnu_linux__)
  1722. cpu_set_t cpuset; // cpuset from numactl
  1723. #else
  1724. uint32_t cpuset; // no NUMA support outside of Linux at this time. Use a portable datatype
  1725. #endif
  1726. };
  1727. //
  1728. // ggml state
  1729. //
  1730. struct ggml_state {
  1731. struct ggml_context_container contexts[GGML_MAX_CONTEXTS];
  1732. struct ggml_numa_nodes numa;
  1733. };
  1734. // global state
  1735. static struct ggml_state g_state;
  1736. static atomic_int g_state_barrier = 0;
  1737. // barrier via spin lock
  1738. inline static void ggml_critical_section_start(void) {
  1739. int processing = atomic_fetch_add(&g_state_barrier, 1);
  1740. while (processing > 0) {
  1741. // wait for other threads to finish
  1742. atomic_fetch_sub(&g_state_barrier, 1);
  1743. sched_yield(); // TODO: reconsider this
  1744. processing = atomic_fetch_add(&g_state_barrier, 1);
  1745. }
  1746. }
  1747. // TODO: make this somehow automatically executed
  1748. // some sort of "sentry" mechanism
  1749. inline static void ggml_critical_section_end(void) {
  1750. atomic_fetch_sub(&g_state_barrier, 1);
  1751. }
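// Illustrative sketch (informative comment only): typical usage around the
// global state, as done in ggml_init() below:
//
//   ggml_critical_section_start(); // spin until no other thread is inside
//   // ... read/modify g_state (e.g. claim a free ggml_context slot) ...
//   ggml_critical_section_end();   // release the barrier
//
// atomic_fetch_add() returns the previous value, so only the thread that
// observed 0 proceeds; all others decrement, yield and retry.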
  1752. #if defined(__gnu_linux__)
  1753. static cpu_set_t ggml_get_numa_affinity(void) {
  1754. cpu_set_t cpuset;
  1755. pthread_t thread;
  1756. thread = pthread_self();
  1757. CPU_ZERO(&cpuset);
  1758. pthread_getaffinity_np(thread, sizeof(cpu_set_t), &cpuset);
  1759. return cpuset;
  1760. }
  1761. #else
  1762. static uint32_t ggml_get_numa_affinity(void) {
  1763. return 0; // no NUMA support
  1764. }
  1765. #endif
  1766. void ggml_numa_init(enum ggml_numa_strategy numa_flag) {
  1767. if (g_state.numa.n_nodes > 0) {
  1768. fprintf(stderr, "ggml_numa_init: NUMA already initialized\n");
  1769. return;
  1770. }
  1771. #if defined(__gnu_linux__)
  1772. struct stat st;
  1773. char path[256];
  1774. int rv;
  1775. // set numa scheme
  1776. g_state.numa.numa_strategy = numa_flag;
  1777. GGML_PRINT_DEBUG("numa strategy %u\n",g_state.numa.numa_strategy);
  1778. g_state.numa.cpuset = ggml_get_numa_affinity();
  1779. // enumerate nodes
  1780. while (g_state.numa.n_nodes < GGML_NUMA_MAX_NODES) {
  1781. rv = snprintf(path, sizeof(path), "/sys/devices/system/node/node%u", g_state.numa.n_nodes);
  1782. GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path));
  1783. if (stat(path, &st) != 0) { break; }
  1784. ++g_state.numa.n_nodes;
  1785. }
  1786. // enumerate CPUs
  1787. while (g_state.numa.total_cpus < GGML_NUMA_MAX_CPUS) {
  1788. rv = snprintf(path, sizeof(path), "/sys/devices/system/cpu/cpu%u", g_state.numa.total_cpus);
  1789. GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path));
  1790. if (stat(path, &st) != 0) { break; }
  1791. ++g_state.numa.total_cpus;
  1792. }
  1793. GGML_PRINT_DEBUG("found %u numa nodes, %u CPUs\n", g_state.numa.n_nodes, g_state.numa.total_cpus);
  1794. // figure out which node we're on
  1795. uint current_cpu;
  1796. int getcpu_ret = 0;
  1797. #if __GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ > 28)
  1798. getcpu_ret = getcpu(&current_cpu, &g_state.numa.current_node);
  1799. #else
  1800. // old glibc doesn't have a wrapper for this call. Fall back on direct syscall
  1801. getcpu_ret = syscall(SYS_getcpu,&current_cpu,&g_state.numa.current_node);
  1802. #endif
  1803. if (g_state.numa.n_nodes < 1 || g_state.numa.total_cpus < 1 || getcpu_ret != 0) {
  1804. g_state.numa.n_nodes = 0;
  1805. return;
  1806. }
  1807. GGML_PRINT_DEBUG("found our process on numa node %u, CPU %u\n", g_state.numa.current_node, current_cpu);
  1808. for (uint32_t n = 0; n < g_state.numa.n_nodes; ++n) {
  1809. struct ggml_numa_node * node = &g_state.numa.nodes[n];
  1810. GGML_PRINT_DEBUG("CPUs on node %u:", n);
  1811. node->n_cpus = 0;
  1812. for (uint32_t c = 0; c < g_state.numa.total_cpus; ++c) {
  1813. rv = snprintf(path, sizeof(path), "/sys/devices/system/node/node%u/cpu%u", n, c);
  1814. GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path));
  1815. if (stat(path, &st) == 0) {
  1816. node->cpus[node->n_cpus++] = c;
  1817. GGML_PRINT_DEBUG(" %u", c);
  1818. }
  1819. }
  1820. GGML_PRINT_DEBUG("\n");
  1821. }
  1822. if (ggml_is_numa()) {
  1823. FILE *fptr = fopen("/proc/sys/kernel/numa_balancing", "r");
  1824. if (fptr != NULL) {
  1825. char buf[42];
  1826. if (fgets(buf, sizeof(buf), fptr) && strncmp(buf, "0\n", sizeof(buf)) != 0) {
  1827. GGML_PRINT("WARNING: /proc/sys/kernel/numa_balancing is enabled, this has been observed to impair performance\n");
  1828. }
  1829. fclose(fptr);
  1830. }
  1831. }
  1832. #else
  1833. GGML_UNUSED(numa_flag);
  1834. // TODO
  1835. #endif
  1836. }
  1837. bool ggml_is_numa(void) {
  1838. return g_state.numa.n_nodes > 1;
  1839. }
  1840. ////////////////////////////////////////////////////////////////////////////////
  1841. void ggml_print_object(const struct ggml_object * obj) {
  1842. GGML_PRINT(" - ggml_object: type = %d, offset = %zu, size = %zu, next = %p\n",
  1843. obj->type, obj->offs, obj->size, (const void *) obj->next);
  1844. }
  1845. void ggml_print_objects(const struct ggml_context * ctx) {
  1846. struct ggml_object * obj = ctx->objects_begin;
  1847. GGML_PRINT("%s: objects in context %p:\n", __func__, (const void *) ctx);
  1848. while (obj != NULL) {
  1849. ggml_print_object(obj);
  1850. obj = obj->next;
  1851. }
  1852. GGML_PRINT("%s: --- end ---\n", __func__);
  1853. }
  1854. GGML_CALL int64_t ggml_nelements(const struct ggml_tensor * tensor) {
  1855. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  1856. return tensor->ne[0]*tensor->ne[1]*tensor->ne[2]*tensor->ne[3];
  1857. }
  1858. GGML_CALL int64_t ggml_nrows(const struct ggml_tensor * tensor) {
  1859. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  1860. return tensor->ne[1]*tensor->ne[2]*tensor->ne[3];
  1861. }
  1862. GGML_CALL size_t ggml_nbytes(const struct ggml_tensor * tensor) {
  1863. size_t nbytes;
  1864. size_t blck_size = ggml_blck_size(tensor->type);
  1865. if (blck_size == 1) {
  1866. nbytes = ggml_type_size(tensor->type);
  1867. for (int i = 0; i < GGML_MAX_DIMS; ++i) {
  1868. nbytes += (tensor->ne[i] - 1)*tensor->nb[i];
  1869. }
  1870. }
  1871. else {
  1872. nbytes = tensor->ne[0]*tensor->nb[0]/blck_size;
  1873. for (int i = 1; i < GGML_MAX_DIMS; ++i) {
  1874. nbytes += (tensor->ne[i] - 1)*tensor->nb[i];
  1875. }
  1876. }
  1877. return nbytes;
  1878. }
  1879. size_t ggml_nbytes_pad(const struct ggml_tensor * tensor) {
  1880. return GGML_PAD(ggml_nbytes(tensor), GGML_MEM_ALIGN);
  1881. }
  1882. GGML_CALL int ggml_blck_size(enum ggml_type type) {
  1883. return type_traits[type].blck_size;
  1884. }
  1885. GGML_CALL size_t ggml_type_size(enum ggml_type type) {
  1886. return type_traits[type].type_size;
  1887. }
  1888. GGML_CALL size_t ggml_row_size(enum ggml_type type, int64_t ne) {
  1889. assert(ne % ggml_blck_size(type) == 0);
  1890. return ggml_type_size(type)*ne/ggml_blck_size(type);
  1891. }
  1892. double ggml_type_sizef(enum ggml_type type) {
  1893. return ((double)(type_traits[type].type_size))/type_traits[type].blck_size;
  1894. }
  1895. GGML_CALL const char * ggml_type_name(enum ggml_type type) {
  1896. return type_traits[type].type_name;
  1897. }
  1898. GGML_CALL bool ggml_is_quantized(enum ggml_type type) {
  1899. return type_traits[type].is_quantized;
  1900. }
  1901. GGML_CALL const char * ggml_op_name(enum ggml_op op) {
  1902. return GGML_OP_NAME[op];
  1903. }
  1904. const char * ggml_op_symbol(enum ggml_op op) {
  1905. return GGML_OP_SYMBOL[op];
  1906. }
  1907. const char * ggml_unary_op_name(enum ggml_unary_op op) {
  1908. return GGML_UNARY_OP_NAME[op];
  1909. }
  1910. GGML_CALL const char * ggml_op_desc(const struct ggml_tensor * t) {
  1911. if (t->op == GGML_OP_UNARY) {
  1912. enum ggml_unary_op uop = ggml_get_unary_op(t);
  1913. return ggml_unary_op_name(uop);
  1914. }
  1915. else {
  1916. return ggml_op_name(t->op);
  1917. }
  1918. }
  1919. GGML_CALL size_t ggml_element_size(const struct ggml_tensor * tensor) {
  1920. return ggml_type_size(tensor->type);
  1921. }
  1922. bool ggml_is_scalar(const struct ggml_tensor * tensor) {
  1923. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  1924. return tensor->ne[0] == 1 && tensor->ne[1] == 1 && tensor->ne[2] == 1 && tensor->ne[3] == 1;
  1925. }
  1926. bool ggml_is_vector(const struct ggml_tensor * tensor) {
  1927. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  1928. return tensor->ne[1] == 1 && tensor->ne[2] == 1 && tensor->ne[3] == 1;
  1929. }
  1930. bool ggml_is_matrix(const struct ggml_tensor * tensor) {
  1931. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  1932. return tensor->ne[2] == 1 && tensor->ne[3] == 1;
  1933. }
  1934. bool ggml_is_3d(const struct ggml_tensor * tensor) {
  1935. return tensor->ne[3] == 1;
  1936. }
  1937. int ggml_n_dims(const struct ggml_tensor * tensor) {
  1938. for (int i = GGML_MAX_DIMS - 1; i >= 1; --i) {
  1939. if (tensor->ne[i] > 1) {
  1940. return i + 1;
  1941. }
  1942. }
  1943. return 1;
  1944. }
  1945. static inline bool ggml_can_mul_mat(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
  1946. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  1947. return (t0->ne[0] == t1->ne[0]) &&
  1948. (t1->ne[2]%t0->ne[2] == 0) && // verify t0 is broadcastable
  1949. (t1->ne[3]%t0->ne[3] == 0);
  1950. }
  1951. static inline bool ggml_can_out_prod(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
  1952. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  1953. return (t0->ne[1] == t1->ne[1]) &&
  1954. (t1->ne[2]%t0->ne[2] == 0) && // verify t0 is broadcastable
  1955. (t1->ne[3]%t0->ne[3] == 0);
  1956. }
  1957. enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype) {
  1958. enum ggml_type wtype = GGML_TYPE_COUNT;
  1959. switch (ftype) {
  1960. case GGML_FTYPE_ALL_F32: wtype = GGML_TYPE_F32; break;
  1961. case GGML_FTYPE_MOSTLY_F16: wtype = GGML_TYPE_F16; break;
  1962. case GGML_FTYPE_MOSTLY_Q4_0: wtype = GGML_TYPE_Q4_0; break;
  1963. case GGML_FTYPE_MOSTLY_Q4_1: wtype = GGML_TYPE_Q4_1; break;
  1964. case GGML_FTYPE_MOSTLY_Q5_0: wtype = GGML_TYPE_Q5_0; break;
  1965. case GGML_FTYPE_MOSTLY_Q5_1: wtype = GGML_TYPE_Q5_1; break;
  1966. case GGML_FTYPE_MOSTLY_Q8_0: wtype = GGML_TYPE_Q8_0; break;
  1967. case GGML_FTYPE_MOSTLY_Q2_K: wtype = GGML_TYPE_Q2_K; break;
  1968. case GGML_FTYPE_MOSTLY_Q3_K: wtype = GGML_TYPE_Q3_K; break;
  1969. case GGML_FTYPE_MOSTLY_Q4_K: wtype = GGML_TYPE_Q4_K; break;
  1970. case GGML_FTYPE_MOSTLY_Q5_K: wtype = GGML_TYPE_Q5_K; break;
  1971. case GGML_FTYPE_MOSTLY_Q6_K: wtype = GGML_TYPE_Q6_K; break;
  1972. case GGML_FTYPE_MOSTLY_IQ2_XXS: wtype = GGML_TYPE_IQ2_XXS; break;
  1973. case GGML_FTYPE_MOSTLY_IQ2_XS: wtype = GGML_TYPE_IQ2_XS; break;
  1974. case GGML_FTYPE_MOSTLY_IQ3_XXS: wtype = GGML_TYPE_IQ3_XXS; break;
  1975. case GGML_FTYPE_MOSTLY_IQ1_S: wtype = GGML_TYPE_IQ1_S; break;
  1976. case GGML_FTYPE_MOSTLY_IQ4_NL: wtype = GGML_TYPE_IQ4_NL; break;
  1977. case GGML_FTYPE_UNKNOWN: wtype = GGML_TYPE_COUNT; break;
  1978. case GGML_FTYPE_MOSTLY_Q4_1_SOME_F16: wtype = GGML_TYPE_COUNT; break;
  1979. }
  1980. GGML_ASSERT(wtype != GGML_TYPE_COUNT);
  1981. return wtype;
  1982. }
  1983. size_t ggml_tensor_overhead(void) {
  1984. return GGML_OBJECT_SIZE + GGML_TENSOR_SIZE;
  1985. }
  1986. GGML_CALL bool ggml_is_transposed(const struct ggml_tensor * tensor) {
  1987. return tensor->nb[0] > tensor->nb[1];
  1988. }
  1989. GGML_CALL bool ggml_is_contiguous(const struct ggml_tensor * tensor) {
  1990. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  1991. return
  1992. tensor->nb[0] == ggml_type_size(tensor->type) &&
  1993. tensor->nb[1] == (tensor->nb[0]*tensor->ne[0])/ggml_blck_size(tensor->type) &&
  1994. tensor->nb[2] == tensor->nb[1]*tensor->ne[1] &&
  1995. tensor->nb[3] == tensor->nb[2]*tensor->ne[2];
  1996. }
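// worked example (informative comment only): a contiguous F32 tensor with
// ne = {4, 3, 2, 1} has
//   nb[0] = sizeof(float)                   =  4
//   nb[1] = nb[0]*ne[0]/ggml_blck_size(...) = 16
//   nb[2] = nb[1]*ne[1]                     = 48
//   nb[3] = nb[2]*ne[2]                     = 96
// for block-quantized types nb[0] is the size of one block in bytes while
// ne[0] counts elements, which is why nb[1] divides by the block size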
  1997. static inline bool ggml_is_contiguous_except_dim_1(const struct ggml_tensor * tensor) {
  1998. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  1999. return
  2000. tensor->nb[0] == ggml_type_size(tensor->type) &&
  2001. tensor->nb[2] == tensor->nb[1]*tensor->ne[1] &&
  2002. tensor->nb[3] == tensor->nb[2]*tensor->ne[2];
  2003. }
  2004. GGML_CALL bool ggml_is_permuted(const struct ggml_tensor * tensor) {
  2005. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  2006. return tensor->nb[0] > tensor->nb[1] || tensor->nb[1] > tensor->nb[2] || tensor->nb[2] > tensor->nb[3];
  2007. }
  2008. static inline bool ggml_is_padded_1d(const struct ggml_tensor * tensor) {
  2009. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  2010. return
  2011. tensor->nb[0] == ggml_type_size(tensor->type) &&
  2012. tensor->nb[2] == tensor->nb[1]*tensor->ne[1] &&
  2013. tensor->nb[3] == tensor->nb[2]*tensor->ne[2];
  2014. }
  2015. bool ggml_are_same_shape(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
  2016. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  2017. return
  2018. (t0->ne[0] == t1->ne[0] ) &&
  2019. (t0->ne[1] == t1->ne[1] ) &&
  2020. (t0->ne[2] == t1->ne[2] ) &&
  2021. (t0->ne[3] == t1->ne[3] );
  2022. }
2023. // check if t1 can be represented as a repetition of t0
  2024. static inline bool ggml_can_repeat(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
  2025. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  2026. return
  2027. (t1->ne[0]%t0->ne[0] == 0) &&
  2028. (t1->ne[1]%t0->ne[1] == 0) &&
  2029. (t1->ne[2]%t0->ne[2] == 0) &&
  2030. (t1->ne[3]%t0->ne[3] == 0);
  2031. }
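// Illustrative sketch (added for clarity, not part of the original ggml.c):
// ggml_can_repeat(t0, t1) holds when every dimension of t1 is a whole multiple
// of the matching dimension of t0; binary ops such as ggml_add(ctx, a, b)
// assert ggml_can_repeat(b, a), i.e. b is broadcast over a. The helper name
// below is hypothetical.
static void example_can_repeat(struct ggml_context * ctx) {
    struct ggml_tensor * a   = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 4, 3); // 4x3 matrix
    struct ggml_tensor * row = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4);    // 4-element row
    GGML_ASSERT( ggml_can_repeat(row, a)); // row repeats 3 times to cover a
    GGML_ASSERT(!ggml_can_repeat(a, row)); // the reverse does not hold
}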
  2032. static inline bool ggml_can_repeat_rows(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
  2033. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  2034. return (t0->ne[0] == t1->ne[0]) && ggml_can_repeat(t0, t1);
  2035. }
  2036. static inline int ggml_up32(int n) {
  2037. return (n + 31) & ~31;
  2038. }
  2039. //static inline int ggml_up64(int n) {
  2040. // return (n + 63) & ~63;
  2041. //}
  2042. static inline int ggml_up(int n, int m) {
  2043. // assert m is a power of 2
  2044. GGML_ASSERT((m & (m - 1)) == 0);
  2045. return (n + m - 1) & ~(m - 1);
  2046. }
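// Worked example (added for clarity, not part of the original ggml.c):
//   ggml_up(70, 32) = (70 + 31) & ~31 = 101 & ~31 = 96
//   ggml_up32(70)   = 96,  ggml_up32(64) = 64 (already a multiple of 32)
// i.e. n is rounded up to the next multiple of the power-of-two m.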
  2047. // assert that pointer is aligned to GGML_MEM_ALIGN
  2048. #define ggml_assert_aligned(ptr) \
  2049. GGML_ASSERT(((uintptr_t) (ptr))%GGML_MEM_ALIGN == 0)
  2050. ////////////////////////////////////////////////////////////////////////////////
  2051. struct ggml_context * ggml_init(struct ggml_init_params params) {
  2052. // make this function thread safe
  2053. ggml_critical_section_start();
  2054. static bool is_first_call = true;
  2055. if (is_first_call) {
  2056. // initialize time system (required on Windows)
  2057. ggml_time_init();
  2058. // initialize GELU, Quick GELU, SILU and EXP F32 tables
  2059. {
  2060. const uint64_t t_start = ggml_time_us(); UNUSED(t_start);
  2061. ggml_fp16_t ii;
  2062. for (int i = 0; i < (1 << 16); ++i) {
  2063. uint16_t ui = i;
  2064. memcpy(&ii, &ui, sizeof(ii));
  2065. const float f = ggml_table_f32_f16[i] = GGML_COMPUTE_FP16_TO_FP32(ii);
  2066. ggml_table_gelu_f16[i] = GGML_FP32_TO_FP16(ggml_gelu_f32(f));
  2067. ggml_table_gelu_quick_f16[i] = GGML_FP32_TO_FP16(ggml_gelu_quick_f32(f));
  2068. ggml_table_silu_f16[i] = GGML_FP32_TO_FP16(ggml_silu_f32(f));
  2069. ggml_table_exp_f16[i] = GGML_FP32_TO_FP16(expf(f));
  2070. }
  2071. const uint64_t t_end = ggml_time_us(); UNUSED(t_end);
  2072. GGML_PRINT_DEBUG("%s: GELU, Quick GELU, SILU and EXP tables initialized in %f ms\n", __func__, (t_end - t_start)/1000.0f);
  2073. }
  2074. // initialize g_state
  2075. {
  2076. const uint64_t t_start = ggml_time_us(); UNUSED(t_start);
  2077. g_state = (struct ggml_state) {
  2078. /*.contexts =*/ { { 0 } },
  2079. /*.numa =*/ {
  2080. .n_nodes = 0,
  2081. .total_cpus = 0,
  2082. },
  2083. };
  2084. for (int i = 0; i < GGML_MAX_CONTEXTS; ++i) {
  2085. g_state.contexts[i].used = false;
  2086. }
  2087. const uint64_t t_end = ggml_time_us(); UNUSED(t_end);
  2088. GGML_PRINT_DEBUG("%s: g_state initialized in %f ms\n", __func__, (t_end - t_start)/1000.0f);
  2089. }
  2090. #if defined(GGML_USE_CUBLAS)
  2091. ggml_init_cublas();
  2092. #elif defined(GGML_USE_CLBLAST)
  2093. ggml_cl_init();
  2094. #elif defined(GGML_USE_VULKAN)
  2095. ggml_vk_init_cpu_assist();
  2096. #elif defined(GGML_USE_SYCL)
  2097. ggml_init_sycl();
  2098. #endif
  2099. ggml_setup_op_has_task_pass();
  2100. is_first_call = false;
  2101. }
  2102. // find non-used context in g_state
  2103. struct ggml_context * ctx = NULL;
  2104. for (int i = 0; i < GGML_MAX_CONTEXTS; i++) {
  2105. if (!g_state.contexts[i].used) {
  2106. g_state.contexts[i].used = true;
  2107. ctx = &g_state.contexts[i].context;
  2108. GGML_PRINT_DEBUG("%s: found unused context %d\n", __func__, i);
  2109. break;
  2110. }
  2111. }
  2112. if (ctx == NULL) {
  2113. GGML_PRINT_DEBUG("%s: no unused context found\n", __func__);
  2114. ggml_critical_section_end();
  2115. return NULL;
  2116. }
2117. // allow calling ggml_init with a mem_size of 0
  2118. if (params.mem_size == 0) {
  2119. params.mem_size = GGML_MEM_ALIGN;
  2120. }
  2121. const size_t mem_size = params.mem_buffer ? params.mem_size : GGML_PAD(params.mem_size, GGML_MEM_ALIGN);
  2122. *ctx = (struct ggml_context) {
  2123. /*.mem_size =*/ mem_size,
  2124. /*.mem_buffer =*/ params.mem_buffer ? params.mem_buffer : GGML_ALIGNED_MALLOC(mem_size),
  2125. /*.mem_buffer_owned =*/ params.mem_buffer ? false : true,
  2126. /*.no_alloc =*/ params.no_alloc,
  2127. /*.no_alloc_save =*/ params.no_alloc,
  2128. /*.n_objects =*/ 0,
  2129. /*.objects_begin =*/ NULL,
  2130. /*.objects_end =*/ NULL,
  2131. /*.scratch =*/ { 0, 0, NULL, },
  2132. /*.scratch_save =*/ { 0, 0, NULL, },
  2133. };
  2134. GGML_ASSERT(ctx->mem_buffer != NULL);
  2135. ggml_assert_aligned(ctx->mem_buffer);
  2136. GGML_PRINT_DEBUG("%s: context initialized\n", __func__);
  2137. ggml_critical_section_end();
  2138. return ctx;
  2139. }
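// Illustrative usage sketch (added for clarity, not part of the original
// ggml.c): a context is created over a single fixed-size memory pool and must
// later be released with ggml_free. With .mem_buffer == NULL the pool is
// allocated (and freed) by ggml; with .no_alloc == true only tensor metadata
// is placed in the pool. The helper name below is hypothetical.
static struct ggml_context * example_init_ctx(void) {
    struct ggml_init_params params = {
        /*.mem_size   =*/ 16*1024*1024, // 16 MB pool for metadata + data
        /*.mem_buffer =*/ NULL,         // let ggml allocate the pool
        /*.no_alloc   =*/ false,        // allocate tensor data inside the pool
    };
    return ggml_init(params); // caller is responsible for calling ggml_free()
}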
  2140. void ggml_free(struct ggml_context * ctx) {
  2141. if (ctx == NULL) {
  2142. return;
  2143. }
  2144. // make this function thread safe
  2145. ggml_critical_section_start();
  2146. bool found = false;
  2147. for (int i = 0; i < GGML_MAX_CONTEXTS; i++) {
  2148. if (&g_state.contexts[i].context == ctx) {
  2149. g_state.contexts[i].used = false;
  2150. GGML_PRINT_DEBUG("%s: context %d has been freed. memory used = %zu\n",
  2151. __func__, i, ggml_used_mem(ctx));
  2152. if (ctx->mem_buffer_owned) {
  2153. GGML_ALIGNED_FREE(ctx->mem_buffer);
  2154. }
  2155. found = true;
  2156. break;
  2157. }
  2158. }
  2159. if (!found) {
  2160. GGML_PRINT_DEBUG("%s: context not found\n", __func__);
  2161. }
  2162. ggml_critical_section_end();
  2163. }
  2164. size_t ggml_used_mem(const struct ggml_context * ctx) {
  2165. return ctx->objects_end == NULL ? 0 : ctx->objects_end->offs + ctx->objects_end->size;
  2166. }
  2167. size_t ggml_set_scratch(struct ggml_context * ctx, struct ggml_scratch scratch) {
  2168. const size_t result = ctx->scratch.data ? ctx->scratch.offs : 0;
  2169. ctx->scratch = scratch;
  2170. return result;
  2171. }
  2172. bool ggml_get_no_alloc(struct ggml_context * ctx) {
  2173. return ctx->no_alloc;
  2174. }
  2175. void ggml_set_no_alloc(struct ggml_context * ctx, bool no_alloc) {
  2176. ctx->no_alloc = no_alloc;
  2177. }
  2178. void * ggml_get_mem_buffer(const struct ggml_context * ctx) {
  2179. return ctx->mem_buffer;
  2180. }
  2181. size_t ggml_get_mem_size(const struct ggml_context * ctx) {
  2182. return ctx->mem_size;
  2183. }
  2184. size_t ggml_get_max_tensor_size(const struct ggml_context * ctx) {
  2185. size_t max_size = 0;
  2186. for (struct ggml_tensor * tensor = ggml_get_first_tensor(ctx); tensor != NULL; tensor = ggml_get_next_tensor(ctx, tensor)) {
  2187. size_t bytes = ggml_nbytes(tensor);
  2188. max_size = MAX(max_size, bytes);
  2189. }
  2190. return max_size;
  2191. }
  2192. // IMPORTANT:
  2193. // when creating "opt" tensors, always save and load the scratch buffer
2194. // this is an error-prone process, but it is necessary to support inplace
  2195. // operators when using scratch buffers
  2196. // TODO: implement a better way
  2197. static void ggml_scratch_save(struct ggml_context * ctx) {
  2198. // this is needed to allow opt tensors to store their data
  2199. // TODO: again, need to find a better way
  2200. ctx->no_alloc_save = ctx->no_alloc;
  2201. ctx->no_alloc = false;
  2202. ctx->scratch_save = ctx->scratch;
  2203. ctx->scratch.data = NULL;
  2204. }
  2205. static void ggml_scratch_load(struct ggml_context * ctx) {
  2206. ctx->no_alloc = ctx->no_alloc_save;
  2207. ctx->scratch = ctx->scratch_save;
  2208. }
  2209. ////////////////////////////////////////////////////////////////////////////////
  2210. static struct ggml_object * ggml_new_object(struct ggml_context * ctx, enum ggml_object_type type, size_t size) {
  2211. // always insert objects at the end of the context's memory pool
  2212. struct ggml_object * obj_cur = ctx->objects_end;
  2213. const size_t cur_offs = obj_cur == NULL ? 0 : obj_cur->offs;
  2214. const size_t cur_size = obj_cur == NULL ? 0 : obj_cur->size;
  2215. const size_t cur_end = cur_offs + cur_size;
  2216. // align to GGML_MEM_ALIGN
  2217. size_t size_needed = GGML_PAD(size, GGML_MEM_ALIGN);
  2218. char * const mem_buffer = ctx->mem_buffer;
  2219. struct ggml_object * const obj_new = (struct ggml_object *)(mem_buffer + cur_end);
  2220. if (cur_end + size_needed + GGML_OBJECT_SIZE > ctx->mem_size) {
  2221. GGML_PRINT("%s: not enough space in the context's memory pool (needed %zu, available %zu)\n",
  2222. __func__, cur_end + size_needed, ctx->mem_size);
  2223. assert(false);
  2224. return NULL;
  2225. }
  2226. *obj_new = (struct ggml_object) {
  2227. .offs = cur_end + GGML_OBJECT_SIZE,
  2228. .size = size_needed,
  2229. .next = NULL,
  2230. .type = type,
  2231. };
  2232. ggml_assert_aligned(mem_buffer + obj_new->offs);
  2233. if (obj_cur != NULL) {
  2234. obj_cur->next = obj_new;
  2235. } else {
  2236. // this is the first object in this context
  2237. ctx->objects_begin = obj_new;
  2238. }
  2239. ctx->objects_end = obj_new;
  2240. //printf("%s: inserted new object at %zu, size = %zu\n", __func__, cur_end, obj_new->size);
  2241. return obj_new;
  2242. }
  2243. static struct ggml_tensor * ggml_new_tensor_impl(
  2244. struct ggml_context * ctx,
  2245. enum ggml_type type,
  2246. int n_dims,
  2247. const int64_t * ne,
  2248. struct ggml_tensor * view_src,
  2249. size_t view_offs) {
  2250. assert(n_dims >= 1 && n_dims <= GGML_MAX_DIMS);
  2251. // find the base tensor and absolute offset
  2252. if (view_src != NULL && view_src->view_src != NULL) {
  2253. view_offs += view_src->view_offs;
  2254. view_src = view_src->view_src;
  2255. }
  2256. size_t data_size = ggml_row_size(type, ne[0]);
  2257. for (int i = 1; i < n_dims; i++) {
  2258. data_size *= ne[i];
  2259. }
  2260. GGML_ASSERT(view_src == NULL || data_size + view_offs <= ggml_nbytes(view_src));
  2261. void * data = view_src != NULL ? view_src->data : NULL;
  2262. if (data != NULL) {
  2263. data = (char *) data + view_offs;
  2264. }
  2265. size_t obj_alloc_size = 0;
  2266. if (view_src == NULL && !ctx->no_alloc) {
  2267. if (ctx->scratch.data != NULL) {
  2268. // allocate tensor data in the scratch buffer
  2269. if (ctx->scratch.offs + data_size > ctx->scratch.size) {
  2270. GGML_PRINT("%s: not enough space in the scratch memory pool (needed %zu, available %zu)\n",
  2271. __func__, ctx->scratch.offs + data_size, ctx->scratch.size);
  2272. assert(false);
  2273. return NULL;
  2274. }
  2275. data = (char * const) ctx->scratch.data + ctx->scratch.offs;
  2276. ctx->scratch.offs += data_size;
  2277. } else {
  2278. // allocate tensor data in the context's memory pool
  2279. obj_alloc_size = data_size;
  2280. }
  2281. }
  2282. struct ggml_object * const obj_new = ggml_new_object(ctx, GGML_OBJECT_TENSOR, GGML_TENSOR_SIZE + obj_alloc_size);
  2283. // TODO: for recoverable errors, we would need to free the data allocated from the scratch buffer here
  2284. struct ggml_tensor * const result = (struct ggml_tensor *)((char *)ctx->mem_buffer + obj_new->offs);
  2285. *result = (struct ggml_tensor) {
  2286. /*.type =*/ type,
  2287. /*.backend =*/ GGML_BACKEND_CPU,
  2288. /*.buffer =*/ NULL,
  2289. /*.ne =*/ { 1, 1, 1, 1 },
  2290. /*.nb =*/ { 0, 0, 0, 0 },
  2291. /*.op =*/ GGML_OP_NONE,
  2292. /*.op_params =*/ { 0 },
  2293. /*.flags =*/ 0,
  2294. /*.grad =*/ NULL,
  2295. /*.src =*/ { NULL },
  2296. /*.perf_runs =*/ 0,
  2297. /*.perf_cycles =*/ 0,
  2298. /*.perf_time_us =*/ 0,
  2299. /*.view_src =*/ view_src,
  2300. /*.view_offs =*/ view_offs,
  2301. /*.data =*/ obj_alloc_size > 0 ? (void *)(result + 1) : data,
  2302. /*.name =*/ { 0 },
  2303. /*.extra =*/ NULL,
  2304. /*.padding =*/ { 0 },
  2305. };
  2306. // TODO: this should not be needed as long as we don't rely on aligned SIMD loads
  2307. //ggml_assert_aligned(result->data);
  2308. for (int i = 0; i < n_dims; i++) {
  2309. result->ne[i] = ne[i];
  2310. }
  2311. result->nb[0] = ggml_type_size(type);
  2312. result->nb[1] = result->nb[0]*(result->ne[0]/ggml_blck_size(type));
  2313. for (int i = 2; i < GGML_MAX_DIMS; i++) {
  2314. result->nb[i] = result->nb[i - 1]*result->ne[i - 1];
  2315. }
  2316. ctx->n_objects++;
  2317. return result;
  2318. }
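// Worked example (added for clarity, not part of the original ggml.c): the
// strides set above follow
//   nb[0] = ggml_type_size(type)
//   nb[1] = nb[0]*ne[0]/ggml_blck_size(type)
//   nb[i] = nb[i-1]*ne[i-1]   for i >= 2
// For an F32 tensor with ne = {4, 3, 2, 1} (type size 4, block size 1) this
// gives nb = {4, 16, 48, 96}, and ggml_nbytes() == 96. For block-quantized
// types nb[1] spans a full row of blocks, e.g. assuming Q8_0 packs 32 elements
// into a 34-byte block, a row with ne[0] == 64 occupies nb[1] = 2*34 = 68 bytes.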
  2319. struct ggml_tensor * ggml_new_tensor(
  2320. struct ggml_context * ctx,
  2321. enum ggml_type type,
  2322. int n_dims,
  2323. const int64_t * ne) {
  2324. return ggml_new_tensor_impl(ctx, type, n_dims, ne, NULL, 0);
  2325. }
  2326. struct ggml_tensor * ggml_new_tensor_1d(
  2327. struct ggml_context * ctx,
  2328. enum ggml_type type,
  2329. int64_t ne0) {
  2330. return ggml_new_tensor(ctx, type, 1, &ne0);
  2331. }
  2332. struct ggml_tensor * ggml_new_tensor_2d(
  2333. struct ggml_context * ctx,
  2334. enum ggml_type type,
  2335. int64_t ne0,
  2336. int64_t ne1) {
  2337. const int64_t ne[2] = { ne0, ne1 };
  2338. return ggml_new_tensor(ctx, type, 2, ne);
  2339. }
  2340. struct ggml_tensor * ggml_new_tensor_3d(
  2341. struct ggml_context * ctx,
  2342. enum ggml_type type,
  2343. int64_t ne0,
  2344. int64_t ne1,
  2345. int64_t ne2) {
  2346. const int64_t ne[3] = { ne0, ne1, ne2 };
  2347. return ggml_new_tensor(ctx, type, 3, ne);
  2348. }
  2349. struct ggml_tensor * ggml_new_tensor_4d(
  2350. struct ggml_context * ctx,
  2351. enum ggml_type type,
  2352. int64_t ne0,
  2353. int64_t ne1,
  2354. int64_t ne2,
  2355. int64_t ne3) {
  2356. const int64_t ne[4] = { ne0, ne1, ne2, ne3 };
  2357. return ggml_new_tensor(ctx, type, 4, ne);
  2358. }
  2359. struct ggml_tensor * ggml_new_i32(struct ggml_context * ctx, int32_t value) {
  2360. ggml_scratch_save(ctx);
  2361. struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 1);
  2362. ggml_scratch_load(ctx);
  2363. ggml_set_i32(result, value);
  2364. return result;
  2365. }
  2366. struct ggml_tensor * ggml_new_f32(struct ggml_context * ctx, float value) {
  2367. ggml_scratch_save(ctx);
  2368. struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
  2369. ggml_scratch_load(ctx);
  2370. ggml_set_f32(result, value);
  2371. return result;
  2372. }
  2373. struct ggml_tensor * ggml_dup_tensor(struct ggml_context * ctx, const struct ggml_tensor * src) {
  2374. return ggml_new_tensor(ctx, src->type, GGML_MAX_DIMS, src->ne);
  2375. }
  2376. static void ggml_set_op_params(struct ggml_tensor * tensor, const void * params, size_t params_size) {
  2377. GGML_ASSERT(tensor != NULL); // silence -Warray-bounds warnings
  2378. assert(params_size <= GGML_MAX_OP_PARAMS);
  2379. memcpy(tensor->op_params, params, params_size);
  2380. }
  2381. static int32_t ggml_get_op_params_i32(const struct ggml_tensor * tensor, uint32_t i) {
  2382. assert(i < GGML_MAX_OP_PARAMS / sizeof(int32_t));
  2383. return ((const int32_t *)(tensor->op_params))[i];
  2384. }
  2385. static void ggml_set_op_params_i32(struct ggml_tensor * tensor, uint32_t i, int32_t value) {
  2386. assert(i < GGML_MAX_OP_PARAMS / sizeof(int32_t));
  2387. ((int32_t *)(tensor->op_params))[i] = value;
  2388. }
  2389. struct ggml_tensor * ggml_set_zero(struct ggml_tensor * tensor) {
  2390. memset(tensor->data, 0, ggml_nbytes(tensor));
  2391. return tensor;
  2392. }
  2393. struct ggml_tensor * ggml_set_i32 (struct ggml_tensor * tensor, int32_t value) {
  2394. const int n = ggml_nrows(tensor);
  2395. const int nc = tensor->ne[0];
  2396. const size_t n1 = tensor->nb[1];
  2397. char * const data = tensor->data;
  2398. switch (tensor->type) {
  2399. case GGML_TYPE_I8:
  2400. {
  2401. assert(tensor->nb[0] == sizeof(int8_t));
  2402. for (int i = 0; i < n; i++) {
  2403. ggml_vec_set_i8(nc, (int8_t *)(data + i*n1), value);
  2404. }
  2405. } break;
  2406. case GGML_TYPE_I16:
  2407. {
  2408. assert(tensor->nb[0] == sizeof(int16_t));
  2409. for (int i = 0; i < n; i++) {
  2410. ggml_vec_set_i16(nc, (int16_t *)(data + i*n1), value);
  2411. }
  2412. } break;
  2413. case GGML_TYPE_I32:
  2414. {
  2415. assert(tensor->nb[0] == sizeof(int32_t));
  2416. for (int i = 0; i < n; i++) {
  2417. ggml_vec_set_i32(nc, (int32_t *)(data + i*n1), value);
  2418. }
  2419. } break;
  2420. case GGML_TYPE_F16:
  2421. {
  2422. assert(tensor->nb[0] == sizeof(ggml_fp16_t));
  2423. for (int i = 0; i < n; i++) {
  2424. ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), GGML_FP32_TO_FP16(value));
  2425. }
  2426. } break;
  2427. case GGML_TYPE_F32:
  2428. {
  2429. assert(tensor->nb[0] == sizeof(float));
  2430. for (int i = 0; i < n; i++) {
  2431. ggml_vec_set_f32(nc, (float *)(data + i*n1), value);
  2432. }
  2433. } break;
  2434. default:
  2435. {
  2436. GGML_ASSERT(false);
  2437. } break;
  2438. }
  2439. return tensor;
  2440. }
  2441. struct ggml_tensor * ggml_set_f32(struct ggml_tensor * tensor, float value) {
  2442. const int n = ggml_nrows(tensor);
  2443. const int nc = tensor->ne[0];
  2444. const size_t n1 = tensor->nb[1];
  2445. char * const data = tensor->data;
  2446. switch (tensor->type) {
  2447. case GGML_TYPE_I8:
  2448. {
  2449. assert(tensor->nb[0] == sizeof(int8_t));
  2450. for (int i = 0; i < n; i++) {
  2451. ggml_vec_set_i8(nc, (int8_t *)(data + i*n1), value);
  2452. }
  2453. } break;
  2454. case GGML_TYPE_I16:
  2455. {
  2456. assert(tensor->nb[0] == sizeof(int16_t));
  2457. for (int i = 0; i < n; i++) {
  2458. ggml_vec_set_i16(nc, (int16_t *)(data + i*n1), value);
  2459. }
  2460. } break;
  2461. case GGML_TYPE_I32:
  2462. {
  2463. assert(tensor->nb[0] == sizeof(int32_t));
  2464. for (int i = 0; i < n; i++) {
  2465. ggml_vec_set_i32(nc, (int32_t *)(data + i*n1), value);
  2466. }
  2467. } break;
  2468. case GGML_TYPE_F16:
  2469. {
  2470. assert(tensor->nb[0] == sizeof(ggml_fp16_t));
  2471. for (int i = 0; i < n; i++) {
  2472. ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), GGML_FP32_TO_FP16(value));
  2473. }
  2474. } break;
  2475. case GGML_TYPE_F32:
  2476. {
  2477. assert(tensor->nb[0] == sizeof(float));
  2478. for (int i = 0; i < n; i++) {
  2479. ggml_vec_set_f32(nc, (float *)(data + i*n1), value);
  2480. }
  2481. } break;
  2482. default:
  2483. {
  2484. GGML_ASSERT(false);
  2485. } break;
  2486. }
  2487. return tensor;
  2488. }
  2489. void ggml_unravel_index(const struct ggml_tensor * tensor, int64_t i, int64_t * i0, int64_t * i1, int64_t * i2, int64_t * i3) {
  2490. const int64_t ne2 = tensor->ne[2];
  2491. const int64_t ne1 = tensor->ne[1];
  2492. const int64_t ne0 = tensor->ne[0];
  2493. const int64_t i3_ = (i/(ne2*ne1*ne0));
  2494. const int64_t i2_ = (i - i3_*ne2*ne1*ne0)/(ne1*ne0);
  2495. const int64_t i1_ = (i - i3_*ne2*ne1*ne0 - i2_*ne1*ne0)/ne0;
  2496. const int64_t i0_ = (i - i3_*ne2*ne1*ne0 - i2_*ne1*ne0 - i1_*ne0);
  2497. if (i0) {
  2498. * i0 = i0_;
  2499. }
  2500. if (i1) {
  2501. * i1 = i1_;
  2502. }
  2503. if (i2) {
  2504. * i2 = i2_;
  2505. }
  2506. if (i3) {
  2507. * i3 = i3_;
  2508. }
  2509. }
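// Worked example (added for clarity, not part of the original ggml.c): for a
// tensor with ne = {4, 3, 2, 2} the flat index i = 23 unravels to
//   i3 = 23/(2*3*4)               = 0
//   i2 = (23 - 0*24)/(3*4)        = 1
//   i1 = (23 - 0*24 - 1*12)/4     = 2
//   i0 =  23 - 0*24 - 1*12 - 2*4  = 3
// i.e. (i0, i1, i2, i3) = (3, 2, 1, 0), and indeed 0*24 + 1*12 + 2*4 + 3 == 23.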
  2510. int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i) {
  2511. if (!ggml_is_contiguous(tensor)) {
  2512. int64_t id[4] = { 0, 0, 0, 0 };
  2513. ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]);
  2514. return ggml_get_i32_nd(tensor, id[0], id[1], id[2], id[3]);
  2515. }
  2516. switch (tensor->type) {
  2517. case GGML_TYPE_I8:
  2518. {
  2519. GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
  2520. return ((int8_t *)(tensor->data))[i];
  2521. }
  2522. case GGML_TYPE_I16:
  2523. {
  2524. GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
  2525. return ((int16_t *)(tensor->data))[i];
  2526. }
  2527. case GGML_TYPE_I32:
  2528. {
  2529. GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
  2530. return ((int32_t *)(tensor->data))[i];
  2531. }
  2532. case GGML_TYPE_F16:
  2533. {
  2534. GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
  2535. return GGML_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]);
  2536. }
  2537. case GGML_TYPE_F32:
  2538. {
  2539. GGML_ASSERT(tensor->nb[0] == sizeof(float));
  2540. return ((float *)(tensor->data))[i];
  2541. }
  2542. default:
  2543. {
  2544. GGML_ASSERT(false);
  2545. }
  2546. }
2547. return 0;
  2548. }
  2549. void ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value) {
  2550. if (!ggml_is_contiguous(tensor)) {
  2551. int64_t id[4] = { 0, 0, 0, 0 };
  2552. ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]);
  2553. ggml_set_i32_nd(tensor, id[0], id[1], id[2], id[3], value);
  2554. return;
  2555. }
  2556. switch (tensor->type) {
  2557. case GGML_TYPE_I8:
  2558. {
  2559. GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
  2560. ((int8_t *)(tensor->data))[i] = value;
  2561. } break;
  2562. case GGML_TYPE_I16:
  2563. {
  2564. GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
  2565. ((int16_t *)(tensor->data))[i] = value;
  2566. } break;
  2567. case GGML_TYPE_I32:
  2568. {
  2569. GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
  2570. ((int32_t *)(tensor->data))[i] = value;
  2571. } break;
  2572. case GGML_TYPE_F16:
  2573. {
  2574. GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
  2575. ((ggml_fp16_t *)(tensor->data))[i] = GGML_FP32_TO_FP16(value);
  2576. } break;
  2577. case GGML_TYPE_F32:
  2578. {
  2579. GGML_ASSERT(tensor->nb[0] == sizeof(float));
  2580. ((float *)(tensor->data))[i] = value;
  2581. } break;
  2582. default:
  2583. {
  2584. GGML_ASSERT(false);
  2585. } break;
  2586. }
  2587. }
  2588. int32_t ggml_get_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3) {
  2589. void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3];
  2590. switch (tensor->type) {
  2591. case GGML_TYPE_I8:
  2592. return ((int8_t *) data)[0];
  2593. case GGML_TYPE_I16:
  2594. return ((int16_t *) data)[0];
  2595. case GGML_TYPE_I32:
  2596. return ((int32_t *) data)[0];
  2597. case GGML_TYPE_F16:
  2598. return GGML_FP16_TO_FP32(((ggml_fp16_t *) data)[0]);
  2599. case GGML_TYPE_F32:
  2600. return ((float *) data)[0];
  2601. default:
  2602. GGML_ASSERT(false);
  2603. }
2604. return 0;
  2605. }
  2606. void ggml_set_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, int32_t value) {
  2607. void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3];
  2608. switch (tensor->type) {
  2609. case GGML_TYPE_I8:
  2610. {
  2611. ((int8_t *)(data))[0] = value;
  2612. } break;
  2613. case GGML_TYPE_I16:
  2614. {
  2615. ((int16_t *)(data))[0] = value;
  2616. } break;
  2617. case GGML_TYPE_I32:
  2618. {
  2619. ((int32_t *)(data))[0] = value;
  2620. } break;
  2621. case GGML_TYPE_F16:
  2622. {
  2623. ((ggml_fp16_t *)(data))[0] = GGML_FP32_TO_FP16(value);
  2624. } break;
  2625. case GGML_TYPE_F32:
  2626. {
  2627. ((float *)(data))[0] = value;
  2628. } break;
  2629. default:
  2630. {
  2631. GGML_ASSERT(false);
  2632. } break;
  2633. }
  2634. }
  2635. float ggml_get_f32_1d(const struct ggml_tensor * tensor, int i) {
  2636. if (!ggml_is_contiguous(tensor)) {
  2637. int64_t id[4] = { 0, 0, 0, 0 };
  2638. ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]);
  2639. return ggml_get_f32_nd(tensor, id[0], id[1], id[2], id[3]);
  2640. }
  2641. switch (tensor->type) {
  2642. case GGML_TYPE_I8:
  2643. {
  2644. GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
  2645. return ((int8_t *)(tensor->data))[i];
  2646. }
  2647. case GGML_TYPE_I16:
  2648. {
  2649. GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
  2650. return ((int16_t *)(tensor->data))[i];
  2651. }
  2652. case GGML_TYPE_I32:
  2653. {
  2654. GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
  2655. return ((int32_t *)(tensor->data))[i];
  2656. }
  2657. case GGML_TYPE_F16:
  2658. {
  2659. GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
  2660. return GGML_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]);
  2661. }
  2662. case GGML_TYPE_F32:
  2663. {
  2664. GGML_ASSERT(tensor->nb[0] == sizeof(float));
  2665. return ((float *)(tensor->data))[i];
  2666. }
  2667. default:
  2668. {
  2669. GGML_ASSERT(false);
  2670. }
  2671. }
  2672. return 0.0f;
  2673. }
  2674. void ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value) {
  2675. if (!ggml_is_contiguous(tensor)) {
  2676. int64_t id[4] = { 0, 0, 0, 0 };
  2677. ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]);
  2678. ggml_set_f32_nd(tensor, id[0], id[1], id[2], id[3], value);
  2679. return;
  2680. }
  2681. switch (tensor->type) {
  2682. case GGML_TYPE_I8:
  2683. {
  2684. GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
  2685. ((int8_t *)(tensor->data))[i] = value;
  2686. } break;
  2687. case GGML_TYPE_I16:
  2688. {
  2689. GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
  2690. ((int16_t *)(tensor->data))[i] = value;
  2691. } break;
  2692. case GGML_TYPE_I32:
  2693. {
  2694. GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
  2695. ((int32_t *)(tensor->data))[i] = value;
  2696. } break;
  2697. case GGML_TYPE_F16:
  2698. {
  2699. GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
  2700. ((ggml_fp16_t *)(tensor->data))[i] = GGML_FP32_TO_FP16(value);
  2701. } break;
  2702. case GGML_TYPE_F32:
  2703. {
  2704. GGML_ASSERT(tensor->nb[0] == sizeof(float));
  2705. ((float *)(tensor->data))[i] = value;
  2706. } break;
  2707. default:
  2708. {
  2709. GGML_ASSERT(false);
  2710. } break;
  2711. }
  2712. }
  2713. float ggml_get_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3) {
  2714. void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3];
  2715. switch (tensor->type) {
  2716. case GGML_TYPE_I8:
  2717. return ((int8_t *) data)[0];
  2718. case GGML_TYPE_I16:
  2719. return ((int16_t *) data)[0];
  2720. case GGML_TYPE_I32:
  2721. return ((int32_t *) data)[0];
  2722. case GGML_TYPE_F16:
  2723. return GGML_FP16_TO_FP32(((ggml_fp16_t *) data)[0]);
  2724. case GGML_TYPE_F32:
  2725. return ((float *) data)[0];
  2726. default:
  2727. GGML_ASSERT(false);
  2728. }
  2729. return 0.0f;
  2730. }
  2731. void ggml_set_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, float value) {
  2732. void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3];
  2733. switch (tensor->type) {
  2734. case GGML_TYPE_I8:
  2735. {
  2736. ((int8_t *)(data))[0] = value;
  2737. } break;
  2738. case GGML_TYPE_I16:
  2739. {
  2740. ((int16_t *)(data))[0] = value;
  2741. } break;
  2742. case GGML_TYPE_I32:
  2743. {
  2744. ((int32_t *)(data))[0] = value;
  2745. } break;
  2746. case GGML_TYPE_F16:
  2747. {
  2748. ((ggml_fp16_t *)(data))[0] = GGML_FP32_TO_FP16(value);
  2749. } break;
  2750. case GGML_TYPE_F32:
  2751. {
  2752. ((float *)(data))[0] = value;
  2753. } break;
  2754. default:
  2755. {
  2756. GGML_ASSERT(false);
  2757. } break;
  2758. }
  2759. }
  2760. void * ggml_get_data(const struct ggml_tensor * tensor) {
  2761. return tensor->data;
  2762. }
  2763. float * ggml_get_data_f32(const struct ggml_tensor * tensor) {
  2764. assert(tensor->type == GGML_TYPE_F32);
  2765. return (float *)(tensor->data);
  2766. }
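// Illustrative sketch (added for clarity, not part of the original ggml.c):
// elements can be addressed either by flat index (ggml_set_f32_1d /
// ggml_get_f32_1d) or by explicit 4-D coordinates (ggml_set_f32_nd /
// ggml_get_f32_nd); the coordinate variants also work on non-contiguous
// tensors. The helper name below is hypothetical.
static void example_element_access(struct ggml_context * ctx) {
    struct ggml_tensor * m = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 4, 3); // 4x3, F32
    ggml_set_f32(m, 0.0f);                                // fill with zeros
    ggml_set_f32_nd(m, 2, 1, 0, 0, 7.0f);                 // element (i0 = 2, i1 = 1)
    GGML_ASSERT(ggml_get_f32_1d(m, 1*4 + 2) == 7.0f);     // flat index = i1*ne[0] + i0
}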
  2767. GGML_CALL enum ggml_unary_op ggml_get_unary_op(const struct ggml_tensor * tensor) {
  2768. GGML_ASSERT(tensor->op == GGML_OP_UNARY);
  2769. return (enum ggml_unary_op) ggml_get_op_params_i32(tensor, 0);
  2770. }
  2771. const char * ggml_get_name(const struct ggml_tensor * tensor) {
  2772. return tensor->name;
  2773. }
  2774. struct ggml_tensor * ggml_set_name(struct ggml_tensor * tensor, const char * name) {
  2775. strncpy(tensor->name, name, sizeof(tensor->name) - 1);
  2776. tensor->name[sizeof(tensor->name) - 1] = '\0';
  2777. return tensor;
  2778. }
  2779. struct ggml_tensor * ggml_format_name(struct ggml_tensor * tensor, const char * fmt, ...) {
  2780. va_list args;
  2781. va_start(args, fmt);
  2782. vsnprintf(tensor->name, sizeof(tensor->name), fmt, args);
  2783. va_end(args);
  2784. return tensor;
  2785. }
  2786. struct ggml_tensor * ggml_view_tensor(
  2787. struct ggml_context * ctx,
  2788. struct ggml_tensor * src) {
  2789. struct ggml_tensor * result = ggml_new_tensor_impl(ctx, src->type, GGML_MAX_DIMS, src->ne, src, 0);
  2790. ggml_format_name(result, "%s (view)", src->name);
  2791. for (int i = 0; i < GGML_MAX_DIMS; i++) {
  2792. result->nb[i] = src->nb[i];
  2793. }
  2794. return result;
  2795. }
  2796. struct ggml_tensor * ggml_get_first_tensor(const struct ggml_context * ctx) {
  2797. struct ggml_object * obj = ctx->objects_begin;
  2798. char * const mem_buffer = ctx->mem_buffer;
  2799. while (obj != NULL) {
  2800. if (obj->type == GGML_OBJECT_TENSOR) {
  2801. return (struct ggml_tensor *)(mem_buffer + obj->offs);
  2802. }
  2803. obj = obj->next;
  2804. }
  2805. return NULL;
  2806. }
  2807. struct ggml_tensor * ggml_get_next_tensor(const struct ggml_context * ctx, struct ggml_tensor * tensor) {
  2808. struct ggml_object * obj = (struct ggml_object *) ((char *)tensor - GGML_OBJECT_SIZE);
  2809. obj = obj->next;
  2810. char * const mem_buffer = ctx->mem_buffer;
  2811. while (obj != NULL) {
  2812. if (obj->type == GGML_OBJECT_TENSOR) {
  2813. return (struct ggml_tensor *)(mem_buffer + obj->offs);
  2814. }
  2815. obj = obj->next;
  2816. }
  2817. return NULL;
  2818. }
  2819. struct ggml_tensor * ggml_get_tensor(struct ggml_context * ctx, const char * name) {
  2820. struct ggml_object * obj = ctx->objects_begin;
  2821. char * const mem_buffer = ctx->mem_buffer;
  2822. while (obj != NULL) {
  2823. if (obj->type == GGML_OBJECT_TENSOR) {
  2824. struct ggml_tensor * cur = (struct ggml_tensor *)(mem_buffer + obj->offs);
  2825. if (strcmp(cur->name, name) == 0) {
  2826. return cur;
  2827. }
  2828. }
  2829. obj = obj->next;
  2830. }
  2831. return NULL;
  2832. }
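// Illustrative sketch (added for clarity, not part of the original ggml.c):
// the iteration helpers above can be combined to walk every tensor in a
// context, e.g. to report the largest allocation by name. The helper name
// below is hypothetical; printf is used for brevity.
static void example_print_largest_tensor(const struct ggml_context * ctx) {
    const struct ggml_tensor * largest = NULL;
    for (struct ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
        if (largest == NULL || ggml_nbytes(t) > ggml_nbytes(largest)) {
            largest = t;
        }
    }
    if (largest != NULL) {
        printf("largest tensor: %s (%zu bytes)\n", ggml_get_name(largest), ggml_nbytes(largest));
    }
}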
  2833. ////////////////////////////////////////////////////////////////////////////////
  2834. // ggml_dup
  2835. static struct ggml_tensor * ggml_dup_impl(
  2836. struct ggml_context * ctx,
  2837. struct ggml_tensor * a,
  2838. bool inplace) {
  2839. bool is_node = false;
  2840. if (!inplace && (a->grad)) {
  2841. is_node = true;
  2842. }
  2843. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  2844. result->op = GGML_OP_DUP;
  2845. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  2846. result->src[0] = a;
  2847. return result;
  2848. }
  2849. struct ggml_tensor * ggml_dup(
  2850. struct ggml_context * ctx,
  2851. struct ggml_tensor * a) {
  2852. return ggml_dup_impl(ctx, a, false);
  2853. }
  2854. struct ggml_tensor * ggml_dup_inplace(
  2855. struct ggml_context * ctx,
  2856. struct ggml_tensor * a) {
  2857. return ggml_dup_impl(ctx, a, true);
  2858. }
  2859. // ggml_add
  2860. static struct ggml_tensor * ggml_add_impl(
  2861. struct ggml_context * ctx,
  2862. struct ggml_tensor * a,
  2863. struct ggml_tensor * b,
  2864. bool inplace) {
  2865. GGML_ASSERT(ggml_can_repeat(b, a));
  2866. bool is_node = false;
  2867. if (!inplace && (a->grad || b->grad)) {
  2868. // TODO: support backward pass for broadcasting
  2869. GGML_ASSERT(ggml_are_same_shape(a, b));
  2870. is_node = true;
  2871. }
  2872. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  2873. result->op = GGML_OP_ADD;
  2874. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  2875. result->src[0] = a;
  2876. result->src[1] = b;
  2877. return result;
  2878. }
  2879. struct ggml_tensor * ggml_add(
  2880. struct ggml_context * ctx,
  2881. struct ggml_tensor * a,
  2882. struct ggml_tensor * b) {
  2883. return ggml_add_impl(ctx, a, b, false);
  2884. }
  2885. struct ggml_tensor * ggml_add_inplace(
  2886. struct ggml_context * ctx,
  2887. struct ggml_tensor * a,
  2888. struct ggml_tensor * b) {
  2889. return ggml_add_impl(ctx, a, b, true);
  2890. }
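// Illustrative usage sketch (added for clarity, not part of the original
// ggml.c): operators such as ggml_add only record a node in the context; the
// values are produced by building and computing a graph. This assumes the
// graph API declared in ggml.h (ggml_new_graph, ggml_build_forward_expand,
// ggml_graph_compute_with_ctx), which is defined later in this file. The
// helper name below is hypothetical.
static float example_add_scalars(struct ggml_context * ctx) {
    struct ggml_tensor * a = ggml_new_f32(ctx, 2.0f);
    struct ggml_tensor * b = ggml_new_f32(ctx, 3.0f);
    struct ggml_tensor * c = ggml_add(ctx, a, b);             // records GGML_OP_ADD
    struct ggml_cgraph * gf = ggml_new_graph(ctx);
    ggml_build_forward_expand(gf, c);
    ggml_graph_compute_with_ctx(ctx, gf, /*n_threads =*/ 1);  // runs the recorded ops
    return ggml_get_f32_1d(c, 0);                             // 2.0f + 3.0f = 5.0f
}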
  2891. // ggml_add_cast
  2892. static struct ggml_tensor * ggml_add_cast_impl(
  2893. struct ggml_context * ctx,
  2894. struct ggml_tensor * a,
  2895. struct ggml_tensor * b,
  2896. enum ggml_type type) {
  2897. // TODO: support less-strict constraint
  2898. // GGML_ASSERT(ggml_can_repeat(b, a));
  2899. GGML_ASSERT(ggml_can_repeat_rows(b, a));
  2900. GGML_ASSERT(ggml_is_quantized(a->type) || a->type == GGML_TYPE_F16); // currently only supported for quantized input and f16
  2901. bool is_node = false;
  2902. if (a->grad || b->grad) {
  2903. // TODO: support backward pass for broadcasting
  2904. GGML_ASSERT(ggml_are_same_shape(a, b));
  2905. is_node = true;
  2906. }
  2907. struct ggml_tensor * result = ggml_new_tensor(ctx, type, GGML_MAX_DIMS, a->ne);
  2908. result->op = GGML_OP_ADD;
  2909. result->grad = is_node ? ggml_new_tensor(ctx, GGML_TYPE_F32, GGML_MAX_DIMS, a->ne) : NULL;
  2910. result->src[0] = a;
  2911. result->src[1] = b;
  2912. return result;
  2913. }
  2914. struct ggml_tensor * ggml_add_cast(
  2915. struct ggml_context * ctx,
  2916. struct ggml_tensor * a,
  2917. struct ggml_tensor * b,
  2918. enum ggml_type type) {
  2919. return ggml_add_cast_impl(ctx, a, b, type);
  2920. }
  2921. // ggml_add1
  2922. static struct ggml_tensor * ggml_add1_impl(
  2923. struct ggml_context * ctx,
  2924. struct ggml_tensor * a,
  2925. struct ggml_tensor * b,
  2926. bool inplace) {
  2927. GGML_ASSERT(ggml_is_scalar(b));
  2928. GGML_ASSERT(ggml_is_padded_1d(a));
  2929. bool is_node = false;
  2930. if (a->grad || b->grad) {
  2931. is_node = true;
  2932. }
  2933. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  2934. result->op = GGML_OP_ADD1;
  2935. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  2936. result->src[0] = a;
  2937. result->src[1] = b;
  2938. return result;
  2939. }
  2940. struct ggml_tensor * ggml_add1(
  2941. struct ggml_context * ctx,
  2942. struct ggml_tensor * a,
  2943. struct ggml_tensor * b) {
  2944. return ggml_add1_impl(ctx, a, b, false);
  2945. }
  2946. struct ggml_tensor * ggml_add1_inplace(
  2947. struct ggml_context * ctx,
  2948. struct ggml_tensor * a,
  2949. struct ggml_tensor * b) {
  2950. return ggml_add1_impl(ctx, a, b, true);
  2951. }
  2952. // ggml_acc
  2953. static struct ggml_tensor * ggml_acc_impl(
  2954. struct ggml_context * ctx,
  2955. struct ggml_tensor * a,
  2956. struct ggml_tensor * b,
  2957. size_t nb1,
  2958. size_t nb2,
  2959. size_t nb3,
  2960. size_t offset,
  2961. bool inplace) {
  2962. GGML_ASSERT(ggml_nelements(b) <= ggml_nelements(a));
  2963. GGML_ASSERT(ggml_is_contiguous(a));
  2964. GGML_ASSERT(a->type == GGML_TYPE_F32);
  2965. GGML_ASSERT(b->type == GGML_TYPE_F32);
  2966. bool is_node = false;
  2967. if (!inplace && (a->grad || b->grad)) {
  2968. is_node = true;
  2969. }
  2970. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  2971. int32_t params[] = { nb1, nb2, nb3, offset, inplace ? 1 : 0 };
  2972. ggml_set_op_params(result, params, sizeof(params));
  2973. result->op = GGML_OP_ACC;
  2974. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  2975. result->src[0] = a;
  2976. result->src[1] = b;
  2977. return result;
  2978. }
  2979. struct ggml_tensor * ggml_acc(
  2980. struct ggml_context * ctx,
  2981. struct ggml_tensor * a,
  2982. struct ggml_tensor * b,
  2983. size_t nb1,
  2984. size_t nb2,
  2985. size_t nb3,
  2986. size_t offset) {
  2987. return ggml_acc_impl(ctx, a, b, nb1, nb2, nb3, offset, false);
  2988. }
  2989. struct ggml_tensor * ggml_acc_inplace(
  2990. struct ggml_context * ctx,
  2991. struct ggml_tensor * a,
  2992. struct ggml_tensor * b,
  2993. size_t nb1,
  2994. size_t nb2,
  2995. size_t nb3,
  2996. size_t offset) {
  2997. return ggml_acc_impl(ctx, a, b, nb1, nb2, nb3, offset, true);
  2998. }
  2999. // ggml_sub
  3000. static struct ggml_tensor * ggml_sub_impl(
  3001. struct ggml_context * ctx,
  3002. struct ggml_tensor * a,
  3003. struct ggml_tensor * b,
  3004. bool inplace) {
  3005. GGML_ASSERT(ggml_are_same_shape(a, b));
  3006. bool is_node = false;
  3007. if (!inplace && (a->grad || b->grad)) {
  3008. is_node = true;
  3009. }
  3010. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3011. result->op = GGML_OP_SUB;
  3012. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3013. result->src[0] = a;
  3014. result->src[1] = b;
  3015. return result;
  3016. }
  3017. struct ggml_tensor * ggml_sub(
  3018. struct ggml_context * ctx,
  3019. struct ggml_tensor * a,
  3020. struct ggml_tensor * b) {
  3021. return ggml_sub_impl(ctx, a, b, false);
  3022. }
  3023. struct ggml_tensor * ggml_sub_inplace(
  3024. struct ggml_context * ctx,
  3025. struct ggml_tensor * a,
  3026. struct ggml_tensor * b) {
  3027. return ggml_sub_impl(ctx, a, b, true);
  3028. }
  3029. // ggml_mul
  3030. static struct ggml_tensor * ggml_mul_impl(
  3031. struct ggml_context * ctx,
  3032. struct ggml_tensor * a,
  3033. struct ggml_tensor * b,
  3034. bool inplace) {
  3035. GGML_ASSERT(ggml_can_repeat(b, a));
  3036. bool is_node = false;
  3037. if (!inplace && (a->grad || b->grad)) {
  3038. // TODO: support backward pass for broadcasting
  3039. GGML_ASSERT(ggml_are_same_shape(a, b));
  3040. is_node = true;
  3041. }
  3042. if (inplace) {
  3043. GGML_ASSERT(!is_node);
  3044. }
  3045. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3046. result->op = GGML_OP_MUL;
  3047. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3048. result->src[0] = a;
  3049. result->src[1] = b;
  3050. return result;
  3051. }
  3052. struct ggml_tensor * ggml_mul(
  3053. struct ggml_context * ctx,
  3054. struct ggml_tensor * a,
  3055. struct ggml_tensor * b) {
  3056. return ggml_mul_impl(ctx, a, b, false);
  3057. }
  3058. struct ggml_tensor * ggml_mul_inplace(
  3059. struct ggml_context * ctx,
  3060. struct ggml_tensor * a,
  3061. struct ggml_tensor * b) {
  3062. return ggml_mul_impl(ctx, a, b, true);
  3063. }
  3064. // ggml_div
  3065. static struct ggml_tensor * ggml_div_impl(
  3066. struct ggml_context * ctx,
  3067. struct ggml_tensor * a,
  3068. struct ggml_tensor * b,
  3069. bool inplace) {
  3070. GGML_ASSERT(ggml_can_repeat(b, a));
  3071. bool is_node = false;
  3072. if (!inplace && (a->grad || b->grad)) {
  3073. is_node = true;
  3074. }
  3075. if (inplace) {
  3076. GGML_ASSERT(!is_node);
  3077. }
  3078. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3079. result->op = GGML_OP_DIV;
  3080. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3081. result->src[0] = a;
  3082. result->src[1] = b;
  3083. return result;
  3084. }
  3085. struct ggml_tensor * ggml_div(
  3086. struct ggml_context * ctx,
  3087. struct ggml_tensor * a,
  3088. struct ggml_tensor * b) {
  3089. return ggml_div_impl(ctx, a, b, false);
  3090. }
  3091. struct ggml_tensor * ggml_div_inplace(
  3092. struct ggml_context * ctx,
  3093. struct ggml_tensor * a,
  3094. struct ggml_tensor * b) {
  3095. return ggml_div_impl(ctx, a, b, true);
  3096. }
  3097. // ggml_sqr
  3098. static struct ggml_tensor * ggml_sqr_impl(
  3099. struct ggml_context * ctx,
  3100. struct ggml_tensor * a,
  3101. bool inplace) {
  3102. bool is_node = false;
  3103. if (!inplace && (a->grad)) {
  3104. is_node = true;
  3105. }
  3106. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3107. result->op = GGML_OP_SQR;
  3108. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3109. result->src[0] = a;
  3110. return result;
  3111. }
  3112. struct ggml_tensor * ggml_sqr(
  3113. struct ggml_context * ctx,
  3114. struct ggml_tensor * a) {
  3115. return ggml_sqr_impl(ctx, a, false);
  3116. }
  3117. struct ggml_tensor * ggml_sqr_inplace(
  3118. struct ggml_context * ctx,
  3119. struct ggml_tensor * a) {
  3120. return ggml_sqr_impl(ctx, a, true);
  3121. }
  3122. // ggml_sqrt
  3123. static struct ggml_tensor * ggml_sqrt_impl(
  3124. struct ggml_context * ctx,
  3125. struct ggml_tensor * a,
  3126. bool inplace) {
  3127. bool is_node = false;
  3128. if (!inplace && (a->grad)) {
  3129. is_node = true;
  3130. }
  3131. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3132. result->op = GGML_OP_SQRT;
  3133. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3134. result->src[0] = a;
  3135. return result;
  3136. }
  3137. struct ggml_tensor * ggml_sqrt(
  3138. struct ggml_context * ctx,
  3139. struct ggml_tensor * a) {
  3140. return ggml_sqrt_impl(ctx, a, false);
  3141. }
  3142. struct ggml_tensor * ggml_sqrt_inplace(
  3143. struct ggml_context * ctx,
  3144. struct ggml_tensor * a) {
  3145. return ggml_sqrt_impl(ctx, a, true);
  3146. }
  3147. // ggml_log
  3148. static struct ggml_tensor * ggml_log_impl(
  3149. struct ggml_context * ctx,
  3150. struct ggml_tensor * a,
  3151. bool inplace) {
  3152. bool is_node = false;
  3153. if (!inplace && (a->grad)) {
  3154. is_node = true;
  3155. }
  3156. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3157. result->op = GGML_OP_LOG;
  3158. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3159. result->src[0] = a;
  3160. return result;
  3161. }
  3162. struct ggml_tensor * ggml_log(
  3163. struct ggml_context * ctx,
  3164. struct ggml_tensor * a) {
  3165. return ggml_log_impl(ctx, a, false);
  3166. }
  3167. struct ggml_tensor * ggml_log_inplace(
  3168. struct ggml_context * ctx,
  3169. struct ggml_tensor * a) {
  3170. return ggml_log_impl(ctx, a, true);
  3171. }
  3172. // ggml_sum
  3173. struct ggml_tensor * ggml_sum(
  3174. struct ggml_context * ctx,
  3175. struct ggml_tensor * a) {
  3176. bool is_node = false;
  3177. if (a->grad) {
  3178. is_node = true;
  3179. }
  3180. struct ggml_tensor * result = ggml_new_tensor_1d(ctx, a->type, 1);
  3181. result->op = GGML_OP_SUM;
  3182. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3183. result->src[0] = a;
  3184. return result;
  3185. }
  3186. // ggml_sum_rows
  3187. struct ggml_tensor * ggml_sum_rows(
  3188. struct ggml_context * ctx,
  3189. struct ggml_tensor * a) {
  3190. bool is_node = false;
  3191. if (a->grad) {
  3192. is_node = true;
  3193. }
  3194. int64_t ne[GGML_MAX_DIMS] = { 1 };
  3195. for (int i = 1; i < GGML_MAX_DIMS; ++i) {
  3196. ne[i] = a->ne[i];
  3197. }
  3198. struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, GGML_MAX_DIMS, ne);
  3199. result->op = GGML_OP_SUM_ROWS;
  3200. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3201. result->src[0] = a;
  3202. return result;
  3203. }
  3204. // ggml_mean
  3205. struct ggml_tensor * ggml_mean(
  3206. struct ggml_context * ctx,
  3207. struct ggml_tensor * a) {
  3208. bool is_node = false;
  3209. if (a->grad) {
  3210. GGML_ASSERT(false); // TODO: implement
  3211. is_node = true;
  3212. }
  3213. int64_t ne[4] = { 1, a->ne[1], a->ne[2], a->ne[3] };
  3214. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);
  3215. result->op = GGML_OP_MEAN;
  3216. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3217. result->src[0] = a;
  3218. return result;
  3219. }
  3220. // ggml_argmax
  3221. struct ggml_tensor * ggml_argmax(
  3222. struct ggml_context * ctx,
  3223. struct ggml_tensor * a) {
  3224. GGML_ASSERT(ggml_is_matrix(a));
  3225. bool is_node = false;
  3226. if (a->grad) {
  3227. GGML_ASSERT(false);
  3228. is_node = true;
  3229. }
  3230. struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, a->ne[1]);
  3231. result->op = GGML_OP_ARGMAX;
  3232. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3233. result->src[0] = a;
  3234. return result;
  3235. }
  3236. // ggml_repeat
  3237. struct ggml_tensor * ggml_repeat(
  3238. struct ggml_context * ctx,
  3239. struct ggml_tensor * a,
  3240. struct ggml_tensor * b) {
  3241. GGML_ASSERT(ggml_can_repeat(a, b));
  3242. bool is_node = false;
  3243. if (a->grad) {
  3244. is_node = true;
  3245. }
  3246. struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, GGML_MAX_DIMS, b->ne);
  3247. result->op = GGML_OP_REPEAT;
  3248. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3249. result->src[0] = a;
  3250. return result;
  3251. }
  3252. // ggml_repeat_back
  3253. struct ggml_tensor * ggml_repeat_back(
  3254. struct ggml_context * ctx,
  3255. struct ggml_tensor * a,
  3256. struct ggml_tensor * b) {
  3257. GGML_ASSERT(ggml_can_repeat(b, a));
  3258. bool is_node = false;
  3259. if (a->grad) {
  3260. is_node = true;
  3261. }
  3262. if (ggml_are_same_shape(a, b) && !is_node) {
  3263. return a;
  3264. }
  3265. struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, GGML_MAX_DIMS, b->ne);
  3266. result->op = GGML_OP_REPEAT_BACK;
  3267. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3268. result->src[0] = a;
  3269. return result;
  3270. }
  3271. // ggml_concat
  3272. struct ggml_tensor * ggml_concat(
3273. struct ggml_context * ctx,
3274. struct ggml_tensor * a,
3275. struct ggml_tensor * b) {
  3276. GGML_ASSERT(a->ne[0] == b->ne[0] && a->ne[1] == b->ne[1] && a->ne[3] == b->ne[3]);
  3277. bool is_node = false;
  3278. if (a->grad || b->grad) {
  3279. is_node = true;
  3280. }
  3281. struct ggml_tensor * result = ggml_new_tensor_4d(ctx, a->type, a->ne[0], a->ne[1], a->ne[2] + b->ne[2], a->ne[3]);
  3282. result->op = GGML_OP_CONCAT;
  3283. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3284. result->src[0] = a;
  3285. result->src[1] = b;
  3286. return result;
  3287. }
  3288. // ggml_abs
  3289. struct ggml_tensor * ggml_abs(
  3290. struct ggml_context * ctx,
  3291. struct ggml_tensor * a) {
  3292. return ggml_unary(ctx, a, GGML_UNARY_OP_ABS);
  3293. }
  3294. struct ggml_tensor * ggml_abs_inplace(
  3295. struct ggml_context * ctx,
  3296. struct ggml_tensor * a) {
  3297. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_ABS);
  3298. }
  3299. // ggml_sgn
  3300. struct ggml_tensor * ggml_sgn(
  3301. struct ggml_context * ctx,
  3302. struct ggml_tensor * a) {
  3303. return ggml_unary(ctx, a, GGML_UNARY_OP_SGN);
  3304. }
  3305. struct ggml_tensor * ggml_sgn_inplace(
  3306. struct ggml_context * ctx,
  3307. struct ggml_tensor * a) {
  3308. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_SGN);
  3309. }
  3310. // ggml_neg
  3311. struct ggml_tensor * ggml_neg(
  3312. struct ggml_context * ctx,
  3313. struct ggml_tensor * a) {
  3314. return ggml_unary(ctx, a, GGML_UNARY_OP_NEG);
  3315. }
  3316. struct ggml_tensor * ggml_neg_inplace(
  3317. struct ggml_context * ctx,
  3318. struct ggml_tensor * a) {
  3319. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_NEG);
  3320. }
  3321. // ggml_step
  3322. struct ggml_tensor * ggml_step(
  3323. struct ggml_context * ctx,
  3324. struct ggml_tensor * a) {
  3325. return ggml_unary(ctx, a, GGML_UNARY_OP_STEP);
  3326. }
  3327. struct ggml_tensor * ggml_step_inplace(
  3328. struct ggml_context * ctx,
  3329. struct ggml_tensor * a) {
  3330. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_STEP);
  3331. }
  3332. // ggml_tanh
  3333. struct ggml_tensor * ggml_tanh(
  3334. struct ggml_context * ctx,
  3335. struct ggml_tensor * a) {
  3336. return ggml_unary(ctx, a, GGML_UNARY_OP_TANH);
  3337. }
  3338. struct ggml_tensor * ggml_tanh_inplace(
  3339. struct ggml_context * ctx,
  3340. struct ggml_tensor * a) {
  3341. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_TANH);
  3342. }
  3343. // ggml_elu
  3344. struct ggml_tensor * ggml_elu(
  3345. struct ggml_context * ctx,
  3346. struct ggml_tensor * a) {
  3347. return ggml_unary(ctx, a, GGML_UNARY_OP_ELU);
  3348. }
  3349. struct ggml_tensor * ggml_elu_inplace(
  3350. struct ggml_context * ctx,
  3351. struct ggml_tensor * a) {
  3352. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_ELU);
  3353. }
  3354. // ggml_relu
  3355. struct ggml_tensor * ggml_relu(
  3356. struct ggml_context * ctx,
  3357. struct ggml_tensor * a) {
  3358. return ggml_unary(ctx, a, GGML_UNARY_OP_RELU);
  3359. }
  3360. struct ggml_tensor * ggml_relu_inplace(
  3361. struct ggml_context * ctx,
  3362. struct ggml_tensor * a) {
  3363. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_RELU);
  3364. }
  3365. // ggml_leaky_relu
  3366. struct ggml_tensor * ggml_leaky_relu(
  3367. struct ggml_context * ctx,
  3368. struct ggml_tensor * a, float negative_slope, bool inplace) {
  3369. bool is_node = false;
  3370. if (!inplace && (a->grad)) {
  3371. is_node = true;
  3372. }
  3373. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3374. ggml_set_op_params(result, &negative_slope, sizeof(negative_slope));
  3375. result->op = GGML_OP_LEAKY_RELU;
  3376. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3377. result->src[0] = a;
  3378. return result;
  3379. }
  3380. // ggml_gelu
  3381. struct ggml_tensor * ggml_gelu(
  3382. struct ggml_context * ctx,
  3383. struct ggml_tensor * a) {
  3384. return ggml_unary(ctx, a, GGML_UNARY_OP_GELU);
  3385. }
  3386. struct ggml_tensor * ggml_gelu_inplace(
  3387. struct ggml_context * ctx,
  3388. struct ggml_tensor * a) {
  3389. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_GELU);
  3390. }
  3391. // ggml_gelu_quick
  3392. struct ggml_tensor * ggml_gelu_quick(
  3393. struct ggml_context * ctx,
  3394. struct ggml_tensor * a) {
  3395. return ggml_unary(ctx, a, GGML_UNARY_OP_GELU_QUICK);
  3396. }
  3397. struct ggml_tensor * ggml_gelu_quick_inplace(
  3398. struct ggml_context * ctx,
  3399. struct ggml_tensor * a) {
  3400. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_GELU_QUICK);
  3401. }
  3402. // ggml_silu
  3403. struct ggml_tensor * ggml_silu(
  3404. struct ggml_context * ctx,
  3405. struct ggml_tensor * a) {
  3406. return ggml_unary(ctx, a, GGML_UNARY_OP_SILU);
  3407. }
  3408. struct ggml_tensor * ggml_silu_inplace(
  3409. struct ggml_context * ctx,
  3410. struct ggml_tensor * a) {
  3411. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_SILU);
  3412. }
  3413. // ggml_silu_back
  3414. struct ggml_tensor * ggml_silu_back(
  3415. struct ggml_context * ctx,
  3416. struct ggml_tensor * a,
  3417. struct ggml_tensor * b) {
  3418. bool is_node = false;
  3419. if (a->grad || b->grad) {
  3420. // TODO: implement backward
  3421. is_node = true;
  3422. }
  3423. struct ggml_tensor * result = ggml_dup_tensor(ctx, a);
  3424. result->op = GGML_OP_SILU_BACK;
  3425. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3426. result->src[0] = a;
  3427. result->src[1] = b;
  3428. return result;
  3429. }
3430. // ggml_hardswish
  3431. struct ggml_tensor * ggml_hardswish(
  3432. struct ggml_context * ctx,
  3433. struct ggml_tensor * a) {
  3434. return ggml_unary(ctx, a, GGML_UNARY_OP_HARDSWISH);
  3435. }
3436. // ggml_hardsigmoid
  3437. struct ggml_tensor * ggml_hardsigmoid(
  3438. struct ggml_context * ctx,
  3439. struct ggml_tensor * a) {
  3440. return ggml_unary(ctx, a, GGML_UNARY_OP_HARDSIGMOID);
  3441. }
  3442. // ggml_norm
  3443. static struct ggml_tensor * ggml_norm_impl(
  3444. struct ggml_context * ctx,
  3445. struct ggml_tensor * a,
  3446. float eps,
  3447. bool inplace) {
  3448. bool is_node = false;
  3449. if (!inplace && (a->grad)) {
  3450. GGML_ASSERT(false); // TODO: implement backward
  3451. is_node = true;
  3452. }
  3453. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3454. ggml_set_op_params(result, &eps, sizeof(eps));
  3455. result->op = GGML_OP_NORM;
  3456. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3457. result->src[0] = a;
  3458. return result;
  3459. }
  3460. struct ggml_tensor * ggml_norm(
  3461. struct ggml_context * ctx,
  3462. struct ggml_tensor * a,
  3463. float eps) {
  3464. return ggml_norm_impl(ctx, a, eps, false);
  3465. }
  3466. struct ggml_tensor * ggml_norm_inplace(
  3467. struct ggml_context * ctx,
  3468. struct ggml_tensor * a,
  3469. float eps) {
  3470. return ggml_norm_impl(ctx, a, eps, true);
  3471. }
  3472. // ggml_rms_norm
  3473. static struct ggml_tensor * ggml_rms_norm_impl(
  3474. struct ggml_context * ctx,
  3475. struct ggml_tensor * a,
  3476. float eps,
  3477. bool inplace) {
  3478. bool is_node = false;
  3479. if (!inplace && (a->grad)) {
  3480. is_node = true;
  3481. }
  3482. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  3483. ggml_set_op_params(result, &eps, sizeof(eps));
  3484. result->op = GGML_OP_RMS_NORM;
  3485. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  3486. result->src[0] = a;
  3487. return result;
  3488. }
  3489. struct ggml_tensor * ggml_rms_norm(
  3490. struct ggml_context * ctx,
  3491. struct ggml_tensor * a,
  3492. float eps) {
  3493. return ggml_rms_norm_impl(ctx, a, eps, false);
  3494. }
  3495. struct ggml_tensor * ggml_rms_norm_inplace(
  3496. struct ggml_context * ctx,
  3497. struct ggml_tensor * a,
  3498. float eps) {
  3499. return ggml_rms_norm_impl(ctx, a, eps, true);
  3500. }
// ggml_rms_norm_back

struct ggml_tensor * ggml_rms_norm_back(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        float eps) {
    bool is_node = false;
    if (a->grad) {
        // TODO: implement backward
        is_node = true;
    }
    struct ggml_tensor * result = ggml_dup_tensor(ctx, a);
    ggml_set_op_params(result, &eps, sizeof(eps));
    result->op = GGML_OP_RMS_NORM_BACK;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;
    return result;
}

// ggml_group_norm

static struct ggml_tensor * ggml_group_norm_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_groups,
        bool inplace) {
    bool is_node = false;
    if (!inplace && (a->grad)) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }
    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
    result->op_params[0] = n_groups;
    result->op = GGML_OP_GROUP_NORM;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    return result;
}

struct ggml_tensor * ggml_group_norm(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_groups) {
    return ggml_group_norm_impl(ctx, a, n_groups, false);
}

struct ggml_tensor * ggml_group_norm_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_groups) {
    return ggml_group_norm_impl(ctx, a, n_groups, true);
}

// ggml_mul_mat

struct ggml_tensor * ggml_mul_mat(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    GGML_ASSERT(ggml_can_mul_mat(a, b));
    GGML_ASSERT(!ggml_is_transposed(a));
    bool is_node = false;
    if (a->grad || b->grad) {
        is_node = true;
    }
    const int64_t ne[4] = { a->ne[1], b->ne[1], b->ne[2], b->ne[3] };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);
    result->op = GGML_OP_MUL_MAT;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;
    return result;
}

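// [illustrative sketch - not part of the original source] ggml_mul_mat contracts over ne[0]
// of both operands, so for a: [K, M] and b: [K, N] the result is [M, N] (a^T * b in
// conventional notation). A hypothetical linear layer therefore looks like:
//
//     // w: [n_embd, n_out] weights, x: [n_embd, n_tokens] input
//     struct ggml_tensor * y = ggml_mul_mat(ctx, w, x); // y: [n_out, n_tokens]
//
// b->ne[2] and b->ne[3] are carried through to the result, which gives batched multiplication.
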
void ggml_mul_mat_set_prec(
        struct ggml_tensor * a,
        enum ggml_prec prec) {
    const int32_t prec_i32 = (int32_t) prec;
    ggml_set_op_params_i32(a, 0, prec_i32);
}

// ggml_mul_mat_id

struct ggml_tensor * ggml_mul_mat_id(
        struct ggml_context * ctx,
        struct ggml_tensor * const as[],
        int n_as,
        struct ggml_tensor * ids,
        int id,
        struct ggml_tensor * b) {
    GGML_ASSERT(ids->type == GGML_TYPE_I32);
    GGML_ASSERT(ids->ne[2] == 1 && ids->ne[3] == 1);
    GGML_ASSERT(ids->ne[1] == b->ne[1]);
    GGML_ASSERT(ids->ne[2] == b->ne[2] && ids->ne[3] == b->ne[3]);
    GGML_ASSERT(n_as > 0 && n_as <= GGML_MAX_SRC - 2);
    GGML_ASSERT(id >= 0 && id < ids->ne[0]);
    bool is_node = false;
    if (as[0]->grad || b->grad) {
        is_node = true;
    }
    const int64_t ne[4] = { as[0]->ne[1], b->ne[1], b->ne[2], b->ne[3] };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);
    ggml_set_op_params_i32(result, 0, id);
    ggml_set_op_params_i32(result, 1, n_as);
    result->op = GGML_OP_MUL_MAT_ID;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = ids;
    result->src[1] = b;
    for (int i = 0; i < n_as; i++) {
        struct ggml_tensor * a = as[i];
        GGML_ASSERT(ggml_are_same_shape(as[0], a));
        GGML_ASSERT(ggml_can_mul_mat(a, b));
        GGML_ASSERT(!ggml_is_transposed(a));
        result->src[i + 2] = a;
    }
    return result;
}

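// [illustrative sketch - not part of the original source] ggml_mul_mat_id selects one matrix
// from `as` per column of b using the I32 `ids` tensor, as in a mixture-of-experts layer.
// Hypothetical usage (expert count and tensor names are made up):
//
//     // experts[i]: [n_embd, n_ff], ids: [n_expert_used, n_tokens] (I32), x: [n_embd, n_tokens]
//     struct ggml_tensor * cur = ggml_mul_mat_id(ctx, experts, n_expert, ids, 0, x);
//
// The expert matrices become src[2..n_as+1]; src[0] and src[1] hold ids and b.
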
// ggml_out_prod

struct ggml_tensor * ggml_out_prod(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    GGML_ASSERT(ggml_can_out_prod(a, b));
    GGML_ASSERT(!ggml_is_transposed(a));
    bool is_node = false;
    if (a->grad || b->grad) {
        is_node = true;
    }
    // a is broadcastable to b for ne[2] and ne[3] -> use b->ne[2] and b->ne[3]
    const int64_t ne[4] = { a->ne[0], b->ne[0], b->ne[2], b->ne[3] };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);
    result->op = GGML_OP_OUT_PROD;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;
    return result;
}

// ggml_scale

static struct ggml_tensor * ggml_scale_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        float s,
        bool inplace) {
    GGML_ASSERT(ggml_is_padded_1d(a));
    bool is_node = false;
    if (a->grad) {
        is_node = true;
    }
    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
    ggml_set_op_params(result, &s, sizeof(s));
    result->op = GGML_OP_SCALE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    return result;
}

struct ggml_tensor * ggml_scale(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        float s) {
    return ggml_scale_impl(ctx, a, s, false);
}

struct ggml_tensor * ggml_scale_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        float s) {
    return ggml_scale_impl(ctx, a, s, true);
}

// ggml_set

static struct ggml_tensor * ggml_set_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        size_t nb1,
        size_t nb2,
        size_t nb3,
        size_t offset,
        bool inplace) {
    GGML_ASSERT(ggml_nelements(a) >= ggml_nelements(b));
    bool is_node = false;
    if (a->grad || b->grad) {
        is_node = true;
    }
    // make a view of the destination
    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
    int32_t params[] = { nb1, nb2, nb3, offset, inplace ? 1 : 0 };
    ggml_set_op_params(result, params, sizeof(params));
    result->op = GGML_OP_SET;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;
    return result;
}

struct ggml_tensor * ggml_set(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        size_t nb1,
        size_t nb2,
        size_t nb3,
        size_t offset) {
    return ggml_set_impl(ctx, a, b, nb1, nb2, nb3, offset, false);
}

struct ggml_tensor * ggml_set_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        size_t nb1,
        size_t nb2,
        size_t nb3,
        size_t offset) {
    return ggml_set_impl(ctx, a, b, nb1, nb2, nb3, offset, true);
}

struct ggml_tensor * ggml_set_1d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        size_t offset) {
    return ggml_set_impl(ctx, a, b, a->nb[1], a->nb[2], a->nb[3], offset, false);
}

struct ggml_tensor * ggml_set_1d_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        size_t offset) {
    return ggml_set_impl(ctx, a, b, a->nb[1], a->nb[2], a->nb[3], offset, true);
}

struct ggml_tensor * ggml_set_2d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        size_t nb1,
        size_t offset) {
    return ggml_set_impl(ctx, a, b, nb1, a->nb[2], a->nb[3], offset, false);
}

struct ggml_tensor * ggml_set_2d_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        size_t nb1,
        size_t offset) {
    return ggml_set_impl(ctx, a, b, nb1, a->nb[2], a->nb[3], offset, true);
}

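// [illustrative sketch - not part of the original source] The ggml_set_* helpers overwrite part
// of a with the elements of b, starting at a byte offset and using the given strides. For
// example, appending one token's key vector into a hypothetical pre-allocated 1-D cache:
//
//     // k_cache: [n_embd * n_ctx] (F32), k_cur: [n_embd], write slot `pos`
//     size_t offset = (size_t) pos * n_embd * ggml_element_size(k_cache);
//     k_cache = ggml_set_1d_inplace(ctx, k_cache, k_cur, offset);
//
// The non-inplace variants return a modified copy; op_params record nb1/nb2/nb3/offset for the kernel.
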
// ggml_cpy

static struct ggml_tensor * ggml_cpy_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    GGML_ASSERT(ggml_nelements(a) == ggml_nelements(b));
    bool is_node = false;
    if (a->grad || b->grad) {
        // inplace is false and either one has a grad
        is_node = true;
    }
    // make a view of the destination
    struct ggml_tensor * result = ggml_view_tensor(ctx, b);
    if (strlen(b->name) > 0) {
        ggml_format_name(result, "%s (copy of %s)", b->name, a->name);
    } else {
        ggml_format_name(result, "%s (copy)", a->name);
    }
    result->op = GGML_OP_CPY;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;
    return result;
}

struct ggml_tensor * ggml_cpy(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_cpy_impl(ctx, a, b);
}

struct ggml_tensor * ggml_cast(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        enum ggml_type type) {
    bool is_node = false;
    struct ggml_tensor * result = ggml_new_tensor(ctx, type, GGML_MAX_DIMS, a->ne);
    ggml_format_name(result, "%s (copy)", a->name);
    result->op = GGML_OP_CPY;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = result;
    return result;
}

// ggml_cont

static struct ggml_tensor * ggml_cont_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    bool is_node = false;
    if (a->grad) {
        is_node = true;
    }
    struct ggml_tensor * result = ggml_dup_tensor(ctx, a);
    ggml_format_name(result, "%s (cont)", a->name);
    result->op = GGML_OP_CONT;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    return result;
}

struct ggml_tensor * ggml_cont(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_cont_impl(ctx, a);
}

// make contiguous, with new shape
GGML_API struct ggml_tensor * ggml_cont_1d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0) {
    return ggml_cont_4d(ctx, a, ne0, 1, 1, 1);
}

GGML_API struct ggml_tensor * ggml_cont_2d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        int64_t ne1) {
    return ggml_cont_4d(ctx, a, ne0, ne1, 1, 1);
}

GGML_API struct ggml_tensor * ggml_cont_3d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        int64_t ne1,
        int64_t ne2) {
    return ggml_cont_4d(ctx, a, ne0, ne1, ne2, 1);
}

struct ggml_tensor * ggml_cont_4d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        int64_t ne1,
        int64_t ne2,
        int64_t ne3) {
    GGML_ASSERT(ggml_nelements(a) == (ne0*ne1*ne2*ne3));
    bool is_node = false;
    struct ggml_tensor * result = ggml_new_tensor_4d(ctx, a->type, ne0, ne1, ne2, ne3);
    ggml_format_name(result, "%s (cont)", a->name);
    result->op = GGML_OP_CONT;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    return result;
}

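// [illustrative sketch - not part of the original source] ggml_cont materializes a
// non-contiguous view (e.g. the result of ggml_permute or ggml_transpose) into freshly
// laid-out memory, which several kernels require. Hypothetical usage:
//
//     // v: [n_embd_head, n_tokens, n_head] -> [n_tokens, n_embd_head, n_head]
//     struct ggml_tensor * v_t = ggml_cont(ctx, ggml_permute(ctx, v, 1, 0, 2, 3));
//
// ggml_cont_1d/2d/3d/4d additionally reinterpret the copied data with a new shape.
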
// ggml_reshape

struct ggml_tensor * ggml_reshape(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    GGML_ASSERT(ggml_is_contiguous(a));
    // as only the shape of b is relevant, and not its memory layout, b is allowed to be non contiguous.
    GGML_ASSERT(ggml_nelements(a) == ggml_nelements(b));
    bool is_node = false;
    if (a->grad) {
        is_node = true;
    }
    if (b->grad) {
        // gradient propagation is not supported
        //GGML_ASSERT(false);
    }
    struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, GGML_MAX_DIMS, b->ne, a, 0);
    ggml_format_name(result, "%s (reshaped)", a->name);
    result->op = GGML_OP_RESHAPE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    return result;
}

struct ggml_tensor * ggml_reshape_1d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0) {
    GGML_ASSERT(ggml_is_contiguous(a));
    GGML_ASSERT(ggml_nelements(a) == ne0);
    bool is_node = false;
    if (a->grad) {
        is_node = true;
    }
    const int64_t ne[1] = { ne0 };
    struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 1, ne, a, 0);
    ggml_format_name(result, "%s (reshaped)", a->name);
    result->op = GGML_OP_RESHAPE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    return result;
}

struct ggml_tensor * ggml_reshape_2d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        int64_t ne1) {
    GGML_ASSERT(ggml_is_contiguous(a));
    GGML_ASSERT(ggml_nelements(a) == ne0*ne1);
    bool is_node = false;
    if (a->grad) {
        is_node = true;
    }
    const int64_t ne[2] = { ne0, ne1 };
    struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 2, ne, a, 0);
    ggml_format_name(result, "%s (reshaped)", a->name);
    result->op = GGML_OP_RESHAPE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    return result;
}

struct ggml_tensor * ggml_reshape_3d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        int64_t ne1,
        int64_t ne2) {
    GGML_ASSERT(ggml_is_contiguous(a));
    GGML_ASSERT(ggml_nelements(a) == ne0*ne1*ne2);
    bool is_node = false;
    if (a->grad) {
        is_node = true;
    }
    const int64_t ne[3] = { ne0, ne1, ne2 };
    struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 3, ne, a, 0);
    ggml_format_name(result, "%s (reshaped)", a->name);
    result->op = GGML_OP_RESHAPE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    return result;
}

struct ggml_tensor * ggml_reshape_4d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        int64_t ne1,
        int64_t ne2,
        int64_t ne3) {
    GGML_ASSERT(ggml_is_contiguous(a));
    GGML_ASSERT(ggml_nelements(a) == ne0*ne1*ne2*ne3);
    bool is_node = false;
    if (a->grad) {
        is_node = true;
    }
    const int64_t ne[4] = { ne0, ne1, ne2, ne3 };
    struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 4, ne, a, 0);
    ggml_format_name(result, "%s (reshaped)", a->name);
    result->op = GGML_OP_RESHAPE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    return result;
}

static struct ggml_tensor * ggml_view_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_dims,
        const int64_t * ne,
        size_t offset) {
    bool is_node = false;
    if (a->grad) {
        is_node = true;
    }
    struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, n_dims, ne, a, offset);
    ggml_format_name(result, "%s (view)", a->name);
    ggml_set_op_params(result, &offset, sizeof(offset));
    result->op = GGML_OP_VIEW;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    return result;
}

// ggml_view_1d

struct ggml_tensor * ggml_view_1d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        size_t offset) {
    struct ggml_tensor * result = ggml_view_impl(ctx, a, 1, &ne0, offset);
    return result;
}

// ggml_view_2d

struct ggml_tensor * ggml_view_2d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        int64_t ne1,
        size_t nb1,
        size_t offset) {
    const int64_t ne[2] = { ne0, ne1 };
    struct ggml_tensor * result = ggml_view_impl(ctx, a, 2, ne, offset);
    result->nb[1] = nb1;
    result->nb[2] = result->nb[1]*ne1;
    result->nb[3] = result->nb[2];
    return result;
}

// ggml_view_3d

struct ggml_tensor * ggml_view_3d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        int64_t ne1,
        int64_t ne2,
        size_t nb1,
        size_t nb2,
        size_t offset) {
    const int64_t ne[3] = { ne0, ne1, ne2 };
    struct ggml_tensor * result = ggml_view_impl(ctx, a, 3, ne, offset);
    result->nb[1] = nb1;
    result->nb[2] = nb2;
    result->nb[3] = result->nb[2]*ne2;
    return result;
}

// ggml_view_4d

struct ggml_tensor * ggml_view_4d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        int64_t ne1,
        int64_t ne2,
        int64_t ne3,
        size_t nb1,
        size_t nb2,
        size_t nb3,
        size_t offset) {
    const int64_t ne[4] = { ne0, ne1, ne2, ne3 };
    struct ggml_tensor * result = ggml_view_impl(ctx, a, 4, ne, offset);
    result->nb[1] = nb1;
    result->nb[2] = nb2;
    result->nb[3] = nb3;
    return result;
}

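// [illustrative sketch - not part of the original source] The ggml_view_* helpers reinterpret
// a region of an existing tensor: ne gives the new shape, nb the byte strides and offset the
// starting byte. E.g. viewing part of a hypothetical flat cache as an [n_embd, n_tokens] matrix:
//
//     struct ggml_tensor * k = ggml_view_2d(ctx, k_cache,
//             n_embd, n_tokens,
//             n_embd * ggml_element_size(k_cache), // nb1: stride between rows, in bytes
//             0);                                  // offset from the start of k_cache, in bytes
//
// Only the strides passed in are overridden; nb[0] remains that of the source element type.
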
// ggml_permute

struct ggml_tensor * ggml_permute(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int axis0,
        int axis1,
        int axis2,
        int axis3) {
    GGML_ASSERT(axis0 >= 0 && axis0 < GGML_MAX_DIMS);
    GGML_ASSERT(axis1 >= 0 && axis1 < GGML_MAX_DIMS);
    GGML_ASSERT(axis2 >= 0 && axis2 < GGML_MAX_DIMS);
    GGML_ASSERT(axis3 >= 0 && axis3 < GGML_MAX_DIMS);
    GGML_ASSERT(axis0 != axis1);
    GGML_ASSERT(axis0 != axis2);
    GGML_ASSERT(axis0 != axis3);
    GGML_ASSERT(axis1 != axis2);
    GGML_ASSERT(axis1 != axis3);
    GGML_ASSERT(axis2 != axis3);
    bool is_node = false;
    if (a->grad) {
        is_node = true;
    }
    struct ggml_tensor * result = ggml_view_tensor(ctx, a);
    ggml_format_name(result, "%s (permuted)", a->name);
    int ne[GGML_MAX_DIMS];
    int nb[GGML_MAX_DIMS];
    ne[axis0] = a->ne[0];
    ne[axis1] = a->ne[1];
    ne[axis2] = a->ne[2];
    ne[axis3] = a->ne[3];
    nb[axis0] = a->nb[0];
    nb[axis1] = a->nb[1];
    nb[axis2] = a->nb[2];
    nb[axis3] = a->nb[3];
    result->ne[0] = ne[0];
    result->ne[1] = ne[1];
    result->ne[2] = ne[2];
    result->ne[3] = ne[3];
    result->nb[0] = nb[0];
    result->nb[1] = nb[1];
    result->nb[2] = nb[2];
    result->nb[3] = nb[3];
    result->op = GGML_OP_PERMUTE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    int32_t params[] = { axis0, axis1, axis2, axis3 };
    ggml_set_op_params(result, params, sizeof(params));
    return result;
}

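// [illustrative sketch - not part of the original source] ggml_permute returns a view and only
// rewrites ne/nb, so it is cheap; axis i of the source becomes axis `axisi` of the result.
// A hypothetical pattern when preparing attention heads:
//
//     // q: [n_embd, n_tokens] -> [n_embd_head, n_head, n_tokens] -> [n_embd_head, n_tokens, n_head]
//     struct ggml_tensor * qh = ggml_permute(ctx,
//             ggml_reshape_3d(ctx, q, n_embd_head, n_head, n_tokens), 0, 2, 1, 3);
//
// Ops that need contiguous data should be fed the result of ggml_cont(ctx, qh).
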
// ggml_transpose

struct ggml_tensor * ggml_transpose(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    bool is_node = false;
    if (a->grad) {
        is_node = true;
    }
    struct ggml_tensor * result = ggml_view_tensor(ctx, a);
    ggml_format_name(result, "%s (transposed)", a->name);
    result->ne[0] = a->ne[1];
    result->ne[1] = a->ne[0];
    result->nb[0] = a->nb[1];
    result->nb[1] = a->nb[0];
    result->op = GGML_OP_TRANSPOSE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    return result;
}

// ggml_get_rows

struct ggml_tensor * ggml_get_rows(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    GGML_ASSERT(a->ne[2] == b->ne[1]);
    GGML_ASSERT(b->ne[3] == 1);
    GGML_ASSERT(b->type == GGML_TYPE_I32);
    bool is_node = false;
    if (a->grad || b->grad) {
        is_node = true;
    }
    // TODO: implement non F32 return
    enum ggml_type type = GGML_TYPE_F32;
    if (a->type == GGML_TYPE_I32) {
        type = a->type;
    }
    struct ggml_tensor * result = ggml_new_tensor_4d(ctx, type, a->ne[0], b->ne[0], b->ne[1], b->ne[2]);
    result->op = GGML_OP_GET_ROWS;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;
    return result;
}

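// [illustrative sketch - not part of the original source] ggml_get_rows gathers rows of a
// (indexed along dim 1) using the I32 indices in b; the common case is the token embedding
// lookup. Hypothetical usage:
//
//     // tok_embd: [n_embd, n_vocab], inp_tokens: [n_tokens] (GGML_TYPE_I32)
//     struct ggml_tensor * inpL = ggml_get_rows(ctx, tok_embd, inp_tokens); // [n_embd, n_tokens]
//
// Quantized or F16 source rows are returned as F32 (see the TODO above about other return types).
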
// ggml_get_rows_back

struct ggml_tensor * ggml_get_rows_back(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        struct ggml_tensor * c) {
    GGML_ASSERT(ggml_is_matrix(a) && ggml_is_vector(b) && b->type == GGML_TYPE_I32);
    GGML_ASSERT(ggml_is_matrix(c) && (a->ne[0] == c->ne[0]));
    bool is_node = false;
    if (a->grad || b->grad) {
        is_node = true;
    }
    // TODO: implement non F32 return
    //struct ggml_tensor * result = ggml_new_tensor_2d(ctx, a->type, a->ne[0], b->ne[0]);
    struct ggml_tensor * result = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, c->ne[0], c->ne[1]);
    result->op = GGML_OP_GET_ROWS_BACK;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;
    return result;
}

// ggml_diag

struct ggml_tensor * ggml_diag(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    GGML_ASSERT(a->ne[1] == 1);
    bool is_node = false;
    if (a->grad) {
        is_node = true;
    }
    const int64_t ne[4] = { a->ne[0], a->ne[0], a->ne[2], a->ne[3] };
    struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, 4, ne);
    result->op = GGML_OP_DIAG;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    return result;
}

// ggml_diag_mask_inf

static struct ggml_tensor * ggml_diag_mask_inf_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_past,
        bool inplace) {
    bool is_node = false;
    if (a->grad) {
        is_node = true;
    }
    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
    int32_t params[] = { n_past };
    ggml_set_op_params(result, params, sizeof(params));
    result->op = GGML_OP_DIAG_MASK_INF;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    return result;
}

struct ggml_tensor * ggml_diag_mask_inf(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_past) {
    return ggml_diag_mask_inf_impl(ctx, a, n_past, false);
}

struct ggml_tensor * ggml_diag_mask_inf_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_past) {
    return ggml_diag_mask_inf_impl(ctx, a, n_past, true);
}

// ggml_diag_mask_zero

static struct ggml_tensor * ggml_diag_mask_zero_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_past,
        bool inplace) {
    bool is_node = false;
    if (a->grad) {
        is_node = true;
    }
    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
    int32_t params[] = { n_past };
    ggml_set_op_params(result, params, sizeof(params));
    result->op = GGML_OP_DIAG_MASK_ZERO;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    return result;
}

struct ggml_tensor * ggml_diag_mask_zero(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_past) {
    return ggml_diag_mask_zero_impl(ctx, a, n_past, false);
}

struct ggml_tensor * ggml_diag_mask_zero_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_past) {
    return ggml_diag_mask_zero_impl(ctx, a, n_past, true);
}

// ggml_soft_max

static struct ggml_tensor * ggml_soft_max_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * mask,
        struct ggml_tensor * pos,
        float scale,
        float max_bias,
        bool inplace) {
    GGML_ASSERT(ggml_is_contiguous(a));
    if (mask) {
        GGML_ASSERT(ggml_is_contiguous(mask));
        GGML_ASSERT(ggml_is_matrix(mask));
        GGML_ASSERT(ggml_can_repeat_rows(mask, a));
    }
    if (pos) {
        GGML_ASSERT(ggml_is_vector(pos));
        GGML_ASSERT(pos->type == GGML_TYPE_F32);
        GGML_ASSERT(pos->ne[0] == a->ne[0]);
    }
    if (max_bias > 0.0f) {
        GGML_ASSERT(pos);
    }
    bool is_node = false;
    if (a->grad) {
        is_node = true;
    }
    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
    float params[] = { scale, max_bias };
    ggml_set_op_params(result, params, sizeof(params));
    result->op = GGML_OP_SOFT_MAX;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = mask;
    result->src[2] = pos;
    return result;
}

struct ggml_tensor * ggml_soft_max(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_soft_max_impl(ctx, a, NULL, NULL, 1.0f, 0.0f, false);
}

struct ggml_tensor * ggml_soft_max_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_soft_max_impl(ctx, a, NULL, NULL, 1.0f, 0.0f, true);
}

struct ggml_tensor * ggml_soft_max_ext(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * mask,
        struct ggml_tensor * pos,
        float scale,
        float max_bias) {
    return ggml_soft_max_impl(ctx, a, mask, pos, scale, max_bias, false);
}

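// [illustrative sketch - not part of the original source] ggml_soft_max_ext fuses the 1/sqrt(d)
// scaling, an optional additive mask and (via pos/max_bias) an ALiBi-style bias into the softmax
// over attention scores. Hypothetical attention usage:
//
//     // kq: [n_kv, n_tokens, n_head], kq_mask: [n_kv, n_tokens] F32 (0 or -INFINITY)
//     kq = ggml_soft_max_ext(ctx, kq, kq_mask, NULL,
//             1.0f / sqrtf((float) n_embd_head), /*max_bias=*/ 0.0f);
//
// With max_bias > 0.0f a non-NULL pos tensor is required (see the assert above).
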
// ggml_soft_max_back

static struct ggml_tensor * ggml_soft_max_back_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        bool inplace) {
    bool is_node = false;
    if (a->grad || b->grad) {
        is_node = true; // TODO : implement backward pass
    }
    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
    result->op = GGML_OP_SOFT_MAX_BACK;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;
    return result;
}

struct ggml_tensor * ggml_soft_max_back(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_soft_max_back_impl(ctx, a, b, false);
}

struct ggml_tensor * ggml_soft_max_back_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_soft_max_back_impl(ctx, a, b, true);
}

// ggml_rope

static struct ggml_tensor * ggml_rope_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        int n_dims,
        int mode,
        int n_ctx,
        int n_orig_ctx,
        float freq_base,
        float freq_scale,
        float ext_factor,
        float attn_factor,
        float beta_fast,
        float beta_slow,
        float xpos_base,
        bool xpos_down,
        bool inplace) {
    GGML_ASSERT(ggml_is_vector(b));
    GGML_ASSERT(b->type == GGML_TYPE_I32);
    GGML_ASSERT(a->ne[2] == b->ne[0]);
    bool is_node = false;
    if (a->grad) {
        is_node = true;
    }
    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
    int32_t params[13] = { /*n_past*/ 0, n_dims, mode, n_ctx, n_orig_ctx };
    memcpy(params + 5, &freq_base, sizeof(float));
    memcpy(params + 6, &freq_scale, sizeof(float));
    memcpy(params + 7, &ext_factor, sizeof(float));
    memcpy(params + 8, &attn_factor, sizeof(float));
    memcpy(params + 9, &beta_fast, sizeof(float));
    memcpy(params + 10, &beta_slow, sizeof(float));
    memcpy(params + 11, &xpos_base, sizeof(float));
    memcpy(params + 12, &xpos_down, sizeof(bool));
    ggml_set_op_params(result, params, sizeof(params));
    result->op = GGML_OP_ROPE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;
    return result;
}

struct ggml_tensor * ggml_rope(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        int n_dims,
        int mode,
        int n_ctx) {
    return ggml_rope_impl(
        ctx, a, b, n_dims, mode, n_ctx, 0, 10000.0f, 1.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, false, false
    );
}

struct ggml_tensor * ggml_rope_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        int n_dims,
        int mode,
        int n_ctx) {
    return ggml_rope_impl(
        ctx, a, b, n_dims, mode, n_ctx, 0, 10000.0f, 1.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, false, true
    );
}

struct ggml_tensor * ggml_rope_custom(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        int n_dims,
        int mode,
        int n_ctx,
        int n_orig_ctx,
        float freq_base,
        float freq_scale,
        float ext_factor,
        float attn_factor,
        float beta_fast,
        float beta_slow) {
    return ggml_rope_impl(
        ctx, a, b, n_dims, mode, n_ctx, n_orig_ctx, freq_base, freq_scale,
        ext_factor, attn_factor, beta_fast, beta_slow, 0.0f, false, false
    );
}

struct ggml_tensor * ggml_rope_custom_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        int n_dims,
        int mode,
        int n_ctx,
        int n_orig_ctx,
        float freq_base,
        float freq_scale,
        float ext_factor,
        float attn_factor,
        float beta_fast,
        float beta_slow) {
    return ggml_rope_impl(
        ctx, a, b, n_dims, mode, n_ctx, n_orig_ctx, freq_base, freq_scale,
        ext_factor, attn_factor, beta_fast, beta_slow, 0.0f, false, true
    );
}

struct ggml_tensor * ggml_rope_xpos_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        int n_dims,
        float base,
        bool down) {
    return ggml_rope_impl(ctx, a, b, n_dims, 0, 0, 0, 10000.0f, 1.0f, 0.0f, 1.0f, 0.0f, 0.0f, base, down, true);
}

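// [illustrative sketch - not part of the original source] The rope family applies rotary
// position embeddings to the first n_dims features; b is an I32 vector with one entry per
// a->ne[2] slice (i.e. one position per token). Hypothetical usage on a query tensor:
//
//     // q: [n_embd_head, n_head, n_tokens], inp_pos: [n_tokens] (I32)
//     q = ggml_rope_custom(ctx, q, inp_pos, n_rot, /*mode=*/ 0, n_ctx, n_orig_ctx,
//             freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow);
//
// ggml_rope() is the same call with the default frequency/extrapolation parameters.
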
// ggml_rope_back

struct ggml_tensor * ggml_rope_back(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        int n_dims,
        int mode,
        int n_ctx,
        int n_orig_ctx,
        float freq_base,
        float freq_scale,
        float ext_factor,
        float attn_factor,
        float beta_fast,
        float beta_slow,
        float xpos_base,
        bool xpos_down) {
    GGML_ASSERT(ggml_is_vector(b));
    GGML_ASSERT(b->type == GGML_TYPE_I32);
    GGML_ASSERT(a->ne[2] == b->ne[0]);
    GGML_ASSERT((mode & 4) == 0 && "ggml_rope_back() for ChatGLM not implemented yet");
    bool is_node = false;
    if (a->grad) {
        is_node = false; // TODO: implement backward
    }
    struct ggml_tensor * result = ggml_dup_tensor(ctx, a);
    int32_t params[13] = { /*n_past*/ 0, n_dims, mode, n_ctx, n_orig_ctx };
    memcpy(params + 5, &freq_base, sizeof(float));
    memcpy(params + 6, &freq_scale, sizeof(float));
    memcpy(params + 7, &ext_factor, sizeof(float));
    memcpy(params + 8, &attn_factor, sizeof(float));
    memcpy(params + 9, &beta_fast, sizeof(float));
    memcpy(params + 10, &beta_slow, sizeof(float));
    memcpy(params + 11, &xpos_base, sizeof(float));
    memcpy(params + 12, &xpos_down, sizeof(bool));
    ggml_set_op_params(result, params, sizeof(params));
    result->op = GGML_OP_ROPE_BACK;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;
    return result;
}

// ggml_alibi

struct ggml_tensor * ggml_alibi(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_past,
        int n_head,
        float bias_max) {
    GGML_ASSERT(n_past >= 0);
    bool is_node = false;
    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }
    // TODO: when implement backward, fix this:
    //struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
    struct ggml_tensor * result = ggml_view_tensor(ctx, a);
    int32_t op_params[3] = { n_past, n_head };
    memcpy(op_params + 2, &bias_max, sizeof(float));
    ggml_set_op_params(result, op_params, sizeof(op_params));
    result->op = GGML_OP_ALIBI;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    return result;
}

// ggml_clamp

struct ggml_tensor * ggml_clamp(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        float min,
        float max) {
    bool is_node = false;
    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }
    // TODO: when implement backward, fix this:
    struct ggml_tensor * result = ggml_view_tensor(ctx, a);
    float params[] = { min, max };
    ggml_set_op_params(result, params, sizeof(params));
    result->op = GGML_OP_CLAMP;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    return result;
}

// ggml_conv_1d

static int64_t ggml_calc_conv_output_size(int64_t ins, int64_t ks, int s, int p, int d) {
    return (ins + 2 * p - d * (ks - 1) - 1) / s + 1;
}

GGML_API struct ggml_tensor * ggml_conv_1d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        int s0,
        int p0,
        int d0) {
    struct ggml_tensor * im2col = ggml_im2col(ctx, a, b, s0, 0, p0, 0, d0, 0, false, GGML_TYPE_F16); // [N, OL, IC * K]
    struct ggml_tensor * result =
        ggml_mul_mat(ctx,
                ggml_reshape_2d(ctx, im2col, im2col->ne[0], (im2col->ne[2] * im2col->ne[1])), // [N, OL, IC * K] => [N*OL, IC * K]
                ggml_reshape_2d(ctx, a, (a->ne[0] * a->ne[1]), a->ne[2])); // [OC,IC, K] => [OC, IC * K]
    result = ggml_reshape_3d(ctx, result, im2col->ne[1], a->ne[2], im2col->ne[2]); // [N, OC, OL]
    return result;
}

// ggml_conv_1d_ph

struct ggml_tensor * ggml_conv_1d_ph(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        int s,
        int d) {
    return ggml_conv_1d(ctx, a, b, s, a->ne[0] / 2, d);
}

// ggml_conv_transpose_1d

static int64_t ggml_calc_conv_transpose_1d_output_size(int64_t ins, int64_t ks, int s, int p, int d) {
    return (ins - 1) * s - 2 * p + d * (ks - 1) + 1;
}

GGML_API struct ggml_tensor * ggml_conv_transpose_1d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        int s0,
        int p0,
        int d0) {
    GGML_ASSERT(ggml_is_matrix(b));
    GGML_ASSERT(a->ne[2] == b->ne[1]);
    GGML_ASSERT(a->ne[3] == 1);
    GGML_ASSERT(p0 == 0);
    GGML_ASSERT(d0 == 1);
    bool is_node = false;
    if (a->grad || b->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }
    const int64_t ne[4] = {
        ggml_calc_conv_transpose_1d_output_size(b->ne[0], a->ne[0], s0, 0 /*p0*/, 1 /*d0*/),
        a->ne[1], b->ne[2], 1,
    };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);
    int32_t params[] = { s0, p0, d0 };
    ggml_set_op_params(result, params, sizeof(params));
    result->op = GGML_OP_CONV_TRANSPOSE_1D;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;
    return result;
}

// ggml_conv_depthwise

struct ggml_tensor * ggml_conv_depthwise_2d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        int s0,
        int s1,
        int p0,
        int p1,
        int d0,
        int d1) {
    struct ggml_tensor * new_a = ggml_reshape_4d(ctx, a, a->ne[0], a->ne[1], 1, a->ne[2] * a->ne[3]);
    struct ggml_tensor * im2col = ggml_im2col(ctx, new_a,
            ggml_reshape_4d(ctx, b, b->ne[0], b->ne[1], 1, b->ne[2] * b->ne[3]),
            s0, s1, p0, p1, d0, d1, true, GGML_TYPE_F16); // [N * IC, OH, OW, KH * KW]
    struct ggml_tensor * new_b = ggml_reshape_4d(ctx, im2col, im2col->ne[0], im2col->ne[2] * im2col->ne[1], b->ne[2], b->ne[3]); // [N * IC, OH, OW, KH * KW] => [N, IC, OH * OW, KH * KW]
    new_a = ggml_reshape_4d(ctx, new_a, (new_a->ne[0] * new_a->ne[1]), new_a->ne[2], new_a->ne[3], 1); // [OC,1, KH, KW] => [1, OC, 1, KH * KW]
    struct ggml_tensor * result = ggml_mul_mat(ctx, new_a, new_b);
    result = ggml_reshape_4d(ctx, result, im2col->ne[1], im2col->ne[2], b->ne[2], b->ne[3]); // [N, OC, OH, OW]
    return result;
}

// ggml_conv_2d

// im2col: [N, IC, IH, IW] => [N, OH, OW, IC*KH*KW]
// a: [OC,IC, KH, KW]
// b: [N, IC, IH, IW]
// result: [N, OH, OW, IC*KH*KW]
struct ggml_tensor * ggml_im2col(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        int s0,
        int s1,
        int p0,
        int p1,
        int d0,
        int d1,
        bool is_2D,
        enum ggml_type dst_type) {
    if (is_2D) {
        GGML_ASSERT(a->ne[2] == b->ne[2]);
    } else {
        GGML_ASSERT(a->ne[1] == b->ne[1]);
    }
    bool is_node = false;
    if (a->grad || b->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }
    const int64_t OH = is_2D ? ggml_calc_conv_output_size(b->ne[1], a->ne[1], s1, p1, d1) : 0;
    const int64_t OW = ggml_calc_conv_output_size(b->ne[0], a->ne[0], s0, p0, d0);
    const int64_t ne[4] = {
        is_2D ? (a->ne[2] * a->ne[1] * a->ne[0]) : a->ne[1] * a->ne[0],
        OW,
        is_2D ? OH : b->ne[2],
        is_2D ? b->ne[3] : 1,
    };
    struct ggml_tensor * result = ggml_new_tensor(ctx, dst_type, 4, ne);
    int32_t params[] = { s0, s1, p0, p1, d0, d1, (is_2D ? 1 : 0) };
    ggml_set_op_params(result, params, sizeof(params));
    result->op = GGML_OP_IM2COL;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;
    return result;
}

// a: [OC,IC, KH, KW]
// b: [N, IC, IH, IW]
// result: [N, OC, OH, OW]
struct ggml_tensor * ggml_conv_2d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        int s0,
        int s1,
        int p0,
        int p1,
        int d0,
        int d1) {
    struct ggml_tensor * im2col = ggml_im2col(ctx, a, b, s0, s1, p0, p1, d0, d1, true, GGML_TYPE_F16); // [N, OH, OW, IC * KH * KW]
    struct ggml_tensor * result =
        ggml_mul_mat(ctx,
                ggml_reshape_2d(ctx, im2col, im2col->ne[0], im2col->ne[3] * im2col->ne[2] * im2col->ne[1]), // [N, OH, OW, IC * KH * KW] => [N*OH*OW, IC * KH * KW]
                ggml_reshape_2d(ctx, a, (a->ne[0] * a->ne[1] * a->ne[2]), a->ne[3])); // [OC,IC, KH, KW] => [OC, IC * KH * KW]
    result = ggml_reshape_4d(ctx, result, im2col->ne[1], im2col->ne[2], a->ne[3], im2col->ne[3]); // [N, OC, OH, OW]
    return result;
}

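// [illustrative sketch - not part of the original source] ggml_conv_2d is im2col followed by a
// matrix multiplication, so kernel and image only meet through the [IC*KH*KW, ...] intermediate.
// Hypothetical shapes, using the same [N, C, H, W]-style notation as the comments above:
//
//     // a (kernel): [OC, IC, KH, KW], b (image): [N, IC, IH, IW]
//     struct ggml_tensor * y = ggml_conv_2d(ctx, a, b, /*s0*/ 1, /*s1*/ 1, /*p0*/ 1, /*p1*/ 1, /*d0*/ 1, /*d1*/ 1);
//     // y: [N, OC, OH, OW], with OH/OW given by ggml_calc_conv_output_size()
//
// The im2col intermediate is produced in F16 to halve its memory footprint.
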
// ggml_conv_2d_sk_p0

struct ggml_tensor * ggml_conv_2d_sk_p0(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_conv_2d(ctx, a, b, a->ne[0], a->ne[1], 0, 0, 1, 1);
}

// ggml_conv_2d_s1_ph

struct ggml_tensor * ggml_conv_2d_s1_ph(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_conv_2d(ctx, a, b, 1, 1, a->ne[0] / 2, a->ne[1] / 2, 1, 1);
}

// ggml_conv_transpose_2d_p0

static int64_t ggml_calc_conv_transpose_output_size(int64_t ins, int64_t ks, int s, int p) {
    return (ins - 1) * s - 2 * p + ks;
}

struct ggml_tensor * ggml_conv_transpose_2d_p0(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        int stride) {
    GGML_ASSERT(a->ne[3] == b->ne[2]);
    bool is_node = false;
    if (a->grad || b->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }
    const int64_t ne[4] = {
        ggml_calc_conv_transpose_output_size(b->ne[0], a->ne[0], stride, 0 /*p0*/),
        ggml_calc_conv_transpose_output_size(b->ne[1], a->ne[1], stride, 0 /*p1*/),
        a->ne[2], b->ne[3],
    };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);
    ggml_set_op_params_i32(result, 0, stride);
    result->op = GGML_OP_CONV_TRANSPOSE_2D;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;
    return result;
}

// ggml_pool_*

static int64_t ggml_calc_pool_output_size(int64_t ins, int ks, int s, float p) {
    return (ins + 2 * p - ks) / s + 1;
}

// ggml_pool_1d

struct ggml_tensor * ggml_pool_1d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        enum ggml_op_pool op,
        int k0,
        int s0,
        int p0) {
    bool is_node = false;
    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }
    const int64_t ne[2] = {
        ggml_calc_pool_output_size(a->ne[0], k0, s0, p0),
        a->ne[1],
    };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 2, ne);
    int32_t params[] = { op, k0, s0, p0 };
    ggml_set_op_params(result, params, sizeof(params));
    result->op = GGML_OP_POOL_1D;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    return result;
}

// ggml_pool_2d

struct ggml_tensor * ggml_pool_2d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        enum ggml_op_pool op,
        int k0,
        int k1,
        int s0,
        int s1,
        float p0,
        float p1) {
    bool is_node = false;
    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }
    struct ggml_tensor * result;
    const int64_t ne[3] = {
        ggml_calc_pool_output_size(a->ne[0], k0, s0, p0),
        ggml_calc_pool_output_size(a->ne[1], k1, s1, p1),
        a->ne[2],
    };
    result = ggml_new_tensor(ctx, GGML_TYPE_F32, 3, ne);
    int32_t params[] = { op, k0, k1, s0, s1, p0, p1 };
    ggml_set_op_params(result, params, sizeof(params));
    result->op = GGML_OP_POOL_2D;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    return result;
}

// ggml_upscale

static struct ggml_tensor * ggml_upscale_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int scale_factor) {
    bool is_node = false;
    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }
    struct ggml_tensor * result = ggml_new_tensor_4d(ctx, a->type,
            a->ne[0] * scale_factor,
            a->ne[1] * scale_factor,
            a->ne[2], a->ne[3]);
    result->op = GGML_OP_UPSCALE;
    result->op_params[0] = scale_factor;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    return result;
}

struct ggml_tensor * ggml_pad(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int p0, int p1, int p2, int p3) {
    bool is_node = false;
    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }
    struct ggml_tensor * result = ggml_new_tensor_4d(ctx, a->type,
            a->ne[0] + p0,
            a->ne[1] + p1,
            a->ne[2] + p2,
            a->ne[3] + p3);
    result->op = GGML_OP_PAD;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    return result;
}

struct ggml_tensor * ggml_upscale(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int scale_factor) {
    return ggml_upscale_impl(ctx, a, scale_factor);
}

// ggml_argsort

struct ggml_tensor * ggml_argsort(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        enum ggml_sort_order order) {
    bool is_node = false;
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_I32, GGML_MAX_DIMS, a->ne);
    ggml_set_op_params_i32(result, 0, (int32_t) order);
    result->op = GGML_OP_ARGSORT;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    return result;
}

// ggml_top_k

struct ggml_tensor * ggml_top_k(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int k) {
    GGML_ASSERT(a->ne[0] >= k);
    struct ggml_tensor * result = ggml_argsort(ctx, a, GGML_SORT_DESC);
    result = ggml_view_4d(ctx, result,
            k, result->ne[1], result->ne[2], result->ne[3],
            result->nb[1], result->nb[2], result->nb[3],
            0);
    return result;
}

// ggml_flash_attn

struct ggml_tensor * ggml_flash_attn(
        struct ggml_context * ctx,
        struct ggml_tensor * q,
        struct ggml_tensor * k,
        struct ggml_tensor * v,
        bool masked) {
    GGML_ASSERT(ggml_can_mul_mat(k, q));
    // TODO: check if vT can be multiplied by (k*qT)
    bool is_node = false;
    if (q->grad || k->grad || v->grad) {
        is_node = true;
    }
    //struct ggml_tensor * result = ggml_dup_tensor(ctx, q);
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, GGML_MAX_DIMS, q->ne);
    int32_t t = masked ? 1 : 0;
    ggml_set_op_params(result, &t, sizeof(t));
    result->op = GGML_OP_FLASH_ATTN;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = q;
    result->src[1] = k;
    result->src[2] = v;
    return result;
}

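// [illustrative sketch - not part of the original source] ggml_flash_attn fuses scaled
// dot-product attention (softmax(q*k^T)*v) into one op; the result has the shape of q and
// is F32. Hypothetical usage (k and v are assumed to be laid out as described by the shape
// comments in ggml_flash_attn_back below, with v effectively transposed):
//
//     struct ggml_tensor * kqv = ggml_flash_attn(ctx, q, k, v, /*masked=*/ true);
//
// masked == true applies the causal mask inside the kernel instead of a separate
// ggml_diag_mask_inf step.
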
// ggml_flash_ff

struct ggml_tensor * ggml_flash_ff(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b0,
        struct ggml_tensor * b1,
        struct ggml_tensor * c0,
        struct ggml_tensor * c1) {
    GGML_ASSERT(ggml_can_mul_mat(b0, a));
    // TODO: more checks
    bool is_node = false;
    if (a->grad || b0->grad || b1->grad || c0->grad || c1->grad) {
        is_node = true;
    }
    //struct ggml_tensor * result = ggml_dup_tensor(ctx, a);
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, GGML_MAX_DIMS, a->ne);
    result->op = GGML_OP_FLASH_FF;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b0;
    result->src[2] = b1;
    result->src[3] = c0;
    result->src[4] = c1;
    return result;
}

// ggml_flash_attn_back

struct ggml_tensor * ggml_flash_attn_back(
        struct ggml_context * ctx,
        struct ggml_tensor * q,
        struct ggml_tensor * k,
        struct ggml_tensor * v,
        struct ggml_tensor * d,
        bool masked) {
    GGML_ASSERT(ggml_can_mul_mat(k, q));
    // TODO: check if vT can be multiplied by (k*qT)
    // d shape [D,N,ne2,ne3]
    // q shape [D,N,ne2,ne3]
    // k shape [D,M,kvne2,ne3]
    // v shape [M,D,kvne2,ne3]
    const int64_t D = q->ne[0];
    const int64_t N = q->ne[1];
    const int64_t M = k->ne[1];
    const int64_t ne2 = q->ne[2];
    const int64_t ne3 = q->ne[3];
    const int64_t kvne2 = k->ne[2];
    GGML_ASSERT(k->ne[0] == D);
    GGML_ASSERT(v->ne[0] == M);
    GGML_ASSERT(v->ne[1] == D);
    GGML_ASSERT(d->ne[0] == D);
    GGML_ASSERT(d->ne[1] == N);
    GGML_ASSERT(k->ne[2] == kvne2);
    GGML_ASSERT(k->ne[3] == ne3);
    GGML_ASSERT(v->ne[2] == kvne2);
    GGML_ASSERT(v->ne[3] == ne3);
    GGML_ASSERT(d->ne[2] == ne2);
    GGML_ASSERT(d->ne[3] == ne3);
    GGML_ASSERT(ne2 % kvne2 == 0);
    bool is_node = false;
    if (q->grad || k->grad || v->grad) {
        // when using this operation (in backwards pass) these grads are set.
        // we don't want to create (big) grad of our result, so is_node is false.
        is_node = false;
    }
    // store gradients of q, k and v as continuous tensors concatenated in result.
    // note: v and gradv are actually transposed, i.e. v->ne[0] != D.
    const int64_t elem_q = ggml_nelements(q);
    const int64_t elem_k = ggml_nelements(k);
    const int64_t elem_v = ggml_nelements(v);
    enum ggml_type result_type = GGML_TYPE_F32;
    GGML_ASSERT(ggml_blck_size(result_type) == 1);
    const size_t tsize = ggml_type_size(result_type);
    const size_t offs_q = 0;
    const size_t offs_k = offs_q + GGML_PAD(elem_q * tsize, GGML_MEM_ALIGN);
    const size_t offs_v = offs_k + GGML_PAD(elem_k * tsize, GGML_MEM_ALIGN);
    const size_t end = offs_v + GGML_PAD(elem_v * tsize, GGML_MEM_ALIGN);
    const size_t nelements = (end + tsize - 1)/tsize;
    struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nelements);
    int32_t masked_i = masked ? 1 : 0;
    ggml_set_op_params(result, &masked_i, sizeof(masked_i));
    result->op = GGML_OP_FLASH_ATTN_BACK;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = q;
    result->src[1] = k;
    result->src[2] = v;
    result->src[3] = d;
    return result;
}

// ggml_win_part

struct ggml_tensor * ggml_win_part(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int w) {
    GGML_ASSERT(a->ne[3] == 1);
    GGML_ASSERT(a->type == GGML_TYPE_F32);
    bool is_node = false;
    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }
    // padding
    const int px = (w - a->ne[1]%w)%w;
    const int py = (w - a->ne[2]%w)%w;
    const int npx = (px + a->ne[1])/w;
    const int npy = (py + a->ne[2])/w;
    const int np = npx*npy;
    const int64_t ne[4] = { a->ne[0], w, w, np, };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);
    int32_t params[] = { npx, npy, w };
    ggml_set_op_params(result, params, sizeof(params));
    result->op = GGML_OP_WIN_PART;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    return result;
}

// ggml_win_unpart

struct ggml_tensor * ggml_win_unpart(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int w0,
        int h0,
        int w) {
    GGML_ASSERT(a->type == GGML_TYPE_F32);
    bool is_node = false;
    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }
    const int64_t ne[4] = { a->ne[0], w0, h0, 1, };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 3, ne);
    int32_t params[] = { w };
    ggml_set_op_params(result, params, sizeof(params));
    result->op = GGML_OP_WIN_UNPART;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    return result;
}

// ggml_get_rel_pos

struct ggml_tensor * ggml_get_rel_pos(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int qh,
        int kh) {
    GGML_ASSERT(qh == kh);
    GGML_ASSERT(2*MAX(qh, kh) - 1 == a->ne[1]);
    bool is_node = false;
    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }
    const int64_t ne[4] = { a->ne[0], kh, qh, 1, };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F16, 3, ne);
    result->op = GGML_OP_GET_REL_POS;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    return result;
}

// ggml_add_rel_pos

static struct ggml_tensor * ggml_add_rel_pos_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * pw,
        struct ggml_tensor * ph,
        bool inplace) {
    GGML_ASSERT(ggml_are_same_shape(pw, ph));
    GGML_ASSERT(ggml_is_contiguous(a));
    GGML_ASSERT(ggml_is_contiguous(pw));
    GGML_ASSERT(ggml_is_contiguous(ph));
    GGML_ASSERT(ph->type == GGML_TYPE_F32);
    GGML_ASSERT(pw->type == GGML_TYPE_F32);
    GGML_ASSERT(pw->ne[3] == a->ne[2]);
    GGML_ASSERT(pw->ne[0]*pw->ne[0] == a->ne[0]);
    GGML_ASSERT(pw->ne[1]*pw->ne[2] == a->ne[1]);
    bool is_node = false;
    if (!inplace && (a->grad || pw->grad || ph->grad)) {
        is_node = true;
    }
    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
    ggml_set_op_params_i32(result, 0, inplace ? 1 : 0);
    result->op = GGML_OP_ADD_REL_POS;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = pw;
    result->src[2] = ph;
    return result;
}

struct ggml_tensor * ggml_add_rel_pos(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * pw,
        struct ggml_tensor * ph) {
    return ggml_add_rel_pos_impl(ctx, a, pw, ph, false);
}

struct ggml_tensor * ggml_add_rel_pos_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * pw,
        struct ggml_tensor * ph) {
    return ggml_add_rel_pos_impl(ctx, a, pw, ph, true);
}

// ggml_unary

static struct ggml_tensor * ggml_unary_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        enum ggml_unary_op op,
        bool inplace) {
    bool is_node = false;
    if (!inplace && (a->grad)) {
        is_node = true;
    }
    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
    ggml_set_op_params_i32(result, 0, (int32_t) op);
    result->op = GGML_OP_UNARY;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    return result;
}

struct ggml_tensor * ggml_unary(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        enum ggml_unary_op op) {
    return ggml_unary_impl(ctx, a, op, false);
}

struct ggml_tensor * ggml_unary_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        enum ggml_unary_op op) {
    return ggml_unary_impl(ctx, a, op, true);
}

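// [illustrative sketch - not part of the original source] ggml_unary routes all element-wise
// activations through a single op, with the concrete function stored in op_params[0].
// Convenience wrappers such as ggml_gelu/ggml_silu reduce to calls like:
//
//     struct ggml_tensor * cur = ggml_unary(ctx, x, GGML_UNARY_OP_GELU);
//     // or, reusing the input buffer:
//     cur = ggml_unary_inplace(ctx, x, GGML_UNARY_OP_SILU);
//
// where x is any hypothetical F32 activation tensor.
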
  5058. // ggml_map_unary
  5059. static struct ggml_tensor * ggml_map_unary_impl_f32(
  5060. struct ggml_context * ctx,
  5061. struct ggml_tensor * a,
  5062. const ggml_unary_op_f32_t fun,
  5063. bool inplace) {
  5064. bool is_node = false;
  5065. if (!inplace && a->grad) {
  5066. is_node = true;
  5067. }
  5068. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  5069. ggml_set_op_params(result, (const void *) &fun, sizeof(fun));
  5070. result->op = GGML_OP_MAP_UNARY;
  5071. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5072. result->src[0] = a;
  5073. return result;
  5074. }
  5075. struct ggml_tensor * ggml_map_unary_f32(
  5076. struct ggml_context * ctx,
  5077. struct ggml_tensor * a,
  5078. const ggml_unary_op_f32_t fun) {
  5079. return ggml_map_unary_impl_f32(ctx, a, fun, false);
  5080. }
  5081. struct ggml_tensor * ggml_map_unary_inplace_f32(
  5082. struct ggml_context * ctx,
  5083. struct ggml_tensor * a,
  5084. const ggml_unary_op_f32_t fun) {
  5085. return ggml_map_unary_impl_f32(ctx, a, fun, true);
  5086. }
  5087. // ggml_map_binary
  5088. static struct ggml_tensor * ggml_map_binary_impl_f32(
  5089. struct ggml_context * ctx,
  5090. struct ggml_tensor * a,
  5091. struct ggml_tensor * b,
  5092. const ggml_binary_op_f32_t fun,
  5093. bool inplace) {
  5094. GGML_ASSERT(ggml_are_same_shape(a, b));
  5095. bool is_node = false;
  5096. if (!inplace && (a->grad || b->grad)) {
  5097. is_node = true;
  5098. }
  5099. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  5100. ggml_set_op_params(result, (const void *) &fun, sizeof(fun));
  5101. result->op = GGML_OP_MAP_BINARY;
  5102. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5103. result->src[0] = a;
  5104. result->src[1] = b;
  5105. return result;
  5106. }
  5107. struct ggml_tensor * ggml_map_binary_f32(
  5108. struct ggml_context * ctx,
  5109. struct ggml_tensor * a,
  5110. struct ggml_tensor * b,
  5111. const ggml_binary_op_f32_t fun) {
  5112. return ggml_map_binary_impl_f32(ctx, a, b, fun, false);
  5113. }
  5114. struct ggml_tensor * ggml_map_binary_inplace_f32(
  5115. struct ggml_context * ctx,
  5116. struct ggml_tensor * a,
  5117. struct ggml_tensor * b,
  5118. const ggml_binary_op_f32_t fun) {
  5119. return ggml_map_binary_impl_f32(ctx, a, b, fun, true);
  5120. }
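// Hedged example (illustrative only): the binary variant takes a second operand of
// the same shape; the callback gets an element count and three float pointers.
// A hypothetical element-wise maximum:
//
//   static void my_max(const int n, float * dst, const float * a, const float * b) {
//       for (int i = 0; i < n; i++) {
//           dst[i] = a[i] > b[i] ? a[i] : b[i];
//       }
//   }
//
//   struct ggml_tensor * m = ggml_map_binary_f32(ctx, x, y, my_max);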
  5121. // ggml_map_custom1_f32
  5122. static struct ggml_tensor * ggml_map_custom1_impl_f32(
  5123. struct ggml_context * ctx,
  5124. struct ggml_tensor * a,
  5125. const ggml_custom1_op_f32_t fun,
  5126. bool inplace) {
  5127. bool is_node = false;
  5128. if (!inplace && a->grad) {
  5129. is_node = true;
  5130. }
  5131. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  5132. ggml_set_op_params(result, (const void *) &fun, sizeof(fun));
  5133. result->op = GGML_OP_MAP_CUSTOM1_F32;
  5134. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5135. result->src[0] = a;
  5136. return result;
  5137. }
  5138. struct ggml_tensor * ggml_map_custom1_f32(
  5139. struct ggml_context * ctx,
  5140. struct ggml_tensor * a,
  5141. const ggml_custom1_op_f32_t fun) {
  5142. return ggml_map_custom1_impl_f32(ctx, a, fun, false);
  5143. }
  5144. struct ggml_tensor * ggml_map_custom1_inplace_f32(
  5145. struct ggml_context * ctx,
  5146. struct ggml_tensor * a,
  5147. const ggml_custom1_op_f32_t fun) {
  5148. return ggml_map_custom1_impl_f32(ctx, a, fun, true);
  5149. }
  5150. // ggml_map_custom2_f32
  5151. static struct ggml_tensor * ggml_map_custom2_impl_f32(
  5152. struct ggml_context * ctx,
  5153. struct ggml_tensor * a,
  5154. struct ggml_tensor * b,
  5155. const ggml_custom2_op_f32_t fun,
  5156. bool inplace) {
  5157. bool is_node = false;
  5158. if (!inplace && (a->grad || b->grad)) {
  5159. is_node = true;
  5160. }
  5161. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  5162. ggml_set_op_params(result, (const void *) &fun, sizeof(fun));
  5163. result->op = GGML_OP_MAP_CUSTOM2_F32;
  5164. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5165. result->src[0] = a;
  5166. result->src[1] = b;
  5167. return result;
  5168. }
  5169. struct ggml_tensor * ggml_map_custom2_f32(
  5170. struct ggml_context * ctx,
  5171. struct ggml_tensor * a,
  5172. struct ggml_tensor * b,
  5173. const ggml_custom2_op_f32_t fun) {
  5174. return ggml_map_custom2_impl_f32(ctx, a, b, fun, false);
  5175. }
  5176. struct ggml_tensor * ggml_map_custom2_inplace_f32(
  5177. struct ggml_context * ctx,
  5178. struct ggml_tensor * a,
  5179. struct ggml_tensor * b,
  5180. const ggml_custom2_op_f32_t fun) {
  5181. return ggml_map_custom2_impl_f32(ctx, a, b, fun, true);
  5182. }
  5183. // ggml_map_custom3_f32
  5184. static struct ggml_tensor * ggml_map_custom3_impl_f32(
  5185. struct ggml_context * ctx,
  5186. struct ggml_tensor * a,
  5187. struct ggml_tensor * b,
  5188. struct ggml_tensor * c,
  5189. const ggml_custom3_op_f32_t fun,
  5190. bool inplace) {
  5191. bool is_node = false;
  5192. if (!inplace && (a->grad || b->grad || c->grad)) {
  5193. is_node = true;
  5194. }
  5195. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  5196. ggml_set_op_params(result, (const void *) &fun, sizeof(fun));
  5197. result->op = GGML_OP_MAP_CUSTOM3_F32;
  5198. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5199. result->src[0] = a;
  5200. result->src[1] = b;
  5201. result->src[2] = c;
  5202. return result;
  5203. }
  5204. struct ggml_tensor * ggml_map_custom3_f32(
  5205. struct ggml_context * ctx,
  5206. struct ggml_tensor * a,
  5207. struct ggml_tensor * b,
  5208. struct ggml_tensor * c,
  5209. const ggml_custom3_op_f32_t fun) {
  5210. return ggml_map_custom3_impl_f32(ctx, a, b, c, fun, false);
  5211. }
  5212. struct ggml_tensor * ggml_map_custom3_inplace_f32(
  5213. struct ggml_context * ctx,
  5214. struct ggml_tensor * a,
  5215. struct ggml_tensor * b,
  5216. struct ggml_tensor * c,
  5217. const ggml_custom3_op_f32_t fun) {
  5218. return ggml_map_custom3_impl_f32(ctx, a, b, c, fun, true);
  5219. }
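// Hedged note (not from the source): unlike the row-wise map_unary/map_binary
// callbacks, the custom1/2/3_f32 callbacks receive whole tensors, e.g.
// void fun(struct ggml_tensor * dst, const struct ggml_tensor * a), and are expected
// to iterate over the data themselves; the ggml_map_custom1/2/3 API below extends
// this with n_tasks and userdata so custom ops can be multi-threaded.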
  5220. // ggml_map_custom1
  5221. struct ggml_map_custom1_op_params {
  5222. ggml_custom1_op_t fun;
  5223. int n_tasks;
  5224. void * userdata;
  5225. };
  5226. static struct ggml_tensor * ggml_map_custom1_impl(
  5227. struct ggml_context * ctx,
  5228. struct ggml_tensor * a,
  5229. const ggml_custom1_op_t fun,
  5230. int n_tasks,
  5231. void * userdata,
  5232. bool inplace) {
  5233. GGML_ASSERT(n_tasks == GGML_N_TASKS_MAX || n_tasks > 0);
  5234. bool is_node = false;
  5235. if (!inplace && a->grad) {
  5236. is_node = true;
  5237. }
  5238. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  5239. struct ggml_map_custom1_op_params params = {
  5240. /*.fun =*/ fun,
  5241. /*.n_tasks =*/ n_tasks,
  5242. /*.userdata =*/ userdata
  5243. };
  5244. ggml_set_op_params(result, (const void *) &params, sizeof(params));
  5245. result->op = GGML_OP_MAP_CUSTOM1;
  5246. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5247. result->src[0] = a;
  5248. return result;
  5249. }
  5250. struct ggml_tensor * ggml_map_custom1(
  5251. struct ggml_context * ctx,
  5252. struct ggml_tensor * a,
  5253. const ggml_custom1_op_t fun,
  5254. int n_tasks,
  5255. void * userdata) {
  5256. return ggml_map_custom1_impl(ctx, a, fun, n_tasks, userdata, false);
  5257. }
  5258. struct ggml_tensor * ggml_map_custom1_inplace(
  5259. struct ggml_context * ctx,
  5260. struct ggml_tensor * a,
  5261. const ggml_custom1_op_t fun,
  5262. int n_tasks,
  5263. void * userdata) {
  5264. return ggml_map_custom1_impl(ctx, a, fun, n_tasks, userdata, true);
  5265. }
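// Hedged usage sketch (illustrative only): a ggml_custom1_op_t is invoked once per
// thread with its index ith out of nth, so the callback partitions the work itself;
// userdata is passed through unchanged. A hypothetical scale-by-constant op:
//
//   static void my_scale(struct ggml_tensor * dst, const struct ggml_tensor * a,
//                        int ith, int nth, void * userdata) {
//       const float s = *(const float *) userdata;
//       const int64_t n  = ggml_nelements(dst);
//       const int64_t dr = (n + nth - 1)/nth;          // elements per thread
//       const int64_t i0 = dr*ith;
//       const int64_t i1 = MIN(i0 + dr, n);
//       // assumes a and dst are contiguous F32, for brevity
//       const float * src = (const float *) a->data;
//       float       * out = (float       *) dst->data;
//       for (int64_t i = i0; i < i1; i++) {
//           out[i] = s*src[i];
//       }
//   }
//
//   static float scale = 0.5f;
//   struct ggml_tensor * y = ggml_map_custom1(ctx, x, my_scale, GGML_N_TASKS_MAX, &scale);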
  5266. // ggml_map_custom2
  5267. struct ggml_map_custom2_op_params {
  5268. ggml_custom2_op_t fun;
  5269. int n_tasks;
  5270. void * userdata;
  5271. };
  5272. static struct ggml_tensor * ggml_map_custom2_impl(
  5273. struct ggml_context * ctx,
  5274. struct ggml_tensor * a,
  5275. struct ggml_tensor * b,
  5276. const ggml_custom2_op_t fun,
  5277. int n_tasks,
  5278. void * userdata,
  5279. bool inplace) {
  5280. GGML_ASSERT(n_tasks == GGML_N_TASKS_MAX || n_tasks > 0);
  5281. bool is_node = false;
  5282. if (!inplace && (a->grad || b->grad)) {
  5283. is_node = true;
  5284. }
  5285. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  5286. struct ggml_map_custom2_op_params params = {
  5287. /*.fun =*/ fun,
  5288. /*.n_tasks =*/ n_tasks,
  5289. /*.userdata =*/ userdata
  5290. };
  5291. ggml_set_op_params(result, (const void *) &params, sizeof(params));
  5292. result->op = GGML_OP_MAP_CUSTOM2;
  5293. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5294. result->src[0] = a;
  5295. result->src[1] = b;
  5296. return result;
  5297. }
  5298. struct ggml_tensor * ggml_map_custom2(
  5299. struct ggml_context * ctx,
  5300. struct ggml_tensor * a,
  5301. struct ggml_tensor * b,
  5302. const ggml_custom2_op_t fun,
  5303. int n_tasks,
  5304. void * userdata) {
  5305. return ggml_map_custom2_impl(ctx, a, b, fun, n_tasks, userdata, false);
  5306. }
  5307. struct ggml_tensor * ggml_map_custom2_inplace(
  5308. struct ggml_context * ctx,
  5309. struct ggml_tensor * a,
  5310. struct ggml_tensor * b,
  5311. const ggml_custom2_op_t fun,
  5312. int n_tasks,
  5313. void * userdata) {
  5314. return ggml_map_custom2_impl(ctx, a, b, fun, n_tasks, userdata, true);
  5315. }
  5316. // ggml_map_custom3
  5317. struct ggml_map_custom3_op_params {
  5318. ggml_custom3_op_t fun;
  5319. int n_tasks;
  5320. void * userdata;
  5321. };
  5322. static struct ggml_tensor * ggml_map_custom3_impl(
  5323. struct ggml_context * ctx,
  5324. struct ggml_tensor * a,
  5325. struct ggml_tensor * b,
  5326. struct ggml_tensor * c,
  5327. const ggml_custom3_op_t fun,
  5328. int n_tasks,
  5329. void * userdata,
  5330. bool inplace) {
  5331. GGML_ASSERT(n_tasks == GGML_N_TASKS_MAX || n_tasks > 0);
  5332. bool is_node = false;
  5333. if (!inplace && (a->grad || b->grad || c->grad)) {
  5334. is_node = true;
  5335. }
  5336. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  5337. struct ggml_map_custom3_op_params params = {
  5338. /*.fun =*/ fun,
  5339. /*.n_tasks =*/ n_tasks,
  5340. /*.userdata =*/ userdata
  5341. };
  5342. ggml_set_op_params(result, (const void *) &params, sizeof(params));
  5343. result->op = GGML_OP_MAP_CUSTOM3;
  5344. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5345. result->src[0] = a;
  5346. result->src[1] = b;
  5347. result->src[2] = c;
  5348. return result;
  5349. }
  5350. struct ggml_tensor * ggml_map_custom3(
  5351. struct ggml_context * ctx,
  5352. struct ggml_tensor * a,
  5353. struct ggml_tensor * b,
  5354. struct ggml_tensor * c,
  5355. const ggml_custom3_op_t fun,
  5356. int n_tasks,
  5357. void * userdata) {
  5358. return ggml_map_custom3_impl(ctx, a, b, c, fun, n_tasks, userdata, false);
  5359. }
  5360. struct ggml_tensor * ggml_map_custom3_inplace(
  5361. struct ggml_context * ctx,
  5362. struct ggml_tensor * a,
  5363. struct ggml_tensor * b,
  5364. struct ggml_tensor * c,
  5365. const ggml_custom3_op_t fun,
  5366. int n_tasks,
  5367. void * userdata) {
  5368. return ggml_map_custom3_impl(ctx, a, b, c, fun, n_tasks, userdata, true);
  5369. }
  5370. // ggml_cross_entropy_loss
  5371. struct ggml_tensor * ggml_cross_entropy_loss(
  5372. struct ggml_context * ctx,
  5373. struct ggml_tensor * a,
  5374. struct ggml_tensor * b) {
  5375. GGML_ASSERT(ggml_are_same_shape(a, b));
  5376. bool is_node = false;
  5377. if (a->grad || b->grad) {
  5378. is_node = true;
  5379. }
  5380. struct ggml_tensor * result = ggml_new_tensor_1d(ctx, a->type, 1);
  5381. result->op = GGML_OP_CROSS_ENTROPY_LOSS;
  5382. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5383. result->src[0] = a;
  5384. result->src[1] = b;
  5385. return result;
  5386. }
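// Hedged usage sketch (illustrative only): the loss is reduced to a single element,
// so it can be read back as a scalar once the graph has been computed. Assuming
// logits and labels are F32 tensors of the same shape and gf is the forward graph:
//
//   struct ggml_tensor * loss = ggml_cross_entropy_loss(ctx, logits, labels);
//   ggml_build_forward_expand(gf, loss);
//   // ... compute the graph ...
//   const float loss_val = ggml_get_f32_1d(loss, 0);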
  5387. // ggml_cross_entropy_loss_back
  5388. struct ggml_tensor * ggml_cross_entropy_loss_back(
  5389. struct ggml_context * ctx,
  5390. struct ggml_tensor * a,
  5391. struct ggml_tensor * b,
  5392. struct ggml_tensor * c) {
  5393. GGML_ASSERT(ggml_are_same_shape(a, b));
  5394. GGML_ASSERT(ggml_is_scalar(c));
  5395. struct ggml_tensor * result = ggml_dup_tensor(ctx, a);
  5396. result->op = GGML_OP_CROSS_ENTROPY_LOSS_BACK;
  5397. result->grad = NULL;
  5398. result->src[0] = a;
  5399. result->src[1] = b;
  5400. result->src[2] = c;
  5401. return result;
  5402. }
  5403. ////////////////////////////////////////////////////////////////////////////////
  5404. void ggml_set_param(
  5405. struct ggml_context * ctx,
  5406. struct ggml_tensor * tensor) {
  5407. tensor->flags |= GGML_TENSOR_FLAG_PARAM;
  5408. GGML_ASSERT(tensor->grad == NULL);
  5409. tensor->grad = ggml_dup_tensor(ctx, tensor);
  5410. ggml_format_name(tensor->grad, "%s (grad)", tensor->name);
  5411. }
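// Hedged usage sketch (illustrative only): marking a tensor as a parameter allocates
// its gradient tensor and flags it for the backward pass / optimizer:
//
//   struct ggml_tensor * w = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_in, n_out);
//   ggml_set_param(ctx, w);   // w->grad now exists, named "<w's name> (grad)"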
  5412. // ggml_compute_forward_dup
  5413. static void ggml_compute_forward_dup_same_cont(
  5414. const struct ggml_compute_params * params,
  5415. const struct ggml_tensor * src0,
  5416. struct ggml_tensor * dst) {
  5417. GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));
  5418. GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));
  5419. GGML_ASSERT(src0->type == dst->type);
  5420. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  5421. return;
  5422. }
  5423. const size_t nb00 = src0->nb[0];
  5424. const size_t nb0 = dst->nb[0];
  5425. const int ith = params->ith; // thread index
  5426. const int nth = params->nth; // number of threads
  5427. // parallelize by elements
  5428. const int ne = ggml_nelements(dst);
  5429. const int dr = (ne + nth - 1) / nth;
  5430. const int ie0 = dr * ith;
  5431. const int ie1 = MIN(ie0 + dr, ne);
  5432. if (ie0 < ie1) {
  5433. memcpy(
  5434. ((char *) dst->data + ie0*nb0),
  5435. ((char *) src0->data + ie0*nb00),
  5436. (ie1 - ie0) * ggml_type_size(src0->type));
  5437. }
  5438. }
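// Worked example (illustrative only) of the element split above: with ne = 10 and
// nth = 4 threads, dr = (10 + 4 - 1)/4 = 3, so the per-thread ranges are
// [0,3), [3,6), [6,9) and [9,10) - the last thread simply copies fewer elements.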
  5439. static void ggml_compute_forward_dup_f16(
  5440. const struct ggml_compute_params * params,
  5441. const struct ggml_tensor * src0,
  5442. struct ggml_tensor * dst) {
  5443. GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));
  5444. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  5445. return;
  5446. }
  5447. GGML_TENSOR_UNARY_OP_LOCALS
  5448. const int ith = params->ith; // thread index
  5449. const int nth = params->nth; // number of threads
  5450. if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst) && src0->type == dst->type) {
  5451. ggml_compute_forward_dup_same_cont(params, src0, dst);
  5452. return;
  5453. }
  5454. // parallelize by rows
  5455. const int nr = ne01;
  5456. // number of rows per thread
  5457. const int dr = (nr + nth - 1) / nth;
  5458. // row range for this thread
  5459. const int ir0 = dr * ith;
  5460. const int ir1 = MIN(ir0 + dr, nr);
  5461. if (src0->type == dst->type &&
  5462. ne00 == ne0 &&
  5463. nb00 == ggml_type_size(src0->type) && nb0 == ggml_type_size(dst->type)) {
  5464. // copy by rows
  5465. const size_t rs = ne00*nb00;
  5466. for (int64_t i03 = 0; i03 < ne03; i03++) {
  5467. for (int64_t i02 = 0; i02 < ne02; i02++) {
  5468. for (int64_t i01 = ir0; i01 < ir1; i01++) {
  5469. memcpy(
  5470. ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3),
  5471. ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03),
  5472. rs);
  5473. }
  5474. }
  5475. }
  5476. return;
  5477. }
  5478. // TODO: add more special-case implementations for tensor shapes/strides that can benefit from memcpy
  5479. if (ggml_is_contiguous(dst)) {
  5480. if (nb00 == sizeof(ggml_fp16_t)) {
  5481. if (dst->type == GGML_TYPE_F16) {
  5482. size_t id = 0;
  5483. const size_t rs = ne00 * nb00;
  5484. char * dst_ptr = (char *) dst->data;
  5485. for (int i03 = 0; i03 < ne03; i03++) {
  5486. for (int i02 = 0; i02 < ne02; i02++) {
  5487. id += rs * ir0;
  5488. for (int i01 = ir0; i01 < ir1; i01++) {
  5489. const char * src0_ptr = (char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03;
  5490. memcpy(dst_ptr + id, src0_ptr, rs);
  5491. id += rs;
  5492. }
  5493. id += rs * (ne01 - ir1);
  5494. }
  5495. }
  5496. } else if (dst->type == GGML_TYPE_F32) {
  5497. size_t id = 0;
  5498. float * dst_ptr = (float *) dst->data;
  5499. for (int i03 = 0; i03 < ne03; i03++) {
  5500. for (int i02 = 0; i02 < ne02; i02++) {
  5501. id += ne00 * ir0;
  5502. for (int i01 = ir0; i01 < ir1; i01++) {
  5503. const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
  5504. for (int i00 = 0; i00 < ne00; i00++) {
  5505. dst_ptr[id] = GGML_FP16_TO_FP32(src0_ptr[i00]);
  5506. id++;
  5507. }
  5508. }
  5509. id += ne00 * (ne01 - ir1);
  5510. }
  5511. }
  5512. } else if (type_traits[dst->type].from_float) {
  5513. ggml_from_float_t const quantize_row_q = type_traits[dst->type].from_float;
  5514. float * src0_f32 = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith;
  5515. size_t id = 0;
  5516. size_t rs = nb0 * (ne00 / ggml_blck_size(dst->type));
  5517. char * dst_ptr = (char *) dst->data;
  5518. for (int i03 = 0; i03 < ne03; i03++) {
  5519. for (int i02 = 0; i02 < ne02; i02++) {
  5520. id += rs * ir0;
  5521. for (int i01 = ir0; i01 < ir1; i01++) {
  5522. const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
  5523. for (int i00 = 0; i00 < ne00; i00++) {
  5524. src0_f32[i00] = GGML_FP16_TO_FP32(src0_ptr[i00]);
  5525. }
  5526. quantize_row_q(src0_f32, dst_ptr + id, ne00);
  5527. id += rs;
  5528. }
  5529. id += rs * (ne01 - ir1);
  5530. }
  5531. }
  5532. } else {
  5533. GGML_ASSERT(false); // TODO: implement
  5534. }
  5535. } else {
  5536. //printf("%s: this is not optimal - fix me\n", __func__);
  5537. if (dst->type == GGML_TYPE_F32) {
  5538. size_t id = 0;
  5539. float * dst_ptr = (float *) dst->data;
  5540. for (int i03 = 0; i03 < ne03; i03++) {
  5541. for (int i02 = 0; i02 < ne02; i02++) {
  5542. id += ne00 * ir0;
  5543. for (int i01 = ir0; i01 < ir1; i01++) {
  5544. for (int i00 = 0; i00 < ne00; i00++) {
  5545. const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  5546. dst_ptr[id] = GGML_FP16_TO_FP32(*src0_ptr);
  5547. id++;
  5548. }
  5549. }
  5550. id += ne00 * (ne01 - ir1);
  5551. }
  5552. }
  5553. } else if (dst->type == GGML_TYPE_F16) {
  5554. size_t id = 0;
  5555. ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data;
  5556. for (int i03 = 0; i03 < ne03; i03++) {
  5557. for (int i02 = 0; i02 < ne02; i02++) {
  5558. id += ne00 * ir0;
  5559. for (int i01 = ir0; i01 < ir1; i01++) {
  5560. for (int i00 = 0; i00 < ne00; i00++) {
  5561. const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  5562. dst_ptr[id] = *src0_ptr;
  5563. id++;
  5564. }
  5565. }
  5566. id += ne00 * (ne01 - ir1);
  5567. }
  5568. }
  5569. } else {
  5570. GGML_ASSERT(false); // TODO: implement
  5571. }
  5572. }
  5573. return;
  5574. }
  5575. // dst counters
  5576. int64_t i10 = 0;
  5577. int64_t i11 = 0;
  5578. int64_t i12 = 0;
  5579. int64_t i13 = 0;
  5580. if (dst->type == GGML_TYPE_F16) {
  5581. for (int64_t i03 = 0; i03 < ne03; i03++) {
  5582. for (int64_t i02 = 0; i02 < ne02; i02++) {
  5583. i10 += ne00 * ir0;
  5584. while (i10 >= ne0) {
  5585. i10 -= ne0;
  5586. if (++i11 == ne1) {
  5587. i11 = 0;
  5588. if (++i12 == ne2) {
  5589. i12 = 0;
  5590. if (++i13 == ne3) {
  5591. i13 = 0;
  5592. }
  5593. }
  5594. }
  5595. }
  5596. for (int64_t i01 = ir0; i01 < ir1; i01++) {
  5597. for (int64_t i00 = 0; i00 < ne00; i00++) {
  5598. const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  5599. char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3);
  5600. memcpy(dst_ptr, src0_ptr, sizeof(ggml_fp16_t));
5601. if (++i10 == ne0) {
5602. i10 = 0;
5603. if (++i11 == ne1) {
5604. i11 = 0;
5605. if (++i12 == ne2) {
5606. i12 = 0;
5607. if (++i13 == ne3) {
5608. i13 = 0;
  5609. }
  5610. }
  5611. }
  5612. }
  5613. }
  5614. }
  5615. i10 += ne00 * (ne01 - ir1);
  5616. while (i10 >= ne0) {
  5617. i10 -= ne0;
  5618. if (++i11 == ne1) {
  5619. i11 = 0;
  5620. if (++i12 == ne2) {
  5621. i12 = 0;
  5622. if (++i13 == ne3) {
  5623. i13 = 0;
  5624. }
  5625. }
  5626. }
  5627. }
  5628. }
  5629. }
  5630. } else if (dst->type == GGML_TYPE_F32) {
  5631. for (int64_t i03 = 0; i03 < ne03; i03++) {
  5632. for (int64_t i02 = 0; i02 < ne02; i02++) {
  5633. i10 += ne00 * ir0;
  5634. while (i10 >= ne0) {
  5635. i10 -= ne0;
  5636. if (++i11 == ne1) {
  5637. i11 = 0;
  5638. if (++i12 == ne2) {
  5639. i12 = 0;
  5640. if (++i13 == ne3) {
  5641. i13 = 0;
  5642. }
  5643. }
  5644. }
  5645. }
  5646. for (int64_t i01 = ir0; i01 < ir1; i01++) {
  5647. for (int64_t i00 = 0; i00 < ne00; i00++) {
  5648. const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  5649. char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3);
  5650. *(float *) dst_ptr = GGML_FP16_TO_FP32(*(const ggml_fp16_t *) src0_ptr);
  5651. if (++i10 == ne0) {
  5652. i10 = 0;
  5653. if (++i11 == ne1) {
  5654. i11 = 0;
  5655. if (++i12 == ne2) {
  5656. i12 = 0;
  5657. if (++i13 == ne3) {
  5658. i13 = 0;
  5659. }
  5660. }
  5661. }
  5662. }
  5663. }
  5664. }
  5665. i10 += ne00 * (ne01 - ir1);
  5666. while (i10 >= ne0) {
  5667. i10 -= ne0;
  5668. if (++i11 == ne1) {
  5669. i11 = 0;
  5670. if (++i12 == ne2) {
  5671. i12 = 0;
  5672. if (++i13 == ne3) {
  5673. i13 = 0;
  5674. }
  5675. }
  5676. }
  5677. }
  5678. }
  5679. }
  5680. } else {
  5681. GGML_ASSERT(false); // TODO: implement
  5682. }
  5683. }
  5684. static void ggml_compute_forward_dup_f32(
  5685. const struct ggml_compute_params * params,
  5686. const struct ggml_tensor * src0,
  5687. struct ggml_tensor * dst) {
  5688. GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));
  5689. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  5690. return;
  5691. }
  5692. GGML_TENSOR_UNARY_OP_LOCALS
  5693. const int ith = params->ith; // thread index
  5694. const int nth = params->nth; // number of threads
  5695. if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst) && src0->type == dst->type) {
  5696. ggml_compute_forward_dup_same_cont(params, src0, dst);
  5697. return;
  5698. }
  5699. // parallelize by rows
  5700. const int nr = ne01;
  5701. // number of rows per thread
  5702. const int dr = (nr + nth - 1) / nth;
  5703. // row range for this thread
  5704. const int ir0 = dr * ith;
  5705. const int ir1 = MIN(ir0 + dr, nr);
  5706. if (src0->type == dst->type &&
  5707. ne00 == ne0 &&
  5708. nb00 == ggml_type_size(src0->type) && nb0 == ggml_type_size(dst->type)) {
  5709. // copy by rows
  5710. const size_t rs = ne00*nb00;
  5711. for (int64_t i03 = 0; i03 < ne03; i03++) {
  5712. for (int64_t i02 = 0; i02 < ne02; i02++) {
  5713. for (int64_t i01 = ir0; i01 < ir1; i01++) {
  5714. memcpy(
  5715. ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3),
  5716. ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03),
  5717. rs);
  5718. }
  5719. }
  5720. }
  5721. return;
  5722. }
  5723. if (ggml_is_contiguous(dst)) {
  5724. // TODO: simplify
  5725. if (nb00 == sizeof(float)) {
  5726. if (dst->type == GGML_TYPE_F32) {
  5727. size_t id = 0;
  5728. const size_t rs = ne00 * nb00;
  5729. char * dst_ptr = (char *) dst->data;
  5730. for (int i03 = 0; i03 < ne03; i03++) {
  5731. for (int i02 = 0; i02 < ne02; i02++) {
  5732. id += rs * ir0;
  5733. for (int i01 = ir0; i01 < ir1; i01++) {
  5734. const char * src0_ptr = (char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03;
  5735. memcpy(dst_ptr + id, src0_ptr, rs);
  5736. id += rs;
  5737. }
  5738. id += rs * (ne01 - ir1);
  5739. }
  5740. }
  5741. } else if (type_traits[dst->type].from_float) {
  5742. ggml_from_float_t const quantize_row_q = type_traits[dst->type].from_float;
  5743. size_t id = 0;
  5744. size_t rs = nb0 * (ne00 / ggml_blck_size(dst->type));
  5745. char * dst_ptr = (char *) dst->data;
  5746. for (int i03 = 0; i03 < ne03; i03++) {
  5747. for (int i02 = 0; i02 < ne02; i02++) {
  5748. id += rs * ir0;
  5749. for (int i01 = ir0; i01 < ir1; i01++) {
  5750. const float * src0_ptr = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
  5751. quantize_row_q(src0_ptr, dst_ptr + id, ne00);
  5752. id += rs;
  5753. }
  5754. id += rs * (ne01 - ir1);
  5755. }
  5756. }
  5757. } else {
  5758. GGML_ASSERT(false); // TODO: implement
  5759. }
  5760. } else {
  5761. //printf("%s: this is not optimal - fix me\n", __func__);
  5762. if (dst->type == GGML_TYPE_F32) {
  5763. size_t id = 0;
  5764. float * dst_ptr = (float *) dst->data;
  5765. for (int i03 = 0; i03 < ne03; i03++) {
  5766. for (int i02 = 0; i02 < ne02; i02++) {
  5767. id += ne00 * ir0;
  5768. for (int i01 = ir0; i01 < ir1; i01++) {
  5769. for (int i00 = 0; i00 < ne00; i00++) {
  5770. const float * src0_ptr = (float *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  5771. dst_ptr[id] = *src0_ptr;
  5772. id++;
  5773. }
  5774. }
  5775. id += ne00 * (ne01 - ir1);
  5776. }
  5777. }
  5778. } else if (dst->type == GGML_TYPE_F16) {
  5779. size_t id = 0;
  5780. ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data;
  5781. for (int i03 = 0; i03 < ne03; i03++) {
  5782. for (int i02 = 0; i02 < ne02; i02++) {
  5783. id += ne00 * ir0;
  5784. for (int i01 = ir0; i01 < ir1; i01++) {
  5785. for (int i00 = 0; i00 < ne00; i00++) {
  5786. const float * src0_ptr = (float *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  5787. dst_ptr[id] = GGML_FP32_TO_FP16(*src0_ptr);
  5788. id++;
  5789. }
  5790. }
  5791. id += ne00 * (ne01 - ir1);
  5792. }
  5793. }
  5794. } else {
  5795. GGML_ASSERT(false); // TODO: implement
  5796. }
  5797. }
  5798. return;
  5799. }
  5800. // dst counters
  5801. int64_t i10 = 0;
  5802. int64_t i11 = 0;
  5803. int64_t i12 = 0;
  5804. int64_t i13 = 0;
  5805. if (dst->type == GGML_TYPE_F32) {
  5806. for (int64_t i03 = 0; i03 < ne03; i03++) {
  5807. for (int64_t i02 = 0; i02 < ne02; i02++) {
  5808. i10 += ne00 * ir0;
  5809. while (i10 >= ne0) {
  5810. i10 -= ne0;
  5811. if (++i11 == ne1) {
  5812. i11 = 0;
  5813. if (++i12 == ne2) {
  5814. i12 = 0;
  5815. if (++i13 == ne3) {
  5816. i13 = 0;
  5817. }
  5818. }
  5819. }
  5820. }
  5821. for (int64_t i01 = ir0; i01 < ir1; i01++) {
  5822. for (int64_t i00 = 0; i00 < ne00; i00++) {
  5823. const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  5824. char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3);
  5825. memcpy(dst_ptr, src0_ptr, sizeof(float));
  5826. if (++i10 == ne0) {
  5827. i10 = 0;
  5828. if (++i11 == ne1) {
  5829. i11 = 0;
  5830. if (++i12 == ne2) {
  5831. i12 = 0;
  5832. if (++i13 == ne3) {
  5833. i13 = 0;
  5834. }
  5835. }
  5836. }
  5837. }
  5838. }
  5839. }
  5840. i10 += ne00 * (ne01 - ir1);
  5841. while (i10 >= ne0) {
  5842. i10 -= ne0;
  5843. if (++i11 == ne1) {
  5844. i11 = 0;
  5845. if (++i12 == ne2) {
  5846. i12 = 0;
  5847. if (++i13 == ne3) {
  5848. i13 = 0;
  5849. }
  5850. }
  5851. }
  5852. }
  5853. }
  5854. }
  5855. } else if (dst->type == GGML_TYPE_F16) {
  5856. for (int64_t i03 = 0; i03 < ne03; i03++) {
  5857. for (int64_t i02 = 0; i02 < ne02; i02++) {
  5858. i10 += ne00 * ir0;
  5859. while (i10 >= ne0) {
  5860. i10 -= ne0;
  5861. if (++i11 == ne1) {
  5862. i11 = 0;
  5863. if (++i12 == ne2) {
  5864. i12 = 0;
  5865. if (++i13 == ne3) {
  5866. i13 = 0;
  5867. }
  5868. }
  5869. }
  5870. }
  5871. for (int64_t i01 = ir0; i01 < ir1; i01++) {
  5872. for (int64_t i00 = 0; i00 < ne00; i00++) {
  5873. const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  5874. char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3);
  5875. *(ggml_fp16_t *) dst_ptr = GGML_FP32_TO_FP16(*(const float *) src0_ptr);
  5876. if (++i10 == ne0) {
  5877. i10 = 0;
  5878. if (++i11 == ne1) {
  5879. i11 = 0;
  5880. if (++i12 == ne2) {
  5881. i12 = 0;
  5882. if (++i13 == ne3) {
  5883. i13 = 0;
  5884. }
  5885. }
  5886. }
  5887. }
  5888. }
  5889. }
  5890. i10 += ne00 * (ne01 - ir1);
  5891. while (i10 >= ne0) {
  5892. i10 -= ne0;
  5893. if (++i11 == ne1) {
  5894. i11 = 0;
  5895. if (++i12 == ne2) {
  5896. i12 = 0;
  5897. if (++i13 == ne3) {
  5898. i13 = 0;
  5899. }
  5900. }
  5901. }
  5902. }
  5903. }
  5904. }
  5905. } else {
  5906. GGML_ASSERT(false); // TODO: implement
  5907. }
  5908. }
5909. // A simplified version of ggml_compute_forward_dup that does not do float upcasting and just does a plain byte-wise memcpy.
  5910. static void ggml_compute_forward_dup_bytes(
  5911. const struct ggml_compute_params * params,
  5912. const struct ggml_tensor * src0,
  5913. struct ggml_tensor * dst) {
  5914. GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));
  5915. GGML_ASSERT(src0->type == dst->type);
  5916. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  5917. return;
  5918. }
  5919. if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst)) {
  5920. ggml_compute_forward_dup_same_cont(params, src0, dst);
  5921. return;
  5922. }
  5923. GGML_TENSOR_UNARY_OP_LOCALS;
  5924. const size_t type_size = ggml_type_size(src0->type);
  5925. const int ith = params->ith; // thread index
  5926. const int nth = params->nth; // number of threads
  5927. // parallelize by rows
  5928. const int nr = ne01;
  5929. // number of rows per thread
  5930. const int dr = (nr + nth - 1) / nth;
  5931. // row range for this thread
  5932. const int ir0 = dr * ith;
  5933. const int ir1 = MIN(ir0 + dr, nr);
  5934. if (src0->type == dst->type &&
  5935. ne00 == ne0 &&
  5936. nb00 == type_size && nb0 == type_size) {
  5937. // copy by rows
  5938. const size_t rs = ne00 * type_size;
  5939. for (int64_t i03 = 0; i03 < ne03; i03++) {
  5940. for (int64_t i02 = 0; i02 < ne02; i02++) {
  5941. for (int64_t i01 = ir0; i01 < ir1; i01++) {
  5942. memcpy(
  5943. ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3),
  5944. ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03),
  5945. rs);
  5946. }
  5947. }
  5948. }
  5949. return;
  5950. }
  5951. if (ggml_is_contiguous(dst)) {
  5952. size_t id = 0;
  5953. char * dst_ptr = (char *) dst->data;
  5954. const size_t rs = ne00 * type_size;
  5955. if (nb00 == type_size) {
5956. // src0 is contiguous on first dimension, copy by rows
  5957. for (int64_t i03 = 0; i03 < ne03; i03++) {
  5958. for (int64_t i02 = 0; i02 < ne02; i02++) {
  5959. id += rs * ir0;
  5960. for (int64_t i01 = ir0; i01 < ir1; i01++) {
  5961. const char * src0_ptr = (char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03;
  5962. memcpy(dst_ptr + id, src0_ptr, rs);
  5963. id += rs;
  5964. }
  5965. id += rs * (ne01 - ir1);
  5966. }
  5967. }
  5968. } else {
  5969. //printf("%s: this is not optimal - fix me\n", __func__);
  5970. for (int64_t i03 = 0; i03 < ne03; i03++) {
  5971. for (int64_t i02 = 0; i02 < ne02; i02++) {
  5972. id += rs * ir0;
  5973. for (int64_t i01 = ir0; i01 < ir1; i01++) {
  5974. for (int64_t i00 = 0; i00 < ne00; i00++) {
  5975. const char * src0_ptr = (char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03;
  5976. memcpy(dst_ptr + id, src0_ptr, type_size);
  5977. id += type_size;
  5978. }
  5979. }
  5980. id += rs * (ne01 - ir1);
  5981. }
  5982. }
  5983. }
  5984. return;
  5985. }
  5986. // dst counters
  5987. int64_t i10 = 0;
  5988. int64_t i11 = 0;
  5989. int64_t i12 = 0;
  5990. int64_t i13 = 0;
  5991. for (int64_t i03 = 0; i03 < ne03; i03++) {
  5992. for (int64_t i02 = 0; i02 < ne02; i02++) {
  5993. i10 += ne00 * ir0;
  5994. while (i10 >= ne0) {
  5995. i10 -= ne0;
  5996. if (++i11 == ne1) {
  5997. i11 = 0;
  5998. if (++i12 == ne2) {
  5999. i12 = 0;
  6000. if (++i13 == ne3) {
  6001. i13 = 0;
  6002. }
  6003. }
  6004. }
  6005. }
  6006. for (int64_t i01 = ir0; i01 < ir1; i01++) {
  6007. for (int64_t i00 = 0; i00 < ne00; i00++) {
  6008. const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  6009. char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3);
  6010. memcpy(dst_ptr, src0_ptr, type_size);
  6011. if (++i10 == ne0) {
  6012. i10 = 0;
  6013. if (++i11 == ne1) {
  6014. i11 = 0;
  6015. if (++i12 == ne2) {
  6016. i12 = 0;
  6017. if (++i13 == ne3) {
  6018. i13 = 0;
  6019. }
  6020. }
  6021. }
  6022. }
  6023. }
  6024. }
  6025. i10 += ne00 * (ne01 - ir1);
  6026. while (i10 >= ne0) {
  6027. i10 -= ne0;
  6028. if (++i11 == ne1) {
  6029. i11 = 0;
  6030. if (++i12 == ne2) {
  6031. i12 = 0;
  6032. if (++i13 == ne3) {
  6033. i13 = 0;
  6034. }
  6035. }
  6036. }
  6037. }
  6038. }
  6039. }
  6040. }
  6041. static void ggml_compute_forward_dup(
  6042. const struct ggml_compute_params * params,
  6043. const struct ggml_tensor * src0,
  6044. struct ggml_tensor * dst) {
  6045. if (src0->type == dst->type) {
  6046. ggml_compute_forward_dup_bytes(params, src0, dst);
  6047. return;
  6048. }
  6049. switch (src0->type) {
  6050. case GGML_TYPE_F16:
  6051. {
  6052. ggml_compute_forward_dup_f16(params, src0, dst);
  6053. } break;
  6054. case GGML_TYPE_F32:
  6055. {
  6056. ggml_compute_forward_dup_f32(params, src0, dst);
  6057. } break;
  6058. default:
  6059. {
  6060. GGML_ASSERT(false);
  6061. } break;
  6062. }
  6063. }
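// Hedged note (not from the source): the dispatcher prefers the byte-wise copy
// whenever src0 and dst share a type, so the conversion paths above only run for
// genuine type changes (e.g. a ggml_cpy from F32 to F16 or to a quantized type).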
  6064. // ggml_compute_forward_add
  6065. static void ggml_compute_forward_add_f32(
  6066. const struct ggml_compute_params * params,
  6067. const struct ggml_tensor * src0,
  6068. const struct ggml_tensor * src1,
  6069. struct ggml_tensor * dst) {
  6070. GGML_ASSERT(ggml_can_repeat(src1, src0) && ggml_are_same_shape(src0, dst));
  6071. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  6072. return;
  6073. }
  6074. const int ith = params->ith;
  6075. const int nth = params->nth;
  6076. #ifdef GGML_USE_CLBLAST
  6077. if (src1->backend == GGML_BACKEND_GPU) {
  6078. // TODO: OpenCL kernel support full broadcast
  6079. GGML_ASSERT(ggml_can_repeat_rows(src1, src0));
  6080. if (ith == 0) {
  6081. ggml_cl_add(src0, src1, dst);
  6082. }
  6083. return;
  6084. }
  6085. #endif
  6086. const int nr = ggml_nrows(src0);
  6087. GGML_TENSOR_BINARY_OP_LOCALS
  6088. GGML_ASSERT( nb0 == sizeof(float));
  6089. GGML_ASSERT(nb00 == sizeof(float));
  6090. // rows per thread
  6091. const int dr = (nr + nth - 1)/nth;
  6092. // row range for this thread
  6093. const int ir0 = dr*ith;
  6094. const int ir1 = MIN(ir0 + dr, nr);
  6095. if (nb10 == sizeof(float)) {
  6096. for (int ir = ir0; ir < ir1; ++ir) {
  6097. // src1 is broadcastable across src0 and dst in i1, i2, i3
  6098. const int64_t i03 = ir/(ne02*ne01);
  6099. const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
  6100. const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);
  6101. const int64_t i13 = i03 % ne13;
  6102. const int64_t i12 = i02 % ne12;
  6103. const int64_t i11 = i01 % ne11;
  6104. const int64_t nr0 = ne00 / ne10;
  6105. float * dst_ptr = (float *) ((char *) dst->data + i03*nb3 + i02*nb2 + i01*nb1 );
  6106. float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);
  6107. float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11);
  6108. for (int64_t r = 0; r < nr0; ++r) {
  6109. #ifdef GGML_USE_ACCELERATE
  6110. vDSP_vadd(src0_ptr + r*ne10, 1, src1_ptr, 1, dst_ptr + r*ne10, 1, ne10);
  6111. #else
  6112. ggml_vec_add_f32(ne10, dst_ptr + r*ne10, src0_ptr + r*ne10, src1_ptr);
  6113. #endif
  6114. }
  6115. }
  6116. } else {
  6117. // src1 is not contiguous
  6118. for (int ir = ir0; ir < ir1; ++ir) {
  6119. // src1 is broadcastable across src0 and dst in i1, i2, i3
  6120. const int64_t i03 = ir/(ne02*ne01);
  6121. const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
  6122. const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);
  6123. const int64_t i13 = i03 % ne13;
  6124. const int64_t i12 = i02 % ne12;
  6125. const int64_t i11 = i01 % ne11;
  6126. float * dst_ptr = (float *) ((char *) dst->data + i03*nb3 + i02*nb2 + i01*nb1 );
  6127. float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);
  6128. for (int64_t i0 = 0; i0 < ne0; ++i0) {
  6129. const int64_t i10 = i0 % ne10;
  6130. float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + i10*nb10);
  6131. dst_ptr[i0] = src0_ptr[i0] + *src1_ptr;
  6132. }
  6133. }
  6134. }
  6135. }
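// Worked example (illustrative only) of the broadcast indexing above: if src0 has
// ne = {8,4,2,1} and src1 has ne = {4,1,1,1}, then nr0 = ne00/ne10 = 2, so each
// 8-float src0 row gets the 4-float src1 row added twice, while
// i11 = i01 % ne11, i12 = i02 % ne12 and i13 = i03 % ne13 all evaluate to 0 and
// reuse the single src1 row for every row of src0.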
  6136. static void ggml_compute_forward_add_f16_f32(
  6137. const struct ggml_compute_params * params,
  6138. const struct ggml_tensor * src0,
  6139. const struct ggml_tensor * src1,
  6140. struct ggml_tensor * dst) {
  6141. GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
  6142. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  6143. return;
  6144. }
  6145. const int ith = params->ith;
  6146. const int nth = params->nth;
  6147. const int nr = ggml_nrows(src0);
  6148. GGML_TENSOR_BINARY_OP_LOCALS
  6149. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  6150. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  6151. if (dst->type == GGML_TYPE_F32) {
  6152. GGML_ASSERT( nb0 == sizeof(float));
  6153. }
  6154. else {
  6155. GGML_ASSERT(dst->type == GGML_TYPE_F16);
  6156. GGML_ASSERT( nb0 == sizeof(ggml_fp16_t));
  6157. }
  6158. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  6159. // rows per thread
  6160. const int dr = (nr + nth - 1)/nth;
  6161. // row range for this thread
  6162. const int ir0 = dr*ith;
  6163. const int ir1 = MIN(ir0 + dr, nr);
  6164. if (nb10 == sizeof(float)) {
  6165. if (dst->type == GGML_TYPE_F16) {
  6166. for (int ir = ir0; ir < ir1; ++ir) {
  6167. // src0, src1 and dst are same shape => same indices
  6168. const int i3 = ir/(ne2*ne1);
  6169. const int i2 = (ir - i3*ne2*ne1)/ne1;
  6170. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  6171. ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1);
  6172. ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
  6173. float * src1_ptr = (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11);
  6174. for (int i = 0; i < ne0; i++) {
  6175. dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + src1_ptr[i]);
  6176. }
  6177. }
  6178. } else {
  6179. for (int ir = ir0; ir < ir1; ++ir) {
  6180. // src0, src1 and dst are same shape => same indices
  6181. const int i3 = ir/(ne2*ne1);
  6182. const int i2 = (ir - i3*ne2*ne1)/ne1;
  6183. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  6184. float * dst_ptr = (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1);
  6185. ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
  6186. float * src1_ptr = (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11);
  6187. for (int i = 0; i < ne0; i++) {
  6188. dst_ptr[i] = GGML_FP16_TO_FP32(src0_ptr[i]) + src1_ptr[i];
  6189. }
  6190. }
  6191. }
  6192. }
  6193. else {
  6194. // src1 is not contiguous
  6195. GGML_ASSERT(false);
  6196. }
  6197. }
  6198. static void ggml_compute_forward_add_f16_f16(
  6199. const struct ggml_compute_params * params,
  6200. const struct ggml_tensor * src0,
  6201. const struct ggml_tensor * src1,
  6202. struct ggml_tensor * dst) {
  6203. GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
  6204. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  6205. return;
  6206. }
  6207. const int ith = params->ith;
  6208. const int nth = params->nth;
  6209. const int nr = ggml_nrows(src0);
  6210. GGML_TENSOR_BINARY_OP_LOCALS
  6211. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  6212. GGML_ASSERT(src1->type == GGML_TYPE_F16);
  6213. GGML_ASSERT(dst->type == GGML_TYPE_F16);
  6214. GGML_ASSERT( nb0 == sizeof(ggml_fp16_t));
  6215. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  6216. // rows per thread
  6217. const int dr = (nr + nth - 1)/nth;
  6218. // row range for this thread
  6219. const int ir0 = dr*ith;
  6220. const int ir1 = MIN(ir0 + dr, nr);
  6221. if (nb10 == sizeof(ggml_fp16_t)) {
  6222. for (int ir = ir0; ir < ir1; ++ir) {
  6223. // src0, src1 and dst are same shape => same indices
  6224. const int i3 = ir/(ne2*ne1);
  6225. const int i2 = (ir - i3*ne2*ne1)/ne1;
  6226. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  6227. ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1);
  6228. ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
  6229. ggml_fp16_t * src1_ptr = (ggml_fp16_t *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11);
  6230. for (int i = 0; i < ne0; i++) {
  6231. dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + GGML_FP16_TO_FP32(src1_ptr[i]));
  6232. }
  6233. }
  6234. }
  6235. else {
  6236. // src1 is not contiguous
  6237. GGML_ASSERT(false);
  6238. }
  6239. }
  6240. static void ggml_compute_forward_add_q_f32(
  6241. const struct ggml_compute_params * params,
  6242. const struct ggml_tensor * src0,
  6243. const struct ggml_tensor * src1,
  6244. struct ggml_tensor * dst) {
  6245. GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
  6246. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  6247. return;
  6248. }
  6249. const int nr = ggml_nrows(src0);
  6250. GGML_TENSOR_BINARY_OP_LOCALS
  6251. const int ith = params->ith;
  6252. const int nth = params->nth;
  6253. const enum ggml_type type = src0->type;
  6254. const enum ggml_type dtype = dst->type;
  6255. ggml_to_float_t const dequantize_row_q = type_traits[type].to_float;
  6256. ggml_from_float_t const quantize_row_q = type_traits[dtype].from_float;
  6257. // we don't support permuted src0 or src1
  6258. GGML_ASSERT(nb00 == ggml_type_size(type));
  6259. GGML_ASSERT(nb10 == sizeof(float));
  6260. // dst cannot be transposed or permuted
  6261. GGML_ASSERT(nb0 <= nb1);
  6262. GGML_ASSERT(nb1 <= nb2);
  6263. GGML_ASSERT(nb2 <= nb3);
  6264. GGML_ASSERT(ggml_is_quantized(src0->type));
  6265. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  6266. // rows per thread
  6267. const int dr = (nr + nth - 1)/nth;
  6268. // row range for this thread
  6269. const int ir0 = dr*ith;
  6270. const int ir1 = MIN(ir0 + dr, nr);
  6271. float * wdata = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith;
  6272. for (int ir = ir0; ir < ir1; ++ir) {
  6273. // src0 indices
  6274. const int i03 = ir/(ne02*ne01);
  6275. const int i02 = (ir - i03*ne02*ne01)/ne01;
  6276. const int i01 = (ir - i03*ne02*ne01 - i02*ne01);
  6277. // src1 and dst are same shape as src0 => same indices
  6278. const int i13 = i03;
  6279. const int i12 = i02;
  6280. const int i11 = i01;
  6281. const int i3 = i03;
  6282. const int i2 = i02;
  6283. const int i1 = i01;
  6284. void * src0_row = (void *) ((char *) src0->data + (i01*nb01 + i02*nb02 + i03*nb03));
  6285. float * src1_row = (float *)((char *) src1->data + (i11*nb11 + i12*nb12 + i13*nb13));
  6286. void * dst_row = (void *) ((char *) dst->data + ( i1*nb1 + i2*nb2 + i3*nb3));
  6287. assert(ne00 % 32 == 0);
  6288. // unquantize row from src0 to temp buffer
  6289. dequantize_row_q(src0_row, wdata, ne00);
  6290. // add src1
  6291. ggml_vec_acc_f32(ne00, wdata, src1_row);
  6292. // quantize row to dst
  6293. if (quantize_row_q != NULL) {
  6294. quantize_row_q(wdata, dst_row, ne00);
  6295. } else {
  6296. memcpy(dst_row, wdata, ne0*nb0);
  6297. }
  6298. }
  6299. }
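// Hedged summary (not from the source): the quantized path works row by row -
// dequantize a src0 row into the per-thread float scratch buffer wdata, accumulate
// the F32 src1 row into it with ggml_vec_acc_f32, then requantize into dst (or
// memcpy when dst has no from_float, i.e. it is already F32). The per-thread wdata
// stride of ne00 + CACHE_LINE_SIZE_F32 floats keeps threads off each other's cache lines.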
  6300. static void ggml_compute_forward_add(
  6301. const struct ggml_compute_params * params,
  6302. const struct ggml_tensor * src0,
  6303. const struct ggml_tensor * src1,
  6304. struct ggml_tensor * dst) {
  6305. switch (src0->type) {
  6306. case GGML_TYPE_F32:
  6307. {
  6308. if (src1->type == GGML_TYPE_F32) {
  6309. ggml_compute_forward_add_f32(params, src0, src1, dst);
  6310. }
  6311. else {
  6312. GGML_ASSERT(false);
  6313. }
  6314. } break;
  6315. case GGML_TYPE_F16:
  6316. {
  6317. if (src1->type == GGML_TYPE_F16) {
  6318. ggml_compute_forward_add_f16_f16(params, src0, src1, dst);
  6319. }
  6320. else if (src1->type == GGML_TYPE_F32) {
  6321. ggml_compute_forward_add_f16_f32(params, src0, src1, dst);
  6322. }
  6323. else {
  6324. GGML_ASSERT(false);
  6325. }
  6326. } break;
  6327. case GGML_TYPE_Q4_0:
  6328. case GGML_TYPE_Q4_1:
  6329. case GGML_TYPE_Q5_0:
  6330. case GGML_TYPE_Q5_1:
  6331. case GGML_TYPE_Q8_0:
  6332. case GGML_TYPE_Q2_K:
  6333. case GGML_TYPE_Q3_K:
  6334. case GGML_TYPE_Q4_K:
  6335. case GGML_TYPE_Q5_K:
  6336. case GGML_TYPE_Q6_K:
  6337. case GGML_TYPE_IQ2_XXS:
  6338. case GGML_TYPE_IQ2_XS:
  6339. case GGML_TYPE_IQ3_XXS:
  6340. case GGML_TYPE_IQ1_S:
  6341. case GGML_TYPE_IQ4_NL:
  6342. {
  6343. ggml_compute_forward_add_q_f32(params, src0, src1, dst);
  6344. } break;
  6345. default:
  6346. {
  6347. GGML_ASSERT(false);
  6348. } break;
  6349. }
  6350. }
  6351. // ggml_compute_forward_add1
  6352. static void ggml_compute_forward_add1_f32(
  6353. const struct ggml_compute_params * params,
  6354. const struct ggml_tensor * src0,
  6355. const struct ggml_tensor * src1,
  6356. struct ggml_tensor * dst) {
  6357. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  6358. GGML_ASSERT(ggml_is_scalar(src1));
  6359. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  6360. return;
  6361. }
  6362. const int ith = params->ith;
  6363. const int nth = params->nth;
  6364. const int nr = ggml_nrows(src0);
  6365. GGML_TENSOR_UNARY_OP_LOCALS
  6366. GGML_ASSERT( nb0 == sizeof(float));
  6367. GGML_ASSERT(nb00 == sizeof(float));
  6368. // rows per thread
  6369. const int dr = (nr + nth - 1)/nth;
  6370. // row range for this thread
  6371. const int ir0 = dr*ith;
  6372. const int ir1 = MIN(ir0 + dr, nr);
  6373. for (int ir = ir0; ir < ir1; ++ir) {
  6374. // src0 and dst are same shape => same indices
  6375. const int i3 = ir/(ne2*ne1);
  6376. const int i2 = (ir - i3*ne2*ne1)/ne1;
  6377. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  6378. #ifdef GGML_USE_ACCELERATE
  6379. UNUSED(ggml_vec_add1_f32);
  6380. vDSP_vadd(
  6381. (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01), 1,
  6382. (float *) ((char *) src1->data), 0,
  6383. (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ), 1,
  6384. ne0);
  6385. #else
  6386. ggml_vec_add1_f32(ne0,
  6387. (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ),
  6388. (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01),
  6389. *(float *) src1->data);
  6390. #endif
  6391. }
  6392. }
  6393. static void ggml_compute_forward_add1_f16_f32(
  6394. const struct ggml_compute_params * params,
  6395. const struct ggml_tensor * src0,
  6396. const struct ggml_tensor * src1,
  6397. struct ggml_tensor * dst) {
  6398. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  6399. GGML_ASSERT(ggml_is_scalar(src1));
  6400. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  6401. return;
  6402. }
  6403. // scalar to add
  6404. const float v = *(float *) src1->data;
  6405. const int ith = params->ith;
  6406. const int nth = params->nth;
  6407. const int nr = ggml_nrows(src0);
  6408. GGML_TENSOR_UNARY_OP_LOCALS
  6409. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  6410. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  6411. GGML_ASSERT(dst->type == GGML_TYPE_F16);
  6412. GGML_ASSERT( nb0 == sizeof(ggml_fp16_t));
  6413. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  6414. // rows per thread
  6415. const int dr = (nr + nth - 1)/nth;
  6416. // row range for this thread
  6417. const int ir0 = dr*ith;
  6418. const int ir1 = MIN(ir0 + dr, nr);
  6419. for (int ir = ir0; ir < ir1; ++ir) {
  6420. // src0 and dst are same shape => same indices
  6421. const int i3 = ir/(ne2*ne1);
  6422. const int i2 = (ir - i3*ne2*ne1)/ne1;
  6423. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  6424. ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 );
  6425. ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
  6426. for (int i = 0; i < ne0; i++) {
  6427. dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + v);
  6428. }
  6429. }
  6430. }
  6431. static void ggml_compute_forward_add1_f16_f16(
  6432. const struct ggml_compute_params * params,
  6433. const struct ggml_tensor * src0,
  6434. const struct ggml_tensor * src1,
  6435. struct ggml_tensor * dst) {
  6436. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  6437. GGML_ASSERT(ggml_is_scalar(src1));
  6438. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  6439. return;
  6440. }
  6441. // scalar to add
  6442. const float v = GGML_FP16_TO_FP32(*(ggml_fp16_t *) src1->data);
  6443. const int ith = params->ith;
  6444. const int nth = params->nth;
  6445. const int nr = ggml_nrows(src0);
  6446. GGML_TENSOR_UNARY_OP_LOCALS
  6447. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  6448. GGML_ASSERT(src1->type == GGML_TYPE_F16);
  6449. GGML_ASSERT(dst->type == GGML_TYPE_F16);
  6450. GGML_ASSERT( nb0 == sizeof(ggml_fp16_t));
  6451. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  6452. // rows per thread
  6453. const int dr = (nr + nth - 1)/nth;
  6454. // row range for this thread
  6455. const int ir0 = dr*ith;
  6456. const int ir1 = MIN(ir0 + dr, nr);
  6457. for (int ir = ir0; ir < ir1; ++ir) {
  6458. // src0 and dst are same shape => same indices
  6459. const int i3 = ir/(ne2*ne1);
  6460. const int i2 = (ir - i3*ne2*ne1)/ne1;
  6461. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  6462. ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 );
  6463. ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
  6464. for (int i = 0; i < ne0; i++) {
  6465. dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + v);
  6466. }
  6467. }
  6468. }
  6469. static void ggml_compute_forward_add1_q_f32(
  6470. const struct ggml_compute_params * params,
  6471. const struct ggml_tensor * src0,
  6472. const struct ggml_tensor * src1,
  6473. struct ggml_tensor * dst) {
  6474. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  6475. GGML_ASSERT(ggml_is_scalar(src1));
  6476. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  6477. return;
  6478. }
  6479. // scalar to add
  6480. const float v = *(float *) src1->data;
  6481. const int ith = params->ith;
  6482. const int nth = params->nth;
  6483. const int nr = ggml_nrows(src0);
  6484. GGML_TENSOR_UNARY_OP_LOCALS
  6485. const enum ggml_type type = src0->type;
  6486. ggml_to_float_t const dequantize_row_q = type_traits[type].to_float;
  6487. ggml_from_float_t const quantize_row_q = type_traits[type].from_float;
  6488. // we don't support permuted src0
  6489. GGML_ASSERT(nb00 == ggml_type_size(type));
  6490. // dst cannot be transposed or permuted
  6491. GGML_ASSERT(nb0 <= nb1);
  6492. GGML_ASSERT(nb1 <= nb2);
  6493. GGML_ASSERT(nb2 <= nb3);
  6494. GGML_ASSERT(ggml_is_quantized(src0->type));
  6495. GGML_ASSERT(dst->type == src0->type);
  6496. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  6497. // rows per thread
  6498. const int dr = (nr + nth - 1)/nth;
  6499. // row range for this thread
  6500. const int ir0 = dr*ith;
  6501. const int ir1 = MIN(ir0 + dr, nr);
  6502. float * wdata = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32) * ith;
  6503. for (int ir = ir0; ir < ir1; ++ir) {
  6504. // src0 and dst are same shape => same indices
  6505. const int i3 = ir/(ne2*ne1);
  6506. const int i2 = (ir - i3*ne2*ne1)/ne1;
  6507. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  6508. void * src0_row = (void *) ((char *) src0->data + (i1*nb01 + i2*nb02 + i3*nb03));
6509. void * dst_row = (void *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3));
  6510. assert(ne0 % 32 == 0);
  6511. // unquantize row from src0 to temp buffer
  6512. dequantize_row_q(src0_row, wdata, ne0);
  6513. // add src1
  6514. ggml_vec_acc1_f32(ne0, wdata, v);
  6515. // quantize row to dst
  6516. quantize_row_q(wdata, dst_row, ne0);
  6517. }
  6518. }
  6519. static void ggml_compute_forward_add1(
  6520. const struct ggml_compute_params * params,
  6521. const struct ggml_tensor * src0,
  6522. const struct ggml_tensor * src1,
  6523. struct ggml_tensor * dst) {
  6524. switch (src0->type) {
  6525. case GGML_TYPE_F32:
  6526. {
  6527. ggml_compute_forward_add1_f32(params, src0, src1, dst);
  6528. } break;
  6529. case GGML_TYPE_F16:
  6530. {
  6531. if (src1->type == GGML_TYPE_F16) {
  6532. ggml_compute_forward_add1_f16_f16(params, src0, src1, dst);
  6533. }
  6534. else if (src1->type == GGML_TYPE_F32) {
  6535. ggml_compute_forward_add1_f16_f32(params, src0, src1, dst);
  6536. }
  6537. else {
  6538. GGML_ASSERT(false);
  6539. }
  6540. } break;
  6541. case GGML_TYPE_Q4_0:
  6542. case GGML_TYPE_Q4_1:
  6543. case GGML_TYPE_Q5_0:
  6544. case GGML_TYPE_Q5_1:
  6545. case GGML_TYPE_Q8_0:
  6546. case GGML_TYPE_Q8_1:
  6547. case GGML_TYPE_Q2_K:
  6548. case GGML_TYPE_Q3_K:
  6549. case GGML_TYPE_Q4_K:
  6550. case GGML_TYPE_Q5_K:
  6551. case GGML_TYPE_Q6_K:
  6552. case GGML_TYPE_IQ2_XXS:
  6553. case GGML_TYPE_IQ2_XS:
  6554. case GGML_TYPE_IQ3_XXS:
  6555. case GGML_TYPE_IQ1_S:
  6556. case GGML_TYPE_IQ4_NL:
  6557. {
  6558. ggml_compute_forward_add1_q_f32(params, src0, src1, dst);
  6559. } break;
  6560. default:
  6561. {
  6562. GGML_ASSERT(false);
  6563. } break;
  6564. }
  6565. }
  6566. // ggml_compute_forward_acc
  6567. static void ggml_compute_forward_acc_f32(
  6568. const struct ggml_compute_params * params,
  6569. const struct ggml_tensor * src0,
  6570. const struct ggml_tensor * src1,
  6571. struct ggml_tensor * dst) {
  6572. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  6573. GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));
6574. // view src0 and dst with these strides and data offset in bytes during acc
  6575. // nb0 is implicitly element_size because src0 and dst are contiguous
  6576. size_t nb1 = ((int32_t *) dst->op_params)[0];
  6577. size_t nb2 = ((int32_t *) dst->op_params)[1];
  6578. size_t nb3 = ((int32_t *) dst->op_params)[2];
  6579. size_t offset = ((int32_t *) dst->op_params)[3];
  6580. bool inplace = (bool) ((int32_t *) dst->op_params)[4];
  6581. if (!inplace && (params->type == GGML_TASK_INIT)) {
  6582. if (params->ith != 0) {
  6583. return;
  6584. }
  6585. // memcpy needs to be synchronized across threads to avoid race conditions.
  6586. // => do it in INIT phase
  6587. memcpy(
  6588. ((char *) dst->data),
  6589. ((char *) src0->data),
  6590. ggml_nbytes(dst));
  6591. }
  6592. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  6593. return;
  6594. }
  6595. const int ith = params->ith;
  6596. const int nth = params->nth;
  6597. const int nr = ggml_nrows(src1);
  6598. const int nc = src1->ne[0];
  6599. GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne)
  6600. GGML_TENSOR_LOCALS(size_t, nb1, src1, nb)
  6601. // src0 and dst as viewed during acc
  6602. const size_t nb0 = ggml_element_size(src0);
  6603. const size_t nb00 = nb0;
  6604. const size_t nb01 = nb1;
  6605. const size_t nb02 = nb2;
  6606. const size_t nb03 = nb3;
  6607. GGML_ASSERT(offset + (ne10 == 0 ? 0 : ne10-1)*nb0 + (ne11 == 0 ? 0 : ne11-1)*nb1 + (ne12 == 0 ? 0 : ne12-1)*nb2 + (ne13 == 0 ? 0 : ne13-1)*nb3 < ggml_nbytes(dst));
  6608. GGML_ASSERT(offset + (ne10 == 0 ? 0 : ne10-1)*nb00 + (ne11 == 0 ? 0 : ne11-1)*nb01 + (ne12 == 0 ? 0 : ne12-1)*nb02 + (ne13 == 0 ? 0 : ne13-1)*nb03 < ggml_nbytes(src0));
  6609. GGML_ASSERT(nb10 == sizeof(float));
  6610. // rows per thread
  6611. const int dr = (nr + nth - 1)/nth;
  6612. // row range for this thread
  6613. const int ir0 = dr*ith;
  6614. const int ir1 = MIN(ir0 + dr, nr);
  6615. for (int ir = ir0; ir < ir1; ++ir) {
  6616. // src0 and dst are viewed with shape of src1 and offset
  6617. // => same indices
  6618. const int i3 = ir/(ne12*ne11);
  6619. const int i2 = (ir - i3*ne12*ne11)/ne11;
  6620. const int i1 = (ir - i3*ne12*ne11 - i2*ne11);
  6621. #ifdef GGML_USE_ACCELERATE
  6622. vDSP_vadd(
  6623. (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + offset), 1,
  6624. (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11), 1,
  6625. (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + offset), 1, nc);
  6626. #else
  6627. ggml_vec_add_f32(nc,
  6628. (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + offset),
  6629. (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + offset),
  6630. (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11));
  6631. #endif
  6632. }
  6633. }
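// Hedged note (illustrative only): the five op_params read above are written by
// ggml_acc()/ggml_acc_inplace() as {nb1, nb2, nb3, offset, inplace}; dst is treated
// as if viewed with src1's shape at byte offset `offset` using those strides, so
// accumulating a row tensor into row r of a contiguous F32 matrix would, for
// example, pass offset = r * dst->nb[1].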
  6634. static void ggml_compute_forward_acc(
  6635. const struct ggml_compute_params * params,
  6636. const struct ggml_tensor * src0,
  6637. const struct ggml_tensor * src1,
  6638. struct ggml_tensor * dst) {
  6639. switch (src0->type) {
  6640. case GGML_TYPE_F32:
  6641. {
  6642. ggml_compute_forward_acc_f32(params, src0, src1, dst);
  6643. } break;
  6644. case GGML_TYPE_F16:
  6645. case GGML_TYPE_Q4_0:
  6646. case GGML_TYPE_Q4_1:
  6647. case GGML_TYPE_Q5_0:
  6648. case GGML_TYPE_Q5_1:
  6649. case GGML_TYPE_Q8_0:
  6650. case GGML_TYPE_Q8_1:
  6651. case GGML_TYPE_Q2_K:
  6652. case GGML_TYPE_Q3_K:
  6653. case GGML_TYPE_Q4_K:
  6654. case GGML_TYPE_Q5_K:
  6655. case GGML_TYPE_Q6_K:
  6656. case GGML_TYPE_IQ2_XXS:
  6657. case GGML_TYPE_IQ2_XS:
  6658. case GGML_TYPE_IQ3_XXS:
  6659. case GGML_TYPE_IQ1_S:
  6660. case GGML_TYPE_IQ4_NL:
  6661. default:
  6662. {
  6663. GGML_ASSERT(false);
  6664. } break;
  6665. }
  6666. }
  6667. // ggml_compute_forward_sub
  6668. static void ggml_compute_forward_sub_f32(
  6669. const struct ggml_compute_params * params,
  6670. const struct ggml_tensor * src0,
  6671. const struct ggml_tensor * src1,
  6672. struct ggml_tensor * dst) {
  6673. assert(params->ith == 0);
  6674. assert(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
  6675. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  6676. return;
  6677. }
  6678. const int nr = ggml_nrows(src0);
  6679. GGML_TENSOR_BINARY_OP_LOCALS
  6680. GGML_ASSERT( nb0 == sizeof(float));
  6681. GGML_ASSERT(nb00 == sizeof(float));
  6682. if (nb10 == sizeof(float)) {
  6683. for (int ir = 0; ir < nr; ++ir) {
  6684. // src0, src1 and dst are same shape => same indices
  6685. const int i3 = ir/(ne2*ne1);
  6686. const int i2 = (ir - i3*ne2*ne1)/ne1;
  6687. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  6688. #ifdef GGML_USE_ACCELERATE
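// note: vDSP_vsub takes the subtrahend first (C = A - B, with B passed before A),
// so passing src1 then src0 yields dst = src0 - src1, matching ggml_vec_sub_f32 below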
  6689. vDSP_vsub(
  6690. (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11), 1,
  6691. (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01), 1,
  6692. (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ), 1,
  6693. ne0);
  6694. #else
  6695. ggml_vec_sub_f32(ne0,
  6696. (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ),
  6697. (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01),
  6698. (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11));
  6699. #endif
  6702. }
  6703. } else {
  6704. // src1 is not contiguous
  6705. for (int ir = 0; ir < nr; ++ir) {
  6706. // src0, src1 and dst are same shape => same indices
  6707. const int i3 = ir/(ne2*ne1);
  6708. const int i2 = (ir - i3*ne2*ne1)/ne1;
  6709. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  6710. float * dst_ptr = (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 );
  6711. float * src0_ptr = (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
  6712. for (int i0 = 0; i0 < ne0; i0++) {
  6713. float * src1_ptr = (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11 + i0*nb10);
  6714. dst_ptr[i0] = src0_ptr[i0] - *src1_ptr;
  6715. }
  6716. }
  6717. }
  6718. }
  6719. static void ggml_compute_forward_sub(
  6720. const struct ggml_compute_params * params,
  6721. const struct ggml_tensor * src0,
  6722. const struct ggml_tensor * src1,
  6723. struct ggml_tensor * dst) {
  6724. switch (src0->type) {
  6725. case GGML_TYPE_F32:
  6726. {
  6727. ggml_compute_forward_sub_f32(params, src0, src1, dst);
  6728. } break;
  6729. default:
  6730. {
  6731. GGML_ASSERT(false);
  6732. } break;
  6733. }
  6734. }
  6735. // ggml_compute_forward_mul
  6736. static void ggml_compute_forward_mul_f32(
  6737. const struct ggml_compute_params * params,
  6738. const struct ggml_tensor * src0,
  6739. const struct ggml_tensor * src1,
  6740. struct ggml_tensor * dst) {
  6741. GGML_ASSERT(ggml_can_repeat(src1, src0) && ggml_are_same_shape(src0, dst));
  6742. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  6743. return;
  6744. }
  6745. const int ith = params->ith;
  6746. const int nth = params->nth;
  6747. #if defined(GGML_USE_CLBLAST)
  6748. if (src1->backend == GGML_BACKEND_GPU) {
// TODO: make the OpenCL kernel support full broadcast
  6750. GGML_ASSERT(ggml_can_repeat_rows(src1, src0));
  6751. if (ith == 0) {
  6752. ggml_cl_mul(src0, src1, dst);
  6753. }
  6754. return;
  6755. }
  6756. #endif
  6757. const int64_t nr = ggml_nrows(src0);
  6758. GGML_TENSOR_BINARY_OP_LOCALS
  6759. GGML_ASSERT( nb0 == sizeof(float));
  6760. GGML_ASSERT(nb00 == sizeof(float));
  6761. if (nb10 == sizeof(float)) {
  6762. for (int64_t ir = ith; ir < nr; ir += nth) {
  6763. // src0 and dst are same shape => same indices
  6764. const int64_t i03 = ir/(ne02*ne01);
  6765. const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
  6766. const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);
  6767. const int64_t i13 = i03 % ne13;
  6768. const int64_t i12 = i02 % ne12;
  6769. const int64_t i11 = i01 % ne11;
  6770. const int64_t nr0 = ne00 / ne10;
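// broadcast src1 over src0: dims 1..3 of src1 that are smaller repeat via the modulo
// indices above, and a src1 row shorter than a src0 row (ne10 < ne00) is applied nr0
// times along dim 0; e.g. src0 = [4096, 32] with src1 = [4096, 1] multiplies every
// src0 row by the single src1 row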
  6771. float * dst_ptr = (float *) ((char *) dst->data + i03*nb3 + i02*nb2 + i01*nb1 );
  6772. float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);
  6773. float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11);
  6774. for (int64_t r = 0 ; r < nr0; ++r) {
  6775. #ifdef GGML_USE_ACCELERATE
  6776. UNUSED(ggml_vec_mul_f32);
  6777. vDSP_vmul(src0_ptr + r*ne10, 1, src1_ptr, 1, dst_ptr + r*ne10, 1, ne10);
  6778. #else
  6779. ggml_vec_mul_f32(ne10, dst_ptr + r*ne10, src0_ptr + r*ne10, src1_ptr);
  6780. #endif
  6781. }
  6782. }
  6783. } else {
  6784. // src1 is not contiguous
  6785. for (int64_t ir = ith; ir < nr; ir += nth) {
  6786. // src0 and dst are same shape => same indices
  6787. // src1 is broadcastable across src0 and dst in i1, i2, i3
  6788. const int64_t i03 = ir/(ne02*ne01);
  6789. const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
  6790. const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);
  6791. const int64_t i13 = i03 % ne13;
  6792. const int64_t i12 = i02 % ne12;
  6793. const int64_t i11 = i01 % ne11;
  6794. float * dst_ptr = (float *) ((char *) dst->data + i03*nb3 + i02*nb2 + i01*nb1 );
  6795. float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);
  6796. for (int64_t i0 = 0; i0 < ne00; ++i0) {
  6797. const int64_t i10 = i0 % ne10;
  6798. float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + i10*nb10);
  6799. dst_ptr[i0] = src0_ptr[i0] * (*src1_ptr);
  6800. }
  6801. }
  6802. }
  6803. }
  6804. static void ggml_compute_forward_mul(
  6805. const struct ggml_compute_params * params,
  6806. const struct ggml_tensor * src0,
  6807. const struct ggml_tensor * src1,
  6808. struct ggml_tensor * dst) {
  6809. GGML_ASSERT(src1->type == GGML_TYPE_F32 && "only f32 src1 supported for now");
  6810. switch (src0->type) {
  6811. case GGML_TYPE_F32:
  6812. {
  6813. ggml_compute_forward_mul_f32(params, src0, src1, dst);
  6814. } break;
  6815. default:
  6816. {
  6817. GGML_ASSERT(false);
  6818. } break;
  6819. }
  6820. }
  6821. // ggml_compute_forward_div
  6822. static void ggml_compute_forward_div_f32(
  6823. const struct ggml_compute_params * params,
  6824. const struct ggml_tensor * src0,
  6825. const struct ggml_tensor * src1,
  6826. struct ggml_tensor * dst) {
  6827. GGML_ASSERT(ggml_can_repeat(src1, src0) && ggml_are_same_shape(src0, dst));
  6828. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  6829. return;
  6830. }
  6831. const int ith = params->ith;
  6832. const int nth = params->nth;
  6833. const int64_t nr = ggml_nrows(src0);
  6834. GGML_TENSOR_BINARY_OP_LOCALS
  6835. GGML_ASSERT( nb0 == sizeof(float));
  6836. GGML_ASSERT(nb00 == sizeof(float));
  6837. if (nb10 == sizeof(float)) {
  6838. for (int64_t ir = ith; ir < nr; ir += nth) {
  6839. // src0 and dst are same shape => same indices
  6840. const int64_t i03 = ir/(ne02*ne01);
  6841. const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
  6842. const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);
  6843. const int64_t i13 = i03 % ne13;
  6844. const int64_t i12 = i02 % ne12;
  6845. const int64_t i11 = i01 % ne11;
  6846. const int64_t nr0 = ne00 / ne10;
  6847. float * dst_ptr = (float *) ((char *) dst->data + i03*nb3 + i02*nb2 + i01*nb1 );
  6848. float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);
  6849. float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11);
  6850. for (int64_t r = 0; r < nr0; ++r) {
  6851. #ifdef GGML_USE_ACCELERATE
  6852. UNUSED(ggml_vec_div_f32);
  6853. vDSP_vdiv(src1_ptr, 1, src0_ptr + r*ne10, 1, dst_ptr + r*ne10, 1, ne10);
  6854. #else
  6855. ggml_vec_div_f32(ne10, dst_ptr + r*ne10, src0_ptr + r*ne10, src1_ptr);
  6856. #endif
  6857. }
  6858. }
  6859. } else {
  6860. // src1 is not contiguous
  6861. for (int64_t ir = ith; ir < nr; ir += nth) {
  6862. // src0 and dst are same shape => same indices
  6863. // src1 is broadcastable across src0 and dst in i1, i2, i3
  6864. const int64_t i03 = ir/(ne02*ne01);
  6865. const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
  6866. const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);
  6867. const int64_t i13 = i03 % ne13;
  6868. const int64_t i12 = i02 % ne12;
  6869. const int64_t i11 = i01 % ne11;
  6870. float * dst_ptr = (float *) ((char *) dst->data + i03*nb3 + i02*nb2 + i01*nb1 );
  6871. float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);
  6872. for (int64_t i0 = 0; i0 < ne00; ++i0) {
  6873. const int64_t i10 = i0 % ne10;
  6874. float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + i10*nb10);
  6875. dst_ptr[i0] = src0_ptr[i0] / (*src1_ptr);
  6876. }
  6877. }
  6878. }
  6879. }
  6880. static void ggml_compute_forward_div(
  6881. const struct ggml_compute_params * params,
  6882. const struct ggml_tensor * src0,
  6883. const struct ggml_tensor * src1,
  6884. struct ggml_tensor * dst) {
  6885. switch (src0->type) {
  6886. case GGML_TYPE_F32:
  6887. {
  6888. ggml_compute_forward_div_f32(params, src0, src1, dst);
  6889. } break;
  6890. default:
  6891. {
  6892. GGML_ASSERT(false);
  6893. } break;
  6894. }
  6895. }
  6896. // ggml_compute_forward_sqr
  6897. static void ggml_compute_forward_sqr_f32(
  6898. const struct ggml_compute_params * params,
  6899. const struct ggml_tensor * src0,
  6900. struct ggml_tensor * dst) {
  6901. assert(params->ith == 0);
  6902. assert(ggml_are_same_shape(src0, dst));
  6903. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  6904. return;
  6905. }
  6906. const int n = ggml_nrows(src0);
  6907. const int nc = src0->ne[0];
  6908. assert( dst->nb[0] == sizeof(float));
  6909. assert(src0->nb[0] == sizeof(float));
  6910. for (int i = 0; i < n; i++) {
  6911. ggml_vec_sqr_f32(nc,
  6912. (float *) ((char *) dst->data + i*( dst->nb[1])),
  6913. (float *) ((char *) src0->data + i*(src0->nb[1])));
  6914. }
  6915. }
  6916. static void ggml_compute_forward_sqr(
  6917. const struct ggml_compute_params * params,
  6918. const struct ggml_tensor * src0,
  6919. struct ggml_tensor * dst) {
  6920. switch (src0->type) {
  6921. case GGML_TYPE_F32:
  6922. {
  6923. ggml_compute_forward_sqr_f32(params, src0, dst);
  6924. } break;
  6925. default:
  6926. {
  6927. GGML_ASSERT(false);
  6928. } break;
  6929. }
  6930. }
  6931. // ggml_compute_forward_sqrt
  6932. static void ggml_compute_forward_sqrt_f32(
  6933. const struct ggml_compute_params * params,
  6934. const struct ggml_tensor * src0,
  6935. struct ggml_tensor * dst) {
  6936. assert(params->ith == 0);
  6937. assert(ggml_are_same_shape(src0, dst));
  6938. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  6939. return;
  6940. }
  6941. const int n = ggml_nrows(src0);
  6942. const int nc = src0->ne[0];
  6943. assert( dst->nb[0] == sizeof(float));
  6944. assert(src0->nb[0] == sizeof(float));
  6945. for (int i = 0; i < n; i++) {
  6946. ggml_vec_sqrt_f32(nc,
  6947. (float *) ((char *) dst->data + i*( dst->nb[1])),
  6948. (float *) ((char *) src0->data + i*(src0->nb[1])));
  6949. }
  6950. }
  6951. static void ggml_compute_forward_sqrt(
  6952. const struct ggml_compute_params * params,
  6953. const struct ggml_tensor * src0,
  6954. struct ggml_tensor * dst) {
  6955. switch (src0->type) {
  6956. case GGML_TYPE_F32:
  6957. {
  6958. ggml_compute_forward_sqrt_f32(params, src0, dst);
  6959. } break;
  6960. default:
  6961. {
  6962. GGML_ASSERT(false);
  6963. } break;
  6964. }
  6965. }
  6966. // ggml_compute_forward_log
  6967. static void ggml_compute_forward_log_f32(
  6968. const struct ggml_compute_params * params,
  6969. const struct ggml_tensor * src0,
  6970. struct ggml_tensor * dst) {
  6971. GGML_ASSERT(params->ith == 0);
  6972. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  6973. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  6974. return;
  6975. }
  6976. const int n = ggml_nrows(src0);
  6977. const int nc = src0->ne[0];
  6978. GGML_ASSERT( dst->nb[0] == sizeof(float));
  6979. GGML_ASSERT(src0->nb[0] == sizeof(float));
  6980. for (int i = 0; i < n; i++) {
  6981. ggml_vec_log_f32(nc,
  6982. (float *) ((char *) dst->data + i*( dst->nb[1])),
  6983. (float *) ((char *) src0->data + i*(src0->nb[1])));
  6984. }
  6985. }
  6986. static void ggml_compute_forward_log(
  6987. const struct ggml_compute_params * params,
  6988. const struct ggml_tensor * src0,
  6989. struct ggml_tensor * dst) {
  6990. switch (src0->type) {
  6991. case GGML_TYPE_F32:
  6992. {
  6993. ggml_compute_forward_log_f32(params, src0, dst);
  6994. } break;
  6995. default:
  6996. {
  6997. GGML_ASSERT(false);
  6998. } break;
  6999. }
  7000. }
  7001. // ggml_compute_forward_sum
  7002. static void ggml_compute_forward_sum_f32(
  7003. const struct ggml_compute_params * params,
  7004. const struct ggml_tensor * src0,
  7005. struct ggml_tensor * dst) {
  7006. assert(params->ith == 0);
  7007. assert(ggml_is_scalar(dst));
  7008. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7009. return;
  7010. }
  7012. assert(src0->nb[0] == sizeof(float));
  7013. GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne)
  7014. GGML_TENSOR_LOCALS(size_t, nb0, src0, nb)
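// accumulate in ggml_float (double precision in the default build) to limit
// rounding error before the final narrowing store into the f32 scalar dst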
  7015. ggml_float sum = 0;
  7016. ggml_float row_sum = 0;
  7017. for (int64_t i03 = 0; i03 < ne03; i03++) {
  7018. for (int64_t i02 = 0; i02 < ne02; i02++) {
  7019. for (int64_t i01 = 0; i01 < ne01; i01++) {
  7020. ggml_vec_sum_f32_ggf(ne00,
  7021. &row_sum,
  7022. (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03));
  7023. sum += row_sum;
  7024. }
  7025. }
  7026. }
  7027. ((float *) dst->data)[0] = sum;
  7028. }
  7029. static void ggml_compute_forward_sum_f16(
  7030. const struct ggml_compute_params * params,
  7031. const struct ggml_tensor * src0,
  7032. struct ggml_tensor * dst) {
  7033. assert(params->ith == 0);
  7034. assert(ggml_is_scalar(dst));
  7035. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7036. return;
  7037. }
  7038. assert(src0->nb[0] == sizeof(ggml_fp16_t));
  7039. GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne)
  7040. GGML_TENSOR_LOCALS(size_t, nb0, src0, nb)
  7041. float sum = 0;
  7042. float row_sum = 0;
  7043. for (int64_t i03 = 0; i03 < ne03; i03++) {
  7044. for (int64_t i02 = 0; i02 < ne02; i02++) {
  7045. for (int64_t i01 = 0; i01 < ne01; i01++) {
  7046. ggml_vec_sum_f16_ggf(ne00,
  7047. &row_sum,
  7048. (ggml_fp16_t *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03));
  7049. sum += row_sum;
  7050. }
  7051. }
  7052. }
  7053. ((ggml_fp16_t *) dst->data)[0] = GGML_FP32_TO_FP16(sum);
  7054. }
  7055. static void ggml_compute_forward_sum(
  7056. const struct ggml_compute_params * params,
  7057. const struct ggml_tensor * src0,
  7058. struct ggml_tensor * dst) {
  7059. switch (src0->type) {
  7060. case GGML_TYPE_F32:
  7061. {
  7062. ggml_compute_forward_sum_f32(params, src0, dst);
  7063. } break;
  7064. case GGML_TYPE_F16:
  7065. {
  7066. ggml_compute_forward_sum_f16(params, src0, dst);
  7067. } break;
  7068. default:
  7069. {
  7070. GGML_ASSERT(false);
  7071. } break;
  7072. }
  7073. }
  7074. // ggml_compute_forward_sum_rows
  7075. static void ggml_compute_forward_sum_rows_f32(
  7076. const struct ggml_compute_params * params,
  7077. const struct ggml_tensor * src0,
  7078. struct ggml_tensor * dst) {
  7079. GGML_ASSERT(params->ith == 0);
  7080. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7081. return;
  7082. }
  7083. GGML_ASSERT(src0->nb[0] == sizeof(float));
  7084. GGML_ASSERT(dst->nb[0] == sizeof(float));
  7085. GGML_TENSOR_UNARY_OP_LOCALS
  7086. GGML_ASSERT(ne0 == 1);
  7087. GGML_ASSERT(ne1 == ne01);
  7088. GGML_ASSERT(ne2 == ne02);
  7089. GGML_ASSERT(ne3 == ne03);
  7090. for (int64_t i3 = 0; i3 < ne03; i3++) {
  7091. for (int64_t i2 = 0; i2 < ne02; i2++) {
  7092. for (int64_t i1 = 0; i1 < ne01; i1++) {
  7093. float * src_row = (float *) ((char *) src0->data + i1*nb01 + i2*nb02 + i3*nb03);
  7094. float * dst_row = (float *) ((char *) dst->data + i1*nb1 + i2*nb2 + i3*nb3);
  7095. float row_sum = 0;
  7096. ggml_vec_sum_f32(ne00, &row_sum, src_row);
  7097. dst_row[0] = row_sum;
  7098. }
  7099. }
  7100. }
  7101. }
  7102. static void ggml_compute_forward_sum_rows(
  7103. const struct ggml_compute_params * params,
  7104. const struct ggml_tensor * src0,
  7105. struct ggml_tensor * dst) {
  7106. switch (src0->type) {
  7107. case GGML_TYPE_F32:
  7108. {
  7109. ggml_compute_forward_sum_rows_f32(params, src0, dst);
  7110. } break;
  7111. default:
  7112. {
  7113. GGML_ASSERT(false);
  7114. } break;
  7115. }
  7116. }
  7117. // ggml_compute_forward_mean
  7118. static void ggml_compute_forward_mean_f32(
  7119. const struct ggml_compute_params * params,
  7120. const struct ggml_tensor * src0,
  7121. struct ggml_tensor * dst) {
  7122. assert(params->ith == 0);
  7123. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7124. return;
  7125. }
  7126. assert(src0->nb[0] == sizeof(float));
  7127. GGML_TENSOR_UNARY_OP_LOCALS
  7128. assert(ne0 == 1);
  7129. assert(ne1 == ne01);
  7130. assert(ne2 == ne02);
  7131. assert(ne3 == ne03);
  7132. UNUSED(ne0);
  7133. UNUSED(ne1);
  7134. UNUSED(ne2);
  7135. UNUSED(ne3);
  7136. for (int64_t i03 = 0; i03 < ne03; i03++) {
  7137. for (int64_t i02 = 0; i02 < ne02; i02++) {
  7138. for (int64_t i01 = 0; i01 < ne01; i01++) {
  7139. ggml_vec_sum_f32(ne00,
  7140. (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3),
  7141. (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03));
  7142. *(float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3) /= (float) ne00;
  7143. }
  7144. }
  7145. }
  7146. }
  7147. static void ggml_compute_forward_mean(
  7148. const struct ggml_compute_params * params,
  7149. const struct ggml_tensor * src0,
  7150. struct ggml_tensor * dst) {
  7151. switch (src0->type) {
  7152. case GGML_TYPE_F32:
  7153. {
  7154. ggml_compute_forward_mean_f32(params, src0, dst);
  7155. } break;
  7156. default:
  7157. {
  7158. GGML_ASSERT(false);
  7159. } break;
  7160. }
  7161. }
  7162. // ggml_compute_forward_argmax
  7163. static void ggml_compute_forward_argmax_f32(
  7164. const struct ggml_compute_params * params,
  7165. const struct ggml_tensor * src0,
  7166. struct ggml_tensor * dst) {
  7167. assert(params->ith == 0);
  7168. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7169. return;
  7170. }
  7171. assert(src0->nb[0] == sizeof(float));
  7172. assert(dst->nb[0] == sizeof(float));
  7173. const int64_t ne00 = src0->ne[0];
  7174. const int64_t ne01 = src0->ne[1];
  7175. const size_t nb01 = src0->nb[1];
  7176. const size_t nb0 = dst->nb[0];
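// each output element is the int32 index of the row maximum; the sizeof(float)
// assert above still holds because sizeof(int32_t) == sizeof(float)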
  7177. for (int64_t i1 = 0; i1 < ne01; i1++) {
  7178. float * src = (float *) ((char *) src0->data + i1*nb01);
  7179. int32_t * dst_ = (int32_t *) ((char *) dst->data + i1*nb0);
  7180. int v = 0;
  7181. ggml_vec_argmax_f32(ne00, &v, src);
  7182. dst_[0] = v;
  7183. }
  7184. }
  7185. static void ggml_compute_forward_argmax(
  7186. const struct ggml_compute_params * params,
  7187. const struct ggml_tensor * src0,
  7188. struct ggml_tensor * dst) {
  7189. switch (src0->type) {
  7190. case GGML_TYPE_F32:
  7191. {
  7192. ggml_compute_forward_argmax_f32(params, src0, dst);
  7193. } break;
  7194. default:
  7195. {
  7196. GGML_ASSERT(false);
  7197. } break;
  7198. }
  7199. }
  7200. // ggml_compute_forward_repeat
  7201. static void ggml_compute_forward_repeat_f32(
  7202. const struct ggml_compute_params * params,
  7203. const struct ggml_tensor * src0,
  7204. struct ggml_tensor * dst) {
  7205. GGML_ASSERT(params->ith == 0);
  7206. GGML_ASSERT(ggml_can_repeat(src0, dst));
  7207. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7208. return;
  7209. }
  7210. GGML_TENSOR_UNARY_OP_LOCALS
  7211. // guaranteed to be an integer due to the check in ggml_can_repeat
  7212. const int nr0 = (int)(ne0/ne00);
  7213. const int nr1 = (int)(ne1/ne01);
  7214. const int nr2 = (int)(ne2/ne02);
  7215. const int nr3 = (int)(ne3/ne03);
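// nr0..nr3 are the per-dimension repeat counts, e.g. repeating a [n, 1, 1, 1]
// bias to [n, m, 1, 1] gives nr0 = 1, nr1 = m and copies the single src0 row m times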
  7216. // TODO: support for transposed / permuted tensors
  7217. GGML_ASSERT(nb0 == sizeof(float));
  7218. GGML_ASSERT(nb00 == sizeof(float));
  7219. // TODO: maybe this is not optimal?
  7220. for (int i3 = 0; i3 < nr3; i3++) {
  7221. for (int k3 = 0; k3 < ne03; k3++) {
  7222. for (int i2 = 0; i2 < nr2; i2++) {
  7223. for (int k2 = 0; k2 < ne02; k2++) {
  7224. for (int i1 = 0; i1 < nr1; i1++) {
  7225. for (int k1 = 0; k1 < ne01; k1++) {
  7226. for (int i0 = 0; i0 < nr0; i0++) {
  7227. ggml_vec_cpy_f32(ne00,
  7228. (float *) ((char *) dst->data + (i3*ne03 + k3)*nb3 + (i2*ne02 + k2)*nb2 + (i1*ne01 + k1)*nb1 + (i0*ne00)*nb0),
  7229. (float *) ((char *) src0->data + ( k3)*nb03 + ( k2)*nb02 + ( k1)*nb01));
  7230. }
  7231. }
  7232. }
  7233. }
  7234. }
  7235. }
  7236. }
  7237. }
  7238. static void ggml_compute_forward_repeat_f16(
  7239. const struct ggml_compute_params * params,
  7240. const struct ggml_tensor * src0,
  7241. struct ggml_tensor * dst) {
  7242. GGML_ASSERT(params->ith == 0);
  7243. GGML_ASSERT(ggml_can_repeat(src0, dst));
  7244. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7245. return;
  7246. }
  7247. GGML_TENSOR_UNARY_OP_LOCALS
  7248. // guaranteed to be an integer due to the check in ggml_can_repeat
  7249. const int nr0 = (int)(ne0/ne00);
  7250. const int nr1 = (int)(ne1/ne01);
  7251. const int nr2 = (int)(ne2/ne02);
  7252. const int nr3 = (int)(ne3/ne03);
  7253. // TODO: support for transposed / permuted tensors
  7254. GGML_ASSERT(nb0 == sizeof(ggml_fp16_t));
  7255. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  7256. // TODO: maybe this is not optimal?
  7257. for (int i3 = 0; i3 < nr3; i3++) {
  7258. for (int k3 = 0; k3 < ne03; k3++) {
  7259. for (int i2 = 0; i2 < nr2; i2++) {
  7260. for (int k2 = 0; k2 < ne02; k2++) {
  7261. for (int i1 = 0; i1 < nr1; i1++) {
  7262. for (int k1 = 0; k1 < ne01; k1++) {
  7263. for (int i0 = 0; i0 < nr0; i0++) {
  7264. ggml_fp16_t * y = (ggml_fp16_t *) ((char *) dst->data + (i3*ne03 + k3)*nb3 + (i2*ne02 + k2)*nb2 + (i1*ne01 + k1)*nb1 + (i0*ne00)*nb0);
  7265. ggml_fp16_t * x = (ggml_fp16_t *) ((char *) src0->data + ( k3)*nb03 + ( k2)*nb02 + ( k1)*nb01);
  7266. // ggml_vec_cpy_f16(ne00, y, x)
  7267. for (int i = 0; i < ne00; ++i) {
  7268. y[i] = x[i];
  7269. }
  7270. }
  7271. }
  7272. }
  7273. }
  7274. }
  7275. }
  7276. }
  7277. }
  7278. static void ggml_compute_forward_repeat(
  7279. const struct ggml_compute_params * params,
  7280. const struct ggml_tensor * src0,
  7281. struct ggml_tensor * dst) {
  7282. switch (src0->type) {
  7283. case GGML_TYPE_F16:
  7284. case GGML_TYPE_I16:
  7285. {
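// I16 shares the F16 path (and I32 the F32 path below): repeat only copies
// raw elements, so only the element size matters here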
  7286. ggml_compute_forward_repeat_f16(params, src0, dst);
  7287. } break;
  7288. case GGML_TYPE_F32:
  7289. case GGML_TYPE_I32:
  7290. {
  7291. ggml_compute_forward_repeat_f32(params, src0, dst);
  7292. } break;
  7293. default:
  7294. {
  7295. GGML_ASSERT(false);
  7296. } break;
  7297. }
  7298. }
  7299. // ggml_compute_forward_repeat_back
  7300. static void ggml_compute_forward_repeat_back_f32(
  7301. const struct ggml_compute_params * params,
  7302. const struct ggml_tensor * src0,
  7303. struct ggml_tensor * dst) {
  7304. GGML_ASSERT(params->ith == 0);
  7305. GGML_ASSERT(ggml_can_repeat(dst, src0));
  7306. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7307. return;
  7308. }
  7309. GGML_TENSOR_UNARY_OP_LOCALS
  7310. // guaranteed to be an integer due to the check in ggml_can_repeat
  7311. const int nr0 = (int)(ne00/ne0);
  7312. const int nr1 = (int)(ne01/ne1);
  7313. const int nr2 = (int)(ne02/ne2);
  7314. const int nr3 = (int)(ne03/ne3);
  7315. // TODO: support for transposed / permuted tensors
  7316. GGML_ASSERT(nb0 == sizeof(float));
  7317. GGML_ASSERT(nb00 == sizeof(float));
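// repeat_back is the adjoint of repeat: dst (the smaller gradient) is zeroed first,
// then every repeated block of src0 is summed back into it with ggml_vec_acc_f32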
  7318. if (ggml_is_contiguous(dst)) {
  7319. ggml_vec_set_f32(ne0*ne1*ne2*ne3, dst->data, 0);
  7320. } else {
  7321. for (int k3 = 0; k3 < ne3; k3++) {
  7322. for (int k2 = 0; k2 < ne2; k2++) {
  7323. for (int k1 = 0; k1 < ne1; k1++) {
  7324. ggml_vec_set_f32(ne0,
  7325. (float *) ((char *) dst->data + k1*nb1 + k2*nb2 + k3*nb3),
  7326. 0);
  7327. }
  7328. }
  7329. }
  7330. }
  7331. // TODO: maybe this is not optimal?
  7332. for (int i3 = 0; i3 < nr3; i3++) {
  7333. for (int k3 = 0; k3 < ne3; k3++) {
  7334. for (int i2 = 0; i2 < nr2; i2++) {
  7335. for (int k2 = 0; k2 < ne2; k2++) {
  7336. for (int i1 = 0; i1 < nr1; i1++) {
  7337. for (int k1 = 0; k1 < ne1; k1++) {
  7338. for (int i0 = 0; i0 < nr0; i0++) {
  7339. ggml_vec_acc_f32(ne0,
  7340. (float *) ((char *) dst->data + ( k3)*nb3 + ( k2)*nb2 + ( k1)*nb1),
  7341. (float *) ((char *) src0->data + (i3*ne3 + k3)*nb03 + (i2*ne2 + k2)*nb02 + (i1*ne1 + k1)*nb01 + (i0*ne0)*nb00));
  7342. }
  7343. }
  7344. }
  7345. }
  7346. }
  7347. }
  7348. }
  7349. }
  7350. static void ggml_compute_forward_repeat_back(
  7351. const struct ggml_compute_params * params,
  7352. const struct ggml_tensor * src0,
  7353. struct ggml_tensor * dst) {
  7354. switch (src0->type) {
  7355. case GGML_TYPE_F32:
  7356. {
  7357. ggml_compute_forward_repeat_back_f32(params, src0, dst);
  7358. } break;
  7359. default:
  7360. {
  7361. GGML_ASSERT(false);
  7362. } break;
  7363. }
  7364. }
  7365. // ggml_compute_forward_concat
  7366. static void ggml_compute_forward_concat_f32(
  7367. const struct ggml_compute_params * params,
  7368. const struct ggml_tensor * src0,
  7369. const struct ggml_tensor * src1,
  7370. struct ggml_tensor * dst) {
  7371. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7372. return;
  7373. }
  7374. GGML_ASSERT(src0->nb[0] == sizeof(float));
  7375. const int ith = params->ith;
  7376. const int nth = params->nth;
  7377. GGML_TENSOR_BINARY_OP_LOCALS
  7378. // TODO: support for transposed / permuted tensors
  7379. GGML_ASSERT(nb0 == sizeof(float));
  7380. GGML_ASSERT(nb00 == sizeof(float));
  7381. GGML_ASSERT(nb10 == sizeof(float));
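// concatenation is along dim 2: slices with i2 < ne02 are copied from src0,
// the remaining ne12 slices come from src1 (dst ne2 = ne02 + ne12)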
  7382. for (int i3 = 0; i3 < ne3; i3++) {
  7383. for (int i2 = ith; i2 < ne2; i2 += nth) {
  7384. if (i2 < ne02) { // src0
  7385. for (int i1 = 0; i1 < ne1; i1++) {
  7386. for (int i0 = 0; i0 < ne0; i0++) {
  7387. const float * x = (float *)((char *) src0->data + i0 * nb00 + i1 * nb01 + i2 * nb02 + i3 * nb03);
  7388. float * y = (float *)((char *)dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3);
  7389. *y = *x;
  7390. }
  7391. }
} else { // src1
  7394. for (int i1 = 0; i1 < ne1; i1++) {
  7395. for (int i0 = 0; i0 < ne0; i0++) {
  7396. const float * x = (float *)((char *) src1->data + i0 * nb10 + i1 * nb11 + (i2 - ne02) * nb12 + i3 * nb13);
  7397. float * y = (float *)((char *)dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3);
  7398. *y = *x;
  7399. }
  7400. }
  7401. }
  7402. }
  7403. }
  7404. }
  7405. static void ggml_compute_forward_concat(
  7406. const struct ggml_compute_params* params,
  7407. const struct ggml_tensor* src0,
  7408. const struct ggml_tensor* src1,
  7409. struct ggml_tensor* dst) {
  7410. switch (src0->type) {
  7411. case GGML_TYPE_F32:
  7412. case GGML_TYPE_I32:
  7413. {
  7414. ggml_compute_forward_concat_f32(params, src0, src1, dst);
  7415. } break;
  7416. default:
  7417. {
  7418. GGML_ASSERT(false);
  7419. } break;
  7420. }
  7421. }
  7422. // ggml_compute_forward_abs
  7423. static void ggml_compute_forward_abs_f32(
  7424. const struct ggml_compute_params * params,
  7425. const struct ggml_tensor * src0,
  7426. struct ggml_tensor * dst) {
  7427. assert(params->ith == 0);
  7428. assert(ggml_are_same_shape(src0, dst));
  7429. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7430. return;
  7431. }
  7432. const int n = ggml_nrows(src0);
  7433. const int nc = src0->ne[0];
  7434. assert(dst->nb[0] == sizeof(float));
  7435. assert(src0->nb[0] == sizeof(float));
  7436. for (int i = 0; i < n; i++) {
  7437. ggml_vec_abs_f32(nc,
  7438. (float *) ((char *) dst->data + i*( dst->nb[1])),
  7439. (float *) ((char *) src0->data + i*(src0->nb[1])));
  7440. }
  7441. }
  7442. static void ggml_compute_forward_abs(
  7443. const struct ggml_compute_params * params,
  7444. const struct ggml_tensor * src0,
  7445. struct ggml_tensor * dst) {
  7446. switch (src0->type) {
  7447. case GGML_TYPE_F32:
  7448. {
  7449. ggml_compute_forward_abs_f32(params, src0, dst);
  7450. } break;
  7451. default:
  7452. {
  7453. GGML_ASSERT(false);
  7454. } break;
  7455. }
  7456. }
  7457. // ggml_compute_forward_sgn
  7458. static void ggml_compute_forward_sgn_f32(
  7459. const struct ggml_compute_params * params,
  7460. const struct ggml_tensor * src0,
  7461. struct ggml_tensor * dst) {
  7462. assert(params->ith == 0);
  7463. assert(ggml_are_same_shape(src0, dst));
  7464. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7465. return;
  7466. }
  7467. const int n = ggml_nrows(src0);
  7468. const int nc = src0->ne[0];
  7469. assert(dst->nb[0] == sizeof(float));
  7470. assert(src0->nb[0] == sizeof(float));
  7471. for (int i = 0; i < n; i++) {
  7472. ggml_vec_sgn_f32(nc,
  7473. (float *) ((char *) dst->data + i*( dst->nb[1])),
  7474. (float *) ((char *) src0->data + i*(src0->nb[1])));
  7475. }
  7476. }
  7477. static void ggml_compute_forward_sgn(
  7478. const struct ggml_compute_params * params,
  7479. const struct ggml_tensor * src0,
  7480. struct ggml_tensor * dst) {
  7481. switch (src0->type) {
  7482. case GGML_TYPE_F32:
  7483. {
  7484. ggml_compute_forward_sgn_f32(params, src0, dst);
  7485. } break;
  7486. default:
  7487. {
  7488. GGML_ASSERT(false);
  7489. } break;
  7490. }
  7491. }
  7492. // ggml_compute_forward_neg
  7493. static void ggml_compute_forward_neg_f32(
  7494. const struct ggml_compute_params * params,
  7495. const struct ggml_tensor * src0,
  7496. struct ggml_tensor * dst) {
  7497. assert(params->ith == 0);
  7498. assert(ggml_are_same_shape(src0, dst));
  7499. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7500. return;
  7501. }
  7502. const int n = ggml_nrows(src0);
  7503. const int nc = src0->ne[0];
  7504. assert(dst->nb[0] == sizeof(float));
  7505. assert(src0->nb[0] == sizeof(float));
  7506. for (int i = 0; i < n; i++) {
  7507. ggml_vec_neg_f32(nc,
  7508. (float *) ((char *) dst->data + i*( dst->nb[1])),
  7509. (float *) ((char *) src0->data + i*(src0->nb[1])));
  7510. }
  7511. }
  7512. static void ggml_compute_forward_neg(
  7513. const struct ggml_compute_params * params,
  7514. const struct ggml_tensor * src0,
  7515. struct ggml_tensor * dst) {
  7516. switch (src0->type) {
  7517. case GGML_TYPE_F32:
  7518. {
  7519. ggml_compute_forward_neg_f32(params, src0, dst);
  7520. } break;
  7521. default:
  7522. {
  7523. GGML_ASSERT(false);
  7524. } break;
  7525. }
  7526. }
  7527. // ggml_compute_forward_step
  7528. static void ggml_compute_forward_step_f32(
  7529. const struct ggml_compute_params * params,
  7530. const struct ggml_tensor * src0,
  7531. struct ggml_tensor * dst) {
  7532. assert(params->ith == 0);
  7533. assert(ggml_are_same_shape(src0, dst));
  7534. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7535. return;
  7536. }
  7537. const int n = ggml_nrows(src0);
  7538. const int nc = src0->ne[0];
  7539. assert(dst->nb[0] == sizeof(float));
  7540. assert(src0->nb[0] == sizeof(float));
  7541. for (int i = 0; i < n; i++) {
  7542. ggml_vec_step_f32(nc,
  7543. (float *) ((char *) dst->data + i*( dst->nb[1])),
  7544. (float *) ((char *) src0->data + i*(src0->nb[1])));
  7545. }
  7546. }
  7547. static void ggml_compute_forward_step(
  7548. const struct ggml_compute_params * params,
  7549. const struct ggml_tensor * src0,
  7550. struct ggml_tensor * dst) {
  7551. switch (src0->type) {
  7552. case GGML_TYPE_F32:
  7553. {
  7554. ggml_compute_forward_step_f32(params, src0, dst);
  7555. } break;
  7556. default:
  7557. {
  7558. GGML_ASSERT(false);
  7559. } break;
  7560. }
  7561. }
  7562. // ggml_compute_forward_tanh
  7563. static void ggml_compute_forward_tanh_f32(
  7564. const struct ggml_compute_params * params,
  7565. const struct ggml_tensor * src0,
  7566. struct ggml_tensor * dst) {
  7567. assert(params->ith == 0);
  7568. assert(ggml_are_same_shape(src0, dst));
  7569. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7570. return;
  7571. }
  7572. const int n = ggml_nrows(src0);
  7573. const int nc = src0->ne[0];
  7574. assert(dst->nb[0] == sizeof(float));
  7575. assert(src0->nb[0] == sizeof(float));
  7576. for (int i = 0; i < n; i++) {
  7577. ggml_vec_tanh_f32(nc,
  7578. (float *) ((char *) dst->data + i*( dst->nb[1])),
  7579. (float *) ((char *) src0->data + i*(src0->nb[1])));
  7580. }
  7581. }
  7582. static void ggml_compute_forward_tanh(
  7583. const struct ggml_compute_params * params,
  7584. const struct ggml_tensor * src0,
  7585. struct ggml_tensor * dst) {
  7586. switch (src0->type) {
  7587. case GGML_TYPE_F32:
  7588. {
  7589. ggml_compute_forward_tanh_f32(params, src0, dst);
  7590. } break;
  7591. default:
  7592. {
  7593. GGML_ASSERT(false);
  7594. } break;
  7595. }
  7596. }
  7597. // ggml_compute_forward_elu
  7598. static void ggml_compute_forward_elu_f32(
  7599. const struct ggml_compute_params * params,
  7600. const struct ggml_tensor * src0,
  7601. struct ggml_tensor * dst) {
  7602. assert(params->ith == 0);
  7603. assert(ggml_are_same_shape(src0, dst));
  7604. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7605. return;
  7606. }
  7607. const int n = ggml_nrows(src0);
  7608. const int nc = src0->ne[0];
  7609. assert(dst->nb[0] == sizeof(float));
  7610. assert(src0->nb[0] == sizeof(float));
  7611. for (int i = 0; i < n; i++) {
  7612. ggml_vec_elu_f32(nc,
  7613. (float *) ((char *) dst->data + i*( dst->nb[1])),
  7614. (float *) ((char *) src0->data + i*(src0->nb[1])));
  7615. }
  7616. }
  7617. static void ggml_compute_forward_elu(
  7618. const struct ggml_compute_params * params,
  7619. const struct ggml_tensor * src0,
  7620. struct ggml_tensor * dst) {
  7621. switch (src0->type) {
  7622. case GGML_TYPE_F32:
  7623. {
  7624. ggml_compute_forward_elu_f32(params, src0, dst);
  7625. } break;
  7626. default:
  7627. {
  7628. GGML_ASSERT(false);
  7629. } break;
  7630. }
  7631. }
  7632. // ggml_compute_forward_relu
  7633. static void ggml_compute_forward_relu_f32(
  7634. const struct ggml_compute_params * params,
  7635. const struct ggml_tensor * src0,
  7636. struct ggml_tensor * dst) {
  7637. assert(params->ith == 0);
  7638. assert(ggml_are_same_shape(src0, dst));
  7639. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7640. return;
  7641. }
  7642. const int n = ggml_nrows(src0);
  7643. const int nc = src0->ne[0];
  7644. assert(dst->nb[0] == sizeof(float));
  7645. assert(src0->nb[0] == sizeof(float));
  7646. for (int i = 0; i < n; i++) {
  7647. ggml_vec_relu_f32(nc,
  7648. (float *) ((char *) dst->data + i*( dst->nb[1])),
  7649. (float *) ((char *) src0->data + i*(src0->nb[1])));
  7650. }
  7651. }
  7652. static void ggml_compute_forward_relu(
  7653. const struct ggml_compute_params * params,
  7654. const struct ggml_tensor * src0,
  7655. struct ggml_tensor * dst) {
  7656. switch (src0->type) {
  7657. case GGML_TYPE_F32:
  7658. {
  7659. ggml_compute_forward_relu_f32(params, src0, dst);
  7660. } break;
  7661. default:
  7662. {
  7663. GGML_ASSERT(false);
  7664. } break;
  7665. }
  7666. }
  7667. // ggml_compute_forward_gelu
  7668. static void ggml_compute_forward_gelu_f32(
  7669. const struct ggml_compute_params * params,
  7670. const struct ggml_tensor * src0,
  7671. struct ggml_tensor * dst) {
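// ggml_vec_gelu_f32 evaluates the usual tanh-based GELU approximation,
// roughly 0.5*x*(1 + tanh(sqrt(2/pi)*(x + 0.044715*x^3)))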
  7672. GGML_ASSERT(ggml_is_contiguous_except_dim_1(src0));
  7673. GGML_ASSERT(ggml_is_contiguous_except_dim_1(dst));
  7674. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  7675. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7676. return;
  7677. }
  7678. const int ith = params->ith;
  7679. const int nth = params->nth;
  7680. const int nc = src0->ne[0];
  7681. const int nr = ggml_nrows(src0);
  7682. // rows per thread
  7683. const int dr = (nr + nth - 1)/nth;
  7684. // row range for this thread
  7685. const int ir0 = dr*ith;
  7686. const int ir1 = MIN(ir0 + dr, nr);
  7687. for (int i1 = ir0; i1 < ir1; i1++) {
  7688. ggml_vec_gelu_f32(nc,
  7689. (float *) ((char *) dst->data + i1*( dst->nb[1])),
  7690. (float *) ((char *) src0->data + i1*(src0->nb[1])));
  7691. #ifndef NDEBUG
  7692. for (int k = 0; k < nc; k++) {
  7693. const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k];
  7694. UNUSED(x);
  7695. assert(!isnan(x));
  7696. assert(!isinf(x));
  7697. }
  7698. #endif
  7699. }
  7700. }
  7701. static void ggml_compute_forward_gelu(
  7702. const struct ggml_compute_params * params,
  7703. const struct ggml_tensor * src0,
  7704. struct ggml_tensor * dst) {
  7705. switch (src0->type) {
  7706. case GGML_TYPE_F32:
  7707. {
  7708. ggml_compute_forward_gelu_f32(params, src0, dst);
  7709. } break;
  7710. default:
  7711. {
  7712. GGML_ASSERT(false);
  7713. } break;
  7714. }
  7715. }
  7716. // ggml_compute_forward_gelu_quick
  7717. static void ggml_compute_forward_gelu_quick_f32(
  7718. const struct ggml_compute_params * params,
  7719. const struct ggml_tensor * src0,
  7720. struct ggml_tensor * dst) {
  7721. GGML_ASSERT(ggml_is_contiguous_except_dim_1(src0));
  7722. GGML_ASSERT(ggml_is_contiguous_except_dim_1(dst));
  7723. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  7724. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7725. return;
  7726. }
  7727. const int ith = params->ith;
  7728. const int nth = params->nth;
  7729. const int nc = src0->ne[0];
  7730. const int nr = ggml_nrows(src0);
  7731. // rows per thread
  7732. const int dr = (nr + nth - 1)/nth;
  7733. // row range for this thread
  7734. const int ir0 = dr*ith;
  7735. const int ir1 = MIN(ir0 + dr, nr);
  7736. for (int i1 = ir0; i1 < ir1; i1++) {
  7737. ggml_vec_gelu_quick_f32(nc,
  7738. (float *) ((char *) dst->data + i1*( dst->nb[1])),
  7739. (float *) ((char *) src0->data + i1*(src0->nb[1])));
  7740. #ifndef NDEBUG
  7741. for (int k = 0; k < nc; k++) {
  7742. const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k];
  7743. UNUSED(x);
  7744. assert(!isnan(x));
  7745. assert(!isinf(x));
  7746. }
  7747. #endif
  7748. }
  7749. }
  7750. static void ggml_compute_forward_gelu_quick(
  7751. const struct ggml_compute_params * params,
  7752. const struct ggml_tensor * src0,
  7753. struct ggml_tensor * dst) {
  7754. switch (src0->type) {
  7755. case GGML_TYPE_F32:
  7756. {
  7757. ggml_compute_forward_gelu_quick_f32(params, src0, dst);
  7758. } break;
  7759. default:
  7760. {
  7761. GGML_ASSERT(false);
  7762. } break;
  7763. }
  7764. }
  7765. // ggml_compute_forward_silu
  7766. static void ggml_compute_forward_silu_f32(
  7767. const struct ggml_compute_params * params,
  7768. const struct ggml_tensor * src0,
  7769. struct ggml_tensor * dst) {
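// SiLU (a.k.a. swish): y = x * sigmoid(x) = x / (1 + exp(-x)), applied row by row below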
  7770. GGML_ASSERT(ggml_is_contiguous_except_dim_1(src0));
  7771. GGML_ASSERT(ggml_is_contiguous_except_dim_1(dst));
  7772. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  7773. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7774. return;
  7775. }
  7776. const int ith = params->ith;
  7777. const int nth = params->nth;
  7778. const int nc = src0->ne[0];
  7779. const int nr = ggml_nrows(src0);
  7780. // rows per thread
  7781. const int dr = (nr + nth - 1)/nth;
  7782. // row range for this thread
  7783. const int ir0 = dr*ith;
  7784. const int ir1 = MIN(ir0 + dr, nr);
  7785. for (int i1 = ir0; i1 < ir1; i1++) {
  7786. ggml_vec_silu_f32(nc,
  7787. (float *) ((char *) dst->data + i1*( dst->nb[1])),
  7788. (float *) ((char *) src0->data + i1*(src0->nb[1])));
  7789. #ifndef NDEBUG
  7790. for (int k = 0; k < nc; k++) {
  7791. const float x = ((float *) ((char *) dst->data + i1*(dst->nb[1])))[k];
  7792. UNUSED(x);
  7793. assert(!isnan(x));
  7794. assert(!isinf(x));
  7795. }
  7796. #endif
  7797. }
  7798. }
  7799. static void ggml_compute_forward_silu(
  7800. const struct ggml_compute_params * params,
  7801. const struct ggml_tensor * src0,
  7802. struct ggml_tensor * dst) {
  7803. switch (src0->type) {
  7804. case GGML_TYPE_F32:
  7805. {
  7806. ggml_compute_forward_silu_f32(params, src0, dst);
  7807. } break;
  7808. default:
  7809. {
  7810. GGML_ASSERT(false);
  7811. } break;
  7812. }
  7813. }
  7814. // ggml_compute_forward_leaky_relu
  7815. static void ggml_compute_forward_leaky_relu_f32(
  7816. const struct ggml_compute_params * params,
  7817. const struct ggml_tensor * src0,
  7818. struct ggml_tensor * dst) {
  7819. assert(params->ith == 0);
  7820. assert(ggml_are_same_shape(src0, dst));
  7821. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7822. return;
  7823. }
  7824. const int n = ggml_nrows(src0);
  7825. const int nc = src0->ne[0];
  7826. float negative_slope;
  7827. memcpy(&negative_slope, dst->op_params, sizeof(float));
  7828. assert(dst->nb[0] == sizeof(float));
  7829. assert(src0->nb[0] == sizeof(float));
  7830. for (int i = 0; i < n; i++) {
  7831. ggml_vec_leaky_relu_f32(nc,
  7832. (float *) ((char *) dst->data + i*( dst->nb[1])),
  7833. (float *) ((char *) src0->data + i*(src0->nb[1])), negative_slope);
  7834. }
  7835. }
  7836. static void ggml_compute_forward_leaky_relu(
  7837. const struct ggml_compute_params * params,
  7838. const struct ggml_tensor * src0,
  7839. struct ggml_tensor * dst) {
  7840. switch (src0->type) {
  7841. case GGML_TYPE_F32:
  7842. {
  7843. ggml_compute_forward_leaky_relu_f32(params, src0, dst);
  7844. } break;
  7845. default:
  7846. {
  7847. GGML_ASSERT(false);
  7848. } break;
  7849. }
  7850. }
  7851. // ggml_compute_forward_silu_back
  7852. static void ggml_compute_forward_silu_back_f32(
  7853. const struct ggml_compute_params * params,
  7854. const struct ggml_tensor * src0,
  7855. const struct ggml_tensor * grad,
  7856. struct ggml_tensor * dst) {
  7857. GGML_ASSERT(ggml_is_contiguous_except_dim_1(grad));
  7858. GGML_ASSERT(ggml_is_contiguous_except_dim_1(src0));
  7859. GGML_ASSERT(ggml_is_contiguous_except_dim_1(dst));
  7860. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  7861. GGML_ASSERT(ggml_are_same_shape(src0, grad));
  7862. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7863. return;
  7864. }
  7865. const int ith = params->ith;
  7866. const int nth = params->nth;
  7867. const int nc = src0->ne[0];
  7868. const int nr = ggml_nrows(src0);
  7869. // rows per thread
  7870. const int dr = (nr + nth - 1)/nth;
  7871. // row range for this thread
  7872. const int ir0 = dr*ith;
  7873. const int ir1 = MIN(ir0 + dr, nr);
  7874. for (int i1 = ir0; i1 < ir1; i1++) {
  7875. ggml_vec_silu_backward_f32(nc,
  7876. (float *) ((char *) dst->data + i1*( dst->nb[1])),
  7877. (float *) ((char *) src0->data + i1*(src0->nb[1])),
  7878. (float *) ((char *) grad->data + i1*(grad->nb[1])));
  7879. #ifndef NDEBUG
  7880. for (int k = 0; k < nc; k++) {
  7881. const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k];
  7882. UNUSED(x);
  7883. assert(!isnan(x));
  7884. assert(!isinf(x));
  7885. }
  7886. #endif
  7887. }
  7888. }
  7889. static void ggml_compute_forward_silu_back(
  7890. const struct ggml_compute_params * params,
  7891. const struct ggml_tensor * src0,
  7892. const struct ggml_tensor * grad,
  7893. struct ggml_tensor * dst) {
  7894. switch (src0->type) {
  7895. case GGML_TYPE_F32:
  7896. {
  7897. ggml_compute_forward_silu_back_f32(params, src0, grad, dst);
  7898. } break;
  7899. default:
  7900. {
  7901. GGML_ASSERT(false);
  7902. } break;
  7903. }
  7904. }
  7905. static void ggml_compute_forward_hardswish_f32(
  7906. const struct ggml_compute_params * params,
  7907. const struct ggml_tensor * src0,
  7908. struct ggml_tensor * dst) {
  7909. assert(params->ith == 0);
  7910. assert(ggml_are_same_shape(src0, dst));
  7911. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7912. return;
  7913. }
  7914. const int n = ggml_nrows(src0);
  7915. const int nc = src0->ne[0];
  7916. assert(dst->nb[0] == sizeof(float));
  7917. assert(src0->nb[0] == sizeof(float));
  7918. for (int i = 0; i < n; i++) {
  7919. ggml_vec_hardswish_f32(nc,
  7920. (float *) ((char *) dst->data + i*( dst->nb[1])),
  7921. (float *) ((char *) src0->data + i*(src0->nb[1])));
  7922. }
  7923. }
  7924. static void ggml_compute_forward_hardswish(
  7925. const struct ggml_compute_params * params,
  7926. const struct ggml_tensor * src0,
  7927. struct ggml_tensor * dst) {
  7928. switch (src0->type) {
  7929. case GGML_TYPE_F32:
  7930. {
  7931. ggml_compute_forward_hardswish_f32(params, src0, dst);
  7932. } break;
  7933. default:
  7934. {
  7935. GGML_ASSERT(false);
  7936. } break;
  7937. }
  7938. }
  7939. static void ggml_compute_forward_hardsigmoid_f32(
  7940. const struct ggml_compute_params * params,
  7941. const struct ggml_tensor * src0,
  7942. struct ggml_tensor * dst) {
  7943. assert(params->ith == 0);
  7944. assert(ggml_are_same_shape(src0, dst));
  7945. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7946. return;
  7947. }
  7948. const int n = ggml_nrows(src0);
  7949. const int nc = src0->ne[0];
  7950. assert(dst->nb[0] == sizeof(float));
  7951. assert(src0->nb[0] == sizeof(float));
  7952. for (int i = 0; i < n; i++) {
  7953. ggml_vec_hardsigmoid_f32(nc,
  7954. (float *) ((char *) dst->data + i*( dst->nb[1])),
  7955. (float *) ((char *) src0->data + i*(src0->nb[1])));
  7956. }
  7957. }
  7958. static void ggml_compute_forward_hardsigmoid(
  7959. const struct ggml_compute_params * params,
  7960. const struct ggml_tensor * src0,
  7961. struct ggml_tensor * dst) {
  7962. switch (src0->type) {
  7963. case GGML_TYPE_F32:
  7964. {
  7965. ggml_compute_forward_hardsigmoid_f32(params, src0, dst);
  7966. } break;
  7967. default:
  7968. {
  7969. GGML_ASSERT(false);
  7970. } break;
  7971. }
  7972. }
  7973. // ggml_compute_forward_norm
  7974. static void ggml_compute_forward_norm_f32(
  7975. const struct ggml_compute_params * params,
  7976. const struct ggml_tensor * src0,
  7977. struct ggml_tensor * dst) {
  7978. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  7979. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7980. return;
  7981. }
  7982. GGML_ASSERT(src0->nb[0] == sizeof(float));
  7983. const int ith = params->ith;
  7984. const int nth = params->nth;
  7985. GGML_TENSOR_UNARY_OP_LOCALS
  7986. float eps;
  7987. memcpy(&eps, dst->op_params, sizeof(float));
  7988. GGML_ASSERT(eps > 0.0f);
  7989. // TODO: optimize
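// per-row normalization: y = (x - mean(x)) / sqrt(var(x) + eps); the op has no affine
// parameters, so any scale/shift is applied by separate ops in the graph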
  7990. for (int64_t i03 = 0; i03 < ne03; i03++) {
  7991. for (int64_t i02 = 0; i02 < ne02; i02++) {
  7992. for (int64_t i01 = ith; i01 < ne01; i01 += nth) {
  7993. const float * x = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
  7994. ggml_float sum = 0.0;
  7995. for (int64_t i00 = 0; i00 < ne00; i00++) {
  7996. sum += (ggml_float)x[i00];
  7997. }
  7998. float mean = sum/ne00;
  7999. float * y = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3);
  8000. ggml_float sum2 = 0.0;
  8001. for (int64_t i00 = 0; i00 < ne00; i00++) {
  8002. float v = x[i00] - mean;
  8003. y[i00] = v;
  8004. sum2 += (ggml_float)(v*v);
  8005. }
  8006. float variance = sum2/ne00;
  8007. const float scale = 1.0f/sqrtf(variance + eps);
  8008. ggml_vec_scale_f32(ne00, y, scale);
  8009. }
  8010. }
  8011. }
  8012. }
  8013. static void ggml_compute_forward_norm(
  8014. const struct ggml_compute_params * params,
  8015. const struct ggml_tensor * src0,
  8016. struct ggml_tensor * dst) {
  8017. switch (src0->type) {
  8018. case GGML_TYPE_F32:
  8019. {
  8020. ggml_compute_forward_norm_f32(params, src0, dst);
  8021. } break;
  8022. default:
  8023. {
  8024. GGML_ASSERT(false);
  8025. } break;
  8026. }
  8027. }
// ggml_compute_forward_rms_norm
  8029. static void ggml_compute_forward_rms_norm_f32(
  8030. const struct ggml_compute_params * params,
  8031. const struct ggml_tensor * src0,
  8032. struct ggml_tensor * dst) {
  8033. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  8034. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8035. return;
  8036. }
  8037. GGML_ASSERT(src0->nb[0] == sizeof(float));
  8038. const int ith = params->ith;
  8039. const int nth = params->nth;
  8040. GGML_TENSOR_UNARY_OP_LOCALS
  8041. float eps;
  8042. memcpy(&eps, dst->op_params, sizeof(float));
  8043. GGML_ASSERT(eps > 0.0f);
  8044. // TODO: optimize
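// RMS norm: y = x / sqrt(mean(x^2) + eps), computed independently for each row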
  8045. for (int64_t i03 = 0; i03 < ne03; i03++) {
  8046. for (int64_t i02 = 0; i02 < ne02; i02++) {
  8047. for (int64_t i01 = ith; i01 < ne01; i01 += nth) {
  8048. const float * x = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
  8049. ggml_float sum = 0.0;
  8050. for (int64_t i00 = 0; i00 < ne00; i00++) {
  8051. sum += (ggml_float)(x[i00] * x[i00]);
  8052. }
  8053. const float mean = sum/ne00;
  8054. float * y = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3);
  8055. memcpy(y, x, ne00 * sizeof(float));
  8056. // for (int i00 = 0; i00 < ne00; i00++) {
  8057. // y[i00] = x[i00];
  8058. // }
  8059. const float scale = 1.0f/sqrtf(mean + eps);
  8060. ggml_vec_scale_f32(ne00, y, scale);
  8061. }
  8062. }
  8063. }
  8064. }
  8065. static void ggml_compute_forward_rms_norm(
  8066. const struct ggml_compute_params * params,
  8067. const struct ggml_tensor * src0,
  8068. struct ggml_tensor * dst) {
  8069. switch (src0->type) {
  8070. case GGML_TYPE_F32:
  8071. {
  8072. ggml_compute_forward_rms_norm_f32(params, src0, dst);
  8073. } break;
  8074. default:
  8075. {
  8076. GGML_ASSERT(false);
  8077. } break;
  8078. }
  8079. }
  8080. static void ggml_compute_forward_rms_norm_back_f32(
  8081. const struct ggml_compute_params * params,
  8082. const struct ggml_tensor * src0,
  8083. const struct ggml_tensor * src1,
  8084. struct ggml_tensor * dst) {
  8085. GGML_ASSERT(ggml_are_same_shape(src0, dst) && ggml_are_same_shape(src0, src1));
  8086. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8087. return;
  8088. }
  8089. GGML_ASSERT(src0->nb[0] == sizeof(float));
  8090. const int ith = params->ith;
  8091. const int nth = params->nth;
  8092. GGML_TENSOR_BINARY_OP_LOCALS
  8093. float eps;
  8094. memcpy(&eps, dst->op_params, sizeof(float));
  8095. // TODO: optimize
  8096. for (int64_t i03 = 0; i03 < ne03; i03++) {
  8097. for (int64_t i02 = 0; i02 < ne02; i02++) {
  8098. for (int64_t i01 = ith; i01 < ne01; i01 += nth) {
  8099. // src1 is same shape as src0 => same indices
  8100. const int64_t i11 = i01;
  8101. const int64_t i12 = i02;
  8102. const int64_t i13 = i03;
  8103. const float * x = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
  8104. const float * dz = (float *) ((char *) src1->data + i11*nb11 + i12*nb12 + i13*nb13);
  8105. ggml_float sum_xx = 0.0;
  8106. ggml_float sum_xdz = 0.0;
  8107. for (int64_t i00 = 0; i00 < ne00; i00++) {
  8108. sum_xx += (ggml_float)(x[i00] * x[i00]);
  8109. sum_xdz += (ggml_float)(x[i00] * dz[i00]);
  8110. }
  8111. //const float mean = (float)(sum_xx)/ne00;
  8112. const float mean_eps = (float)(sum_xx)/ne00 + eps;
  8113. const float sum_eps = (float)(sum_xx) + eps*ne00;
  8114. //const float mean_xdz = (float)(sum_xdz)/ne00;
  8115. // we could cache rms from forward pass to improve performance.
  8116. // to do this implement ggml_rms and compose ggml_rms_norm using ggml_rms.
  8117. //const float rms = sqrtf(mean_eps);
  8118. const float rrms = 1.0f / sqrtf(mean_eps);
  8119. //const float scale = -rrms/(ne00 * mean_eps); // -1/(n*rms**3)
  8120. {
  8121. // z = rms_norm(x)
  8122. //
  8123. // rms_norm(src0) =
  8124. // scale(
  8125. // src0,
  8126. // div(
  8127. // 1,
  8128. // sqrt(
  8129. // add(
  8130. // scale(
  8131. // sum(
  8132. // sqr(
  8133. // src0)),
  8134. // (1.0/N)),
  8135. // eps))));
  8136. // postorder:
  8137. // ## op args grad
  8138. // 00 param src0 grad[#00]
  8139. // 01 const 1
  8140. // 02 sqr (#00) grad[#02]
  8141. // 03 sum (#02) grad[#03]
  8142. // 04 const 1/N
  8143. // 05 scale (#03, #04) grad[#05]
  8144. // 06 const eps
  8145. // 07 add (#05, #06) grad[#07]
  8146. // 08 sqrt (#07) grad[#08]
  8147. // 09 div (#01,#08) grad[#09]
  8148. // 10 scale (#00,#09) grad[#10]
  8149. //
  8150. // backward pass, given grad[#10]
  8151. // #10: scale
  8152. // grad[#00] += scale(grad[#10],#09)
  8153. // grad[#09] += sum(mul(grad[#10],#00))
  8154. // #09: div
  8155. // grad[#08] += neg(mul(grad[#09], div(#09,#08)))
  8156. // #08: sqrt
  8157. // grad[#07] += mul(grad[#08], div(0.5, #08))
  8158. // #07: add
  8159. // grad[#05] += grad[#07]
  8160. // #05: scale
  8161. // grad[#03] += scale(grad[#05],#04)
  8162. // #03: sum
  8163. // grad[#02] += repeat(grad[#03], #02)
  8164. // #02:
  8165. // grad[#00] += scale(mul(#00, grad[#02]), 2.0)
  8166. //
  8167. // substitute and simplify:
  8168. // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, grad[#02]), 2.0)
  8169. // grad[#02] = repeat(grad[#03], #02)
  8170. // grad[#02] = repeat(scale(grad[#05],#04), #02)
  8171. // grad[#02] = repeat(scale(grad[#07],#04), #02)
  8172. // grad[#02] = repeat(scale(mul(grad[#08], div(0.5, #08)),#04), #02)
  8173. // grad[#02] = repeat(scale(mul(neg(mul(grad[#09], div(#09,#08))), div(0.5, #08)),#04), #02)
  8174. // grad[#02] = repeat(scale(mul(neg(mul(sum(mul(grad[#10],#00)), div(#09,#08))), div(0.5, #08)),#04), #02)
  8175. // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(#09,#08) * div(0.5, #08) * (1/N)), #02)
  8176. // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(div(#01,#08),#08) * div(0.5, #08) * (1/N)), #02)
  8177. // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(1,#08*#08) * div(0.5, #08) * (1/N)), #02)
  8178. // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N)), #02)
  8179. // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, grad[#02]), 2.0)
  8180. // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, repeat(-(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N)), #02)), 2.0)
  8181. // grad[#00] = scale(grad(#10), #09) + scale(scale(#00, -(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N))), 2.0)
  8182. // grad[#00] = scale(grad(#10), #09) + scale(#00, -(sum(mul(grad[#10],#00)) * div(1,#07) * div(1,#08) * (1/N)))
  8183. // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(1,#07*#08) * (-1/N))
  8184. // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(1,#07*#08) * (-1/N))
  8185. // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(1,mean_eps*rms) * (-1/N))
  8186. // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*mean_eps))
  8187. // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*(sum_xx/N+eps)))
  8188. // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*sum_xx+rms*N*eps))
  8189. // grad[#00] = scale(dz, rrms) + scale(x, sum(mul(dz,x)) * div(-1,rms*N*mean_eps))
  8190. // grad[#00] = scale(dz, rrms) + scale(x, sum_xdz * div(-1,rms*N*mean_eps))
  8191. // a = b*c + d*e
  8192. // a = b*c*f/f + d*e*f/f
  8193. // a = (b*c*f + d*e*f)*(1/f)
  8194. // a = (b*c*(1/c) + d*e*(1/c))*(1/(1/c))
  8195. // a = (b + d*e/c)*c
  8196. // b = dz, c = rrms, d = x, e = sum_xdz * div(-1,rms*N*mean_eps)
  8197. // a = (dz + x*sum_xdz * div(-1,rms*N*mean_eps)/rrms)*rrms
  8198. // a = (dz + x*sum_xdz * div(-1,rms*N*mean_eps)*rms)*rrms
  8199. // a = (dz + x*sum_xdz * div(-rms,rms*N*mean_eps))*rrms
  8200. // a = (dz + x*sum_xdz * div(-1,N*mean_eps))*rrms
  8201. // a = (dz + x*div(-sum_xdz,N*mean_eps))*rrms
  8202. // a = (dz + x*div(-mean_xdz,mean_eps))*rrms
  8203. // grad[#00] = scale(dz + scale(x, div(-mean_xdz,mean_eps)),rrms)
  8204. // grad[#00] = scale(dz + scale(x, -mean_xdz/mean_eps),rrms)
  8205. // dx = scale(dz + scale(x, -mean_xdz/mean_eps),rrms)
  8206. }
  8207. // dx = scale(dz + scale(x, -mean_xdz/mean_eps),rrms)
  8208. // post-order:
  8209. // dx := x
  8210. // dx := scale(dx,-mean_xdz/mean_eps)
  8211. // dx := add(dx, dz)
  8212. // dx := scale(dx, rrms)
  8213. float * dx = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3);
  8214. ggml_vec_cpy_f32 (ne00, dx, x);
  8215. // ggml_vec_scale_f32(ne00, dx, -mean_xdz/mean_eps);
  8216. ggml_vec_scale_f32(ne00, dx, (float)(-sum_xdz)/sum_eps);
  8217. ggml_vec_acc_f32 (ne00, dx, dz);
  8218. ggml_vec_scale_f32(ne00, dx, rrms);
  8219. }
  8220. }
  8221. }
  8222. }
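// NOTE: the following is an illustrative, self-contained sketch of the closed form derived above,
// dx = scale(dz + scale(x, -sum_xdz/sum_eps), rrms), written with plain loops instead of the
// ggml_vec_* helpers. The name rms_norm_back_row_ref is hypothetical and not used by ggml;
// it only assumes <math.h>, which this file already includes.
static void rms_norm_back_row_ref(int64_t n, float * dx, const float * x, const float * dz, float eps) {
    double sum_xx  = 0.0; // sum of x[i]*x[i]
    double sum_xdz = 0.0; // sum of x[i]*dz[i], i.e. sum(mul(dz,x)) in the derivation
    for (int64_t i = 0; i < n; ++i) {
        sum_xx  += (double) x[i]*x[i];
        sum_xdz += (double) x[i]*dz[i];
    }
    const double mean_eps = sum_xx/n + eps;                // #07 = sum_xx/N + eps
    const double sum_eps  = sum_xx + eps*n;                // N*mean_eps
    const float  rrms     = (float) (1.0/sqrt(mean_eps));  // 1/rms
    for (int64_t i = 0; i < n; ++i) {
        // dx = (dz + x * (-sum_xdz/sum_eps)) * rrms
        dx[i] = (dz[i] + x[i]*(float)(-sum_xdz/sum_eps)) * rrms;
    }
}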
  8223. static void ggml_compute_forward_rms_norm_back(
  8224. const struct ggml_compute_params * params,
  8225. const struct ggml_tensor * src0,
  8226. const struct ggml_tensor * src1,
  8227. struct ggml_tensor * dst) {
  8228. switch (src0->type) {
  8229. case GGML_TYPE_F32:
  8230. {
  8231. ggml_compute_forward_rms_norm_back_f32(params, src0, src1, dst);
  8232. } break;
  8233. default:
  8234. {
  8235. GGML_ASSERT(false);
  8236. } break;
  8237. }
  8238. }
  8239. // ggml_compute_forward_group_norm
  8240. static void ggml_compute_forward_group_norm_f32(
  8241. const struct ggml_compute_params * params,
  8242. const struct ggml_tensor * src0,
  8243. struct ggml_tensor * dst) {
  8244. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  8245. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8246. return;
  8247. }
  8248. GGML_ASSERT(src0->nb[0] == sizeof(float));
  8249. const int ith = params->ith;
  8250. const int nth = params->nth;
  8251. GGML_TENSOR_UNARY_OP_LOCALS
  8252. const float eps = 1e-6f; // TODO: make this a parameter
  8253. // TODO: optimize
  8254. int n_channels = src0->ne[2];
  8255. int n_groups = dst->op_params[0];
  8256. int n_channels_per_group = (n_channels + n_groups - 1) / n_groups;
  8257. for (int i = ith; i < n_groups; i+=nth) {
  8258. int start = i * n_channels_per_group;
  8259. int end = start + n_channels_per_group;
  8260. if (end > n_channels) {
  8261. end = n_channels;
  8262. }
  8263. int step = end - start;
  8264. for (int64_t i03 = 0; i03 < ne03; i03++) {
  8265. ggml_float sum = 0.0;
  8266. for (int64_t i02 = start; i02 < end; i02++) {
  8267. for (int64_t i01 = 0; i01 < ne01; i01++) {
  8268. const float * x = (float *)((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03);
  8269. for (int64_t i00 = 0; i00 < ne00; i00++) {
  8270. sum += (ggml_float)x[i00];
  8271. }
  8272. }
  8273. }
  8274. float mean = sum / (ne00 * ne01 * step);
  8275. ggml_float sum2 = 0.0;
  8276. for (int64_t i02 = start; i02 < end; i02++) {
  8277. for (int64_t i01 = 0; i01 < ne01; i01++) {
  8278. const float * x = (float *)((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03);
  8279. float * y = (float *)((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3);
  8280. for (int64_t i00 = 0; i00 < ne00; i00++) {
  8281. float v = x[i00] - mean;
  8282. y[i00] = v;
  8283. sum2 += (ggml_float)(v * v);
  8284. }
  8285. }
  8286. }
  8287. float variance = sum2 / (ne00 * ne01 * step);
  8288. const float scale = 1.0f / sqrtf(variance + eps);
  8289. for (int64_t i02 = start; i02 < end; i02++) {
  8290. for (int64_t i01 = 0; i01 < ne01; i01++) {
  8291. float * y = (float *)((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3);
  8292. ggml_vec_scale_f32(ne00, y, scale);
  8293. }
  8294. }
  8295. }
  8296. }
  8297. }
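// NOTE: worked example of the group partitioning above (illustrative numbers): with
// n_channels = 10 and n_groups = 4, n_channels_per_group = (10+4-1)/4 = 3, so the groups cover
// channel ranges [0,3), [3,6), [6,9) and [9,10); the last group is clamped to n_channels and its
// mean/variance are taken over ne00*ne01*step elements with step = 1.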
  8298. static void ggml_compute_forward_group_norm(
  8299. const struct ggml_compute_params * params,
  8300. const struct ggml_tensor * src0,
  8301. struct ggml_tensor * dst) {
  8302. switch (src0->type) {
  8303. case GGML_TYPE_F32:
  8304. {
  8305. ggml_compute_forward_group_norm_f32(params, src0, dst);
  8306. } break;
  8307. default:
  8308. {
  8309. GGML_ASSERT(false);
  8310. } break;
  8311. }
  8312. }
  8313. // ggml_compute_forward_mul_mat
  8314. #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
  8315. // helper function to determine if it is better to use BLAS or not
  8316. // for large matrices, BLAS is faster
  8317. static bool ggml_compute_forward_mul_mat_use_blas(struct ggml_tensor * dst) {
  8318. const struct ggml_tensor * src0 = dst->src[0];
  8319. const struct ggml_tensor * src1 = dst->src[1];
  8320. //const int64_t ne00 = src0->ne[0];
  8321. //const int64_t ne01 = src0->ne[1];
  8322. const int64_t ne10 = src1->ne[0];
  8323. const int64_t ne0 = dst->ne[0];
  8324. const int64_t ne1 = dst->ne[1];
  8325. // NOTE: with GGML_OP_MUL_MAT_ID we don't want to go through the BLAS branch because it will dequantize (to_float)
  8326. // all the experts for each batch element and the processing would become incredibly slow
  8327. // TODO: find the optimal values for these
  8328. if (dst->op != GGML_OP_MUL_MAT_ID &&
  8329. ggml_is_contiguous(src0) &&
  8330. ggml_is_contiguous(src1) &&
  8331. //src0->type == GGML_TYPE_F32 &&
  8332. src1->type == GGML_TYPE_F32 &&
  8333. (ne0 >= 32 && ne1 >= 32 && ne10 >= 32)) {
  8334. /*printf("BLAS: %d %d %d %d %d\n", ne0, ne1, ne10, ne00, ne01);*/
  8335. return true;
  8336. }
  8337. return false;
  8338. }
  8339. #endif
  8340. static void ggml_compute_forward_mul_mat(
  8341. const struct ggml_compute_params * params,
  8342. const struct ggml_tensor * src0,
  8343. const struct ggml_tensor * src1,
  8344. struct ggml_tensor * dst) {
  8345. int64_t t0 = ggml_perf_time_us();
  8346. UNUSED(t0);
  8347. GGML_TENSOR_BINARY_OP_LOCALS
  8348. const int ith = params->ith;
  8349. const int nth = params->nth;
  8350. const enum ggml_type type = src0->type;
  8351. const bool src1_cont = ggml_is_contiguous(src1);
  8352. ggml_vec_dot_t const vec_dot = type_traits[type].vec_dot;
  8353. enum ggml_type const vec_dot_type = type_traits[type].vec_dot_type;
  8354. ggml_from_float_t const from_float_to_vec_dot = type_traits[vec_dot_type].from_float;
  8355. int64_t const vec_dot_num_rows = type_traits[type].nrows;
  8356. GGML_ASSERT(ne0 == ne01);
  8357. GGML_ASSERT(ne1 == ne11);
  8358. GGML_ASSERT(ne2 == ne12);
  8359. GGML_ASSERT(ne3 == ne13);
  8360. // we don't support permuted src0 or src1
  8361. GGML_ASSERT(nb00 == ggml_type_size(type));
  8362. GGML_ASSERT(nb10 == ggml_type_size(src1->type));
  8363. // dst cannot be transposed or permuted
  8364. GGML_ASSERT(nb0 == sizeof(float));
  8365. GGML_ASSERT(nb0 <= nb1);
  8366. GGML_ASSERT(nb1 <= nb2);
  8367. GGML_ASSERT(nb2 <= nb3);
  8368. // broadcast factors
  8369. const int64_t r2 = ne12/ne02;
  8370. const int64_t r3 = ne13/ne03;
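// NOTE: illustrative example of the broadcast factors: src0 with ne02 = 4, ne03 = 1 and src1 with
// ne12 = 8, ne13 = 1 give r2 = 2, r3 = 1, so each src0 matrix i02 is reused for the two src1
// matrices i12 = 2*i02 and 2*i02+1 via i02 = i12/r2 below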
  8371. // nb01 >= nb00 - src0 is not transposed
  8372. // compute by src0 rows
  8373. #if defined(GGML_USE_CLBLAST)
  8374. if (ggml_cl_can_mul_mat(src0, src1, dst)) {
  8375. if (params->ith == 0 && params->type == GGML_TASK_COMPUTE) {
  8376. ggml_cl_mul_mat(src0, src1, dst, params->wdata, params->wsize);
  8377. }
  8378. return;
  8379. }
  8380. #endif
  8381. #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
  8382. if (ggml_compute_forward_mul_mat_use_blas(dst)) {
  8383. const int64_t ne_plane = ne01*ne00;
  8384. const size_t desired_wsize = ne13*ne12*ne_plane*sizeof(float);
  8385. UNUSED(desired_wsize);
  8386. if (params->type == GGML_TASK_INIT) {
  8387. if (type != GGML_TYPE_F32) {
  8388. assert(params->wsize >= desired_wsize);
  8389. // parallelize by src0 rows
  8390. for (int64_t i13 = 0; i13 < ne13; i13++) {
  8391. for (int64_t i12 = 0; i12 < ne12; i12++) {
  8392. // broadcast src0 into src1 across 2nd,3rd dimension
  8393. const int64_t i03 = i13/r3;
  8394. const int64_t i02 = i12/r2;
  8395. const void * x = (char *) src0->data + i02*nb02 + i03*nb03;
  8396. float * const wdata = (float *) params->wdata + i13*ne12*ne_plane + i12*ne_plane;
  8397. ggml_to_float_t const to_float = type_traits[type].to_float;
  8398. for (int64_t i01 = ith; i01 < ne01; i01 += nth) {
  8399. to_float((const char *) x + i01*nb01, wdata + i01*ne00, ne00);
  8400. }
  8401. }
  8402. }
  8403. }
  8404. return;
  8405. }
  8406. if (params->type == GGML_TASK_FINALIZE) {
  8407. return;
  8408. }
  8409. // perform sgemm, parallelization controlled by blas lib
  8410. if (ith != 0) {
  8411. return;
  8412. }
  8413. //const int64_t tgemm0 = ggml_perf_time_us();
  8414. for (int64_t i13 = 0; i13 < ne13; i13++) {
  8415. for (int64_t i12 = 0; i12 < ne12; i12++) {
  8416. const int64_t i03 = i13/r3;
  8417. const int64_t i02 = i12/r2;
  8418. const void * x = (char *) src0->data + i02*nb02 + i03*nb03;
  8419. const float * y = (float *) ((char *) src1->data + i12*nb12 + i13*nb13);
  8420. float * d = (float *) ((char *) dst->data + i12*nb2 + i13*nb3);
  8421. if (type != GGML_TYPE_F32) {
  8422. x = (float *) params->wdata + i13*ne12*ne_plane + i12*ne_plane;
  8423. }
  8424. cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans,
  8425. ne1, ne01, ne10,
  8426. 1.0f, y, ne10,
  8427. x, ne00,
  8428. 0.0f, d, ne01);
  8429. }
  8430. }
  8431. //printf("cblas_sgemm = %.3f ms, %lld flops\n", (ggml_perf_time_us() - tgemm0)/1000.0, ne13*ne12*ne1*ne01*ne10*2);
  8432. //printf("CBLAS = %f ms, %d x %d x %d x %d\n", (ggml_perf_time_us() - t0)/1000.0, ne0, ne1, ne2, ne3);
  8433. return;
  8434. }
  8435. #endif
  8436. if (params->type == GGML_TASK_INIT) {
  8437. if (ith != 0) {
  8438. return;
  8439. }
  8440. if (src1->type != vec_dot_type) {
  8441. char * wdata = params->wdata;
  8442. const size_t row_size = ggml_row_size(vec_dot_type, ne10);
  8443. assert(params->wsize >= ne11*ne12*ne13*row_size);
  8444. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  8445. for (int64_t i13 = 0; i13 < ne13; ++i13) {
  8446. for (int64_t i12 = 0; i12 < ne12; ++i12) {
  8447. for (int64_t i11 = 0; i11 < ne11; ++i11) {
  8448. from_float_to_vec_dot((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11), (void *) wdata, ne10);
  8449. wdata += row_size;
  8450. }
  8451. }
  8452. }
  8453. }
  8454. return;
  8455. }
  8456. if (params->type == GGML_TASK_FINALIZE) {
  8457. return;
  8458. }
  8459. const void * wdata = (src1->type == vec_dot_type) ? src1->data : params->wdata;
  8460. const size_t row_size = ggml_row_size(vec_dot_type, ne10);
  8461. const int64_t nr0 = ne01; // src0 rows
  8462. const int64_t nr1 = ne1*ne12*ne13; // src1 rows
  8463. //printf("nr0 = %lld, nr1 = %lld\n", nr0, nr1);
  8464. // distribute the thread work across the inner or outer loop based on which one is larger
  8465. const int64_t nth0 = nr0 > nr1 ? nth : 1; // parallelize by src0 rows
  8466. const int64_t nth1 = nr0 > nr1 ? 1 : nth; // parallelize by src1 rows
  8467. const int64_t ith0 = ith % nth0;
  8468. const int64_t ith1 = ith / nth0;
  8469. const int64_t dr0 = (nr0 + nth0 - 1)/nth0;
  8470. const int64_t dr1 = (nr1 + nth1 - 1)/nth1;
  8471. const int64_t ir010 = dr0*ith0;
  8472. const int64_t ir011 = MIN(ir010 + dr0, nr0);
  8473. const int64_t ir110 = dr1*ith1;
  8474. const int64_t ir111 = MIN(ir110 + dr1, nr1);
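// NOTE: worked example of the split (illustrative numbers): with nth = 4, nr0 = 4096 src0 rows and
// nr1 = 8 src1 rows, nr0 > nr1 so nth0 = 4, nth1 = 1; then dr0 = 1024, dr1 = 8 and thread ith
// processes src0 rows [1024*ith, 1024*(ith+1)) against all 8 src1 rows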
  8475. //printf("ir010 = %6lld, ir011 = %6lld, ir110 = %6lld, ir111 = %6lld\n", ir010, ir011, ir110, ir111);
  8476. // threads with no work simply yield (not sure if it helps)
  8477. if (ir010 >= ir011 || ir110 >= ir111) {
  8478. sched_yield();
  8479. return;
  8480. }
  8481. assert(ne12 % ne02 == 0);
  8482. assert(ne13 % ne03 == 0);
  8483. // block-tiling attempt
  8484. const int64_t blck_0 = 16;
  8485. const int64_t blck_1 = 16;
  8486. // dot kernels can handle 1 row and col at a time, but mmla kernels can process 2 rows and cols
  8487. int64_t nrc = vec_dot_num_rows;
  8488. // TODO: currently the mmla kernels support only even numbered rows/cols.
  8489. // this check can be removed once they are extended to support odd numbered rows/cols too
  8490. if ((nr0 % 2 != 0) || (ne11 % 2 != 0)) {
  8491. nrc = 1;
  8492. }
  8493. const size_t src1_col_stride = src1_cont || src1->type != vec_dot_type ? row_size : nb11;
  8494. // attempt to reduce false-sharing (does not seem to make a difference)
  8495. // 16 * 2, accounting for mmla kernels
  8496. float tmp[32];
  8497. for (int64_t iir1 = ir110; iir1 < ir111; iir1 += blck_1) {
  8498. for (int64_t iir0 = ir010; iir0 < ir011; iir0 += blck_0) {
  8499. for (int64_t ir1 = iir1; ir1 < iir1 + blck_1 && ir1 < ir111; ir1 += nrc) {
  8500. const int64_t i13 = (ir1/(ne12*ne1));
  8501. const int64_t i12 = (ir1 - i13*ne12*ne1)/ne1;
  8502. const int64_t i11 = (ir1 - i13*ne12*ne1 - i12*ne1);
  8503. // broadcast src0 into src1
  8504. const int64_t i03 = i13/r3;
  8505. const int64_t i02 = i12/r2;
  8506. const int64_t i1 = i11;
  8507. const int64_t i2 = i12;
  8508. const int64_t i3 = i13;
  8509. const char * src0_row = (const char *) src0->data + (0 + i02*nb02 + i03*nb03);
8510. // if src1 is not contiguous in memory, we have to compute its offset from the strides;
8511. // otherwise the data has either been converted into params->wdata (and is contiguous there) or we are
8512. // using the original src1 data pointer, so we can index directly by row position
8513. // TODO: this is a bit of a hack, we should probably have a better way to handle this
  8514. const char * src1_col = (const char *) wdata +
  8515. (src1_cont || src1->type != vec_dot_type
  8516. ? (i11 + i12*ne11 + i13*ne12*ne11)*row_size
  8517. : (i11*nb11 + i12*nb12 + i13*nb13));
  8518. float * dst_col = (float *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3));
  8519. //for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir011; ++ir0) {
  8520. // vec_dot(ne00, &dst_col[ir0], src0_row + ir0*nb01, src1_col);
  8521. //}
  8522. for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir011; ir0 += nrc) {
  8523. vec_dot(ne00, &tmp[ir0 - iir0], (nrc>1 ? 16 : 0), src0_row + ir0*nb01, (nrc>1 ? nb01 : 0), src1_col, (nrc>1 ? src1_col_stride : 0), nrc);
  8524. }
  8525. for (int cn = 0; cn < nrc; ++cn) {
  8526. memcpy(&dst_col[iir0 + cn*nb1/nb0], tmp + (cn*16), (MIN(iir0 + blck_0, ir011) - iir0)*sizeof(float));
  8527. }
  8528. }
  8529. }
  8530. }
  8531. }
  8532. // ggml_compute_forward_mul_mat_id
  8533. static void ggml_compute_forward_mul_mat_id(
  8534. const struct ggml_compute_params * params,
  8535. const struct ggml_tensor * ids,
  8536. const struct ggml_tensor * src1,
  8537. struct ggml_tensor * dst) {
  8538. const struct ggml_tensor * src0 = dst->src[2]; // only for GGML_TENSOR_BINARY_OP_LOCALS
  8539. GGML_TENSOR_BINARY_OP_LOCALS
  8540. const int ith = params->ith;
  8541. const int nth = params->nth;
  8542. const enum ggml_type type = src0->type;
  8543. const bool src1_cont = ggml_is_contiguous(src1);
  8544. ggml_vec_dot_t const vec_dot = type_traits[type].vec_dot;
  8545. enum ggml_type const vec_dot_type = type_traits[type].vec_dot_type;
  8546. ggml_from_float_t const from_float_to_vec_dot = type_traits[vec_dot_type].from_float;
  8547. GGML_ASSERT(ne0 == ne01);
  8548. GGML_ASSERT(ne1 == ne11);
  8549. GGML_ASSERT(ne2 == ne12);
  8550. GGML_ASSERT(ne3 == ne13);
  8551. // we don't support permuted src0 or src1
  8552. GGML_ASSERT(nb00 == ggml_type_size(type));
  8553. GGML_ASSERT(nb10 == ggml_type_size(src1->type));
  8554. // dst cannot be transposed or permuted
  8555. GGML_ASSERT(nb0 == sizeof(float));
  8556. GGML_ASSERT(nb0 <= nb1);
  8557. GGML_ASSERT(nb1 <= nb2);
  8558. GGML_ASSERT(nb2 <= nb3);
  8559. // broadcast factors
  8560. const int64_t r2 = ne12/ne02;
  8561. const int64_t r3 = ne13/ne03;
  8562. // row groups
  8563. const int id = ggml_get_op_params_i32(dst, 0);
  8564. const int n_as = ggml_get_op_params_i32(dst, 1);
  8565. char * wdata_src1_end = (src1->type == vec_dot_type) ?
  8566. (char *) params->wdata :
  8567. (char *) params->wdata + GGML_PAD(ggml_row_size(vec_dot_type, ggml_nelements(src1)), sizeof(int64_t));
  8568. int64_t * matrix_row_counts = (int64_t *) (wdata_src1_end); // [n_as]
  8569. int64_t * matrix_rows = matrix_row_counts + n_as; // [n_as][ne11]
  8570. #define MMID_MATRIX_ROW(row_id, i1) matrix_rows[(row_id)*ne11 + (i1)]
  8571. if (params->type == GGML_TASK_INIT) {
  8572. if (ith != 0) {
  8573. return;
  8574. }
  8575. char * wdata = params->wdata;
  8576. if (src1->type != vec_dot_type) {
  8577. const size_t row_size = ggml_row_size(vec_dot_type, ne10);
  8578. assert(params->wsize >= ne11*ne12*ne13*row_size);
  8579. assert(src1->type == GGML_TYPE_F32);
  8580. for (int64_t i13 = 0; i13 < ne13; ++i13) {
  8581. for (int64_t i12 = 0; i12 < ne12; ++i12) {
  8582. for (int64_t i11 = 0; i11 < ne11; ++i11) {
  8583. from_float_to_vec_dot((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11), (void *) wdata, ne10);
  8584. wdata += row_size;
  8585. }
  8586. }
  8587. }
  8588. }
  8589. // initialize matrix_row_counts
  8590. GGML_ASSERT(wdata == wdata_src1_end);
  8591. memset(matrix_row_counts, 0, n_as*sizeof(int64_t));
  8592. // group rows by src0 matrix
  8593. for (int64_t i01 = 0; i01 < ids->ne[1]; i01++) {
  8594. const int32_t row_id = *(const int32_t *) ((const char *) ids->data + i01*ids->nb[1] + id*ids->nb[0]);
  8595. GGML_ASSERT(row_id >= 0 && row_id < n_as);
  8596. MMID_MATRIX_ROW(row_id, matrix_row_counts[row_id]) = i01;
  8597. matrix_row_counts[row_id] += 1;
  8598. }
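// NOTE: illustrative example of the grouping: with n_as = 4 experts and selected ids [2, 0, 2, 1]
// for ne11 = 4 src1 rows, this yields matrix_row_counts = {1, 1, 2, 0} and
// MMID_MATRIX_ROW(2, .) = {0, 2}, i.e. expert 2 will later multiply src1 rows 0 and 2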
  8599. return;
  8600. }
  8601. if (params->type == GGML_TASK_FINALIZE) {
  8602. return;
  8603. }
  8604. // compute each matrix multiplication in sequence
  8605. for (int cur_a = 0; cur_a < n_as; ++cur_a) {
  8606. const int64_t cne1 = matrix_row_counts[cur_a];
  8607. if (cne1 == 0) {
  8608. continue;
  8609. }
  8610. const struct ggml_tensor * src0_cur = dst->src[cur_a + 2];
  8611. const void * wdata = (src1->type == vec_dot_type) ? src1->data : params->wdata;
  8612. const size_t row_size = ggml_row_size(vec_dot_type, ne10);
  8613. const int64_t nr0 = ne01; // src0 rows
  8614. const int64_t nr1 = cne1*ne12*ne13; // src1 rows
  8615. //printf("nr0 = %lld, nr1 = %lld\n", nr0, nr1);
  8616. // distribute the thread work across the inner or outer loop based on which one is larger
  8617. const int64_t nth0 = nr0 > nr1 ? nth : 1; // parallelize by src0 rows
  8618. const int64_t nth1 = nr0 > nr1 ? 1 : nth; // parallelize by src1 rows
  8619. const int64_t ith0 = ith % nth0;
  8620. const int64_t ith1 = ith / nth0;
  8621. const int64_t dr0 = (nr0 + nth0 - 1)/nth0;
  8622. const int64_t dr1 = (nr1 + nth1 - 1)/nth1;
  8623. const int64_t ir010 = dr0*ith0;
  8624. const int64_t ir011 = MIN(ir010 + dr0, nr0);
  8625. const int64_t ir110 = dr1*ith1;
  8626. const int64_t ir111 = MIN(ir110 + dr1, nr1);
  8627. //printf("ir010 = %6lld, ir011 = %6lld, ir110 = %6lld, ir111 = %6lld\n", ir010, ir011, ir110, ir111);
  8628. // threads with no work simply yield (not sure if it helps)
  8629. if (ir010 >= ir011 || ir110 >= ir111) {
  8630. sched_yield();
  8631. continue;
  8632. }
  8633. assert(ne12 % ne02 == 0);
  8634. assert(ne13 % ne03 == 0);
  8635. // block-tiling attempt
  8636. const int64_t blck_0 = 16;
  8637. const int64_t blck_1 = 16;
  8638. // attempt to reduce false-sharing (does not seem to make a difference)
  8639. float tmp[16];
  8640. for (int64_t iir1 = ir110; iir1 < ir111; iir1 += blck_1) {
  8641. for (int64_t iir0 = ir010; iir0 < ir011; iir0 += blck_0) {
  8642. for (int64_t ir1 = iir1; ir1 < iir1 + blck_1 && ir1 < ir111; ++ir1) {
  8643. const int64_t i13 = (ir1/(ne12*cne1)); // Note: currently, src1 is always a matrix
  8644. const int64_t i12 = (ir1 - i13*ne12*cne1)/cne1;
  8645. const int64_t _i11 = (ir1 - i13*ne12*cne1 - i12*cne1);
  8646. const int64_t i11 = MMID_MATRIX_ROW(cur_a, _i11);
  8647. // broadcast src0 into src1
  8648. const int64_t i03 = i13/r3;
  8649. const int64_t i02 = i12/r2;
  8650. const int64_t i1 = i11;
  8651. const int64_t i2 = i12;
  8652. const int64_t i3 = i13;
  8653. const char * src0_row = (const char *) src0_cur->data + (0 + i02*nb02 + i03*nb03);
8654. // if src1 is not contiguous in memory, we have to compute its offset from the strides;
8655. // otherwise the data has either been converted into params->wdata (and is contiguous there) or we are
8656. // using the original src1 data pointer, so we can index directly by row position
8657. // TODO: this is a bit of a hack, we should probably have a better way to handle this
  8658. const char * src1_col = (const char *) wdata +
  8659. (src1_cont || src1->type != vec_dot_type
  8660. ? (i11 + i12*ne11 + i13*ne12*ne11)*row_size
  8661. : (i11*nb11 + i12*nb12 + i13*nb13));
  8662. float * dst_col = (float *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3));
  8663. //for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir011; ++ir0) {
  8664. // vec_dot(ne00, &dst_col[ir0], src0_row + ir0*nb01, src1_col);
  8665. //}
  8666. for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir011; ++ir0) {
  8667. vec_dot(ne00, &tmp[ir0 - iir0], 0, src0_row + ir0*nb01, 0, src1_col, 0, 1);
  8668. }
  8669. memcpy(&dst_col[iir0], tmp, (MIN(iir0 + blck_0, ir011) - iir0)*sizeof(float));
  8670. }
  8671. }
  8672. }
  8673. }
  8674. #undef MMID_MATRIX_ROW
  8675. }
  8676. // ggml_compute_forward_out_prod
  8677. static void ggml_compute_forward_out_prod_f32(
  8678. const struct ggml_compute_params * params,
  8679. const struct ggml_tensor * src0,
  8680. const struct ggml_tensor * src1,
  8681. struct ggml_tensor * dst) {
  8682. // int64_t t0 = ggml_perf_time_us();
  8683. // UNUSED(t0);
  8684. GGML_TENSOR_BINARY_OP_LOCALS
  8685. const int ith = params->ith;
  8686. const int nth = params->nth;
  8687. GGML_ASSERT(ne0 == ne00);
  8688. GGML_ASSERT(ne1 == ne10);
  8689. GGML_ASSERT(ne2 == ne02);
  8690. GGML_ASSERT(ne02 == ne12);
  8691. GGML_ASSERT(ne3 == ne13);
  8692. GGML_ASSERT(ne03 == ne13);
  8693. // we don't support permuted src0 or src1
  8694. GGML_ASSERT(nb00 == sizeof(float));
  8695. // dst cannot be transposed or permuted
  8696. GGML_ASSERT(nb0 == sizeof(float));
  8697. // GGML_ASSERT(nb0 <= nb1);
  8698. // GGML_ASSERT(nb1 <= nb2);
  8699. // GGML_ASSERT(nb2 <= nb3);
  8700. // nb01 >= nb00 - src0 is not transposed
  8701. // compute by src0 rows
  8702. // TODO: #if defined(GGML_USE_CUBLAS) ggml_cuda_out_prod
  8703. // TODO: #if defined(GGML_USE_CLBLAST)
  8704. #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
  8705. bool use_blas = ggml_is_matrix(src0) &&
  8706. ggml_is_matrix(src1) &&
  8707. ggml_is_contiguous(src0) &&
  8708. (ggml_is_contiguous(src1) || ggml_is_transposed(src1));
  8709. #endif
  8710. if (params->type == GGML_TASK_INIT) {
  8711. #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) // gemm beta will zero dst
  8712. if (use_blas) {
  8713. return;
  8714. }
  8715. #endif
  8716. if (ith != 0) {
  8717. return;
  8718. }
  8719. ggml_vec_set_f32(ne0*ne1*ne2*ne3, dst->data, 0);
  8720. return;
  8721. }
  8722. if (params->type == GGML_TASK_FINALIZE) {
  8723. return;
  8724. }
  8725. #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
  8726. if (use_blas) {
  8727. if (params->ith != 0) { // All threads other than the first do no work.
  8728. return;
  8729. }
  8730. // Arguments to ggml_compute_forward_out_prod (expressed as major,minor)
  8731. // src0: (k,n)
  8732. // src1: (k,m)
  8733. // dst: (m,n)
  8734. //
  8735. // Arguments to sgemm (see https://github.com/Reference-LAPACK/lapack/blob/master/BLAS/SRC/sgemm.f)
  8736. // Also expressed as (major,minor)
  8737. // a: (m,k): so src1 transposed
  8738. // b: (k,n): so src0
  8739. // c: (m,n)
  8740. //
  8741. // However, if ggml_is_transposed(src1) is true, then
  8742. // src1->data already contains a transposed version, so sgemm mustn't
  8743. // transpose it further.
  8744. int n = src0->ne[0];
  8745. int k = src0->ne[1];
  8746. int m = src1->ne[0];
  8747. int transposeA, lda;
  8748. if (!ggml_is_transposed(src1)) {
  8749. transposeA = CblasTrans;
  8750. lda = m;
  8751. } else {
  8752. transposeA = CblasNoTrans;
  8753. lda = k;
  8754. }
  8755. float * a = (float *) ((char *) src1->data);
  8756. float * b = (float *) ((char *) src0->data);
  8757. float * c = (float *) ((char *) dst->data);
  8758. cblas_sgemm(CblasRowMajor, transposeA, CblasNoTrans, m, n, k, 1.0, a, lda, b, n, 0.0, c, n);
  8759. return;
  8760. }
  8761. #endif
  8762. // dst[:,:,:,:] = 0
  8763. // for i2,i3:
  8764. // for i1:
  8765. // for i01:
  8766. // for i0:
  8767. // dst[i0,i1,i2,i3] += src0[i0,i01,i2,i3] * src1[i1,i01,i2,i3]
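// i.e. for each (i2,i3) plane, dst column i1 accumulates src0 column i01 scaled by src1[i1,i01],
// summed over i01 - an outer-product style rank update, implemented by the vec_mad loops below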
  8768. // parallelize by last three dimensions
  8769. // total rows in dst
  8770. const int64_t nr = ne1*ne2*ne3;
  8771. // rows per thread
  8772. const int64_t dr = (nr + nth - 1)/nth;
  8773. // row range for this thread
  8774. const int64_t ir0 = dr*ith;
  8775. const int64_t ir1 = MIN(ir0 + dr, nr);
  8776. // block-tiling attempt
  8777. const int64_t blck_0 = MAX(GGML_VEC_MAD_UNROLL, 32);
  8778. const int64_t blck_1 = 16;
  8779. for (int64_t bir = ir0; bir < ir1; bir += blck_1) {
  8780. const int64_t bir1 = MIN(bir + blck_1, ir1);
  8781. for (int64_t bi01 = 0; bi01 < ne01; bi01 += blck_0) {
  8782. const int64_t bne01 = MIN(bi01 + blck_0, ne01);
  8783. for (int64_t ir = bir; ir < bir1; ++ir) {
  8784. // dst indices
  8785. const int64_t i3 = ir/(ne2*ne1);
  8786. const int64_t i2 = (ir - i3*ne2*ne1)/ne1;
  8787. const int64_t i1 = (ir - i3*ne2*ne1 - i2*ne1);
  8788. const int64_t i02 = i2;
  8789. const int64_t i03 = i3;
  8790. //const int64_t i10 = i1;
  8791. const int64_t i12 = i2;
  8792. const int64_t i13 = i3;
  8793. #if GGML_VEC_MAD_UNROLL > 2
  8794. const int64_t bne01_unroll = bne01 - (bne01 % GGML_VEC_MAD_UNROLL);
  8795. for (int64_t i01 = bi01; i01 < bne01_unroll; i01 += GGML_VEC_MAD_UNROLL) {
  8796. const int64_t i11 = i01;
  8797. float * s0 = (float *) ((char *) src0->data + ( i01*nb01 + i02*nb02 + i03*nb03));
  8798. float * s1 = (float *) ((char *) src1->data + (i1*nb10 + i11*nb11 + i12*nb12 + i13*nb13));
  8799. float * d = (float *) ((char *) dst->data + ( i1*nb1 + i2*nb2 + i3*nb3));
  8800. ggml_vec_mad_f32_unroll(ne0, nb01, nb11, d, s0, s1);
  8801. }
  8802. for (int64_t i01 = bne01_unroll; i01 < bne01; ++i01) {
  8803. const int64_t i11 = i01;
  8804. float * s0 = (float *) ((char *) src0->data + ( i01*nb01 + i02*nb02 + i03*nb03));
  8805. float * s1 = (float *) ((char *) src1->data + (i1*nb10 + i11*nb11 + i12*nb12 + i13*nb13));
  8806. float * d = (float *) ((char *) dst->data + ( i1*nb1 + i2*nb2 + i3*nb3));
  8807. ggml_vec_mad_f32(ne0, d, s0, *s1);
  8808. }
  8809. #else
  8810. for (int64_t i01 = bi01; i01 < bne01; ++i01) {
  8811. const int64_t i11 = i01;
  8812. float * s0 = (float *) ((char *) src0->data + ( i01*nb01 + i02*nb02 + i03*nb03));
  8813. float * s1 = (float *) ((char *) src1->data + (i1*nb10 + i11*nb11 + i12*nb12 + i13*nb13));
  8814. float * d = (float *) ((char *) dst->data + ( i1*nb1 + i2*nb2 + i3*nb3));
  8815. ggml_vec_mad_f32(ne0, d, s0, *s1);
  8816. }
  8817. #endif
  8818. }
  8819. }
  8820. }
  8821. //int64_t t1 = ggml_perf_time_us();
  8822. //static int64_t acc = 0;
  8823. //acc += t1 - t0;
  8824. //if (t1 - t0 > 10) {
  8825. // printf("\n");
  8826. // printf("ne00 = %5d, ne01 = %5d, ne02 = %5d, ne03 = %5d\n", ne00, ne01, ne02, ne03);
  8827. // printf("nb00 = %5d, nb01 = %5d, nb02 = %5d, nb03 = %5d\n", nb00, nb01, nb02, nb03);
  8828. // printf("ne10 = %5d, ne11 = %5d, ne12 = %5d, ne13 = %5d\n", ne10, ne11, ne12, ne13);
  8829. // printf("nb10 = %5d, nb11 = %5d, nb12 = %5d, nb13 = %5d\n", nb10, nb11, nb12, nb13);
  8830. // printf("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX task %d/%d: %d us, acc = %d\n", ith, nth, (int) (t1 - t0), (int) acc);
  8831. //}
  8832. }
  8833. static void ggml_compute_forward_out_prod_q_f32(
  8834. const struct ggml_compute_params * params,
  8835. const struct ggml_tensor * src0,
  8836. const struct ggml_tensor * src1,
  8837. struct ggml_tensor * dst) {
  8838. // int64_t t0 = ggml_perf_time_us();
  8839. // UNUSED(t0);
  8840. GGML_TENSOR_BINARY_OP_LOCALS;
  8841. const int ith = params->ith;
  8842. const int nth = params->nth;
  8843. const enum ggml_type type = src0->type;
  8844. ggml_to_float_t const dequantize_row_q = type_traits[type].to_float;
  8845. GGML_ASSERT(ne02 == ne12);
  8846. GGML_ASSERT(ne03 == ne13);
  8847. GGML_ASSERT(ne2 == ne12);
  8848. GGML_ASSERT(ne3 == ne13);
  8849. // we don't support permuted src0 dim0
  8850. GGML_ASSERT(nb00 == ggml_type_size(type));
  8851. // dst dim0 cannot be transposed or permuted
  8852. GGML_ASSERT(nb0 == sizeof(float));
  8853. // GGML_ASSERT(nb0 <= nb1);
  8854. // GGML_ASSERT(nb1 <= nb2);
  8855. // GGML_ASSERT(nb2 <= nb3);
  8856. GGML_ASSERT(ne0 == ne00);
  8857. GGML_ASSERT(ne1 == ne10);
  8858. GGML_ASSERT(ne2 == ne02);
  8859. GGML_ASSERT(ne3 == ne03);
  8860. // nb01 >= nb00 - src0 is not transposed
  8861. // compute by src0 rows
  8862. // TODO: #if defined(GGML_USE_CUBLAS) ggml_cuda_out_prod
  8863. // TODO: #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) || defined(GGML_USE_CLBLAST)
  8864. if (params->type == GGML_TASK_INIT) {
  8865. if (ith != 0) {
  8866. return;
  8867. }
  8868. ggml_vec_set_f32(ne0*ne1*ne2*ne3, dst->data, 0);
  8869. return;
  8870. }
  8871. if (params->type == GGML_TASK_FINALIZE) {
  8872. return;
  8873. }
  8874. // parallelize by last three dimensions
  8875. // total rows in dst
  8876. const int64_t nr = ne1*ne2*ne3;
  8877. // rows per thread
  8878. const int64_t dr = (nr + nth - 1)/nth;
  8879. // row range for this thread
  8880. const int64_t ir0 = dr*ith;
  8881. const int64_t ir1 = MIN(ir0 + dr, nr);
  8882. // dst[:,:,:,:] = 0
  8883. // for i2,i3:
  8884. // for i1:
  8885. // for i01:
  8886. // for i0:
  8887. // dst[i0,i1,i2,i3] += src0[i0,i01,i2,i3] * src1[i1,i01,i2,i3]
  8888. float * wdata = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32) * ith;
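// NOTE: each thread dequantizes src0 rows into its own ne0-float scratch row inside wdata; the
// CACHE_LINE_SIZE_F32 padding between the per-thread rows is there to avoid false sharing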
  8889. for (int64_t ir = ir0; ir < ir1; ++ir) {
  8890. // dst indices
  8891. const int64_t i3 = ir/(ne2*ne1);
  8892. const int64_t i2 = (ir - i3*ne2*ne1)/ne1;
  8893. const int64_t i1 = (ir - i3*ne2*ne1 - i2*ne1);
  8894. const int64_t i02 = i2;
  8895. const int64_t i03 = i3;
  8896. //const int64_t i10 = i1;
  8897. const int64_t i12 = i2;
  8898. const int64_t i13 = i3;
  8899. for (int64_t i01 = 0; i01 < ne01; ++i01) {
  8900. const int64_t i11 = i01;
  8901. float * s0 = (float *) ((char *) src0->data + ( i01*nb01 + i02*nb02 + i03*nb03));
  8902. float * s1 = (float *) ((char *) src1->data + (i1*nb10 + i11*nb11 + i12*nb12 + i13*nb13));
  8903. float * d = (float *) ((char *) dst->data + ( i1*nb1 + i2*nb2 + i3*nb3));
  8904. dequantize_row_q(s0, wdata, ne0);
  8905. ggml_vec_mad_f32(ne0, d, wdata, *s1);
  8906. }
  8907. }
  8908. //int64_t t1 = ggml_perf_time_us();
  8909. //static int64_t acc = 0;
  8910. //acc += t1 - t0;
  8911. //if (t1 - t0 > 10) {
  8912. // printf("\n");
  8913. // printf("ne00 = %5d, ne01 = %5d, ne02 = %5d, ne03 = %5d\n", ne00, ne01, ne02, ne03);
  8914. // printf("nb00 = %5d, nb01 = %5d, nb02 = %5d, nb03 = %5d\n", nb00, nb01, nb02, nb03);
  8915. // printf("ne10 = %5d, ne11 = %5d, ne12 = %5d, ne13 = %5d\n", ne10, ne11, ne12, ne13);
  8916. // printf("nb10 = %5d, nb11 = %5d, nb12 = %5d, nb13 = %5d\n", nb10, nb11, nb12, nb13);
  8917. // printf("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX task %d/%d: %d us, acc = %d\n", ith, nth, (int) (t1 - t0), (int) acc);
  8918. //}
  8919. }
  8920. static void ggml_compute_forward_out_prod(
  8921. const struct ggml_compute_params * params,
  8922. const struct ggml_tensor * src0,
  8923. const struct ggml_tensor * src1,
  8924. struct ggml_tensor * dst) {
  8925. switch (src0->type) {
  8926. case GGML_TYPE_Q4_0:
  8927. case GGML_TYPE_Q4_1:
  8928. case GGML_TYPE_Q5_0:
  8929. case GGML_TYPE_Q5_1:
  8930. case GGML_TYPE_Q8_0:
  8931. case GGML_TYPE_Q2_K:
  8932. case GGML_TYPE_Q3_K:
  8933. case GGML_TYPE_Q4_K:
  8934. case GGML_TYPE_Q5_K:
  8935. case GGML_TYPE_Q6_K:
  8936. case GGML_TYPE_IQ2_XXS:
  8937. case GGML_TYPE_IQ2_XS:
  8938. case GGML_TYPE_IQ3_XXS:
  8939. case GGML_TYPE_IQ1_S:
  8940. case GGML_TYPE_IQ4_NL:
  8941. {
  8942. ggml_compute_forward_out_prod_q_f32(params, src0, src1, dst);
  8943. } break;
  8944. case GGML_TYPE_F16:
  8945. {
  8946. GGML_ASSERT(false); // todo
  8947. // ggml_compute_forward_out_prod_f16_f32(params, src0, src1, dst);
  8948. } break;
  8949. case GGML_TYPE_F32:
  8950. {
  8951. ggml_compute_forward_out_prod_f32(params, src0, src1, dst);
  8952. } break;
  8953. default:
  8954. {
  8955. GGML_ASSERT(false);
  8956. } break;
  8957. }
  8958. }
  8959. // ggml_compute_forward_scale
  8960. static void ggml_compute_forward_scale_f32(
  8961. const struct ggml_compute_params * params,
  8962. const struct ggml_tensor * src0,
  8963. struct ggml_tensor * dst) {
  8964. GGML_ASSERT(ggml_is_contiguous(src0));
  8965. GGML_ASSERT(ggml_is_contiguous(dst));
  8966. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  8967. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8968. return;
  8969. }
  8970. // scale factor
  8971. float v;
  8972. memcpy(&v, dst->op_params, sizeof(float));
  8973. const int ith = params->ith;
  8974. const int nth = params->nth;
  8975. const int nc = src0->ne[0];
  8976. const int nr = ggml_nrows(src0);
  8977. // rows per thread
  8978. const int dr = (nr + nth - 1)/nth;
  8979. // row range for this thread
  8980. const int ir0 = dr*ith;
  8981. const int ir1 = MIN(ir0 + dr, nr);
  8982. const size_t nb01 = src0->nb[1];
  8983. const size_t nb1 = dst->nb[1];
  8984. for (int i1 = ir0; i1 < ir1; i1++) {
  8985. if (dst->data != src0->data) {
  8986. // src0 is same shape as dst => same indices
  8987. memcpy((char *)dst->data + i1*nb1, (char *)src0->data + i1*nb01, nc * sizeof(float));
  8988. }
  8989. ggml_vec_scale_f32(nc, (float *) ((char *) dst->data + i1*nb1), v);
  8990. }
  8991. }
  8992. static void ggml_compute_forward_scale(
  8993. const struct ggml_compute_params * params,
  8994. const struct ggml_tensor * src0,
  8995. struct ggml_tensor * dst) {
  8996. switch (src0->type) {
  8997. case GGML_TYPE_F32:
  8998. {
  8999. ggml_compute_forward_scale_f32(params, src0, dst);
  9000. } break;
  9001. default:
  9002. {
  9003. GGML_ASSERT(false);
  9004. } break;
  9005. }
  9006. }
  9007. // ggml_compute_forward_set
  9008. static void ggml_compute_forward_set_f32(
  9009. const struct ggml_compute_params * params,
  9010. const struct ggml_tensor * src0,
  9011. const struct ggml_tensor * src1,
  9012. struct ggml_tensor * dst) {
  9013. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  9014. GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));
  9015. // view src0 and dst with these strides and data offset inbytes during set
  9016. // nb0 is implicitly element_size because src0 and dst are contiguous
  9017. size_t nb1 = ((int32_t *) dst->op_params)[0];
  9018. size_t nb2 = ((int32_t *) dst->op_params)[1];
  9019. size_t nb3 = ((int32_t *) dst->op_params)[2];
  9020. size_t offset = ((int32_t *) dst->op_params)[3];
  9021. bool inplace = (bool) ((int32_t *) dst->op_params)[4];
  9022. if (!inplace && (params->type == GGML_TASK_INIT)) {
  9023. if (params->ith != 0) {
  9024. return;
  9025. }
  9026. // memcpy needs to be synchronized across threads to avoid race conditions.
  9027. // => do it in INIT phase
  9028. memcpy(
  9029. ((char *) dst->data),
  9030. ((char *) src0->data),
  9031. ggml_nbytes(dst));
  9032. }
  9033. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9034. return;
  9035. }
  9036. const int ith = params->ith;
  9037. const int nth = params->nth;
  9038. const int nr = ggml_nrows(src1);
  9039. const int nc = src1->ne[0];
  9040. GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne)
  9041. GGML_TENSOR_LOCALS(size_t, nb1, src1, nb)
  9042. // src0 and dst as viewed during set
  9043. const size_t nb0 = ggml_element_size(src0);
  9044. const int im0 = (ne10 == 0 ? 0 : ne10-1);
  9045. const int im1 = (ne11 == 0 ? 0 : ne11-1);
  9046. const int im2 = (ne12 == 0 ? 0 : ne12-1);
  9047. const int im3 = (ne13 == 0 ? 0 : ne13-1);
  9048. GGML_ASSERT(offset + im0*nb0 + im1*nb1 + im2*nb2 + im3*nb3 <= ggml_nbytes(dst));
  9049. GGML_ASSERT(nb10 == sizeof(float));
  9050. // rows per thread
  9051. const int dr = (nr + nth - 1)/nth;
  9052. // row range for this thread
  9053. const int ir0 = dr*ith;
  9054. const int ir1 = MIN(ir0 + dr, nr);
  9055. for (int ir = ir0; ir < ir1; ++ir) {
  9056. // src0 and dst are viewed with shape of src1 and offset
  9057. // => same indices
  9058. const int i3 = ir/(ne12*ne11);
  9059. const int i2 = (ir - i3*ne12*ne11)/ne11;
  9060. const int i1 = (ir - i3*ne12*ne11 - i2*ne11);
  9061. ggml_vec_cpy_f32(nc,
  9062. (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + offset),
  9063. (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11));
  9064. }
  9065. }
  9066. static void ggml_compute_forward_set(
  9067. const struct ggml_compute_params * params,
  9068. const struct ggml_tensor * src0,
  9069. const struct ggml_tensor * src1,
  9070. struct ggml_tensor * dst) {
  9071. switch (src0->type) {
  9072. case GGML_TYPE_F32:
  9073. {
  9074. ggml_compute_forward_set_f32(params, src0, src1, dst);
  9075. } break;
  9076. case GGML_TYPE_F16:
  9077. case GGML_TYPE_Q4_0:
  9078. case GGML_TYPE_Q4_1:
  9079. case GGML_TYPE_Q5_0:
  9080. case GGML_TYPE_Q5_1:
  9081. case GGML_TYPE_Q8_0:
  9082. case GGML_TYPE_Q8_1:
  9083. case GGML_TYPE_Q2_K:
  9084. case GGML_TYPE_Q3_K:
  9085. case GGML_TYPE_Q4_K:
  9086. case GGML_TYPE_Q5_K:
  9087. case GGML_TYPE_Q6_K:
  9088. case GGML_TYPE_IQ2_XXS:
  9089. case GGML_TYPE_IQ2_XS:
  9090. case GGML_TYPE_IQ3_XXS:
  9091. case GGML_TYPE_IQ1_S:
  9092. case GGML_TYPE_IQ4_NL:
  9093. default:
  9094. {
  9095. GGML_ASSERT(false);
  9096. } break;
  9097. }
  9098. }
  9099. // ggml_compute_forward_cpy
  9100. static void ggml_compute_forward_cpy(
  9101. const struct ggml_compute_params * params,
  9102. const struct ggml_tensor * src0,
  9103. struct ggml_tensor * dst) {
  9104. ggml_compute_forward_dup(params, src0, dst);
  9105. }
  9106. // ggml_compute_forward_cont
  9107. static void ggml_compute_forward_cont(
  9108. const struct ggml_compute_params * params,
  9109. const struct ggml_tensor * src0,
  9110. struct ggml_tensor * dst) {
  9111. ggml_compute_forward_dup(params, src0, dst);
  9112. }
  9113. // ggml_compute_forward_reshape
  9114. static void ggml_compute_forward_reshape(
  9115. const struct ggml_compute_params * params,
  9116. const struct ggml_tensor * src0,
  9117. struct ggml_tensor * dst) {
  9118. // NOP
  9119. UNUSED(params);
  9120. UNUSED(src0);
  9121. UNUSED(dst);
  9122. }
  9123. // ggml_compute_forward_view
  9124. static void ggml_compute_forward_view(
  9125. const struct ggml_compute_params * params,
  9126. const struct ggml_tensor * src0) {
  9127. // NOP
  9128. UNUSED(params);
  9129. UNUSED(src0);
  9130. }
  9131. // ggml_compute_forward_permute
  9132. static void ggml_compute_forward_permute(
  9133. const struct ggml_compute_params * params,
  9134. const struct ggml_tensor * src0) {
  9135. // NOP
  9136. UNUSED(params);
  9137. UNUSED(src0);
  9138. }
  9139. // ggml_compute_forward_transpose
  9140. static void ggml_compute_forward_transpose(
  9141. const struct ggml_compute_params * params,
  9142. const struct ggml_tensor * src0) {
  9143. // NOP
  9144. UNUSED(params);
  9145. UNUSED(src0);
  9146. }
  9147. // ggml_compute_forward_get_rows
  9148. static void ggml_compute_forward_get_rows_q(
  9149. const struct ggml_compute_params * params,
  9150. const struct ggml_tensor * src0,
  9151. const struct ggml_tensor * src1,
  9152. struct ggml_tensor * dst) {
  9153. assert(params->ith == 0);
  9154. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9155. return;
  9156. }
  9157. GGML_TENSOR_BINARY_OP_LOCALS
  9158. const int64_t nc = ne00;
  9159. const int64_t nr = ggml_nelements(src1); GGML_UNUSED(nr);
  9160. const enum ggml_type type = src0->type;
  9161. ggml_to_float_t const dequantize_row_q = type_traits[type].to_float;
  9162. assert(ne0 == nc);
  9163. assert(ne02 == ne11);
  9164. assert(nb00 == ggml_type_size(type));
  9165. assert(ggml_nrows(dst) == nr);
  9166. // TODO: multi-thread
  9167. for (int64_t i12 = 0; i12 < ne12; ++i12) {
  9168. for (int64_t i11 = 0; i11 < ne11; ++i11) {
  9169. for (int64_t i10 = 0; i10 < ne10; ++i10) {
  9170. const int64_t i01 = *(int32_t *) ((char *) src1->data + i10*nb10 + i11*nb11 + i12*nb12);
  9171. dequantize_row_q(
  9172. (const void *) ((char *) src0->data + i01*nb01 + i11*nb02 + i12*nb03),
  9173. (float *) ((char *) dst->data + i10*nb1 + i11*nb2 + i12*nb3), nc);
  9174. }
  9175. }
  9176. }
  9177. }
  9178. static void ggml_compute_forward_get_rows_f16(
  9179. const struct ggml_compute_params * params,
  9180. const struct ggml_tensor * src0,
  9181. const struct ggml_tensor * src1,
  9182. struct ggml_tensor * dst) {
  9183. assert(params->ith == 0);
  9184. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9185. return;
  9186. }
  9187. GGML_TENSOR_BINARY_OP_LOCALS
  9188. const int64_t nc = ne00;
  9189. const int64_t nr = ggml_nelements(src1); GGML_UNUSED(nr);
  9190. assert(ne0 == nc);
  9191. assert(ne02 == ne11);
  9192. assert(nb00 == sizeof(ggml_fp16_t));
  9193. assert(ggml_nrows(dst) == nr);
  9194. // TODO: multi-thread
  9195. for (int64_t i12 = 0; i12 < ne12; ++i12) {
  9196. for (int64_t i11 = 0; i11 < ne11; ++i11) {
  9197. for (int64_t i10 = 0; i10 < ne10; ++i10) {
  9198. const int64_t i01 = *(int32_t *) ((char *) src1->data + i10*nb10 + i11*nb11 + i12*nb12);
  9199. ggml_fp16_to_fp32_row(
  9200. (const void *) ((char *) src0->data + i01*nb01 + i11*nb02 + i12*nb03),
  9201. (float *) ((char *) dst->data + i10*nb1 + i11*nb2 + i12*nb3), nc);
  9202. }
  9203. }
  9204. }
  9205. }
  9206. static void ggml_compute_forward_get_rows_f32(
  9207. const struct ggml_compute_params * params,
  9208. const struct ggml_tensor * src0,
  9209. const struct ggml_tensor * src1,
  9210. struct ggml_tensor * dst) {
  9211. assert(params->ith == 0);
  9212. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9213. return;
  9214. }
  9215. GGML_TENSOR_BINARY_OP_LOCALS
  9216. const int64_t nc = ne00;
  9217. const int64_t nr = ggml_nelements(src1); GGML_UNUSED(nr);
  9218. assert(ne0 == nc);
  9219. assert(ne02 == ne11);
  9220. assert(nb00 == sizeof(float));
  9221. assert(ggml_nrows(dst) == nr);
  9222. // TODO: multi-thread
  9223. for (int64_t i12 = 0; i12 < ne12; ++i12) {
  9224. for (int64_t i11 = 0; i11 < ne11; ++i11) {
  9225. for (int64_t i10 = 0; i10 < ne10; ++i10) {
  9226. const int64_t i01 = *(int32_t *) ((char *) src1->data + i10*nb10 + i11*nb11 + i12*nb12);
  9227. ggml_vec_cpy_f32(nc,
  9228. (float *) ((char *) dst->data + i10*nb1 + i11*nb2 + i12*nb3),
  9229. (float *) ((char *) src0->data + i01*nb01 + i11*nb02 + i12*nb03));
  9230. }
  9231. }
  9232. }
  9233. }
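// NOTE: the following is an illustrative, standalone sketch of the basic get_rows semantics for the
// simple 2D f32 case: dst row r is a copy of src0 row ids[r]. The name get_rows_2d_f32_ref is
// hypothetical and not part of ggml (the implementations above also handle the i11/i12 batch dims).
static void get_rows_2d_f32_ref(int64_t nc, int64_t nr, float * dst, const float * src0, const int32_t * ids) {
    for (int64_t r = 0; r < nr; ++r) {
        memcpy(dst + r*nc, src0 + ids[r]*nc, nc*sizeof(float)); // gather one row
    }
}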
  9234. static void ggml_compute_forward_get_rows(
  9235. const struct ggml_compute_params * params,
  9236. const struct ggml_tensor * src0,
  9237. const struct ggml_tensor * src1,
  9238. struct ggml_tensor * dst) {
  9239. switch (src0->type) {
  9240. case GGML_TYPE_Q4_0:
  9241. case GGML_TYPE_Q4_1:
  9242. case GGML_TYPE_Q5_0:
  9243. case GGML_TYPE_Q5_1:
  9244. case GGML_TYPE_Q8_0:
  9245. case GGML_TYPE_Q8_1:
  9246. case GGML_TYPE_Q2_K:
  9247. case GGML_TYPE_Q3_K:
  9248. case GGML_TYPE_Q4_K:
  9249. case GGML_TYPE_Q5_K:
  9250. case GGML_TYPE_Q6_K:
  9251. case GGML_TYPE_IQ2_XXS:
  9252. case GGML_TYPE_IQ2_XS:
  9253. case GGML_TYPE_IQ3_XXS:
  9254. case GGML_TYPE_IQ1_S:
  9255. case GGML_TYPE_IQ4_NL:
  9256. {
  9257. ggml_compute_forward_get_rows_q(params, src0, src1, dst);
  9258. } break;
  9259. case GGML_TYPE_F16:
  9260. {
  9261. ggml_compute_forward_get_rows_f16(params, src0, src1, dst);
  9262. } break;
  9263. case GGML_TYPE_F32:
  9264. case GGML_TYPE_I32:
  9265. {
  9266. ggml_compute_forward_get_rows_f32(params, src0, src1, dst);
  9267. } break;
  9268. default:
  9269. {
  9270. GGML_ASSERT(false);
  9271. } break;
  9272. }
  9273. //static bool first = true;
  9274. //printf("ne0 = %d, ne1 = %d, ne2 = %d\n", dst->ne[0], dst->ne[1], dst->ne[2]);
  9275. //if (first) {
  9276. // first = false;
  9277. //} else {
  9278. // for (int k = 0; k < dst->ne[1]; ++k) {
  9279. // for (int j = 0; j < dst->ne[0]/16; ++j) {
  9280. // for (int i = 0; i < 16; ++i) {
  9281. // printf("%8.4f ", ((float *) dst->data)[k*dst->ne[0] + j*16 + i]);
  9282. // }
  9283. // printf("\n");
  9284. // }
  9285. // printf("\n");
  9286. // }
  9287. // printf("\n");
  9288. // exit(0);
  9289. //}
  9290. }
  9291. // ggml_compute_forward_get_rows_back
  9292. static void ggml_compute_forward_get_rows_back_f32_f16(
  9293. const struct ggml_compute_params * params,
  9294. const struct ggml_tensor * src0,
  9295. const struct ggml_tensor * src1,
  9296. struct ggml_tensor * dst) {
  9297. GGML_ASSERT(params->ith == 0);
  9298. GGML_ASSERT(ggml_is_contiguous(dst));
  9299. // ggml_compute_forward_dup_same_cont(params, opt0, dst);
  9300. if (params->type == GGML_TASK_INIT) {
  9301. if (params->ith != 0) {
  9302. return;
  9303. }
  9304. memset(dst->data, 0, ggml_nbytes(dst));
  9305. }
  9306. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9307. return;
  9308. }
  9309. const int nc = src0->ne[0];
  9310. const int nr = ggml_nelements(src1);
  9311. GGML_ASSERT( dst->ne[0] == nc);
  9312. GGML_ASSERT(src0->nb[0] == sizeof(ggml_fp16_t));
  9313. for (int i = 0; i < nr; ++i) {
  9314. const int r = ((int32_t *) src1->data)[i];
  9315. for (int j = 0; j < nc; ++j) {
  9316. ggml_fp16_t v = ((ggml_fp16_t *) ((char *) src0->data + i*src0->nb[1]))[j];
  9317. ((float *) ((char *) dst->data + r*dst->nb[1]))[j] += GGML_FP16_TO_FP32(v);
  9318. }
  9319. }
  9320. }
  9321. static void ggml_compute_forward_get_rows_back_f32(
  9322. const struct ggml_compute_params * params,
  9323. const struct ggml_tensor * src0,
  9324. const struct ggml_tensor * src1,
  9325. struct ggml_tensor * dst) {
  9326. GGML_ASSERT(params->ith == 0);
  9327. GGML_ASSERT(ggml_is_contiguous(dst));
  9328. // ggml_compute_forward_dup_same_cont(params, opt0, dst);
  9329. if (params->type == GGML_TASK_INIT) {
  9330. if (params->ith != 0) {
  9331. return;
  9332. }
  9333. memset(dst->data, 0, ggml_nbytes(dst));
  9334. }
  9335. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9336. return;
  9337. }
  9338. const int nc = src0->ne[0];
  9339. const int nr = ggml_nelements(src1);
  9340. GGML_ASSERT( dst->ne[0] == nc);
  9341. GGML_ASSERT(src0->nb[0] == sizeof(float));
  9342. for (int i = 0; i < nr; ++i) {
  9343. const int r = ((int32_t *) src1->data)[i];
  9344. ggml_vec_add_f32(nc,
  9345. (float *) ((char *) dst->data + r*dst->nb[1]),
  9346. (float *) ((char *) dst->data + r*dst->nb[1]),
  9347. (float *) ((char *) src0->data + i*src0->nb[1]));
  9348. }
  9349. }
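// NOTE: get_rows_back is the adjoint of get_rows: row i of src0 (the incoming gradient) is
// accumulated into dst at row src1[i], so indices that were gathered multiple times in the forward
// pass correctly sum their gradients here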
  9350. static void ggml_compute_forward_get_rows_back(
  9351. const struct ggml_compute_params * params,
  9352. const struct ggml_tensor * src0,
  9353. const struct ggml_tensor * src1,
  9354. struct ggml_tensor * dst) {
  9355. switch (src0->type) {
  9356. case GGML_TYPE_F16:
  9357. {
  9358. ggml_compute_forward_get_rows_back_f32_f16(params, src0, src1, dst);
  9359. } break;
  9360. case GGML_TYPE_F32:
  9361. {
  9362. ggml_compute_forward_get_rows_back_f32(params, src0, src1, dst);
  9363. } break;
  9364. default:
  9365. {
  9366. GGML_ASSERT(false);
  9367. } break;
  9368. }
  9369. //static bool first = true;
  9370. //printf("ne0 = %d, ne1 = %d, ne2 = %d\n", dst->ne[0], dst->ne[1], dst->ne[2]);
  9371. //if (first) {
  9372. // first = false;
  9373. //} else {
  9374. // for (int k = 0; k < dst->ne[1]; ++k) {
  9375. // for (int j = 0; j < dst->ne[0]/16; ++j) {
  9376. // for (int i = 0; i < 16; ++i) {
  9377. // printf("%8.4f ", ((float *) dst->data)[k*dst->ne[0] + j*16 + i]);
  9378. // }
  9379. // printf("\n");
  9380. // }
  9381. // printf("\n");
  9382. // }
  9383. // printf("\n");
  9384. // exit(0);
  9385. //}
  9386. }
  9387. // ggml_compute_forward_diag
  9388. static void ggml_compute_forward_diag_f32(
  9389. const struct ggml_compute_params * params,
  9390. const struct ggml_tensor * src0,
  9391. struct ggml_tensor * dst) {
  9392. GGML_ASSERT(params->ith == 0);
  9393. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9394. return;
  9395. }
  9396. // TODO: handle transposed/permuted matrices
  9397. GGML_TENSOR_UNARY_OP_LOCALS
  9398. GGML_ASSERT(ne00 == ne0);
  9399. GGML_ASSERT(ne00 == ne1);
  9400. GGML_ASSERT(ne01 == 1);
  9401. GGML_ASSERT(ne02 == ne2);
  9402. GGML_ASSERT(ne03 == ne3);
  9403. GGML_ASSERT(nb00 == sizeof(float));
  9404. GGML_ASSERT(nb0 == sizeof(float));
  9405. for (int i3 = 0; i3 < ne3; i3++) {
  9406. for (int i2 = 0; i2 < ne2; i2++) {
  9407. for (int i1 = 0; i1 < ne1; i1++) {
  9408. float * d = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1);
  9409. float * s = (float *)((char *) src0->data + i3*nb03 + i2*nb02);
  9410. for (int i0 = 0; i0 < i1; i0++) {
  9411. d[i0] = 0;
  9412. }
  9413. d[i1] = s[i1];
  9414. for (int i0 = i1+1; i0 < ne0; i0++) {
  9415. d[i0] = 0;
  9416. }
  9417. }
  9418. }
  9419. }
  9420. }
  9421. static void ggml_compute_forward_diag(
  9422. const struct ggml_compute_params * params,
  9423. const struct ggml_tensor * src0,
  9424. struct ggml_tensor * dst) {
  9425. switch (src0->type) {
  9426. case GGML_TYPE_F32:
  9427. {
  9428. ggml_compute_forward_diag_f32(params, src0, dst);
  9429. } break;
  9430. default:
  9431. {
  9432. GGML_ASSERT(false);
  9433. } break;
  9434. }
  9435. }
  9436. // ggml_compute_forward_diag_mask_inf
  9437. static void ggml_compute_forward_diag_mask_f32(
  9438. const struct ggml_compute_params * params,
  9439. const struct ggml_tensor * src0,
  9440. struct ggml_tensor * dst,
  9441. const float value) {
  9442. const int ith = params->ith;
  9443. const int nth = params->nth;
  9444. const int n_past = ((int32_t *) dst->op_params)[0];
  9445. const bool inplace = src0->data == dst->data;
  9446. GGML_ASSERT(n_past >= 0);
  9447. if (!inplace && (params->type == GGML_TASK_INIT)) {
  9448. if (ith != 0) {
  9449. return;
  9450. }
  9451. // memcpy needs to be synchronized across threads to avoid race conditions.
  9452. // => do it in INIT phase
  9453. GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));
  9454. GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));
  9455. memcpy(
  9456. ((char *) dst->data),
  9457. ((char *) src0->data),
  9458. ggml_nbytes(dst));
  9459. }
  9460. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9461. return;
  9462. }
  9463. // TODO: handle transposed/permuted matrices
  9464. const int n = ggml_nrows(src0);
  9465. const int nc = src0->ne[0];
  9466. const int nr = src0->ne[1];
  9467. const int nz = n/nr;
  9468. GGML_ASSERT( dst->nb[0] == sizeof(float));
  9469. GGML_ASSERT(src0->nb[0] == sizeof(float));
  9470. for (int k = 0; k < nz; k++) {
  9471. for (int j = ith; j < nr; j += nth) {
  9472. for (int i = n_past; i < nc; i++) {
  9473. if (i > n_past + j) {
  9474. *(float *)((char *) dst->data + k*dst->nb[2] + j*dst->nb[1] + i*dst->nb[0]) = value;
  9475. }
  9476. }
  9477. }
  9478. }
  9479. }
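// NOTE: worked example (illustrative): the masking leaves row j with columns 0 .. n_past+j intact
// and writes `value` into columns n_past+j+1 .. nc-1; with n_past = 0 and value = -INFINITY this is
// the standard causal attention mask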
  9480. static void ggml_compute_forward_diag_mask_inf(
  9481. const struct ggml_compute_params * params,
  9482. const struct ggml_tensor * src0,
  9483. struct ggml_tensor * dst) {
  9484. switch (src0->type) {
  9485. case GGML_TYPE_F32:
  9486. {
  9487. ggml_compute_forward_diag_mask_f32(params, src0, dst, -INFINITY);
  9488. } break;
  9489. default:
  9490. {
  9491. GGML_ASSERT(false);
  9492. } break;
  9493. }
  9494. }
  9495. static void ggml_compute_forward_diag_mask_zero(
  9496. const struct ggml_compute_params * params,
  9497. const struct ggml_tensor * src0,
  9498. struct ggml_tensor * dst) {
  9499. switch (src0->type) {
  9500. case GGML_TYPE_F32:
  9501. {
  9502. ggml_compute_forward_diag_mask_f32(params, src0, dst, 0);
  9503. } break;
  9504. default:
  9505. {
  9506. GGML_ASSERT(false);
  9507. } break;
  9508. }
  9509. }
  9510. // ggml_compute_forward_soft_max
  9511. static void ggml_compute_forward_soft_max_f32(
  9512. const struct ggml_compute_params * params,
  9513. const struct ggml_tensor * src0,
  9514. const struct ggml_tensor * src1,
  9515. const struct ggml_tensor * src2,
  9516. struct ggml_tensor * dst) {
  9517. assert(ggml_is_contiguous(dst));
  9518. assert(ggml_are_same_shape(src0, dst));
  9519. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9520. return;
  9521. }
  9522. float scale = 1.0f;
  9523. float max_bias = 0.0f;
  9524. memcpy(&scale, (float *) dst->op_params + 0, sizeof(float));
  9525. memcpy(&max_bias, (float *) dst->op_params + 1, sizeof(float));
  9526. // TODO: handle transposed/permuted matrices
  9527. const int ith = params->ith;
  9528. const int nth = params->nth;
  9529. GGML_TENSOR_UNARY_OP_LOCALS
  9530. const int64_t ne11 = src1 ? src1->ne[1] : 1;
  9531. // TODO: is this supposed to be ceil instead of floor?
  9532. // https://huggingface.co/mosaicml/mpt-7b/blob/main/attention.py#L370
  9533. const uint32_t n_head_kv = ne02;
  9534. const uint32_t n_head_log2 = 1u << (uint32_t) floor(log2(n_head_kv));
  9535. const float m0 = powf(2.0f, -(max_bias ) / n_head_log2);
  9536. const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);
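// NOTE: worked example (illustrative numbers): with max_bias = 8 and n_head_kv = 8, n_head_log2 = 8,
// m0 = 2^(-8/8) = 0.5 and m1 = 2^(-4/8) ~= 0.707; heads h = 0..7 then get the slopes
// m0^(h+1) = 0.5, 0.25, ..., 1/256 in the per-row loop below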
  9537. const int nc = src0->ne[0];
  9538. const int nr = ggml_nrows(src0);
  9539. // rows per thread
  9540. const int dr = (nr + nth - 1)/nth;
  9541. // row range for this thread
  9542. const int ir0 = dr*ith;
  9543. const int ir1 = MIN(ir0 + dr, nr);
  9544. float * wp = (float *) params->wdata + (nc + CACHE_LINE_SIZE_F32) * ith;
  9545. // when max_bias <= 0.0f, src2 is not used and we default it to src0 to avoid branching
  9546. float * pos = src2 ? (float *) src2->data : src0->data;
  9547. for (int i1 = ir0; i1 < ir1; i1++) {
  9548. float * sp = (float *)((char *) src0->data + i1*src0->nb[1]);
  9549. float * dp = (float *)((char *) dst->data + i1*dst->nb[1]);
  9550. // broadcast the mask across rows
  9551. float * mp = src1 ? (float *)((char *) src1->data + (i1%ne11)*src1->nb[1]) : NULL;
  9552. ggml_vec_cpy_f32 (nc, wp, sp);
  9553. ggml_vec_scale_f32(nc, wp, scale);
  9554. if (mp) {
  9555. ggml_vec_acc_f32(nc, wp, mp);
  9556. }
  9557. // ALiBi bias
  9558. if (max_bias > 0.0f) {
  9559. const uint32_t h = (i1/ne01)%ne02; // head
  9560. const float slope = h < n_head_log2 ? powf(m0, h + 1) : powf(m1, 2*(h - n_head_log2) + 1);
  9561. for (int i = 0; i < nc; i++) {
  9562. wp[i] = wp[i] + slope*pos[i];
  9563. }
  9564. }
  9565. #ifndef NDEBUG
  9566. for (int i = 0; i < nc; ++i) {
  9567. //printf("p[%d] = %f\n", i, p[i]);
  9568. assert(!isnan(wp[i]));
  9569. }
  9570. #endif
  9571. float max = -INFINITY;
  9572. ggml_vec_max_f32(nc, &max, wp);
  9573. ggml_float sum = 0.0;
  9574. uint16_t scvt;
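// NOTE: exp() is approximated through a lookup here: the f32 argument (wp[i] - max) is converted to
// its fp16 bit pattern and used as a 16-bit index into the precomputed table ggml_table_exp_f16,
// trading a little accuracy for speed in this hot loop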
  9575. for (int i = 0; i < nc; i++) {
  9576. if (wp[i] == -INFINITY) {
  9577. dp[i] = 0.0f;
  9578. } else {
  9579. // const float val = (wp[i] == -INFINITY) ? 0.0 : exp(wp[i] - max);
  9580. ggml_fp16_t s = GGML_FP32_TO_FP16(wp[i] - max);
  9581. memcpy(&scvt, &s, sizeof(scvt));
  9582. const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt]);
  9583. sum += (ggml_float)val;
  9584. dp[i] = val;
  9585. }
  9586. }
  9587. assert(sum > 0.0);
  9588. sum = 1.0/sum;
  9589. ggml_vec_scale_f32(nc, dp, sum);
  9590. #ifndef NDEBUG
  9591. for (int i = 0; i < nc; ++i) {
  9592. assert(!isnan(dp[i]));
  9593. assert(!isinf(dp[i]));
  9594. }
  9595. #endif
  9596. }
  9597. }
  9598. static void ggml_compute_forward_soft_max(
  9599. const struct ggml_compute_params * params,
  9600. const struct ggml_tensor * src0,
  9601. const struct ggml_tensor * src1,
  9602. const struct ggml_tensor * src2,
  9603. struct ggml_tensor * dst) {
  9604. switch (src0->type) {
  9605. case GGML_TYPE_F32:
  9606. {
  9607. ggml_compute_forward_soft_max_f32(params, src0, src1, src2, dst);
  9608. } break;
  9609. default:
  9610. {
  9611. GGML_ASSERT(false);
  9612. } break;
  9613. }
  9614. }
  9615. // ggml_compute_forward_soft_max_back
  9616. static void ggml_compute_forward_soft_max_back_f32(
  9617. const struct ggml_compute_params * params,
  9618. const struct ggml_tensor * src0,
  9619. const struct ggml_tensor * src1,
  9620. struct ggml_tensor * dst) {
  9621. GGML_ASSERT(ggml_is_contiguous(src0));
  9622. GGML_ASSERT(ggml_is_contiguous(src1));
  9623. GGML_ASSERT(ggml_is_contiguous(dst));
  9624. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  9625. GGML_ASSERT(ggml_are_same_shape(src1, dst));
  9626. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9627. return;
  9628. }
  9629. // TODO: handle transposed/permuted matrices
  9630. const int ith = params->ith;
  9631. const int nth = params->nth;
  9632. const int nc = src0->ne[0];
  9633. const int nr = ggml_nrows(src0);
  9634. // rows per thread
  9635. const int dr = (nr + nth - 1)/nth;
  9636. // row range for this thread
  9637. const int ir0 = dr*ith;
  9638. const int ir1 = MIN(ir0 + dr, nr);
  9639. for (int i1 = ir0; i1 < ir1; i1++) {
  9640. float *dy = (float *)((char *) src0->data + i1*src0->nb[1]);
  9641. float *y = (float *)((char *) src1->data + i1*src1->nb[1]);
  9642. float *dx = (float *)((char *) dst->data + i1*dst->nb[1]);
  9643. #ifndef NDEBUG
  9644. for (int i = 0; i < nc; ++i) {
  9645. //printf("p[%d] = %f\n", i, p[i]);
  9646. assert(!isnan(dy[i]));
  9647. assert(!isnan(y[i]));
  9648. }
  9649. #endif
  9650. // Jii = yi - yi*yi
  9651. // Jij = -yi*yj
  9652. // J = diag(y)-y.T*y
  9653. // dx = J * dy
  9654. // dxk = sum_i(Jki * dyi)
  9655. // dxk = sum_i(-yk*yi * dyi) - (-yk*yk)*dyk + (yk - yk*yk)*dyk
  9656. // dxk = sum_i(-yk*yi * dyi) + yk*yk*dyk + yk*dyk - yk*yk*dyk
  9657. // dxk = sum_i(-yk*yi * dyi) + yk*dyk
  9658. // dxk = -yk * sum_i(yi * dyi) + yk*dyk
  9659. // dxk = -yk * dot(y, dy) + yk*dyk
  9660. // dxk = yk * (- dot(y, dy) + dyk)
  9661. // dxk = yk * (dyk - dot(y, dy))
  9662. //
  9663. // post-order:
  9664. // dot_y_dy := dot(y, dy)
  9665. // dx := dy
  9666. // dx := dx - dot_y_dy
  9667. // dx := dx * y
  9668. // linear runtime, no additional memory
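// the ggml_vec_* calls below implement the post-order steps one-to-one:
// dot(y, dy), copy dy into dx, add -dot_y_dy to every element, multiply element-wise by y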
  9669. float dot_y_dy = 0;
  9670. ggml_vec_dot_f32 (nc, &dot_y_dy, 0, y, 0, dy, 0, 1);
  9671. ggml_vec_cpy_f32 (nc, dx, dy);
  9672. ggml_vec_acc1_f32(nc, dx, -dot_y_dy);
  9673. ggml_vec_mul_f32 (nc, dx, dx, y);
  9674. #ifndef NDEBUG
  9675. for (int i = 0; i < nc; ++i) {
  9676. assert(!isnan(dx[i]));
  9677. assert(!isinf(dx[i]));
  9678. }
  9679. #endif
  9680. }
  9681. }
  9682. static void ggml_compute_forward_soft_max_back(
  9683. const struct ggml_compute_params * params,
  9684. const struct ggml_tensor * src0,
  9685. const struct ggml_tensor * src1,
  9686. struct ggml_tensor * dst) {
  9687. switch (src0->type) {
  9688. case GGML_TYPE_F32:
  9689. {
  9690. ggml_compute_forward_soft_max_back_f32(params, src0, src1, dst);
  9691. } break;
  9692. default:
  9693. {
  9694. GGML_ASSERT(false);
  9695. } break;
  9696. }
  9697. }
  9698. // ggml_compute_forward_alibi
  9699. static void ggml_compute_forward_alibi_f32(
  9700. const struct ggml_compute_params * params,
  9701. const struct ggml_tensor * src0,
  9702. struct ggml_tensor * dst) {
  9703. assert(params->ith == 0);
  9704. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9705. return;
  9706. }
  9707. //const int n_past = ((int32_t *) dst->op_params)[0];
  9708. const int n_head = ((int32_t *) dst->op_params)[1];
  9709. float max_bias;
  9710. memcpy(&max_bias, (int32_t *) dst->op_params + 2, sizeof(float));
  9711. const int64_t ne0 = src0->ne[0]; // all_seq_len = n_past + ne1
  9712. const int64_t ne1 = src0->ne[1]; // seq_len_without_past
  9713. const int64_t ne2 = src0->ne[2]; // n_head -> this is k
  9714. //const int64_t ne3 = src0->ne[3]; // 1 -> bsz
  9715. const int64_t n = ggml_nrows(src0);
  9716. const int64_t ne2_ne3 = n/ne1; // ne2*ne3
  9717. const size_t nb0 = src0->nb[0];
  9718. const size_t nb1 = src0->nb[1];
  9719. const size_t nb2 = src0->nb[2];
  9720. //const int nb3 = src0->nb[3];
  9721. GGML_ASSERT(nb0 == sizeof(float));
  9722. GGML_ASSERT(n_head == ne2);
  9723. // add alibi to src0 (KQ_scaled)
  9724. const int n_heads_log2_floor = 1 << (int) floor(log2(n_head));
  9725. const float m0 = powf(2.0f, -(max_bias) / n_heads_log2_floor);
  9726. const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_heads_log2_floor);
  9727. for (int64_t k = 0; k < ne2_ne3; k++) {
  9728. // TODO: k*nb2 or k*nb3
  9729. float m_k;
  9730. if (k < n_heads_log2_floor) {
  9731. m_k = powf(m0, k + 1);
  9732. } else {
  9733. m_k = powf(m1, 2 * (k - n_heads_log2_floor) + 1);
  9734. }
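// for head counts that are not a power of two, the remaining heads (k >= n_heads_log2_floor)
// take odd powers of m1 = sqrt(m0), which interleave between the primary slopes, following the
// ALiBi recipe for non-power-of-two head counts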
  9735. for (int64_t i = 0; i < ne0; i++) {
  9736. for (int64_t j = 0; j < ne1; j++) {
  9737. float * const src = (float *)((char *) src0->data + i*nb0 + j*nb1 + k*nb2);
  9738. float * pdst = (float *)((char *) dst->data + i*nb0 + j*nb1 + k*nb2);
  9739. pdst[0] = i * m_k + src[0];
  9740. }
  9741. }
  9742. }
  9743. }
  9744. static void ggml_compute_forward_alibi_f16(
  9745. const struct ggml_compute_params * params,
  9746. const struct ggml_tensor * src0,
  9747. struct ggml_tensor * dst) {
  9748. assert(params->ith == 0);
  9749. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9750. return;
  9751. }
  9752. //const int n_past = ((int32_t *) dst->op_params)[0];
  9753. const int n_head = ((int32_t *) dst->op_params)[1];
  9754. float max_bias;
  9755. memcpy(&max_bias, (int32_t *) dst->op_params + 2, sizeof(float));
  9756. const int ne0 = src0->ne[0]; // all_seq_len = n_past + ne1
  9757. const int ne1 = src0->ne[1]; // seq_len_without_past
  9758. const int ne2 = src0->ne[2]; // n_head -> this is k
  9759. //const int ne3 = src0->ne[3]; // 1 -> bsz
  9760. const int n = ggml_nrows(src0);
  9761. const int ne2_ne3 = n/ne1; // ne2*ne3
9762. const size_t nb0 = src0->nb[0];
9763. const size_t nb1 = src0->nb[1];
9764. const size_t nb2 = src0->nb[2];
  9765. //const int nb3 = src0->nb[3];
  9766. GGML_ASSERT(nb0 == sizeof(ggml_fp16_t));
  9767. //GGML_ASSERT(ne1 + n_past == ne0); (void) n_past;
  9768. GGML_ASSERT(n_head == ne2);
  9769. // add alibi to src0 (KQ_scaled)
  9770. const int n_heads_log2_floor = 1 << (int) floor(log2(n_head));
  9771. const float m0 = powf(2.0f, -(max_bias) / n_heads_log2_floor);
  9772. const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_heads_log2_floor);
  9773. for (int k = 0; k < ne2_ne3; k++) {
  9774. // TODO: k*nb2 or k*nb3
  9775. float m_k;
  9776. if (k < n_heads_log2_floor) {
  9777. m_k = powf(m0, k + 1);
  9778. } else {
  9779. m_k = powf(m1, 2 * (k - n_heads_log2_floor) + 1);
  9780. }
  9781. for (int i = 0; i < ne0; i++) {
  9782. for (int j = 0; j < ne1; j++) {
  9783. ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i*nb0 + j*nb1 + k*nb2);
  9784. float * pdst = (float *)((char *) dst->data + i*nb0 + j*nb1 + k*nb2);
  9785. // we return F32
  9786. pdst[0] = i * m_k + GGML_FP16_TO_FP32(src[0]);
  9787. }
  9788. }
  9789. }
  9790. }
  9791. static void ggml_compute_forward_alibi(
  9792. const struct ggml_compute_params * params,
  9793. const struct ggml_tensor * src0,
  9794. struct ggml_tensor * dst) {
  9795. switch (src0->type) {
  9796. case GGML_TYPE_F16:
  9797. {
  9798. ggml_compute_forward_alibi_f16(params, src0, dst);
  9799. } break;
  9800. case GGML_TYPE_F32:
  9801. {
  9802. ggml_compute_forward_alibi_f32(params, src0, dst);
  9803. } break;
  9804. case GGML_TYPE_Q4_0:
  9805. case GGML_TYPE_Q4_1:
  9806. case GGML_TYPE_Q5_0:
  9807. case GGML_TYPE_Q5_1:
  9808. case GGML_TYPE_Q8_0:
  9809. case GGML_TYPE_Q8_1:
  9810. case GGML_TYPE_Q2_K:
  9811. case GGML_TYPE_Q3_K:
  9812. case GGML_TYPE_Q4_K:
  9813. case GGML_TYPE_Q5_K:
  9814. case GGML_TYPE_Q6_K:
  9815. case GGML_TYPE_IQ2_XXS:
  9816. case GGML_TYPE_IQ2_XS:
  9817. case GGML_TYPE_IQ3_XXS:
  9818. case GGML_TYPE_IQ1_S:
  9819. case GGML_TYPE_IQ4_NL:
  9820. case GGML_TYPE_Q8_K:
  9821. case GGML_TYPE_I8:
  9822. case GGML_TYPE_I16:
  9823. case GGML_TYPE_I32:
  9824. case GGML_TYPE_COUNT:
  9825. {
  9826. GGML_ASSERT(false);
  9827. } break;
  9828. }
  9829. }
  9830. // ggml_compute_forward_clamp
  9831. static void ggml_compute_forward_clamp_f32(
  9832. const struct ggml_compute_params * params,
  9833. const struct ggml_tensor * src0,
  9834. struct ggml_tensor * dst) {
  9835. assert(params->ith == 0);
  9836. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9837. return;
  9838. }
  9839. float min;
  9840. float max;
  9841. memcpy(&min, (float *) dst->op_params + 0, sizeof(float));
  9842. memcpy(&max, (float *) dst->op_params + 1, sizeof(float));
  9843. const int ith = params->ith;
  9844. const int nth = params->nth;
  9845. const int n = ggml_nrows(src0);
  9846. const int nc = src0->ne[0];
  9847. const size_t nb00 = src0->nb[0];
  9848. const size_t nb01 = src0->nb[1];
  9849. const size_t nb0 = dst->nb[0];
  9850. const size_t nb1 = dst->nb[1];
  9851. GGML_ASSERT( nb0 == sizeof(float));
  9852. GGML_ASSERT(nb00 == sizeof(float));
  9853. for (int j = ith; j < n; j += nth) {
  9854. float * dst_ptr = (float *) ((char *) dst->data + j*nb1);
  9855. float * src0_ptr = (float *) ((char *) src0->data + j*nb01);
  9856. for (int i = 0; i < nc; i++) {
  9857. dst_ptr[i] = MAX(MIN(src0_ptr[i], max), min);
  9858. }
  9859. }
  9860. }
  9861. static void ggml_compute_forward_clamp(
  9862. const struct ggml_compute_params * params,
  9863. const struct ggml_tensor * src0,
  9864. struct ggml_tensor * dst) {
  9865. switch (src0->type) {
  9866. case GGML_TYPE_F32:
  9867. {
  9868. ggml_compute_forward_clamp_f32(params, src0, dst);
  9869. } break;
  9870. case GGML_TYPE_F16:
  9871. case GGML_TYPE_Q4_0:
  9872. case GGML_TYPE_Q4_1:
  9873. case GGML_TYPE_Q5_0:
  9874. case GGML_TYPE_Q5_1:
  9875. case GGML_TYPE_Q8_0:
  9876. case GGML_TYPE_Q8_1:
  9877. case GGML_TYPE_Q2_K:
  9878. case GGML_TYPE_Q3_K:
  9879. case GGML_TYPE_Q4_K:
  9880. case GGML_TYPE_Q5_K:
  9881. case GGML_TYPE_Q6_K:
  9882. case GGML_TYPE_IQ2_XXS:
  9883. case GGML_TYPE_IQ2_XS:
  9884. case GGML_TYPE_IQ3_XXS:
  9885. case GGML_TYPE_IQ1_S:
  9886. case GGML_TYPE_IQ4_NL:
  9887. case GGML_TYPE_Q8_K:
  9888. case GGML_TYPE_I8:
  9889. case GGML_TYPE_I16:
  9890. case GGML_TYPE_I32:
  9891. case GGML_TYPE_COUNT:
  9892. {
  9893. GGML_ASSERT(false);
  9894. } break;
  9895. }
  9896. }
  9897. // ggml_compute_forward_rope
  9898. static float rope_yarn_ramp(const float low, const float high, const int i0) {
  9899. const float y = (i0 / 2 - low) / MAX(0.001f, high - low);
  9900. return 1 - MIN(1, MAX(0, y));
  9901. }
  9902. // YaRN algorithm based on LlamaYaRNScaledRotaryEmbedding.py from https://github.com/jquesnelle/yarn
  9903. // MIT licensed. Copyright (c) 2023 Jeffrey Quesnelle and Bowen Peng.
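// rope_yarn mixes two candidate angles: theta_interp (position-interpolated, freq_scale * theta)
// and theta_extrap (the plain RoPE angle). rope_yarn_ramp decides per dimension pair how much
// extrapolation to blend in, and mscale applies the magnitude correction from the YaRN paper
// when extrapolation is active (ext_factor != 0)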
  9904. static void rope_yarn(
  9905. float theta_extrap, float freq_scale, float corr_dims[2], int64_t i0, float ext_factor, float mscale,
  9906. float * cos_theta, float * sin_theta
  9907. ) {
  9908. // Get n-d rotational scaling corrected for extrapolation
  9909. float theta_interp = freq_scale * theta_extrap;
  9910. float theta = theta_interp;
  9911. if (ext_factor != 0.0f) {
  9912. float ramp_mix = rope_yarn_ramp(corr_dims[0], corr_dims[1], i0) * ext_factor;
  9913. theta = theta_interp * (1 - ramp_mix) + theta_extrap * ramp_mix;
  9914. // Get n-d magnitude scaling corrected for interpolation
  9915. mscale *= 1.0f + 0.1f * logf(1.0f / freq_scale);
  9916. }
  9917. *cos_theta = cosf(theta) * mscale;
  9918. *sin_theta = sinf(theta) * mscale;
  9919. }
9920. // Solving `max_pos_emb = n_rot * 2pi * base^((2 * x) / n_dims)` for x (the dimension that completes n_rot full rotations over max_pos_emb positions), we get
9921. // `corr_dim(n_rot) = n_dims * log(max_pos_emb / (n_rot * 2pi)) / (2 * log(base))`
  9922. static float ggml_rope_yarn_corr_dim(int n_dims, int n_orig_ctx, float n_rot, float base) {
  9923. return n_dims * logf(n_orig_ctx / (n_rot * 2 * (float)M_PI)) / (2 * logf(base));
  9924. }
  9925. static void ggml_rope_cache_init(
  9926. float theta_base, float freq_scale, float corr_dims[2], int64_t ne0, float ext_factor, float mscale,
  9927. float * cache, float sin_sign, float theta_scale
  9928. ) {
  9929. float theta = theta_base;
  9930. for (int64_t i0 = 0; i0 < ne0; i0 += 2) {
  9931. rope_yarn(
  9932. theta, freq_scale, corr_dims, i0, ext_factor, mscale, &cache[i0 + 0], &cache[i0 + 1]
  9933. );
  9934. cache[i0 + 1] *= sin_sign;
  9935. theta *= theta_scale;
  9936. }
  9937. }
  9938. GGML_CALL void ggml_rope_yarn_corr_dims(
  9939. int n_dims, int n_orig_ctx, float freq_base, float beta_fast, float beta_slow, float dims[2]
  9940. ) {
  9941. // start and end correction dims
  9942. float start = floorf(ggml_rope_yarn_corr_dim(n_dims, n_orig_ctx, beta_fast, freq_base));
  9943. float end = ceilf(ggml_rope_yarn_corr_dim(n_dims, n_orig_ctx, beta_slow, freq_base));
  9944. dims[0] = MAX(0, start);
  9945. dims[1] = MIN(n_dims - 1, end);
  9946. }
  9947. static void ggml_compute_forward_rope_f32(
  9948. const struct ggml_compute_params * params,
  9949. const struct ggml_tensor * src0,
  9950. const struct ggml_tensor * src1,
  9951. struct ggml_tensor * dst,
  9952. const bool forward) {
  9953. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9954. return;
  9955. }
  9956. float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow;
  9957. // these two only relevant for xPos RoPE:
  9958. float xpos_base;
  9959. bool xpos_down;
  9960. //const int n_past = ((int32_t *) dst->op_params)[0];
  9961. const int n_dims = ((int32_t *) dst->op_params)[1];
  9962. const int mode = ((int32_t *) dst->op_params)[2];
  9963. const int n_ctx = ((int32_t *) dst->op_params)[3];
  9964. const int n_orig_ctx = ((int32_t *) dst->op_params)[4];
  9965. memcpy(&freq_base, (int32_t *) dst->op_params + 5, sizeof(float));
  9966. memcpy(&freq_scale, (int32_t *) dst->op_params + 6, sizeof(float));
  9967. memcpy(&ext_factor, (int32_t *) dst->op_params + 7, sizeof(float));
  9968. memcpy(&attn_factor, (int32_t *) dst->op_params + 8, sizeof(float));
  9969. memcpy(&beta_fast, (int32_t *) dst->op_params + 9, sizeof(float));
  9970. memcpy(&beta_slow, (int32_t *) dst->op_params + 10, sizeof(float));
  9971. memcpy(&xpos_base, (int32_t *) dst->op_params + 11, sizeof(float));
  9972. memcpy(&xpos_down, (int32_t *) dst->op_params + 12, sizeof(bool));
  9973. GGML_TENSOR_UNARY_OP_LOCALS
  9974. //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
  9975. //printf("n_past = %d, ne2 = %d\n", n_past, ne2);
  9976. GGML_ASSERT(nb00 == sizeof(float));
  9977. const int ith = params->ith;
  9978. const int nth = params->nth;
  9979. const int nr = ggml_nrows(dst);
  9980. GGML_ASSERT(n_dims <= ne0);
  9981. GGML_ASSERT(n_dims % 2 == 0);
  9982. // rows per thread
  9983. const int dr = (nr + nth - 1)/nth;
  9984. // row range for this thread
  9985. const int ir0 = dr*ith;
  9986. const int ir1 = MIN(ir0 + dr, nr);
9987. // running row counter, used to skip rows that belong to other threads
  9988. int ir = 0;
  9989. const float theta_scale = powf(freq_base, -2.0f/n_dims);
  9990. const float inv_ndims = -1.f/n_dims;
  9991. float corr_dims[2];
  9992. ggml_rope_yarn_corr_dims(n_dims, n_orig_ctx, freq_base, beta_fast, beta_slow, corr_dims);
  9993. const bool is_neox = mode & 2;
  9994. const bool is_glm = mode & 4;
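// `mode` appears to be a bit field: bit 1 (value 2) selects the GPT-NeoX style rotation, which
// pairs element i with element i + n_dims/2, bit 2 (value 4) selects the GLM style with its two
// rotated blocks; otherwise the original RoPE rotates adjacent element pairs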
  9995. // backward process uses inverse rotation by cos and sin.
  9996. // cos and sin build a rotation matrix, where the inverse is the transpose.
  9997. // this essentially just switches the sign of sin.
  9998. const float sin_sign = forward ? 1.0f : -1.0f;
  9999. const int32_t * pos = (const int32_t *) src1->data;
  10000. for (int64_t i3 = 0; i3 < ne3; i3++) {
  10001. for (int64_t i2 = 0; i2 < ne2; i2++) {
  10002. const int64_t p = pos[i2];
  10003. float * cache = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32)*ith;
  10004. if (!is_glm && !is_neox) { // TODO: cache sin/cos for glm, neox
  10005. ggml_rope_cache_init(p, freq_scale, corr_dims, ne0, ext_factor, attn_factor, cache, sin_sign, theta_scale);
  10006. }
  10007. for (int64_t i1 = 0; i1 < ne1; i1++) {
  10008. if (ir++ < ir0) continue;
  10009. if (ir > ir1) break;
  10010. float theta_base = (float)p;
  10011. if (is_glm) {
  10012. theta_base = MIN(p, n_ctx - 2);
  10013. float block_theta = MAX(p - (n_ctx - 2), 0);
  10014. for (int64_t i0 = 0; i0 < ne0 / 4; i0++) {
  10015. const float cos_theta = cosf(theta_base);
  10016. const float sin_theta = sinf(theta_base) * sin_sign;
  10017. const float cos_block_theta = cosf(block_theta);
  10018. const float sin_block_theta = sinf(block_theta) * sin_sign;
  10019. theta_base *= theta_scale;
  10020. block_theta *= theta_scale;
  10021. const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  10022. float * dst_data = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  10023. const float x0 = src[0];
  10024. const float x1 = src[n_dims/2];
  10025. const float x2 = src[n_dims];
  10026. const float x3 = src[n_dims/2*3];
  10027. dst_data[0] = x0*cos_theta - x1*sin_theta;
  10028. dst_data[n_dims/2] = x0*sin_theta + x1*cos_theta;
  10029. dst_data[n_dims] = x2*cos_block_theta - x3*sin_block_theta;
  10030. dst_data[n_dims/2*3] = x2*sin_block_theta + x3*cos_block_theta;
  10031. }
  10032. } else if (!is_neox) {
  10033. for (int64_t i0 = 0; i0 < ne0; i0 += 2) {
  10034. const float cos_theta = cache[i0 + 0];
  10035. const float sin_theta = cache[i0 + 1];
  10036. // zeta scaling for xPos only:
  10037. float zeta = xpos_base != 0.0f ? powf((i0 + 0.4f * ne0) / (1.4f * ne0), p / xpos_base) : 1.0f;
  10038. if (xpos_down) zeta = 1.0f / zeta;
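// xPos scaling (only when xpos_base != 0): each rotated pair is additionally multiplied by zeta,
// a position- and dimension-dependent decay factor; xpos_down presumably inverts the decay for
// the key side so the query/key product depends only on the relative distance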
  10039. const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  10040. float * dst_data = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  10041. const float x0 = src[0];
  10042. const float x1 = src[1];
  10043. dst_data[0] = x0*cos_theta*zeta - x1*sin_theta*zeta;
  10044. dst_data[1] = x0*sin_theta*zeta + x1*cos_theta*zeta;
  10045. }
  10046. } else {
  10047. // TODO: this might be wrong for ne0 != n_dims - need double check
  10048. // it seems we have to rope just the first n_dims elements and do nothing with the rest
  10049. // ref: https://github.com/ml-explore/mlx/blob/dc2edc762c797e3b8de50b1dad4dc0a131691033/benchmarks/python/llama_jax_bench.py#L11-L26
  10050. theta_base *= freq_scale;
  10051. for (int64_t ic = 0; ic < ne0; ic += 2) {
  10052. if (ic < n_dims) {
  10053. const int64_t ib = 0;
  10054. // simplified from `(ib * n_dims + ic) * inv_ndims`
  10055. float cur_rot = inv_ndims * ic - ib;
  10056. float cos_theta, sin_theta;
  10057. rope_yarn(
  10058. theta_base, freq_scale, corr_dims, cur_rot, ext_factor, attn_factor,
  10059. &cos_theta, &sin_theta
  10060. );
  10061. sin_theta *= sin_sign;
  10062. theta_base *= theta_scale;
  10063. const int64_t i0 = ib*n_dims + ic/2;
  10064. const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  10065. float * dst_data = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  10066. const float x0 = src[0];
  10067. const float x1 = src[n_dims/2];
  10068. dst_data[0] = x0*cos_theta - x1*sin_theta;
  10069. dst_data[n_dims/2] = x0*sin_theta + x1*cos_theta;
  10070. } else {
  10071. const int64_t i0 = ic;
  10072. const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  10073. float * dst_data = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  10074. dst_data[0] = src[0];
  10075. dst_data[1] = src[1];
  10076. }
  10077. }
  10078. }
  10079. }
  10080. }
  10081. }
  10082. }
  10083. static void ggml_compute_forward_rope_f16(
  10084. const struct ggml_compute_params * params,
  10085. const struct ggml_tensor * src0,
  10086. const struct ggml_tensor * src1,
  10087. struct ggml_tensor * dst,
  10088. const bool forward) {
  10089. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  10090. return;
  10091. }
  10092. float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow;
  10093. //const int n_past = ((int32_t *) dst->op_params)[0];
  10094. const int n_dims = ((int32_t *) dst->op_params)[1];
  10095. const int mode = ((int32_t *) dst->op_params)[2];
  10096. const int n_ctx = ((int32_t *) dst->op_params)[3];
  10097. const int n_orig_ctx = ((int32_t *) dst->op_params)[4];
  10098. memcpy(&freq_base, (int32_t *) dst->op_params + 5, sizeof(float));
  10099. memcpy(&freq_scale, (int32_t *) dst->op_params + 6, sizeof(float));
  10100. memcpy(&ext_factor, (int32_t *) dst->op_params + 7, sizeof(float));
  10101. memcpy(&attn_factor, (int32_t *) dst->op_params + 8, sizeof(float));
  10102. memcpy(&beta_fast, (int32_t *) dst->op_params + 9, sizeof(float));
  10103. memcpy(&beta_slow, (int32_t *) dst->op_params + 10, sizeof(float));
  10104. GGML_TENSOR_UNARY_OP_LOCALS
  10105. //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
  10106. //printf("n_past = %d, ne2 = %d\n", n_past, ne2);
  10107. GGML_ASSERT(nb0 == sizeof(ggml_fp16_t));
  10108. const int ith = params->ith;
  10109. const int nth = params->nth;
  10110. const int nr = ggml_nrows(dst);
  10111. GGML_ASSERT(n_dims <= ne0);
  10112. GGML_ASSERT(n_dims % 2 == 0);
  10113. // rows per thread
  10114. const int dr = (nr + nth - 1)/nth;
  10115. // row range for this thread
  10116. const int ir0 = dr*ith;
  10117. const int ir1 = MIN(ir0 + dr, nr);
10118. // running row counter, used to skip rows that belong to other threads
  10119. int ir = 0;
  10120. const float theta_scale = powf(freq_base, -2.0f/n_dims);
  10121. const float inv_ndims = -1.f/n_dims;
  10122. float corr_dims[2];
  10123. ggml_rope_yarn_corr_dims(n_dims, n_orig_ctx, freq_base, beta_fast, beta_slow, corr_dims);
  10124. const bool is_neox = mode & 2;
  10125. const bool is_glm = mode & 4;
  10126. // backward process uses inverse rotation by cos and sin.
  10127. // cos and sin build a rotation matrix, where the inverse is the transpose.
  10128. // this essentially just switches the sign of sin.
  10129. const float sin_sign = forward ? 1.0f : -1.0f;
  10130. const int32_t * pos = (const int32_t *) src1->data;
  10131. for (int64_t i3 = 0; i3 < ne3; i3++) {
  10132. for (int64_t i2 = 0; i2 < ne2; i2++) {
  10133. const int64_t p = pos[i2];
  10134. float * cache = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32)*ith;
  10135. if (!is_glm && !is_neox) { // TODO: cache sin/cos for glm, neox
  10136. ggml_rope_cache_init(p, freq_scale, corr_dims, ne0, ext_factor, attn_factor, cache, sin_sign, theta_scale);
  10137. }
  10138. for (int64_t i1 = 0; i1 < ne1; i1++) {
  10139. if (ir++ < ir0) continue;
  10140. if (ir > ir1) break;
  10141. float theta_base = (float)p;
  10142. if (is_glm) {
  10143. theta_base = MIN(p, n_ctx - 2);
  10144. float block_theta = MAX(p - (n_ctx - 2), 0);
  10145. for (int64_t i0 = 0; i0 < ne0 / 4; i0++) {
  10146. const float cos_theta = cosf(theta_base);
  10147. const float sin_theta = sinf(theta_base) * sin_sign;
  10148. const float cos_block_theta = cosf(block_theta);
  10149. const float sin_block_theta = sinf(block_theta) * sin_sign;
  10150. theta_base *= theta_scale;
  10151. block_theta *= theta_scale;
  10152. const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  10153. ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  10154. const float x0 = GGML_FP16_TO_FP32(src[0]);
  10155. const float x1 = GGML_FP16_TO_FP32(src[n_dims/2]);
  10156. const float x2 = GGML_FP16_TO_FP32(src[n_dims]);
  10157. const float x3 = GGML_FP16_TO_FP32(src[n_dims/2*3]);
  10158. dst_data[0] = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta);
  10159. dst_data[n_dims/2] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta);
  10160. dst_data[n_dims] = GGML_FP32_TO_FP16(x2*cos_block_theta - x3*sin_block_theta);
  10161. dst_data[n_dims/2*3] = GGML_FP32_TO_FP16(x2*sin_block_theta + x3*cos_block_theta);
  10162. }
  10163. } else if (!is_neox) {
  10164. for (int64_t i0 = 0; i0 < ne0; i0 += 2) {
  10165. const float cos_theta = cache[i0 + 0];
  10166. const float sin_theta = cache[i0 + 1];
  10167. const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  10168. ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  10169. const float x0 = GGML_FP16_TO_FP32(src[0]);
  10170. const float x1 = GGML_FP16_TO_FP32(src[1]);
  10171. dst_data[0] = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta);
  10172. dst_data[1] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta);
  10173. }
  10174. } else {
  10175. // TODO: this might be wrong for ne0 != n_dims - need double check
  10176. // it seems we have to rope just the first n_dims elements and do nothing with the rest
  10177. // ref: https://github.com/ml-explore/mlx/blob/dc2edc762c797e3b8de50b1dad4dc0a131691033/benchmarks/python/llama_jax_bench.py#L11-L26
  10178. theta_base *= freq_scale;
  10179. for (int64_t ic = 0; ic < ne0; ic += 2) {
  10180. if (ic < n_dims) {
  10181. const int64_t ib = 0;
  10182. // simplified from `(ib * n_dims + ic) * inv_ndims`
  10183. float cur_rot = inv_ndims * ic - ib;
  10184. float cos_theta, sin_theta;
  10185. rope_yarn(
  10186. theta_base, freq_scale, corr_dims, cur_rot, ext_factor, attn_factor,
  10187. &cos_theta, &sin_theta
  10188. );
  10189. sin_theta *= sin_sign;
  10190. theta_base *= theta_scale;
  10191. const int64_t i0 = ib*n_dims + ic/2;
  10192. const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  10193. ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  10194. const float x0 = GGML_FP16_TO_FP32(src[0]);
  10195. const float x1 = GGML_FP16_TO_FP32(src[n_dims/2]);
  10196. dst_data[0] = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta);
  10197. dst_data[n_dims/2] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta);
  10198. } else {
  10199. const int64_t i0 = ic;
  10200. const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  10201. ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  10202. dst_data[0] = src[0];
  10203. dst_data[1] = src[1];
  10204. }
  10205. }
  10206. }
  10207. }
  10208. }
  10209. }
  10210. }
  10211. static void ggml_compute_forward_rope(
  10212. const struct ggml_compute_params * params,
  10213. const struct ggml_tensor * src0,
  10214. const struct ggml_tensor * src1,
  10215. struct ggml_tensor * dst) {
  10216. switch (src0->type) {
  10217. case GGML_TYPE_F16:
  10218. {
  10219. ggml_compute_forward_rope_f16(params, src0, src1, dst, true);
  10220. } break;
  10221. case GGML_TYPE_F32:
  10222. {
  10223. ggml_compute_forward_rope_f32(params, src0, src1, dst, true);
  10224. } break;
  10225. default:
  10226. {
  10227. GGML_ASSERT(false);
  10228. } break;
  10229. }
  10230. }
  10231. // ggml_compute_forward_rope_back
  10232. static void ggml_compute_forward_rope_back(
  10233. const struct ggml_compute_params * params,
  10234. const struct ggml_tensor * src0,
  10235. const struct ggml_tensor * src1,
  10236. struct ggml_tensor * dst) {
  10237. switch (src0->type) {
  10238. case GGML_TYPE_F16:
  10239. {
  10240. ggml_compute_forward_rope_f16(params, src0, src1, dst, false);
  10241. } break;
  10242. case GGML_TYPE_F32:
  10243. {
  10244. ggml_compute_forward_rope_f32(params, src0, src1, dst, false);
  10245. } break;
  10246. default:
  10247. {
  10248. GGML_ASSERT(false);
  10249. } break;
  10250. }
  10251. }
  10252. // ggml_compute_forward_conv_transpose_1d
  10253. static void ggml_compute_forward_conv_transpose_1d_f16_f32(
  10254. const struct ggml_compute_params * params,
  10255. const struct ggml_tensor * src0,
  10256. const struct ggml_tensor * src1,
  10257. struct ggml_tensor * dst) {
  10258. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  10259. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  10260. GGML_ASSERT( dst->type == GGML_TYPE_F32);
  10261. int64_t t0 = ggml_perf_time_us();
  10262. UNUSED(t0);
  10263. GGML_TENSOR_BINARY_OP_LOCALS
  10264. const int ith = params->ith;
  10265. const int nth = params->nth;
  10266. const int nk = ne00*ne01*ne02;
  10267. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  10268. GGML_ASSERT(nb10 == sizeof(float));
  10269. if (params->type == GGML_TASK_INIT) {
  10270. if (ith != 0) {
  10271. return;
  10272. }
  10273. memset(params->wdata, 0, params->wsize);
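// both operands are repacked into wdata so that the input-channel axis (ne02 for the kernel,
// ne11 for the source) becomes the contiguous innermost dimension; the main loop below can then
// reduce over channels with a single ggml_vec_dot_f16 per output element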
  10274. // permute kernel data (src0) from (K x Cout x Cin) to (Cin x K x Cout)
  10275. {
  10276. ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;
  10277. for (int64_t i02 = 0; i02 < ne02; i02++) {
  10278. for (int64_t i01 = 0; i01 < ne01; i01++) {
  10279. const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i02*nb02 + i01*nb01);
  10280. ggml_fp16_t * dst_data = wdata + i01*ne00*ne02;
  10281. for (int64_t i00 = 0; i00 < ne00; i00++) {
  10282. dst_data[i00*ne02 + i02] = src[i00];
  10283. }
  10284. }
  10285. }
  10286. }
  10287. // permute source data (src1) from (L x Cin) to (Cin x L)
  10288. {
  10289. ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + nk;
  10290. ggml_fp16_t * dst_data = wdata;
  10291. for (int64_t i11 = 0; i11 < ne11; i11++) {
  10292. const float * const src = (float *)((char *) src1->data + i11*nb11);
  10293. for (int64_t i10 = 0; i10 < ne10; i10++) {
  10294. dst_data[i10*ne11 + i11] = GGML_FP32_TO_FP16(src[i10]);
  10295. }
  10296. }
  10297. }
  10298. // need to zero dst since we are accumulating into it
  10299. memset(dst->data, 0, ggml_nbytes(dst));
  10300. return;
  10301. }
  10302. if (params->type == GGML_TASK_FINALIZE) {
  10303. return;
  10304. }
  10305. const int32_t s0 = ((const int32_t*)(dst->op_params))[0];
  10306. // total rows in dst
  10307. const int nr = ne1;
  10308. // rows per thread
  10309. const int dr = (nr + nth - 1)/nth;
  10310. // row range for this thread
  10311. const int ir0 = dr*ith;
  10312. const int ir1 = MIN(ir0 + dr, nr);
  10313. ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;
  10314. ggml_fp16_t * const wdata_src = wdata + nk;
  10315. for (int i1 = ir0; i1 < ir1; i1++) {
  10316. float * dst_data = (float *)((char *) dst->data + i1*nb1);
  10317. ggml_fp16_t * wdata_kernel = wdata + i1*ne02*ne00;
  10318. for (int i10 = 0; i10 < ne10; i10++) {
  10319. const int i1n = i10*ne11;
  10320. for (int i00 = 0; i00 < ne00; i00++) {
  10321. float v = 0;
  10322. ggml_vec_dot_f16(ne02, &v, 0,
  10323. (ggml_fp16_t *) wdata_src + i1n, 0,
  10324. (ggml_fp16_t *) wdata_kernel + i00*ne02, 0, 1);
  10325. dst_data[i10*s0 + i00] += v;
  10326. }
  10327. }
  10328. }
  10329. }
  10330. static void ggml_compute_forward_conv_transpose_1d_f32(
  10331. const struct ggml_compute_params * params,
  10332. const struct ggml_tensor * src0,
  10333. const struct ggml_tensor * src1,
  10334. struct ggml_tensor * dst) {
  10335. GGML_ASSERT(src0->type == GGML_TYPE_F32);
  10336. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  10337. GGML_ASSERT( dst->type == GGML_TYPE_F32);
  10338. int64_t t0 = ggml_perf_time_us();
  10339. UNUSED(t0);
  10340. GGML_TENSOR_BINARY_OP_LOCALS
  10341. const int ith = params->ith;
  10342. const int nth = params->nth;
  10343. const int nk = ne00*ne01*ne02;
  10344. GGML_ASSERT(nb00 == sizeof(float));
  10345. GGML_ASSERT(nb10 == sizeof(float));
  10346. if (params->type == GGML_TASK_INIT) {
  10347. if (ith != 0) {
  10348. return;
  10349. }
  10350. memset(params->wdata, 0, params->wsize);
  10351. // prepare kernel data (src0) from (K x Cout x Cin) to (Cin x K x Cout)
  10352. {
  10353. float * const wdata = (float *) params->wdata + 0;
  10354. for (int64_t i02 = 0; i02 < ne02; i02++) {
  10355. for (int64_t i01 = 0; i01 < ne01; i01++) {
  10356. const float * const src = (float *)((char *) src0->data + i02*nb02 + i01*nb01);
  10357. float * dst_data = wdata + i01*ne00*ne02;
  10358. for (int64_t i00 = 0; i00 < ne00; i00++) {
  10359. dst_data[i00*ne02 + i02] = src[i00];
  10360. }
  10361. }
  10362. }
  10363. }
  10364. // prepare source data (src1)
  10365. {
  10366. float * const wdata = (float *) params->wdata + nk;
  10367. float * dst_data = wdata;
  10368. for (int64_t i11 = 0; i11 < ne11; i11++) {
  10369. const float * const src = (float *)((char *) src1->data + i11*nb11);
  10370. for (int64_t i10 = 0; i10 < ne10; i10++) {
  10371. dst_data[i10*ne11 + i11] = src[i10];
  10372. }
  10373. }
  10374. }
  10375. // need to zero dst since we are accumulating into it
  10376. memset(dst->data, 0, ggml_nbytes(dst));
  10377. return;
  10378. }
  10379. if (params->type == GGML_TASK_FINALIZE) {
  10380. return;
  10381. }
  10382. const int32_t s0 = ((const int32_t*)(dst->op_params))[0];
  10383. // total rows in dst
  10384. const int nr = ne1;
  10385. // rows per thread
  10386. const int dr = (nr + nth - 1)/nth;
  10387. // row range for this thread
  10388. const int ir0 = dr*ith;
  10389. const int ir1 = MIN(ir0 + dr, nr);
  10390. float * const wdata = (float *) params->wdata + 0;
  10391. float * const wdata_src = wdata + nk;
  10392. for (int i1 = ir0; i1 < ir1; i1++) {
  10393. float * dst_data = (float *)((char *) dst->data + i1*nb1);
  10394. float * wdata_kernel = wdata + i1*ne02*ne00;
  10395. for (int i10 = 0; i10 < ne10; i10++) {
  10396. const int i1n = i10*ne11;
  10397. for (int i00 = 0; i00 < ne00; i00++) {
  10398. float v = 0;
  10399. ggml_vec_dot_f32(ne02, &v, 0,
  10400. wdata_src + i1n, 0,
  10401. wdata_kernel + i00*ne02, 0, 1);
  10402. dst_data[i10*s0 + i00] += v;
  10403. }
  10404. }
  10405. }
  10406. }
  10407. static void ggml_compute_forward_conv_transpose_1d(
  10408. const struct ggml_compute_params * params,
  10409. const struct ggml_tensor * src0,
  10410. const struct ggml_tensor * src1,
  10411. struct ggml_tensor * dst) {
  10412. switch (src0->type) {
  10413. case GGML_TYPE_F16:
  10414. {
  10415. ggml_compute_forward_conv_transpose_1d_f16_f32(params, src0, src1, dst);
  10416. } break;
  10417. case GGML_TYPE_F32:
  10418. {
  10419. ggml_compute_forward_conv_transpose_1d_f32(params, src0, src1, dst);
  10420. } break;
  10421. default:
  10422. {
  10423. GGML_ASSERT(false);
  10424. } break;
  10425. }
  10426. }
  10427. // src0: kernel [OC, IC, KH, KW]
  10428. // src1: image [N, IC, IH, IW]
  10429. // dst: result [N, OH, OW, IC*KH*KW]
  10430. static void ggml_compute_forward_im2col_f32(
  10431. const struct ggml_compute_params * params,
  10432. const struct ggml_tensor * src0,
  10433. const struct ggml_tensor * src1,
  10434. struct ggml_tensor * dst) {
  10435. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  10436. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  10437. GGML_ASSERT( dst->type == GGML_TYPE_F32);
  10438. int64_t t0 = ggml_perf_time_us();
  10439. UNUSED(t0);
  10440. GGML_TENSOR_BINARY_OP_LOCALS;
  10441. const int32_t s0 = ((const int32_t *)(dst->op_params))[0];
  10442. const int32_t s1 = ((const int32_t *)(dst->op_params))[1];
  10443. const int32_t p0 = ((const int32_t *)(dst->op_params))[2];
  10444. const int32_t p1 = ((const int32_t *)(dst->op_params))[3];
  10445. const int32_t d0 = ((const int32_t *)(dst->op_params))[4];
  10446. const int32_t d1 = ((const int32_t *)(dst->op_params))[5];
  10447. const bool is_2D = ((const int32_t *)(dst->op_params))[6] == 1;
  10448. const int ith = params->ith;
  10449. const int nth = params->nth;
  10450. const int64_t N = is_2D ? ne13 : ne12;
  10451. const int64_t IC = is_2D ? ne12 : ne11;
  10452. const int64_t IH = is_2D ? ne11 : 1;
  10453. const int64_t IW = ne10;
  10454. const int64_t KH = is_2D ? ne01 : 1;
  10455. const int64_t KW = ne00;
  10456. const int64_t OH = is_2D ? ne2 : 1;
  10457. const int64_t OW = ne1;
10458. const int64_t ofs0 = is_2D ? nb13 : nb12;
10459. const int64_t ofs1 = is_2D ? nb12 : nb11;
  10460. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  10461. GGML_ASSERT(nb10 == sizeof(float));
  10462. if (params->type == GGML_TASK_INIT) {
  10463. return;
  10464. }
  10465. if (params->type == GGML_TASK_FINALIZE) {
  10466. return;
  10467. }
  10468. // im2col: [N, IC, IH, IW] => [N, OH, OW, IC*KH*KW]
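// each dst row gathers the IC*KH*KW receptive-field values for one (batch, oh, ow) position,
// writing 0 for taps that fall into the padding; a convolution then reduces to a matrix
// multiplication of this buffer with the flattened kernel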
  10469. {
  10470. float * const wdata = (float *) dst->data;
  10471. for (int64_t in = 0; in < N; in++) {
  10472. for (int64_t ioh = 0; ioh < OH; ioh++) { // 1
  10473. for (int64_t iow = 0; iow < OW; iow++) {
  10474. for (int64_t iic = ith; iic < IC; iic += nth) {
  10475. // micro kernel
  10476. float * dst_data = wdata + (in*OH*OW + ioh*OW + iow)*(IC*KH*KW); // [IC, KH, KW]
  10477. const float * const src_data = (float *)((char *) src1->data + in*ofs0 + iic*ofs1); // [IH, IW]
  10478. for (int64_t ikh = 0; ikh < KH; ikh++) { // 1
  10479. for (int64_t ikw = 0; ikw < KW; ikw++) {
  10480. const int64_t iiw = iow*s0 + ikw*d0 - p0;
  10481. const int64_t iih = ioh*s1 + ikh*d1 - p1;
  10482. if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) {
  10483. dst_data[iic*(KH*KW) + ikh*KW + ikw] = 0;
  10484. } else {
  10485. dst_data[iic*(KH*KW) + ikh*KW + ikw] = (src_data[iih*IW + iiw]);
  10486. }
  10487. }
  10488. }
  10489. }
  10490. }
  10491. }
  10492. }
  10493. }
  10494. }
  10495. // src0: kernel [OC, IC, KH, KW]
  10496. // src1: image [N, IC, IH, IW]
  10497. // dst: result [N, OH, OW, IC*KH*KW]
  10498. static void ggml_compute_forward_im2col_f16(
  10499. const struct ggml_compute_params * params,
  10500. const struct ggml_tensor * src0,
  10501. const struct ggml_tensor * src1,
  10502. struct ggml_tensor * dst) {
  10503. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  10504. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  10505. GGML_ASSERT( dst->type == GGML_TYPE_F16);
  10506. int64_t t0 = ggml_perf_time_us();
  10507. UNUSED(t0);
  10508. GGML_TENSOR_BINARY_OP_LOCALS;
  10509. const int32_t s0 = ((const int32_t *)(dst->op_params))[0];
  10510. const int32_t s1 = ((const int32_t *)(dst->op_params))[1];
  10511. const int32_t p0 = ((const int32_t *)(dst->op_params))[2];
  10512. const int32_t p1 = ((const int32_t *)(dst->op_params))[3];
  10513. const int32_t d0 = ((const int32_t *)(dst->op_params))[4];
  10514. const int32_t d1 = ((const int32_t *)(dst->op_params))[5];
  10515. const bool is_2D = ((const int32_t *)(dst->op_params))[6] == 1;
  10516. const int ith = params->ith;
  10517. const int nth = params->nth;
  10518. const int64_t N = is_2D ? ne13 : ne12;
  10519. const int64_t IC = is_2D ? ne12 : ne11;
  10520. const int64_t IH = is_2D ? ne11 : 1;
  10521. const int64_t IW = ne10;
  10522. const int64_t KH = is_2D ? ne01 : 1;
  10523. const int64_t KW = ne00;
  10524. const int64_t OH = is_2D ? ne2 : 1;
  10525. const int64_t OW = ne1;
10526. const int64_t ofs0 = is_2D ? nb13 : nb12;
10527. const int64_t ofs1 = is_2D ? nb12 : nb11;
  10528. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  10529. GGML_ASSERT(nb10 == sizeof(float));
  10530. if (params->type == GGML_TASK_INIT) {
  10531. return;
  10532. }
  10533. if (params->type == GGML_TASK_FINALIZE) {
  10534. return;
  10535. }
  10536. // im2col: [N, IC, IH, IW] => [N, OH, OW, IC*KH*KW]
  10537. {
  10538. ggml_fp16_t * const wdata = (ggml_fp16_t *) dst->data;
  10539. for (int64_t in = 0; in < N; in++) {
  10540. for (int64_t ioh = 0; ioh < OH; ioh++) { // 1
  10541. for (int64_t iow = 0; iow < OW; iow++) {
  10542. for (int64_t iic = ith; iic < IC; iic += nth) {
  10543. // micro kernel
  10544. ggml_fp16_t * dst_data = wdata + (in*OH*OW + ioh*OW + iow)*(IC*KH*KW); // [IC, KH, KW]
  10545. const float * const src_data = (float *)((char *) src1->data + in*ofs0 + iic*ofs1); // [IH, IW]
  10546. for (int64_t ikh = 0; ikh < KH; ikh++) { // 1
  10547. for (int64_t ikw = 0; ikw < KW; ikw++) {
  10548. const int64_t iiw = iow*s0 + ikw*d0 - p0;
  10549. const int64_t iih = ioh*s1 + ikh*d1 - p1;
  10550. if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) {
  10551. dst_data[iic*(KH*KW) + ikh*KW + ikw] = 0;
  10552. } else {
  10553. dst_data[iic*(KH*KW) + ikh*KW + ikw] = GGML_FP32_TO_FP16(src_data[iih*IW + iiw]);
  10554. }
  10555. }
  10556. }
  10557. }
  10558. }
  10559. }
  10560. }
  10561. }
  10562. }
  10563. static void ggml_compute_forward_im2col(
  10564. const struct ggml_compute_params * params,
  10565. const struct ggml_tensor * src0,
  10566. const struct ggml_tensor * src1,
  10567. struct ggml_tensor * dst) {
  10568. switch (dst->type) {
  10569. case GGML_TYPE_F16:
  10570. {
  10571. ggml_compute_forward_im2col_f16(params, src0, src1, dst);
  10572. } break;
  10573. case GGML_TYPE_F32:
  10574. {
  10575. ggml_compute_forward_im2col_f32(params, src0, src1, dst);
  10576. } break;
  10577. default:
  10578. {
  10579. GGML_ASSERT(false);
  10580. } break;
  10581. }
  10582. }
  10583. // ggml_compute_forward_conv_transpose_2d
  10584. static void ggml_compute_forward_conv_transpose_2d(
  10585. const struct ggml_compute_params * params,
  10586. const struct ggml_tensor * src0,
  10587. const struct ggml_tensor * src1,
  10588. struct ggml_tensor * dst) {
  10589. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  10590. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  10591. GGML_ASSERT( dst->type == GGML_TYPE_F32);
  10592. int64_t t0 = ggml_perf_time_us();
  10593. UNUSED(t0);
  10594. GGML_TENSOR_BINARY_OP_LOCALS
  10595. const int ith = params->ith;
  10596. const int nth = params->nth;
  10597. const int nk = ne00*ne01*ne02*ne03;
  10598. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  10599. GGML_ASSERT(nb10 == sizeof(float));
  10600. if (params->type == GGML_TASK_INIT) {
  10601. if (ith != 0) {
  10602. return;
  10603. }
  10604. memset(params->wdata, 0, params->wsize);
  10605. // permute kernel data (src0) from (Kw x Kh x Cout x Cin) to (Cin x Kw x Kh x Cout)
  10606. {
  10607. ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;
  10608. for (int64_t i03 = 0; i03 < ne03; i03++) {
  10609. for (int64_t i02 = 0; i02 < ne02; i02++) {
  10610. const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i03*nb03 + i02*nb02);
  10611. ggml_fp16_t * dst_data = wdata + i02*ne01*ne00*ne03;
  10612. for (int64_t i01 = 0; i01 < ne01; i01++) {
  10613. for (int64_t i00 = 0; i00 < ne00; i00++) {
  10614. dst_data[i01*ne00*ne03 + i00*ne03 + i03] = src[i01 * ne00 + i00];
  10615. }
  10616. }
  10617. }
  10618. }
  10619. }
  10620. // permute source data (src1) from (Sw x Sh x Cin) to (Cin x Sw x Sh)
  10621. {
  10622. ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + nk;
  10623. for (int i12 = 0; i12 < ne12; i12++) {
  10624. for (int i11 = 0; i11 < ne11; i11++) {
  10625. const float * const src = (float *)((char *) src1->data + i12*nb12 + i11*nb11);
  10626. ggml_fp16_t * dst_data = wdata + i11*ne10*ne12;
  10627. for (int i10 = 0; i10 < ne10; i10++) {
  10628. dst_data[i10*ne12 + i12] = GGML_FP32_TO_FP16(src[i10]);
  10629. }
  10630. }
  10631. }
  10632. }
  10633. memset(dst->data, 0, ggml_nbytes(dst));
  10634. return;
  10635. }
  10636. if (params->type == GGML_TASK_FINALIZE) {
  10637. return;
  10638. }
  10639. const int32_t stride = ggml_get_op_params_i32(dst, 0);
  10640. // total patches in dst
  10641. const int np = ne2;
  10642. // patches per thread
  10643. const int dp = (np + nth - 1)/nth;
  10644. // patch range for this thread
  10645. const int ip0 = dp*ith;
  10646. const int ip1 = MIN(ip0 + dp, np);
  10647. ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;
  10648. ggml_fp16_t * const wdata_src = wdata + nk;
  10649. for (int i2 = ip0; i2 < ip1; i2++) { // Cout
  10650. float * dst_data = (float *)((char *) dst->data + i2*nb2);
  10651. ggml_fp16_t * wdata_kernel = wdata + i2*ne01*ne00*ne03;
  10652. for (int i11 = 0; i11 < ne11; i11++) {
  10653. for (int i10 = 0; i10 < ne10; i10++) {
  10654. const int i1n = i11*ne10*ne12 + i10*ne12;
  10655. for (int i01 = 0; i01 < ne01; i01++) {
  10656. for (int i00 = 0; i00 < ne00; i00++) {
  10657. float v = 0;
  10658. ggml_vec_dot_f16(ne03, &v, 0,
  10659. wdata_src + i1n, 0,
  10660. wdata_kernel + i01*ne00*ne03 + i00*ne03, 0, 1);
  10661. dst_data[(i11*stride + i01)*ne0 + i10*stride + i00] += v;
  10662. }
  10663. }
  10664. }
  10665. }
  10666. }
  10667. }
  10668. // ggml_compute_forward_pool_1d_sk_p0
  10669. static void ggml_compute_forward_pool_1d_sk_p0(
  10670. const struct ggml_compute_params * params,
  10671. const enum ggml_op_pool op,
  10672. const struct ggml_tensor * src,
  10673. const int k,
  10674. struct ggml_tensor * dst) {
  10675. assert(src->type == GGML_TYPE_F32);
  10676. assert(params->ith == 0);
  10677. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  10678. return;
  10679. }
  10680. const char * cdata = (const char *)src->data;
  10681. const char * const data_end = cdata + ggml_nbytes(src);
  10682. float * drow = (float *)dst->data;
  10683. const int64_t rs = dst->ne[0];
  10684. while (cdata < data_end) {
  10685. const float * const srow = (const float *)cdata;
  10686. int j = 0;
  10687. for (int64_t i = 0; i < rs; ++i) {
  10688. switch (op) {
  10689. case GGML_OP_POOL_AVG: drow[i] = 0; break;
  10690. case GGML_OP_POOL_MAX: drow[i] = -FLT_MAX; break;
  10691. case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
  10692. }
  10693. for (int ki = 0; ki < k; ++ki) {
  10694. switch (op) {
  10695. case GGML_OP_POOL_AVG: drow[i] += srow[j]; break;
  10696. case GGML_OP_POOL_MAX: if (srow[j] > drow[i]) drow[i] = srow[j]; break;
  10697. case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
  10698. }
  10699. ++j;
  10700. }
  10701. switch (op) {
  10702. case GGML_OP_POOL_AVG: drow[i] /= k; break;
  10703. case GGML_OP_POOL_MAX: break;
  10704. case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
  10705. }
  10706. }
  10707. cdata += src->nb[1];
  10708. drow += rs;
  10709. }
  10710. }
  10711. // ggml_compute_forward_pool_1d
  10712. static void ggml_compute_forward_pool_1d(
  10713. const struct ggml_compute_params * params,
  10714. const struct ggml_tensor * src0,
  10715. struct ggml_tensor * dst) {
  10716. const int32_t * opts = (const int32_t *)dst->op_params;
  10717. enum ggml_op_pool op = opts[0];
  10718. const int k0 = opts[1];
  10719. const int s0 = opts[2];
  10720. const int p0 = opts[3];
  10721. GGML_ASSERT(p0 == 0); // padding not supported
  10722. GGML_ASSERT(k0 == s0); // only s = k supported
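// only non-overlapping, unpadded pooling is implemented here: with k == s and p == 0, output
// element i reduces exactly the input elements [i*k, (i+1)*k)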
  10723. ggml_compute_forward_pool_1d_sk_p0(params, op, src0, k0, dst);
  10724. }
  10725. // ggml_compute_forward_pool_2d
  10726. static void ggml_compute_forward_pool_2d(
  10727. const struct ggml_compute_params * params,
  10728. const struct ggml_tensor * src,
  10729. struct ggml_tensor * dst) {
  10730. GGML_ASSERT(src->type == GGML_TYPE_F32);
  10731. GGML_ASSERT(params->ith == 0);
  10732. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  10733. return;
  10734. }
  10735. const int32_t * opts = (const int32_t *)dst->op_params;
  10736. enum ggml_op_pool op = opts[0];
  10737. const int k0 = opts[1];
  10738. const int k1 = opts[2];
  10739. const int s0 = opts[3];
  10740. const int s1 = opts[4];
  10741. const int p0 = opts[5];
  10742. const int p1 = opts[6];
  10743. const char * cdata = (const char*)src->data;
  10744. const char * const data_end = cdata + ggml_nbytes(src);
  10745. const int64_t px = dst->ne[0];
  10746. const int64_t py = dst->ne[1];
  10747. const int64_t pa = px * py;
  10748. float * dplane = (float *)dst->data;
  10749. const int ka = k0 * k1;
  10750. const int offset0 = -p0;
  10751. const int offset1 = -p1;
  10752. while (cdata < data_end) {
  10753. for (int oy = 0; oy < py; ++oy) {
  10754. float * const drow = dplane + oy * px;
  10755. for (int ox = 0; ox < px; ++ox) {
  10756. float * const out = drow + ox;
  10757. switch (op) {
  10758. case GGML_OP_POOL_AVG: *out = 0; break;
  10759. case GGML_OP_POOL_MAX: *out = -FLT_MAX; break;
  10760. case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
  10761. }
  10762. const int ix = offset0 + ox * s0;
  10763. const int iy = offset1 + oy * s1;
  10764. for (int ky = 0; ky < k1; ++ky) {
  10765. if (iy + ky < 0 || iy + ky >= src->ne[1]) continue;
  10766. const float * const srow = (const float *)(cdata + src->nb[1] * (iy + ky));
  10767. for (int kx = 0; kx < k0; ++kx) {
  10768. int j = ix + kx;
  10769. if (j < 0 || j >= src->ne[0]) continue;
  10770. switch (op) {
  10771. case GGML_OP_POOL_AVG: *out += srow[j]; break;
  10772. case GGML_OP_POOL_MAX: if (srow[j] > *out) *out = srow[j]; break;
  10773. case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
  10774. }
  10775. }
  10776. }
  10777. switch (op) {
  10778. case GGML_OP_POOL_AVG: *out /= ka; break;
  10779. case GGML_OP_POOL_MAX: break;
  10780. case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
  10781. }
  10782. }
  10783. }
  10784. cdata += src->nb[2];
  10785. dplane += pa;
  10786. }
  10787. }
  10788. // ggml_compute_forward_upscale
  10789. static void ggml_compute_forward_upscale_f32(
  10790. const struct ggml_compute_params * params,
  10791. const struct ggml_tensor * src0,
  10792. struct ggml_tensor * dst) {
  10793. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  10794. return;
  10795. }
  10796. GGML_ASSERT(src0->nb[0] == sizeof(float));
  10797. const int ith = params->ith;
  10798. const int nth = params->nth;
  10799. GGML_TENSOR_UNARY_OP_LOCALS
  10800. const int scale_factor = dst->op_params[0];
  10801. // TODO: optimize
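// nearest-neighbour upscaling: destination element (i0, i1) reads source element
// (i0 / scale_factor, i1 / scale_factor) via integer division; the outer two dims pass through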
  10802. for (int64_t i3 = 0; i3 < ne3; i3++) {
  10803. const int64_t i03 = i3;
  10804. for (int64_t i2 = ith; i2 < ne2; i2 += nth) {
  10805. const int64_t i02 = i2;
  10806. for (int64_t i1 = 0; i1 < ne1; i1++) {
  10807. const int64_t i01 = i1 / scale_factor;
  10808. for (int64_t i0 = 0; i0 < ne0; i0++) {
  10809. const int64_t i00 = i0 / scale_factor;
  10810. const float * x = (float *)((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  10811. float * y = (float *)((char *) dst->data + i0*nb0 + i1*nb1 + i2*nb2 + i3*nb3);
  10812. *y = *x;
  10813. }
  10814. }
  10815. }
  10816. }
  10817. }
  10818. static void ggml_compute_forward_upscale(
  10819. const struct ggml_compute_params * params,
  10820. const struct ggml_tensor * src0,
  10821. struct ggml_tensor * dst) {
  10822. switch (src0->type) {
  10823. case GGML_TYPE_F32:
  10824. {
  10825. ggml_compute_forward_upscale_f32(params, src0, dst);
  10826. } break;
  10827. default:
  10828. {
  10829. GGML_ASSERT(false);
  10830. } break;
  10831. }
  10832. }
  10833. // ggml_compute_forward_pad
  10834. static void ggml_compute_forward_pad_f32(
  10835. const struct ggml_compute_params * params,
  10836. const struct ggml_tensor * src0,
  10837. struct ggml_tensor * dst) {
  10838. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  10839. return;
  10840. }
  10841. GGML_ASSERT(src0->nb[0] == sizeof(float));
  10842. GGML_ASSERT( dst->nb[0] == sizeof(float));
  10843. const int ith = params->ith;
  10844. const int nth = params->nth;
  10845. GGML_TENSOR_UNARY_OP_LOCALS
  10846. float * dst_ptr = (float *) dst->data;
  10847. // TODO: optimize
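// dst is assumed contiguous here (dst_idx is computed directly from ne0..ne2); positions inside
// the src0 extent are copied, everything outside is zero-padded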
  10848. for (int64_t i2 = 0; i2 < ne2; ++i2) {
  10849. for (int64_t i1 = ith; i1 < ne1; i1 += nth) {
  10850. for (int64_t i0 = 0; i0 < ne0; ++i0) {
  10851. for (int64_t i3 = 0; i3 < ne3; ++i3) {
  10852. const int64_t dst_idx = i3*(ne0*ne1*ne2) + i2*(ne0*ne1) + i1*ne0 + i0;
  10853. const float * src_ptr = (const float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  10854. if (i0 < ne00 && i1 < ne01 && i2 < ne02 && i3 < ne03) {
  10855. dst_ptr[dst_idx] = *src_ptr;
  10856. } else {
  10857. dst_ptr[dst_idx] = 0;
  10858. }
  10859. }
  10860. }
  10861. }
  10862. }
  10863. }
  10864. static void ggml_compute_forward_pad(
  10865. const struct ggml_compute_params * params,
  10866. const struct ggml_tensor * src0,
  10867. struct ggml_tensor * dst) {
  10868. switch (src0->type) {
  10869. case GGML_TYPE_F32:
  10870. {
  10871. ggml_compute_forward_pad_f32(params, src0, dst);
  10872. } break;
  10873. default:
  10874. {
  10875. GGML_ASSERT(false);
  10876. } break;
  10877. }
  10878. }
  10879. // ggml_compute_forward_argsort
  10880. static void ggml_compute_forward_argsort_f32(
  10881. const struct ggml_compute_params * params,
  10882. const struct ggml_tensor * src0,
  10883. struct ggml_tensor * dst) {
  10884. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  10885. return;
  10886. }
  10887. GGML_TENSOR_UNARY_OP_LOCALS
  10888. GGML_ASSERT(nb0 == sizeof(float));
  10889. const int ith = params->ith;
  10890. const int nth = params->nth;
  10891. const int64_t nr = ggml_nrows(src0);
  10892. enum ggml_sort_order order = (enum ggml_sort_order) ggml_get_op_params_i32(dst, 0);
  10893. for (int64_t i = ith; i < nr; i += nth) {
  10894. int32_t * dst_data = (int32_t *)((char *) dst->data + i*nb1);
  10895. const float * src_data = (float *)((char *) src0->data + i*nb01);
  10896. for (int64_t j = 0; j < ne0; j++) {
  10897. dst_data[j] = j;
  10898. }
10899. // standard C qsort cannot easily capture src_data for the comparator, so do a simple O(n^2) exchange sort instead
  10900. for (int64_t j = 0; j < ne0; j++) {
  10901. for (int64_t k = j + 1; k < ne0; k++) {
  10902. if ((order == GGML_SORT_ASC && src_data[dst_data[j]] > src_data[dst_data[k]]) ||
  10903. (order == GGML_SORT_DESC && src_data[dst_data[j]] < src_data[dst_data[k]])) {
  10904. int32_t tmp = dst_data[j];
  10905. dst_data[j] = dst_data[k];
  10906. dst_data[k] = tmp;
  10907. }
  10908. }
  10909. }
  10910. }
  10911. }
  10912. static void ggml_compute_forward_argsort(
  10913. const struct ggml_compute_params * params,
  10914. const struct ggml_tensor * src0,
  10915. struct ggml_tensor * dst) {
  10916. switch (src0->type) {
  10917. case GGML_TYPE_F32:
  10918. {
  10919. ggml_compute_forward_argsort_f32(params, src0, dst);
  10920. } break;
  10921. default:
  10922. {
  10923. GGML_ASSERT(false);
  10924. } break;
  10925. }
  10926. }
  10927. // ggml_compute_forward_flash_attn
  10928. static void ggml_compute_forward_flash_attn_f32(
  10929. const struct ggml_compute_params * params,
  10930. const struct ggml_tensor * q,
  10931. const struct ggml_tensor * k,
  10932. const struct ggml_tensor * v,
  10933. const bool masked,
  10934. struct ggml_tensor * dst) {
  10935. int64_t t0 = ggml_perf_time_us();
  10936. UNUSED(t0);
  10937. GGML_TENSOR_LOCALS(int64_t, neq, q, ne)
  10938. GGML_TENSOR_LOCALS(size_t, nbq, q, nb)
  10939. GGML_TENSOR_LOCALS(int64_t, nek, k, ne)
  10940. GGML_TENSOR_LOCALS(size_t, nbk, k, nb)
  10941. GGML_TENSOR_LOCALS(int64_t, nev, v, ne)
  10942. GGML_TENSOR_LOCALS(size_t, nbv, v, nb)
  10943. GGML_TENSOR_LOCALS(int64_t, ne, dst, ne)
  10944. GGML_TENSOR_LOCALS(size_t, nb, dst, nb)
  10945. const int ith = params->ith;
  10946. const int nth = params->nth;
  10947. const int64_t D = neq0;
  10948. const int64_t N = neq1;
  10949. const int64_t P = nek1 - N;
  10950. const int64_t M = P + N;
  10951. const int Mup = ggml_up(M, GGML_SOFT_MAX_UNROLL);
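// D = head size, N = number of query rows, P = number of past key/value rows, M = total
// key/value rows attended to; Mup rounds M up to a multiple of GGML_SOFT_MAX_UNROLL so the
// unrolled softmax below can work in fixed-size chunks (the padded tail is set to -INF)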
  10952. GGML_ASSERT(ne0 == D);
  10953. GGML_ASSERT(ne1 == N);
  10954. GGML_ASSERT(P >= 0);
  10955. GGML_ASSERT(nbq0 == sizeof(float));
  10956. GGML_ASSERT(nbk0 == sizeof(float));
  10957. GGML_ASSERT(nbv0 == sizeof(float));
  10958. GGML_ASSERT(neq0 == D);
  10959. GGML_ASSERT(nek0 == D);
  10960. GGML_ASSERT(nev1 == D);
  10961. GGML_ASSERT(neq1 == N);
  10962. GGML_ASSERT(nek1 == N + P);
  10963. GGML_ASSERT(nev1 == D);
  10964. // dst cannot be transposed or permuted
  10965. GGML_ASSERT(nb0 == sizeof(float));
  10966. GGML_ASSERT(nb0 <= nb1);
  10967. GGML_ASSERT(nb1 <= nb2);
  10968. GGML_ASSERT(nb2 <= nb3);
  10969. if (params->type == GGML_TASK_INIT) {
  10970. return;
  10971. }
  10972. if (params->type == GGML_TASK_FINALIZE) {
  10973. return;
  10974. }
  10975. // parallelize by q rows using ggml_vec_dot_f32
  10976. // total rows in q
  10977. const int nr = neq1*neq2*neq3;
  10978. // rows per thread
  10979. const int dr = (nr + nth - 1)/nth;
  10980. // row range for this thread
  10981. const int ir0 = dr*ith;
  10982. const int ir1 = MIN(ir0 + dr, nr);
  10983. const float scale = 1.0f/sqrtf(D);
  10984. //printf("P=%d N=%d D=%d ir0=%d ir1=%d scale = %f\n", P, N, D, ir0, ir1, scale);
  10985. for (int ir = ir0; ir < ir1; ++ir) {
  10986. // q indices
  10987. const int iq3 = ir/(neq2*neq1);
  10988. const int iq2 = (ir - iq3*neq2*neq1)/neq1;
  10989. const int iq1 = (ir - iq3*neq2*neq1 - iq2*neq1);
  10990. float * S = (float *) params->wdata + ith*(Mup + CACHE_LINE_SIZE_F32);
  10991. for (int i = M; i < Mup; ++i) {
  10992. S[i] = -INFINITY;
  10993. }
  10994. const int64_t masked_begin = masked ? (P + iq1 + 1) : M;
  10995. for (int64_t ic = 0; ic < masked_begin; ++ic) {
  10996. // k indices
  10997. const int ik3 = iq3;
  10998. const int ik2 = iq2 % nek2;
  10999. const int ik1 = ic;
  11000. // S indices
  11001. const int i1 = ik1;
  11002. ggml_vec_dot_f32(neq0,
  11003. S + i1, 0,
  11004. (float *) ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)), 0,
  11005. (float *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)), 0, 1);
  11006. }
  11007. // scale
  11008. ggml_vec_scale_f32(masked_begin, S, scale);
  11009. for (int64_t i = masked_begin; i < M; i++) {
  11010. S[i] = -INFINITY;
  11011. }
  11012. // softmax
  11013. // exclude known -INF S[..] values from max and loop
11014. // don't forget to set their S values to zero
  11015. {
  11016. float max = -INFINITY;
  11017. ggml_vec_max_f32(masked_begin, &max, S);
  11018. ggml_float sum = 0.0;
  11019. {
  11020. #ifdef GGML_SOFT_MAX_ACCELERATE
  11021. max = -max;
  11022. vDSP_vsadd(S, 1, &max, S, 1, Mup);
  11023. vvexpf(S, S, &Mup);
  11024. ggml_vec_sum_f32(Mup, &sum, S);
  11025. #else
  11026. uint16_t scvt[GGML_SOFT_MAX_UNROLL]; UNUSED(scvt);
  11027. ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 };
  11028. for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) {
  11029. if (i >= masked_begin) {
  11030. break;
  11031. }
  11032. float * SS = S + i;
  11033. for (int j = 0; j < GGML_SOFT_MAX_UNROLL; ++j) {
  11034. if (i + j >= masked_begin) {
  11035. break;
  11036. } else if (SS[j] == -INFINITY) {
  11037. SS[j] = 0.0f;
  11038. } else {
  11039. #ifndef GGML_FLASH_ATTN_EXP_FP16
  11040. const float val = expf(SS[j] - max);
  11041. #else
  11042. ggml_fp16_t s = GGML_FP32_TO_FP16(SS[j] - max);
  11043. memcpy(&scvt[j], &s, sizeof(uint16_t));
  11044. const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt[j]]);
  11045. #endif
  11046. sump[j] += (ggml_float)val;
  11047. SS[j] = val;
  11048. }
  11049. }
  11050. }
  11051. for (int i = 0; i < GGML_SOFT_MAX_UNROLL; i++) {
  11052. sum += sump[i];
  11053. }
  11054. #endif
  11055. }
  11056. assert(sum > 0.0);
  11057. sum = 1.0/sum;
  11058. ggml_vec_scale_f32(masked_begin, S, sum);
  11059. #ifndef NDEBUG
  11060. for (int i = 0; i < masked_begin; ++i) {
  11061. assert(!isnan(S[i]));
  11062. assert(!isinf(S[i]));
  11063. }
  11064. #endif
  11065. }
  11066. for (int64_t ic = 0; ic < nev1; ++ic) {
  11067. // dst indices
  11068. const int i1 = iq1;
  11069. const int i2 = iq2;
  11070. const int i3 = iq3;
  11071. // v indices
  11072. const int iv2 = iq2 % nev2;
  11073. const int iv3 = iq3;
  11074. ggml_vec_dot_f32(masked_begin,
  11075. (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)), 0,
  11076. (float *) ((char *) v->data + ( ic*nbv1 + iv2*nbv2 + iv3*nbv3)), 0,
  11077. S, 0, 1);
  11078. }
  11079. }
  11080. }
  11081. static void ggml_compute_forward_flash_attn_f16(
  11082. const struct ggml_compute_params * params,
  11083. const struct ggml_tensor * q,
  11084. const struct ggml_tensor * k,
  11085. const struct ggml_tensor * v,
  11086. const bool masked,
  11087. struct ggml_tensor * dst) {
  11088. int64_t t0 = ggml_perf_time_us();
  11089. UNUSED(t0);
  11090. GGML_TENSOR_LOCALS(int64_t, neq, q, ne)
  11091. GGML_TENSOR_LOCALS(size_t, nbq, q, nb)
  11092. GGML_TENSOR_LOCALS(int64_t, nek, k, ne)
  11093. GGML_TENSOR_LOCALS(size_t, nbk, k, nb)
  11094. GGML_TENSOR_LOCALS(int64_t, nev, v, ne)
  11095. GGML_TENSOR_LOCALS(size_t, nbv, v, nb)
  11096. GGML_TENSOR_LOCALS(int64_t, ne, dst, ne)
  11097. GGML_TENSOR_LOCALS(size_t, nb, dst, nb)
  11098. const int ith = params->ith;
  11099. const int nth = params->nth;
  11100. const int64_t D = neq0;
  11101. const int64_t N = neq1;
  11102. const int64_t P = nek1 - N;
  11103. const int64_t M = P + N;
  11104. const int Mup = ggml_up(M, GGML_SOFT_MAX_UNROLL);
  11105. GGML_ASSERT(ne0 == D);
  11106. GGML_ASSERT(ne1 == N);
  11107. GGML_ASSERT(P >= 0);
  11108. GGML_ASSERT(nbq0 == sizeof(ggml_fp16_t));
  11109. GGML_ASSERT(nbk0 == sizeof(ggml_fp16_t));
  11110. GGML_ASSERT(nbv0 == sizeof(ggml_fp16_t));
  11111. GGML_ASSERT(neq0 == D);
  11112. GGML_ASSERT(nek0 == D);
  11113. GGML_ASSERT(nev1 == D);
  11114. GGML_ASSERT(neq1 == N);
  11115. GGML_ASSERT(nek1 == N + P);
  11116. GGML_ASSERT(nev1 == D);
  11117. // dst cannot be transposed or permuted
  11118. GGML_ASSERT(nb0 == sizeof(float));
  11119. GGML_ASSERT(nb0 <= nb1);
  11120. GGML_ASSERT(nb1 <= nb2);
  11121. GGML_ASSERT(nb2 <= nb3);
  11122. if (params->type == GGML_TASK_INIT) {
  11123. return;
  11124. }
  11125. if (params->type == GGML_TASK_FINALIZE) {
  11126. return;
  11127. }
11128. // parallelize by q rows using ggml_vec_dot_f16
  11129. // total rows in q
  11130. const int nr = neq1*neq2*neq3;
  11131. // rows per thread
  11132. const int dr = (nr + nth - 1)/nth;
  11133. // row range for this thread
  11134. const int ir0 = dr*ith;
  11135. const int ir1 = MIN(ir0 + dr, nr);
  11136. const float scale = 1.0f/sqrtf(D);
  11137. //printf("P=%d N=%d D=%d ir0=%d ir1=%d scale = %f\n", P, N, D, ir0, ir1, scale);
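// Note (added, not part of the original source): the F16 path mirrors the F32
// kernel above, with two differences visible in the code below:
//   - the Q*K^T dot products run in F16 and, when nek1 divides evenly by
//     GGML_VEC_DOT_UNROLL (and the unroll factor is small enough), process
//     GGML_VEC_DOT_UNROLL k rows per call via ggml_vec_dot_f16_unroll;
//   - after the softmax, S is converted to an F16 copy S16 so the final S @ V
//     accumulation can also use the F16 dot kernels.
// Each thread therefore uses 2*Mup + CACHE_LINE_SIZE_F32 floats of params->wdata
// (S followed by S16).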
  11138. for (int ir = ir0; ir < ir1; ++ir) {
  11139. // q indices
  11140. const int iq3 = ir/(neq2*neq1);
  11141. const int iq2 = (ir - iq3*neq2*neq1)/neq1;
  11142. const int iq1 = (ir - iq3*neq2*neq1 - iq2*neq1);
  11143. float * S = (float *) params->wdata + ith*(2*Mup + CACHE_LINE_SIZE_F32);
  11144. for (int i = M; i < Mup; ++i) {
  11145. S[i] = -INFINITY;
  11146. }
  11147. if (GGML_VEC_DOT_UNROLL > 2 || nek1 % GGML_VEC_DOT_UNROLL != 0) {
  11148. for (int64_t ic = 0; ic < nek1; ++ic) {
  11149. // k indices
  11150. const int ik3 = iq3;
  11151. const int ik2 = iq2 % nek2;
  11152. const int ik1 = ic;
  11153. // S indices
  11154. const int i1 = ik1;
  11155. ggml_vec_dot_f16(neq0,
  11156. S + i1, 0,
  11157. (ggml_fp16_t *) ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)), 0,
  11158. (ggml_fp16_t *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)), 0, 1);
  11159. }
  11160. } else {
  11161. for (int64_t ic = 0; ic < nek1; ic += GGML_VEC_DOT_UNROLL) {
  11162. // k indices
  11163. const int ik3 = iq3;
  11164. const int ik2 = iq2 % nek2;
  11165. const int ik1 = ic;
  11166. // S indices
  11167. const int i1 = ik1;
  11168. ggml_vec_dot_f16_unroll(neq0, nbk1,
  11169. S + i1,
  11170. ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)),
  11171. (ggml_fp16_t *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)));
  11172. }
  11173. }
  11174. // scale
  11175. ggml_vec_scale_f32(nek1, S, scale);
  11176. if (masked) {
  11177. for (int64_t i = P; i < M; i++) {
  11178. if (i > P + iq1) {
  11179. S[i] = -INFINITY;
  11180. }
  11181. }
  11182. }
  11183. // softmax
11184. // todo: exclude known -INF S[..] values from max and loop, assuming their results are zero.
11185. // don't forget to set their S values to zero
  11186. {
  11187. float max = -INFINITY;
  11188. ggml_vec_max_f32(M, &max, S);
  11189. ggml_float sum = 0.0;
  11190. {
  11191. #ifdef GGML_SOFT_MAX_ACCELERATE
  11192. max = -max;
  11193. vDSP_vsadd(S, 1, &max, S, 1, Mup);
  11194. vvexpf(S, S, &Mup);
  11195. ggml_vec_sum_f32(Mup, &sum, S);
  11196. #else
  11197. uint16_t scvt[GGML_SOFT_MAX_UNROLL];
  11198. ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 };
  11199. for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) {
  11200. float * SS = S + i;
  11201. for (int j = 0; j < GGML_SOFT_MAX_UNROLL; ++j) {
  11202. if (SS[j] == -INFINITY) {
  11203. SS[j] = 0.0f;
  11204. } else {
  11205. ggml_fp16_t s = GGML_FP32_TO_FP16(SS[j] - max);
  11206. memcpy(&scvt[j], &s, sizeof(uint16_t));
  11207. const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt[j]]);
  11208. sump[j] += (ggml_float)val;
  11209. SS[j] = val;
  11210. }
  11211. }
  11212. }
  11213. for (int i = 0; i < GGML_SOFT_MAX_UNROLL; i++) {
  11214. sum += sump[i];
  11215. }
  11216. #endif
  11217. }
  11218. assert(sum > 0.0);
  11219. sum = 1.0/sum;
  11220. ggml_vec_scale_f32(M, S, sum);
  11221. #ifndef NDEBUG
  11222. for (int i = 0; i < M; ++i) {
  11223. assert(!isnan(S[i]));
  11224. assert(!isinf(S[i]));
  11225. }
  11226. #endif
  11227. }
  11228. ggml_fp16_t * S16 = (ggml_fp16_t *) ((float *) params->wdata + ith*(2*Mup + CACHE_LINE_SIZE_F32) + Mup);
  11229. for (int64_t i = 0; i < M; i++) {
  11230. S16[i] = GGML_FP32_TO_FP16(S[i]);
  11231. }
  11232. // todo: exclude known zero S[..] values from dot (reducing nev0 and increasing begin of v and S16).
  11233. if (GGML_VEC_DOT_UNROLL == 1 || (nev1 % GGML_VEC_DOT_UNROLL != 0)) {
  11234. for (int64_t ic = 0; ic < nev1; ++ic) {
  11235. // dst indices
  11236. const int i1 = iq1;
  11237. const int i2 = iq2;
  11238. const int i3 = iq3;
  11239. // v indices
  11240. const int iv2 = iq2 % nev2;
  11241. const int iv3 = iq3;
  11242. ggml_vec_dot_f16(nev0,
  11243. (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)), 0,
  11244. (ggml_fp16_t *) ((char *) v->data + ( ic*nbv1 + iv2*nbv2 + iv3*nbv3)), 0,
  11245. S16, 0, 1);
  11246. }
  11247. } else {
  11248. for (int64_t ic = 0; ic < nev1; ic += GGML_VEC_DOT_UNROLL) {
  11249. // dst indices
  11250. const int i1 = iq1;
  11251. const int i2 = iq2;
  11252. const int i3 = iq3;
  11253. // v indices
  11254. const int iv2 = iq2 % nev2;
  11255. const int iv3 = iq3;
  11256. ggml_vec_dot_f16_unroll(nev0, nbv1,
  11257. (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)),
  11258. ((char *) v->data + ( ic*nbv1 + iv2*nbv2 + iv3*nbv3)),
  11259. S16);
  11260. }
  11261. }
  11262. }
  11263. }
  11264. static void ggml_compute_forward_flash_attn(
  11265. const struct ggml_compute_params * params,
  11266. const struct ggml_tensor * q,
  11267. const struct ggml_tensor * k,
  11268. const struct ggml_tensor * v,
  11269. const bool masked,
  11270. struct ggml_tensor * dst) {
  11271. switch (q->type) {
  11272. case GGML_TYPE_F16:
  11273. {
  11274. ggml_compute_forward_flash_attn_f16(params, q, k, v, masked, dst);
  11275. } break;
  11276. case GGML_TYPE_F32:
  11277. {
  11278. ggml_compute_forward_flash_attn_f32(params, q, k, v, masked, dst);
  11279. } break;
  11280. default:
  11281. {
  11282. GGML_ASSERT(false);
  11283. } break;
  11284. }
  11285. }
  11286. // ggml_compute_forward_flash_ff
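// Added summary (not part of the original source): with a = input, b0/b1 = fc_w/fc_b
// and c0/c1 = proj_w/proj_b, the kernel below computes, per input row,
//   S       = b0^T @ a_row + b1        // [M] hidden activations
//   S16     = gelu(S) converted to F16
//   dst_row = c0^T @ S16 + c1          // [D] projected output
// i.e. a fused feed-forward block: proj_w @ gelu(fc_w @ a + fc_b) + proj_b.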
  11287. static void ggml_compute_forward_flash_ff_f16(
  11288. const struct ggml_compute_params * params,
  11289. const struct ggml_tensor * a, // F16
  11290. const struct ggml_tensor * b0, // F16 fc_w
  11291. const struct ggml_tensor * b1, // F32 fc_b
  11292. const struct ggml_tensor * c0, // F16 proj_w
  11293. const struct ggml_tensor * c1, // F32 proj_b
  11294. struct ggml_tensor * dst) {
  11295. int64_t t0 = ggml_perf_time_us();
  11296. UNUSED(t0);
  11297. GGML_TENSOR_LOCALS(int64_t, nea, a, ne)
  11298. GGML_TENSOR_LOCALS(size_t, nba, a, nb)
  11299. GGML_TENSOR_LOCALS(int64_t, neb0, b0, ne)
  11300. GGML_TENSOR_LOCALS(size_t, nbb0, b0, nb)
  11301. GGML_TENSOR_LOCALS(int64_t, neb1, b1, ne)
  11302. GGML_TENSOR_LOCALS(size_t, nbb1, b1, nb)
  11303. GGML_TENSOR_LOCALS(int64_t, nec0, c0, ne)
  11304. GGML_TENSOR_LOCALS(size_t, nbc0, c0, nb)
  11305. GGML_TENSOR_LOCALS(int64_t, nec1, c1, ne)
  11306. GGML_TENSOR_LOCALS(size_t, nbc1, c1, nb)
  11307. GGML_TENSOR_LOCALS(int64_t, ne, dst, ne)
  11308. GGML_TENSOR_LOCALS(size_t, nb, dst, nb)
  11309. const int ith = params->ith;
  11310. const int nth = params->nth;
  11311. const int64_t D = nea0;
  11312. //const int64_t N = nea1;
  11313. const int64_t M = neb01;
  11314. GGML_ASSERT(ne0 == nea0);
  11315. GGML_ASSERT(ne1 == nea1);
  11316. GGML_ASSERT(ne2 == nea2);
  11317. GGML_ASSERT(nba0 == sizeof(ggml_fp16_t));
  11318. GGML_ASSERT(nbb00 == sizeof(ggml_fp16_t));
  11319. GGML_ASSERT(nbb10 == sizeof(float));
  11320. GGML_ASSERT(nbc00 == sizeof(ggml_fp16_t));
  11321. GGML_ASSERT(nbc10 == sizeof(float));
  11322. GGML_ASSERT(neb00 == D);
  11323. GGML_ASSERT(neb01 == M);
  11324. GGML_ASSERT(neb10 == M);
  11325. GGML_ASSERT(neb11 == 1);
  11326. GGML_ASSERT(nec00 == M);
  11327. GGML_ASSERT(nec01 == D);
  11328. GGML_ASSERT(nec10 == D);
  11329. GGML_ASSERT(nec11 == 1);
  11330. // dst cannot be transposed or permuted
  11331. GGML_ASSERT(nb0 == sizeof(float));
  11332. GGML_ASSERT(nb0 <= nb1);
  11333. GGML_ASSERT(nb1 <= nb2);
  11334. GGML_ASSERT(nb2 <= nb3);
  11335. if (params->type == GGML_TASK_INIT) {
  11336. return;
  11337. }
  11338. if (params->type == GGML_TASK_FINALIZE) {
  11339. return;
  11340. }
11341. // parallelize by a rows using ggml_vec_dot_f16
  11342. // total rows in a
  11343. const int nr = nea1*nea2*nea3;
  11344. // rows per thread
  11345. const int dr = (nr + nth - 1)/nth;
  11346. // row range for this thread
  11347. const int ir0 = dr*ith;
  11348. const int ir1 = MIN(ir0 + dr, nr);
  11349. for (int ir = ir0; ir < ir1; ++ir) {
  11350. // a indices
  11351. const int ia3 = ir/(nea2*nea1);
  11352. const int ia2 = (ir - ia3*nea2*nea1)/nea1;
  11353. const int ia1 = (ir - ia3*nea2*nea1 - ia2*nea1);
  11354. float * S = (float *) params->wdata + ith*(2*M + CACHE_LINE_SIZE_F32);
  11355. for (int64_t ic = 0; ic < neb01; ++ic) {
  11356. // b0 indices
  11357. const int ib03 = ia3;
  11358. const int ib02 = ia2;
  11359. const int ib01 = ic;
  11360. // S indices
  11361. const int i1 = ib01;
  11362. ggml_vec_dot_f16(nea0,
  11363. S + i1, 0,
  11364. (ggml_fp16_t *) ((char *) b0->data + (ib01*nbb01 + ib02*nbb02 + ib03*nbb03)), 0,
  11365. (ggml_fp16_t *) ((char *) a->data + ( ia1*nba1 + ia2*nba2 + ia3*nba3)), 0, 1);
  11366. }
  11367. ggml_vec_add_f32(neb01, S, S, (float *) b1->data);
  11368. //ggml_vec_gelu_f32(neb01, S, S);
  11369. ggml_fp16_t * S16 = (ggml_fp16_t *) ((float *) params->wdata + ith*(2*M + CACHE_LINE_SIZE_F32) + M);
  11370. for (int64_t i = 0; i < M; i++) {
  11371. S16[i] = GGML_FP32_TO_FP16(S[i]);
  11372. }
  11373. ggml_vec_gelu_f16(neb01, S16, S16);
  11374. {
  11375. // dst indices
  11376. const int i1 = ia1;
  11377. const int i2 = ia2;
  11378. const int i3 = ia3;
  11379. for (int64_t ic = 0; ic < nec01; ++ic) {
  11380. ggml_vec_dot_f16(neb01,
  11381. (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)), 0,
  11382. (ggml_fp16_t *) ((char *) c0->data + ( ic*nbc01 + i2*nbc02 + i3*nbc03)), 0,
  11383. S16, 0, 1);
  11384. }
  11385. ggml_vec_add_f32(nec01,
  11386. (float *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3)),
  11387. (float *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3)),
  11388. (float *) c1->data);
  11389. }
  11390. }
  11391. }
  11392. static void ggml_compute_forward_flash_ff(
  11393. const struct ggml_compute_params * params,
  11394. const struct ggml_tensor * a,
  11395. const struct ggml_tensor * b0,
  11396. const struct ggml_tensor * b1,
  11397. const struct ggml_tensor * c0,
  11398. const struct ggml_tensor * c1,
  11399. struct ggml_tensor * dst) {
  11400. switch (b0->type) {
  11401. case GGML_TYPE_F16:
  11402. {
  11403. ggml_compute_forward_flash_ff_f16(params, a, b0, b1, c0, c1, dst);
  11404. } break;
  11405. case GGML_TYPE_F32:
  11406. {
  11407. GGML_ASSERT(false); // TODO
  11408. } break;
  11409. default:
  11410. {
  11411. GGML_ASSERT(false);
  11412. } break;
  11413. }
  11414. }
  11415. // ggml_compute_forward_flash_attn_back
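// Added overview (not part of the original source): given q, k, v and d = gradient
// of the forward flash-attention output, the kernel below recomputes the forward
// softmax per q row and accumulates the gradients of q, k and v into a single dst
// tensor. The `masked` flag selects the same causal mask as the forward pass.
// The gradient algebra is spelled out in the "step-by-step explanation" comment
// block further down.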
  11416. static void ggml_compute_forward_flash_attn_back_f32(
  11417. const struct ggml_compute_params * params,
  11418. const struct ggml_tensor * q,
  11419. const struct ggml_tensor * k,
  11420. const struct ggml_tensor * v,
  11421. const struct ggml_tensor * d,
  11422. const bool masked,
  11423. struct ggml_tensor * dst) {
  11424. int64_t t0 = ggml_perf_time_us();
  11425. UNUSED(t0);
  11426. GGML_TENSOR_LOCALS(int64_t, neq, q, ne)
  11427. GGML_TENSOR_LOCALS(size_t, nbq, q, nb)
  11428. GGML_TENSOR_LOCALS(int64_t, nek, k, ne)
  11429. GGML_TENSOR_LOCALS(size_t, nbk, k, nb)
  11430. GGML_TENSOR_LOCALS(int64_t, nev, v, ne)
  11431. GGML_TENSOR_LOCALS(size_t, nbv, v, nb)
  11432. GGML_TENSOR_LOCALS(int64_t, ned, d, ne)
  11433. GGML_TENSOR_LOCALS(size_t, nbd, d, nb)
  11434. GGML_TENSOR_LOCALS(int64_t, ne, dst, ne)
  11435. GGML_TENSOR_LOCALS(size_t, nb, dst, nb)
  11436. const int ith = params->ith;
  11437. const int nth = params->nth;
  11438. const int64_t D = neq0;
  11439. const int64_t N = neq1;
  11440. const int64_t P = nek1 - N;
  11441. const int64_t M = P + N;
  11442. const int Mup = ggml_up(M, GGML_SOFT_MAX_UNROLL);
  11443. const int mxDM = MAX(D, Mup);
  11444. // GGML_ASSERT(ne0 == D);
  11445. // GGML_ASSERT(ne1 == N);
  11446. GGML_ASSERT(P >= 0);
  11447. GGML_ASSERT(nbq0 == sizeof(float));
  11448. GGML_ASSERT(nbk0 == sizeof(float));
  11449. GGML_ASSERT(nbv0 == sizeof(float));
  11450. GGML_ASSERT(neq0 == D);
  11451. GGML_ASSERT(nek0 == D);
  11452. GGML_ASSERT(nev1 == D);
  11453. GGML_ASSERT(ned0 == D);
  11454. GGML_ASSERT(neq1 == N);
  11455. GGML_ASSERT(nek1 == N + P);
  11456. GGML_ASSERT(nev1 == D);
  11457. GGML_ASSERT(ned1 == N);
  11458. // dst cannot be transposed or permuted
  11459. GGML_ASSERT(nb0 == sizeof(float));
  11460. GGML_ASSERT(nb0 <= nb1);
  11461. GGML_ASSERT(nb1 <= nb2);
  11462. GGML_ASSERT(nb2 <= nb3);
  11463. if (params->type == GGML_TASK_INIT) {
  11464. if (ith == 0) {
  11465. memset(dst->data, 0, nb0*ne0*ne1*ne2*ne3);
  11466. }
  11467. return;
  11468. }
  11469. if (params->type == GGML_TASK_FINALIZE) {
  11470. return;
  11471. }
  11472. const int64_t elem_q = ggml_nelements(q);
  11473. const int64_t elem_k = ggml_nelements(k);
  11474. enum ggml_type result_type = dst->type;
  11475. GGML_ASSERT(ggml_blck_size(result_type) == 1);
  11476. const size_t tsize = ggml_type_size(result_type);
  11477. const size_t offs_q = 0;
  11478. const size_t offs_k = offs_q + GGML_PAD(elem_q * tsize, GGML_MEM_ALIGN);
  11479. const size_t offs_v = offs_k + GGML_PAD(elem_k * tsize, GGML_MEM_ALIGN);
  11480. void * grad_q = (char *) dst->data;
  11481. void * grad_k = (char *) dst->data + offs_k;
  11482. void * grad_v = (char *) dst->data + offs_v;
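// Added note (not part of the original source): dst packs the three gradients
// back to back,
//   [ grad_q (elem_q elements) | grad_k (elem_k elements) | grad_v ... ]
// with each block padded to GGML_MEM_ALIGN via GGML_PAD, which is what the
// offs_q/offs_k/offs_v offsets above encode. The nbg* strides below are the
// row/plane strides inside each packed gradient.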
  11483. const size_t nbgq1 = nb0*neq0;
  11484. const size_t nbgq2 = nb0*neq0*neq1;
  11485. const size_t nbgq3 = nb0*neq0*neq1*neq2;
  11486. const size_t nbgk1 = nb0*nek0;
  11487. const size_t nbgk2 = nb0*nek0*nek1;
  11488. const size_t nbgk3 = nb0*nek0*nek1*neq2;
  11489. const size_t nbgv1 = nb0*nev0;
  11490. const size_t nbgv2 = nb0*nev0*nev1;
  11491. const size_t nbgv3 = nb0*nev0*nev1*neq2;
11492. // parallelize over the k heads/batches (the nek2*nek3 slices) using ggml_vec_dot_f32
11493. // total number of k slices
  11494. const int nr = nek2*nek3;
  11495. // rows per thread
  11496. const int dr = (nr + nth - 1)/nth;
  11497. // row range for this thread
  11498. const int ir0 = dr*ith;
  11499. const int ir1 = MIN(ir0 + dr, nr);
  11500. const float scale = 1.0f/sqrtf(D);
  11501. //printf("P=%d N=%d D=%d ir0=%d ir1=%d scale = %f\n", P, N, D, ir0, ir1, scale);
  11502. // how often k2 (and v2) is repeated in q2
  11503. int nrep = neq2/nek2;
  11504. for (int ir = ir0; ir < ir1; ++ir) {
11505. // k indices
  11506. const int ik3 = ir/(nek2);
  11507. const int ik2 = ir - ik3*nek2;
  11508. const int iq3 = ik3;
  11509. const int id3 = ik3;
  11510. const int iv3 = ik3;
  11511. const int iv2 = ik2;
  11512. for (int irep = 0; irep < nrep; ++irep) {
  11513. const int iq2 = ik2 + irep*nek2;
  11514. const int id2 = iq2;
  11515. // (ik2 + irep*nek2) % nek2 == ik2
  11516. for (int iq1 = 0; iq1 < neq1; ++iq1) {
  11517. const int id1 = iq1;
11518. // note: unsure about the CACHE_LINE_SIZE_F32 padding here -
11519. //       maybe it should not be multiplied by 2 and should be excluded from the 1*(..) offset used for SM?
  11520. float * S = (float *) params->wdata + ith*2*(mxDM + CACHE_LINE_SIZE_F32) + 0*(mxDM+CACHE_LINE_SIZE_F32);
  11521. float * SM = (float *) params->wdata + ith*2*(mxDM + CACHE_LINE_SIZE_F32) + 1*(mxDM+CACHE_LINE_SIZE_F32);
  11522. for (int i = M; i < Mup; ++i) {
  11523. S[i] = -INFINITY;
  11524. }
  11525. const int64_t masked_begin = masked ? (P + iq1 + 1) : M;
  11526. for (int64_t ic = 0; ic < masked_begin; ++ic) {
  11527. // k indices
  11528. const int ik1 = ic;
  11529. // S indices
  11530. const int i1 = ik1;
  11531. ggml_vec_dot_f32(neq0,
  11532. S + i1, 0,
  11533. (float *) ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)), 0,
  11534. (float *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)), 0, 1);
  11535. }
  11536. // scale
  11537. ggml_vec_scale_f32(masked_begin, S, scale);
  11538. for (int64_t i = masked_begin; i < M; i++) {
  11539. S[i] = -INFINITY;
  11540. }
  11541. // softmax
  11542. // exclude known -INF S[..] values from max and loop
11543. // don't forget to set their SM values to zero
  11544. {
  11545. float max = -INFINITY;
  11546. ggml_vec_max_f32(masked_begin, &max, S);
  11547. ggml_float sum = 0.0;
  11548. {
  11549. #ifdef GGML_SOFT_MAX_ACCELERATE
  11550. max = -max;
  11551. vDSP_vsadd(SM, 1, &max, SM, 1, Mup);
  11552. vvexpf(SM, SM, &Mup);
  11553. ggml_vec_sum_f32(Mup, &sum, SM);
  11554. #else
  11555. uint16_t scvt[GGML_SOFT_MAX_UNROLL]; UNUSED(scvt);
  11556. ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 };
  11557. for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) {
  11558. if (i >= masked_begin) {
  11559. break;
  11560. }
  11561. float * SR = S + i;
  11562. float * SW = SM + i;
  11563. for (int j = 0; j < GGML_SOFT_MAX_UNROLL; ++j) {
  11564. if (i + j >= masked_begin) {
  11565. break;
  11566. } else if (SR[j] == -INFINITY) {
  11567. SW[j] = 0.0f;
  11568. } else {
  11569. #ifndef GGML_FLASH_ATTN_EXP_FP16
  11570. const float val = expf(SR[j] - max);
  11571. #else
  11572. ggml_fp16_t s = GGML_FP32_TO_FP16(SR[j] - max);
  11573. memcpy(&scvt[j], &s, sizeof(uint16_t));
  11574. const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt[j]]);
  11575. #endif
  11576. sump[j] += (ggml_float)val;
  11577. SW[j] = val;
  11578. }
  11579. }
  11580. }
  11581. for (int i = 0; i < GGML_SOFT_MAX_UNROLL; i++) {
  11582. sum += sump[i];
  11583. }
  11584. #endif
  11585. }
  11586. assert(sum > 0.0);
  11587. sum = 1.0/sum;
  11588. ggml_vec_scale_f32(masked_begin, SM, sum);
  11589. }
  11590. // step-by-step explanation
  11591. {
  11592. // forward-process shape grads from backward process
  11593. // parallel_for ik2,ik3:
  11594. // for irep:
  11595. // iq2 = ik2 + irep*nek2
  11596. // k[:D,:M,:,:] [D,M,:,:] grad[k][:D,:M,ik2,ik3] += grad[kcur]
  11597. // q[:D,:N,:,:] [D,N,:,:] grad[q][:D,iq1,iq2,iq3] += grad[qcur]
  11598. // v[:M,:D,:,:] [M,D,:,:] grad[v][:M,:D,iv2,iv3] += grad[vcur]
  11599. // for iq1:
  11600. // kcur = k[:D,:M,ik2,ik3] [D,M,1,1] grad[kcur] = grad[S1].T @ qcur
  11601. // qcur = q[:D,iq1,iq2,iq3] [D,1,1,1] grad[qcur] = grad[S1] @ kcur
  11602. // vcur = v[:M,:D,iv2,iv3] [M,D,1,1] grad[vcur] = grad[S5].T @ S4
  11603. // S0 = -Inf [D,1,1,1]
  11604. // ~S1[i] = dot(kcur[:D,i], qcur)
  11605. // S1 = qcur @ kcur.T [M,1,1,1] grad[S1] = grad[S2] * scale
  11606. // S2 = S1 * scale [M,1,1,1] grad[S2] = diag_mask_zero(grad[S3], P)
  11607. // S3 = diag_mask_inf(S2, P) [M,1,1,1] grad[S3] = S4 * (grad[S4] - dot(S4, grad[S4]))
  11608. // S4 = softmax(S3) [M,1,1,1] grad[S4] = grad[S5] @ vcur
  11609. // ~S5[i] = dot(vcur[:,i], S4)
  11610. // S5 = S4 @ vcur.T [D,1,1,1] grad[S5] = d[:D,id1,id2,id3]
  11611. // ~dst[i,iq1,iq2,iq3] = S5[i] ^
  11612. // dst[:D,iq1,iq2,iq3] = S5 | grad[dst[:D,iq1,iq2,iq3]] = d[:D,id1,id2,id3]
  11613. // dst backward-/ grad[dst] = d
  11614. //
  11615. // output gradients with their dependencies:
  11616. //
  11617. // grad[kcur] = grad[S1].T @ qcur
  11618. // grad[S1] = diag_mask_zero(grad[S3], P) * scale
  11619. // grad[S3] = S4 * (grad[S4] - dot(S4, grad[S4]))
  11620. // grad[S4] = grad[S5] @ vcur
  11621. // grad[S4] = d[:D,id1,id2,id3] @ vcur
  11622. // grad[qcur] = grad[S1] @ kcur
  11623. // grad[vcur] = grad[S5].T @ S4
  11624. // grad[vcur] = d[:D,id1,id2,id3].T @ S4
  11625. //
  11626. // in post-order:
  11627. //
  11628. // S1 = qcur @ kcur.T
  11629. // S2 = S1 * scale
  11630. // S3 = diag_mask_inf(S2, P)
  11631. // S4 = softmax(S3)
  11632. // grad[S4] = d[:D,id1,id2,id3] @ vcur
  11633. // grad[S3] = S4 * (grad[S4] - dot(S4, grad[S4]))
  11634. // grad[S1] = diag_mask_zero(grad[S3], P) * scale
  11635. // grad[qcur] = grad[S1] @ kcur
  11636. // grad[kcur] = grad[S1].T @ qcur
  11637. // grad[vcur] = d[:D,id1,id2,id3].T @ S4
  11638. //
  11639. // using less variables (SM=S4):
  11640. //
  11641. // S = diag_mask_inf(qcur @ kcur.T * scale, P)
  11642. // SM = softmax(S)
  11643. // S = d[:D,iq1,iq2,iq3] @ vcur
  11644. // dot_SM_gradSM = dot(SM, S)
  11645. // S = SM * (S - dot(SM, S))
  11646. // S = diag_mask_zero(S, P) * scale
  11647. //
  11648. // grad[q][:D,iq1,iq2,iq3] += S @ kcur
  11649. // grad[k][:D,:M,ik2,ik3] += S.T @ qcur
  11650. // grad[v][:M,:D,iv2,iv3] += d[:D,id1,id2,id3].T @ SM
  11651. }
  11652. // S = gradSM = d[:D,id1,id2,id3] @ vcur[:,:,iv2,iv3]
  11653. // S = d[:D,id1,id2,id3] @ vcur[:,:,iv2,iv3]
  11654. // for ic:
  11655. // S[:M] += vcur[:M,ic,iv2,iv3] * d[ic,id1,id2,id3]
  11656. // exclude known future zero S[..] values from operation
  11657. ggml_vec_set_f32(masked_begin, S, 0);
  11658. for (int64_t ic = 0; ic < D; ++ic) {
  11659. ggml_vec_mad_f32(masked_begin,
  11660. S,
  11661. (float *) ((char *) v->data + ( ic*nbv1 + iv2*nbv2 + iv3*nbv3)),
  11662. *(float *) ((char *) d->data + (ic*nbd0 + id1*nbd1 + id2*nbd2 + id3*nbd3)));
  11663. }
  11664. // S = SM * (S - dot(SM, S))
  11665. float dot_SM_gradSM = 0;
  11666. ggml_vec_dot_f32 (masked_begin, &dot_SM_gradSM, 0, SM, 0, S, 0, 1);
  11667. ggml_vec_acc1_f32(M, S, -dot_SM_gradSM);
  11668. ggml_vec_mul_f32 (masked_begin, S, S, SM);
  11669. // S = diag_mask_zero(S, P) * scale
  11670. // already done by above ggml_vec_set_f32
  11671. // exclude known zero S[..] values from operation
  11672. ggml_vec_scale_f32(masked_begin, S, scale);
  11673. // S shape [M,1]
  11674. // SM shape [M,1]
  11675. // kcur shape [D,M]
  11676. // qcur shape [D,1]
  11677. // vcur shape [M,D]
  11678. // grad[q][:D,iq1,iq2,iq3] += S @ kcur
  11679. // grad[q][:D,iq1,iq2,iq3] += shape[M,1] @ shape[D,M]
  11680. // for ic:
  11681. // grad[q][:D,iq1,iq2,iq3] += S[ic] * kcur[:D,ic,ik2,ik3]
  11682. // exclude known zero S[..] values from loop
  11683. for (int64_t ic = 0; ic < masked_begin; ++ic) {
  11684. ggml_vec_mad_f32(D,
  11685. (float *) ((char *) grad_q + (iq1*nbgq1 + iq2*nbgq2 + iq3*nbgq3)),
  11686. (float *) ((char *) k->data + (ic*nbk1 + ik2*nbk2 + ik3*nbk3)),
  11687. S[ic]);
  11688. }
  11689. // grad[k][:D,:M,iq2,iq3] += S.T @ qcur
  11690. // for ic:
  11691. // grad[k][:D,ic,iq2,iq3] += S.T[0,ic] * qcur[:D,0]
  11692. // grad[k][:D,ic,iq2,iq3] += S[ic] * qcur[:D,0]
  11693. // exclude known zero S[..] values from loop
  11694. for (int64_t ic = 0; ic < masked_begin; ++ic) {
  11695. ggml_vec_mad_f32(D,
  11696. (float *) ((char *) grad_k + (ic*nbgk1 + ik2*nbgk2 + ik3*nbgk3)),
  11697. (float *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)),
  11698. S[ic]);
  11699. }
  11700. // grad[v][:M,:D,iv2,iv3] += d[:D,id1,id2,id3].T @ SM
  11701. // for ic:
  11702. // grad[v][:M,ic,iv2,iv3] += d[:D,id1,id2,id3].T[0,ic] * SM[:M]
  11703. // grad[v][:M,ic,iv2,iv3] += d[ic,id1,id2,id3] * SM[:M]
  11704. // exclude known zero SM[..] values from mad
  11705. for (int64_t ic = 0; ic < D; ++ic) {
  11706. ggml_vec_mad_f32(masked_begin,
  11707. (float *) ((char *) grad_v + ( ic*nbgv1 + iv2*nbgv2 + iv3*nbgv3)),
  11708. SM,
  11709. *(float *) ((char *) d->data + (ic*nbd0 + id1*nbd1 + id2*nbd2 + id3*nbd3)));
  11710. }
  11711. }
  11712. }
  11713. }
  11714. }
  11715. static void ggml_compute_forward_flash_attn_back(
  11716. const struct ggml_compute_params * params,
  11717. const struct ggml_tensor * q,
  11718. const struct ggml_tensor * k,
  11719. const struct ggml_tensor * v,
  11720. const struct ggml_tensor * d,
  11721. const bool masked,
  11722. struct ggml_tensor * dst) {
  11723. switch (q->type) {
  11724. case GGML_TYPE_F32:
  11725. {
  11726. ggml_compute_forward_flash_attn_back_f32(params, q, k, v, d, masked, dst);
  11727. } break;
  11728. default:
  11729. {
  11730. GGML_ASSERT(false);
  11731. } break;
  11732. }
  11733. }
  11734. // ggml_compute_forward_win_part
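// Added note (not part of the original source): win_part splits the
// [ne00, ne01, ne02] input into nep0*nep1 non-overlapping windows of size w along
// dims 1 and 2 and stacks them along dim 3 of dst; positions that fall outside the
// input are zero padded. win_unpart further below performs the inverse, dropping
// the padding.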
  11735. static void ggml_compute_forward_win_part_f32(
  11736. const struct ggml_compute_params * params,
  11737. const struct ggml_tensor * src0,
  11738. struct ggml_tensor * dst) {
  11739. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  11740. return;
  11741. }
  11742. GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne)
  11743. GGML_TENSOR_LOCALS(int64_t, ne, dst, ne)
  11744. const int32_t nep0 = ((const int32_t *)(dst->op_params))[0];
  11745. const int32_t nep1 = ((const int32_t *)(dst->op_params))[1];
  11746. const int32_t w = ((const int32_t *)(dst->op_params))[2];
  11747. assert(ne00 == ne0);
  11748. assert(ne3 == nep0*nep1);
  11749. // TODO: optimize / multi-thread
  11750. for (int py = 0; py < nep1; ++py) {
  11751. for (int px = 0; px < nep0; ++px) {
  11752. const int64_t i3 = py*nep0 + px;
  11753. for (int64_t i2 = 0; i2 < ne2; ++i2) {
  11754. for (int64_t i1 = 0; i1 < ne1; ++i1) {
  11755. for (int64_t i0 = 0; i0 < ne0; ++i0) {
  11756. const int64_t i02 = py*w + i2;
  11757. const int64_t i01 = px*w + i1;
  11758. const int64_t i00 = i0;
  11759. const int64_t i = i3*ne2*ne1*ne0 + i2*ne1*ne0 + i1*ne0 + i0;
  11760. const int64_t j = i02*ne01*ne00 + i01*ne00 + i00;
  11761. if (py*w + i2 >= ne02 || px*w + i1 >= ne01) {
  11762. ((float *) dst->data)[i] = 0.0f;
  11763. } else {
  11764. ((float *) dst->data)[i] = ((float *) src0->data)[j];
  11765. }
  11766. }
  11767. }
  11768. }
  11769. }
  11770. }
  11771. }
  11772. static void ggml_compute_forward_win_part(
  11773. const struct ggml_compute_params * params,
  11774. const struct ggml_tensor * src0,
  11775. struct ggml_tensor * dst) {
  11776. switch (src0->type) {
  11777. case GGML_TYPE_F32:
  11778. {
  11779. ggml_compute_forward_win_part_f32(params, src0, dst);
  11780. } break;
  11781. default:
  11782. {
  11783. GGML_ASSERT(false);
  11784. } break;
  11785. }
  11786. }
  11787. // ggml_compute_forward_win_unpart
  11788. static void ggml_compute_forward_win_unpart_f32(
  11789. const struct ggml_compute_params * params,
  11790. const struct ggml_tensor * src0,
  11791. struct ggml_tensor * dst) {
  11792. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  11793. return;
  11794. }
  11795. GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne)
  11796. GGML_TENSOR_LOCALS(int64_t, ne, dst, ne)
  11797. const int32_t w = ((const int32_t *)(dst->op_params))[0];
  11798. // padding
  11799. const int px = (w - ne1%w)%w;
  11800. //const int py = (w - ne2%w)%w;
  11801. const int npx = (px + ne1)/w;
  11802. //const int npy = (py + ne2)/w;
  11803. assert(ne0 == ne00);
  11804. // TODO: optimize / multi-thread
  11805. for (int64_t i2 = 0; i2 < ne2; ++i2) {
  11806. for (int64_t i1 = 0; i1 < ne1; ++i1) {
  11807. for (int64_t i0 = 0; i0 < ne0; ++i0) {
  11808. const int ip2 = i2/w;
  11809. const int ip1 = i1/w;
  11810. const int64_t i02 = i2%w;
  11811. const int64_t i01 = i1%w;
  11812. const int64_t i00 = i0;
  11813. const int64_t i = (ip2*npx + ip1)*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00 + i00;
  11814. const int64_t j = i2*ne1*ne0 + i1*ne0 + i0;
  11815. ((float *) dst->data)[j] = ((float *) src0->data)[i];
  11816. }
  11817. }
  11818. }
  11819. }
  11820. static void ggml_compute_forward_win_unpart(
  11821. const struct ggml_compute_params * params,
  11822. const struct ggml_tensor * src0,
  11823. struct ggml_tensor * dst) {
  11824. switch (src0->type) {
  11825. case GGML_TYPE_F32:
  11826. {
  11827. ggml_compute_forward_win_unpart_f32(params, src0, dst);
  11828. } break;
  11829. default:
  11830. {
  11831. GGML_ASSERT(false);
  11832. } break;
  11833. }
  11834. }
11835. // ggml_compute_forward_unary
  11836. static void ggml_compute_forward_unary(
  11837. const struct ggml_compute_params * params,
  11838. const struct ggml_tensor * src0,
  11839. struct ggml_tensor * dst) {
  11840. const enum ggml_unary_op op = ggml_get_unary_op(dst);
  11841. switch (op) {
  11842. case GGML_UNARY_OP_ABS:
  11843. {
  11844. ggml_compute_forward_abs(params, src0, dst);
  11845. } break;
  11846. case GGML_UNARY_OP_SGN:
  11847. {
  11848. ggml_compute_forward_sgn(params, src0, dst);
  11849. } break;
  11850. case GGML_UNARY_OP_NEG:
  11851. {
  11852. ggml_compute_forward_neg(params, src0, dst);
  11853. } break;
  11854. case GGML_UNARY_OP_STEP:
  11855. {
  11856. ggml_compute_forward_step(params, src0, dst);
  11857. } break;
  11858. case GGML_UNARY_OP_TANH:
  11859. {
  11860. ggml_compute_forward_tanh(params, src0, dst);
  11861. } break;
  11862. case GGML_UNARY_OP_ELU:
  11863. {
  11864. ggml_compute_forward_elu(params, src0, dst);
  11865. } break;
  11866. case GGML_UNARY_OP_RELU:
  11867. {
  11868. ggml_compute_forward_relu(params, src0, dst);
  11869. } break;
  11870. case GGML_UNARY_OP_GELU:
  11871. {
  11872. ggml_compute_forward_gelu(params, src0, dst);
  11873. } break;
  11874. case GGML_UNARY_OP_GELU_QUICK:
  11875. {
  11876. ggml_compute_forward_gelu_quick(params, src0, dst);
  11877. } break;
  11878. case GGML_UNARY_OP_SILU:
  11879. {
  11880. ggml_compute_forward_silu(params, src0, dst);
  11881. } break;
  11882. case GGML_UNARY_OP_HARDSWISH:
  11883. {
  11884. ggml_compute_forward_hardswish(params, src0, dst);
  11885. } break;
  11886. case GGML_UNARY_OP_HARDSIGMOID:
  11887. {
  11888. ggml_compute_forward_hardsigmoid(params, src0, dst);
  11889. } break;
  11890. default:
  11891. {
  11892. GGML_ASSERT(false);
  11893. } break;
  11894. }
  11895. }
  11896. // ggml_compute_forward_get_rel_pos
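// Added note (not part of the original source): following the referenced SAM
// image_encoder code, this gathers relative-position embeddings. For output
// indices i2 and i1 (with w = ne1), the source row is
//   pos = (w - i1 - 1) + i2
// so dst[i2, i1, :] = src0[pos, :]. For example with w = 3, (i2 = 0, i1 = 2)
// reads row 0 and (i2 = 2, i1 = 0) reads row 4.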
  11897. static void ggml_compute_forward_get_rel_pos_f16(
  11898. const struct ggml_compute_params * params,
  11899. const struct ggml_tensor * src0,
  11900. struct ggml_tensor * dst) {
  11901. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  11902. return;
  11903. }
  11904. // ref: https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/modeling/image_encoder.py#L292-L322
  11905. GGML_TENSOR_UNARY_OP_LOCALS
  11906. const int64_t w = ne1;
  11907. ggml_fp16_t * src0_data = (ggml_fp16_t *) src0->data;
  11908. ggml_fp16_t * dst_data = (ggml_fp16_t *) dst->data;
  11909. for (int64_t i2 = 0; i2 < ne2; ++i2) {
  11910. for (int64_t i1 = 0; i1 < ne1; ++i1) {
  11911. const int64_t pos = (w - i1 - 1) + i2;
  11912. for (int64_t i0 = 0; i0 < ne0; ++i0) {
  11913. dst_data[i2*ne1*ne0 + i1*ne0 + i0] = src0_data[pos*ne00 + i0];
  11914. }
  11915. }
  11916. }
  11917. }
  11918. static void ggml_compute_forward_get_rel_pos(
  11919. const struct ggml_compute_params * params,
  11920. const struct ggml_tensor * src0,
  11921. struct ggml_tensor * dst) {
  11922. switch (src0->type) {
  11923. case GGML_TYPE_F16:
  11924. {
  11925. ggml_compute_forward_get_rel_pos_f16(params, src0, dst);
  11926. } break;
  11927. default:
  11928. {
  11929. GGML_ASSERT(false);
  11930. } break;
  11931. }
  11932. }
  11933. // ggml_compute_forward_add_rel_pos
  11934. static void ggml_compute_forward_add_rel_pos_f32(
  11935. const struct ggml_compute_params * params,
  11936. const struct ggml_tensor * src0,
  11937. const struct ggml_tensor * src1,
  11938. const struct ggml_tensor * src2,
  11939. struct ggml_tensor * dst) {
  11940. const bool inplace = (bool) ((int32_t *) dst->op_params)[0];
  11941. if (!inplace && params->type == GGML_TASK_INIT) {
  11942. if (params->ith != 0) {
  11943. return;
  11944. }
  11945. memcpy((char *) dst->data, (char *) src0->data, ggml_nbytes(dst));
  11946. return;
  11947. }
  11948. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  11949. return;
  11950. }
  11951. int64_t t0 = ggml_perf_time_us();
  11952. UNUSED(t0);
  11953. // ref: https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/modeling/image_encoder.py#L357-L359
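// Added note (not part of the original source): in the referenced SAM code this
// op adds decomposed relative-position terms to an attention tensor, roughly
//   attn[..., kh, kw] += rel_h[..., kh] + rel_w[..., kw]
// The jdh/jdw index arithmetic in the loops below realizes the two broadcasts:
// one of src1/src2 is broadcast along the innermost ne10 positions of dst and the
// other with stride ne10, on top of the src0 values copied into dst during
// GGML_TASK_INIT.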
  11954. float * src1_data = (float *) src1->data;
  11955. float * src2_data = (float *) src2->data;
  11956. float * dst_data = (float *) dst->data;
  11957. const int64_t ne10 = src1->ne[0];
  11958. const int64_t ne11 = src1->ne[1];
  11959. const int64_t ne12 = src1->ne[2];
  11960. const int64_t ne13 = src1->ne[3];
  11961. const int ith = params->ith;
  11962. const int nth = params->nth;
  11963. // total patches in dst
  11964. const int np = ne13;
  11965. // patches per thread
  11966. const int dp = (np + nth - 1)/nth;
  11967. // patch range for this thread
  11968. const int ip0 = dp*ith;
  11969. const int ip1 = MIN(ip0 + dp, np);
  11970. for (int64_t i13 = ip0; i13 < ip1; ++i13) {
  11971. for (int64_t i12 = 0; i12 < ne12; ++i12) {
  11972. for (int64_t i11 = 0; i11 < ne11; ++i11) {
  11973. const int64_t jp1 = i13*ne12*ne11*ne10 + i12*ne11*ne10 + i11*ne10;
  11974. for (int64_t i10 = 0; i10 < ne10; ++i10) {
  11975. const int64_t jp0 = jp1 + i10;
  11976. const float src1_e = src1_data[jp0];
  11977. const float src2_e = src2_data[jp0];
  11978. const int64_t jdh = jp0 * ne10;
  11979. const int64_t jdw = jdh - (ne10 - 1) * i10;
  11980. for (int64_t j = 0; j < ne10; ++j) {
  11981. dst_data[jdh + j ] += src2_e;
  11982. dst_data[jdw + j*ne10] += src1_e;
  11983. }
  11984. }
  11985. }
  11986. }
  11987. }
  11988. }
  11989. static void ggml_compute_forward_add_rel_pos(
  11990. const struct ggml_compute_params * params,
  11991. const struct ggml_tensor * src0,
  11992. const struct ggml_tensor * src1,
  11993. const struct ggml_tensor * src2,
  11994. struct ggml_tensor * dst) {
  11995. switch (src0->type) {
  11996. case GGML_TYPE_F32:
  11997. {
  11998. ggml_compute_forward_add_rel_pos_f32(params, src0, src1, src2, dst);
  11999. } break;
  12000. default:
  12001. {
  12002. GGML_ASSERT(false);
  12003. } break;
  12004. }
  12005. }
  12006. // ggml_compute_forward_map_unary
  12007. static void ggml_compute_forward_map_unary_f32(
  12008. const struct ggml_compute_params * params,
  12009. const struct ggml_tensor * src0,
  12010. struct ggml_tensor * dst,
  12011. const ggml_unary_op_f32_t fun) {
  12012. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  12013. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  12014. return;
  12015. }
  12016. const int n = ggml_nrows(src0);
  12017. const int nc = src0->ne[0];
  12018. assert( dst->nb[0] == sizeof(float));
  12019. assert(src0->nb[0] == sizeof(float));
  12020. for (int i = 0; i < n; i++) {
  12021. fun(nc,
  12022. (float *) ((char *) dst->data + i*( dst->nb[1])),
  12023. (float *) ((char *) src0->data + i*(src0->nb[1])));
  12024. }
  12025. }
  12026. static void ggml_compute_forward_map_unary(
  12027. const struct ggml_compute_params * params,
  12028. const struct ggml_tensor * src0,
  12029. struct ggml_tensor * dst,
  12030. const ggml_unary_op_f32_t fun) {
  12031. switch (src0->type) {
  12032. case GGML_TYPE_F32:
  12033. {
  12034. ggml_compute_forward_map_unary_f32(params, src0, dst, fun);
  12035. } break;
  12036. default:
  12037. {
  12038. GGML_ASSERT(false);
  12039. } break;
  12040. }
  12041. }
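// Added example (illustrative only, not part of ggml): a user callback matching
// the ggml_unary_op_f32_t shape used by ggml_compute_forward_map_unary_f32 above.
// It receives one row at a time (n = src0->ne[0]) and writes the result into the
// destination row:
//
//   static void example_sqr_row_f32(const int n, float * dst, const float * src) {
//       for (int i = 0; i < n; ++i) {
//           dst[i] = src[i] * src[i];   // any element-wise f32 function works here
//       }
//   }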
  12042. // ggml_compute_forward_map_binary
  12043. static void ggml_compute_forward_map_binary_f32(
  12044. const struct ggml_compute_params * params,
  12045. const struct ggml_tensor * src0,
  12046. const struct ggml_tensor * src1,
  12047. struct ggml_tensor * dst,
  12048. const ggml_binary_op_f32_t fun) {
  12049. assert(params->ith == 0);
  12050. assert(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
  12051. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  12052. return;
  12053. }
  12054. const int n = ggml_nrows(src0);
  12055. const int nc = src0->ne[0];
  12056. assert( dst->nb[0] == sizeof(float));
  12057. assert(src0->nb[0] == sizeof(float));
  12058. assert(src1->nb[0] == sizeof(float));
  12059. for (int i = 0; i < n; i++) {
  12060. fun(nc,
  12061. (float *) ((char *) dst->data + i*( dst->nb[1])),
  12062. (float *) ((char *) src0->data + i*(src0->nb[1])),
  12063. (float *) ((char *) src1->data + i*(src1->nb[1])));
  12064. }
  12065. }
  12066. static void ggml_compute_forward_map_binary(
  12067. const struct ggml_compute_params * params,
  12068. const struct ggml_tensor * src0,
  12069. const struct ggml_tensor * src1,
  12070. struct ggml_tensor * dst,
  12071. const ggml_binary_op_f32_t fun) {
  12072. switch (src0->type) {
  12073. case GGML_TYPE_F32:
  12074. {
  12075. ggml_compute_forward_map_binary_f32(params, src0, src1, dst, fun);
  12076. } break;
  12077. default:
  12078. {
  12079. GGML_ASSERT(false);
  12080. } break;
  12081. }
  12082. }
  12083. // ggml_compute_forward_map_custom1
  12084. static void ggml_compute_forward_map_custom1_f32(
  12085. const struct ggml_compute_params * params,
  12086. const struct ggml_tensor * a,
  12087. struct ggml_tensor * dst,
  12088. const ggml_custom1_op_f32_t fun) {
  12089. assert(params->ith == 0);
  12090. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  12091. return;
  12092. }
  12093. fun(dst, a);
  12094. }
  12095. // ggml_compute_forward_map_custom2
  12096. static void ggml_compute_forward_map_custom2_f32(
  12097. const struct ggml_compute_params * params,
  12098. const struct ggml_tensor * a,
  12099. const struct ggml_tensor * b,
  12100. struct ggml_tensor * dst,
  12101. const ggml_custom2_op_f32_t fun) {
  12102. assert(params->ith == 0);
  12103. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  12104. return;
  12105. }
  12106. fun(dst, a, b);
  12107. }
  12108. // ggml_compute_forward_map_custom3
  12109. static void ggml_compute_forward_map_custom3_f32(
  12110. const struct ggml_compute_params * params,
  12111. const struct ggml_tensor * a,
  12112. const struct ggml_tensor * b,
  12113. const struct ggml_tensor * c,
  12114. struct ggml_tensor * dst,
  12115. const ggml_custom3_op_f32_t fun) {
  12116. assert(params->ith == 0);
  12117. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  12118. return;
  12119. }
  12120. fun(dst, a, b, c);
  12121. }
  12122. // ggml_compute_forward_map_custom1
  12123. static void ggml_compute_forward_map_custom1(
  12124. const struct ggml_compute_params * params,
  12125. const struct ggml_tensor * a,
  12126. struct ggml_tensor * dst) {
  12127. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  12128. return;
  12129. }
  12130. struct ggml_map_custom1_op_params * p = (struct ggml_map_custom1_op_params *) dst->op_params;
  12131. p->fun(dst, a, params->ith, params->nth, p->userdata);
  12132. }
  12133. // ggml_compute_forward_map_custom2
  12134. static void ggml_compute_forward_map_custom2(
  12135. const struct ggml_compute_params * params,
  12136. const struct ggml_tensor * a,
  12137. const struct ggml_tensor * b,
  12138. struct ggml_tensor * dst) {
  12139. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  12140. return;
  12141. }
  12142. struct ggml_map_custom2_op_params * p = (struct ggml_map_custom2_op_params *) dst->op_params;
  12143. p->fun(dst, a, b, params->ith, params->nth, p->userdata);
  12144. }
  12145. // ggml_compute_forward_map_custom3
  12146. static void ggml_compute_forward_map_custom3(
  12147. const struct ggml_compute_params * params,
  12148. const struct ggml_tensor * a,
  12149. const struct ggml_tensor * b,
  12150. const struct ggml_tensor * c,
  12151. struct ggml_tensor * dst) {
  12152. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  12153. return;
  12154. }
  12155. struct ggml_map_custom3_op_params * p = (struct ggml_map_custom3_op_params *) dst->op_params;
  12156. p->fun(dst, a, b, c, params->ith, params->nth, p->userdata);
  12157. }
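// Added example (illustrative only, not part of ggml): a callback with the shape
// expected by ggml_compute_forward_map_custom1 above, i.e. matching the
// p->fun(dst, a, ith, nth, userdata) call. It scales every row of `a` into `dst`,
// splitting rows across the nth threads; the userdata pointer is a hypothetical
// user-supplied parameter:
//
//   static void example_scale_custom1(struct ggml_tensor * dst, const struct ggml_tensor * a,
//                                     int ith, int nth, void * userdata) {
//       const float   s  = *(const float *) userdata;
//       const int64_t nr = ggml_nrows(a);
//       for (int64_t ir = ith; ir < nr; ir += nth) {
//           const float * x = (const float *) ((const char *) a->data   + ir*a->nb[1]);
//           float       * y = (float       *) ((char       *) dst->data + ir*dst->nb[1]);
//           for (int64_t i = 0; i < a->ne[0]; ++i) {
//               y[i] = s * x[i];
//           }
//       }
//   }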
  12158. // ggml_compute_forward_cross_entropy_loss
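// Added summary (not part of the original source): for each row i the code below
// forms a numerically stable softmax of src0, rescaled into [eps, 1] to avoid
// log(0), and accumulates
//   sums[ith] += sum_c src1[i,c] * log(softmax(src0[i])[c])
// The FINALIZE step then reduces the per-thread sums and multiplies by -1/nr,
// i.e. the mean cross-entropy loss over the nr rows.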
  12159. static void ggml_compute_forward_cross_entropy_loss_f32(
  12160. const struct ggml_compute_params * params,
  12161. const struct ggml_tensor * src0,
  12162. const struct ggml_tensor * src1,
  12163. struct ggml_tensor * dst) {
  12164. GGML_ASSERT(ggml_is_contiguous(src0));
  12165. GGML_ASSERT(ggml_is_contiguous(src1));
  12166. GGML_ASSERT(ggml_is_scalar(dst));
  12167. GGML_ASSERT(ggml_are_same_shape(src0, src1));
  12168. const int ith = params->ith;
  12169. const int nth = params->nth;
  12170. float * sums = (float *) params->wdata;
  12171. // TODO: handle transposed/permuted matrices
  12172. const int nc = src0->ne[0];
  12173. const int nr = ggml_nrows(src0);
  12174. GGML_ASSERT(params->wsize >= sizeof(float) * (nth + nth * nc));
  12175. if (params->type == GGML_TASK_INIT) {
  12176. if (ith == 0) {
  12177. memset(sums, 0, sizeof(float) * (nth + nth * nc));
  12178. }
  12179. return;
  12180. }
  12181. if (params->type == GGML_TASK_FINALIZE) {
  12182. if (ith == 0) {
  12183. float * dp = (float *) dst->data;
  12184. ggml_vec_sum_f32(nth, dp, sums);
  12185. dp[0] *= -1.0f / (float) nr;
  12186. }
  12187. return;
  12188. }
  12189. const double eps = 1e-9;
  12190. // rows per thread
  12191. const int dr = (nr + nth - 1)/nth;
  12192. // row range for this thread
  12193. const int ir0 = dr*ith;
  12194. const int ir1 = MIN(ir0 + dr, nr);
  12195. for (int i1 = ir0; i1 < ir1; i1++) {
  12196. float * s0 = (float *)((char *) src0->data + i1*src0->nb[1]);
  12197. float * s1 = (float *)((char *) src1->data + i1*src1->nb[1]);
  12198. float * st = ((float *) params->wdata) + nth + ith*nc;
  12199. #ifndef NDEBUG
  12200. for (int i = 0; i < nc; ++i) {
  12201. //printf("p[%d] = %f\n", i, p[i]);
  12202. assert(!isnan(s0[i]));
  12203. assert(!isnan(s1[i]));
  12204. }
  12205. #endif
  12206. // soft_max
  12207. ggml_float sum = 0.0;
  12208. {
  12209. float max = -INFINITY;
  12210. ggml_vec_max_f32(nc, &max, s0);
  12211. uint16_t scvt; UNUSED(scvt);
  12212. for (int i = 0; i < nc; i++) {
  12213. if (s0[i] == -INFINITY) {
  12214. st[i] = 0.0f;
  12215. } else {
  12216. #ifndef GGML_CROSS_ENTROPY_EXP_FP16
  12217. const float s = s0[i] - max;
  12218. const float val = expf(s);
  12219. #else
  12220. ggml_fp16_t s = GGML_FP32_TO_FP16(s0[i] - max);
  12221. memcpy(&scvt, &s, sizeof(scvt));
  12222. const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt]);
  12223. #endif
  12224. sum += (ggml_float)val;
  12225. st[i] = val;
  12226. }
  12227. }
  12228. assert(sum > 0.0);
  12229. // sum = 1.0/sum;
  12230. }
  12231. // avoid log(0) by rescaling from [0..1] to [eps..1]
  12232. sum = (1.0 - eps) / sum;
  12233. ggml_vec_scale_f32(nc, st, sum);
  12234. ggml_vec_add1_f32(nc, st, st, eps);
  12235. ggml_vec_log_f32(nc, st, st);
  12236. ggml_vec_mul_f32(nc, st, st, s1);
  12237. float st_sum = 0;
  12238. ggml_vec_sum_f32(nc, &st_sum, st);
  12239. sums[ith] += st_sum;
  12240. #ifndef NDEBUG
  12241. for (int i = 0; i < nc; ++i) {
  12242. assert(!isnan(st[i]));
  12243. assert(!isinf(st[i]));
  12244. }
  12245. #endif
  12246. }
  12247. }
  12248. static void ggml_compute_forward_cross_entropy_loss(
  12249. const struct ggml_compute_params * params,
  12250. const struct ggml_tensor * src0,
  12251. const struct ggml_tensor * src1,
  12252. struct ggml_tensor * dst) {
  12253. switch (src0->type) {
  12254. case GGML_TYPE_F32:
  12255. {
  12256. ggml_compute_forward_cross_entropy_loss_f32(params, src0, src1, dst);
  12257. } break;
  12258. default:
  12259. {
  12260. GGML_ASSERT(false);
  12261. } break;
  12262. }
  12263. }
  12264. // ggml_compute_forward_cross_entropy_loss_back
  12265. static void ggml_compute_forward_cross_entropy_loss_back_f32(
  12266. const struct ggml_compute_params * params,
  12267. const struct ggml_tensor * src0,
  12268. const struct ggml_tensor * src1,
  12269. const struct ggml_tensor * opt0,
  12270. struct ggml_tensor * dst) {
  12271. GGML_ASSERT(ggml_is_contiguous(dst));
  12272. GGML_ASSERT(ggml_is_contiguous(src0));
  12273. GGML_ASSERT(ggml_is_contiguous(src1));
  12274. GGML_ASSERT(ggml_is_contiguous(opt0));
  12275. GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
  12276. const int64_t ith = params->ith;
  12277. const int64_t nth = params->nth;
  12278. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  12279. return;
  12280. }
  12281. const double eps = 1e-9;
  12282. // TODO: handle transposed/permuted matrices
  12283. const int64_t nc = src0->ne[0];
  12284. const int64_t nr = ggml_nrows(src0);
  12285. // rows per thread
  12286. const int64_t dr = (nr + nth - 1)/nth;
  12287. // row range for this thread
  12288. const int64_t ir0 = dr*ith;
  12289. const int64_t ir1 = MIN(ir0 + dr, nr);
  12290. float * d = (float *) opt0->data;
  12291. for (int64_t i1 = ir0; i1 < ir1; i1++) {
  12292. float * ds0 = (float *)((char *) dst->data + i1*dst->nb[1]);
  12293. float * s0 = (float *)((char *) src0->data + i1*src0->nb[1]);
  12294. float * s1 = (float *)((char *) src1->data + i1*src1->nb[1]);
  12295. #ifndef NDEBUG
  12296. for (int i = 0; i < nc; ++i) {
  12297. //printf("p[%d] = %f\n", i, p[i]);
  12298. assert(!isnan(s0[i]));
  12299. assert(!isnan(s1[i]));
  12300. }
  12301. #endif
  12302. // soft_max
  12303. ggml_float sum = 0.0;
  12304. {
  12305. float max = -INFINITY;
  12306. ggml_vec_max_f32(nc, &max, s0);
  12307. uint16_t scvt; UNUSED(scvt);
  12308. for (int i = 0; i < nc; i++) {
  12309. if (s0[i] == -INFINITY) {
  12310. ds0[i] = 0.0f;
  12311. } else {
  12312. #ifndef GGML_CROSS_ENTROPY_EXP_FP16
  12313. const float s = s0[i] - max;
  12314. const float val = expf(s);
  12315. #else
  12316. ggml_fp16_t s = GGML_FP32_TO_FP16(s0[i] - max);
  12317. memcpy(&scvt, &s, sizeof(scvt));
  12318. const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt]);
  12319. #endif
  12320. sum += (ggml_float)val;
  12321. ds0[i] = val;
  12322. }
  12323. }
  12324. assert(sum > 0.0);
  12325. sum = (1.0 - eps)/sum;
  12326. }
  12327. // grad(src0) = (softmax(src0) - src1) * grad(cross_entropy_loss(src0, src1)) / nr
  12328. ggml_vec_scale_f32(nc, ds0, sum);
  12329. ggml_vec_add1_f32(nc, ds0, ds0, eps);
  12330. ggml_vec_sub_f32(nc, ds0, ds0, s1);
  12331. ggml_vec_scale_f32(nc, ds0, d[0] / (float) nr);
  12332. #ifndef NDEBUG
  12333. for (int i = 0; i < nc; ++i) {
  12334. assert(!isnan(ds0[i]));
  12335. assert(!isinf(ds0[i]));
  12336. }
  12337. #endif
  12338. }
  12339. }
  12340. static void ggml_compute_forward_cross_entropy_loss_back(
  12341. const struct ggml_compute_params * params,
  12342. const struct ggml_tensor * src0,
  12343. const struct ggml_tensor * src1,
  12344. const struct ggml_tensor * opt0,
  12345. struct ggml_tensor * dst) {
  12346. switch (src0->type) {
  12347. case GGML_TYPE_F32:
  12348. {
  12349. ggml_compute_forward_cross_entropy_loss_back_f32(params, src0, src1, opt0, dst);
  12350. } break;
  12351. default:
  12352. {
  12353. GGML_ASSERT(false);
  12354. } break;
  12355. }
  12356. }
  12357. /////////////////////////////////
  12358. static void ggml_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor) {
  12359. GGML_ASSERT(params);
  12360. if (tensor->op == GGML_OP_NONE) {
  12361. return;
  12362. }
  12363. #ifdef GGML_USE_CUBLAS
  12364. bool skip_cpu = ggml_cuda_compute_forward(params, tensor);
  12365. if (skip_cpu) {
  12366. return;
  12367. }
  12368. GGML_ASSERT(tensor->src[0] == NULL || tensor->src[0]->backend == GGML_BACKEND_CPU);
  12369. GGML_ASSERT(tensor->src[1] == NULL || tensor->src[1]->backend == GGML_BACKEND_CPU);
  12370. #elif defined(GGML_USE_VULKAN)
  12371. const bool skip_cpu = ggml_vk_compute_forward_cpu_assist(params, tensor);
  12372. #ifdef GGML_VULKAN_CHECK_RESULTS
  12373. if (skip_cpu) {
  12374. ggml_vk_check_results_1_cpu_assist(params, tensor);
  12375. }
  12376. #endif
  12377. if (skip_cpu) {
  12378. return;
  12379. }
  12380. GGML_ASSERT(tensor->src[0] == NULL || tensor->src[0]->backend == GGML_BACKEND_CPU);
  12381. GGML_ASSERT(tensor->src[1] == NULL || tensor->src[1]->backend == GGML_BACKEND_CPU);
  12382. #endif // GGML_USE_CUBLAS
  12383. #ifdef GGML_USE_SYCL
  12384. bool skip_cpu = ggml_sycl_compute_forward(params, tensor);
  12385. if (skip_cpu) {
  12386. return;
  12387. }
  12388. #endif // GGML_USE_SYCL
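// Added note (not part of the original source): when a GPU backend (cuBLAS,
// Vulkan or SYCL above) reports that it already computed this tensor, the CPU
// switch below is skipped entirely; otherwise the op is dispatched on tensor->op
// to the corresponding ggml_compute_forward_* kernel.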
  12389. switch (tensor->op) {
  12390. case GGML_OP_DUP:
  12391. {
  12392. ggml_compute_forward_dup(params, tensor->src[0], tensor);
  12393. } break;
  12394. case GGML_OP_ADD:
  12395. {
  12396. ggml_compute_forward_add(params, tensor->src[0], tensor->src[1], tensor);
  12397. } break;
  12398. case GGML_OP_ADD1:
  12399. {
  12400. ggml_compute_forward_add1(params, tensor->src[0], tensor->src[1], tensor);
  12401. } break;
  12402. case GGML_OP_ACC:
  12403. {
  12404. ggml_compute_forward_acc(params, tensor->src[0], tensor->src[1], tensor);
  12405. } break;
  12406. case GGML_OP_SUB:
  12407. {
  12408. ggml_compute_forward_sub(params, tensor->src[0], tensor->src[1], tensor);
  12409. } break;
  12410. case GGML_OP_MUL:
  12411. {
  12412. ggml_compute_forward_mul(params, tensor->src[0], tensor->src[1], tensor);
  12413. } break;
  12414. case GGML_OP_DIV:
  12415. {
  12416. ggml_compute_forward_div(params, tensor->src[0], tensor->src[1], tensor);
  12417. } break;
  12418. case GGML_OP_SQR:
  12419. {
  12420. ggml_compute_forward_sqr(params, tensor->src[0], tensor);
  12421. } break;
  12422. case GGML_OP_SQRT:
  12423. {
  12424. ggml_compute_forward_sqrt(params, tensor->src[0], tensor);
  12425. } break;
  12426. case GGML_OP_LOG:
  12427. {
  12428. ggml_compute_forward_log(params, tensor->src[0], tensor);
  12429. } break;
  12430. case GGML_OP_SUM:
  12431. {
  12432. ggml_compute_forward_sum(params, tensor->src[0], tensor);
  12433. } break;
  12434. case GGML_OP_SUM_ROWS:
  12435. {
  12436. ggml_compute_forward_sum_rows(params, tensor->src[0], tensor);
  12437. } break;
  12438. case GGML_OP_MEAN:
  12439. {
  12440. ggml_compute_forward_mean(params, tensor->src[0], tensor);
  12441. } break;
  12442. case GGML_OP_ARGMAX:
  12443. {
  12444. ggml_compute_forward_argmax(params, tensor->src[0], tensor);
  12445. } break;
  12446. case GGML_OP_REPEAT:
  12447. {
  12448. ggml_compute_forward_repeat(params, tensor->src[0], tensor);
  12449. } break;
  12450. case GGML_OP_REPEAT_BACK:
  12451. {
  12452. ggml_compute_forward_repeat_back(params, tensor->src[0], tensor);
  12453. } break;
  12454. case GGML_OP_CONCAT:
  12455. {
  12456. ggml_compute_forward_concat(params, tensor->src[0], tensor->src[1], tensor);
  12457. } break;
  12458. case GGML_OP_SILU_BACK:
  12459. {
  12460. ggml_compute_forward_silu_back(params, tensor->src[0], tensor->src[1], tensor);
  12461. } break;
  12462. case GGML_OP_NORM:
  12463. {
  12464. ggml_compute_forward_norm(params, tensor->src[0], tensor);
  12465. } break;
  12466. case GGML_OP_RMS_NORM:
  12467. {
  12468. ggml_compute_forward_rms_norm(params, tensor->src[0], tensor);
  12469. } break;
  12470. case GGML_OP_RMS_NORM_BACK:
  12471. {
  12472. ggml_compute_forward_rms_norm_back(params, tensor->src[0], tensor->src[1], tensor);
  12473. } break;
  12474. case GGML_OP_GROUP_NORM:
  12475. {
  12476. ggml_compute_forward_group_norm(params, tensor->src[0], tensor);
  12477. } break;
  12478. case GGML_OP_MUL_MAT:
  12479. {
  12480. ggml_compute_forward_mul_mat(params, tensor->src[0], tensor->src[1], tensor);
  12481. } break;
  12482. case GGML_OP_MUL_MAT_ID:
  12483. {
  12484. ggml_compute_forward_mul_mat_id(params, tensor->src[0], tensor->src[1], tensor);
  12485. } break;
  12486. case GGML_OP_OUT_PROD:
  12487. {
  12488. ggml_compute_forward_out_prod(params, tensor->src[0], tensor->src[1], tensor);
  12489. } break;
  12490. case GGML_OP_SCALE:
  12491. {
  12492. ggml_compute_forward_scale(params, tensor->src[0], tensor);
  12493. } break;
  12494. case GGML_OP_SET:
  12495. {
  12496. ggml_compute_forward_set(params, tensor->src[0], tensor->src[1], tensor);
  12497. } break;
  12498. case GGML_OP_CPY:
  12499. {
  12500. ggml_compute_forward_cpy(params, tensor->src[0], tensor);
  12501. } break;
  12502. case GGML_OP_CONT:
  12503. {
  12504. ggml_compute_forward_cont(params, tensor->src[0], tensor);
  12505. } break;
  12506. case GGML_OP_RESHAPE:
  12507. {
  12508. ggml_compute_forward_reshape(params, tensor->src[0], tensor);
  12509. } break;
  12510. case GGML_OP_VIEW:
  12511. {
  12512. ggml_compute_forward_view(params, tensor->src[0]);
  12513. } break;
  12514. case GGML_OP_PERMUTE:
  12515. {
  12516. ggml_compute_forward_permute(params, tensor->src[0]);
  12517. } break;
  12518. case GGML_OP_TRANSPOSE:
  12519. {
  12520. ggml_compute_forward_transpose(params, tensor->src[0]);
  12521. } break;
  12522. case GGML_OP_GET_ROWS:
  12523. {
  12524. ggml_compute_forward_get_rows(params, tensor->src[0], tensor->src[1], tensor);
  12525. } break;
  12526. case GGML_OP_GET_ROWS_BACK:
  12527. {
  12528. ggml_compute_forward_get_rows_back(params, tensor->src[0], tensor->src[1], tensor);
  12529. } break;
  12530. case GGML_OP_DIAG:
  12531. {
  12532. ggml_compute_forward_diag(params, tensor->src[0], tensor);
  12533. } break;
  12534. case GGML_OP_DIAG_MASK_INF:
  12535. {
  12536. ggml_compute_forward_diag_mask_inf(params, tensor->src[0], tensor);
  12537. } break;
  12538. case GGML_OP_DIAG_MASK_ZERO:
  12539. {
  12540. ggml_compute_forward_diag_mask_zero(params, tensor->src[0], tensor);
  12541. } break;
  12542. case GGML_OP_SOFT_MAX:
  12543. {
  12544. ggml_compute_forward_soft_max(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor);
  12545. } break;
  12546. case GGML_OP_SOFT_MAX_BACK:
  12547. {
  12548. ggml_compute_forward_soft_max_back(params, tensor->src[0], tensor->src[1], tensor);
  12549. } break;
  12550. case GGML_OP_ROPE:
  12551. {
  12552. ggml_compute_forward_rope(params, tensor->src[0], tensor->src[1], tensor);
  12553. } break;
  12554. case GGML_OP_ROPE_BACK:
  12555. {
  12556. ggml_compute_forward_rope_back(params, tensor->src[0], tensor->src[1], tensor);
  12557. } break;
  12558. case GGML_OP_ALIBI:
  12559. {
  12560. ggml_compute_forward_alibi(params, tensor->src[0], tensor);
  12561. } break;
  12562. case GGML_OP_CLAMP:
  12563. {
  12564. ggml_compute_forward_clamp(params, tensor->src[0], tensor);
  12565. } break;
  12566. case GGML_OP_CONV_TRANSPOSE_1D:
  12567. {
  12568. ggml_compute_forward_conv_transpose_1d(params, tensor->src[0], tensor->src[1], tensor);
  12569. } break;
  12570. case GGML_OP_IM2COL:
  12571. {
  12572. ggml_compute_forward_im2col(params, tensor->src[0], tensor->src[1], tensor);
  12573. } break;
  12574. case GGML_OP_CONV_TRANSPOSE_2D:
  12575. {
  12576. ggml_compute_forward_conv_transpose_2d(params, tensor->src[0], tensor->src[1], tensor);
  12577. } break;
  12578. case GGML_OP_POOL_1D:
  12579. {
  12580. ggml_compute_forward_pool_1d(params, tensor->src[0], tensor);
  12581. } break;
  12582. case GGML_OP_POOL_2D:
  12583. {
  12584. ggml_compute_forward_pool_2d(params, tensor->src[0], tensor);
  12585. } break;
  12586. case GGML_OP_UPSCALE:
  12587. {
  12588. ggml_compute_forward_upscale(params, tensor->src[0], tensor);
  12589. } break;
  12590. case GGML_OP_PAD:
  12591. {
  12592. ggml_compute_forward_pad(params, tensor->src[0], tensor);
  12593. } break;
  12594. case GGML_OP_ARGSORT:
  12595. {
  12596. ggml_compute_forward_argsort(params, tensor->src[0], tensor);
  12597. } break;
  12598. case GGML_OP_LEAKY_RELU:
  12599. {
  12600. ggml_compute_forward_leaky_relu(params, tensor->src[0], tensor);
  12601. } break;
  12602. case GGML_OP_FLASH_ATTN:
  12603. {
  12604. const int32_t t = ggml_get_op_params_i32(tensor, 0);
  12605. GGML_ASSERT(t == 0 || t == 1);
  12606. const bool masked = t != 0;
  12607. ggml_compute_forward_flash_attn(params, tensor->src[0], tensor->src[1], tensor->src[2], masked, tensor);
  12608. } break;
  12609. case GGML_OP_FLASH_FF:
  12610. {
  12611. ggml_compute_forward_flash_ff(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor->src[3], tensor->src[4], tensor);
  12612. } break;
  12613. case GGML_OP_FLASH_ATTN_BACK:
  12614. {
  12615. int32_t t = ggml_get_op_params_i32(tensor, 0);
  12616. GGML_ASSERT(t == 0 || t == 1);
  12617. bool masked = t != 0;
  12618. ggml_compute_forward_flash_attn_back(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor->src[3], masked, tensor);
  12619. } break;
  12620. case GGML_OP_WIN_PART:
  12621. {
  12622. ggml_compute_forward_win_part(params, tensor->src[0], tensor);
  12623. } break;
  12624. case GGML_OP_WIN_UNPART:
  12625. {
  12626. ggml_compute_forward_win_unpart(params, tensor->src[0], tensor);
  12627. } break;
  12628. case GGML_OP_UNARY:
  12629. {
  12630. ggml_compute_forward_unary(params, tensor->src[0], tensor);
  12631. } break;
  12632. case GGML_OP_GET_REL_POS:
  12633. {
  12634. ggml_compute_forward_get_rel_pos(params, tensor->src[0], tensor);
  12635. } break;
  12636. case GGML_OP_ADD_REL_POS:
  12637. {
  12638. ggml_compute_forward_add_rel_pos(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor);
  12639. } break;
  12640. case GGML_OP_MAP_UNARY:
  12641. {
  12642. ggml_unary_op_f32_t fun;
  12643. memcpy(&fun, tensor->op_params, sizeof(fun));
  12644. ggml_compute_forward_map_unary(params, tensor->src[0], tensor, fun);
  12645. }
  12646. break;
  12647. case GGML_OP_MAP_BINARY:
  12648. {
  12649. ggml_binary_op_f32_t fun;
  12650. memcpy(&fun, tensor->op_params, sizeof(fun));
  12651. ggml_compute_forward_map_binary(params, tensor->src[0], tensor->src[1], tensor, fun);
  12652. }
  12653. break;
  12654. case GGML_OP_MAP_CUSTOM1_F32:
  12655. {
  12656. ggml_custom1_op_f32_t fun;
  12657. memcpy(&fun, tensor->op_params, sizeof(fun));
  12658. ggml_compute_forward_map_custom1_f32(params, tensor->src[0], tensor, fun);
  12659. }
  12660. break;
  12661. case GGML_OP_MAP_CUSTOM2_F32:
  12662. {
  12663. ggml_custom2_op_f32_t fun;
  12664. memcpy(&fun, tensor->op_params, sizeof(fun));
  12665. ggml_compute_forward_map_custom2_f32(params, tensor->src[0], tensor->src[1], tensor, fun);
  12666. }
  12667. break;
  12668. case GGML_OP_MAP_CUSTOM3_F32:
  12669. {
  12670. ggml_custom3_op_f32_t fun;
  12671. memcpy(&fun, tensor->op_params, sizeof(fun));
  12672. ggml_compute_forward_map_custom3_f32(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor, fun);
  12673. }
  12674. break;
  12675. case GGML_OP_MAP_CUSTOM1:
  12676. {
  12677. ggml_compute_forward_map_custom1(params, tensor->src[0], tensor);
  12678. }
  12679. break;
  12680. case GGML_OP_MAP_CUSTOM2:
  12681. {
  12682. ggml_compute_forward_map_custom2(params, tensor->src[0], tensor->src[1], tensor);
  12683. }
  12684. break;
  12685. case GGML_OP_MAP_CUSTOM3:
  12686. {
  12687. ggml_compute_forward_map_custom3(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor);
  12688. }
  12689. break;
  12690. case GGML_OP_CROSS_ENTROPY_LOSS:
  12691. {
  12692. ggml_compute_forward_cross_entropy_loss(params, tensor->src[0], tensor->src[1], tensor);
  12693. }
  12694. break;
  12695. case GGML_OP_CROSS_ENTROPY_LOSS_BACK:
  12696. {
  12697. ggml_compute_forward_cross_entropy_loss_back(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor);
  12698. }
  12699. break;
  12700. case GGML_OP_NONE:
  12701. {
  12702. // nop
  12703. } break;
  12704. case GGML_OP_COUNT:
  12705. {
  12706. GGML_ASSERT(false);
  12707. } break;
  12708. }
  12709. }
  12710. ////////////////////////////////////////////////////////////////////////////////
  12711. static size_t ggml_hash_size(size_t min_sz) {
  12712. // next primes after powers of two
  12713. static const size_t primes[] = {
  12714. 2, 3, 5, 11, 17, 37, 67, 131, 257, 521, 1031,
  12715. 2053, 4099, 8209, 16411, 32771, 65537, 131101,
  12716. 262147, 524309, 1048583, 2097169, 4194319, 8388617,
  12717. 16777259, 33554467, 67108879, 134217757, 268435459,
  12718. 536870923, 1073741827, 2147483659
  12719. };
  12720. static const size_t n_primes = sizeof(primes)/sizeof(primes[0]);
    // find the smallest prime that is larger than or equal to min_sz
  12722. size_t l = 0;
  12723. size_t r = n_primes;
  12724. while (l < r) {
  12725. size_t m = (l + r)/2;
  12726. if (primes[m] < min_sz) {
  12727. l = m + 1;
  12728. } else {
  12729. r = m;
  12730. }
  12731. }
  12732. size_t sz = l < n_primes ? primes[l] : min_sz | 1;
  12733. return sz;
  12734. }
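// illustrative examples of the rounding behavior above (values taken from the prime table):
//   ggml_hash_size(17)   == 17    (17 is already in the table)
//   ggml_hash_size(1000) == 1031  (next table prime >= 1000)
// requests beyond the largest table entry fall back to min_sz | 1 (forced odd)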
  12735. static size_t ggml_hash(const void * p) {
  12736. return (size_t)p;
  12737. }
  12738. size_t ggml_hash_find(const struct ggml_hash_set hash_set, struct ggml_tensor * key) {
  12739. size_t h = ggml_hash(key) % hash_set.size;
  12740. // linear probing
  12741. size_t i = h;
  12742. while (hash_set.keys[i] != NULL && hash_set.keys[i] != key) {
  12743. i = (i + 1) % hash_set.size;
  12744. if (i == h) {
  12745. // visited all hash table entries -> not found
  12746. return GGML_HASHTABLE_FULL;
  12747. }
  12748. }
  12749. return i;
  12750. }
  12751. bool ggml_hash_contains(struct ggml_hash_set hash_set, struct ggml_tensor * key) {
  12752. size_t i = ggml_hash_find(hash_set, key);
  12753. return i != GGML_HASHTABLE_FULL && hash_set.keys[i] == key;
  12754. }
  12755. size_t ggml_hash_insert(struct ggml_hash_set hash_set, struct ggml_tensor * key) {
  12756. size_t i = ggml_hash_find(hash_set, key);
  12757. GGML_ASSERT(i != GGML_HASHTABLE_FULL);
  12758. if (hash_set.keys[i] == key) {
  12759. return GGML_HASHTABLE_ALREADY_EXISTS;
  12760. }
  12761. // insert
  12762. GGML_ASSERT(hash_set.keys[i] == NULL);
  12763. hash_set.keys[i] = key;
  12764. return i;
  12765. }
  12766. size_t ggml_hash_find_or_insert(struct ggml_hash_set hash_set, struct ggml_tensor * key) {
  12767. size_t i = ggml_hash_find(hash_set, key);
  12768. GGML_ASSERT(i != GGML_HASHTABLE_FULL);
  12769. hash_set.keys[i] = key;
  12770. return i;
  12771. }
  12772. struct ggml_hash_set ggml_hash_set_new(size_t size) {
  12773. size = ggml_hash_size(size);
  12774. struct ggml_hash_set result;
  12775. result.size = size;
  12776. result.keys = GGML_MALLOC(sizeof(struct ggml_tensor *) * size);
  12777. memset(result.keys, 0, sizeof(struct ggml_tensor *) * size);
  12778. return result;
  12779. }
  12780. static void ggml_hash_set_free(struct ggml_hash_set hash_set) {
  12781. GGML_FREE(hash_set.keys);
  12782. }
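// illustrative usage sketch of the hash set API above (not exercised here; `t` stands for any
// struct ggml_tensor * the caller wants to track):
//   struct ggml_hash_set set = ggml_hash_set_new(16); // rounded up to 17 slots
//   if (ggml_hash_insert(set, t) != GGML_HASHTABLE_ALREADY_EXISTS) {
//       // first time t is seen
//   }
//   GGML_ASSERT(ggml_hash_contains(set, t));
//   ggml_hash_set_free(set);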
  12783. struct hash_map {
  12784. struct ggml_hash_set set;
  12785. struct ggml_tensor ** vals;
  12786. };
  12787. static struct hash_map * ggml_new_hash_map(size_t size) {
  12788. struct hash_map * result = GGML_MALLOC(sizeof(struct hash_map));
  12789. result->set = ggml_hash_set_new(size);
  12790. result->vals = GGML_MALLOC(sizeof(struct ggml_tensor *) * result->set.size);
  12791. memset(result->vals, 0, sizeof(struct ggml_tensor *) * result->set.size);
  12792. return result;
  12793. }
  12794. static void ggml_hash_map_free(struct hash_map * map) {
  12795. ggml_hash_set_free(map->set);
  12796. GGML_FREE(map->vals);
  12797. GGML_FREE(map);
  12798. }
  12799. // gradient checkpointing
  12800. static struct ggml_tensor * ggml_recompute_graph_node(
  12801. struct ggml_context * ctx,
  12802. struct ggml_cgraph * graph,
  12803. struct hash_map * replacements,
  12804. struct ggml_tensor * node) {
  12805. if (node == NULL) {
  12806. return NULL;
  12807. }
  12808. if (node->flags & GGML_TENSOR_FLAG_PARAM) {
  12809. return node;
  12810. }
  12811. if (!ggml_hash_contains(graph->visited_hash_table, node)) {
  12812. return node;
  12813. }
  12814. int count_children = 0;
  12815. for (int k = 0; k < GGML_MAX_SRC; ++k) {
  12816. if (node->src[k]) {
  12817. ++count_children;
  12818. }
  12819. }
  12820. if (count_children == 0) {
  12821. return node;
  12822. }
  12823. size_t i = ggml_hash_find(replacements->set, node);
  12824. GGML_ASSERT(i != GGML_HASHTABLE_FULL); // assert that not full
  12825. if (replacements->set.keys[i] == node) {
  12826. return replacements->vals[i];
  12827. }
  12828. struct ggml_tensor * clone = ggml_new_tensor(ctx, node->type, GGML_MAX_DIMS, node->ne);
  12829. // insert clone into replacements
  12830. GGML_ASSERT(replacements->set.keys[i] == NULL); // assert that we don't overwrite
  12831. replacements->set.keys[i] = node;
  12832. replacements->vals[i] = clone;
  12833. clone->op = node->op;
  12834. clone->grad = node->grad;
  12835. clone->flags = node->flags;
  12836. clone->extra = node->extra;
  12837. for (int k = 0; k < GGML_MAX_DIMS; ++k) {
  12838. clone->nb[k] = node->nb[k];
  12839. }
  12840. for (int k = 0; k < GGML_MAX_SRC; ++k) {
  12841. clone->src[k] = ggml_recompute_graph_node(ctx, graph, replacements, node->src[k]);
  12842. }
  12843. if (node->view_src != NULL) {
  12844. clone->data = (node->view_src->data == NULL)
  12845. ? NULL // view_src not yet allocated
  12846. : (char *) node->view_src->data // view_src already allocated
  12847. + node->view_offs;
  12848. clone->view_src = node->view_src;
  12849. clone->view_offs = node->view_offs;
  12850. }
  12851. GGML_ASSERT(sizeof(node->op_params) == sizeof(int32_t) * (GGML_MAX_OP_PARAMS / sizeof(int32_t)));
  12852. GGML_ASSERT(sizeof(node->name) == GGML_MAX_NAME);
  12853. memcpy(clone->op_params, node->op_params, sizeof(node->op_params));
  12854. ggml_format_name(clone, "%s (clone)", ggml_get_name(node));
  12855. return clone;
  12856. }
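// note: the recursion above clones a node's entire src subtree, stopping at parameters,
// at nodes that are not part of the forward graph, at leaf nodes (no sources), and at nodes
// that already have a replacement - in particular the checkpoints pre-inserted by the caller below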
  12857. void ggml_build_backward_gradient_checkpointing(
  12858. struct ggml_context * ctx,
  12859. struct ggml_cgraph * gf,
  12860. struct ggml_cgraph * gb,
  12861. struct ggml_cgraph * gb_tmp,
  12862. struct ggml_tensor * * checkpoints,
  12863. int n_checkpoints) {
  12864. ggml_graph_cpy(gf, gb_tmp);
  12865. ggml_build_backward_expand(ctx, gf, gb_tmp, true);
  12866. if (n_checkpoints <= 0) {
  12867. ggml_graph_cpy(gb_tmp, gb);
  12868. return;
  12869. }
  12870. struct hash_map * replacements = ggml_new_hash_map(gf->n_nodes + gf->n_leafs + n_checkpoints);
  12871. // insert checkpoints in replacements
  12872. for (int i = 0; i < n_checkpoints; ++i) {
  12873. size_t k = ggml_hash_find(replacements->set, checkpoints[i]);
  12874. GGML_ASSERT(k != GGML_HASHTABLE_FULL); // assert that not full
  12875. GGML_ASSERT(replacements->set.keys[k] == NULL); // assert that we don't overwrite
  12876. replacements->set.keys[k] = checkpoints[i];
  12877. replacements->vals[k] = checkpoints[i];
  12878. }
  12879. ggml_graph_cpy(gf, gb);
  12880. // rewrite gb_tmp->nodes[gf->n_nodes:gb_tmp->n_nodes],
  12881. // replacing references to gb_tmp->nodes[0:gf->n_nodes] ( == gf->nodes[0:gf->n_nodes]),
  12882. // by recomputing them from checkpoints
  12883. for (int i = gf->n_nodes; i<gb_tmp->n_nodes; ++i) {
  12884. struct ggml_tensor * node = gb_tmp->nodes[i];
  12885. for (int k = 0; k < GGML_MAX_SRC; ++k) {
            // insert new tensors that recompute src, reusing replacements already made;
            // record each new tensor in `replacements`, keyed by the corresponding gf node;
            // recurse into the input tensors, terminating when an input already has a
            // replacement (such as a checkpoint)
  12890. node->src[k] = ggml_recompute_graph_node(ctx, gf, replacements, node->src[k]);
  12891. }
  12892. // insert rewritten backward node with replacements made into resulting backward graph gb
  12893. ggml_build_forward_expand(gb, node);
  12894. }
  12895. ggml_hash_map_free(replacements);
  12896. }
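// illustrative call sequence (a sketch only; `size`, `loss`, `checkpoints` and `n_checkpoints`
// are chosen by the caller, typically a training example):
//   struct ggml_cgraph * gf     = ggml_new_graph_custom(ctx, size, /*grads=*/true);
//   struct ggml_cgraph * gb     = ggml_new_graph_custom(ctx, size, /*grads=*/true);
//   struct ggml_cgraph * gb_tmp = ggml_new_graph_custom(ctx, size, /*grads=*/true);
//   ggml_build_forward_expand(gf, loss);
//   // checkpoints[0..n_checkpoints) are forward tensors to keep in memory; the rest are recomputed
//   ggml_build_backward_gradient_checkpointing(ctx, gf, gb, gb_tmp, checkpoints, n_checkpoints);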
// functions to update gradients, handling the case where input a may still be the initial zero-valued gradient
  12898. static struct ggml_tensor * ggml_add_or_set(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, struct ggml_hash_set zero_table) {
  12899. if (ggml_hash_contains(zero_table, a)) {
  12900. return b;
  12901. } else {
  12902. return ggml_add_impl(ctx, a, b, false);
  12903. }
  12904. }
  12905. static struct ggml_tensor * ggml_acc_or_set(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, size_t nb1, size_t nb2, size_t nb3, size_t offset, struct ggml_hash_set zero_table) {
  12906. if (ggml_hash_contains(zero_table, a)) {
  12907. struct ggml_tensor * a_zero = ggml_scale(ctx, a, 0.0f);
  12908. return ggml_acc_impl(ctx, a_zero, b, nb1, nb2, nb3, offset, false);
  12909. } else {
  12910. return ggml_acc_impl(ctx, a, b, nb1, nb2, nb3, offset, false);
  12911. }
  12912. }
  12913. static struct ggml_tensor * ggml_add1_or_set(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, struct ggml_hash_set zero_table) {
  12914. if (ggml_hash_contains(zero_table, a)) {
  12915. return ggml_repeat(ctx, b, a);
  12916. } else {
  12917. return ggml_add1_impl(ctx, a, b, false);
  12918. }
  12919. }
  12920. static struct ggml_tensor * ggml_sub_or_set(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, struct ggml_hash_set zero_table) {
  12921. if (ggml_hash_contains(zero_table, a)) {
  12922. return ggml_neg(ctx, b);
  12923. } else {
  12924. return ggml_sub_impl(ctx, a, b, false);
  12925. }
  12926. }
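// for example, while gradient a is still listed in zero_table (i.e. it is a freshly created,
// zero-valued gradient), ggml_add_or_set(ctx, a, b, zero_table) simply returns b instead of
// building an add node, and ggml_sub_or_set() returns neg(b); the returned tensor is not in
// zero_table, so subsequent accumulations into it use the regular add/sub/acc ops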
  12927. static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor * tensor, struct ggml_hash_set zero_table) {
  12928. struct ggml_tensor * src0 = tensor->src[0];
  12929. struct ggml_tensor * src1 = tensor->src[1];
  12930. switch (tensor->op) {
  12931. case GGML_OP_DUP:
  12932. {
  12933. if (src0->grad) {
  12934. src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
  12935. }
  12936. } break;
  12937. case GGML_OP_ADD:
  12938. {
  12939. if (src0->grad) {
  12940. src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
  12941. }
  12942. if (src1->grad) {
  12943. src1->grad = ggml_add_or_set(ctx, src1->grad, tensor->grad, zero_table);
  12944. }
  12945. } break;
  12946. case GGML_OP_ADD1:
  12947. {
  12948. if (src0->grad) {
  12949. src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
  12950. }
  12951. if (src1->grad) {
  12952. src1->grad = ggml_add_or_set(ctx,
  12953. src1->grad,
  12954. ggml_mean(ctx, tensor->grad), // TODO: should probably be sum instead of mean
  12955. zero_table);
  12956. }
  12957. } break;
  12958. case GGML_OP_ACC:
  12959. {
  12960. if (src0->grad) {
  12961. src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
  12962. }
  12963. if (src1->grad) {
  12964. const size_t nb1 = ((int32_t *) tensor->op_params)[0];
  12965. const size_t nb2 = ((int32_t *) tensor->op_params)[1];
  12966. const size_t nb3 = ((int32_t *) tensor->op_params)[2];
  12967. const size_t offset = ((int32_t *) tensor->op_params)[3];
  12968. struct ggml_tensor * tensor_grad_view = ggml_view_4d(ctx,
  12969. tensor->grad,
  12970. src1->grad->ne[0],
  12971. src1->grad->ne[1],
  12972. src1->grad->ne[2],
  12973. src1->grad->ne[3],
  12974. nb1, nb2, nb3, offset);
  12975. src1->grad =
  12976. ggml_add_or_set(ctx,
  12977. src1->grad,
  12978. ggml_reshape(ctx,
  12979. ggml_cont(ctx, tensor_grad_view),
  12980. src1->grad),
  12981. zero_table);
  12982. }
  12983. } break;
  12984. case GGML_OP_SUB:
  12985. {
  12986. if (src0->grad) {
  12987. src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
  12988. }
  12989. if (src1->grad) {
  12990. src1->grad = ggml_sub_or_set(ctx, src1->grad, tensor->grad, zero_table);
  12991. }
  12992. } break;
  12993. case GGML_OP_MUL:
  12994. {
  12995. if (src0->grad) {
  12996. src0->grad =
  12997. ggml_add_or_set(ctx,
  12998. src0->grad,
  12999. ggml_mul(ctx, src1, tensor->grad),
  13000. zero_table);
  13001. }
  13002. if (src1->grad) {
  13003. src1->grad =
  13004. ggml_add_or_set(ctx,
  13005. src1->grad,
  13006. ggml_mul(ctx, src0, tensor->grad),
  13007. zero_table);
  13008. }
  13009. } break;
  13010. case GGML_OP_DIV:
  13011. {
  13012. if (src0->grad) {
  13013. src0->grad =
  13014. ggml_add_or_set(ctx,
  13015. src0->grad,
  13016. ggml_div(ctx, tensor->grad, src1),
  13017. zero_table);
  13018. }
  13019. if (src1->grad) {
  13020. src1->grad =
  13021. ggml_sub_or_set(ctx,
  13022. src1->grad,
  13023. ggml_mul(ctx,
  13024. tensor->grad,
  13025. ggml_div(ctx, tensor, src1)),
  13026. zero_table);
  13027. }
  13028. } break;
  13029. case GGML_OP_SQR:
  13030. {
  13031. if (src0->grad) {
  13032. src0->grad =
  13033. ggml_add_or_set(ctx,
  13034. src0->grad,
  13035. ggml_scale(ctx,
  13036. ggml_mul(ctx, src0, tensor->grad),
  13037. 2.0f),
  13038. zero_table);
  13039. }
  13040. } break;
  13041. case GGML_OP_SQRT:
  13042. {
  13043. if (src0->grad) {
  13044. src0->grad =
  13045. ggml_add_or_set(ctx,
  13046. src0->grad,
  13047. ggml_scale(ctx,
  13048. ggml_div(ctx,
  13049. tensor->grad,
  13050. tensor),
  13051. 0.5f),
  13052. zero_table);
  13053. }
  13054. } break;
  13055. case GGML_OP_LOG:
  13056. {
  13057. if (src0->grad) {
  13058. src0->grad =
  13059. ggml_add_or_set(ctx,
  13060. src0->grad,
  13061. ggml_div(ctx,
  13062. tensor->grad,
  13063. src0),
  13064. zero_table);
  13065. }
  13066. } break;
  13067. case GGML_OP_SUM:
  13068. {
  13069. if (src0->grad) {
  13070. src0->grad =
  13071. ggml_add1_or_set(ctx,
  13072. src0->grad,
  13073. tensor->grad,
  13074. zero_table);
  13075. }
  13076. } break;
  13077. case GGML_OP_SUM_ROWS:
  13078. {
  13079. if (src0->grad) {
  13080. src0->grad =
  13081. ggml_add_or_set(ctx,
  13082. src0->grad,
  13083. ggml_repeat(ctx,
  13084. tensor->grad,
  13085. src0->grad),
  13086. zero_table);
  13087. }
  13088. } break;
  13089. case GGML_OP_MEAN:
  13090. case GGML_OP_ARGMAX:
  13091. {
  13092. GGML_ASSERT(false); // TODO: implement
  13093. } break;
  13094. case GGML_OP_REPEAT:
  13095. {
  13096. // necessary for llama
  13097. if (src0->grad) {
  13098. src0->grad = ggml_add_or_set(ctx,
  13099. src0->grad,
  13100. ggml_repeat_back(ctx, tensor->grad, src0->grad),
  13101. zero_table);
  13102. }
  13103. } break;
  13104. case GGML_OP_REPEAT_BACK:
  13105. {
  13106. if (src0->grad) {
  13107. // TODO: test this
  13108. src0->grad = ggml_add_or_set(ctx,
  13109. src0->grad,
  13110. ggml_repeat(ctx, tensor->grad, src0->grad),
  13111. zero_table);
  13112. }
  13113. } break;
  13114. case GGML_OP_CONCAT:
  13115. {
  13116. GGML_ASSERT(false); // TODO: implement
  13117. } break;
  13118. case GGML_OP_SILU_BACK:
  13119. {
  13120. GGML_ASSERT(false); // TODO: not implemented
  13121. } break;
  13122. case GGML_OP_NORM:
  13123. {
  13124. GGML_ASSERT(false); // TODO: not implemented
  13125. } break;
  13126. case GGML_OP_RMS_NORM:
  13127. {
  13128. // necessary for llama
  13129. if (src0->grad) {
  13130. float eps;
  13131. memcpy(&eps, tensor->op_params, sizeof(float));
  13132. src0->grad = ggml_add_or_set(ctx,
  13133. src0->grad,
  13134. ggml_rms_norm_back(ctx, src0, tensor->grad, eps),
  13135. zero_table);
  13136. }
  13137. } break;
  13138. case GGML_OP_RMS_NORM_BACK:
  13139. {
  13140. GGML_ASSERT(false); // TODO: not implemented
  13141. } break;
  13142. case GGML_OP_GROUP_NORM:
  13143. {
  13144. GGML_ASSERT(false); // TODO: not implemented
  13145. } break;
  13146. case GGML_OP_MUL_MAT:
  13147. {
                // https://cs231n.github.io/optimization-2/#staged
                // # forward pass
                // s0 = np.random.randn(5, 10)
                // s1 = np.random.randn(10, 3)
                // t = s0.dot(s1)
                //
                // # now suppose we had the gradient on t from above in the circuit
                // dt = np.random.randn(*t.shape) # same shape as t
                // ds0 = dt.dot(s1.T) # .T gives the transpose of the matrix
                // ds1 = s0.T.dot(dt)
                //
                // tensor.shape [m,p,qq,rr]
                // src0.shape   [n,m,q1,r1]
                // src1.shape   [n,p,qq,rr]
                //
                // necessary for llama
  13161. if (src0->grad) {
  13162. struct ggml_tensor * s1_tg =
  13163. ggml_out_prod(ctx, // [n,m,qq,rr]
  13164. src1, // [n,p,qq,rr]
  13165. tensor->grad); // [m,p,qq,rr]
  13166. const int64_t qq = s1_tg->ne[2];
  13167. const int64_t rr = s1_tg->ne[3];
  13168. const int64_t q1 = src0->ne[2];
  13169. const int64_t r1 = src0->ne[3];
  13170. const bool ne2_broadcasted = qq > q1;
  13171. const bool ne3_broadcasted = rr > r1;
  13172. if (ne2_broadcasted || ne3_broadcasted) {
  13173. // sum broadcast repetitions of s1_tg into shape of src0
  13174. s1_tg = ggml_repeat_back(ctx, s1_tg, src0);
  13175. }
  13176. src0->grad =
  13177. ggml_add_or_set(ctx,
  13178. src0->grad, // [n,m,q1,r1]
  13179. s1_tg, // [n,m,q1,r1]
  13180. zero_table);
  13181. }
  13182. if (src1->grad) {
  13183. src1->grad =
  13184. ggml_add_or_set(ctx,
  13185. src1->grad, // [n,p,qq,rr]
  13186. // ggml_mul_mat(ctx, // [n,p,qq,rr]
  13187. // ggml_cont(ctx, // [m,n,q1,r1]
  13188. // ggml_transpose(ctx, src0)), // [m,n,q1,r1]
  13189. // tensor->grad), // [m,p,qq,rr]
  13190. // // when src0 is bigger than tensor->grad (this is mostly the case in llama),
  13191. // // avoid transpose of src0, rather transpose smaller tensor->grad
  13192. // // and then use ggml_out_prod
  13193. ggml_out_prod(ctx, // [n,p,qq,rr]
  13194. src0, // [n,m,q1,r1]
  13195. ggml_transpose(ctx, // [p,m,qq,rr]
  13196. tensor->grad)), // [m,p,qq,rr]
  13197. zero_table);
  13198. }
  13199. } break;
  13200. case GGML_OP_MUL_MAT_ID:
  13201. {
  13202. GGML_ASSERT(false); // TODO: not implemented
  13203. } break;
  13204. case GGML_OP_OUT_PROD:
  13205. {
  13206. GGML_ASSERT(false); // TODO: not implemented
  13207. } break;
  13208. case GGML_OP_SCALE:
  13209. {
  13210. // necessary for llama
  13211. if (src0->grad) {
  13212. float s;
  13213. memcpy(&s, tensor->op_params, sizeof(float));
  13214. src0->grad =
  13215. ggml_add_or_set(ctx,
  13216. src0->grad,
  13217. ggml_scale_impl(ctx, tensor->grad, s, false),
  13218. zero_table);
  13219. }
  13220. } break;
  13221. case GGML_OP_SET:
  13222. {
  13223. const size_t nb1 = ((int32_t *) tensor->op_params)[0];
  13224. const size_t nb2 = ((int32_t *) tensor->op_params)[1];
  13225. const size_t nb3 = ((int32_t *) tensor->op_params)[2];
  13226. const size_t offset = ((int32_t *) tensor->op_params)[3];
  13227. struct ggml_tensor * tensor_grad_view = NULL;
  13228. if (src0->grad || src1->grad) {
  13229. GGML_ASSERT(src0->type == tensor->type);
  13230. GGML_ASSERT(tensor->grad->type == tensor->type);
  13231. GGML_ASSERT(tensor->grad->type == src1->grad->type);
  13232. tensor_grad_view = ggml_view_4d(ctx,
  13233. tensor->grad,
  13234. src1->grad->ne[0],
  13235. src1->grad->ne[1],
  13236. src1->grad->ne[2],
  13237. src1->grad->ne[3],
  13238. nb1, nb2, nb3, offset);
  13239. }
  13240. if (src0->grad) {
  13241. src0->grad = ggml_add_or_set(ctx,
  13242. src0->grad,
  13243. ggml_acc_impl(ctx,
  13244. tensor->grad,
  13245. ggml_neg(ctx, tensor_grad_view),
  13246. nb1, nb2, nb3, offset, false),
  13247. zero_table);
  13248. }
  13249. if (src1->grad) {
  13250. src1->grad =
  13251. ggml_add_or_set(ctx,
  13252. src1->grad,
  13253. ggml_reshape(ctx,
  13254. ggml_cont(ctx, tensor_grad_view),
  13255. src1->grad),
  13256. zero_table);
  13257. }
  13258. } break;
  13259. case GGML_OP_CPY:
  13260. {
  13261. // necessary for llama
  13262. // cpy overwrites value of src1 by src0 and returns view(src1)
  13263. // the overwriting is mathematically equivalent to:
  13264. // tensor = src0 * 1 + src1 * 0
  13265. if (src0->grad) {
  13266. // dsrc0 = dtensor * 1
  13267. src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
  13268. }
  13269. if (src1->grad) {
  13270. // dsrc1 = dtensor * 0 -> noop
  13271. }
  13272. } break;
  13273. case GGML_OP_CONT:
  13274. {
  13275. // same as cpy
  13276. if (src0->grad) {
  13277. GGML_ASSERT(ggml_is_contiguous(src0->grad));
  13278. GGML_ASSERT(ggml_is_contiguous(tensor->grad));
  13279. src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
  13280. }
  13281. } break;
  13282. case GGML_OP_RESHAPE:
  13283. {
  13284. // necessary for llama
  13285. if (src0->grad) {
  13286. src0->grad =
  13287. ggml_add_or_set(ctx, src0->grad,
  13288. ggml_reshape(ctx,
  13289. ggml_is_contiguous(tensor->grad)
  13290. ? tensor->grad
  13291. : ggml_cont(ctx, tensor->grad),
  13292. src0->grad),
  13293. zero_table);
  13294. }
  13295. } break;
  13296. case GGML_OP_VIEW:
  13297. {
  13298. // necessary for llama
  13299. if (src0->grad) {
  13300. size_t offset;
  13301. memcpy(&offset, tensor->op_params, sizeof(offset));
  13302. size_t nb1 = tensor->nb[1];
  13303. size_t nb2 = tensor->nb[2];
  13304. size_t nb3 = tensor->nb[3];
  13305. if (src0->type != src0->grad->type) {
  13306. // gradient is typically F32, but src0 could be other type
  13307. size_t ng = ggml_element_size(src0->grad);
  13308. size_t n0 = ggml_element_size(src0);
  13309. GGML_ASSERT(offset % n0 == 0);
  13310. GGML_ASSERT(nb1 % n0 == 0);
  13311. GGML_ASSERT(nb2 % n0 == 0);
  13312. GGML_ASSERT(nb3 % n0 == 0);
  13313. offset = (offset / n0) * ng;
  13314. nb1 = (nb1 / n0) * ng;
  13315. nb2 = (nb2 / n0) * ng;
  13316. nb3 = (nb3 / n0) * ng;
  13317. }
  13318. src0->grad = ggml_acc_or_set(ctx, src0->grad, tensor->grad, nb1, nb2, nb3, offset, zero_table);
  13319. }
  13320. } break;
  13321. case GGML_OP_PERMUTE:
  13322. {
  13323. // necessary for llama
  13324. if (src0->grad) {
  13325. int32_t * axes = (int32_t *) tensor->op_params;
  13326. int axis0 = axes[0] & 0x3;
  13327. int axis1 = axes[1] & 0x3;
  13328. int axis2 = axes[2] & 0x3;
  13329. int axis3 = axes[3] & 0x3;
  13330. int axes_backward[4] = {0,0,0,0};
  13331. axes_backward[axis0] = 0;
  13332. axes_backward[axis1] = 1;
  13333. axes_backward[axis2] = 2;
  13334. axes_backward[axis3] = 3;
  13335. src0->grad =
  13336. ggml_add_or_set(ctx, src0->grad,
  13337. ggml_permute(ctx,
  13338. tensor->grad,
  13339. axes_backward[0],
  13340. axes_backward[1],
  13341. axes_backward[2],
  13342. axes_backward[3]),
  13343. zero_table);
  13344. }
  13345. } break;
  13346. case GGML_OP_TRANSPOSE:
  13347. {
  13348. // necessary for llama
  13349. if (src0->grad) {
  13350. src0->grad =
  13351. ggml_add_or_set(ctx, src0->grad,
  13352. ggml_transpose(ctx, tensor->grad),
  13353. zero_table);
  13354. }
  13355. } break;
  13356. case GGML_OP_GET_ROWS:
  13357. {
  13358. // necessary for llama (only for tokenizer)
  13359. if (src0->grad) {
  13360. src0->grad =
  13361. ggml_add_or_set(ctx, src0->grad,
  13362. // last ggml_get_rows_back argument src0->grad is only
  13363. // necessary to setup correct output shape
  13364. ggml_get_rows_back(ctx, tensor->grad, src1, src0->grad),
  13365. zero_table);
  13366. }
  13367. if (src1->grad) {
  13368. // noop
  13369. }
  13370. } break;
  13371. case GGML_OP_GET_ROWS_BACK:
  13372. {
  13373. GGML_ASSERT(false); // TODO: not implemented
  13374. } break;
  13375. case GGML_OP_DIAG:
  13376. {
  13377. GGML_ASSERT(false); // TODO: not implemented
  13378. } break;
  13379. case GGML_OP_DIAG_MASK_INF:
  13380. {
  13381. // necessary for llama
  13382. if (src0->grad) {
  13383. const int n_past = ((int32_t *) tensor->op_params)[0];
  13384. src0->grad =
  13385. ggml_add_or_set(ctx, src0->grad,
  13386. /* ggml_diag_mask_inf_impl() shouldn't be here */
  13387. /* ref: https://github.com/ggerganov/llama.cpp/pull/4203#discussion_r1412377992 */
  13388. ggml_diag_mask_zero_impl(ctx, tensor->grad, n_past, false),
  13389. zero_table);
  13390. }
  13391. } break;
  13392. case GGML_OP_DIAG_MASK_ZERO:
  13393. {
  13394. // necessary for llama
  13395. if (src0->grad) {
  13396. const int n_past = ((int32_t *) tensor->op_params)[0];
  13397. src0->grad =
  13398. ggml_add_or_set(ctx, src0->grad,
  13399. ggml_diag_mask_zero_impl(ctx, tensor->grad, n_past, false),
  13400. zero_table);
  13401. }
  13402. } break;
  13403. case GGML_OP_SOFT_MAX:
  13404. {
  13405. // necessary for llama
  13406. if (src0->grad) {
  13407. src0->grad =
  13408. ggml_add_or_set(ctx, src0->grad,
  13409. ggml_soft_max_back(ctx, tensor->grad, tensor),
  13410. zero_table);
  13411. }
  13412. } break;
  13413. case GGML_OP_SOFT_MAX_BACK:
  13414. {
  13415. GGML_ASSERT(false); // TODO: not implemented
  13416. } break;
  13417. case GGML_OP_ROPE:
  13418. {
  13419. // necessary for llama
  13420. if (src0->grad) {
  13421. //const int n_past = ((int32_t *) tensor->op_params)[0];
  13422. const int n_dims = ((int32_t *) tensor->op_params)[1];
  13423. const int mode = ((int32_t *) tensor->op_params)[2];
  13424. const int n_ctx = ((int32_t *) tensor->op_params)[3];
  13425. const int n_orig_ctx = ((int32_t *) tensor->op_params)[4];
  13426. float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow, xpos_base, xpos_down;
  13427. memcpy(&freq_base, (int32_t *) tensor->op_params + 5, sizeof(float));
  13428. memcpy(&freq_scale, (int32_t *) tensor->op_params + 6, sizeof(float));
  13429. memcpy(&ext_factor, (int32_t *) tensor->op_params + 7, sizeof(float));
  13430. memcpy(&attn_factor, (int32_t *) tensor->op_params + 8, sizeof(float));
  13431. memcpy(&beta_fast, (int32_t *) tensor->op_params + 9, sizeof(float));
  13432. memcpy(&beta_slow, (int32_t *) tensor->op_params + 10, sizeof(float));
  13433. memcpy(&xpos_base, (int32_t *) tensor->op_params + 11, sizeof(float));
  13434. memcpy(&xpos_down, (int32_t *) tensor->op_params + 12, sizeof(bool));
  13435. src0->grad = ggml_add_or_set(ctx,
  13436. src0->grad,
  13437. ggml_rope_back(ctx,
  13438. tensor->grad,
  13439. src1,
  13440. n_dims,
  13441. mode,
  13442. n_ctx,
  13443. n_orig_ctx,
  13444. freq_base,
  13445. freq_scale,
  13446. ext_factor,
  13447. attn_factor,
  13448. beta_fast,
  13449. beta_slow,
  13450. xpos_base,
  13451. xpos_down),
  13452. zero_table);
  13453. }
  13454. } break;
  13455. case GGML_OP_ROPE_BACK:
  13456. {
  13457. if (src0->grad) {
  13458. //const int n_past = ((int32_t *) tensor->op_params)[0];
  13459. const int n_dims = ((int32_t *) tensor->op_params)[1];
  13460. const int mode = ((int32_t *) tensor->op_params)[2];
  13461. const int n_ctx = ((int32_t *) tensor->op_params)[3];
  13462. const int n_orig_ctx = ((int32_t *) tensor->op_params)[4];
  13463. float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow, xpos_base, xpos_down;
  13464. memcpy(&freq_base, (int32_t *) tensor->op_params + 5, sizeof(float));
  13465. memcpy(&freq_scale, (int32_t *) tensor->op_params + 6, sizeof(float));
  13466. memcpy(&ext_factor, (int32_t *) tensor->op_params + 7, sizeof(float));
  13467. memcpy(&attn_factor, (int32_t *) tensor->op_params + 8, sizeof(float));
  13468. memcpy(&beta_fast, (int32_t *) tensor->op_params + 9, sizeof(float));
  13469. memcpy(&beta_slow, (int32_t *) tensor->op_params + 10, sizeof(float));
  13470. memcpy(&xpos_base, (int32_t *) tensor->op_params + 11, sizeof(float));
  13471. memcpy(&xpos_down, (int32_t *) tensor->op_params + 12, sizeof(bool));
  13472. src0->grad = ggml_add_or_set(ctx,
  13473. src0->grad,
  13474. ggml_rope_impl(ctx,
  13475. tensor->grad,
  13476. src1,
  13477. n_dims,
  13478. mode,
  13479. n_ctx,
  13480. n_orig_ctx,
  13481. freq_base,
  13482. freq_scale,
  13483. ext_factor,
  13484. attn_factor,
  13485. beta_fast,
  13486. beta_slow,
  13487. xpos_base,
  13488. xpos_down,
  13489. false),
  13490. zero_table);
  13491. }
  13492. } break;
  13493. case GGML_OP_ALIBI:
  13494. {
  13495. GGML_ASSERT(false); // TODO: not implemented
  13496. } break;
  13497. case GGML_OP_CLAMP:
  13498. {
  13499. GGML_ASSERT(false); // TODO: not implemented
  13500. } break;
  13501. case GGML_OP_CONV_TRANSPOSE_1D:
  13502. {
  13503. GGML_ASSERT(false); // TODO: not implemented
  13504. } break;
  13505. case GGML_OP_IM2COL:
  13506. {
  13507. GGML_ASSERT(false); // TODO: not implemented
  13508. } break;
  13509. case GGML_OP_CONV_TRANSPOSE_2D:
  13510. {
  13511. GGML_ASSERT(false); // TODO: not implemented
  13512. } break;
  13513. case GGML_OP_POOL_1D:
  13514. {
  13515. GGML_ASSERT(false); // TODO: not implemented
  13516. } break;
  13517. case GGML_OP_POOL_2D:
  13518. {
  13519. GGML_ASSERT(false); // TODO: not implemented
  13520. } break;
  13521. case GGML_OP_UPSCALE:
  13522. {
  13523. GGML_ASSERT(false); // TODO: not implemented
  13524. } break;
  13525. case GGML_OP_PAD:
  13526. {
  13527. GGML_ASSERT(false); // TODO: not implemented
  13528. } break;
  13529. case GGML_OP_ARGSORT:
  13530. {
  13531. GGML_ASSERT(false); // TODO: not implemented
  13532. } break;
  13533. case GGML_OP_LEAKY_RELU:
  13534. {
  13535. GGML_ASSERT(false); // TODO: not implemented
  13536. } break;
  13537. case GGML_OP_FLASH_ATTN:
  13538. {
  13539. struct ggml_tensor * flash_grad = NULL;
  13540. if (src0->grad || src1->grad || tensor->src[2]->grad) {
  13541. int32_t t = ggml_get_op_params_i32(tensor, 0);
  13542. GGML_ASSERT(t == 0 || t == 1);
  13543. bool masked = t != 0;
  13544. flash_grad =
  13545. ggml_flash_attn_back(ctx,
  13546. src0,
  13547. src1,
  13548. tensor->src[2],
  13549. tensor->grad,
  13550. masked);
  13551. }
  13552. struct ggml_tensor * src2 = tensor->src[2];
  13553. const int64_t elem_q = ggml_nelements(src0);
  13554. const int64_t elem_k = ggml_nelements(src1);
  13555. const int64_t elem_v = ggml_nelements(src2);
  13556. enum ggml_type result_type = flash_grad->type;
  13557. GGML_ASSERT(ggml_blck_size(result_type) == 1);
  13558. const size_t tsize = ggml_type_size(result_type);
  13559. const size_t offs_q = 0;
  13560. const size_t offs_k = offs_q + GGML_PAD(elem_q * tsize, GGML_MEM_ALIGN);
  13561. const size_t offs_v = offs_k + GGML_PAD(elem_k * tsize, GGML_MEM_ALIGN);
  13562. if (src0->grad) {
  13563. struct ggml_tensor * view_q = ggml_view_1d(ctx, flash_grad, elem_q, offs_q);
  13564. struct ggml_tensor * grad_q = ggml_reshape(ctx, view_q, src0);
  13565. src0->grad = ggml_add_or_set(ctx,
  13566. src0->grad,
  13567. grad_q,
  13568. zero_table);
  13569. }
  13570. if (src1->grad) {
  13571. struct ggml_tensor * view_k = ggml_view_1d(ctx, flash_grad, elem_k, offs_k);
  13572. struct ggml_tensor * grad_k = ggml_reshape(ctx, view_k, src1);
  13573. src1->grad = ggml_add_or_set(ctx,
  13574. src1->grad,
  13575. grad_k,
  13576. zero_table);
  13577. }
  13578. if (src2->grad) {
  13579. struct ggml_tensor * view_v = ggml_view_1d(ctx, flash_grad, elem_v, offs_v);
  13580. struct ggml_tensor * grad_v = ggml_reshape(ctx, view_v, src2);
  13581. src2->grad = ggml_add_or_set(ctx,
  13582. src2->grad,
  13583. grad_v,
  13584. zero_table);
  13585. }
  13586. } break;
  13587. case GGML_OP_FLASH_FF:
  13588. {
  13589. GGML_ASSERT(false); // not supported
  13590. } break;
  13591. case GGML_OP_FLASH_ATTN_BACK:
  13592. {
  13593. GGML_ASSERT(false); // not supported
  13594. } break;
  13595. case GGML_OP_WIN_PART:
  13596. case GGML_OP_WIN_UNPART:
  13597. case GGML_OP_UNARY:
  13598. {
  13599. switch (ggml_get_unary_op(tensor)) {
  13600. case GGML_UNARY_OP_ABS:
  13601. {
  13602. if (src0->grad) {
  13603. src0->grad =
  13604. ggml_add_or_set(ctx,
  13605. src0->grad,
  13606. ggml_mul(ctx,
  13607. ggml_sgn(ctx, src0),
  13608. tensor->grad),
  13609. zero_table);
  13610. }
  13611. } break;
  13612. case GGML_UNARY_OP_SGN:
  13613. {
  13614. if (src0->grad) {
  13615. // noop
  13616. }
  13617. } break;
  13618. case GGML_UNARY_OP_NEG:
  13619. {
  13620. if (src0->grad) {
  13621. src0->grad = ggml_sub_or_set(ctx, src0->grad, tensor->grad, zero_table);
  13622. }
  13623. } break;
  13624. case GGML_UNARY_OP_STEP:
  13625. {
  13626. if (src0->grad) {
  13627. // noop
  13628. }
  13629. } break;
  13630. case GGML_UNARY_OP_TANH:
  13631. {
  13632. GGML_ASSERT(false); // TODO: not implemented
  13633. } break;
  13634. case GGML_UNARY_OP_ELU:
  13635. {
  13636. GGML_ASSERT(false); // TODO: not implemented
  13637. } break;
  13638. case GGML_UNARY_OP_RELU:
  13639. {
  13640. if (src0->grad) {
  13641. src0->grad = ggml_add_or_set(ctx,
  13642. src0->grad,
  13643. ggml_mul(ctx,
  13644. ggml_step(ctx, src0),
  13645. tensor->grad),
  13646. zero_table);
  13647. }
  13648. } break;
  13649. case GGML_UNARY_OP_GELU:
  13650. {
  13651. GGML_ASSERT(false); // TODO: not implemented
  13652. } break;
  13653. case GGML_UNARY_OP_GELU_QUICK:
  13654. {
  13655. GGML_ASSERT(false); // TODO: not implemented
  13656. } break;
  13657. case GGML_UNARY_OP_SILU:
  13658. {
  13659. // necessary for llama
  13660. if (src0->grad) {
  13661. src0->grad = ggml_add_or_set(ctx,
  13662. src0->grad,
  13663. ggml_silu_back(ctx, src0, tensor->grad),
  13664. zero_table);
  13665. }
  13666. } break;
  13667. default:
  13668. GGML_ASSERT(false);
  13669. }
  13670. } break;
  13671. case GGML_OP_GET_REL_POS:
  13672. case GGML_OP_ADD_REL_POS:
  13673. case GGML_OP_MAP_UNARY:
  13674. case GGML_OP_MAP_BINARY:
  13675. case GGML_OP_MAP_CUSTOM1_F32:
  13676. case GGML_OP_MAP_CUSTOM2_F32:
  13677. case GGML_OP_MAP_CUSTOM3_F32:
  13678. case GGML_OP_MAP_CUSTOM1:
  13679. case GGML_OP_MAP_CUSTOM2:
  13680. case GGML_OP_MAP_CUSTOM3:
  13681. {
  13682. GGML_ASSERT(false); // not supported
  13683. } break;
  13684. case GGML_OP_CROSS_ENTROPY_LOSS:
  13685. {
  13686. if (src0->grad) {
  13687. src0->grad = ggml_add_or_set(ctx,
  13688. src0->grad,
  13689. ggml_cross_entropy_loss_back(ctx,
  13690. src0,
  13691. src1,
  13692. tensor->grad),
  13693. zero_table);
  13694. }
  13695. } break;
  13696. case GGML_OP_CROSS_ENTROPY_LOSS_BACK:
  13697. {
  13698. GGML_ASSERT(false); // not supported
  13699. } break;
  13700. case GGML_OP_NONE:
  13701. {
  13702. // nop
  13703. } break;
  13704. case GGML_OP_COUNT:
  13705. {
  13706. GGML_ASSERT(false);
  13707. } break;
  13708. }
  13709. for (int i = 0; i < GGML_MAX_SRC; ++i) {
  13710. if (tensor->src[i] && tensor->src[i]->grad) {
  13711. GGML_ASSERT(ggml_are_same_shape(tensor->src[i], tensor->src[i]->grad));
  13712. }
  13713. }
  13714. }
  13715. static void ggml_visit_parents(struct ggml_cgraph * cgraph, struct ggml_tensor * node) {
  13716. if (node->grad == NULL) {
        // this usually happens when we generate intermediate nodes from constants in the backward pass
        // it can also happen during the forward pass, if the user performs computations with constants
  13719. if (node->op != GGML_OP_NONE) {
  13720. //GGML_PRINT_DEBUG("%s: warning: node %p has no grad, but op %d\n", __func__, (void *) node, node->op);
  13721. }
  13722. }
  13723. // check if already visited
  13724. if (ggml_hash_insert(cgraph->visited_hash_table, node) == GGML_HASHTABLE_ALREADY_EXISTS) {
  13725. return;
  13726. }
  13727. for (int i = 0; i < GGML_MAX_SRC; ++i) {
  13728. const int k =
  13729. (cgraph->order == GGML_CGRAPH_EVAL_ORDER_LEFT_TO_RIGHT) ? i :
  13730. (cgraph->order == GGML_CGRAPH_EVAL_ORDER_RIGHT_TO_LEFT) ? (GGML_MAX_SRC-1-i) :
  13731. /* unknown order, just fall back to using i*/ i;
  13732. if (node->src[k]) {
  13733. ggml_visit_parents(cgraph, node->src[k]);
  13734. }
  13735. }
  13736. if (node->op == GGML_OP_NONE && node->grad == NULL) {
  13737. // reached a leaf node, not part of the gradient graph (e.g. a constant)
  13738. GGML_ASSERT(cgraph->n_leafs < cgraph->size);
  13739. if (strlen(node->name) == 0) {
  13740. ggml_format_name(node, "leaf_%d", cgraph->n_leafs);
  13741. }
  13742. cgraph->leafs[cgraph->n_leafs] = node;
  13743. cgraph->n_leafs++;
  13744. } else {
  13745. GGML_ASSERT(cgraph->n_nodes < cgraph->size);
  13746. if (strlen(node->name) == 0) {
  13747. ggml_format_name(node, "node_%d", cgraph->n_nodes);
  13748. }
  13749. cgraph->nodes[cgraph->n_nodes] = node;
  13750. if (cgraph->grads) {
  13751. cgraph->grads[cgraph->n_nodes] = node->grad;
  13752. }
  13753. cgraph->n_nodes++;
  13754. }
  13755. }
  13756. static void ggml_build_forward_impl(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor, bool expand) {
  13757. if (!expand) {
  13758. // TODO: this branch isn't accessible anymore, maybe move this to ggml_build_forward_expand
  13759. ggml_graph_clear(cgraph);
  13760. }
  13761. const int n0 = cgraph->n_nodes;
  13762. UNUSED(n0);
  13763. ggml_visit_parents(cgraph, tensor);
  13764. const int n_new = cgraph->n_nodes - n0;
  13765. GGML_PRINT_DEBUG("%s: visited %d new nodes\n", __func__, n_new);
  13766. if (n_new > 0) {
  13767. // the last added node should always be starting point
  13768. GGML_ASSERT(cgraph->nodes[cgraph->n_nodes - 1] == tensor);
  13769. }
  13770. }
  13771. void ggml_build_forward_expand(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor) {
  13772. ggml_build_forward_impl(cgraph, tensor, true);
  13773. }
  13774. void ggml_build_backward_expand(struct ggml_context * ctx, struct ggml_cgraph * gf, struct ggml_cgraph * gb, bool keep) {
  13775. GGML_ASSERT(gf->n_nodes > 0);
  13776. // if we are keeping the gradient graph, we have to detach the gradient nodes from the original graph
  13777. if (keep) {
  13778. for (int i = 0; i < gf->n_nodes; i++) {
  13779. struct ggml_tensor * node = gf->nodes[i];
  13780. if (node->grad) {
  13781. node->grad = ggml_dup_tensor(ctx, node);
  13782. gf->grads[i] = node->grad;
  13783. }
  13784. }
  13785. }
  13786. // remember original gradients which start with zero values
  13787. struct ggml_hash_set zero_table = ggml_hash_set_new(gf->size);
  13788. for (int i = 0; i < gf->n_nodes; i++) {
  13789. if (gf->grads[i]) {
  13790. ggml_hash_insert(zero_table, gf->grads[i]);
  13791. }
  13792. }
  13793. for (int i = gf->n_nodes - 1; i >= 0; i--) {
  13794. struct ggml_tensor * node = gf->nodes[i];
  13795. // inplace operations to add gradients are not created by ggml_compute_backward
  13796. // use allocator to automatically make inplace operations
  13797. if (node->grad) {
  13798. ggml_compute_backward(ctx, node, zero_table);
  13799. }
  13800. }
  13801. for (int i = 0; i < gf->n_nodes; i++) {
  13802. struct ggml_tensor * node = gf->nodes[i];
  13803. if (node->flags & GGML_TENSOR_FLAG_PARAM) {
  13804. GGML_PRINT_DEBUG("%s: found root node %p\n", __func__, (void *) node);
  13805. ggml_build_forward_expand(gb, node->grad);
  13806. }
  13807. }
  13808. ggml_hash_set_free(zero_table);
  13809. }
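// illustrative usage sketch (a minimal sequence, assuming `loss` is the forward output and its
// trainable inputs were marked with ggml_set_param()):
//   struct ggml_cgraph * gf = ggml_new_graph_custom(ctx, GGML_DEFAULT_GRAPH_SIZE, /*grads=*/true);
//   ggml_build_forward_expand(gf, loss);
//   struct ggml_cgraph * gb = ggml_graph_dup(ctx, gf);
//   ggml_build_backward_expand(ctx, gf, gb, /*keep=*/false);
//   ggml_graph_reset(gf);             // zero all gradients
//   ggml_set_f32(loss->grad, 1.0f);   // seed d(loss)/d(loss)
//   // ... computing gb then populates the gradients of the parameters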
  13810. static size_t ggml_graph_nbytes(size_t size, bool grads) {
  13811. size_t nbytes = sizeof(struct ggml_cgraph);
  13812. nbytes += size * sizeof(struct ggml_tensor *) * 2; // leafs + nodes
  13813. if (grads) {
  13814. nbytes += size * sizeof(struct ggml_tensor *); // grads
  13815. }
  13816. nbytes += ggml_hash_size(size * 2) * sizeof(struct ggml_tensor *); // hash set
  13817. return nbytes;
  13818. }
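// for example, on a 64-bit build (8-byte pointers assumed) with size = 2048 and grads enabled,
// this is roughly:
//   sizeof(struct ggml_cgraph)
//   + 2048*8*2   // nodes + leafs pointer arrays
//   + 2048*8     // grads pointer array
//   + 4099*8     // hash set keys, since ggml_hash_size(2*2048) == 4099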
  13819. size_t ggml_graph_overhead_custom(size_t size, bool grads) {
  13820. return GGML_OBJECT_SIZE + GGML_PAD(ggml_graph_nbytes(size, grads), GGML_MEM_ALIGN);
  13821. }
  13822. size_t ggml_graph_overhead(void) {
  13823. return ggml_graph_overhead_custom(GGML_DEFAULT_GRAPH_SIZE, false);
  13824. }
  13825. struct ggml_cgraph * ggml_new_graph_custom(struct ggml_context * ctx, size_t size, bool grads) {
  13826. const size_t obj_size = ggml_graph_nbytes(size, grads);
  13827. struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_GRAPH, obj_size);
  13828. struct ggml_cgraph * cgraph = (struct ggml_cgraph *) ((char *) ctx->mem_buffer + obj->offs);
  13829. struct ggml_tensor ** data_start = (struct ggml_tensor **) (cgraph + 1);
  13830. size_t hash_size = ggml_hash_size(size * 2);
  13831. struct ggml_tensor ** nodes_ptr = data_start;
  13832. struct ggml_tensor ** leafs_ptr = nodes_ptr + size;
  13833. struct ggml_tensor ** hash_keys_ptr = leafs_ptr + size;
  13834. struct ggml_tensor ** grads_ptr = grads ? hash_keys_ptr + hash_size : NULL;
  13835. // check that we allocated the correct amount of memory
  13836. assert(obj_size == (size_t) (
  13837. (grads ? (char *)(grads_ptr + size) : (char *)(hash_keys_ptr + hash_size)) - (char *)cgraph));
  13838. memset(hash_keys_ptr, 0, hash_size * sizeof(struct ggml_tensor *));
  13839. *cgraph = (struct ggml_cgraph) {
  13840. /*.size =*/ size,
  13841. /*.n_nodes =*/ 0,
  13842. /*.n_leafs =*/ 0,
  13843. /*.nodes =*/ nodes_ptr,
  13844. /*.grads =*/ grads_ptr,
  13845. /*.leafs =*/ leafs_ptr,
        /*.visited_hash_table =*/ { hash_size, hash_keys_ptr },
  13847. /*.order =*/ GGML_CGRAPH_EVAL_ORDER_LEFT_TO_RIGHT,
  13848. /*.perf_runs =*/ 0,
  13849. /*.perf_cycles =*/ 0,
  13850. /*.perf_time_us =*/ 0,
  13851. };
  13852. return cgraph;
  13853. }
  13854. struct ggml_cgraph * ggml_new_graph(struct ggml_context * ctx) {
  13855. return ggml_new_graph_custom(ctx, GGML_DEFAULT_GRAPH_SIZE, false);
  13856. }
  13857. struct ggml_cgraph ggml_graph_view(struct ggml_cgraph * cgraph0, int i0, int i1) {
  13858. struct ggml_cgraph cgraph = {
  13859. /*.size =*/ 0,
  13860. /*.n_nodes =*/ i1 - i0,
  13861. /*.n_leafs =*/ 0,
  13862. /*.nodes =*/ cgraph0->nodes + i0,
  13863. /*.grads =*/ cgraph0->grads ? cgraph0->grads + i0 : NULL,
  13864. /*.leafs =*/ NULL,
        /*.visited_hash_table =*/ { 0, NULL },
  13866. /*.order =*/ cgraph0->order,
  13867. /*.perf_runs =*/ 0,
  13868. /*.perf_cycles =*/ 0,
  13869. /*.perf_time_us =*/ 0,
  13870. };
  13871. return cgraph;
  13872. }
  13873. void ggml_graph_cpy(struct ggml_cgraph * src, struct ggml_cgraph * dst) {
  13874. GGML_ASSERT(dst->size >= src->n_leafs);
  13875. GGML_ASSERT(dst->size >= src->n_nodes);
  13876. GGML_ASSERT(dst->visited_hash_table.size >= src->visited_hash_table.size);
  13877. dst->n_leafs = src->n_leafs;
  13878. dst->n_nodes = src->n_nodes;
  13879. dst->order = src->order;
  13880. for (int i = 0; i < src->n_leafs; ++i) {
  13881. dst->leafs[i] = src->leafs[i];
  13882. }
  13883. for (int i = 0; i < src->n_nodes; ++i) {
  13884. dst->nodes[i] = src->nodes[i];
  13885. }
  13886. if (src->grads) {
  13887. GGML_ASSERT(dst->grads != NULL);
  13888. for (int i = 0; i < src->n_nodes; ++i) {
  13889. dst->grads[i] = src->grads[i];
  13890. }
  13891. }
  13892. for (size_t i = 0; i < src->visited_hash_table.size; ++i) {
  13893. if (src->visited_hash_table.keys[i]) {
  13894. ggml_hash_insert(dst->visited_hash_table, src->visited_hash_table.keys[i]);
  13895. }
  13896. }
  13897. }
  13898. struct ggml_cgraph * ggml_graph_dup(struct ggml_context * ctx, struct ggml_cgraph * cgraph) {
  13899. struct ggml_cgraph * result = ggml_new_graph_custom(ctx, cgraph->size, cgraph->grads != NULL);
  13900. ggml_graph_cpy(cgraph, result);
  13901. return result;
  13902. }
  13903. void ggml_graph_reset(struct ggml_cgraph * cgraph) {
  13904. GGML_ASSERT(cgraph->grads != NULL);
  13905. for (int i = 0; i < cgraph->n_nodes; i++) {
  13906. struct ggml_tensor * grad = cgraph->grads[i];
  13907. if (grad) {
  13908. ggml_set_zero(grad);
  13909. }
  13910. }
  13911. }
  13912. void ggml_graph_clear(struct ggml_cgraph * cgraph) {
  13913. cgraph->n_leafs = 0;
  13914. cgraph->n_nodes = 0;
  13915. memset(cgraph->visited_hash_table.keys, 0, cgraph->visited_hash_table.size * sizeof(struct ggml_tensor *));
  13916. }
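// note: ggml_graph_reset() zeroes the gradient *data* so a new backward pass can accumulate
// into it, whereas ggml_graph_clear() empties the graph structure itself (node/leaf counts and
// the visited hash table) so the graph can be rebuilt from scratch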
  13917. //
  13918. // thread data
  13919. //
// synchronization is done via busy loops
// spin locks were also tried, but the correct way to use them was unclear and the variants
// tried ended up slower than the busy loops
//
  13923. #ifdef __APPLE__
  13924. //#include <os/lock.h>
  13925. //
  13926. //typedef os_unfair_lock ggml_lock_t;
  13927. //
  13928. //#define ggml_lock_init(x) UNUSED(x)
  13929. //#define ggml_lock_destroy(x) UNUSED(x)
  13930. //#define ggml_lock_lock os_unfair_lock_lock
  13931. //#define ggml_lock_unlock os_unfair_lock_unlock
  13932. //
  13933. //#define GGML_LOCK_INITIALIZER OS_UNFAIR_LOCK_INIT
  13934. typedef int ggml_lock_t;
  13935. #define ggml_lock_init(x) UNUSED(x)
  13936. #define ggml_lock_destroy(x) UNUSED(x)
  13937. #define ggml_lock_lock(x) UNUSED(x)
  13938. #define ggml_lock_unlock(x) UNUSED(x)
  13939. #define GGML_LOCK_INITIALIZER 0
  13940. typedef pthread_t ggml_thread_t;
  13941. #define ggml_thread_create pthread_create
  13942. #define ggml_thread_join pthread_join
  13943. #else
  13944. //typedef pthread_spinlock_t ggml_lock_t;
  13945. //#define ggml_lock_init(x) pthread_spin_init(x, PTHREAD_PROCESS_PRIVATE)
  13946. //#define ggml_lock_destroy pthread_spin_destroy
  13947. //#define ggml_lock_lock pthread_spin_lock
  13948. //#define ggml_lock_unlock pthread_spin_unlock
  13949. typedef int ggml_lock_t;
  13950. #define ggml_lock_init(x) UNUSED(x)
  13951. #define ggml_lock_destroy(x) UNUSED(x)
  13952. #if defined(__x86_64__) || (defined(_MSC_VER) && defined(_M_AMD64))
  13953. #define ggml_lock_lock(x) _mm_pause()
  13954. #else
  13955. #define ggml_lock_lock(x) UNUSED(x)
  13956. #endif
  13957. #define ggml_lock_unlock(x) UNUSED(x)
  13958. #define GGML_LOCK_INITIALIZER 0
  13959. typedef pthread_t ggml_thread_t;
  13960. #define ggml_thread_create pthread_create
  13961. #define ggml_thread_join pthread_join
  13962. #endif
  13963. // Android's libc implementation "bionic" does not support setting affinity
  13964. #if defined(__gnu_linux__)
  13965. static void set_numa_thread_affinity(int thread_n) {
  13966. if (!ggml_is_numa()) {
  13967. return;
  13968. }
  13969. int node_num;
  13970. int rv;
  13971. size_t setsize = CPU_ALLOC_SIZE(g_state.numa.total_cpus);
  13972. switch(g_state.numa.numa_strategy) {
  13973. case GGML_NUMA_STRATEGY_DISTRIBUTE:
            // distribute: run this thread on NUMA node (thread_n % number of nodes)
  13975. node_num = thread_n % g_state.numa.n_nodes;
  13976. break;
  13977. case GGML_NUMA_STRATEGY_ISOLATE:
  13978. // run thread on current_node
  13979. node_num = g_state.numa.current_node;
  13980. break;
  13981. case GGML_NUMA_STRATEGY_NUMACTL:
  13982. // use the cpuset that numactl gave us
  13983. rv = pthread_setaffinity_np(pthread_self(), setsize, &g_state.numa.cpuset);
  13984. if (rv) {
  13985. fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n",strerror(rv));
  13986. }
  13987. return;
  13988. default:
  13989. return;
  13990. }
  13991. struct ggml_numa_node * node = &g_state.numa.nodes[node_num];
  13992. cpu_set_t * cpus = CPU_ALLOC(g_state.numa.total_cpus);
  13993. CPU_ZERO_S(setsize, cpus);
  13994. for (size_t i = 0; i < node->n_cpus; ++i) {
  13995. CPU_SET_S(node->cpus[i], setsize, cpus);
  13996. }
  13997. rv = pthread_setaffinity_np(pthread_self(), setsize, cpus);
  13998. if (rv) {
  13999. fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n", strerror(rv));
  14000. }
  14001. CPU_FREE(cpus);
  14002. }
  14003. static void clear_numa_thread_affinity(void) {
  14004. if (!ggml_is_numa()) {
  14005. return;
  14006. }
  14007. size_t setsize = CPU_ALLOC_SIZE(g_state.numa.total_cpus);
  14008. cpu_set_t * cpus = CPU_ALLOC(g_state.numa.total_cpus);
  14009. CPU_ZERO_S(setsize, cpus);
  14010. for (unsigned i = 0; i < g_state.numa.total_cpus; ++i) {
  14011. CPU_SET_S(i, setsize, cpus);
  14012. }
  14013. int rv = pthread_setaffinity_np(pthread_self(), setsize, cpus);
  14014. if (rv) {
  14015. fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n", strerror(rv));
  14016. }
  14017. CPU_FREE(cpus);
  14018. }
  14019. #else
  14020. // TODO: Windows etc.
  14021. // (the linux implementation may also work on BSD, someone should test)
  14022. static void set_numa_thread_affinity(int thread_n) { UNUSED(thread_n); }
  14023. static void clear_numa_thread_affinity(void) {}
  14024. #endif
  14025. struct ggml_compute_state_shared {
  14026. const struct ggml_cgraph * cgraph;
  14027. const struct ggml_cplan * cplan;
  14028. int64_t perf_node_start_cycles;
  14029. int64_t perf_node_start_time_us;
  14030. const int n_threads;
  14031. // synchronization primitives
  14032. atomic_int n_active; // num active threads
  14033. atomic_int node_n; // active graph node
  14034. atomic_int node_task; // active graph node task phase
  14035. ggml_abort_callback abort_callback; // abort ggml_graph_compute when true
  14036. void * abort_callback_data;
  14037. };
  14038. struct ggml_compute_state {
  14039. ggml_thread_t thrd;
  14040. int ith;
  14041. struct ggml_compute_state_shared * shared;
  14042. };
  14043. static void ggml_graph_compute_perf_stats_node(struct ggml_tensor * node, const struct ggml_compute_state_shared * st) {
  14044. int64_t cycles_cur = ggml_perf_cycles() - st->perf_node_start_cycles;
  14045. int64_t time_us_cur = ggml_perf_time_us() - st->perf_node_start_time_us;
  14046. node->perf_runs++;
  14047. node->perf_cycles += cycles_cur;
  14048. node->perf_time_us += time_us_cur;
  14049. }
  14050. static int ggml_get_n_tasks(struct ggml_tensor * node, int n_threads) {
  14051. int n_tasks = 0;
  14052. switch (node->op) {
  14053. case GGML_OP_CPY:
  14054. case GGML_OP_DUP:
  14055. case GGML_OP_ADD:
  14056. case GGML_OP_ADD1:
  14057. case GGML_OP_ACC:
  14058. {
  14059. n_tasks = n_threads;
  14060. } break;
  14061. case GGML_OP_SUB:
  14062. case GGML_OP_SQR:
  14063. case GGML_OP_SQRT:
  14064. case GGML_OP_LOG:
  14065. case GGML_OP_SUM:
  14066. case GGML_OP_SUM_ROWS:
  14067. case GGML_OP_MEAN:
  14068. case GGML_OP_ARGMAX:
  14069. case GGML_OP_REPEAT:
  14070. case GGML_OP_REPEAT_BACK:
  14071. case GGML_OP_LEAKY_RELU:
  14072. {
  14073. n_tasks = 1;
  14074. } break;
  14075. case GGML_OP_UNARY:
  14076. switch (ggml_get_unary_op(node)) {
  14077. case GGML_UNARY_OP_ABS:
  14078. case GGML_UNARY_OP_SGN:
  14079. case GGML_UNARY_OP_NEG:
  14080. case GGML_UNARY_OP_STEP:
  14081. case GGML_UNARY_OP_TANH:
  14082. case GGML_UNARY_OP_ELU:
  14083. case GGML_UNARY_OP_RELU:
14084. case GGML_UNARY_OP_HARDSWISH: // TODO: optimize for multiple threads
14085. case GGML_UNARY_OP_HARDSIGMOID: // TODO: optimize for multiple threads
  14086. {
  14087. n_tasks = 1;
  14088. } break;
  14089. case GGML_UNARY_OP_GELU:
  14090. case GGML_UNARY_OP_GELU_QUICK:
  14091. case GGML_UNARY_OP_SILU:
  14092. {
  14093. n_tasks = n_threads;
  14094. } break;
  14095. default:
  14096. GGML_ASSERT(false);
  14097. }
  14098. break;
  14099. case GGML_OP_SILU_BACK:
  14100. case GGML_OP_MUL:
  14101. case GGML_OP_DIV:
  14102. case GGML_OP_NORM:
  14103. case GGML_OP_RMS_NORM:
  14104. case GGML_OP_RMS_NORM_BACK:
  14105. case GGML_OP_GROUP_NORM:
  14106. case GGML_OP_CONCAT:
  14107. {
  14108. n_tasks = n_threads;
  14109. } break;
  14110. case GGML_OP_MUL_MAT:
  14111. {
  14112. n_tasks = n_threads;
  14113. // TODO: use different scheduling for different matrix sizes
  14114. //const int nr0 = ggml_nrows(node->src[0]);
  14115. //const int nr1 = ggml_nrows(node->src[1]);
  14116. //n_tasks = MIN(n_threads, MAX(1, nr0/128));
14117. //printf("nr0 = %8d, nr1 = %8d, nr0*nr1 = %8d, n_tasks = %d\n", nr0, nr1, nr0*nr1, n_tasks);
  14118. } break;
  14119. case GGML_OP_MUL_MAT_ID:
  14120. {
  14121. n_tasks = n_threads;
  14122. } break;
  14123. case GGML_OP_OUT_PROD:
  14124. {
  14125. n_tasks = n_threads;
  14126. } break;
  14127. case GGML_OP_SCALE:
  14128. case GGML_OP_SET:
  14129. case GGML_OP_CONT:
  14130. case GGML_OP_RESHAPE:
  14131. case GGML_OP_VIEW:
  14132. case GGML_OP_PERMUTE:
  14133. case GGML_OP_TRANSPOSE:
  14134. case GGML_OP_GET_ROWS:
  14135. case GGML_OP_GET_ROWS_BACK:
  14136. case GGML_OP_DIAG:
  14137. {
  14138. n_tasks = 1;
  14139. } break;
  14140. case GGML_OP_DIAG_MASK_ZERO:
  14141. case GGML_OP_DIAG_MASK_INF:
  14142. case GGML_OP_SOFT_MAX_BACK:
  14143. case GGML_OP_ROPE:
  14144. case GGML_OP_ROPE_BACK:
  14145. case GGML_OP_ADD_REL_POS:
  14146. {
  14147. n_tasks = n_threads;
  14148. } break;
  14149. case GGML_OP_ALIBI:
  14150. {
  14151. n_tasks = 1; //TODO
  14152. } break;
  14153. case GGML_OP_CLAMP:
  14154. {
  14155. n_tasks = 1; //TODO
  14156. } break;
  14157. case GGML_OP_SOFT_MAX:
  14158. {
  14159. n_tasks = MIN(n_threads, ggml_nrows(node->src[0]));
  14160. } break;
  14161. case GGML_OP_CONV_TRANSPOSE_1D:
  14162. {
  14163. n_tasks = n_threads;
  14164. } break;
  14165. case GGML_OP_IM2COL:
  14166. {
  14167. n_tasks = n_threads;
  14168. } break;
  14169. case GGML_OP_CONV_TRANSPOSE_2D:
  14170. {
  14171. n_tasks = n_threads;
  14172. } break;
  14173. case GGML_OP_POOL_1D:
  14174. case GGML_OP_POOL_2D:
  14175. {
  14176. n_tasks = 1;
  14177. } break;
  14178. case GGML_OP_UPSCALE:
  14179. {
  14180. n_tasks = n_threads;
  14181. } break;
  14182. case GGML_OP_PAD:
  14183. {
  14184. n_tasks = n_threads;
  14185. } break;
  14186. case GGML_OP_ARGSORT:
  14187. {
  14188. n_tasks = n_threads;
  14189. } break;
  14190. case GGML_OP_FLASH_ATTN:
  14191. {
  14192. n_tasks = n_threads;
  14193. } break;
  14194. case GGML_OP_FLASH_FF:
  14195. {
  14196. n_tasks = n_threads;
  14197. } break;
  14198. case GGML_OP_FLASH_ATTN_BACK:
  14199. {
  14200. n_tasks = n_threads;
  14201. } break;
  14202. case GGML_OP_WIN_PART:
  14203. case GGML_OP_WIN_UNPART:
  14204. case GGML_OP_GET_REL_POS:
  14205. case GGML_OP_MAP_UNARY:
  14206. case GGML_OP_MAP_BINARY:
  14207. case GGML_OP_MAP_CUSTOM1_F32:
  14208. case GGML_OP_MAP_CUSTOM2_F32:
  14209. case GGML_OP_MAP_CUSTOM3_F32:
  14210. {
  14211. n_tasks = 1;
  14212. } break;
  14213. case GGML_OP_MAP_CUSTOM1:
  14214. {
  14215. struct ggml_map_custom1_op_params * p = (struct ggml_map_custom1_op_params *) node->op_params;
  14216. if (p->n_tasks == GGML_N_TASKS_MAX) {
  14217. n_tasks = n_threads;
  14218. } else {
  14219. n_tasks = MIN(p->n_tasks, n_threads);
  14220. }
  14221. } break;
  14222. case GGML_OP_MAP_CUSTOM2:
  14223. {
  14224. struct ggml_map_custom2_op_params * p = (struct ggml_map_custom2_op_params *) node->op_params;
  14225. if (p->n_tasks == GGML_N_TASKS_MAX) {
  14226. n_tasks = n_threads;
  14227. } else {
  14228. n_tasks = MIN(p->n_tasks, n_threads);
  14229. }
  14230. } break;
  14231. case GGML_OP_MAP_CUSTOM3:
  14232. {
  14233. struct ggml_map_custom3_op_params * p = (struct ggml_map_custom3_op_params *) node->op_params;
  14234. if (p->n_tasks == GGML_N_TASKS_MAX) {
  14235. n_tasks = n_threads;
  14236. } else {
  14237. n_tasks = MIN(p->n_tasks, n_threads);
  14238. }
  14239. } break;
  14240. case GGML_OP_CROSS_ENTROPY_LOSS:
  14241. {
  14242. n_tasks = n_threads;
  14243. } break;
  14244. case GGML_OP_CROSS_ENTROPY_LOSS_BACK:
  14245. {
  14246. n_tasks = n_threads;
  14247. } break;
  14248. case GGML_OP_NONE:
  14249. {
  14250. n_tasks = 1;
  14251. } break;
  14252. case GGML_OP_COUNT:
  14253. {
  14254. GGML_ASSERT(false);
  14255. } break;
  14256. default:
  14257. {
  14258. fprintf(stderr, "%s: op not implemented: ", __func__);
  14259. if (node->op < GGML_OP_COUNT) {
  14260. fprintf(stderr, "%s\n", ggml_op_name(node->op));
  14261. } else {
  14262. fprintf(stderr, "%d\n", node->op);
  14263. }
  14264. GGML_ASSERT(false);
  14265. } break;
  14266. }
  14267. assert(n_tasks > 0);
  14268. return n_tasks;
  14269. }
  14270. static void ggml_graph_compute_thread_sync_node(int * node_n, struct ggml_compute_state * state, const bool do_yield) {
  14271. // wait for other threads to finish
  14272. const int last_node_n = * node_n;
  14273. while (true) {
  14274. if (do_yield) {
  14275. sched_yield();
  14276. }
  14277. * node_n = atomic_load(&state->shared->node_n);
  14278. if (* node_n != last_node_n) break;
  14279. }
  14280. }
  14281. static void ggml_graph_compute_thread_sync_task(int * task_phase, struct ggml_compute_state * state, const bool do_yield) {
  14282. // wait for other threads to finish
  14283. const int last_task_phase = * task_phase;
  14284. while (true) {
  14285. if (do_yield) {
  14286. sched_yield();
  14287. }
  14288. * task_phase = atomic_load(&state->shared->node_task);
  14289. if (* task_phase != last_task_phase) break;
  14290. }
  14291. }
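// Editor's note (derived from the loop below): each graph node runs in up to three phases -
// GGML_TASK_INIT, GGML_TASK_COMPUTE and GGML_TASK_FINALIZE. The last thread to finish a
// phase publishes the next one through the atomics above, while the other threads busy-wait
// in the two sync helpers.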
  14292. static thread_ret_t ggml_graph_compute_thread(void * data) {
  14293. struct ggml_compute_state * state = (struct ggml_compute_state *) data;
  14294. const struct ggml_cgraph * cgraph = state->shared->cgraph;
  14295. const struct ggml_cplan * cplan = state->shared->cplan;
  14296. const int n_threads = state->shared->n_threads;
  14297. set_numa_thread_affinity(state->ith);
  14298. int node_n = -1;
  14299. int task_phase = GGML_TASK_FINALIZE;
  14300. while (true) {
  14301. if (cplan->abort_callback && cplan->abort_callback(cplan->abort_callback_data)) {
  14302. state->shared->node_n += 1;
  14303. return (thread_ret_t) GGML_EXIT_ABORTED;
  14304. }
  14305. if (atomic_fetch_sub(&state->shared->n_active, 1) == 1) {
  14306. // all other threads are finished and spinning
14307. // do finalize and init here so we don't have to synchronize again
  14308. struct ggml_compute_params params = {
  14309. /*.type =*/ GGML_TASK_FINALIZE,
  14310. /*.ith =*/ 0,
  14311. /*.nth =*/ 0,
  14312. /*.wsize =*/ cplan->work_size,
  14313. /*.wdata =*/ cplan->work_data,
  14314. };
  14315. if (node_n != -1) {
  14316. /* FINALIZE */
  14317. struct ggml_tensor * node = cgraph->nodes[node_n];
  14318. if (GGML_OP_HAS_FINALIZE[node->op]) {
  14319. params.nth = ggml_get_n_tasks(node, n_threads);
  14320. ggml_compute_forward(&params, node);
  14321. }
  14322. ggml_graph_compute_perf_stats_node(node, state->shared);
  14323. }
14324. // distribute new work, or execute it directly if there is only one task (1T)
  14325. while (++node_n < cgraph->n_nodes) {
  14326. GGML_PRINT_DEBUG_5("%s: %d/%d\n", __func__, node_n, cgraph->n_nodes);
  14327. struct ggml_tensor * node = cgraph->nodes[node_n];
  14328. const int n_tasks = ggml_get_n_tasks(node, n_threads);
  14329. state->shared->perf_node_start_cycles = ggml_perf_cycles();
  14330. state->shared->perf_node_start_time_us = ggml_perf_time_us();
  14331. params.nth = n_tasks;
  14332. if (n_tasks == 1) {
  14333. /* INIT */
  14334. if (GGML_OP_HAS_INIT[node->op]) {
  14335. params.type = GGML_TASK_INIT;
  14336. ggml_compute_forward(&params, node);
  14337. }
14338. // TODO: maybe push node_n to the atomic, so that if other threads see n_tasks == 1
14339. // they can do something more efficient than spinning (?)
  14340. params.type = GGML_TASK_COMPUTE;
  14341. ggml_compute_forward(&params, node);
  14342. if (GGML_OP_HAS_FINALIZE[node->op]) {
  14343. params.type = GGML_TASK_FINALIZE;
  14344. ggml_compute_forward(&params, node);
  14345. }
  14346. ggml_graph_compute_perf_stats_node(node, state->shared);
  14347. } else {
  14348. break;
  14349. }
  14350. if (cplan->abort_callback && cplan->abort_callback(cplan->abort_callback_data)) {
  14351. break;
  14352. }
  14353. }
  14354. task_phase = GGML_TASK_INIT;
  14355. atomic_store(&state->shared->n_active, n_threads);
  14356. atomic_store(&state->shared->node_n, node_n);
  14357. atomic_store(&state->shared->node_task, task_phase);
  14358. } else {
  14359. ggml_graph_compute_thread_sync_node(&node_n, state, false);
  14360. ggml_graph_compute_thread_sync_task(&task_phase, state, false);
  14361. }
  14362. // check if we should stop
  14363. if (node_n >= cgraph->n_nodes) break;
  14364. /* INIT & COMPUTE */
  14365. struct ggml_tensor * node = cgraph->nodes[node_n];
  14366. const int n_tasks = ggml_get_n_tasks(node, n_threads);
  14367. struct ggml_compute_params params = {
  14368. /*.type =*/ GGML_TASK_INIT,
  14369. /*.ith =*/ state->ith,
  14370. /*.nth =*/ n_tasks,
  14371. /*.wsize =*/ cplan->work_size,
  14372. /*.wdata =*/ cplan->work_data,
  14373. };
  14374. if (state->ith < n_tasks) {
  14375. if (GGML_OP_HAS_INIT[node->op]) {
  14376. ggml_compute_forward(&params, node);
  14377. }
  14378. }
  14379. if (atomic_fetch_sub(&state->shared->n_active, 1) == 1) {
  14380. task_phase = GGML_TASK_COMPUTE;
  14381. atomic_store(&state->shared->n_active, n_threads);
  14382. atomic_store(&state->shared->node_task, task_phase);
  14383. }
  14384. else {
  14385. // TODO: this sched_yield can have significant impact on the performance - either positive or negative
  14386. // depending on the workload and the operating system.
14387. // since it is not clear what the best approach is, it should potentially become user-configurable
  14388. // ref: https://github.com/ggerganov/ggml/issues/291
  14389. // UPD: adding the do_yield flag seems to resolve the issue universally
  14390. const bool do_yield = node_n < 0 || cgraph->nodes[node_n]->op == GGML_OP_MUL_MAT;
  14391. ggml_graph_compute_thread_sync_task(&task_phase, state, do_yield);
  14392. }
  14393. if (state->ith < n_tasks) {
  14394. params.type = GGML_TASK_COMPUTE;
  14395. ggml_compute_forward(&params, node);
  14396. }
  14397. if (atomic_fetch_sub(&state->shared->n_active, 1) == 1) {
  14398. task_phase = GGML_TASK_FINALIZE;
  14399. atomic_store(&state->shared->n_active, n_threads);
  14400. atomic_store(&state->shared->node_task, task_phase);
  14401. }
  14402. else {
  14403. ggml_graph_compute_thread_sync_task(&task_phase, state, false);
  14404. }
  14405. }
  14406. return GGML_EXIT_SUCCESS;
  14407. }
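// Editor's note: ggml_graph_plan() walks the graph once to pick the thread count (capped by
// the largest per-node task count) and to estimate an upper bound on the scratch ("work")
// buffer the ops may need; the caller must point cplan.work_data at a buffer of at least
// cplan.work_size bytes before calling ggml_graph_compute().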
  14408. struct ggml_cplan ggml_graph_plan(const struct ggml_cgraph * cgraph, int n_threads) {
  14409. if (n_threads <= 0) {
  14410. n_threads = GGML_DEFAULT_N_THREADS;
  14411. }
  14412. size_t work_size = 0;
  14413. struct ggml_cplan cplan;
  14414. memset(&cplan, 0, sizeof(struct ggml_cplan));
  14415. int max_tasks = 1;
  14416. // thread scheduling for the different operations + work buffer size estimation
  14417. for (int i = 0; i < cgraph->n_nodes; i++) {
  14418. struct ggml_tensor * node = cgraph->nodes[i];
  14419. const int n_tasks = ggml_get_n_tasks(node, n_threads);
  14420. max_tasks = MAX(max_tasks, n_tasks);
  14421. size_t cur = 0;
  14422. switch (node->op) {
  14423. case GGML_OP_CPY:
  14424. case GGML_OP_DUP:
  14425. {
  14426. if (ggml_is_quantized(node->type)) {
  14427. cur = ggml_type_size(GGML_TYPE_F32) * node->ne[0] * n_tasks;
  14428. }
  14429. } break;
  14430. case GGML_OP_ADD:
  14431. case GGML_OP_ADD1:
  14432. {
  14433. if (ggml_is_quantized(node->src[0]->type)) {
  14434. cur = ggml_type_size(GGML_TYPE_F32) * node->src[0]->ne[0] * n_tasks;
  14435. }
  14436. } break;
  14437. case GGML_OP_ACC:
  14438. {
  14439. if (ggml_is_quantized(node->src[0]->type)) {
  14440. cur = ggml_type_size(GGML_TYPE_F32) * node->src[1]->ne[0] * n_tasks;
  14441. }
  14442. } break;
  14443. case GGML_OP_MUL_MAT:
  14444. {
  14445. const enum ggml_type vec_dot_type = type_traits[node->src[0]->type].vec_dot_type;
  14446. #if defined(GGML_USE_CLBLAST)
  14447. if (ggml_cl_can_mul_mat(node->src[0], node->src[1], node)) {
  14448. cur = ggml_cl_mul_mat_get_wsize(node->src[0], node->src[1], node);
  14449. } else
  14450. #endif
  14451. #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
  14452. if (ggml_compute_forward_mul_mat_use_blas(node)) {
  14453. if (node->src[0]->type != GGML_TYPE_F32) {
14454. // here we need memory for the fully dequantized matrix from src0
14455. // take into account that src0 can be broadcast across src1's dims 2 and 3
  14456. cur = ggml_type_size(GGML_TYPE_F32)
  14457. * node->src[0]->ne[0]*node->src[0]->ne[1]
  14458. * node->src[1]->ne[2]*node->src[1]->ne[3];
  14459. }
  14460. } else
  14461. #endif
  14462. if (node->src[1]->type != vec_dot_type) {
  14463. cur = ggml_row_size(vec_dot_type, ggml_nelements(node->src[1]));
  14464. }
  14465. } break;
  14466. case GGML_OP_MUL_MAT_ID:
  14467. {
  14468. cur = 0;
  14469. const struct ggml_tensor * src0 = node->src[2];
  14470. const struct ggml_tensor * src1 = node->src[1];
  14471. const enum ggml_type vec_dot_type = type_traits[src0->type].vec_dot_type;
  14472. if (src1->type != vec_dot_type) {
  14473. cur += ggml_row_size(vec_dot_type, ggml_nelements(src1));
  14474. }
  14475. const int n_as = ggml_get_op_params_i32(node, 1);
  14476. cur += GGML_PAD(cur, sizeof(int64_t)); // align
  14477. cur += n_as * sizeof(int64_t); // matrix_row_counts
  14478. cur += n_as * src1->ne[1] * sizeof(int64_t); // matrix_rows
  14479. } break;
  14480. case GGML_OP_OUT_PROD:
  14481. {
  14482. if (ggml_is_quantized(node->src[0]->type)) {
  14483. cur = ggml_type_size(GGML_TYPE_F32) * node->src[0]->ne[0] * n_tasks;
  14484. }
  14485. } break;
  14486. case GGML_OP_SOFT_MAX:
  14487. case GGML_OP_ROPE:
  14488. {
  14489. cur = ggml_type_size(GGML_TYPE_F32) * node->ne[0] * n_tasks;
  14490. } break;
  14491. case GGML_OP_CONV_TRANSPOSE_1D:
  14492. {
  14493. GGML_ASSERT(node->src[0]->ne[3] == 1);
  14494. GGML_ASSERT(node->src[1]->ne[2] == 1);
  14495. GGML_ASSERT(node->src[1]->ne[3] == 1);
  14496. const int64_t ne00 = node->src[0]->ne[0]; // K
  14497. const int64_t ne01 = node->src[0]->ne[1]; // Cout
  14498. const int64_t ne02 = node->src[0]->ne[2]; // Cin
  14499. const int64_t ne10 = node->src[1]->ne[0]; // L
  14500. const int64_t ne11 = node->src[1]->ne[1]; // Cin
  14501. if (node->src[0]->type == GGML_TYPE_F16 &&
  14502. node->src[1]->type == GGML_TYPE_F32) {
  14503. cur += sizeof(ggml_fp16_t)*ne00*ne01*ne02;
  14504. cur += sizeof(ggml_fp16_t)*ne10*ne11;
  14505. } else if (node->src[0]->type == GGML_TYPE_F32 &&
  14506. node->src[1]->type == GGML_TYPE_F32) {
  14507. cur += sizeof(float)*ne00*ne01*ne02;
  14508. cur += sizeof(float)*ne10*ne11;
  14509. } else {
  14510. GGML_ASSERT(false);
  14511. }
  14512. } break;
  14513. case GGML_OP_CONV_TRANSPOSE_2D:
  14514. {
  14515. const int64_t ne00 = node->src[0]->ne[0]; // W
  14516. const int64_t ne01 = node->src[0]->ne[1]; // H
  14517. const int64_t ne02 = node->src[0]->ne[2]; // Channels Out
  14518. const int64_t ne03 = node->src[0]->ne[3]; // Channels In
  14519. const int64_t ne10 = node->src[1]->ne[0]; // W
  14520. const int64_t ne11 = node->src[1]->ne[1]; // H
  14521. const int64_t ne12 = node->src[1]->ne[2]; // Channels In
  14522. cur += sizeof(ggml_fp16_t)*ne00*ne01*ne02*ne03;
  14523. cur += sizeof(ggml_fp16_t)*ne10*ne11*ne12;
  14524. } break;
  14525. case GGML_OP_FLASH_ATTN:
  14526. {
  14527. const int64_t ne11 = ggml_up(node->src[1]->ne[1], GGML_SOFT_MAX_UNROLL);
  14528. if (node->src[1]->type == GGML_TYPE_F32) {
  14529. cur = sizeof(float)*ne11*n_tasks; // TODO: this can become (n_tasks-1)
  14530. cur += sizeof(float)*ne11*n_tasks; // this is overestimated by x2
  14531. } else if (node->src[1]->type == GGML_TYPE_F16) {
  14532. cur = sizeof(float)*ne11*n_tasks; // TODO: this can become (n_tasks-1)
  14533. cur += sizeof(float)*ne11*n_tasks; // this is overestimated by x2
  14534. }
  14535. } break;
  14536. case GGML_OP_FLASH_FF:
  14537. {
  14538. if (node->src[1]->type == GGML_TYPE_F32) {
  14539. cur = sizeof(float)*node->src[1]->ne[1]*n_tasks; // TODO: this can become (n_tasks-1)
  14540. cur += sizeof(float)*node->src[1]->ne[1]*n_tasks; // this is overestimated by x2
  14541. } else if (node->src[1]->type == GGML_TYPE_F16) {
  14542. cur = sizeof(float)*node->src[1]->ne[1]*n_tasks; // TODO: this can become (n_tasks-1)
  14543. cur += sizeof(float)*node->src[1]->ne[1]*n_tasks; // this is overestimated by x2
  14544. }
  14545. } break;
  14546. case GGML_OP_FLASH_ATTN_BACK:
  14547. {
  14548. const int64_t D = node->src[0]->ne[0];
  14549. const int64_t ne11 = ggml_up(node->src[1]->ne[1], GGML_SOFT_MAX_UNROLL);
  14550. const int64_t mxDn = MAX(D, ne11) * 2; // *2 because of S and SM in ggml_compute_forward_flash_attn_back
  14551. if (node->src[1]->type == GGML_TYPE_F32) {
  14552. cur = sizeof(float)*mxDn*n_tasks; // TODO: this can become (n_tasks-1)
  14553. cur += sizeof(float)*mxDn*n_tasks; // this is overestimated by x2
  14554. } else if (node->src[1]->type == GGML_TYPE_F16) {
  14555. cur = sizeof(float)*mxDn*n_tasks; // TODO: this can become (n_tasks-1)
  14556. cur += sizeof(float)*mxDn*n_tasks; // this is overestimated by x2
  14557. }
  14558. } break;
  14559. case GGML_OP_CROSS_ENTROPY_LOSS:
  14560. {
  14561. cur = ggml_type_size(node->type)*(n_tasks + node->src[0]->ne[0]*n_tasks);
  14562. } break;
  14563. case GGML_OP_COUNT:
  14564. {
  14565. GGML_ASSERT(false);
  14566. } break;
  14567. default:
  14568. break;
  14569. }
  14570. work_size = MAX(work_size, cur);
  14571. }
  14572. if (work_size > 0) {
  14573. work_size += CACHE_LINE_SIZE*(n_threads - 1);
  14574. }
  14575. cplan.n_threads = MIN(max_tasks, n_threads);
  14576. cplan.work_size = work_size;
  14577. cplan.work_data = NULL;
  14578. return cplan;
  14579. }
  14580. int ggml_graph_compute(struct ggml_cgraph * cgraph, struct ggml_cplan * cplan) {
  14581. {
  14582. GGML_ASSERT(cplan);
  14583. GGML_ASSERT(cplan->n_threads > 0);
  14584. if (cplan->work_size > 0) {
  14585. GGML_ASSERT(cplan->work_data);
  14586. }
  14587. }
  14588. #ifdef GGML_USE_VULKAN
  14589. for (int i = 0; i < cgraph->n_nodes; i++) {
  14590. ggml_vk_preallocate_buffers_graph_cpu_assist(cgraph->nodes[i]);
  14591. }
  14592. ggml_vk_preallocate_buffers_cpu_assist();
  14593. for (int i = 0; i < cgraph->n_nodes; i++) {
  14594. ggml_vk_build_graph_cpu_assist(cgraph->nodes[i], i == cgraph->n_nodes - 1);
  14595. }
  14596. #endif
  14597. const int n_threads = cplan->n_threads;
  14598. struct ggml_compute_state_shared state_shared = {
  14599. /*.cgraph =*/ cgraph,
14600. /*.cplan =*/ cplan,
  14601. /*.perf_node_start_cycles =*/ 0,
  14602. /*.perf_node_start_time_us =*/ 0,
  14603. /*.n_threads =*/ n_threads,
  14604. /*.n_active =*/ n_threads,
  14605. /*.node_n =*/ -1,
  14606. /*.node_task =*/ GGML_TASK_FINALIZE,
  14607. /*.abort_callback =*/ NULL,
  14608. /*.abort_callback_data =*/ NULL,
  14609. };
  14610. struct ggml_compute_state * workers = alloca(sizeof(struct ggml_compute_state)*n_threads);
  14611. // create thread pool
  14612. if (n_threads > 1) {
  14613. for (int j = 1; j < n_threads; ++j) {
  14614. workers[j] = (struct ggml_compute_state) {
  14615. .thrd = 0,
  14616. .ith = j,
  14617. .shared = &state_shared,
  14618. };
  14619. const int rc = ggml_thread_create(&workers[j].thrd, NULL, ggml_graph_compute_thread, &workers[j]);
  14620. GGML_ASSERT(rc == 0);
  14621. UNUSED(rc);
  14622. }
  14623. }
  14624. workers[0].ith = 0;
  14625. workers[0].shared = &state_shared;
  14626. const int64_t perf_start_cycles = ggml_perf_cycles();
  14627. const int64_t perf_start_time_us = ggml_perf_time_us();
14628. // the calling thread acts as a worker too (ith == 0)
  14629. int compute_status = (size_t) ggml_graph_compute_thread(&workers[0]);
  14630. // don't leave affinity set on the main thread
  14631. clear_numa_thread_affinity();
  14632. // join or kill thread pool
  14633. if (n_threads > 1) {
  14634. for (int j = 1; j < n_threads; j++) {
  14635. const int rc = ggml_thread_join(workers[j].thrd, NULL);
  14636. GGML_ASSERT(rc == 0);
  14637. }
  14638. }
  14639. #ifdef GGML_USE_VULKAN
  14640. ggml_vk_graph_cleanup_cpu_assist();
  14641. #endif
  14642. // performance stats (graph)
  14643. {
  14644. int64_t perf_cycles_cur = ggml_perf_cycles() - perf_start_cycles;
  14645. int64_t perf_time_us_cur = ggml_perf_time_us() - perf_start_time_us;
  14646. cgraph->perf_runs++;
  14647. cgraph->perf_cycles += perf_cycles_cur;
  14648. cgraph->perf_time_us += perf_time_us_cur;
  14649. GGML_PRINT_DEBUG("%s: perf (%d) - cpu = %.3f / %.3f ms, wall = %.3f / %.3f ms\n",
  14650. __func__, cgraph->perf_runs,
  14651. (double) perf_cycles_cur / (double) ggml_cycles_per_ms(),
  14652. (double) cgraph->perf_cycles / (double) ggml_cycles_per_ms() / (double) cgraph->perf_runs,
  14653. (double) perf_time_us_cur / 1000.0,
  14654. (double) cgraph->perf_time_us / 1000.0 / cgraph->perf_runs);
  14655. }
  14656. return compute_status;
  14657. }
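// Example (editor's illustrative sketch, not part of the original source; the buffer handling
// is an assumption - any allocator works as long as work_data has work_size bytes):
//
//   struct ggml_cplan cplan = ggml_graph_plan(cgraph, /*n_threads =*/ 4);
//   uint8_t * work = cplan.work_size > 0 ? malloc(cplan.work_size) : NULL;
//   cplan.work_data = work;
//   ggml_graph_compute(cgraph, &cplan);
//   free(work);
//
// ggml_graph_compute_with_ctx() below performs the same steps using a buffer allocated from
// a ggml context.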
  14658. void ggml_graph_compute_with_ctx(struct ggml_context * ctx, struct ggml_cgraph * cgraph, int n_threads) {
  14659. struct ggml_cplan cplan = ggml_graph_plan(cgraph, n_threads);
  14660. struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_WORK_BUFFER, cplan.work_size);
  14661. cplan.work_data = (uint8_t *)ctx->mem_buffer + obj->offs;
  14662. ggml_graph_compute(cgraph, &cplan);
  14663. }
  14664. struct ggml_tensor * ggml_graph_get_tensor(struct ggml_cgraph * cgraph, const char * name) {
  14665. for (int i = 0; i < cgraph->n_leafs; i++) {
  14666. struct ggml_tensor * leaf = cgraph->leafs[i];
  14667. if (strcmp(leaf->name, name) == 0) {
  14668. return leaf;
  14669. }
  14670. }
  14671. for (int i = 0; i < cgraph->n_nodes; i++) {
  14672. struct ggml_tensor * node = cgraph->nodes[i];
  14673. if (strcmp(node->name, name) == 0) {
  14674. return node;
  14675. }
  14676. }
  14677. return NULL;
  14678. }
  14679. static void ggml_graph_export_leaf(const struct ggml_tensor * tensor, FILE * fout) {
  14680. const int64_t * ne = tensor->ne;
  14681. const size_t * nb = tensor->nb;
  14682. fprintf(fout, "%-6s %-12s %8d %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %16zu %16zu %16zu %16zu %16p %32s\n",
  14683. ggml_type_name(tensor->type),
  14684. ggml_op_name (tensor->op),
  14685. ggml_n_dims(tensor),
  14686. ne[0], ne[1], ne[2], ne[3],
  14687. nb[0], nb[1], nb[2], nb[3],
  14688. tensor->data,
  14689. tensor->name);
  14690. }
  14691. static void ggml_graph_export_node(const struct ggml_tensor * tensor, const char * arg, FILE * fout) {
  14692. const int64_t * ne = tensor->ne;
  14693. const size_t * nb = tensor->nb;
  14694. fprintf(fout, "%-6s %-6s %-12s %8d %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %16zu %16zu %16zu %16zu %16p %32s\n",
  14695. arg,
  14696. ggml_type_name(tensor->type),
  14697. ggml_op_name (tensor->op),
  14698. ggml_n_dims(tensor),
  14699. ne[0], ne[1], ne[2], ne[3],
  14700. nb[0], nb[1], nb[2], nb[3],
  14701. tensor->data,
  14702. tensor->name);
  14703. }
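// Editor's note (layout as written by the code below): the exported binary starts with magic,
// version, n_leafs and n_nodes (uint32) and size_eval (uint64), followed by one record per
// leaf (type, op, ne/nb per dim, name, op_params, raw data) and one record per node (same
// fields plus GGML_MAX_SRC int32 argument indices; node data itself is not written).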
  14704. void ggml_graph_export(const struct ggml_cgraph * cgraph, const char * fname) {
  14705. uint64_t size_eval = 0;
  14706. // compute size of intermediate results
  14707. // TODO: does not take into account scratch buffers !!!!
  14708. for (int i = 0; i < cgraph->n_nodes; ++i) {
  14709. size_eval += ggml_nbytes_pad(cgraph->nodes[i]);
  14710. }
  14711. // print
  14712. {
  14713. FILE * fout = stdout;
  14714. fprintf(fout, "\n");
  14715. fprintf(fout, "%-16s %8x\n", "magic", GGML_FILE_MAGIC);
  14716. fprintf(fout, "%-16s %8d\n", "version", GGML_FILE_VERSION);
  14717. fprintf(fout, "%-16s %8d\n", "leafs", cgraph->n_leafs);
  14718. fprintf(fout, "%-16s %8d\n", "nodes", cgraph->n_nodes);
  14719. fprintf(fout, "%-16s %" PRIu64 "\n", "eval", size_eval);
  14720. // header
  14721. fprintf(fout, "\n");
  14722. fprintf(fout, "%-6s %-12s %8s %8s %8s %8s %8s %16s %16s %16s %16s %16s %16s\n",
  14723. "TYPE", "OP", "NDIMS", "NE0", "NE1", "NE2", "NE3", "NB0", "NB1", "NB2", "NB3", "DATA", "NAME");
  14724. for (int i = 0; i < cgraph->n_leafs; ++i) {
  14725. ggml_graph_export_leaf(cgraph->leafs[i], fout);
  14726. GGML_ASSERT(cgraph->leafs[i]->op == GGML_OP_NONE);
  14727. GGML_ASSERT(cgraph->leafs[i]->src[0] == NULL);
  14728. GGML_ASSERT(cgraph->leafs[i]->src[1] == NULL);
  14729. }
  14730. // header
  14731. fprintf(fout, "\n");
  14732. fprintf(fout, "%-6s %-6s %-12s %8s %8s %8s %8s %8s %16s %16s %16s %16s %8s %16s %16s\n",
  14733. "ARG", "TYPE", "OP", "NDIMS", "NE0", "NE1", "NE2", "NE3", "NB0", "NB1", "NB2", "NB3", "NTASKS", "DATA", "NAME");
  14734. for (int i = 0; i < cgraph->n_nodes; ++i) {
  14735. ggml_graph_export_node(cgraph->nodes[i], "DST", fout);
  14736. for (int j = 0; j < GGML_MAX_SRC; ++j) {
  14737. if (cgraph->nodes[i]->src[j]) {
  14738. ggml_graph_export_node(cgraph->nodes[i]->src[j], "SRC", fout);
  14739. }
  14740. }
  14741. fprintf(fout, "\n");
  14742. }
  14743. fprintf(fout, "\n");
  14744. }
  14745. // write binary data
  14746. {
  14747. FILE * fout = fopen(fname, "wb");
  14748. if (!fout) {
  14749. fprintf(stderr, "%s: failed to open %s\n", __func__, fname);
  14750. return;
  14751. }
  14752. // header
  14753. {
  14754. const uint32_t magic = GGML_FILE_MAGIC;
  14755. const uint32_t version = GGML_FILE_VERSION;
  14756. const uint32_t n_leafs = cgraph->n_leafs;
  14757. const uint32_t n_nodes = cgraph->n_nodes;
  14758. fwrite(&magic, sizeof(uint32_t), 1, fout);
  14759. fwrite(&version, sizeof(uint32_t), 1, fout);
  14760. fwrite(&n_leafs, sizeof(uint32_t), 1, fout);
  14761. fwrite(&n_nodes, sizeof(uint32_t), 1, fout);
  14762. fwrite(&size_eval, sizeof(uint64_t), 1, fout);
  14763. }
  14764. // leafs
  14765. {
  14766. for (int i = 0; i < cgraph->n_leafs; ++i) {
  14767. const struct ggml_tensor * tensor = cgraph->leafs[i];
  14768. const uint32_t type = tensor->type;
  14769. const uint32_t op = tensor->op;
  14770. fwrite(&type, sizeof(uint32_t), 1, fout);
  14771. fwrite(&op, sizeof(uint32_t), 1, fout);
  14772. for (int j = 0; j < GGML_MAX_DIMS; ++j) {
  14773. const uint64_t ne = tensor->ne[j];
  14774. const uint64_t nb = tensor->nb[j];
  14775. fwrite(&ne, sizeof(uint64_t), 1, fout);
  14776. fwrite(&nb, sizeof(uint64_t), 1, fout);
  14777. }
  14778. fwrite(tensor->name, sizeof(char), GGML_MAX_NAME, fout);
  14779. fwrite(tensor->op_params, sizeof(char), GGML_MAX_OP_PARAMS, fout);
  14780. // dump the data
  14781. // TODO: pad this to 32 byte boundary
  14782. {
  14783. const size_t size = ggml_nbytes(tensor);
  14784. fwrite(tensor->data, sizeof(char), size, fout);
  14785. }
  14786. }
  14787. }
  14788. // nodes
  14789. {
  14790. for (int i = 0; i < cgraph->n_nodes; ++i) {
  14791. const struct ggml_tensor * tensor = cgraph->nodes[i];
  14792. const uint32_t type = tensor->type;
  14793. const uint32_t op = tensor->op;
  14794. fwrite(&type, sizeof(uint32_t), 1, fout);
  14795. fwrite(&op, sizeof(uint32_t), 1, fout);
  14796. for (int j = 0; j < GGML_MAX_DIMS; ++j) {
  14797. const uint64_t ne = tensor->ne[j];
  14798. const uint64_t nb = tensor->nb[j];
  14799. fwrite(&ne, sizeof(uint64_t), 1, fout);
  14800. fwrite(&nb, sizeof(uint64_t), 1, fout);
  14801. }
  14802. fwrite(tensor->name, sizeof(char), GGML_MAX_NAME, fout);
  14803. fwrite(tensor->op_params, sizeof(char), GGML_MAX_OP_PARAMS, fout);
  14804. // output the op arguments
  14805. {
  14806. struct ggml_tensor * args[GGML_MAX_SRC] = { NULL };
  14807. for (int j = 0; j < GGML_MAX_SRC; ++j) {
  14808. args[j] = tensor->src[j];
  14809. }
  14810. for (int j = 0; j < GGML_MAX_SRC; ++j) {
  14811. if (args[j]) {
  14812. int32_t idx = -1;
  14813. // check if leaf
  14814. {
  14815. for (int k = 0; k < cgraph->n_leafs; ++k) {
  14816. if (args[j] == cgraph->leafs[k]) {
  14817. idx = k;
  14818. break;
  14819. }
  14820. }
  14821. }
  14822. // check if node
  14823. if (idx == -1) {
  14824. for (int k = 0; k < cgraph->n_nodes; ++k) {
  14825. if (args[j] == cgraph->nodes[k]) {
  14826. idx = cgraph->n_leafs + k;
  14827. break;
  14828. }
  14829. }
  14830. }
  14831. if (idx == -1) {
  14832. fprintf(stderr, "%s: failed to find tensor, arg = %d, node = %d\n", __func__, j, i);
  14833. fclose(fout);
  14834. return;
  14835. }
  14836. fwrite(&idx, sizeof(int32_t), 1, fout);
  14837. } else {
  14838. const int32_t nul = -1;
  14839. fwrite(&nul, sizeof(int32_t), 1, fout);
  14840. }
  14841. }
  14842. }
  14843. }
  14844. }
  14845. fclose(fout);
  14846. }
  14847. }
  14848. struct ggml_cgraph * ggml_graph_import(const char * fname, struct ggml_context ** ctx_data, struct ggml_context ** ctx_eval) {
  14849. assert(*ctx_data == NULL);
  14850. assert(*ctx_eval == NULL);
  14851. struct ggml_cgraph * result = NULL;
  14852. struct ggml_tensor * data = NULL;
  14853. // read file into data
  14854. {
  14855. FILE * fin = fopen(fname, "rb");
  14856. if (!fin) {
  14857. fprintf(stderr, "%s: failed to open %s\n", __func__, fname);
  14858. return result;
  14859. }
  14860. size_t fsize = 0;
  14861. fseek(fin, 0, SEEK_END);
  14862. fsize = ftell(fin);
  14863. fseek(fin, 0, SEEK_SET);
  14864. // create the data context
  14865. {
  14866. const size_t overhead = 1*ggml_tensor_overhead();
  14867. struct ggml_init_params params = {
  14868. .mem_size = fsize + overhead,
  14869. .mem_buffer = NULL,
  14870. .no_alloc = false,
  14871. };
  14872. *ctx_data = ggml_init(params);
  14873. if (!*ctx_data) {
  14874. fprintf(stderr, "%s: failed to create ggml context\n", __func__);
  14875. fclose(fin);
  14876. return result;
  14877. }
  14878. }
  14879. data = ggml_new_tensor_1d(*ctx_data, GGML_TYPE_I8, fsize);
  14880. {
  14881. const size_t ret = fread(data->data, sizeof(char), fsize, fin);
  14882. if (ret != fsize) {
  14883. fprintf(stderr, "%s: failed to read %s\n", __func__, fname);
  14884. fclose(fin);
  14885. return result;
  14886. }
  14887. }
  14888. fclose(fin);
  14889. }
  14890. // populate result
  14891. {
  14892. char * ptr = (char *) data->data;
  14893. const uint32_t magic = *(const uint32_t *) ptr; ptr += sizeof(magic);
  14894. if (magic != GGML_FILE_MAGIC) {
  14895. fprintf(stderr, "%s: invalid magic number, got %08x\n", __func__, magic);
  14896. return result;
  14897. }
  14898. const uint32_t version = *(const uint32_t *) ptr; ptr += sizeof(version);
  14899. if (version != GGML_FILE_VERSION) {
  14900. fprintf(stderr, "%s: invalid version number\n", __func__);
  14901. return result;
  14902. }
  14903. const uint32_t n_leafs = *(const uint32_t *) ptr; ptr += sizeof(n_leafs);
  14904. const uint32_t n_nodes = *(const uint32_t *) ptr; ptr += sizeof(n_nodes);
  14905. const uint64_t size_eval = *(const uint64_t *) ptr; ptr += sizeof(size_eval);
  14906. const int graph_size = MAX(n_leafs, n_nodes);
  14907. // create the data context
  14908. {
  14909. const size_t overhead = (n_leafs + n_nodes)*ggml_tensor_overhead() + ggml_graph_overhead_custom(graph_size, false);
  14910. struct ggml_init_params params = {
  14911. .mem_size = size_eval + overhead,
  14912. .mem_buffer = NULL,
  14913. .no_alloc = true,
  14914. };
  14915. *ctx_eval = ggml_init(params);
  14916. if (!*ctx_eval) {
  14917. fprintf(stderr, "%s: failed to create ggml context\n", __func__);
  14918. return result;
  14919. }
  14920. }
  14921. result = ggml_new_graph_custom(*ctx_eval, graph_size, false);
  14922. result->n_leafs = n_leafs;
  14923. result->n_nodes = n_nodes;
  14924. // leafs
  14925. {
  14926. uint32_t type;
  14927. uint32_t op;
  14928. for (uint32_t i = 0; i < n_leafs; ++i) {
  14929. type = *(const uint32_t *) ptr; ptr += sizeof(type);
  14930. op = *(const uint32_t *) ptr; ptr += sizeof(op);
  14931. int64_t ne[GGML_MAX_DIMS];
  14932. size_t nb[GGML_MAX_DIMS];
  14933. for (int j = 0; j < GGML_MAX_DIMS; ++j) {
  14934. uint64_t ne_cur;
  14935. uint64_t nb_cur;
  14936. ne_cur = *(const uint64_t *) ptr; ptr += sizeof(ne_cur);
  14937. nb_cur = *(const uint64_t *) ptr; ptr += sizeof(nb_cur);
  14938. ne[j] = ne_cur;
  14939. nb[j] = nb_cur;
  14940. }
  14941. struct ggml_tensor * tensor = ggml_new_tensor(*ctx_eval, (enum ggml_type) type, GGML_MAX_DIMS, ne);
  14942. tensor->op = (enum ggml_op) op;
  14943. memcpy(tensor->name, ptr, GGML_MAX_NAME); ptr += GGML_MAX_NAME;
  14944. memcpy(tensor->op_params, ptr, GGML_MAX_OP_PARAMS); ptr += GGML_MAX_OP_PARAMS;
  14945. tensor->data = (void *) ptr;
  14946. for (int j = 0; j < GGML_MAX_DIMS; ++j) {
  14947. tensor->nb[j] = nb[j];
  14948. }
  14949. result->leafs[i] = tensor;
  14950. ptr += ggml_nbytes(tensor);
  14951. fprintf(stderr, "%s: loaded leaf %u: '%16s', %9zu bytes\n", __func__, i, tensor->name, ggml_nbytes(tensor));
  14952. }
  14953. }
  14954. ggml_set_no_alloc(*ctx_eval, false);
  14955. // nodes
  14956. {
  14957. uint32_t type;
  14958. uint32_t op;
  14959. for (uint32_t i = 0; i < n_nodes; ++i) {
  14960. type = *(const uint32_t *) ptr; ptr += sizeof(type);
  14961. op = *(const uint32_t *) ptr; ptr += sizeof(op);
  14962. enum ggml_op eop = (enum ggml_op) op;
  14963. int64_t ne[GGML_MAX_DIMS];
  14964. size_t nb[GGML_MAX_DIMS];
  14965. for (int j = 0; j < GGML_MAX_DIMS; ++j) {
  14966. uint64_t ne_cur;
  14967. uint64_t nb_cur;
  14968. ne_cur = *(const uint64_t *) ptr; ptr += sizeof(ne_cur);
  14969. nb_cur = *(const uint64_t *) ptr; ptr += sizeof(nb_cur);
  14970. ne[j] = ne_cur;
  14971. nb[j] = nb_cur;
  14972. }
  14973. const char * ptr_name = ptr; ptr += GGML_MAX_NAME;
  14974. const char * ptr_op_params = ptr; ptr += GGML_MAX_OP_PARAMS;
  14975. const int32_t * ptr_arg_idx = (const int32_t *) ptr; ptr += GGML_MAX_SRC*sizeof(int32_t);
  14976. struct ggml_tensor * args[GGML_MAX_SRC] = { NULL };
  14977. // parse args
  14978. for (int j = 0; j < GGML_MAX_SRC; ++j) {
  14979. const int32_t arg_idx = ptr_arg_idx[j];
  14980. if (arg_idx == -1) {
  14981. continue;
  14982. }
  14983. if (arg_idx < result->n_leafs) {
  14984. args[j] = result->leafs[arg_idx];
  14985. } else {
  14986. args[j] = result->nodes[arg_idx - result->n_leafs];
  14987. }
  14988. }
  14989. // create the tensor
  14990. // "view" operations are handled differently
  14991. // TODO: handle inplace ops - currently a copy is always made
  14992. struct ggml_tensor * tensor = NULL;
  14993. switch (eop) {
  14994. // TODO: implement other view ops
  14995. case GGML_OP_RESHAPE:
  14996. {
  14997. tensor = ggml_reshape_4d(*ctx_eval, args[0], ne[0], ne[1], ne[2], ne[3]);
  14998. } break;
  14999. case GGML_OP_VIEW:
  15000. {
  15001. tensor = ggml_view_4d(*ctx_eval, args[0], ne[0], ne[1], ne[2], ne[3], 0, 0, 0, 0);
  15002. size_t offs;
  15003. memcpy(&offs, ptr_op_params, sizeof(offs));
  15004. tensor->data = ((char *) tensor->data) + offs;
  15005. } break;
  15006. case GGML_OP_TRANSPOSE:
  15007. {
  15008. tensor = ggml_transpose(*ctx_eval, args[0]);
  15009. } break;
  15010. case GGML_OP_PERMUTE:
  15011. {
  15012. tensor = ggml_view_4d(*ctx_eval, args[0], ne[0], ne[1], ne[2], ne[3], 0, 0, 0, 0);
  15013. } break;
  15014. default:
  15015. {
  15016. tensor = ggml_new_tensor(*ctx_eval, (enum ggml_type) type, GGML_MAX_DIMS, ne);
  15017. tensor->op = eop;
  15018. } break;
  15019. }
  15020. memcpy(tensor->name, ptr_name, GGML_MAX_NAME);
  15021. memcpy(tensor->op_params, ptr_op_params, GGML_MAX_OP_PARAMS);
  15022. for (int j = 0; j < GGML_MAX_DIMS; ++j) {
  15023. tensor->nb[j] = nb[j];
  15024. }
  15025. for (int j = 0; j < GGML_MAX_SRC; ++j) {
  15026. tensor->src[j] = args[j];
  15027. }
  15028. result->nodes[i] = tensor;
  15029. fprintf(stderr, "%s: loaded node %u: '%16s', %9zu bytes\n", __func__, i, tensor->name, ggml_nbytes(tensor));
  15030. }
  15031. }
  15032. }
  15033. return result;
  15034. }
  15035. void ggml_graph_print(const struct ggml_cgraph * cgraph) {
  15036. int64_t perf_total_per_op_us[GGML_OP_COUNT] = {0};
  15037. GGML_PRINT("=== GRAPH ===\n");
  15038. GGML_PRINT("n_nodes = %d\n", cgraph->n_nodes);
  15039. for (int i = 0; i < cgraph->n_nodes; i++) {
  15040. struct ggml_tensor * node = cgraph->nodes[i];
  15041. perf_total_per_op_us[node->op] += MAX(1, node->perf_time_us);
  15042. GGML_PRINT(" - %3d: [ %5" PRId64 ", %5" PRId64 ", %5" PRId64 "] %16s %s (%3d) cpu = %7.3f / %7.3f ms, wall = %7.3f / %7.3f ms\n",
  15043. i,
  15044. node->ne[0], node->ne[1], node->ne[2],
  15045. ggml_op_name(node->op), (node->flags & GGML_TENSOR_FLAG_PARAM) ? "x" : node->grad ? "g" : " ", node->perf_runs,
  15046. (double) node->perf_cycles / (double) ggml_cycles_per_ms(),
  15047. (double) node->perf_cycles / (double) ggml_cycles_per_ms() / (double) node->perf_runs,
  15048. (double) node->perf_time_us / 1000.0,
  15049. (double) node->perf_time_us / 1000.0 / node->perf_runs);
  15050. }
  15051. GGML_PRINT("n_leafs = %d\n", cgraph->n_leafs);
  15052. for (int i = 0; i < cgraph->n_leafs; i++) {
  15053. struct ggml_tensor * node = cgraph->leafs[i];
  15054. GGML_PRINT(" - %3d: [ %5" PRId64 ", %5" PRId64 "] %8s %16s\n",
  15055. i,
  15056. node->ne[0], node->ne[1],
  15057. ggml_op_name(node->op),
  15058. ggml_get_name(node));
  15059. }
  15060. for (int i = 0; i < GGML_OP_COUNT; i++) {
  15061. if (perf_total_per_op_us[i] == 0) {
  15062. continue;
  15063. }
  15064. GGML_PRINT("perf_total_per_op_us[%16s] = %7.3f ms\n", ggml_op_name(i), (double) perf_total_per_op_us[i] / 1000.0);
  15065. }
  15066. GGML_PRINT("========================================\n");
  15067. }
  15068. // check if node is part of the graph
  15069. static bool ggml_graph_find(const struct ggml_cgraph * cgraph, const struct ggml_tensor * node) {
  15070. if (cgraph == NULL) {
  15071. return true;
  15072. }
  15073. for (int i = 0; i < cgraph->n_nodes; i++) {
  15074. if (cgraph->nodes[i] == node) {
  15075. return true;
  15076. }
  15077. }
  15078. return false;
  15079. }
  15080. static struct ggml_tensor * ggml_graph_get_parent(const struct ggml_cgraph * cgraph, const struct ggml_tensor * node) {
  15081. for (int i = 0; i < cgraph->n_nodes; i++) {
  15082. struct ggml_tensor * parent = cgraph->nodes[i];
  15083. if (parent->grad == node) {
  15084. return parent;
  15085. }
  15086. }
  15087. return NULL;
  15088. }
  15089. static void ggml_graph_dump_dot_node_edge(FILE * fp, const struct ggml_cgraph * gb, struct ggml_tensor * node, struct ggml_tensor * parent, const char * label) {
  15090. struct ggml_tensor * gparent = ggml_graph_get_parent(gb, node);
  15091. struct ggml_tensor * gparent0 = ggml_graph_get_parent(gb, parent);
  15092. fprintf(fp, " \"%p\":%s -> \"%p\":%s [ arrowhead = %s; style = %s; label = \"%s\"; ]\n",
  15093. gparent0 ? (void *) gparent0 : (void *) parent,
  15094. gparent0 ? "g" : "x",
  15095. gparent ? (void *) gparent : (void *) node,
  15096. gparent ? "g" : "x",
  15097. gparent ? "empty" : "vee",
  15098. gparent ? "dashed" : "solid",
  15099. label);
  15100. }
  15101. static void ggml_graph_dump_dot_leaf_edge(FILE * fp, struct ggml_tensor * node, struct ggml_tensor * parent, const char * label) {
  15102. fprintf(fp, " \"%p\":%s -> \"%p\":%s [ label = \"%s\"; ]\n",
  15103. (void *) parent, "x",
  15104. (void *) node, "x",
  15105. label);
  15106. }
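// Editor's note: ggml_graph_dump_dot() writes the graph in Graphviz DOT format; parameter
// nodes are filled yellow, nodes with gradients green or lightblue, other nodes white and
// leafs pink. The command printed at the end renders the file with `dot`.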
  15107. void ggml_graph_dump_dot(const struct ggml_cgraph * gb, const struct ggml_cgraph * gf, const char * filename) {
  15108. char color[16];
  15109. FILE * fp = fopen(filename, "w");
  15110. GGML_ASSERT(fp);
  15111. fprintf(fp, "digraph G {\n");
  15112. fprintf(fp, " newrank = true;\n");
  15113. fprintf(fp, " rankdir = LR;\n");
  15114. for (int i = 0; i < gb->n_nodes; i++) {
  15115. struct ggml_tensor * node = gb->nodes[i];
  15116. if (ggml_graph_get_parent(gb, node) != NULL) {
  15117. continue;
  15118. }
  15119. if (node->flags & GGML_TENSOR_FLAG_PARAM) {
  15120. snprintf(color, sizeof(color), "yellow");
  15121. } else if (node->grad) {
  15122. if (ggml_graph_find(gf, node)) {
  15123. snprintf(color, sizeof(color), "green");
  15124. } else {
  15125. snprintf(color, sizeof(color), "lightblue");
  15126. }
  15127. } else {
  15128. snprintf(color, sizeof(color), "white");
  15129. }
  15130. fprintf(fp, " \"%p\" [ "
  15131. "style = filled; fillcolor = %s; shape = record; "
  15132. "label=\"",
  15133. (void *) node, color);
  15134. if (strlen(node->name) > 0) {
  15135. fprintf(fp, "%s (%s)|", node->name, ggml_type_name(node->type));
  15136. } else {
  15137. fprintf(fp, "(%s)|", ggml_type_name(node->type));
  15138. }
  15139. if (ggml_is_matrix(node)) {
  15140. fprintf(fp, "%d [%" PRId64 ", %" PRId64 "] | <x>%s", i, node->ne[0], node->ne[1], ggml_op_symbol(node->op));
  15141. } else {
  15142. fprintf(fp, "%d [%" PRId64 ", %" PRId64 ", %" PRId64 "] | <x>%s", i, node->ne[0], node->ne[1], node->ne[2], ggml_op_symbol(node->op));
  15143. }
  15144. if (node->grad) {
  15145. fprintf(fp, " | <g>%s\"; ]\n", ggml_op_symbol(node->grad->op));
  15146. } else {
  15147. fprintf(fp, "\"; ]\n");
  15148. }
  15149. }
  15150. for (int i = 0; i < gb->n_leafs; i++) {
  15151. struct ggml_tensor * node = gb->leafs[i];
  15152. snprintf(color, sizeof(color), "pink");
  15153. fprintf(fp, " \"%p\" [ "
  15154. "style = filled; fillcolor = %s; shape = record; "
  15155. "label=\"<x>",
  15156. (void *) node, color);
  15157. if (strlen(node->name) > 0) {
  15158. fprintf(fp, "%s (%s)|", node->name, ggml_type_name(node->type));
  15159. } else {
  15160. fprintf(fp, "(%s)|", ggml_type_name(node->type));
  15161. }
  15162. fprintf(fp, "CONST %d [%" PRId64 ", %" PRId64 "]", i, node->ne[0], node->ne[1]);
  15163. if (ggml_nelements(node) < 5) {
  15164. fprintf(fp, " | (");
  15165. for (int j = 0; j < ggml_nelements(node); j++) {
  15166. if (node->type == GGML_TYPE_I8 || node->type == GGML_TYPE_I16 || node->type == GGML_TYPE_I32) {
  15167. fprintf(fp, "%d", ggml_get_i32_1d(node, j));
  15168. }
  15169. else if (node->type == GGML_TYPE_F32 || node->type == GGML_TYPE_F16) {
  15170. fprintf(fp, "%.1e", (double)ggml_get_f32_1d(node, j));
  15171. }
  15172. else {
  15173. fprintf(fp, "#");
  15174. }
  15175. if (j < ggml_nelements(node) - 1) {
  15176. fprintf(fp, ", ");
  15177. }
  15178. }
  15179. fprintf(fp, ")");
  15180. }
  15181. fprintf(fp, "\"; ]\n");
  15182. }
  15183. for (int i = 0; i < gb->n_nodes; i++) {
  15184. struct ggml_tensor * node = gb->nodes[i];
  15185. for (int j = 0; j < GGML_MAX_SRC; j++) {
  15186. if (node->src[j]) {
  15187. char label[16];
  15188. snprintf(label, sizeof(label), "src %d", j);
  15189. ggml_graph_dump_dot_node_edge(fp, gb, node, node->src[j], label);
  15190. }
  15191. }
  15192. }
  15193. for (int i = 0; i < gb->n_leafs; i++) {
  15194. struct ggml_tensor * node = gb->leafs[i];
  15195. for (int j = 0; j < GGML_MAX_SRC; j++) {
  15196. if (node->src[j]) {
  15197. char label[16];
  15198. snprintf(label, sizeof(label), "src %d", j);
  15199. ggml_graph_dump_dot_leaf_edge(fp, node, node->src[j], label);
  15200. }
  15201. }
  15202. }
  15203. fprintf(fp, "}\n");
  15204. fclose(fp);
  15205. GGML_PRINT("%s: dot -Tpng %s -o %s.png && open %s.png\n", __func__, filename, filename, filename);
  15206. }
  15207. ////////////////////////////////////////////////////////////////////////////////
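// Editor's note: the helpers below flatten the optimized tensors ps[0..np) into contiguous
// float arrays (and back), so the Adam and L-BFGS code can treat all parameters as a single
// vector of nx elements.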
  15208. static void ggml_opt_set_params(int np, struct ggml_tensor * const ps[], const float * x) {
  15209. int i = 0;
  15210. for (int p = 0; p < np; ++p) {
15211. const int64_t ne = ggml_nelements(ps[p]);
  15212. // TODO: add function to set tensor from array
  15213. for (int64_t j = 0; j < ne; ++j) {
  15214. ggml_set_f32_1d(ps[p], j, x[i++]);
  15215. }
  15216. }
  15217. }
  15218. static void ggml_opt_get_params(int np, struct ggml_tensor * const ps[], float * x) {
  15219. int i = 0;
  15220. for (int p = 0; p < np; ++p) {
15221. const int64_t ne = ggml_nelements(ps[p]);
  15222. // TODO: add function to get all elements at once
  15223. for (int64_t j = 0; j < ne; ++j) {
  15224. x[i++] = ggml_get_f32_1d(ps[p], j);
  15225. }
  15226. }
  15227. }
  15228. static void ggml_opt_get_grad(int np, struct ggml_tensor * const ps[], float * g) {
  15229. int64_t i = 0;
  15230. for (int p = 0; p < np; ++p) {
15231. const int64_t ne = ggml_nelements(ps[p]);
  15232. // TODO: add function to get all elements at once
  15233. for (int64_t j = 0; j < ne; ++j) {
  15234. g[i++] = ggml_get_f32_1d(ps[p]->grad, j);
  15235. }
  15236. }
  15237. }
  15238. static void ggml_opt_acc_grad(int np, struct ggml_tensor * const ps[], float * g, float scale) {
  15239. int64_t i = 0;
  15240. for (int p = 0; p < np; ++p) {
15241. const int64_t ne = ggml_nelements(ps[p]);
  15242. // TODO: add function to get all elements at once
  15243. for (int64_t j = 0; j < ne; ++j) {
  15244. g[i++] += ggml_get_f32_1d(ps[p]->grad, j) * scale;
  15245. }
  15246. }
  15247. }
  15248. //
  15249. // Using AdamW - ref: https://arxiv.org/pdf/1711.05101v3.pdf
  15250. //
  15251. // (Original Adam - ref: https://arxiv.org/pdf/1412.6980.pdf)
  15252. //
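// Editor's note - update rule as implemented in the inner loop below (sched scales the step,
// gclip rescales the whole gradient before the moment updates):
//
//   m = beta1*m + (1 - beta1)*g              (g pre-scaled by the clipping factor gnorm)
//   v = beta2*v + (1 - beta2)*g*g
//   x = x*(1 - alpha*sched*decay) - alpha*sched * m_hat / (sqrt(v_hat) + eps)
//
// with m_hat = m/(1 - beta1^t), v_hat = v/(1 - beta2^t), and the decay term applied only to
// tensors with at least decay_min_ndim dimensions.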
  15253. static enum ggml_opt_result ggml_opt_adam(
  15254. struct ggml_context * ctx,
  15255. struct ggml_opt_context * opt,
  15256. struct ggml_opt_params params,
  15257. struct ggml_tensor * f,
  15258. struct ggml_cgraph * gf,
  15259. struct ggml_cgraph * gb,
  15260. ggml_opt_callback callback,
  15261. void * callback_data) {
  15262. GGML_ASSERT(ggml_is_scalar(f));
  15263. // these will store the parameters we want to optimize
  15264. struct ggml_tensor * ps[GGML_MAX_PARAMS];
  15265. int np = 0;
  15266. int64_t nx = 0;
  15267. for (int i = 0; i < gf->n_nodes; ++i) {
  15268. if (gf->nodes[i]->flags & GGML_TENSOR_FLAG_PARAM) {
  15269. GGML_PRINT_DEBUG("found param %d: grad->op = %d\n", np, gf->nodes[i]->grad->op);
  15270. GGML_ASSERT(np < GGML_MAX_PARAMS);
  15271. ps[np++] = gf->nodes[i];
  15272. nx += ggml_nelements(gf->nodes[i]);
  15273. }
  15274. }
  15275. if ((opt->params.type != params.type) || (opt->nx != nx) || (opt->params.past != params.past)) {
  15276. int iter = opt->iter;
  15277. ggml_opt_init(opt->ctx, opt, params, nx);
  15278. opt->iter = iter;
  15279. }
  15280. // constants
  15281. float sched = params.adam.sched;
  15282. const float alpha = params.adam.alpha;
  15283. const float decay = params.adam.decay * alpha;
  15284. const float beta1 = params.adam.beta1;
  15285. const float beta2 = params.adam.beta2;
  15286. const float eps = params.adam.eps;
  15287. const float gclip = params.adam.gclip;
  15288. const int decay_min_ndim = params.adam.decay_min_ndim;
  15289. const int n_accum = MAX(1, params.n_gradient_accumulation);
  15290. const float accum_norm = 1.0f / (float) n_accum;
  15291. float * g = opt->adam.g->data; // gradients
  15292. float * m = opt->adam.m->data; // first moment
  15293. float * v = opt->adam.v->data; // second moment
  15294. float * pf = params.past > 0 ? opt->adam.pf->data : NULL; // past function values
  15295. struct ggml_cplan cplan = ggml_graph_plan(gb, params.n_threads);
  15296. struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_WORK_BUFFER, cplan.work_size);
  15297. cplan.work_data = (uint8_t *)ctx->mem_buffer + obj->offs;
  15298. bool cancel = false;
  15299. // compute the function value
  15300. float fx = 0;
  15301. ggml_set_zero(opt->adam.g);
  15302. for (int accum_step = 0; accum_step < n_accum; ++accum_step) {
  15303. if (callback) {
  15304. callback(callback_data, accum_step, &sched, &cancel);
  15305. if (cancel) {
  15306. return GGML_OPT_CANCEL;
  15307. }
  15308. }
  15309. // ggml_graph_reset (gf);
  15310. ggml_set_f32 (f->grad, 1.0f);
  15311. ggml_graph_compute(gb, &cplan);
  15312. ggml_opt_acc_grad(np, ps, g, accum_norm);
  15313. fx += ggml_get_f32_1d(f, 0);
  15314. }
  15315. fx *= accum_norm;
  15316. opt->adam.fx_prev = fx;
  15317. opt->adam.fx_best = opt->adam.fx_prev;
  15318. if (pf) {
  15319. pf[opt->iter % params.past] = opt->adam.fx_prev;
  15320. }
  15321. opt->loss_before = opt->adam.fx_prev;
  15322. opt->loss_after = opt->adam.fx_prev;
  15323. // initialize
  15324. if (opt->just_initialized) {
  15325. opt->adam.n_no_improvement = 0;
  15326. opt->just_initialized = false;
  15327. }
  15328. float * fx_best = &opt->adam.fx_best;
  15329. float * fx_prev = &opt->adam.fx_prev;
  15330. int * n_no_improvement = &opt->adam.n_no_improvement;
  15331. int iter0 = opt->iter;
  15332. // run the optimizer
  15333. for (int t = 0; t < params.adam.n_iter; ++t) {
  15334. opt->iter = iter0 + t + 1;
  15335. GGML_PRINT_DEBUG ("=== iter %d ===\n", t);
  15336. GGML_PRINT_DEBUG ("f = %10.6f\n", ggml_get_f32_1d(f, 0));
  15337. GGML_PRINT_DEBUG_5("df/dx0 = %10.6f\n", ggml_get_f32_1d(ps[0]->grad, 0));
  15338. GGML_PRINT_DEBUG_5("df/dx1 = %10.6f\n", ggml_get_f32_1d(ps[1]->grad, 0));
  15339. for (int i = 0; i < np; ++i) {
  15340. GGML_PRINT_DEBUG("param %d: %10.6f, g = %10.6f\n", i,
  15341. ggml_get_f32_1d(ps[i], 0), ggml_get_f32_1d(ps[i]->grad, 0));
  15342. }
  15343. const int64_t t_start_wall = ggml_time_us();
  15344. const int64_t t_start_cpu = ggml_cycles();
  15345. UNUSED(t_start_wall);
  15346. UNUSED(t_start_cpu);
  15347. {
  15348. float gnorm = 1.0f;
  15349. if (gclip > 0.0f) {
  15350. // gradient clipping
  15351. ggml_float sum = 0.0;
  15352. for (int64_t i = 0; i < nx; ++i) {
  15353. sum += (ggml_float)(g[i]*g[i]);
  15354. }
  15355. ggml_float norm = sqrt(sum);
  15356. if (norm > (ggml_float) gclip) {
  15357. gnorm = (float) ((ggml_float) gclip / norm);
  15358. }
  15359. }
  15360. const float beta1h = alpha*sched/(1.0f - powf(beta1, opt->iter));
  15361. const float beta2h = 1.0f/(1.0f - powf(beta2, opt->iter));
  15362. int64_t i = 0;
  15363. for (int p = 0; p < np; ++p) {
  15364. const int64_t ne = ggml_nelements(ps[p]);
  15365. const float p_decay = ((ggml_n_dims(ps[p]) >= decay_min_ndim) ? decay : 0.0f) * sched;
  15366. for (int64_t j = 0; j < ne; ++j) {
  15367. float x = ggml_get_f32_1d(ps[p], j);
  15368. float g_ = g[i]*gnorm;
  15369. m[i] = m[i]*beta1 + g_*(1.0f - beta1);
  15370. v[i] = v[i]*beta2 + g_*g_*(1.0f - beta2);
  15371. float mh = m[i]*beta1h;
  15372. float vh = v[i]*beta2h;
  15373. vh = sqrtf(vh) + eps;
  15374. x = x*(1.0f - p_decay) - mh/vh;
  15375. ggml_set_f32_1d(ps[p], j, x);
  15376. ++i;
  15377. }
  15378. }
  15379. }
  15380. fx = 0;
  15381. ggml_set_zero(opt->adam.g);
  15382. for (int accum_step = 0; accum_step < n_accum; ++accum_step) {
  15383. if (callback) {
  15384. callback(callback_data, accum_step, &sched, &cancel);
  15385. if (cancel) {
15386. return GGML_OPT_CANCEL;
  15387. }
  15388. }
  15389. // ggml_graph_reset (gf);
  15390. ggml_set_f32 (f->grad, 1.0f);
  15391. ggml_graph_compute(gb, &cplan);
  15392. ggml_opt_acc_grad(np, ps, g, accum_norm);
  15393. fx += ggml_get_f32_1d(f, 0);
  15394. }
  15395. fx *= accum_norm;
  15396. opt->loss_after = fx;
  15397. // check convergence
  15398. if (fabsf(fx - fx_prev[0])/fx < params.adam.eps_f) {
  15399. GGML_PRINT_DEBUG("converged\n");
  15400. return GGML_OPT_OK;
  15401. }
  15402. // delta-based convergence test
  15403. if (pf != NULL) {
  15404. // need at least params.past iterations to start checking for convergence
  15405. if (params.past <= iter0 + t) {
  15406. const float rate = (pf[(iter0 + t)%params.past] - fx)/fx;
  15407. if (fabsf(rate) < params.delta) {
  15408. return GGML_OPT_OK;
  15409. }
  15410. }
  15411. pf[(iter0 + t)%params.past] = fx;
  15412. }
  15413. // check for improvement
  15414. if (params.max_no_improvement > 0) {
  15415. if (fx_best[0] > fx) {
  15416. fx_best[0] = fx;
  15417. n_no_improvement[0] = 0;
  15418. } else {
  15419. ++n_no_improvement[0];
  15420. if (n_no_improvement[0] >= params.max_no_improvement) {
  15421. return GGML_OPT_OK;
  15422. }
  15423. }
  15424. }
  15425. fx_prev[0] = fx;
  15426. {
  15427. const int64_t t_end_cpu = ggml_cycles();
  15428. GGML_PRINT_DEBUG("time iter: %5.3f s\n", ((float)(t_end_cpu - t_start_cpu))/CLOCKS_PER_SEC);
  15429. UNUSED(t_end_cpu);
  15430. const int64_t t_end_wall = ggml_time_us();
  15431. GGML_PRINT_DEBUG("wall time iter: %5.3f s\n", (t_end_wall - t_start_wall)/1e6);
  15432. UNUSED(t_end_wall);
  15433. }
  15434. }
  15435. return GGML_OPT_DID_NOT_CONVERGE;
  15436. }
  15437. //
  15438. // L-BFGS
  15439. //
  15440. // the L-BFGS implementation below is based on the following implementation:
  15441. //
  15442. // https://github.com/chokkan/liblbfgs
  15443. //
  15444. struct ggml_lbfgs_iteration_data {
  15445. float alpha;
  15446. float ys;
  15447. float * s;
  15448. float * y;
  15449. };
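// Editor's note (derived from the checks below): the backtracking line search scales *step
// by dec (0.5) or inc (2.1) until the Armijo condition
//   f(x + step*d) <= f(x) + ftol * step * dot(g, d)
// holds and, depending on params->lbfgs.linesearch, also the (strong) Wolfe curvature
// condition on dot(g_new, d). It returns the number of function evaluations, or a
// GGML_LINESEARCH_* / GGML_OPT_CANCEL code on failure.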
  15450. static enum ggml_opt_result linesearch_backtracking(
  15451. const struct ggml_opt_params * params,
  15452. int nx,
  15453. float * x,
  15454. float * fx,
  15455. float * g,
  15456. float * d,
  15457. float * step,
  15458. const float * xp,
  15459. struct ggml_tensor * f,
  15460. struct ggml_cgraph * gb,
  15461. struct ggml_cplan * cplan,
  15462. const int np,
  15463. struct ggml_tensor * ps[],
  15464. bool * cancel,
  15465. ggml_opt_callback callback,
  15466. void * callback_data) {
  15467. int count = 0;
  15468. float width = 0.0f;
  15469. float dg = 0.0f;
  15470. float finit = 0.0f;
  15471. float dginit = 0.0f;
  15472. float dgtest = 0.0f;
  15473. const float dec = 0.5f;
  15474. const float inc = 2.1f;
  15475. const int n_accum = MAX(1, params->n_gradient_accumulation);
  15476. const float accum_norm = 1.0f / (float) n_accum;
  15477. if (*step <= 0.f) {
  15478. return GGML_LINESEARCH_INVALID_PARAMETERS;
  15479. }
  15480. // compute the initial gradient in the search direction
  15481. ggml_vec_dot_f32(nx, &dginit, 0, g, 0, d, 0, 1);
  15482. // make sure that d points to a descent direction
  15483. if (0 < dginit) {
  15484. return GGML_LINESEARCH_FAIL;
  15485. }
  15486. // initialize local variables
  15487. finit = *fx;
  15488. dgtest = params->lbfgs.ftol*dginit;
  15489. while (true) {
  15490. ggml_vec_cpy_f32(nx, x, xp);
  15491. ggml_vec_mad_f32(nx, x, d, *step);
  15492. // evaluate the function and gradient values
  15493. {
  15494. ggml_opt_set_params(np, ps, x);
  15495. *fx = 0;
  15496. memset(g, 0, sizeof(float)*nx);
  15497. for (int accum_step = 0; accum_step < n_accum; ++accum_step) {
  15498. if (callback) {
15499. // L-BFGS does not support a learning rate -> ignore the learning schedule
  15500. float sched = 0;
  15501. callback(callback_data, accum_step, &sched, cancel);
  15502. if (*cancel) {
  15503. return GGML_OPT_CANCEL;
  15504. }
  15505. }
  15506. // ggml_graph_reset (gf);
  15507. ggml_set_f32 (f->grad, 1.0f);
  15508. ggml_graph_compute(gb, cplan);
  15509. ggml_opt_acc_grad(np, ps, g, accum_norm);
  15510. *fx += ggml_get_f32_1d(f, 0);
  15511. }
  15512. *fx *= accum_norm;
  15513. }
  15514. ++count;
  15515. if (*fx > finit + (*step)*dgtest) {
  15516. width = dec;
  15517. } else {
  15518. // Armijo condition is satisfied
  15519. if (params->lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_ARMIJO) {
  15520. return count;
  15521. }
  15522. ggml_vec_dot_f32(nx, &dg, 0, g, 0, d, 0, 1);
  15523. // check the Wolfe condition
  15524. if (dg < params->lbfgs.wolfe * dginit) {
  15525. width = inc;
  15526. } else {
15527. if (params->lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_WOLFE) {
  15528. // regular Wolfe conditions
  15529. return count;
  15530. }
  15531. if(dg > -params->lbfgs.wolfe*dginit) {
  15532. width = dec;
  15533. } else {
  15534. // strong Wolfe condition (GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE)
  15535. return count;
  15536. }
  15537. }
  15538. }
  15539. if (*step < params->lbfgs.min_step) {
  15540. return GGML_LINESEARCH_MINIMUM_STEP;
  15541. }
  15542. if (*step > params->lbfgs.max_step) {
  15543. return GGML_LINESEARCH_MAXIMUM_STEP;
  15544. }
  15545. if (params->lbfgs.max_linesearch <= count) {
  15546. return GGML_LINESEARCH_MAXIMUM_ITERATIONS;
  15547. }
  15548. (*step) *= width;
  15549. }
  15550. GGML_ASSERT(false && "line search failed");
  15551. return GGML_LINESEARCH_FAIL;
  15552. }
  15553. static enum ggml_opt_result ggml_opt_lbfgs(
  15554. struct ggml_context * ctx,
  15555. struct ggml_opt_context * opt,
  15556. struct ggml_opt_params params,
  15557. struct ggml_tensor * f,
  15558. struct ggml_cgraph * gf,
  15559. struct ggml_cgraph * gb,
  15560. ggml_opt_callback callback,
  15561. void * callback_data) {
  15562. if (params.lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_WOLFE ||
  15563. params.lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE) {
  15564. if (params.lbfgs.wolfe <= params.lbfgs.ftol || 1.f <= params.lbfgs.wolfe) {
  15565. return GGML_OPT_INVALID_WOLFE;
  15566. }
  15567. }
  15568. const int m = params.lbfgs.m;
  15569. // these will store the parameters we want to optimize
  15570. struct ggml_tensor * ps[GGML_MAX_PARAMS];
  15571. int np = 0;
  15572. int nx = 0;
  15573. for (int i = 0; i < gf->n_nodes; ++i) {
  15574. if (gf->nodes[i]->flags & GGML_TENSOR_FLAG_PARAM) {
  15575. GGML_PRINT_DEBUG("found param %d: grad->op = %d\n", np, gf->nodes[i]->grad->op);
  15576. GGML_ASSERT(np < GGML_MAX_PARAMS);
  15577. ps[np++] = gf->nodes[i];
  15578. nx += ggml_nelements(gf->nodes[i]);
  15579. }
  15580. }
  15581. if ((opt->params.type != params.type) || (opt->nx != nx) || (opt->params.past != params.past) || (opt->params.lbfgs.m != params.lbfgs.m)) {
  15582. int iter = opt->iter;
  15583. ggml_opt_init(ctx, opt, params, nx);
  15584. opt->iter = iter;
  15585. }
  15586. struct ggml_cplan cplan = ggml_graph_plan(gb, params.n_threads);
  15587. struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_WORK_BUFFER, cplan.work_size);
  15588. cplan.work_data = (uint8_t *)ctx->mem_buffer + obj->offs;
  15589. float * x = opt->lbfgs.x->data; // current parameters
  15590. float * xp = opt->lbfgs.xp->data; // previous parameters
  15591. float * g = opt->lbfgs.g->data; // current gradient
  15592. float * gp = opt->lbfgs.gp->data; // previous gradient
  15593. float * d = opt->lbfgs.d->data; // search direction
  15594. float * pf = params.past > 0 ? opt->lbfgs.pf->data : NULL; // past function values
  15595. const int n_accum = MAX(1, params.n_gradient_accumulation);
  15596. const float accum_norm = 1.0f / (float) n_accum;
  15597. float fx = 0.0f; // cost function value
  15598. float xnorm = 0.0f; // ||x||
  15599. float gnorm = 0.0f; // ||g||
  15600. // initialize x from the graph nodes
  15601. ggml_opt_get_params(np, ps, x);
  15602. // the L-BFGS memory
  15603. float * lm_alpha = opt->lbfgs.lmal->data;
  15604. float * lm_ys = opt->lbfgs.lmys->data;
  15605. float * lm_s = opt->lbfgs.lms->data;
  15606. float * lm_y = opt->lbfgs.lmy->data;
  15607. bool cancel = false;
  15608. // evaluate the function value and its gradient
  15609. {
  15610. ggml_opt_set_params(np, ps, x);
  15611. fx = 0;
  15612. memset(g, 0, sizeof(float)*nx);
  15613. for (int accum_step = 0; accum_step < n_accum; ++accum_step) {
  15614. if (callback) {
15615. // L-BFGS does not use a learning rate -> ignore the learning schedule
  15616. float sched = 0;
  15617. callback(callback_data, accum_step, &sched, &cancel);
  15618. if (cancel) {
  15619. return GGML_OPT_CANCEL;
  15620. }
  15621. }
  15622. // ggml_graph_reset (gf);
  15623. ggml_set_f32 (f->grad, 1.0f);
  15624. ggml_graph_compute(gb, &cplan);
  15625. ggml_opt_acc_grad(np, ps, g, accum_norm);
  15626. fx += ggml_get_f32_1d(f, 0);
  15627. }
  15628. fx *= accum_norm;
  15629. opt->loss_before = fx;
  15630. opt->loss_after = fx;
  15631. }
  15632. // search direction = -gradient
  15633. ggml_vec_neg_f32(nx, d, g);
  15634. // ||x||, ||g||
  15635. ggml_vec_norm_f32(nx, &xnorm, x);
  15636. ggml_vec_norm_f32(nx, &gnorm, g);
  15637. if (xnorm < 1.0f) {
  15638. xnorm = 1.0f;
  15639. }
  15640. // already optimized
  15641. if (gnorm/xnorm <= params.lbfgs.eps) {
  15642. return GGML_OPT_OK;
  15643. }
  15644. if (opt->just_initialized) {
  15645. if (pf) {
  15646. pf[0] = fx;
  15647. }
  15648. opt->lbfgs.fx_best = fx;
  15649. // initial step
  15650. ggml_vec_norm_inv_f32(nx, &opt->lbfgs.step, d);
  15651. opt->lbfgs.j = 0;
  15652. opt->lbfgs.k = 1;
  15653. opt->lbfgs.end = 0;
  15654. opt->lbfgs.n_no_improvement = 0;
  15655. opt->just_initialized = false;
  15656. }
  15657. float * fx_best = &opt->lbfgs.fx_best;
  15658. float * step = &opt->lbfgs.step;
  15659. int * j = &opt->lbfgs.j;
  15660. int * k = &opt->lbfgs.k;
  15661. int * end = &opt->lbfgs.end;
  15662. int * n_no_improvement = &opt->lbfgs.n_no_improvement;
  15663. int ls = 0;
  15664. int bound = 0;
  15665. float ys = 0.0f;
  15666. float yy = 0.0f;
  15667. float beta = 0.0f;
  15668. int it = 0;
  15669. while (true) {
  15670. // store the current position and gradient vectors
  15671. ggml_vec_cpy_f32(nx, xp, x);
  15672. ggml_vec_cpy_f32(nx, gp, g);
  15673. // TODO: instead of passing &cancel here, use the return code of the linesearch
  15674. // to determine if the optimization should be cancelled
  15675. // this is a simple change, but not doing this atm, since I don't have a nice
  15676. // way to test and don't want to break something with so many changes lined up
  15677. ls = linesearch_backtracking(&params, nx, x, &fx, g, d, step, xp, f, gb, &cplan, np, ps, &cancel, callback, callback_data);
  15678. if (cancel) {
  15679. return GGML_OPT_CANCEL;
  15680. }
  15681. if (ls < 0) {
  15682. // linesearch failed - go back to the previous point and return
  15683. ggml_vec_cpy_f32(nx, x, xp);
  15684. ggml_vec_cpy_f32(nx, g, gp);
  15685. return ls;
  15686. }
  15687. opt->loss_after = fx;
  15688. ggml_vec_norm_f32(nx, &xnorm, x);
  15689. ggml_vec_norm_f32(nx, &gnorm, g);
  15690. GGML_PRINT_DEBUG("f = %10.6f\n", ggml_get_f32_1d(f, 0));
  15691. if (xnorm < 1.0f) {
  15692. xnorm = 1.0f;
  15693. }
  15694. if (gnorm/xnorm <= params.lbfgs.eps) {
  15695. // converged
  15696. return GGML_OPT_OK;
  15697. }
  15698. // delta-based convergence test
  15699. if (pf != NULL) {
  15700. // need at least params.past iterations to start checking for convergence
  15701. if (params.past <= k[0]) {
  15702. const float rate = (pf[k[0]%params.past] - fx)/fx;
  15703. if (fabsf(rate) < params.delta) {
  15704. return GGML_OPT_OK;
  15705. }
  15706. }
  15707. pf[k[0]%params.past] = fx;
  15708. }
  15709. // check for improvement
  15710. if (params.max_no_improvement > 0) {
  15711. if (fx < fx_best[0]) {
  15712. fx_best[0] = fx;
  15713. n_no_improvement[0] = 0;
  15714. } else {
  15715. n_no_improvement[0]++;
  15716. if (n_no_improvement[0] >= params.max_no_improvement) {
  15717. return GGML_OPT_OK;
  15718. }
  15719. }
  15720. }
  15721. if (params.lbfgs.n_iter != 0 && params.lbfgs.n_iter < it + 1) {
  15722. // reached the maximum number of iterations
  15723. return GGML_OPT_DID_NOT_CONVERGE;
  15724. }
  15725. // update vectors s and y:
  15726. // s_{k+1} = x_{k+1} - x_{k} = \step * d_{k}.
  15727. // y_{k+1} = g_{k+1} - g_{k}.
  15728. //
  15729. ggml_vec_sub_f32(nx, &lm_s[end[0]*nx], x, xp);
  15730. ggml_vec_sub_f32(nx, &lm_y[end[0]*nx], g, gp);
  15731. // compute scalars ys and yy:
  15732. // ys = y^t \cdot s -> 1 / \rho.
  15733. // yy = y^t \cdot y.
  15734. //
  15735. ggml_vec_dot_f32(nx, &ys, 0, &lm_y[end[0]*nx], 0, &lm_s[end[0]*nx], 0, 1);
  15736. ggml_vec_dot_f32(nx, &yy, 0, &lm_y[end[0]*nx], 0, &lm_y[end[0]*nx], 0, 1);
  15737. lm_ys[end[0]] = ys;
  15738. // find new search direction
  15739. // ref: https://en.wikipedia.org/wiki/Limited-memory_BFGS
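// Two-loop recursion: the first loop walks the stored (s, y) pairs from newest
// to oldest, computing alpha_j = rho_j * s_j^T q and updating q -= alpha_j * y_j;
// the intermediate result is then scaled by ys/yy (a standard choice for the
// initial inverse-Hessian approximation); the second loop walks oldest to newest
// applying the beta corrections, leaving d ~= -H_k * g, the quasi-Newton direction.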
  15740. bound = (m <= k[0]) ? m : k[0];
  15741. k[0]++;
  15742. it++;
  15743. end[0] = (end[0] + 1)%m;
  15744. // initialize search direction with -g
  15745. ggml_vec_neg_f32(nx, d, g);
  15746. j[0] = end[0];
  15747. for (int i = 0; i < bound; ++i) {
  15748. j[0] = (j[0] + m - 1) % m;
  15749. // \alpha_{j} = \rho_{j} s^{t}_{j} \cdot q_{k+1}
  15750. ggml_vec_dot_f32(nx, &lm_alpha[j[0]], 0, &lm_s[j[0]*nx], 0, d, 0, 1);
  15751. lm_alpha[j[0]] /= lm_ys[j[0]];
  15752. // q_{i} = q_{i+1} - \alpha_{i} y_{i}
  15753. ggml_vec_mad_f32(nx, d, &lm_y[j[0]*nx], -lm_alpha[j[0]]);
  15754. }
  15755. ggml_vec_scale_f32(nx, d, ys/yy);
  15756. for (int i = 0; i < bound; ++i) {
  15757. // \beta_{j} = \rho_{j} y^t_{j} \cdot \gamma_{i}
  15758. ggml_vec_dot_f32(nx, &beta, 0, &lm_y[j[0]*nx], 0, d, 0, 1);
  15759. beta /= lm_ys[j[0]];
  15760. // \gamma_{i+1} = \gamma_{i} + (\alpha_{j} - \beta_{j}) s_{j}
  15761. ggml_vec_mad_f32(nx, d, &lm_s[j[0]*nx], lm_alpha[j[0]] - beta);
  15762. j[0] = (j[0] + 1)%m;
  15763. }
  15764. step[0] = 1.0;
  15765. }
  15766. GGML_ASSERT(false && "lbfgs failed");
  15767. return GGML_OPT_DID_NOT_CONVERGE;
  15768. }
  15769. struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type) {
  15770. struct ggml_opt_params result;
  15771. switch (type) {
  15772. case GGML_OPT_ADAM:
  15773. {
  15774. result = (struct ggml_opt_params) {
  15775. .type = GGML_OPT_ADAM,
  15776. .graph_size = GGML_DEFAULT_GRAPH_SIZE,
  15777. .n_threads = 1, // FIXME: GGML_DEFAULT_N_THREADS ?
  15778. .past = 0,
  15779. .delta = 1e-5f,
  15780. .max_no_improvement = 100,
  15781. .print_forward_graph = true,
  15782. .print_backward_graph = true,
  15783. .n_gradient_accumulation = 1,
  15784. .adam = {
  15785. .n_iter = 10000,
  15786. .sched = 1.000f,
  15787. .decay = 0.0f,
  15788. .decay_min_ndim = 2,
  15789. .alpha = 0.001f,
  15790. .beta1 = 0.9f,
  15791. .beta2 = 0.999f,
  15792. .eps = 1e-8f,
  15793. .eps_f = 1e-5f,
  15794. .eps_g = 1e-3f,
  15795. .gclip = 0.0f,
  15796. },
  15797. };
  15798. } break;
  15799. case GGML_OPT_LBFGS:
  15800. {
  15801. result = (struct ggml_opt_params) {
  15802. .type = GGML_OPT_LBFGS,
  15803. .graph_size = GGML_DEFAULT_GRAPH_SIZE,
  15804. .n_threads = 1,
  15805. .past = 0,
  15806. .delta = 1e-5f,
  15807. .max_no_improvement = 0,
  15808. .print_forward_graph = true,
  15809. .print_backward_graph = true,
  15810. .n_gradient_accumulation = 1,
  15811. .lbfgs = {
  15812. .m = 6,
  15813. .n_iter = 100,
  15814. .max_linesearch = 20,
  15815. .eps = 1e-5f,
  15816. .ftol = 1e-4f,
  15817. .wolfe = 0.9f,
  15818. .min_step = 1e-20f,
  15819. .max_step = 1e+20f,
  15820. .linesearch = GGML_LINESEARCH_DEFAULT,
  15821. },
  15822. };
  15823. } break;
  15824. }
  15825. return result;
  15826. }
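// Illustrative sketch (application-side code, with a hypothetical `loss` tensor):
// the returned defaults are meant to be tweaked before use, e.g. for Adam:
//
//     struct ggml_opt_params p = ggml_opt_default_params(GGML_OPT_ADAM);
//     p.n_threads   = 4;
//     p.adam.n_iter = 500;
//     p.adam.alpha  = 1e-4f; // learning rate
//     enum ggml_opt_result res = ggml_opt(NULL, p, loss); // NULL ctx -> ggml_opt allocates one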
  15827. GGML_API void ggml_opt_init(
  15828. struct ggml_context * ctx,
  15829. struct ggml_opt_context * opt,
  15830. struct ggml_opt_params params,
  15831. int64_t nx) {
  15832. opt->ctx = ctx;
  15833. opt->params = params;
  15834. opt->iter = 0;
  15835. opt->nx = nx;
  15836. opt->just_initialized = true;
  15837. if (opt->ctx == NULL) {
  15838. struct ggml_init_params ctx_opt_params;
  15839. if (opt->params.type == GGML_OPT_ADAM) {
  15840. ctx_opt_params.mem_size = GGML_MEM_ALIGN*3 + ggml_tensor_overhead()*3 + ggml_type_size(GGML_TYPE_F32)*nx*3;
  15841. if (opt->params.past > 0) {
  15842. ctx_opt_params.mem_size += GGML_MEM_ALIGN + ggml_tensor_overhead() + ggml_type_size(GGML_TYPE_F32)*opt->params.past;
  15843. }
  15844. } else if (opt->params.type == GGML_OPT_LBFGS) {
  15845. ctx_opt_params.mem_size = GGML_MEM_ALIGN*9 + ggml_tensor_overhead()*9 + ggml_type_size(GGML_TYPE_F32)*(nx*5 + opt->params.lbfgs.m*2 + nx*opt->params.lbfgs.m*2);
  15846. if (opt->params.past > 0) {
  15847. ctx_opt_params.mem_size += GGML_MEM_ALIGN + ggml_tensor_overhead() + ggml_type_size(GGML_TYPE_F32)*opt->params.past;
  15848. }
  15849. }
  15850. ctx_opt_params.mem_buffer = NULL;
  15851. ctx_opt_params.no_alloc = false;
  15852. opt->ctx = ggml_init(ctx_opt_params);
  15853. }
  15854. switch (opt->params.type) {
  15855. case GGML_OPT_ADAM:
  15856. {
  15857. opt->adam.g = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
  15858. opt->adam.m = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
  15859. opt->adam.v = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
  15860. opt->adam.pf = params.past > 0
  15861. ? ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, params.past)
  15862. : NULL;
  15863. ggml_set_zero(opt->adam.m);
  15864. ggml_set_zero(opt->adam.v);
  15865. if (opt->adam.pf) {
  15866. ggml_set_zero(opt->adam.pf);
  15867. }
  15868. } break;
  15869. case GGML_OPT_LBFGS:
  15870. {
  15871. opt->lbfgs.x = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
  15872. opt->lbfgs.xp = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
  15873. opt->lbfgs.g = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
  15874. opt->lbfgs.gp = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
  15875. opt->lbfgs.d = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
  15876. opt->lbfgs.pf = params.past > 0
  15877. ? ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, params.past)
  15878. : NULL;
  15879. opt->lbfgs.lmal = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, params.lbfgs.m);
  15880. opt->lbfgs.lmys = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, params.lbfgs.m);
  15881. opt->lbfgs.lms = ggml_new_tensor_2d(opt->ctx, GGML_TYPE_F32, nx, params.lbfgs.m);
  15882. opt->lbfgs.lmy = ggml_new_tensor_2d(opt->ctx, GGML_TYPE_F32, nx, params.lbfgs.m);
  15883. ggml_set_zero(opt->lbfgs.x);
  15884. ggml_set_zero(opt->lbfgs.xp);
  15885. ggml_set_zero(opt->lbfgs.g);
  15886. ggml_set_zero(opt->lbfgs.gp);
  15887. ggml_set_zero(opt->lbfgs.d);
  15888. if (opt->lbfgs.pf) {
  15889. ggml_set_zero(opt->lbfgs.pf);
  15890. }
  15891. ggml_set_zero(opt->lbfgs.lmal);
  15892. ggml_set_zero(opt->lbfgs.lmys);
  15893. ggml_set_zero(opt->lbfgs.lms);
  15894. ggml_set_zero(opt->lbfgs.lmy);
  15895. } break;
  15896. }
  15897. }
  15898. enum ggml_opt_result ggml_opt(
  15899. struct ggml_context * ctx,
  15900. struct ggml_opt_params params,
  15901. struct ggml_tensor * f) {
  15902. bool free_ctx = false;
  15903. if (ctx == NULL) {
  15904. struct ggml_init_params params_ctx = {
  15905. .mem_size = 16*1024*1024,
  15906. .mem_buffer = NULL,
  15907. .no_alloc = false,
  15908. };
  15909. ctx = ggml_init(params_ctx);
  15910. if (ctx == NULL) {
  15911. return GGML_OPT_NO_CONTEXT;
  15912. }
  15913. free_ctx = true;
  15914. }
  15915. enum ggml_opt_result result = GGML_OPT_OK;
  15916. struct ggml_opt_context * opt = (struct ggml_opt_context *) alloca(sizeof(struct ggml_opt_context));
  15917. ggml_opt_init(ctx, opt, params, 0);
  15918. result = ggml_opt_resume(ctx, opt, f);
  15919. if (free_ctx) {
  15920. ggml_free(ctx);
  15921. }
  15922. return result;
  15923. }
  15924. enum ggml_opt_result ggml_opt_resume(
  15925. struct ggml_context * ctx,
  15926. struct ggml_opt_context * opt,
  15927. struct ggml_tensor * f) {
  15928. // build forward + backward compute graphs
  15929. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx, opt->params.graph_size, true);
  15930. ggml_build_forward_expand(gf, f);
  15931. struct ggml_cgraph * gb = ggml_graph_dup(ctx, gf);
  15932. ggml_build_backward_expand(ctx, gf, gb, true);
  15933. return ggml_opt_resume_g(ctx, opt, f, gf, gb, NULL, NULL);
  15934. }
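// Illustrative sketch (application-side, hypothetical `loss` tensor and `n_epochs`):
// ggml_opt_resume keeps the optimizer state in `opt`, so it can be called
// repeatedly to continue a previous run:
//
//     struct ggml_opt_context opt_ctx;
//     ggml_opt_init(ctx, &opt_ctx, ggml_opt_default_params(GGML_OPT_ADAM), 0);
//     for (int epoch = 0; epoch < n_epochs; ++epoch) {
//         enum ggml_opt_result res = ggml_opt_resume(ctx, &opt_ctx, loss);
//         // inspect opt_ctx.loss_after, res, ...
//     }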
  15935. enum ggml_opt_result ggml_opt_resume_g(
  15936. struct ggml_context * ctx,
  15937. struct ggml_opt_context * opt,
  15938. struct ggml_tensor * f,
  15939. struct ggml_cgraph * gf,
  15940. struct ggml_cgraph * gb,
  15941. ggml_opt_callback callback,
  15942. void * callback_data) {
  15943. // build forward + backward compute graphs
  15944. enum ggml_opt_result result = GGML_OPT_OK;
  15945. switch (opt->params.type) {
  15946. case GGML_OPT_ADAM:
  15947. {
  15948. result = ggml_opt_adam(ctx, opt, opt->params, f, gf, gb, callback, callback_data);
  15949. } break;
  15950. case GGML_OPT_LBFGS:
  15951. {
  15952. result = ggml_opt_lbfgs(ctx, opt, opt->params, f, gf, gb, callback, callback_data);
  15953. } break;
  15954. }
  15955. if (opt->params.print_forward_graph) {
  15956. ggml_graph_print (gf);
  15957. ggml_graph_dump_dot(gf, NULL, "opt-forward.dot");
  15958. }
  15959. if (opt->params.print_backward_graph) {
  15960. ggml_graph_print (gb);
  15961. ggml_graph_dump_dot(gb, gf, "opt-backward.dot");
  15962. }
  15963. return result;
  15964. }
  15965. ////////////////////////////////////////////////////////////////////////////////
  15966. void ggml_set_input(struct ggml_tensor * tensor) {
  15967. tensor->flags |= GGML_TENSOR_FLAG_INPUT;
  15968. }
  15969. void ggml_set_output(struct ggml_tensor * tensor) {
  15970. tensor->flags |= GGML_TENSOR_FLAG_OUTPUT;
  15971. }
  15972. ////////////////////////////////////////////////////////////////////////////////
  15973. void ggml_quantize_init(enum ggml_type type) {
  15974. ggml_critical_section_start();
  15975. switch (type) {
  15976. case GGML_TYPE_IQ2_XXS:
  15977. case GGML_TYPE_IQ2_XS:
  15978. case GGML_TYPE_IQ1_S: iq2xs_init_impl(type); break;
  15979. case GGML_TYPE_IQ3_XXS: iq3xs_init_impl(256); break;
  15980. default: // nothing
  15981. break;
  15982. }
  15983. ggml_critical_section_end();
  15984. }
  15985. void ggml_quantize_free(void) {
  15986. ggml_critical_section_start();
  15987. iq2xs_free_impl(GGML_TYPE_IQ2_XXS);
  15988. iq2xs_free_impl(GGML_TYPE_IQ2_XS);
  15989. iq2xs_free_impl(GGML_TYPE_IQ1_S);
  15990. iq3xs_free_impl(256);
  15991. ggml_critical_section_end();
  15992. }
  15993. size_t ggml_quantize_q4_0(const float * src, void * dst, int n, int k, int64_t * hist) {
  15994. assert(k % QK4_0 == 0);
  15995. const int nb = k / QK4_0;
  15996. for (int b = 0; b < n; b += k) {
  15997. block_q4_0 * restrict y = (block_q4_0 *) dst + b/QK4_0;
  15998. quantize_row_q4_0_reference(src + b, y, k);
  15999. for (int i = 0; i < nb; i++) {
  16000. for (int j = 0; j < QK4_0; j += 2) {
  16001. const uint8_t vi0 = y[i].qs[j/2] & 0x0F;
  16002. const uint8_t vi1 = y[i].qs[j/2] >> 4;
  16003. hist[vi0]++;
  16004. hist[vi1]++;
  16005. }
  16006. }
  16007. }
  16008. return (n/QK4_0*sizeof(block_q4_0));
  16009. }
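// Illustrative sketch (caller-side code): quantizing `n` floats laid out as rows
// of `k` elements each (n a multiple of k, k a multiple of QK4_0), collecting
// the 16-bin histogram of the 4-bit quants.
//
//     int64_t hist[16] = {0};
//     size_t  q_size  = (size_t)(n/QK4_0)*sizeof(block_q4_0);
//     void  * q_data  = malloc(q_size);
//     size_t  written = ggml_quantize_q4_0(src, q_data, n, k, hist);
//     assert(written == q_size);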
  16010. size_t ggml_quantize_q4_1(const float * src, void * dst, int n, int k, int64_t * hist) {
  16011. assert(k % QK4_1 == 0);
  16012. const int nb = k / QK4_1;
  16013. for (int b = 0; b < n; b += k) {
  16014. block_q4_1 * restrict y = (block_q4_1 *) dst + b/QK4_1;
  16015. quantize_row_q4_1_reference(src + b, y, k);
  16016. for (int i = 0; i < nb; i++) {
  16017. for (int j = 0; j < QK4_1; j += 2) {
  16018. const uint8_t vi0 = y[i].qs[j/2] & 0x0F;
  16019. const uint8_t vi1 = y[i].qs[j/2] >> 4;
  16020. hist[vi0]++;
  16021. hist[vi1]++;
  16022. }
  16023. }
  16024. }
  16025. return (n/QK4_1*sizeof(block_q4_1));
  16026. }
  16027. size_t ggml_quantize_q5_0(const float * src, void * dst, int n, int k, int64_t * hist) {
  16028. assert(k % QK5_0 == 0);
  16029. const int nb = k / QK5_0;
  16030. for (int b = 0; b < n; b += k) {
  16031. block_q5_0 * restrict y = (block_q5_0 *)dst + b/QK5_0;
  16032. quantize_row_q5_0_reference(src + b, y, k);
  16033. for (int i = 0; i < nb; i++) {
  16034. uint32_t qh;
  16035. memcpy(&qh, &y[i].qh, sizeof(qh));
  16036. for (int j = 0; j < QK5_0; j += 2) {
  16037. const uint8_t vh0 = ((qh & (1u << (j/2 + 0 ))) >> (j/2 + 0 )) << 4;
  16038. const uint8_t vh1 = ((qh & (1u << (j/2 + 16))) >> (j/2 + 12));
16039. // fold the 5-bit values into 16 histogram bins
  16040. const uint8_t vi0 = ((y[i].qs[j/2] & 0x0F) | vh0) / 2;
  16041. const uint8_t vi1 = ((y[i].qs[j/2] >> 4) | vh1) / 2;
  16042. hist[vi0]++;
  16043. hist[vi1]++;
  16044. }
  16045. }
  16046. }
  16047. return (n/QK5_0*sizeof(block_q5_0));
  16048. }
  16049. size_t ggml_quantize_q5_1(const float * src, void * dst, int n, int k, int64_t * hist) {
  16050. assert(k % QK5_1 == 0);
  16051. const int nb = k / QK5_1;
  16052. for (int b = 0; b < n; b += k) {
  16053. block_q5_1 * restrict y = (block_q5_1 *)dst + b/QK5_1;
  16054. quantize_row_q5_1_reference(src + b, y, k);
  16055. for (int i = 0; i < nb; i++) {
  16056. uint32_t qh;
  16057. memcpy(&qh, &y[i].qh, sizeof(qh));
  16058. for (int j = 0; j < QK5_1; j += 2) {
  16059. const uint8_t vh0 = ((qh & (1u << (j/2 + 0 ))) >> (j/2 + 0 )) << 4;
  16060. const uint8_t vh1 = ((qh & (1u << (j/2 + 16))) >> (j/2 + 12));
16061. // fold the 5-bit values into 16 histogram bins
  16062. const uint8_t vi0 = ((y[i].qs[j/2] & 0x0F) | vh0) / 2;
  16063. const uint8_t vi1 = ((y[i].qs[j/2] >> 4) | vh1) / 2;
  16064. hist[vi0]++;
  16065. hist[vi1]++;
  16066. }
  16067. }
  16068. }
  16069. return (n/QK5_1*sizeof(block_q5_1));
  16070. }
  16071. size_t ggml_quantize_q8_0(const float * src, void * dst, int n, int k, int64_t * hist) {
  16072. assert(k % QK8_0 == 0);
  16073. const int nb = k / QK8_0;
  16074. for (int b = 0; b < n; b += k) {
  16075. block_q8_0 * restrict y = (block_q8_0 *)dst + b/QK8_0;
  16076. quantize_row_q8_0_reference(src + b, y, k);
  16077. for (int i = 0; i < nb; i++) {
  16078. for (int j = 0; j < QK8_0; ++j) {
  16079. const int8_t vi = y[i].qs[j];
  16080. hist[vi/16 + 8]++;
  16081. }
  16082. }
  16083. }
  16084. return (n/QK8_0*sizeof(block_q8_0));
  16085. }
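// Note: for Q8_0 the histogram folds the signed int8 quants into the same
// 16 bins used above (vi/16 + 8 maps [-128, 127] onto [0, 15]); the returned
// size follows the same n/QK*sizeof(block) convention as the other variants.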
  16086. bool ggml_quantize_requires_imatrix(enum ggml_type type) {
  16087. return
  16088. type == GGML_TYPE_IQ2_XXS ||
  16089. type == GGML_TYPE_IQ2_XS ||
  16090. type == GGML_TYPE_IQ1_S;
  16091. }
  16092. size_t ggml_quantize_chunk(enum ggml_type type, const float * src, void * dst, int start,
  16093. int nrows, int n_per_row, int64_t * hist, const float * imatrix) {
  16094. ggml_quantize_init(type); // this is noop if already initialized
  16095. size_t result = 0;
  16096. int n = nrows * n_per_row;
  16097. switch (type) {
  16098. case GGML_TYPE_Q4_0:
  16099. {
  16100. GGML_ASSERT(start % QK4_0 == 0);
  16101. GGML_ASSERT(start % n_per_row == 0);
  16102. size_t start_row = start / n_per_row;
  16103. size_t row_size = ggml_row_size(type, n_per_row);
  16104. result = quantize_q4_0(src + start, (char *)dst + start_row * row_size, nrows, n_per_row, hist, imatrix);
  16105. GGML_ASSERT(result == row_size * nrows);
  16106. } break;
  16107. case GGML_TYPE_Q4_1:
  16108. {
  16109. GGML_ASSERT(start % QK4_1 == 0);
  16110. GGML_ASSERT(start % n_per_row == 0);
  16111. size_t start_row = start / n_per_row;
  16112. size_t row_size = ggml_row_size(type, n_per_row);
  16113. result = quantize_q4_1(src + start, (char *)dst + start_row * row_size, nrows, n_per_row, hist, imatrix);
  16114. GGML_ASSERT(result == row_size * nrows);
  16115. } break;
  16116. case GGML_TYPE_Q5_0:
  16117. {
  16118. GGML_ASSERT(start % QK5_0 == 0);
  16119. GGML_ASSERT(start % n_per_row == 0);
  16120. size_t start_row = start / n_per_row;
  16121. size_t row_size = ggml_row_size(type, n_per_row);
  16122. result = quantize_q5_0(src + start, (char *)dst + start_row * row_size, nrows, n_per_row, hist, imatrix);
  16123. GGML_ASSERT(result == row_size * nrows);
  16124. } break;
  16125. case GGML_TYPE_Q5_1:
  16126. {
  16127. GGML_ASSERT(start % QK5_1 == 0);
  16128. GGML_ASSERT(start % n_per_row == 0);
  16129. size_t start_row = start / n_per_row;
  16130. size_t row_size = ggml_row_size(type, n_per_row);
  16131. result = quantize_q5_1(src + start, (char *)dst + start_row * row_size, nrows, n_per_row, hist, imatrix);
  16132. GGML_ASSERT(result == row_size * nrows);
  16133. } break;
  16134. case GGML_TYPE_Q8_0:
  16135. {
  16136. GGML_ASSERT(start % QK8_0 == 0);
  16137. block_q8_0 * block = (block_q8_0*)dst + start / QK8_0;
  16138. result = ggml_quantize_q8_0(src + start, block, n, n, hist);
  16139. } break;
  16140. case GGML_TYPE_Q2_K:
  16141. {
  16142. GGML_ASSERT(start % QK_K == 0);
  16143. GGML_ASSERT(start % n_per_row == 0);
  16144. size_t start_row = start / n_per_row;
  16145. size_t row_size = ggml_row_size(type, n_per_row);
  16146. result = quantize_q2_K(src + start, (char *)dst + start_row * row_size, nrows, n_per_row, hist, imatrix);
  16147. GGML_ASSERT(result == row_size * nrows);
  16148. } break;
  16149. case GGML_TYPE_Q3_K:
  16150. {
  16151. GGML_ASSERT(start % QK_K == 0);
  16152. GGML_ASSERT(start % n_per_row == 0);
  16153. size_t start_row = start / n_per_row;
  16154. size_t row_size = ggml_row_size(type, n_per_row);
  16155. result = quantize_q3_K(src + start, (char *)dst + start_row * row_size, nrows, n_per_row, hist, imatrix);
  16156. GGML_ASSERT(result == row_size * nrows);
  16157. } break;
  16158. case GGML_TYPE_Q4_K:
  16159. {
  16160. GGML_ASSERT(start % QK_K == 0);
  16161. GGML_ASSERT(start % n_per_row == 0);
  16162. size_t start_row = start / n_per_row;
  16163. size_t row_size = ggml_row_size(type, n_per_row);
  16164. result = quantize_q4_K(src + start, (char *)dst + start_row * row_size, nrows, n_per_row, hist, imatrix);
  16165. GGML_ASSERT(result == row_size * nrows);
  16166. } break;
  16167. case GGML_TYPE_Q5_K:
  16168. {
  16169. GGML_ASSERT(start % QK_K == 0);
  16170. GGML_ASSERT(start % n_per_row == 0);
  16171. size_t start_row = start / n_per_row;
  16172. size_t row_size = ggml_row_size(type, n_per_row);
  16173. result = quantize_q5_K(src + start, (char *)dst + start_row * row_size, nrows, n_per_row, hist, imatrix);
  16174. GGML_ASSERT(result == row_size * nrows);
  16175. } break;
  16176. case GGML_TYPE_Q6_K:
  16177. {
  16178. GGML_ASSERT(start % QK_K == 0);
  16179. GGML_ASSERT(start % n_per_row == 0);
  16180. size_t start_row = start / n_per_row;
  16181. size_t row_size = ggml_row_size(type, n_per_row);
  16182. result = quantize_q6_K(src + start, (char *)dst + start_row * row_size, nrows, n_per_row, hist, imatrix);
  16183. GGML_ASSERT(result == row_size * nrows);
  16184. } break;
  16185. case GGML_TYPE_IQ2_XXS:
  16186. {
  16187. GGML_ASSERT(start % QK_K == 0);
  16188. GGML_ASSERT(start % n_per_row == 0);
  16189. GGML_ASSERT(imatrix);
  16190. size_t start_row = start / n_per_row;
  16191. size_t row_size = ggml_row_size(type, n_per_row);
  16192. result = quantize_iq2_xxs(src + start, (char *)dst + start_row * row_size, nrows, n_per_row, hist, imatrix);
  16193. GGML_ASSERT(result == row_size * nrows);
  16194. } break;
  16195. case GGML_TYPE_IQ2_XS:
  16196. {
  16197. GGML_ASSERT(start % QK_K == 0);
  16198. GGML_ASSERT(start % n_per_row == 0);
  16199. GGML_ASSERT(imatrix);
  16200. size_t start_row = start / n_per_row;
  16201. size_t row_size = ggml_row_size(type, n_per_row);
  16202. result = quantize_iq2_xs(src + start, (char *)dst + start_row * row_size, nrows, n_per_row, hist, imatrix);
  16203. GGML_ASSERT(result == row_size * nrows);
  16204. } break;
  16205. case GGML_TYPE_IQ3_XXS:
  16206. {
  16207. GGML_ASSERT(start % QK_K == 0);
  16208. GGML_ASSERT(start % n_per_row == 0);
  16209. size_t start_row = start / n_per_row;
  16210. size_t row_size = ggml_row_size(type, n_per_row);
  16211. result = quantize_iq3_xxs(src + start, (char *)dst + start_row * row_size, nrows, n_per_row, hist, imatrix);
  16212. GGML_ASSERT(result == row_size * nrows);
  16213. } break;
  16214. case GGML_TYPE_IQ1_S:
  16215. {
  16216. GGML_ASSERT(start % QK_K == 0);
  16217. GGML_ASSERT(start % n_per_row == 0);
  16218. size_t start_row = start / n_per_row;
  16219. size_t row_size = ggml_row_size(type, n_per_row);
  16220. result = quantize_iq1_s(src + start, (char *)dst + start_row * row_size, nrows, n_per_row, hist, imatrix);
  16221. GGML_ASSERT(result == row_size * nrows);
  16222. } break;
  16223. case GGML_TYPE_IQ4_NL:
  16224. {
  16225. GGML_ASSERT(start % QK4_NL == 0);
  16226. GGML_ASSERT(start % n_per_row == 0);
  16227. size_t start_row = start / n_per_row;
  16228. size_t row_size = ggml_row_size(type, n_per_row);
  16229. result = quantize_iq4_nl(src + start, (char *)dst + start_row * row_size, nrows, n_per_row, hist, imatrix);
  16230. GGML_ASSERT(result == row_size * nrows);
  16231. } break;
  16232. case GGML_TYPE_F16:
  16233. {
  16234. size_t elemsize = sizeof(ggml_fp16_t);
  16235. ggml_fp32_to_fp16_row(src + start, (ggml_fp16_t *)dst + start, n);
  16236. result = n * elemsize;
  16237. } break;
  16238. case GGML_TYPE_F32:
  16239. {
  16240. size_t elemsize = sizeof(float);
  16241. result = n * elemsize;
  16242. memcpy((uint8_t *)dst + start * elemsize, src + start, result);
  16243. } break;
  16244. default:
  16245. assert(false);
  16246. }
  16247. return result;
  16248. }
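// Illustrative sketch (caller-side, hypothetical tensor dimensions): quantizing a
// whole 2D weight tensor in one call; `imatrix` may be NULL except for the types
// reported by ggml_quantize_requires_imatrix().
//
//     int     n_per_row = 4096, nrows = 32000;
//     int64_t hist[16]  = {0};
//     size_t  buf_size  = ggml_row_size(GGML_TYPE_Q4_K, n_per_row) * nrows;
//     void  * buf       = malloc(buf_size);
//     size_t  written   = ggml_quantize_chunk(GGML_TYPE_Q4_K, src, buf,
//                                             /*start=*/0, nrows, n_per_row, hist, NULL);
//     assert(written == buf_size);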
  16249. ////////////////////////////////////////////////////////////////////////////////
  16250. struct gguf_str {
  16251. uint64_t n; // GGUFv2
  16252. char * data;
  16253. };
  16254. static const size_t GGUF_TYPE_SIZE[GGUF_TYPE_COUNT] = {
  16255. [GGUF_TYPE_UINT8] = sizeof(uint8_t),
  16256. [GGUF_TYPE_INT8] = sizeof(int8_t),
  16257. [GGUF_TYPE_UINT16] = sizeof(uint16_t),
  16258. [GGUF_TYPE_INT16] = sizeof(int16_t),
  16259. [GGUF_TYPE_UINT32] = sizeof(uint32_t),
  16260. [GGUF_TYPE_INT32] = sizeof(int32_t),
  16261. [GGUF_TYPE_FLOAT32] = sizeof(float),
  16262. [GGUF_TYPE_BOOL] = sizeof(bool),
  16263. [GGUF_TYPE_STRING] = sizeof(struct gguf_str),
  16264. [GGUF_TYPE_UINT64] = sizeof(uint64_t),
  16265. [GGUF_TYPE_INT64] = sizeof(int64_t),
  16266. [GGUF_TYPE_FLOAT64] = sizeof(double),
  16267. [GGUF_TYPE_ARRAY] = 0, // undefined
  16268. };
  16269. static_assert(GGUF_TYPE_COUNT == 13, "GGUF_TYPE_COUNT != 13");
  16270. static const char * GGUF_TYPE_NAME[GGUF_TYPE_COUNT] = {
  16271. [GGUF_TYPE_UINT8] = "u8",
  16272. [GGUF_TYPE_INT8] = "i8",
  16273. [GGUF_TYPE_UINT16] = "u16",
  16274. [GGUF_TYPE_INT16] = "i16",
  16275. [GGUF_TYPE_UINT32] = "u32",
  16276. [GGUF_TYPE_INT32] = "i32",
  16277. [GGUF_TYPE_FLOAT32] = "f32",
  16278. [GGUF_TYPE_BOOL] = "bool",
  16279. [GGUF_TYPE_STRING] = "str",
  16280. [GGUF_TYPE_ARRAY] = "arr",
  16281. [GGUF_TYPE_UINT64] = "u64",
  16282. [GGUF_TYPE_INT64] = "i64",
  16283. [GGUF_TYPE_FLOAT64] = "f64",
  16284. };
  16285. static_assert(GGUF_TYPE_COUNT == 13, "GGUF_TYPE_COUNT != 13");
  16286. union gguf_value {
  16287. uint8_t uint8;
  16288. int8_t int8;
  16289. uint16_t uint16;
  16290. int16_t int16;
  16291. uint32_t uint32;
  16292. int32_t int32;
  16293. float float32;
  16294. uint64_t uint64;
  16295. int64_t int64;
  16296. double float64;
  16297. bool bool_;
  16298. struct gguf_str str;
  16299. struct {
  16300. enum gguf_type type;
  16301. uint64_t n; // GGUFv2
  16302. void * data;
  16303. } arr;
  16304. };
  16305. struct gguf_kv {
  16306. struct gguf_str key;
  16307. enum gguf_type type;
  16308. union gguf_value value;
  16309. };
  16310. struct gguf_header {
  16311. char magic[4];
  16312. uint32_t version;
  16313. uint64_t n_tensors; // GGUFv2
  16314. uint64_t n_kv; // GGUFv2
  16315. };
  16316. struct gguf_tensor_info {
  16317. struct gguf_str name;
  16318. uint32_t n_dims;
  16319. uint64_t ne[GGML_MAX_DIMS];
  16320. enum ggml_type type;
  16321. uint64_t offset; // offset from start of `data`, must be a multiple of `ALIGNMENT`
  16322. // for writing API
  16323. const void * data;
  16324. size_t size;
  16325. };
  16326. struct gguf_context {
  16327. struct gguf_header header;
  16328. struct gguf_kv * kv;
  16329. struct gguf_tensor_info * infos;
  16330. size_t alignment;
  16331. size_t offset; // offset of `data` from beginning of file
  16332. size_t size; // size of `data` in bytes
  16333. //uint8_t * padding;
  16334. void * data;
  16335. };
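// On-disk layout handled by the reader below (see gguf_init_from_file): the
// header, followed by n_kv key-value pairs, then n_tensors tensor-info records,
// padding up to `alignment`, and finally the raw tensor data blob; `offset` and
// `size` refer to that data section.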
  16336. static size_t gguf_type_size(enum gguf_type type) {
  16337. GGML_ASSERT(0 <= type && type < GGUF_TYPE_COUNT);
  16338. return GGUF_TYPE_SIZE[type];
  16339. }
  16340. static void gguf_tensor_info_sanitize(struct gguf_tensor_info * info) {
  16341. GGML_ASSERT(info->n_dims <= GGML_MAX_DIMS);
  16342. GGML_ASSERT(0 <= info->type && info->type < GGML_TYPE_COUNT);
  16343. for (uint32_t i = 0; i < info->n_dims; ++i) {
  16344. GGML_ASSERT(info->ne[i] > 0);
  16345. }
  16346. // prevent overflow for total number of elements
  16347. GGML_ASSERT(INT64_MAX/info->ne[1] > info->ne[0]);
  16348. GGML_ASSERT(INT64_MAX/info->ne[2] > info->ne[0]*info->ne[1]);
  16349. GGML_ASSERT(INT64_MAX/info->ne[3] > info->ne[0]*info->ne[1]*info->ne[2]);
  16350. }
  16351. static bool gguf_fread_el(FILE * file, void * dst, size_t size, size_t * offset) {
  16352. const size_t n = fread(dst, 1, size, file);
  16353. *offset += n;
  16354. return n == size;
  16355. }
  16356. static bool gguf_fread_str(FILE * file, struct gguf_str * p, size_t * offset) {
  16357. p->n = 0;
  16358. p->data = NULL;
  16359. bool ok = true;
  16360. ok = ok && gguf_fread_el(file, &p->n, sizeof(p->n), offset);
16361. // early exit if the string length is invalid, prevents integer overflow
  16362. if (p->n == SIZE_MAX) {
  16363. fprintf(stderr, "%s: invalid string length (%" PRIu64 ")\n", __func__, p->n);
  16364. return false;
  16365. }
  16366. p->data = GGML_CALLOC(p->n + 1, 1);
  16367. ok = ok && gguf_fread_el(file, p->data, p->n, offset);
  16368. return ok;
  16369. }
  16370. struct gguf_context * gguf_init_empty(void) {
  16371. struct gguf_context * ctx = GGML_ALIGNED_MALLOC(sizeof(struct gguf_context));
  16372. memcpy(ctx->header.magic, GGUF_MAGIC, sizeof(ctx->header.magic));
  16373. ctx->header.version = GGUF_VERSION;
  16374. ctx->header.n_tensors = 0;
  16375. ctx->header.n_kv = 0;
  16376. ctx->kv = NULL;
  16377. ctx->infos = NULL;
  16378. ctx->alignment = GGUF_DEFAULT_ALIGNMENT;
  16379. ctx->offset = 0;
  16380. ctx->size = 0;
  16381. ctx->data = NULL;
  16382. return ctx;
  16383. }
  16384. struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_params params) {
  16385. FILE * file = fopen(fname, "rb");
  16386. if (!file) {
  16387. return NULL;
  16388. }
  16389. // offset from start of file
  16390. size_t offset = 0;
  16391. char magic[4];
  16392. // check the magic before making allocations
  16393. {
  16394. gguf_fread_el(file, &magic, sizeof(magic), &offset);
  16395. for (uint32_t i = 0; i < sizeof(magic); i++) {
  16396. if (magic[i] != GGUF_MAGIC[i]) {
  16397. fprintf(stderr, "%s: invalid magic characters '%c%c%c%c'\n", __func__, magic[0], magic[1], magic[2], magic[3]);
  16398. fclose(file);
  16399. return NULL;
  16400. }
  16401. }
  16402. }
  16403. bool ok = true;
  16404. struct gguf_context * ctx = GGML_ALIGNED_MALLOC(sizeof(struct gguf_context));
  16405. // read the header
  16406. {
  16407. strncpy(ctx->header.magic, magic, 4);
  16408. ctx->kv = NULL;
  16409. ctx->infos = NULL;
  16410. ctx->data = NULL;
  16411. ok = ok && gguf_fread_el(file, &ctx->header.version, sizeof(ctx->header.version), &offset);
  16412. ok = ok && gguf_fread_el(file, &ctx->header.n_tensors, sizeof(ctx->header.n_tensors), &offset);
  16413. ok = ok && gguf_fread_el(file, &ctx->header.n_kv, sizeof(ctx->header.n_kv), &offset);
  16414. if (ctx->header.version == 1) {
  16415. fprintf(stderr, "%s: GGUFv1 is no longer supported. please use a more up-to-date version\n", __func__);
  16416. fclose(file);
  16417. gguf_free(ctx);
  16418. return NULL;
  16419. }
16420. // sanity checks to prevent integer/buffer overflows
  16421. ok = ok && (ctx->header.n_tensors < (SIZE_MAX/2)/sizeof(struct gguf_tensor_info));
  16422. ok = ok && (ctx->header.n_tensors < (SIZE_MAX/2)/ggml_tensor_overhead());
  16423. ok = ok && (ctx->header.n_kv < (SIZE_MAX/2)/sizeof(struct gguf_kv));
  16424. if (!ok) {
  16425. fprintf(stderr, "%s: failed to read header\n", __func__);
  16426. fclose(file);
  16427. gguf_free(ctx);
  16428. return NULL;
  16429. }
  16430. }
  16431. // read the kv pairs
  16432. {
  16433. ctx->kv = GGML_MALLOC(ctx->header.n_kv * sizeof(struct gguf_kv));
  16434. for (uint64_t i = 0; i < ctx->header.n_kv; ++i) {
  16435. struct gguf_kv * kv = &ctx->kv[i];
  16436. //fprintf(stderr, "%s: reading kv %d\n", __func__, i);
  16437. ok = ok && gguf_fread_str(file, &kv->key, &offset);
  16438. ok = ok && gguf_fread_el (file, &kv->type, sizeof(kv->type), &offset);
  16439. //fprintf(stderr, "%s: reading kv with key %s\n", __func__, kv->key.data);
  16440. switch (kv->type) {
  16441. case GGUF_TYPE_UINT8: ok = ok && gguf_fread_el (file, &kv->value.uint8, sizeof(kv->value.uint8), &offset); break;
  16442. case GGUF_TYPE_INT8: ok = ok && gguf_fread_el (file, &kv->value.int8, sizeof(kv->value.int8), &offset); break;
  16443. case GGUF_TYPE_UINT16: ok = ok && gguf_fread_el (file, &kv->value.uint16, sizeof(kv->value.uint16), &offset); break;
  16444. case GGUF_TYPE_INT16: ok = ok && gguf_fread_el (file, &kv->value.int16, sizeof(kv->value.int16), &offset); break;
  16445. case GGUF_TYPE_UINT32: ok = ok && gguf_fread_el (file, &kv->value.uint32, sizeof(kv->value.uint32), &offset); break;
  16446. case GGUF_TYPE_INT32: ok = ok && gguf_fread_el (file, &kv->value.int32, sizeof(kv->value.int32), &offset); break;
  16447. case GGUF_TYPE_FLOAT32: ok = ok && gguf_fread_el (file, &kv->value.float32, sizeof(kv->value.float32), &offset); break;
  16448. case GGUF_TYPE_UINT64: ok = ok && gguf_fread_el (file, &kv->value.uint64, sizeof(kv->value.uint64), &offset); break;
  16449. case GGUF_TYPE_INT64: ok = ok && gguf_fread_el (file, &kv->value.int64, sizeof(kv->value.int64), &offset); break;
  16450. case GGUF_TYPE_FLOAT64: ok = ok && gguf_fread_el (file, &kv->value.float64, sizeof(kv->value.float64), &offset); break;
  16451. case GGUF_TYPE_BOOL: ok = ok && gguf_fread_el (file, &kv->value.bool_, sizeof(kv->value.bool_), &offset); break;
  16452. case GGUF_TYPE_STRING: ok = ok && gguf_fread_str(file, &kv->value.str, &offset); break;
  16453. case GGUF_TYPE_ARRAY:
  16454. {
  16455. ok = ok && gguf_fread_el(file, &kv->value.arr.type, sizeof(kv->value.arr.type), &offset);
  16456. ok = ok && gguf_fread_el(file, &kv->value.arr.n, sizeof(kv->value.arr.n), &offset);
  16457. switch (kv->value.arr.type) {
  16458. case GGUF_TYPE_UINT8:
  16459. case GGUF_TYPE_INT8:
  16460. case GGUF_TYPE_UINT16:
  16461. case GGUF_TYPE_INT16:
  16462. case GGUF_TYPE_UINT32:
  16463. case GGUF_TYPE_INT32:
  16464. case GGUF_TYPE_FLOAT32:
  16465. case GGUF_TYPE_UINT64:
  16466. case GGUF_TYPE_INT64:
  16467. case GGUF_TYPE_FLOAT64:
  16468. case GGUF_TYPE_BOOL:
  16469. {
16470. // prevent integer overflow in the malloc below
  16471. if (kv->value.arr.n >= SIZE_MAX/gguf_type_size(kv->value.arr.type)) {
  16472. fprintf(stderr, "%s: array size is too large (%" PRIu64 ")\n", __func__, kv->value.arr.n);
  16473. fclose(file);
  16474. gguf_free(ctx);
  16475. return NULL;
  16476. }
  16477. kv->value.arr.data = GGML_MALLOC(kv->value.arr.n * gguf_type_size(kv->value.arr.type));
  16478. ok = ok && gguf_fread_el(file, kv->value.arr.data, kv->value.arr.n * gguf_type_size(kv->value.arr.type), &offset);
  16479. } break;
  16480. case GGUF_TYPE_STRING:
  16481. {
16482. // prevent integer overflow in the malloc below
  16483. if (kv->value.arr.n >= SIZE_MAX/sizeof(struct gguf_str)) {
  16484. fprintf(stderr, "%s: array size is too large (%" PRIu64 ")\n", __func__, kv->value.arr.n);
  16485. fclose(file);
  16486. gguf_free(ctx);
  16487. return NULL;
  16488. }
  16489. kv->value.arr.data = GGML_MALLOC(kv->value.arr.n * sizeof(struct gguf_str));
  16490. for (uint64_t j = 0; j < kv->value.arr.n; ++j) {
  16491. ok = ok && gguf_fread_str(file, &((struct gguf_str *) kv->value.arr.data)[j], &offset);
  16492. }
  16493. } break;
  16494. case GGUF_TYPE_ARRAY:
  16495. default: GGML_ASSERT(false && "invalid type"); break;
  16496. }
  16497. } break;
  16498. default: GGML_ASSERT(false && "invalid type");
  16499. }
  16500. if (!ok) {
  16501. break;
  16502. }
  16503. }
  16504. if (!ok) {
  16505. fprintf(stderr, "%s: failed to read key-value pairs\n", __func__);
  16506. fclose(file);
  16507. gguf_free(ctx);
  16508. return NULL;
  16509. }
  16510. }
  16511. // read the tensor infos
  16512. {
  16513. ctx->infos = GGML_MALLOC(ctx->header.n_tensors * sizeof(struct gguf_tensor_info));
  16514. for (uint64_t i = 0; i < ctx->header.n_tensors; ++i) {
  16515. struct gguf_tensor_info * info = &ctx->infos[i];
  16516. for (int j = 0; j < GGML_MAX_DIMS; ++j) {
  16517. info->ne[j] = 1;
  16518. }
  16519. ok = ok && gguf_fread_str(file, &info->name, &offset);
  16520. ok = ok && gguf_fread_el (file, &info->n_dims, sizeof(info->n_dims), &offset);
  16521. ok = ok && (info->n_dims <= GGML_MAX_DIMS);
  16522. for (uint32_t j = 0; j < info->n_dims; ++j) {
  16523. ok = ok && gguf_fread_el(file, &info->ne[j], sizeof(info->ne[j]), &offset);
  16524. }
  16525. ok = ok && gguf_fread_el (file, &info->type, sizeof(info->type), &offset);
  16526. ok = ok && gguf_fread_el (file, &info->offset, sizeof(info->offset), &offset);
  16527. gguf_tensor_info_sanitize(info);
  16528. if (!ok) {
  16529. fprintf(stderr, "%s: failed to read tensor info\n", __func__);
  16530. fclose(file);
  16531. gguf_free(ctx);
  16532. return NULL;
  16533. }
  16534. }
  16535. }
  16536. ctx->alignment = GGUF_DEFAULT_ALIGNMENT;
  16537. int alignment_idx = gguf_find_key(ctx, "general.alignment");
  16538. if (alignment_idx != -1) {
  16539. ctx->alignment = gguf_get_val_u32(ctx, alignment_idx);
  16540. }
  16541. // we require the data section to be aligned, so take into account any padding
  16542. {
  16543. const size_t offset_pad = offset % ctx->alignment;
  16544. if (offset_pad != 0) {
  16545. offset += ctx->alignment - offset_pad;
  16546. fseek(file, offset, SEEK_SET);
  16547. }
  16548. }
  16549. // store the current file offset - this is where the data section starts
  16550. ctx->offset = offset;
  16551. // compute the total size of the data section, taking into account the alignment
  16552. {
  16553. ctx->size = 0;
  16554. for (uint64_t i = 0; i < ctx->header.n_tensors; ++i) {
  16555. struct gguf_tensor_info * info = &ctx->infos[i];
  16556. const int64_t ne =
  16557. (int64_t) info->ne[0] *
  16558. (int64_t) info->ne[1] *
  16559. (int64_t) info->ne[2] *
  16560. (int64_t) info->ne[3];
  16561. if (ne % ggml_blck_size(info->type) != 0) {
  16562. fprintf(stderr, "%s: tensor '%s' of type %d (%s) number of elements (%" PRId64 ") is not a multiple of block size (%d)\n",
  16563. __func__, info->name.data, (int)info->type, ggml_type_name(info->type), ne, ggml_blck_size(info->type));
  16564. fclose(file);
  16565. gguf_free(ctx);
  16566. return NULL;
  16567. }
  16568. const size_t size_cur = ggml_row_size(info->type, ne);
  16569. ctx->size += GGML_PAD(size_cur, ctx->alignment);
  16570. }
  16571. }
  16572. // load the tensor data only if requested
  16573. if (params.ctx != NULL) {
16574. // if params.no_alloc is set, we create "empty" tensors and do not read the binary blob
16575. // otherwise, we load the binary blob into the created ggml_context as well, and point the "data" members of
16576. // the ggml_tensor structs to the appropriate locations in the binary blob
  16577. // compute the exact size needed for the new ggml_context
  16578. const size_t mem_size =
  16579. params.no_alloc ?
  16580. (ctx->header.n_tensors )*ggml_tensor_overhead() :
  16581. (ctx->header.n_tensors + 1)*ggml_tensor_overhead() + ctx->size;
  16582. struct ggml_init_params pdata = {
  16583. .mem_size = mem_size,
  16584. .mem_buffer = NULL,
  16585. .no_alloc = params.no_alloc,
  16586. };
  16587. *params.ctx = ggml_init(pdata);
  16588. struct ggml_context * ctx_data = *params.ctx;
  16589. struct ggml_tensor * data = NULL;
  16590. if (!params.no_alloc) {
  16591. data = ggml_new_tensor_1d(ctx_data, GGML_TYPE_I8, ctx->size);
  16592. ok = ok && data != NULL;
  16593. // read the binary blob with the tensor data
  16594. ok = ok && gguf_fread_el(file, data->data, ctx->size, &offset);
  16595. if (!ok) {
  16596. fprintf(stderr, "%s: failed to read tensor data\n", __func__);
  16597. fclose(file);
  16598. ggml_free(ctx_data);
  16599. gguf_free(ctx);
  16600. return NULL;
  16601. }
  16602. ctx->data = data->data;
  16603. }
  16604. ggml_set_no_alloc(ctx_data, true);
  16605. // create the tensors
  16606. for (uint64_t i = 0; i < ctx->header.n_tensors; ++i) {
  16607. const int64_t ne[GGML_MAX_DIMS] = {
  16608. ctx->infos[i].ne[0],
  16609. ctx->infos[i].ne[1],
  16610. ctx->infos[i].ne[2],
  16611. ctx->infos[i].ne[3],
  16612. };
  16613. struct ggml_tensor * cur = ggml_new_tensor(ctx_data, ctx->infos[i].type, ctx->infos[i].n_dims, ne);
  16614. ok = ok && cur != NULL;
  16615. ggml_set_name(cur, ctx->infos[i].name.data);
  16616. if (!ok) {
  16617. break;
  16618. }
  16619. // point the data member to the appropriate location in the binary blob using the tensor infos
  16620. if (!params.no_alloc) {
  16621. //cur->data = (char *) data->data + ctx->infos[i].offset - ctx->offset; // offset from start of file
  16622. cur->data = (char *) data->data + ctx->infos[i].offset; // offset from data
  16623. }
  16624. }
  16625. if (!ok) {
  16626. fprintf(stderr, "%s: failed to read the tensor data\n", __func__);
  16627. fclose(file);
  16628. ggml_free(ctx_data);
  16629. gguf_free(ctx);
  16630. return NULL;
  16631. }
  16632. ggml_set_no_alloc(ctx_data, params.no_alloc);
  16633. }
  16634. fclose(file);
  16635. return ctx;
  16636. }
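// Illustrative sketch (assuming a file "model.gguf" exists): loading metadata
// and tensor data into a fresh ggml_context owned by the caller.
//
//     struct ggml_context * ctx_data = NULL;
//     struct gguf_init_params ip = { .no_alloc = false, .ctx = &ctx_data };
//     struct gguf_context * gctx = gguf_init_from_file("model.gguf", ip);
//     if (gctx) {
//         // ... look up tensors by name in ctx_data, read kv pairs from gctx ...
//         gguf_free(gctx);
//         ggml_free(ctx_data);
//     }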
  16637. void gguf_free(struct gguf_context * ctx) {
  16638. if (ctx == NULL) {
  16639. return;
  16640. }
  16641. if (ctx->kv) {
  16642. // free string memory - not great..
  16643. for (uint64_t i = 0; i < ctx->header.n_kv; ++i) {
  16644. struct gguf_kv * kv = &ctx->kv[i];
  16645. if (kv->key.data) {
  16646. GGML_FREE(kv->key.data);
  16647. }
  16648. if (kv->type == GGUF_TYPE_STRING) {
  16649. if (kv->value.str.data) {
  16650. GGML_FREE(kv->value.str.data);
  16651. }
  16652. }
  16653. if (kv->type == GGUF_TYPE_ARRAY) {
  16654. if (kv->value.arr.data) {
  16655. if (kv->value.arr.type == GGUF_TYPE_STRING) {
  16656. for (uint64_t j = 0; j < kv->value.arr.n; ++j) {
  16657. struct gguf_str * str = &((struct gguf_str *) kv->value.arr.data)[j];
  16658. if (str->data) {
  16659. GGML_FREE(str->data);
  16660. }
  16661. }
  16662. }
  16663. GGML_FREE(kv->value.arr.data);
  16664. }
  16665. }
  16666. }
  16667. GGML_FREE(ctx->kv);
  16668. }
  16669. if (ctx->infos) {
  16670. for (uint64_t i = 0; i < ctx->header.n_tensors; ++i) {
  16671. struct gguf_tensor_info * info = &ctx->infos[i];
  16672. if (info->name.data) {
  16673. GGML_FREE(info->name.data);
  16674. }
  16675. }
  16676. GGML_FREE(ctx->infos);
  16677. }
  16678. GGML_ALIGNED_FREE(ctx);
  16679. }
  16680. const char * gguf_type_name(enum gguf_type type) {
  16681. return GGUF_TYPE_NAME[type];
  16682. }
  16683. int gguf_get_version(const struct gguf_context * ctx) {
  16684. return ctx->header.version;
  16685. }
  16686. size_t gguf_get_alignment(const struct gguf_context * ctx) {
  16687. return ctx->alignment;
  16688. }
  16689. size_t gguf_get_data_offset(const struct gguf_context * ctx) {
  16690. return ctx->offset;
  16691. }
  16692. void * gguf_get_data(const struct gguf_context * ctx) {
  16693. return ctx->data;
  16694. }
  16695. int gguf_get_n_kv(const struct gguf_context * ctx) {
  16696. return ctx->header.n_kv;
  16697. }
  16698. int gguf_find_key(const struct gguf_context * ctx, const char * key) {
  16699. // return -1 if key not found
  16700. int keyfound = -1;
  16701. const int n_kv = gguf_get_n_kv(ctx);
  16702. for (int i = 0; i < n_kv; ++i) {
  16703. if (strcmp(key, gguf_get_key(ctx, i)) == 0) {
  16704. keyfound = i;
  16705. break;
  16706. }
  16707. }
  16708. return keyfound;
  16709. }
  16710. const char * gguf_get_key(const struct gguf_context * ctx, int key_id) {
  16711. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  16712. return ctx->kv[key_id].key.data;
  16713. }
  16714. enum gguf_type gguf_get_kv_type(const struct gguf_context * ctx, int key_id) {
  16715. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  16716. return ctx->kv[key_id].type;
  16717. }
  16718. enum gguf_type gguf_get_arr_type(const struct gguf_context * ctx, int key_id) {
  16719. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  16720. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_ARRAY);
  16721. return ctx->kv[key_id].value.arr.type;
  16722. }
  16723. const void * gguf_get_arr_data(const struct gguf_context * ctx, int key_id) {
  16724. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  16725. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_ARRAY);
  16726. return ctx->kv[key_id].value.arr.data;
  16727. }
  16728. const char * gguf_get_arr_str(const struct gguf_context * ctx, int key_id, int i) {
  16729. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  16730. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_ARRAY);
  16731. struct gguf_kv * kv = &ctx->kv[key_id];
  16732. struct gguf_str * str = &((struct gguf_str *) kv->value.arr.data)[i];
  16733. return str->data;
  16734. }
  16735. int gguf_get_arr_n(const struct gguf_context * ctx, int key_id) {
  16736. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  16737. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_ARRAY);
  16738. return ctx->kv[key_id].value.arr.n;
  16739. }
  16740. uint8_t gguf_get_val_u8(const struct gguf_context * ctx, int key_id) {
  16741. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  16742. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_UINT8);
  16743. return ctx->kv[key_id].value.uint8;
  16744. }
  16745. int8_t gguf_get_val_i8(const struct gguf_context * ctx, int key_id) {
  16746. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  16747. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_INT8);
  16748. return ctx->kv[key_id].value.int8;
  16749. }
  16750. uint16_t gguf_get_val_u16(const struct gguf_context * ctx, int key_id) {
  16751. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  16752. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_UINT16);
  16753. return ctx->kv[key_id].value.uint16;
  16754. }
  16755. int16_t gguf_get_val_i16(const struct gguf_context * ctx, int key_id) {
  16756. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  16757. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_INT16);
  16758. return ctx->kv[key_id].value.int16;
  16759. }
  16760. uint32_t gguf_get_val_u32(const struct gguf_context * ctx, int key_id) {
  16761. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  16762. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_UINT32);
  16763. return ctx->kv[key_id].value.uint32;
  16764. }
  16765. int32_t gguf_get_val_i32(const struct gguf_context * ctx, int key_id) {
  16766. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  16767. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_INT32);
  16768. return ctx->kv[key_id].value.int32;
  16769. }
  16770. float gguf_get_val_f32(const struct gguf_context * ctx, int key_id) {
  16771. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  16772. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_FLOAT32);
  16773. return ctx->kv[key_id].value.float32;
  16774. }
  16775. uint64_t gguf_get_val_u64(const struct gguf_context * ctx, int key_id) {
  16776. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  16777. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_UINT64);
  16778. return ctx->kv[key_id].value.uint64;
  16779. }
  16780. int64_t gguf_get_val_i64(const struct gguf_context * ctx, int key_id) {
  16781. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  16782. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_INT64);
  16783. return ctx->kv[key_id].value.int64;
  16784. }
  16785. double gguf_get_val_f64(const struct gguf_context * ctx, int key_id) {
  16786. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  16787. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_FLOAT64);
  16788. return ctx->kv[key_id].value.float64;
  16789. }
  16790. bool gguf_get_val_bool(const struct gguf_context * ctx, int key_id) {
  16791. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  16792. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_BOOL);
  16793. return ctx->kv[key_id].value.bool_;
  16794. }
  16795. const char * gguf_get_val_str(const struct gguf_context * ctx, int key_id) {
  16796. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  16797. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_STRING);
  16798. return ctx->kv[key_id].value.str.data;
  16799. }
  16800. const void * gguf_get_val_data(const struct gguf_context * ctx, int key_id) {
  16801. GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
  16802. GGML_ASSERT(ctx->kv[key_id].type != GGUF_TYPE_ARRAY);
  16803. GGML_ASSERT(ctx->kv[key_id].type != GGUF_TYPE_STRING);
  16804. return &ctx->kv[key_id].value;
  16805. }
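// Illustrative sketch (using the same key the loader itself queries): reading a
// typed kv value safely by first locating the key and checking its type.
//
//     const int kid = gguf_find_key(gctx, "general.alignment");
//     if (kid >= 0 && gguf_get_kv_type(gctx, kid) == GGUF_TYPE_UINT32) {
//         uint32_t alignment = gguf_get_val_u32(gctx, kid);
//     }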
  16806. int gguf_get_n_tensors(const struct gguf_context * ctx) {
  16807. return ctx->header.n_tensors;
  16808. }
  16809. int gguf_find_tensor(const struct gguf_context * ctx, const char * name) {
  16810. // return -1 if tensor not found
  16811. int tensorfound = -1;
  16812. const int n_tensors = gguf_get_n_tensors(ctx);
  16813. for (int i = 0; i < n_tensors; ++i) {
  16814. if (strcmp(name, gguf_get_tensor_name(ctx, i)) == 0) {
  16815. tensorfound = i;
  16816. break;
  16817. }
  16818. }
  16819. return tensorfound;
  16820. }
  16821. size_t gguf_get_tensor_offset(const struct gguf_context * ctx, int i) {
  16822. return ctx->infos[i].offset;
  16823. }
  16824. char * gguf_get_tensor_name(const struct gguf_context * ctx, int i) {
  16825. return ctx->infos[i].name.data;
  16826. }
  16827. enum ggml_type gguf_get_tensor_type(const struct gguf_context * ctx, int i) {
  16828. return ctx->infos[i].type;
  16829. }

// returns the index
static int gguf_get_or_add_key(struct gguf_context * ctx, const char * key) {
    const int idx = gguf_find_key(ctx, key);
    if (idx >= 0) {
        return idx;
    }

    const int n_kv = gguf_get_n_kv(ctx);
    ctx->kv = realloc(ctx->kv, (n_kv + 1) * sizeof(struct gguf_kv));
    ctx->kv[n_kv].key.n    = strlen(key);
    ctx->kv[n_kv].key.data = strdup(key);
    ctx->header.n_kv++;

    return n_kv;
}

void gguf_set_val_u8(struct gguf_context * ctx, const char * key, uint8_t val) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type        = GGUF_TYPE_UINT8;
    ctx->kv[idx].value.uint8 = val;
}

void gguf_set_val_i8(struct gguf_context * ctx, const char * key, int8_t val) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type       = GGUF_TYPE_INT8;
    ctx->kv[idx].value.int8 = val;
}

void gguf_set_val_u16(struct gguf_context * ctx, const char * key, uint16_t val) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type         = GGUF_TYPE_UINT16;
    ctx->kv[idx].value.uint16 = val;
}

void gguf_set_val_i16(struct gguf_context * ctx, const char * key, int16_t val) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type        = GGUF_TYPE_INT16;
    ctx->kv[idx].value.int16 = val;
}

void gguf_set_val_u32(struct gguf_context * ctx, const char * key, uint32_t val) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type         = GGUF_TYPE_UINT32;
    ctx->kv[idx].value.uint32 = val;
}

void gguf_set_val_i32(struct gguf_context * ctx, const char * key, int32_t val) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type        = GGUF_TYPE_INT32;
    ctx->kv[idx].value.int32 = val;
}

void gguf_set_val_f32(struct gguf_context * ctx, const char * key, float val) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type          = GGUF_TYPE_FLOAT32;
    ctx->kv[idx].value.float32 = val;
}

void gguf_set_val_u64(struct gguf_context * ctx, const char * key, uint64_t val) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type         = GGUF_TYPE_UINT64;
    ctx->kv[idx].value.uint64 = val;
}

void gguf_set_val_i64(struct gguf_context * ctx, const char * key, int64_t val) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type        = GGUF_TYPE_INT64;
    ctx->kv[idx].value.int64 = val;
}

void gguf_set_val_f64(struct gguf_context * ctx, const char * key, double val) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type          = GGUF_TYPE_FLOAT64;
    ctx->kv[idx].value.float64 = val;
}

void gguf_set_val_bool(struct gguf_context * ctx, const char * key, bool val) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type        = GGUF_TYPE_BOOL;
    ctx->kv[idx].value.bool_ = val;
}

void gguf_set_val_str(struct gguf_context * ctx, const char * key, const char * val) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type           = GGUF_TYPE_STRING;
    ctx->kv[idx].value.str.n    = strlen(val);
    ctx->kv[idx].value.str.data = strdup(val);
}
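
// Illustrative usage sketch (editorial addition, not part of ggml): populating
// typed metadata on a freshly created context. The key names below follow the
// usual GGUF naming scheme but are only examples.
//
//   struct gguf_context * gctx = gguf_init_empty();
//   gguf_set_val_str (gctx, "general.architecture", "llama");
//   gguf_set_val_u32 (gctx, "llama.context_length", 4096);
//   gguf_set_val_f32 (gctx, "llama.rope.freq_base", 10000.0f);
//   gguf_set_val_bool(gctx, "tokenizer.ggml.add_bos_token", true);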

void gguf_set_arr_data(struct gguf_context * ctx, const char * key, enum gguf_type type, const void * data, int n) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type           = GGUF_TYPE_ARRAY;
    ctx->kv[idx].value.arr.type = type;
    ctx->kv[idx].value.arr.n    = n;
    ctx->kv[idx].value.arr.data = GGML_MALLOC(n*gguf_type_size(type));
    memcpy(ctx->kv[idx].value.arr.data, data, n*gguf_type_size(type));
}

void gguf_set_arr_str(struct gguf_context * ctx, const char * key, const char ** data, int n) {
    const int idx = gguf_get_or_add_key(ctx, key);

    ctx->kv[idx].type           = GGUF_TYPE_ARRAY;
    ctx->kv[idx].value.arr.type = GGUF_TYPE_STRING;
    ctx->kv[idx].value.arr.n    = n;
    ctx->kv[idx].value.arr.data = GGML_MALLOC(n*sizeof(struct gguf_str));
    for (int i = 0; i < n; i++) {
        struct gguf_str * str = &((struct gguf_str *)ctx->kv[idx].value.arr.data)[i];
        str->n    = strlen(data[i]);
        str->data = strdup(data[i]);
    }
}
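
// Illustrative usage sketch (editorial addition, not part of ggml): storing a
// numeric array and a string array under hypothetical keys. gguf_set_arr_data
// copies the raw elements, gguf_set_arr_str strdup's each string.
//
//   const float  scores[3] = { 0.1f, 0.2f, 0.3f };
//   const char * labels[2] = { "foo", "bar" };
//   gguf_set_arr_data(gctx, "example.scores", GGUF_TYPE_FLOAT32, scores, 3);
//   gguf_set_arr_str (gctx, "example.labels", labels, 2);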

// set or add KV pairs from another context
void gguf_set_kv(struct gguf_context * ctx, struct gguf_context * src) {
    for (uint32_t i = 0; i < src->header.n_kv; i++) {
        switch (src->kv[i].type) {
            case GGUF_TYPE_UINT8:   gguf_set_val_u8  (ctx, src->kv[i].key.data, src->kv[i].value.uint8);    break;
            case GGUF_TYPE_INT8:    gguf_set_val_i8  (ctx, src->kv[i].key.data, src->kv[i].value.int8);     break;
            case GGUF_TYPE_UINT16:  gguf_set_val_u16 (ctx, src->kv[i].key.data, src->kv[i].value.uint16);   break;
            case GGUF_TYPE_INT16:   gguf_set_val_i16 (ctx, src->kv[i].key.data, src->kv[i].value.int16);    break;
            case GGUF_TYPE_UINT32:  gguf_set_val_u32 (ctx, src->kv[i].key.data, src->kv[i].value.uint32);   break;
            case GGUF_TYPE_INT32:   gguf_set_val_i32 (ctx, src->kv[i].key.data, src->kv[i].value.int32);    break;
            case GGUF_TYPE_FLOAT32: gguf_set_val_f32 (ctx, src->kv[i].key.data, src->kv[i].value.float32);  break;
            case GGUF_TYPE_UINT64:  gguf_set_val_u64 (ctx, src->kv[i].key.data, src->kv[i].value.uint64);   break;
            case GGUF_TYPE_INT64:   gguf_set_val_i64 (ctx, src->kv[i].key.data, src->kv[i].value.int64);    break;
            case GGUF_TYPE_FLOAT64: gguf_set_val_f64 (ctx, src->kv[i].key.data, src->kv[i].value.float64);  break;
            case GGUF_TYPE_BOOL:    gguf_set_val_bool(ctx, src->kv[i].key.data, src->kv[i].value.bool_);    break;
            case GGUF_TYPE_STRING:  gguf_set_val_str (ctx, src->kv[i].key.data, src->kv[i].value.str.data); break;
            case GGUF_TYPE_ARRAY:
                {
                    if (src->kv[i].value.arr.type == GGUF_TYPE_STRING) {
                        const char ** data = GGML_MALLOC(src->kv[i].value.arr.n*sizeof(char *));
                        for (uint32_t j = 0; j < src->kv[i].value.arr.n; j++) {
                            data[j] = ((struct gguf_str *)src->kv[i].value.arr.data)[j].data;
                        }
                        gguf_set_arr_str(ctx, src->kv[i].key.data, data, src->kv[i].value.arr.n);
                        GGML_FREE((void *)data);
                    } else if (src->kv[i].value.arr.type == GGUF_TYPE_ARRAY) {
                        GGML_ASSERT(false && "nested arrays not supported");
                    } else {
                        gguf_set_arr_data(ctx, src->kv[i].key.data, src->kv[i].value.arr.type, src->kv[i].value.arr.data, src->kv[i].value.arr.n);
                    }
                } break;
            default: GGML_ASSERT(false && "invalid type"); break;
        }
    }
}
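
// Illustrative usage sketch (editorial addition, not part of ggml): carrying the
// metadata of an existing file over to a new context, e.g. when rewriting or
// re-quantizing a model. File names are hypothetical.
//
//   struct gguf_init_params params = { /*.no_alloc =*/ true, /*.ctx =*/ NULL };
//   struct gguf_context * src = gguf_init_from_file("model-in.gguf", params);
//   struct gguf_context * dst = gguf_init_empty();
//   gguf_set_kv(dst, src);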

void gguf_add_tensor(
             struct gguf_context * ctx,
        const struct ggml_tensor * tensor) {
    const int idx = ctx->header.n_tensors;
    ctx->infos = realloc(ctx->infos, (idx + 1)*sizeof(struct gguf_tensor_info));

    ctx->infos[idx].name.n    = strlen(tensor->name);
    ctx->infos[idx].name.data = strdup(tensor->name);

    for (int i = 0; i < GGML_MAX_DIMS; ++i) {
        ctx->infos[idx].ne[i] = 1;
    }

    ctx->infos[idx].n_dims = ggml_n_dims(tensor);
    for (uint32_t i = 0; i < ctx->infos[idx].n_dims; i++) {
        ctx->infos[idx].ne[i] = tensor->ne[i];
    }

    ctx->infos[idx].type   = tensor->type;
    ctx->infos[idx].offset = 0;
    ctx->infos[idx].data   = tensor->data;
    ctx->infos[idx].size   = ggml_nbytes(tensor);

    if (ctx->header.n_tensors > 0) {
        ctx->infos[idx].offset = ctx->infos[idx - 1].offset + GGML_PAD(ctx->infos[idx - 1].size, ctx->alignment);
    }

    ctx->header.n_tensors++;
}
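
// Illustrative usage sketch (editorial addition, not part of ggml): registering
// a ggml tensor with the writer. Only a pointer to the tensor data is stored,
// so the data must remain valid until the file (or metadata) is written.
// `mctx` is a hypothetical ggml_context used to allocate the tensor.
//
//   struct ggml_tensor * w = ggml_new_tensor_2d(mctx, GGML_TYPE_F32, 4096, 4096);
//   ggml_set_name(w, "output.weight");
//   gguf_add_tensor(gctx, w);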

void gguf_set_tensor_type(struct gguf_context * ctx, const char * name, enum ggml_type type) {
    const int idx = gguf_find_tensor(ctx, name);
    if (idx < 0) {
        GGML_ASSERT(false && "tensor not found");
    }

    ctx->infos[idx].type = type;
}

void gguf_set_tensor_data(struct gguf_context * ctx, const char * name, const void * data, size_t size) {
    const int idx = gguf_find_tensor(ctx, name);
    if (idx < 0) {
        GGML_ASSERT(false && "tensor not found");
    }

    ctx->infos[idx].data = data;
    ctx->infos[idx].size = size;

    // update offsets
    for (uint32_t i = idx + 1; i < ctx->header.n_tensors; ++i) {
        ctx->infos[i].offset = ctx->infos[i - 1].offset + GGML_PAD(ctx->infos[i - 1].size, ctx->alignment);
    }
}

//static void gguf_fwrite_str(FILE * file, const struct gguf_str * val) {
//    fwrite(&val->n,   sizeof(val->n),    1,      file);
//    fwrite(val->data, sizeof(char),      val->n, file);
//}
//
//static void gguf_fwrite_el(FILE * file, const void * val, size_t size) {
//    fwrite(val, sizeof(char), size, file);
//}

struct gguf_buf {
    void * data;
    size_t size;
    size_t offset;
};

static struct gguf_buf gguf_buf_init(size_t size) {
    struct gguf_buf buf = {
        /*buf.data   =*/ size == 0 ? NULL : GGML_MALLOC(size),
        /*buf.size   =*/ size,
        /*buf.offset =*/ 0,
    };

    return buf;
}

static void gguf_buf_free(struct gguf_buf buf) {
    if (buf.data) {
        GGML_FREE(buf.data);
    }
}

static void gguf_buf_grow(struct gguf_buf * buf, size_t size) {
    if (buf->offset + size > buf->size) {
        buf->size = 1.5*(buf->offset + size);
        if (buf->data) {
            buf->data = realloc(buf->data, buf->size);
        }
    }
}

static void gguf_bwrite_str(struct gguf_buf * buf, const struct gguf_str * val) {
    gguf_buf_grow(buf, sizeof(val->n) + val->n);

    if (buf->data) {
        memcpy((char *) buf->data + buf->offset, &val->n, sizeof(val->n));
    }
    buf->offset += sizeof(val->n);

    if (buf->data) {
        memcpy((char *) buf->data + buf->offset, val->data, val->n);
    }
    buf->offset += val->n;
}

static void gguf_bwrite_el(struct gguf_buf * buf, const void * val, size_t el_size) {
    gguf_buf_grow(buf, el_size);

    if (buf->data) {
        memcpy((char *) buf->data + buf->offset, val, el_size);
    }
    buf->offset += el_size;
}
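
// Note (editorial): when the buffer was initialized with size 0, buf->data stays
// NULL and the helpers above only advance buf->offset without copying anything.
// This is what allows gguf_get_meta_size() below to measure the serialized size
// of the metadata without allocating memory.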

static void gguf_write_to_buf(const struct gguf_context * ctx, struct gguf_buf * buf, bool only_meta) {
    // write header
    gguf_bwrite_el(buf, &ctx->header.magic,     sizeof(ctx->header.magic));
    gguf_bwrite_el(buf, &ctx->header.version,   sizeof(ctx->header.version));
    gguf_bwrite_el(buf, &ctx->header.n_tensors, sizeof(ctx->header.n_tensors));
    gguf_bwrite_el(buf, &ctx->header.n_kv,      sizeof(ctx->header.n_kv));

    // write key-value pairs
    for (uint32_t i = 0; i < ctx->header.n_kv; ++i) {
        struct gguf_kv * kv = &ctx->kv[i];

        gguf_bwrite_str(buf, &kv->key);
        gguf_bwrite_el (buf, &kv->type, sizeof(kv->type));

        switch (kv->type) {
            case GGUF_TYPE_UINT8:   gguf_bwrite_el (buf, &kv->value.uint8,   sizeof(kv->value.uint8)  ); break;
            case GGUF_TYPE_INT8:    gguf_bwrite_el (buf, &kv->value.int8,    sizeof(kv->value.int8)   ); break;
            case GGUF_TYPE_UINT16:  gguf_bwrite_el (buf, &kv->value.uint16,  sizeof(kv->value.uint16) ); break;
            case GGUF_TYPE_INT16:   gguf_bwrite_el (buf, &kv->value.int16,   sizeof(kv->value.int16)  ); break;
            case GGUF_TYPE_UINT32:  gguf_bwrite_el (buf, &kv->value.uint32,  sizeof(kv->value.uint32) ); break;
            case GGUF_TYPE_INT32:   gguf_bwrite_el (buf, &kv->value.int32,   sizeof(kv->value.int32)  ); break;
            case GGUF_TYPE_FLOAT32: gguf_bwrite_el (buf, &kv->value.float32, sizeof(kv->value.float32)); break;
            case GGUF_TYPE_UINT64:  gguf_bwrite_el (buf, &kv->value.uint64,  sizeof(kv->value.uint64) ); break;
            case GGUF_TYPE_INT64:   gguf_bwrite_el (buf, &kv->value.int64,   sizeof(kv->value.int64)  ); break;
            case GGUF_TYPE_FLOAT64: gguf_bwrite_el (buf, &kv->value.float64, sizeof(kv->value.float64)); break;
            case GGUF_TYPE_BOOL:    gguf_bwrite_el (buf, &kv->value.bool_,   sizeof(kv->value.bool_)  ); break;
            case GGUF_TYPE_STRING:  gguf_bwrite_str(buf, &kv->value.str                               ); break;
            case GGUF_TYPE_ARRAY:
                {
                    gguf_bwrite_el(buf, &kv->value.arr.type, sizeof(kv->value.arr.type));
                    gguf_bwrite_el(buf, &kv->value.arr.n,    sizeof(kv->value.arr.n)   );

                    switch (kv->value.arr.type) {
                        case GGUF_TYPE_UINT8:
                        case GGUF_TYPE_INT8:
                        case GGUF_TYPE_UINT16:
                        case GGUF_TYPE_INT16:
                        case GGUF_TYPE_UINT32:
                        case GGUF_TYPE_INT32:
                        case GGUF_TYPE_FLOAT32:
                        case GGUF_TYPE_UINT64:
                        case GGUF_TYPE_INT64:
                        case GGUF_TYPE_FLOAT64:
                        case GGUF_TYPE_BOOL:
                            {
                                gguf_bwrite_el(buf, kv->value.arr.data, kv->value.arr.n * gguf_type_size(kv->value.arr.type));
                            } break;
                        case GGUF_TYPE_STRING:
                            {
                                for (uint32_t j = 0; j < kv->value.arr.n; ++j) {
                                    gguf_bwrite_str(buf, &((struct gguf_str *) kv->value.arr.data)[j]);
                                }
                            } break;
                        case GGUF_TYPE_ARRAY:
                        default: GGML_ASSERT(false && "invalid type"); break;
                    }
                } break;
            default: GGML_ASSERT(false && "invalid type");
        }
    }

    // write tensor infos
    for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) {
        struct gguf_tensor_info * info = &ctx->infos[i];

        gguf_bwrite_str(buf, &info->name);
        gguf_bwrite_el (buf, &info->n_dims, sizeof(info->n_dims));
        for (uint32_t j = 0; j < info->n_dims; ++j) {
            gguf_bwrite_el(buf, &info->ne[j], sizeof(info->ne[j]));
        }
        gguf_bwrite_el(buf, &info->type,   sizeof(info->type));
        gguf_bwrite_el(buf, &info->offset, sizeof(info->offset));
    }

    // we require the data section to be aligned, so take into account any padding
    {
        const size_t offset     = buf->offset;
        const size_t offset_pad = GGML_PAD(offset, ctx->alignment);

        if (offset_pad != offset) {
            uint8_t pad = 0;
            for (size_t i = 0; i < offset_pad - offset; ++i) {
                gguf_bwrite_el(buf, &pad, sizeof(pad));
            }
        }
    }

    if (only_meta) {
        return;
    }

    size_t offset = 0;

    // write tensor data
    for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) {
        struct gguf_tensor_info * info = &ctx->infos[i];

        const size_t size     = info->size;
        const size_t size_pad = GGML_PAD(size, ctx->alignment);

        gguf_bwrite_el(buf, info->data, size);

        if (size_pad != size) {
            uint8_t pad = 0;
            for (size_t j = 0; j < size_pad - size; ++j) {
                gguf_bwrite_el(buf, &pad, sizeof(pad));
            }
        }

        GGML_ASSERT(offset == info->offset);

        offset += size_pad;
    }
}

void gguf_write_to_file(const struct gguf_context * ctx, const char * fname, bool only_meta) {
    FILE * file = fopen(fname, "wb");
    if (!file) {
        GGML_ASSERT(false && "failed to open file for writing");
    }

    struct gguf_buf buf = gguf_buf_init(16*1024);

    gguf_write_to_buf(ctx, &buf, only_meta);

    fwrite(buf.data, 1, buf.offset, file);

    gguf_buf_free(buf);

    fclose(file);
}
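
// Illustrative usage sketch (editorial addition, not part of ggml): writing the
// accumulated KV pairs and tensors to disk. Passing only_meta = false also
// serializes the tensor data after the aligned metadata section.
//
//   gguf_write_to_file(gctx, "model-out.gguf", /*only_meta =*/ false);
//   gguf_free(gctx);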

size_t gguf_get_meta_size(const struct gguf_context * ctx) {
    // no allocs - only compute size
    struct gguf_buf buf = gguf_buf_init(0);

    gguf_write_to_buf(ctx, &buf, true);

    return buf.offset;
}

void gguf_get_meta_data(const struct gguf_context * ctx, void * data) {
    struct gguf_buf buf = gguf_buf_init(16*1024);

    gguf_write_to_buf(ctx, &buf, true);

    memcpy(data, buf.data, buf.offset);

    gguf_buf_free(buf);
}
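
// Illustrative usage sketch (editorial addition, not part of ggml): the two
// functions above are meant to be used as a pair - query the size first, then
// copy the serialized metadata into a caller-provided buffer.
//
//   const size_t meta_size = gguf_get_meta_size(gctx);
//   void * meta = malloc(meta_size);
//   gguf_get_meta_data(gctx, meta);
//   // ... hand `meta` to a consumer, then free(meta)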

////////////////////////////////////////////////////////////////////////////////

int ggml_cpu_has_avx(void) {
#if defined(__AVX__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_avx_vnni(void) {
#if defined(__AVXVNNI__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_avx2(void) {
#if defined(__AVX2__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_avx512(void) {
#if defined(__AVX512F__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_avx512_vbmi(void) {
#if defined(__AVX512VBMI__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_avx512_vnni(void) {
#if defined(__AVX512VNNI__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_fma(void) {
#if defined(__FMA__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_neon(void) {
#if defined(__ARM_NEON)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_arm_fma(void) {
#if defined(__ARM_FEATURE_FMA)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_metal(void) {
#if defined(GGML_USE_METAL)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_f16c(void) {
#if defined(__F16C__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_fp16_va(void) {
#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_wasm_simd(void) {
#if defined(__wasm_simd128__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_blas(void) {
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) || defined(GGML_USE_CUBLAS) || defined(GGML_USE_VULKAN) || defined(GGML_USE_CLBLAST) || defined(GGML_USE_SYCL)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_cublas(void) {
#if defined(GGML_USE_CUBLAS)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_clblast(void) {
#if defined(GGML_USE_CLBLAST)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_vulkan(void) {
#if defined(GGML_USE_VULKAN)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_kompute(void) {
#if defined(GGML_USE_KOMPUTE)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_sycl(void) {
#if defined(GGML_USE_SYCL)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_gpublas(void) {
    return ggml_cpu_has_cublas() || ggml_cpu_has_clblast() || ggml_cpu_has_vulkan() || ggml_cpu_has_kompute() ||
           ggml_cpu_has_sycl();
}

int ggml_cpu_has_sse3(void) {
#if defined(__SSE3__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_ssse3(void) {
#if defined(__SSSE3__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_vsx(void) {
#if defined(__POWER9_VECTOR__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_matmul_int8(void) {
#if defined(__ARM_FEATURE_MATMUL_INT8)
    return 1;
#else
    return 0;
#endif
}
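
// Illustrative usage sketch (editorial addition, not part of ggml): these
// predicates are typically aggregated into a one-line capability report,
// for example:
//
//   printf("AVX2 = %d | NEON = %d | FMA = %d | BLAS = %d\n",
//          ggml_cpu_has_avx2(), ggml_cpu_has_neon(),
//          ggml_cpu_has_fma(),  ggml_cpu_has_blas());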

////////////////////////////////////////////////////////////////////////////////