ggml.c 710 KB

430914310143111431214313143141431514316143171431814319143201432114322143231432414325143261432714328143291433014331143321433314334143351433614337143381433914340143411434214343143441434514346143471434814349143501435114352143531435414355143561435714358143591436014361143621436314364143651436614367143681436914370143711437214373143741437514376143771437814379143801438114382143831438414385143861438714388143891439014391143921439314394143951439614397143981439914400144011440214403144041440514406144071440814409144101441114412144131441414415144161441714418144191442014421144221442314424144251442614427144281442914430144311443214433144341443514436144371443814439144401444114442144431444414445144461444714448144491445014451144521445314454144551445614457144581445914460144611446214463144641446514466144671446814469144701447114472144731447414475144761447714478144791448014481144821448314484144851448614487144881448914490144911449214493144941449514496144971449814499145001450114502145031450414505145061450714508145091451014511145121451314514145151451614517145181451914520145211452214523145241452514526145271452814529145301453114532145331453414535145361453714538145391454014541145421454314544145451454614547145481454914550145511455214553145541455514556145571455814559145601456114562145631456414565145661456714568145691457014571145721457314574145751457614577145781457914580145811458214583145841458514586145871458814589145901459114592145931459414595145961459714598145991460014601146021460314604146051460614607146081460914610146111461214613146141461514616146171461814619146201462114622146231462414625146261462714628146291463014631146321463314634146351463614637146381463914640146411464214643146441464514646146471464814649146501465114652146531465414655146561465714658146591466014661146621466314664146651466614667146681466914670146711467214673146741467514676146771467814679146801468114682146831468414685146861468714688146891469014691146921469314694146951469614697146981469914700147011470214703147041470514706147071470814709147101471114712147131471414715147161471714718147191472014721147221472314724147251472614727147281472914730147311473214733147341473514736147371473814739147401474114742147431474414745147461474714748147491475014751147521475314754147551475614757147581475914760147611476214763147641476514766147671476814769147701477114772147731477414775147761477714778147791478014781147821478314784147851478614787147881478914790147911479214793147941479514796147971479814799148001480114802148031480414805148061480714808148091481014811148121481314814148151481614817148181481914820148211482214823148241482514826148271482814829148301483114832148331483414835148361483714838148391484014841148421484314844148451484614847148481484914850148511485214853148541485514856148571485814859148601486114862148631486414865148661486714868148691487014871148721487314874148751487614877148781487914880148811488214883148841488514886148871488814889148901489114892148931489414895148961489714898148991490014901149021490314904149051490614907149081490914910149111491214913149141491514916149171491814919149201492114922149231492414925149261492714928149291493014931149321493314934149351493614937149381493914940149411494214943149441494514946149471494814949149501495114952149531495414955149561495714958149591496014961149621496314964149651496614967149681496914970149711497214973149741497514976149771497814979149801498114982149831498414985149861498714988149891499014991149921499314994149951499614997149981499915000150011500215003150041500515006150071500815009150101501115012150131501415015150161501715018150191
502015021150221502315024150251502615027150281502915030150311503215033150341503515036150371503815039150401504115042150431504415045150461504715048150491505015051150521505315054150551505615057150581505915060150611506215063150641506515066150671506815069150701507115072150731507415075150761507715078150791508015081150821508315084150851508615087150881508915090150911509215093150941509515096150971509815099151001510115102151031510415105151061510715108151091511015111151121511315114151151511615117151181511915120151211512215123151241512515126151271512815129151301513115132151331513415135151361513715138151391514015141151421514315144151451514615147151481514915150151511515215153151541515515156151571515815159151601516115162151631516415165151661516715168151691517015171151721517315174151751517615177151781517915180151811518215183151841518515186151871518815189151901519115192151931519415195151961519715198151991520015201152021520315204152051520615207152081520915210152111521215213152141521515216152171521815219152201522115222152231522415225152261522715228152291523015231152321523315234152351523615237152381523915240152411524215243152441524515246152471524815249152501525115252152531525415255152561525715258152591526015261152621526315264152651526615267152681526915270152711527215273152741527515276152771527815279152801528115282152831528415285152861528715288152891529015291152921529315294152951529615297152981529915300153011530215303153041530515306153071530815309153101531115312153131531415315153161531715318153191532015321153221532315324153251532615327153281532915330153311533215333153341533515336153371533815339153401534115342153431534415345153461534715348153491535015351153521535315354153551535615357153581535915360153611536215363153641536515366153671536815369153701537115372153731537415375153761537715378153791538015381153821538315384153851538615387153881538915390153911539215393153941539515396153971539815399154001540115402154031540415405154061540715408154091541015411154121541315414154151541615417154181541915420154211542215423154241542515426154271542815429154301543115432154331543415435154361543715438154391544015441154421544315444154451544615447154481544915450154511545215453154541545515456154571545815459154601546115462154631546415465154661546715468154691547015471154721547315474154751547615477154781547915480154811548215483154841548515486154871548815489154901549115492154931549415495154961549715498154991550015501155021550315504155051550615507155081550915510155111551215513155141551515516155171551815519155201552115522155231552415525155261552715528155291553015531155321553315534155351553615537155381553915540155411554215543155441554515546155471554815549155501555115552155531555415555155561555715558155591556015561155621556315564155651556615567155681556915570155711557215573155741557515576155771557815579155801558115582155831558415585155861558715588155891559015591155921559315594155951559615597155981559915600156011560215603156041560515606156071560815609156101561115612156131561415615156161561715618156191562015621156221562315624156251562615627156281562915630156311563215633156341563515636156371563815639156401564115642156431564415645156461564715648156491565015651156521565315654156551565615657156581565915660156611566215663156641566515666156671566815669156701567115672156731567415675156761567715678156791568015681156821568315684156851568615687156881568915690156911569215693156941569515696156971569815699157001570115702157031570415705157061570715708157091571015711157121571315714157151571615717157181571915720157211572215723157241572515726157271572815729157301
573115732157331573415735157361573715738157391574015741157421574315744157451574615747157481574915750157511575215753157541575515756157571575815759157601576115762157631576415765157661576715768157691577015771157721577315774157751577615777157781577915780157811578215783157841578515786157871578815789157901579115792157931579415795157961579715798157991580015801158021580315804158051580615807158081580915810158111581215813158141581515816158171581815819158201582115822158231582415825158261582715828158291583015831158321583315834158351583615837158381583915840158411584215843158441584515846158471584815849158501585115852158531585415855158561585715858158591586015861158621586315864158651586615867158681586915870158711587215873158741587515876158771587815879158801588115882158831588415885158861588715888158891589015891158921589315894158951589615897158981589915900159011590215903159041590515906159071590815909159101591115912159131591415915159161591715918159191592015921159221592315924159251592615927159281592915930159311593215933159341593515936159371593815939159401594115942159431594415945159461594715948159491595015951159521595315954159551595615957159581595915960159611596215963159641596515966159671596815969159701597115972159731597415975159761597715978159791598015981159821598315984159851598615987159881598915990159911599215993159941599515996159971599815999160001600116002160031600416005160061600716008160091601016011160121601316014160151601616017160181601916020160211602216023160241602516026160271602816029160301603116032160331603416035160361603716038160391604016041160421604316044160451604616047160481604916050160511605216053160541605516056160571605816059160601606116062160631606416065160661606716068160691607016071160721607316074160751607616077160781607916080160811608216083160841608516086160871608816089160901609116092160931609416095160961609716098160991610016101161021610316104161051610616107161081610916110161111611216113161141611516116161171611816119161201612116122161231612416125161261612716128161291613016131161321613316134161351613616137161381613916140161411614216143161441614516146161471614816149161501615116152161531615416155161561615716158161591616016161161621616316164161651616616167161681616916170161711617216173161741617516176161771617816179161801618116182161831618416185161861618716188161891619016191161921619316194161951619616197161981619916200162011620216203162041620516206162071620816209162101621116212162131621416215162161621716218162191622016221162221622316224162251622616227162281622916230162311623216233162341623516236162371623816239162401624116242162431624416245162461624716248162491625016251162521625316254162551625616257162581625916260162611626216263162641626516266162671626816269162701627116272162731627416275162761627716278162791628016281162821628316284162851628616287162881628916290162911629216293162941629516296162971629816299163001630116302163031630416305163061630716308163091631016311163121631316314163151631616317163181631916320163211632216323163241632516326163271632816329163301633116332163331633416335163361633716338163391634016341163421634316344163451634616347163481634916350163511635216353163541635516356163571635816359163601636116362163631636416365163661636716368163691637016371163721637316374163751637616377163781637916380163811638216383163841638516386163871638816389163901639116392163931639416395163961639716398163991640016401164021640316404164051640616407164081640916410164111641216413164141641516416164171641816419164201642116422164231642416425164261642716428164291643016431164321643316434164351643616437164381643916440164411
644216443164441644516446164471644816449164501645116452164531645416455164561645716458164591646016461164621646316464164651646616467164681646916470164711647216473164741647516476164771647816479164801648116482164831648416485164861648716488164891649016491164921649316494164951649616497164981649916500165011650216503165041650516506165071650816509165101651116512165131651416515165161651716518165191652016521165221652316524165251652616527165281652916530165311653216533165341653516536165371653816539165401654116542165431654416545165461654716548165491655016551165521655316554165551655616557165581655916560165611656216563165641656516566165671656816569165701657116572165731657416575165761657716578165791658016581165821658316584165851658616587165881658916590165911659216593165941659516596165971659816599166001660116602166031660416605166061660716608166091661016611166121661316614166151661616617166181661916620166211662216623166241662516626166271662816629166301663116632166331663416635166361663716638166391664016641166421664316644166451664616647166481664916650166511665216653166541665516656166571665816659166601666116662166631666416665166661666716668166691667016671166721667316674166751667616677166781667916680166811668216683166841668516686166871668816689166901669116692166931669416695166961669716698166991670016701167021670316704167051670616707167081670916710167111671216713167141671516716167171671816719167201672116722167231672416725167261672716728167291673016731167321673316734167351673616737167381673916740167411674216743167441674516746167471674816749167501675116752167531675416755167561675716758167591676016761167621676316764167651676616767167681676916770167711677216773167741677516776167771677816779167801678116782167831678416785167861678716788167891679016791167921679316794167951679616797167981679916800168011680216803168041680516806168071680816809168101681116812168131681416815168161681716818168191682016821168221682316824168251682616827168281682916830168311683216833168341683516836168371683816839168401684116842168431684416845168461684716848168491685016851168521685316854168551685616857168581685916860168611686216863168641686516866168671686816869168701687116872168731687416875168761687716878168791688016881168821688316884168851688616887168881688916890168911689216893168941689516896168971689816899169001690116902169031690416905169061690716908169091691016911169121691316914169151691616917169181691916920169211692216923169241692516926169271692816929169301693116932169331693416935169361693716938169391694016941169421694316944169451694616947169481694916950169511695216953169541695516956169571695816959169601696116962169631696416965169661696716968169691697016971169721697316974169751697616977169781697916980169811698216983169841698516986169871698816989169901699116992169931699416995169961699716998169991700017001170021700317004170051700617007170081700917010170111701217013170141701517016170171701817019170201702117022170231702417025170261702717028170291703017031170321703317034170351703617037170381703917040170411704217043170441704517046170471704817049170501705117052170531705417055170561705717058170591706017061170621706317064170651706617067170681706917070170711707217073170741707517076170771707817079170801708117082170831708417085170861708717088170891709017091170921709317094170951709617097170981709917100171011710217103171041710517106171071710817109171101711117112171131711417115171161711717118171191712017121171221712317124171251712617127171281712917130171311713217133171341713517136171371713817139171401714117142171431714417145171461714717148171491715017151171521
715317154171551715617157171581715917160171611716217163171641716517166171671716817169171701717117172171731717417175171761717717178171791718017181171821718317184171851718617187171881718917190171911719217193171941719517196171971719817199172001720117202172031720417205172061720717208172091721017211172121721317214172151721617217172181721917220172211722217223172241722517226172271722817229172301723117232172331723417235172361723717238172391724017241172421724317244172451724617247172481724917250172511725217253172541725517256172571725817259172601726117262172631726417265172661726717268172691727017271172721727317274172751727617277172781727917280172811728217283172841728517286172871728817289172901729117292172931729417295172961729717298172991730017301173021730317304173051730617307173081730917310173111731217313173141731517316173171731817319173201732117322173231732417325173261732717328173291733017331173321733317334173351733617337173381733917340173411734217343173441734517346173471734817349173501735117352173531735417355173561735717358173591736017361173621736317364173651736617367173681736917370173711737217373173741737517376173771737817379173801738117382173831738417385173861738717388173891739017391173921739317394173951739617397173981739917400174011740217403174041740517406174071740817409174101741117412174131741417415174161741717418174191742017421174221742317424174251742617427174281742917430174311743217433174341743517436174371743817439174401744117442174431744417445174461744717448174491745017451174521745317454174551745617457174581745917460174611746217463174641746517466174671746817469174701747117472174731747417475174761747717478174791748017481174821748317484174851748617487174881748917490174911749217493174941749517496174971749817499175001750117502175031750417505175061750717508175091751017511175121751317514175151751617517175181751917520175211752217523175241752517526175271752817529175301753117532175331753417535175361753717538175391754017541175421754317544175451754617547175481754917550175511755217553175541755517556175571755817559175601756117562175631756417565175661756717568175691757017571175721757317574175751757617577175781757917580175811758217583175841758517586175871758817589175901759117592175931759417595175961759717598175991760017601176021760317604176051760617607176081760917610176111761217613176141761517616176171761817619176201762117622176231762417625176261762717628176291763017631176321763317634176351763617637176381763917640176411764217643176441764517646176471764817649176501765117652176531765417655176561765717658176591766017661176621766317664176651766617667176681766917670176711767217673176741767517676176771767817679176801768117682176831768417685176861768717688176891769017691176921769317694176951769617697176981769917700177011770217703177041770517706177071770817709177101771117712177131771417715177161771717718177191772017721177221772317724177251772617727177281772917730177311773217733177341773517736177371773817739177401774117742177431774417745177461774717748177491775017751177521775317754177551775617757177581775917760177611776217763177641776517766177671776817769177701777117772177731777417775177761777717778177791778017781177821778317784177851778617787177881778917790177911779217793177941779517796177971779817799178001780117802178031780417805178061780717808178091781017811178121781317814178151781617817178181781917820178211782217823178241782517826178271782817829178301783117832178331783417835178361783717838178391784017841178421784317844178451784617847178481784917850178511785217853178541785517856178571785817859178601786117862178631
786417865178661786717868178691787017871178721787317874178751787617877178781787917880178811788217883178841788517886178871788817889178901789117892178931789417895178961789717898178991790017901179021790317904179051790617907179081790917910179111791217913179141791517916179171791817919179201792117922179231792417925179261792717928179291793017931179321793317934179351793617937179381793917940179411794217943179441794517946179471794817949179501795117952179531795417955179561795717958179591796017961179621796317964179651796617967179681796917970179711797217973179741797517976179771797817979179801798117982179831798417985179861798717988179891799017991179921799317994179951799617997179981799918000180011800218003180041800518006180071800818009180101801118012180131801418015180161801718018180191802018021180221802318024180251802618027180281802918030180311803218033180341803518036180371803818039180401804118042180431804418045180461804718048180491805018051180521805318054180551805618057180581805918060180611806218063180641806518066180671806818069180701807118072180731807418075180761807718078180791808018081180821808318084180851808618087180881808918090180911809218093180941809518096180971809818099181001810118102181031810418105181061810718108181091811018111181121811318114181151811618117181181811918120181211812218123181241812518126181271812818129181301813118132181331813418135181361813718138181391814018141181421814318144181451814618147181481814918150181511815218153181541815518156181571815818159181601816118162181631816418165181661816718168181691817018171181721817318174181751817618177181781817918180181811818218183181841818518186181871818818189181901819118192181931819418195181961819718198181991820018201182021820318204182051820618207182081820918210182111821218213182141821518216182171821818219182201822118222182231822418225182261822718228182291823018231182321823318234182351823618237182381823918240182411824218243182441824518246182471824818249182501825118252182531825418255182561825718258182591826018261182621826318264182651826618267182681826918270182711827218273182741827518276182771827818279182801828118282182831828418285182861828718288182891829018291182921829318294182951829618297182981829918300183011830218303183041830518306183071830818309183101831118312183131831418315183161831718318183191832018321183221832318324183251832618327183281832918330183311833218333183341833518336183371833818339183401834118342183431834418345183461834718348183491835018351183521835318354183551835618357183581835918360183611836218363183641836518366183671836818369183701837118372183731837418375183761837718378183791838018381183821838318384183851838618387183881838918390183911839218393183941839518396183971839818399184001840118402184031840418405184061840718408184091841018411184121841318414184151841618417184181841918420184211842218423184241842518426184271842818429184301843118432184331843418435184361843718438184391844018441184421844318444184451844618447184481844918450184511845218453184541845518456184571845818459184601846118462184631846418465184661846718468184691847018471184721847318474184751847618477184781847918480184811848218483184841848518486184871848818489184901849118492184931849418495184961849718498184991850018501185021850318504185051850618507185081850918510185111851218513185141851518516185171851818519185201852118522185231852418525185261852718528185291853018531185321853318534185351853618537185381853918540185411854218543185441854518546185471854818549185501855118552185531855418555185561855718558185591856018561185621856318564185651856618567185681856918570185711857218573185741
857518576185771857818579185801858118582185831858418585185861858718588185891859018591185921859318594185951859618597185981859918600186011860218603186041860518606186071860818609186101861118612186131861418615186161861718618186191862018621186221862318624186251862618627186281862918630186311863218633186341863518636186371863818639186401864118642186431864418645186461864718648186491865018651186521865318654186551865618657186581865918660186611866218663186641866518666186671866818669186701867118672186731867418675186761867718678186791868018681186821868318684186851868618687186881868918690186911869218693186941869518696186971869818699187001870118702187031870418705187061870718708187091871018711187121871318714187151871618717187181871918720187211872218723187241872518726187271872818729187301873118732187331873418735187361873718738187391874018741187421874318744187451874618747187481874918750187511875218753187541875518756187571875818759187601876118762187631876418765187661876718768187691877018771187721877318774187751877618777187781877918780187811878218783187841878518786187871878818789187901879118792187931879418795187961879718798187991880018801188021880318804188051880618807188081880918810188111881218813188141881518816188171881818819188201882118822188231882418825188261882718828188291883018831188321883318834188351883618837188381883918840188411884218843188441884518846188471884818849188501885118852188531885418855188561885718858188591886018861188621886318864188651886618867188681886918870188711887218873188741887518876188771887818879188801888118882188831888418885188861888718888188891889018891188921889318894188951889618897188981889918900189011890218903189041890518906189071890818909189101891118912189131891418915189161891718918189191892018921189221892318924189251892618927189281892918930189311893218933189341893518936189371893818939189401894118942189431894418945189461894718948189491895018951189521895318954189551895618957189581895918960189611896218963189641896518966189671896818969189701897118972189731897418975189761897718978189791898018981189821898318984189851898618987189881898918990189911899218993189941899518996189971899818999190001900119002190031900419005190061900719008190091901019011190121901319014190151901619017190181901919020190211902219023190241902519026190271902819029190301903119032190331903419035190361903719038190391904019041190421904319044190451904619047190481904919050190511905219053190541905519056190571905819059190601906119062190631906419065190661906719068190691907019071190721907319074190751907619077190781907919080190811908219083190841908519086190871908819089190901909119092190931909419095190961909719098190991910019101191021910319104191051910619107191081910919110191111911219113191141911519116191171911819119191201912119122191231912419125191261912719128191291913019131191321913319134191351913619137191381913919140191411914219143191441914519146191471914819149191501915119152191531915419155191561915719158191591916019161191621916319164191651916619167191681916919170191711917219173191741917519176191771917819179191801918119182191831918419185191861918719188191891919019191191921919319194191951919619197191981919919200192011920219203192041920519206192071920819209192101921119212192131921419215192161921719218192191922019221192221922319224192251922619227192281922919230192311923219233192341923519236192371923819239192401924119242192431924419245192461924719248192491925019251192521925319254192551925619257192581925919260192611926219263192641926519266192671926819269192701927119272192731927419275192761927719278192791928019281192821928319284192851
928619287192881928919290192911929219293192941929519296192971929819299193001930119302193031930419305193061930719308193091931019311193121931319314193151931619317193181931919320193211932219323193241932519326193271932819329193301933119332193331933419335193361933719338193391934019341193421934319344193451934619347193481934919350193511935219353193541935519356193571935819359193601936119362193631936419365193661936719368193691937019371193721937319374193751937619377193781937919380193811938219383193841938519386193871938819389193901939119392193931939419395193961939719398193991940019401194021940319404194051940619407194081940919410194111941219413194141941519416194171941819419194201942119422194231942419425194261942719428194291943019431194321943319434194351943619437194381943919440194411944219443194441944519446194471944819449194501945119452194531945419455194561945719458194591946019461194621946319464194651946619467194681946919470194711947219473194741947519476194771947819479194801948119482194831948419485194861948719488194891949019491194921949319494194951949619497194981949919500195011950219503195041950519506195071950819509195101951119512195131951419515195161951719518195191952019521195221952319524195251952619527195281952919530195311953219533195341953519536195371953819539195401954119542195431954419545195461954719548195491955019551195521955319554195551955619557195581955919560195611956219563195641956519566195671956819569195701957119572195731957419575195761957719578195791958019581195821958319584195851958619587195881958919590195911959219593195941959519596195971959819599196001960119602196031960419605196061960719608196091961019611196121961319614196151961619617196181961919620196211962219623196241962519626196271962819629196301963119632196331963419635196361963719638196391964019641196421964319644196451964619647196481964919650196511965219653196541965519656196571965819659196601966119662196631966419665196661966719668196691967019671196721967319674196751967619677196781967919680196811968219683196841968519686196871968819689196901969119692196931969419695196961969719698196991970019701197021970319704197051970619707197081970919710197111971219713197141971519716197171971819719197201972119722197231972419725197261972719728197291973019731197321973319734197351973619737197381973919740197411974219743197441974519746197471974819749197501975119752197531975419755197561975719758197591976019761197621976319764197651976619767197681976919770197711977219773197741977519776197771977819779197801978119782197831978419785197861978719788197891979019791197921979319794197951979619797197981979919800198011980219803198041980519806198071980819809198101981119812198131981419815198161981719818198191982019821198221982319824198251982619827198281982919830198311983219833198341983519836198371983819839198401984119842198431984419845198461984719848198491985019851198521985319854198551985619857198581985919860198611986219863198641986519866198671986819869198701987119872198731987419875198761987719878198791988019881198821988319884198851988619887198881988919890198911989219893198941989519896198971989819899199001990119902199031990419905199061990719908199091991019911199121991319914199151991619917199181991919920199211992219923199241992519926199271992819929199301993119932199331993419935199361993719938199391994019941199421994319944199451994619947199481994919950199511995219953199541995519956199571995819959199601996119962199631996419965199661996719968199691997019971199721997319974199751997619977199781997919980199811998219983199841998519986199871998819989199901999119992199931999419995199961
999719998199992000020001200022000320004200052000620007200082000920010200112001220013200142001520016200172001820019200202002120022200232002420025200262002720028200292003020031200322003320034200352003620037200382003920040200412004220043200442004520046200472004820049200502005120052200532005420055200562005720058200592006020061200622006320064200652006620067200682006920070200712007220073200742007520076200772007820079200802008120082200832008420085200862008720088200892009020091200922009320094200952009620097200982009920100201012010220103201042010520106201072010820109201102011120112201132011420115201162011720118201192012020121201222012320124201252012620127201282012920130201312013220133201342013520136201372013820139201402014120142201432014420145201462014720148201492015020151201522015320154201552015620157201582015920160201612016220163201642016520166201672016820169201702017120172201732017420175201762017720178201792018020181201822018320184201852018620187201882018920190201912019220193201942019520196201972019820199202002020120202202032020420205202062020720208202092021020211202122021320214202152021620217202182021920220202212022220223202242022520226202272022820229202302023120232202332023420235202362023720238202392024020241202422024320244202452024620247202482024920250202512025220253202542025520256202572025820259202602026120262202632026420265202662026720268202692027020271202722027320274202752027620277202782027920280202812028220283202842028520286202872028820289202902029120292202932029420295202962029720298202992030020301203022030320304203052030620307203082030920310203112031220313203142031520316203172031820319203202032120322203232032420325203262032720328203292033020331203322033320334203352033620337203382033920340203412034220343203442034520346203472034820349203502035120352203532035420355203562035720358203592036020361203622036320364203652036620367203682036920370203712037220373203742037520376203772037820379203802038120382203832038420385203862038720388203892039020391203922039320394203952039620397203982039920400204012040220403204042040520406204072040820409204102041120412204132041420415204162041720418204192042020421204222042320424204252042620427204282042920430204312043220433204342043520436204372043820439204402044120442204432044420445204462044720448204492045020451204522045320454204552045620457204582045920460204612046220463204642046520466204672046820469204702047120472204732047420475204762047720478204792048020481204822048320484204852048620487204882048920490204912049220493204942049520496204972049820499205002050120502205032050420505205062050720508205092051020511205122051320514205152051620517205182051920520205212052220523205242052520526205272052820529205302053120532205332053420535205362053720538205392054020541205422054320544205452054620547205482054920550205512055220553205542055520556205572055820559205602056120562205632056420565205662056720568205692057020571205722057320574205752057620577205782057920580205812058220583205842058520586205872058820589205902059120592205932059420595205962059720598205992060020601206022060320604206052060620607206082060920610206112061220613206142061520616206172061820619206202062120622206232062420625206262062720628206292063020631206322063320634206352063620637206382063920640206412064220643206442064520646206472064820649206502065120652206532065420655206562065720658206592066020661206622066320664206652066620667206682066920670206712067220673206742067520676206772067820679206802068120682206832068420685206862068720688206892069020691206922069320694206952069620697206982069920700207012070220703207042070520706207072
070820709207102071120712207132071420715207162071720718207192072020721207222072320724207252072620727207282072920730207312073220733207342073520736207372073820739207402074120742207432074420745207462074720748207492075020751207522075320754207552075620757207582075920760207612076220763207642076520766207672076820769207702077120772207732077420775207762077720778207792078020781207822078320784207852078620787207882078920790207912079220793207942079520796207972079820799208002080120802208032080420805208062080720808208092081020811208122081320814208152081620817208182081920820208212082220823208242082520826208272082820829208302083120832208332083420835208362083720838208392084020841208422084320844208452084620847208482084920850208512085220853208542085520856208572085820859208602086120862208632086420865208662086720868208692087020871208722087320874208752087620877208782087920880208812088220883208842088520886208872088820889208902089120892208932089420895208962089720898208992090020901209022090320904209052090620907209082090920910209112091220913209142091520916209172091820919209202092120922209232092420925209262092720928209292093020931209322093320934209352093620937209382093920940209412094220943209442094520946209472094820949209502095120952209532095420955209562095720958209592096020961209622096320964209652096620967209682096920970209712097220973209742097520976209772097820979209802098120982209832098420985209862098720988209892099020991209922099320994209952099620997209982099921000210012100221003210042100521006210072100821009210102101121012210132101421015210162101721018210192102021021210222102321024210252102621027210282102921030210312103221033210342103521036210372103821039210402104121042210432104421045210462104721048210492105021051210522105321054210552105621057210582105921060210612106221063210642106521066210672106821069210702107121072210732107421075210762107721078210792108021081210822108321084210852108621087210882108921090210912109221093210942109521096210972109821099211002110121102211032110421105211062110721108211092111021111211122111321114211152111621117211182111921120211212112221123211242112521126211272112821129211302113121132211332113421135211362113721138211392114021141211422114321144211452114621147211482114921150211512115221153211542115521156211572115821159211602116121162211632116421165211662116721168211692117021171211722117321174211752117621177211782117921180211812118221183211842118521186211872118821189211902119121192211932119421195211962119721198211992120021201212022120321204212052120621207212082120921210212112121221213212142121521216212172121821219212202122121222212232122421225212262122721228212292123021231212322123321234212352123621237212382123921240212412124221243212442124521246212472124821249212502125121252212532125421255212562125721258212592126021261212622126321264212652126621267212682126921270212712127221273212742127521276212772127821279212802128121282212832128421285212862128721288212892129021291212922129321294212952129621297212982129921300213012130221303213042130521306213072130821309213102131121312213132131421315213162131721318213192132021321213222132321324213252132621327213282132921330213312133221333213342133521336213372133821339213402134121342213432134421345213462134721348213492135021351213522135321354213552135621357213582135921360213612136221363213642136521366213672136821369213702137121372213732137421375213762137721378213792138021381213822138321384213852138621387213882138921390213912139221393213942139521396213972139821399214002140121402214032140421405214062140721408214092141021411214122141321414214152141621417214182
1419214202142121422214232142421425214262142721428214292143021431214322143321434214352143621437214382143921440214412144221443214442144521446214472144821449214502145121452214532145421455214562145721458214592146021461214622146321464214652146621467214682146921470214712147221473214742147521476214772147821479214802148121482214832148421485214862148721488214892149021491214922149321494214952149621497214982149921500215012150221503215042150521506215072150821509215102151121512215132151421515215162151721518215192152021521215222152321524215252152621527215282152921530215312153221533215342153521536215372153821539215402154121542215432154421545215462154721548215492155021551215522155321554215552155621557215582155921560215612156221563215642156521566215672156821569215702157121572215732157421575215762157721578215792158021581215822158321584215852158621587215882158921590215912159221593215942159521596215972159821599216002160121602216032160421605216062160721608216092161021611216122161321614216152161621617216182161921620216212162221623216242162521626216272162821629216302163121632216332163421635216362163721638216392164021641216422164321644216452164621647216482164921650216512165221653216542165521656216572165821659216602166121662216632166421665216662166721668216692167021671216722167321674216752167621677216782167921680216812168221683216842168521686216872168821689216902169121692216932169421695216962169721698216992170021701217022170321704217052170621707217082170921710217112171221713217142171521716217172171821719217202172121722217232172421725217262172721728217292173021731217322173321734217352173621737217382173921740217412174221743217442174521746217472174821749217502175121752217532175421755217562175721758217592176021761217622176321764217652176621767217682176921770217712177221773217742177521776217772177821779217802178121782217832178421785217862178721788217892179021791217922179321794217952179621797217982179921800218012180221803218042180521806218072180821809218102181121812218132181421815218162181721818218192182021821218222182321824218252182621827218282182921830218312183221833218342183521836218372183821839218402184121842218432184421845218462184721848218492185021851218522185321854218552185621857218582185921860218612186221863218642186521866218672186821869218702187121872218732187421875218762187721878218792188021881218822188321884218852188621887218882188921890218912189221893218942189521896218972189821899219002190121902219032190421905219062190721908219092191021911219122191321914219152191621917219182191921920219212192221923219242192521926219272192821929219302193121932219332193421935219362193721938219392194021941219422194321944219452194621947219482194921950219512195221953219542195521956219572195821959219602196121962219632196421965219662196721968219692197021971219722197321974219752197621977219782197921980219812198221983219842198521986219872198821989219902199121992219932199421995219962199721998219992200022001220022200322004220052200622007220082200922010220112201222013220142201522016220172201822019220202202122022220232202422025220262202722028
  1. #define _CRT_SECURE_NO_DEPRECATE // Disables ridiculous "unsafe" warnings on Windows
  2. #include "ggml.h"
  3. #ifdef GGML_USE_K_QUANTS
  4. #include "k_quants.h"
  5. #endif
  6. #if defined(_MSC_VER) || defined(__MINGW32__)
  7. #include <malloc.h> // using malloc.h with MSC/MINGW
  8. #elif !defined(__FreeBSD__) && !defined(__NetBSD__) && !defined(__OpenBSD__)
  9. #include <alloca.h>
  10. #endif
  11. #include <assert.h>
  12. #include <errno.h>
  13. #include <time.h>
  14. #include <math.h>
  15. #include <stdlib.h>
  16. #include <string.h>
  17. #include <stdint.h>
  18. #include <inttypes.h>
  19. #include <stdio.h>
  20. #include <float.h>
  21. #include <limits.h>
  22. #include <stdarg.h>
  23. #include <signal.h>
  24. #ifdef GGML_USE_METAL
  25. #include <unistd.h>
  26. #endif
  27. // static_assert should be a #define, but if it's not,
  28. // fall back to the _Static_assert C11 keyword.
  29. // if C99 - static_assert is noop
  30. // ref: https://stackoverflow.com/a/53923785/4039976
  31. #ifndef static_assert
  32. #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201100L)
  33. #define static_assert(cond, msg) _Static_assert(cond, msg)
  34. #else
  35. #define static_assert(cond, msg) struct global_scope_noop_trick
  36. #endif
  37. #endif
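// Editor's note (illustrative, not part of the original file): with the fallback
// above, a pre-C11 compiler turns the check below into a harmless struct
// declaration, while a C11 compiler performs a real compile-time assertion.
#if 0
static_assert(sizeof(uint16_t) == 2, "uint16_t must be exactly 2 bytes");
#endif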
  38. #if defined(_MSC_VER)
  39. // disable "possible loss of data" to avoid hundreds of casts
  40. // we should just be careful :)
  41. #pragma warning(disable: 4244 4267)
  42. // disable POSIX deprecation warnings
  43. // these functions are never going away, anyway
  44. #pragma warning(disable: 4996)
  45. #endif
  46. #if defined(_WIN32)
  47. #include <windows.h>
  48. typedef volatile LONG atomic_int;
  49. typedef atomic_int atomic_bool;
  50. static void atomic_store(atomic_int * ptr, LONG val) {
  51. InterlockedExchange(ptr, val);
  52. }
  53. static LONG atomic_load(atomic_int * ptr) {
  54. return InterlockedCompareExchange(ptr, 0, 0);
  55. }
  56. static LONG atomic_fetch_add(atomic_int * ptr, LONG inc) {
  57. return InterlockedExchangeAdd(ptr, inc);
  58. }
  59. static LONG atomic_fetch_sub(atomic_int * ptr, LONG dec) {
  60. return atomic_fetch_add(ptr, -(dec));
  61. }
  62. typedef HANDLE pthread_t;
  63. typedef DWORD thread_ret_t;
  64. static int pthread_create(pthread_t * out, void * unused, thread_ret_t(*func)(void *), void * arg) {
  65. (void) unused;
  66. HANDLE handle = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE) func, arg, 0, NULL);
  67. if (handle == NULL)
  68. {
  69. return EAGAIN;
  70. }
  71. *out = handle;
  72. return 0;
  73. }
  74. static int pthread_join(pthread_t thread, void * unused) {
  75. (void) unused;
  76. int ret = (int) WaitForSingleObject(thread, INFINITE);
  77. CloseHandle(thread);
  78. return ret;
  79. }
  80. static int sched_yield (void) {
  81. Sleep (0);
  82. return 0;
  83. }
  84. #else
  85. #include <pthread.h>
  86. #include <stdatomic.h>
  87. typedef void * thread_ret_t;
  88. #include <sys/types.h>
  89. #include <sys/stat.h>
  90. #include <unistd.h>
  91. #endif
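// Editor's sketch (not part of ggml): the shim above lets the rest of the file
// use a single pthread-style API on both Windows and POSIX. A worker thread
// could be started and joined like this; `worker_fn` and `example_spawn_worker`
// are hypothetical names used only for illustration.
#if 0
static thread_ret_t worker_fn(void * arg) {
    atomic_int * counter = (atomic_int *) arg;
    atomic_fetch_add(counter, 1);   // safe on both backends
    return 0;
}

static void example_spawn_worker(void) {
    atomic_int counter;
    atomic_store(&counter, 0);
    pthread_t th;
    if (pthread_create(&th, NULL, worker_fn, &counter) == 0) {
        pthread_join(th, NULL);
    }
}
#endif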
  92. #ifdef GGML_USE_CPU_HBM
  93. #include <hbwmalloc.h>
  94. #endif
  95. // __FMA__ and __F16C__ are not defined in MSVC, however they are implied with AVX2/AVX512
  96. #if defined(_MSC_VER) && (defined(__AVX2__) || defined(__AVX512F__))
  97. #ifndef __FMA__
  98. #define __FMA__
  99. #endif
  100. #ifndef __F16C__
  101. #define __F16C__
  102. #endif
  103. #ifndef __SSE3__
  104. #define __SSE3__
  105. #endif
  106. #endif
  107. /*#define GGML_PERF*/
  108. #define GGML_DEBUG 0
  109. #define GGML_GELU_FP16
  110. #define GGML_GELU_QUICK_FP16
  111. #define GGML_SILU_FP16
  112. // #define GGML_CROSS_ENTROPY_EXP_FP16
  113. // #define GGML_FLASH_ATTN_EXP_FP16
  114. #define GGML_SOFT_MAX_UNROLL 4
  115. #define GGML_VEC_DOT_UNROLL 2
  116. #define GGML_VEC_MAD_UNROLL 32
  117. //
  118. // logging
  119. //
  120. #if (GGML_DEBUG >= 1)
  121. #define GGML_PRINT_DEBUG(...) printf(__VA_ARGS__)
  122. #else
  123. #define GGML_PRINT_DEBUG(...)
  124. #endif
  125. #if (GGML_DEBUG >= 5)
  126. #define GGML_PRINT_DEBUG_5(...) printf(__VA_ARGS__)
  127. #else
  128. #define GGML_PRINT_DEBUG_5(...)
  129. #endif
  130. #if (GGML_DEBUG >= 10)
  131. #define GGML_PRINT_DEBUG_10(...) printf(__VA_ARGS__)
  132. #else
  133. #define GGML_PRINT_DEBUG_10(...)
  134. #endif
  135. #define GGML_PRINT(...) printf(__VA_ARGS__)
  136. #ifdef GGML_USE_ACCELERATE
  137. // uncomment to use vDSP for soft max computation
  138. // note: not sure if it is actually faster
  139. //#define GGML_SOFT_MAX_ACCELERATE
  140. #endif
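// Usage sketch (editor's addition): with GGML_DEBUG at its default of 0 the
// debug macros above expand to nothing, so calls like these cost nothing in a
// release build; raising GGML_DEBUG to 1, 5, or 10 re-enables them.
#if 0
GGML_PRINT_DEBUG("%s: processing %d rows\n", __func__, 42);
GGML_PRINT("informational message\n");
#endif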
  163. #if defined(_MSC_VER) || defined(__MINGW32__)
  164. #define GGML_ALIGNED_MALLOC(size) _aligned_malloc(size, GGML_MEM_ALIGN)
  165. #define GGML_ALIGNED_FREE(ptr) _aligned_free(ptr)
  166. #else
  167. inline static void * ggml_aligned_malloc(size_t size) {
  168. if (size == 0) {
  169. GGML_PRINT("WARNING: Behavior may be unexpected when allocating 0 bytes for ggml_aligned_malloc!\n");
  170. return NULL;
  171. }
  172. void * aligned_memory = NULL;
  173. #ifdef GGML_USE_CPU_HBM
  174. int result = hbw_posix_memalign(&aligned_memory, 16, size);
  175. #elif GGML_USE_METAL
  176. int result = posix_memalign(&aligned_memory, sysconf(_SC_PAGESIZE), size);
  177. #else
  178. int result = posix_memalign(&aligned_memory, GGML_MEM_ALIGN, size);
  179. #endif
  180. if (result != 0) {
  181. // Handle allocation failure
  182. const char *error_desc = "unknown allocation error";
  183. switch (result) {
  184. case EINVAL:
  185. error_desc = "invalid alignment value";
  186. break;
  187. case ENOMEM:
  188. error_desc = "insufficient memory";
  189. break;
  190. }
  191. GGML_PRINT("%s: %s (attempted to allocate %6.2f MB)\n", __func__, error_desc, size/(1024.0*1024.0));
  192. return NULL;
  193. }
  194. return aligned_memory;
  195. }
  196. #define GGML_ALIGNED_MALLOC(size) ggml_aligned_malloc(size)
  197. #ifdef GGML_USE_CPU_HBM
  198. #define GGML_ALIGNED_FREE(ptr) if(NULL != ptr) hbw_free(ptr)
  199. #else
  200. #define GGML_ALIGNED_FREE(ptr) free(ptr)
  201. #endif
  202. #endif
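// Usage sketch (editor's addition): whichever branch above is taken, callers
// see the same two macros. Sizes are in bytes, and the pointer must be released
// with GGML_ALIGNED_FREE rather than plain free() on the Windows and HBM paths.
#if 0
void * buf = GGML_ALIGNED_MALLOC(1024*1024);
if (buf != NULL) {
    // ... use the 1 MiB aligned buffer ...
    GGML_ALIGNED_FREE(buf);
}
#endif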
  203. #define UNUSED GGML_UNUSED
  204. #define SWAP(x, y, T) do { T SWAP = x; x = y; y = SWAP; } while (0)
  205. //
  206. // tensor access macros
  207. //
  208. #define GGML_TENSOR_UNARY_OP_LOCALS \
  209. GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) \
  210. GGML_TENSOR_LOCALS(size_t, nb0, src0, nb) \
  211. GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) \
  212. GGML_TENSOR_LOCALS(size_t, nb, dst, nb)
  213. #define GGML_TENSOR_BINARY_OP_LOCALS \
  214. GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) \
  215. GGML_TENSOR_LOCALS(size_t, nb0, src0, nb) \
  216. GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne) \
  217. GGML_TENSOR_LOCALS(size_t, nb1, src1, nb) \
  218. GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) \
  219. GGML_TENSOR_LOCALS(size_t, nb, dst, nb)
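// Editor's note: GGML_TENSOR_LOCALS is defined in the ggml headers, not shown
// here; the two macros above appear to expand it so that an op kernel gets local
// copies of the source/destination shapes and strides, roughly like:
//
//     const int64_t ne00 = src0->ne[0]; /* ... */ const int64_t ne03 = src0->ne[3];
//     const size_t  nb00 = src0->nb[0]; /* ... */ const size_t  nb03 = src0->nb[3];
//     const int64_t ne0  = dst->ne[0];  /* ... */ const int64_t ne3  = dst->ne[3];
//     const size_t  nb0  = dst->nb[0];  /* ... */ const size_t  nb3  = dst->nb[3];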
  220. #if defined(GGML_USE_ACCELERATE)
  221. #include <Accelerate/Accelerate.h>
  222. #if defined(GGML_USE_CLBLAST) // allow usage of CLBlast alongside Accelerate functions
  223. #include "ggml-opencl.h"
  224. #endif
  225. #elif defined(GGML_USE_OPENBLAS)
  226. #if defined(GGML_BLAS_USE_MKL)
  227. #include <mkl.h>
  228. #else
  229. #include <cblas.h>
  230. #endif
  231. #elif defined(GGML_USE_CUBLAS)
  232. #include "ggml-cuda.h"
  233. #elif defined(GGML_USE_CLBLAST)
  234. #include "ggml-opencl.h"
  235. #endif
  236. #undef MIN
  237. #undef MAX
  238. #define MIN(a, b) ((a) < (b) ? (a) : (b))
  239. #define MAX(a, b) ((a) > (b) ? (a) : (b))
  240. // floating point type used to accumulate sums
  241. typedef double ggml_float;
  242. // 16-bit float
  243. // on Arm, we use __fp16
  244. // on x86, we use uint16_t
  245. #if defined(__ARM_NEON) && !defined(_MSC_VER)
  246. // if YCM cannot find <arm_neon.h>, make a symbolic link to it, for example:
  247. //
  248. // $ ln -sfn /Library/Developer/CommandLineTools/usr/lib/clang/13.1.6/include/arm_neon.h ./src/
  249. //
  250. #include <arm_neon.h>
  251. #define GGML_COMPUTE_FP16_TO_FP32(x) ((float) (x))
  252. #define GGML_COMPUTE_FP32_TO_FP16(x) (x)
  253. #define GGML_FP16_TO_FP32(x) ((float) (x))
  254. #define GGML_FP32_TO_FP16(x) (x)
  255. #else
  256. #ifdef __wasm_simd128__
  257. #include <wasm_simd128.h>
  258. #else
  259. #ifdef __POWER9_VECTOR__
  260. #include <altivec.h>
  261. #undef bool
  262. #define bool _Bool
  263. #else
  264. #if defined(_MSC_VER) || defined(__MINGW32__)
  265. #include <intrin.h>
  266. #else
  267. #if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__) || defined(__SSE3__)
  268. #if !defined(__riscv)
  269. #include <immintrin.h>
  270. #endif
  271. #endif
  272. #endif
  273. #endif
  274. #endif
  275. #ifdef __riscv_v_intrinsic
  276. #include <riscv_vector.h>
  277. #endif
  278. #ifdef __F16C__
  279. #ifdef _MSC_VER
  280. #define GGML_COMPUTE_FP16_TO_FP32(x) _mm_cvtss_f32(_mm_cvtph_ps(_mm_cvtsi32_si128(x)))
  281. #define GGML_COMPUTE_FP32_TO_FP16(x) _mm_extract_epi16(_mm_cvtps_ph(_mm_set_ss(x), 0), 0)
  282. #else
  283. #define GGML_COMPUTE_FP16_TO_FP32(x) _cvtsh_ss(x)
  284. #define GGML_COMPUTE_FP32_TO_FP16(x) _cvtss_sh(x, 0)
  285. #endif
  286. #elif defined(__POWER9_VECTOR__)
  287. #define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
  288. #define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x)
  289. /* the inline asm below is about 12% faster than the lookup method */
  290. #define GGML_FP16_TO_FP32(x) GGML_COMPUTE_FP16_TO_FP32(x)
  291. #define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x)
  292. static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) {
  293. register float f;
  294. register double d;
  295. __asm__(
  296. "mtfprd %0,%2\n"
  297. "xscvhpdp %0,%0\n"
  298. "frsp %1,%0\n" :
  299. /* temp */ "=d"(d),
  300. /* out */ "=f"(f):
  301. /* in */ "r"(h));
  302. return f;
  303. }
  304. static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) {
  305. register double d;
  306. register ggml_fp16_t r;
  307. __asm__( /* xscvdphp can work on double or single precision */
  308. "xscvdphp %0,%2\n"
  309. "mffprd %1,%0\n" :
  310. /* temp */ "=d"(d),
  311. /* out */ "=r"(r):
  312. /* in */ "f"(f));
  313. return r;
  314. }
  315. #else
  316. // FP16 <-> FP32
  317. // ref: https://github.com/Maratyszcza/FP16
  318. static inline float fp32_from_bits(uint32_t w) {
  319. union {
  320. uint32_t as_bits;
  321. float as_value;
  322. } fp32;
  323. fp32.as_bits = w;
  324. return fp32.as_value;
  325. }
  326. static inline uint32_t fp32_to_bits(float f) {
  327. union {
  328. float as_value;
  329. uint32_t as_bits;
  330. } fp32;
  331. fp32.as_value = f;
  332. return fp32.as_bits;
  333. }
  334. static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) {
  335. const uint32_t w = (uint32_t) h << 16;
  336. const uint32_t sign = w & UINT32_C(0x80000000);
  337. const uint32_t two_w = w + w;
  338. const uint32_t exp_offset = UINT32_C(0xE0) << 23;
  339. #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)
  340. const float exp_scale = 0x1.0p-112f;
  341. #else
  342. const float exp_scale = fp32_from_bits(UINT32_C(0x7800000));
  343. #endif
  344. const float normalized_value = fp32_from_bits((two_w >> 4) + exp_offset) * exp_scale;
  345. const uint32_t magic_mask = UINT32_C(126) << 23;
  346. const float magic_bias = 0.5f;
  347. const float denormalized_value = fp32_from_bits((two_w >> 17) | magic_mask) - magic_bias;
  348. const uint32_t denormalized_cutoff = UINT32_C(1) << 27;
  349. const uint32_t result = sign |
  350. (two_w < denormalized_cutoff ? fp32_to_bits(denormalized_value) : fp32_to_bits(normalized_value));
  351. return fp32_from_bits(result);
  352. }
  353. static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) {
  354. #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)
  355. const float scale_to_inf = 0x1.0p+112f;
  356. const float scale_to_zero = 0x1.0p-110f;
  357. #else
  358. const float scale_to_inf = fp32_from_bits(UINT32_C(0x77800000));
  359. const float scale_to_zero = fp32_from_bits(UINT32_C(0x08800000));
  360. #endif
  361. float base = (fabsf(f) * scale_to_inf) * scale_to_zero;
  362. const uint32_t w = fp32_to_bits(f);
  363. const uint32_t shl1_w = w + w;
  364. const uint32_t sign = w & UINT32_C(0x80000000);
  365. uint32_t bias = shl1_w & UINT32_C(0xFF000000);
  366. if (bias < UINT32_C(0x71000000)) {
  367. bias = UINT32_C(0x71000000);
  368. }
  369. base = fp32_from_bits((bias >> 1) + UINT32_C(0x07800000)) + base;
  370. const uint32_t bits = fp32_to_bits(base);
  371. const uint32_t exp_bits = (bits >> 13) & UINT32_C(0x00007C00);
  372. const uint32_t mantissa_bits = bits & UINT32_C(0x00000FFF);
  373. const uint32_t nonsign = exp_bits + mantissa_bits;
  374. return (sign >> 16) | (shl1_w > UINT32_C(0xFF000000) ? UINT16_C(0x7E00) : nonsign);
  375. }
  376. #define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
  377. #define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x)
  378. #endif // __F16C__
  379. #endif // __ARM_NEON
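// Worked example (editor's addition, illustrative only): an IEEE fp16 value has
// 1 sign, 5 exponent and 10 mantissa bits. The software path above widens the
// half into the top of a 32-bit word, rescales the exponent (bias 15 -> bias 127,
// hence the 2^-112 and 2^+112 scale factors) and handles denormals and NaN
// separately. A quick round-trip self-check could look like this:
#if 0
static void example_fp16_roundtrip(void) {
    const float x = 1.5f;                                // exactly representable in fp16
    const ggml_fp16_t h  = GGML_COMPUTE_FP32_TO_FP16(x);
    const float back     = GGML_COMPUTE_FP16_TO_FP32(h);
    assert(back == 1.5f);
}
#endif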
  380. //
  381. // global data
  382. //
  383. // precomputed gelu table for f16 (128 KB)
  384. static ggml_fp16_t table_gelu_f16[1 << 16];
  385. // precomputed quick gelu table for f16 (128 KB)
  386. static ggml_fp16_t table_gelu_quick_f16[1 << 16];
  387. // precomputed silu table for f16 (128 KB)
  388. static ggml_fp16_t table_silu_f16[1 << 16];
  389. // precomputed exp table for f16 (128 KB)
  390. static ggml_fp16_t table_exp_f16[1 << 16];
  391. // precomputed f32 table for f16 (256 KB)
  392. static float table_f32_f16[1 << 16];
  393. #if defined(__ARM_NEON) || defined(__wasm_simd128__)
  394. #define B1(c,s,n) 0x ## n ## c , 0x ## n ## s
  395. #define B2(c,s,n) B1(c,s,n ## c), B1(c,s,n ## s)
  396. #define B3(c,s,n) B2(c,s,n ## c), B2(c,s,n ## s)
  397. #define B4(c,s,n) B3(c,s,n ## c), B3(c,s,n ## s)
  398. #define B5(c,s,n) B4(c,s,n ## c), B4(c,s,n ## s)
  399. #define B6(c,s,n) B5(c,s,n ## c), B5(c,s,n ## s)
  400. #define B7(c,s,n) B6(c,s,n ## c), B6(c,s,n ## s)
  401. #define B8(c,s ) B7(c,s, c), B7(c,s, s)
402. // precomputed tables for expanding 8 bits to 8 bytes:
  403. static const uint64_t table_b2b_0[1 << 8] = { B8(00, 10) }; // ( b) << 4
  404. static const uint64_t table_b2b_1[1 << 8] = { B8(10, 00) }; // (!b) << 4
  405. #endif
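// Illustrative example (added comment): table_b2b_0[i] holds eight bytes where byte j is
// 0x10 when bit j of i is set and 0x00 otherwise, e.g. table_b2b_0[0x01] == 0x0000000000000010
// and table_b2b_0[0x80] == 0x1000000000000000; table_b2b_1 uses the complementary pattern
// (0x10 for cleared bits).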
406. // On ARM NEON, it is faster to convert fp16 <-> fp32 directly instead of calling into ggml_lookup_fp16_to_fp32,
  407. // so we define GGML_FP16_TO_FP32 and GGML_FP32_TO_FP16 elsewhere for NEON.
  408. // This is also true for POWER9.
  409. #if !defined(GGML_FP16_TO_FP32) || !defined(GGML_FP32_TO_FP16)
  410. inline static float ggml_lookup_fp16_to_fp32(ggml_fp16_t f) {
  411. uint16_t s;
  412. memcpy(&s, &f, sizeof(uint16_t));
  413. return table_f32_f16[s];
  414. }
  415. #define GGML_FP16_TO_FP32(x) ggml_lookup_fp16_to_fp32(x)
  416. #define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x)
  417. #endif
  418. // note: do not use these inside ggml.c
  419. // these are meant to be used via the ggml.h API
  420. float ggml_fp16_to_fp32(ggml_fp16_t x) {
  421. return (float) GGML_FP16_TO_FP32(x);
  422. }
  423. ggml_fp16_t ggml_fp32_to_fp16(float x) {
  424. return GGML_FP32_TO_FP16(x);
  425. }
  426. void ggml_fp16_to_fp32_row(const ggml_fp16_t * x, float * y, int n) {
  427. for (int i = 0; i < n; i++) {
  428. y[i] = GGML_FP16_TO_FP32(x[i]);
  429. }
  430. }
  431. void ggml_fp32_to_fp16_row(const float * x, ggml_fp16_t * y, int n) {
  432. int i = 0;
  433. #if defined(__F16C__)
  434. for (; i + 7 < n; i += 8) {
  435. __m256 x_vec = _mm256_loadu_ps(x + i);
  436. __m128i y_vec = _mm256_cvtps_ph(x_vec, _MM_FROUND_TO_NEAREST_INT);
  437. _mm_storeu_si128((__m128i *)(y + i), y_vec);
  438. }
  439. for(; i + 3 < n; i += 4) {
  440. __m128 x_vec = _mm_loadu_ps(x + i);
  441. __m128i y_vec = _mm_cvtps_ph(x_vec, _MM_FROUND_TO_NEAREST_INT);
  442. _mm_storel_epi64((__m128i *)(y + i), y_vec);
  443. }
  444. #endif
  445. for (; i < n; i++) {
  446. y[i] = GGML_FP32_TO_FP16(x[i]);
  447. }
  448. }
  449. //
  450. // timing
  451. //
  452. #if defined(_MSC_VER) || defined(__MINGW32__)
  453. static int64_t timer_freq, timer_start;
  454. void ggml_time_init(void) {
  455. LARGE_INTEGER t;
  456. QueryPerformanceFrequency(&t);
  457. timer_freq = t.QuadPart;
458. // The multiplication by 1000 or 1000000 below can cause an overflow if timer_freq
459. // and the uptime are high enough.
  460. // We subtract the program start time to reduce the likelihood of that happening.
  461. QueryPerformanceCounter(&t);
  462. timer_start = t.QuadPart;
  463. }
  464. int64_t ggml_time_ms(void) {
  465. LARGE_INTEGER t;
  466. QueryPerformanceCounter(&t);
  467. return ((t.QuadPart-timer_start) * 1000) / timer_freq;
  468. }
  469. int64_t ggml_time_us(void) {
  470. LARGE_INTEGER t;
  471. QueryPerformanceCounter(&t);
  472. return ((t.QuadPart-timer_start) * 1000000) / timer_freq;
  473. }
  474. #else
  475. void ggml_time_init(void) {}
  476. int64_t ggml_time_ms(void) {
  477. struct timespec ts;
  478. clock_gettime(CLOCK_MONOTONIC, &ts);
  479. return (int64_t)ts.tv_sec*1000 + (int64_t)ts.tv_nsec/1000000;
  480. }
  481. int64_t ggml_time_us(void) {
  482. struct timespec ts;
  483. clock_gettime(CLOCK_MONOTONIC, &ts);
  484. return (int64_t)ts.tv_sec*1000000 + (int64_t)ts.tv_nsec/1000;
  485. }
  486. #endif
  487. int64_t ggml_cycles(void) {
  488. return clock();
  489. }
  490. int64_t ggml_cycles_per_ms(void) {
  491. return CLOCKS_PER_SEC/1000;
  492. }
  493. #ifdef GGML_PERF
  494. #define ggml_perf_time_ms() ggml_time_ms()
  495. #define ggml_perf_time_us() ggml_time_us()
  496. #define ggml_perf_cycles() ggml_cycles()
  497. #define ggml_perf_cycles_per_ms() ggml_cycles_per_ms()
  498. #else
  499. #define ggml_perf_time_ms() 0
  500. #define ggml_perf_time_us() 0
  501. #define ggml_perf_cycles() 0
  502. #define ggml_perf_cycles_per_ms() 0
  503. #endif
  504. //
  505. // cache line
  506. //
  507. #if defined(__cpp_lib_hardware_interference_size)
  508. #define CACHE_LINE_SIZE hardware_destructive_interference_size
  509. #else
  510. #if defined(__POWER9_VECTOR__)
  511. #define CACHE_LINE_SIZE 128
  512. #else
  513. #define CACHE_LINE_SIZE 64
  514. #endif
  515. #endif
  516. static const size_t CACHE_LINE_SIZE_F32 = CACHE_LINE_SIZE/sizeof(float);
  517. //
  518. // quantization
  519. //
  520. #define MM256_SET_M128I(a, b) _mm256_insertf128_si256(_mm256_castsi128_si256(b), (a), 1)
  521. #if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__)
  522. // multiply int8_t, add results pairwise twice
  523. static inline __m128i mul_sum_i8_pairs(const __m128i x, const __m128i y) {
  524. // Get absolute values of x vectors
  525. const __m128i ax = _mm_sign_epi8(x, x);
  526. // Sign the values of the y vectors
  527. const __m128i sy = _mm_sign_epi8(y, x);
  528. // Perform multiplication and create 16-bit values
  529. const __m128i dot = _mm_maddubs_epi16(ax, sy);
  530. const __m128i ones = _mm_set1_epi16(1);
  531. return _mm_madd_epi16(ones, dot);
  532. }
  533. #if __AVX__ || __AVX2__ || __AVX512F__
  534. // horizontally add 8 floats
  535. static inline float hsum_float_8(const __m256 x) {
  536. __m128 res = _mm256_extractf128_ps(x, 1);
  537. res = _mm_add_ps(res, _mm256_castps256_ps128(x));
  538. res = _mm_add_ps(res, _mm_movehl_ps(res, res));
  539. res = _mm_add_ss(res, _mm_movehdup_ps(res));
  540. return _mm_cvtss_f32(res);
  541. }
  542. // horizontally add 8 int32_t
  543. static inline int hsum_i32_8(const __m256i a) {
  544. const __m128i sum128 = _mm_add_epi32(_mm256_castsi256_si128(a), _mm256_extractf128_si256(a, 1));
  545. const __m128i hi64 = _mm_unpackhi_epi64(sum128, sum128);
  546. const __m128i sum64 = _mm_add_epi32(hi64, sum128);
  547. const __m128i hi32 = _mm_shuffle_epi32(sum64, _MM_SHUFFLE(2, 3, 0, 1));
  548. return _mm_cvtsi128_si32(_mm_add_epi32(sum64, hi32));
  549. }
  550. // horizontally add 4 int32_t
  551. static inline int hsum_i32_4(const __m128i a) {
  552. const __m128i hi64 = _mm_unpackhi_epi64(a, a);
  553. const __m128i sum64 = _mm_add_epi32(hi64, a);
  554. const __m128i hi32 = _mm_shuffle_epi32(sum64, _MM_SHUFFLE(2, 3, 0, 1));
  555. return _mm_cvtsi128_si32(_mm_add_epi32(sum64, hi32));
  556. }
  557. #if defined(__AVX2__) || defined(__AVX512F__)
  558. // spread 32 bits to 32 bytes { 0x00, 0xFF }
  559. static inline __m256i bytes_from_bits_32(const uint8_t * x) {
  560. uint32_t x32;
  561. memcpy(&x32, x, sizeof(uint32_t));
  562. const __m256i shuf_mask = _mm256_set_epi64x(
  563. 0x0303030303030303, 0x0202020202020202,
  564. 0x0101010101010101, 0x0000000000000000);
  565. __m256i bytes = _mm256_shuffle_epi8(_mm256_set1_epi32(x32), shuf_mask);
  566. const __m256i bit_mask = _mm256_set1_epi64x(0x7fbfdfeff7fbfdfe);
  567. bytes = _mm256_or_si256(bytes, bit_mask);
  568. return _mm256_cmpeq_epi8(bytes, _mm256_set1_epi64x(-1));
  569. }
  570. // Unpack 32 4-bit fields into 32 bytes
571. // The output vector contains 32 bytes, each one in the [ 0 .. 15 ] interval
  572. static inline __m256i bytes_from_nibbles_32(const uint8_t * rsi)
  573. {
  574. const __m128i tmp = _mm_loadu_si128((const __m128i *)rsi);
  575. const __m256i bytes = MM256_SET_M128I(_mm_srli_epi16(tmp, 4), tmp);
  576. const __m256i lowMask = _mm256_set1_epi8( 0xF );
  577. return _mm256_and_si256(lowMask, bytes);
  578. }
  579. // add int16_t pairwise and return as float vector
  580. static inline __m256 sum_i16_pairs_float(const __m256i x) {
  581. const __m256i ones = _mm256_set1_epi16(1);
  582. const __m256i summed_pairs = _mm256_madd_epi16(ones, x);
  583. return _mm256_cvtepi32_ps(summed_pairs);
  584. }
  585. static inline __m256 mul_sum_us8_pairs_float(const __m256i ax, const __m256i sy) {
  586. #if __AVXVNNI__
  587. const __m256i zero = _mm256_setzero_si256();
  588. const __m256i summed_pairs = _mm256_dpbusd_epi32(zero, ax, sy);
  589. return _mm256_cvtepi32_ps(summed_pairs);
  590. #else
  591. // Perform multiplication and create 16-bit values
  592. const __m256i dot = _mm256_maddubs_epi16(ax, sy);
  593. return sum_i16_pairs_float(dot);
  594. #endif
  595. }
  596. // multiply int8_t, add results pairwise twice and return as float vector
  597. static inline __m256 mul_sum_i8_pairs_float(const __m256i x, const __m256i y) {
  598. #if __AVXVNNIINT8__
  599. const __m256i zero = _mm256_setzero_si256();
  600. const __m256i summed_pairs = _mm256_dpbssd_epi32(zero, x, y);
  601. return _mm256_cvtepi32_ps(summed_pairs);
  602. #else
  603. // Get absolute values of x vectors
  604. const __m256i ax = _mm256_sign_epi8(x, x);
  605. // Sign the values of the y vectors
  606. const __m256i sy = _mm256_sign_epi8(y, x);
  607. return mul_sum_us8_pairs_float(ax, sy);
  608. #endif
  609. }
  610. static inline __m128i packNibbles( __m256i bytes )
  611. {
  612. // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh
  613. #if __AVX512F__
  614. const __m256i bytes_srli_4 = _mm256_srli_epi16(bytes, 4); // 0000_0000_abcd_0000
  615. bytes = _mm256_or_si256(bytes, bytes_srli_4); // 0000_abcd_abcd_efgh
  616. return _mm256_cvtepi16_epi8(bytes); // abcd_efgh
  617. #else
  618. const __m256i lowByte = _mm256_set1_epi16( 0xFF );
  619. __m256i high = _mm256_andnot_si256( lowByte, bytes );
  620. __m256i low = _mm256_and_si256( lowByte, bytes );
  621. high = _mm256_srli_epi16( high, 4 );
  622. bytes = _mm256_or_si256( low, high );
  623. // Compress uint16_t lanes into bytes
  624. __m128i r0 = _mm256_castsi256_si128( bytes );
  625. __m128i r1 = _mm256_extracti128_si256( bytes, 1 );
  626. return _mm_packus_epi16( r0, r1 );
  627. #endif
  628. }
  629. #elif defined(__AVX__)
  630. // spread 32 bits to 32 bytes { 0x00, 0xFF }
  631. static inline __m256i bytes_from_bits_32(const uint8_t * x) {
  632. uint32_t x32;
  633. memcpy(&x32, x, sizeof(uint32_t));
  634. const __m128i shuf_maskl = _mm_set_epi64x(0x0101010101010101, 0x0000000000000000);
  635. const __m128i shuf_maskh = _mm_set_epi64x(0x0303030303030303, 0x0202020202020202);
  636. __m128i bytesl = _mm_shuffle_epi8(_mm_set1_epi32(x32), shuf_maskl);
  637. __m128i bytesh = _mm_shuffle_epi8(_mm_set1_epi32(x32), shuf_maskh);
  638. const __m128i bit_mask = _mm_set1_epi64x(0x7fbfdfeff7fbfdfe);
  639. bytesl = _mm_or_si128(bytesl, bit_mask);
  640. bytesh = _mm_or_si128(bytesh, bit_mask);
  641. bytesl = _mm_cmpeq_epi8(bytesl, _mm_set1_epi64x(-1));
  642. bytesh = _mm_cmpeq_epi8(bytesh, _mm_set1_epi64x(-1));
  643. return MM256_SET_M128I(bytesh, bytesl);
  644. }
  645. // Unpack 32 4-bit fields into 32 bytes
646. // The output vector contains 32 bytes, each one in the [ 0 .. 15 ] interval
  647. static inline __m256i bytes_from_nibbles_32(const uint8_t * rsi)
  648. {
  649. // Load 16 bytes from memory
  650. __m128i tmpl = _mm_loadu_si128((const __m128i *)rsi);
  651. __m128i tmph = _mm_srli_epi16(tmpl, 4);
  652. const __m128i lowMask = _mm_set1_epi8(0xF);
  653. tmpl = _mm_and_si128(lowMask, tmpl);
  654. tmph = _mm_and_si128(lowMask, tmph);
  655. return MM256_SET_M128I(tmph, tmpl);
  656. }
  657. // add int16_t pairwise and return as float vector
  658. static inline __m256 sum_i16_pairs_float(const __m128i xh, const __m128i xl) {
  659. const __m128i ones = _mm_set1_epi16(1);
  660. const __m128i summed_pairsl = _mm_madd_epi16(ones, xl);
  661. const __m128i summed_pairsh = _mm_madd_epi16(ones, xh);
  662. const __m256i summed_pairs = MM256_SET_M128I(summed_pairsh, summed_pairsl);
  663. return _mm256_cvtepi32_ps(summed_pairs);
  664. }
  665. static inline __m256 mul_sum_us8_pairs_float(const __m256i ax, const __m256i sy) {
  666. const __m128i axl = _mm256_castsi256_si128(ax);
  667. const __m128i axh = _mm256_extractf128_si256(ax, 1);
  668. const __m128i syl = _mm256_castsi256_si128(sy);
  669. const __m128i syh = _mm256_extractf128_si256(sy, 1);
  670. // Perform multiplication and create 16-bit values
  671. const __m128i dotl = _mm_maddubs_epi16(axl, syl);
  672. const __m128i doth = _mm_maddubs_epi16(axh, syh);
  673. return sum_i16_pairs_float(doth, dotl);
  674. }
  675. // multiply int8_t, add results pairwise twice and return as float vector
  676. static inline __m256 mul_sum_i8_pairs_float(const __m256i x, const __m256i y) {
  677. const __m128i xl = _mm256_castsi256_si128(x);
  678. const __m128i xh = _mm256_extractf128_si256(x, 1);
  679. const __m128i yl = _mm256_castsi256_si128(y);
  680. const __m128i yh = _mm256_extractf128_si256(y, 1);
  681. // Get absolute values of x vectors
  682. const __m128i axl = _mm_sign_epi8(xl, xl);
  683. const __m128i axh = _mm_sign_epi8(xh, xh);
  684. // Sign the values of the y vectors
  685. const __m128i syl = _mm_sign_epi8(yl, xl);
  686. const __m128i syh = _mm_sign_epi8(yh, xh);
  687. // Perform multiplication and create 16-bit values
  688. const __m128i dotl = _mm_maddubs_epi16(axl, syl);
  689. const __m128i doth = _mm_maddubs_epi16(axh, syh);
  690. return sum_i16_pairs_float(doth, dotl);
  691. }
  692. static inline __m128i packNibbles( __m128i bytes1, __m128i bytes2 )
  693. {
  694. // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh
  695. const __m128i lowByte = _mm_set1_epi16( 0xFF );
  696. __m128i high = _mm_andnot_si128( lowByte, bytes1 );
  697. __m128i low = _mm_and_si128( lowByte, bytes1 );
  698. high = _mm_srli_epi16( high, 4 );
  699. bytes1 = _mm_or_si128( low, high );
  700. high = _mm_andnot_si128( lowByte, bytes2 );
  701. low = _mm_and_si128( lowByte, bytes2 );
  702. high = _mm_srli_epi16( high, 4 );
  703. bytes2 = _mm_or_si128( low, high );
  704. return _mm_packus_epi16( bytes1, bytes2);
  705. }
  706. #endif
  707. #elif defined(__SSSE3__)
  708. // horizontally add 4x4 floats
  709. static inline float hsum_float_4x4(const __m128 a, const __m128 b, const __m128 c, const __m128 d) {
  710. __m128 res_0 =_mm_hadd_ps(a, b);
  711. __m128 res_1 =_mm_hadd_ps(c, d);
  712. __m128 res =_mm_hadd_ps(res_0, res_1);
  713. res =_mm_hadd_ps(res, res);
  714. res =_mm_hadd_ps(res, res);
  715. return _mm_cvtss_f32(res);
  716. }
  717. #endif // __AVX__ || __AVX2__ || __AVX512F__
  718. #endif // defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__)
  719. #if defined(__ARM_NEON)
  720. #if !defined(__aarch64__)
  721. inline static int32_t vaddvq_s32(int32x4_t v) {
  722. return vgetq_lane_s32(v, 0) + vgetq_lane_s32(v, 1) + vgetq_lane_s32(v, 2) + vgetq_lane_s32(v, 3);
  723. }
  724. inline static float vaddvq_f32(float32x4_t v) {
  725. return vgetq_lane_f32(v, 0) + vgetq_lane_f32(v, 1) + vgetq_lane_f32(v, 2) + vgetq_lane_f32(v, 3);
  726. }
  727. inline static float vmaxvq_f32(float32x4_t v) {
  728. return
  729. MAX(MAX(vgetq_lane_f32(v, 0), vgetq_lane_f32(v, 1)),
  730. MAX(vgetq_lane_f32(v, 2), vgetq_lane_f32(v, 3)));
  731. }
  732. inline static int32x4_t vcvtnq_s32_f32(float32x4_t v) {
  733. int32x4_t res;
  734. res[0] = roundf(vgetq_lane_f32(v, 0));
  735. res[1] = roundf(vgetq_lane_f32(v, 1));
  736. res[2] = roundf(vgetq_lane_f32(v, 2));
  737. res[3] = roundf(vgetq_lane_f32(v, 3));
  738. return res;
  739. }
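// Note (added comment): the hardware vcvtnq_s32_f32 rounds to nearest with ties to even,
// while this roundf-based fallback rounds ties away from zero, so the two may differ on
// exact .5 ties.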
  740. #endif
  741. #endif
  742. #define QK4_0 32
  743. typedef struct {
  744. ggml_fp16_t d; // delta
  745. uint8_t qs[QK4_0 / 2]; // nibbles / quants
  746. } block_q4_0;
  747. static_assert(sizeof(block_q4_0) == sizeof(ggml_fp16_t) + QK4_0 / 2, "wrong q4_0 block size/padding");
  748. #define QK4_1 32
  749. typedef struct {
  750. ggml_fp16_t d; // delta
  751. ggml_fp16_t m; // min
  752. uint8_t qs[QK4_1 / 2]; // nibbles / quants
  753. } block_q4_1;
  754. static_assert(sizeof(block_q4_1) == 2 * sizeof(ggml_fp16_t) + QK4_1 / 2, "wrong q4_1 block size/padding");
  755. #define QK5_0 32
  756. typedef struct {
  757. ggml_fp16_t d; // delta
  758. uint8_t qh[4]; // 5-th bit of quants
  759. uint8_t qs[QK5_0 / 2]; // nibbles / quants
  760. } block_q5_0;
  761. static_assert(sizeof(block_q5_0) == sizeof(ggml_fp16_t) + sizeof(uint32_t) + QK5_0 / 2, "wrong q5_0 block size/padding");
  762. #define QK5_1 32
  763. typedef struct {
  764. ggml_fp16_t d; // delta
  765. ggml_fp16_t m; // min
  766. uint8_t qh[4]; // 5-th bit of quants
  767. uint8_t qs[QK5_1 / 2]; // nibbles / quants
  768. } block_q5_1;
  769. static_assert(sizeof(block_q5_1) == 2 * sizeof(ggml_fp16_t) + sizeof(uint32_t) + QK5_1 / 2, "wrong q5_1 block size/padding");
  770. #define QK8_0 32
  771. typedef struct {
  772. ggml_fp16_t d; // delta
  773. int8_t qs[QK8_0]; // quants
  774. } block_q8_0;
  775. static_assert(sizeof(block_q8_0) == sizeof(ggml_fp16_t) + QK8_0, "wrong q8_0 block size/padding");
  776. #define QK8_1 32
  777. typedef struct {
  778. float d; // delta
  779. float s; // d * sum(qs[i])
  780. int8_t qs[QK8_1]; // quants
  781. } block_q8_1;
  782. static_assert(sizeof(block_q8_1) == 2*sizeof(float) + QK8_1, "wrong q8_1 block size/padding");
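// Size check (illustrative, added comment): a block_q4_0 stores 32 weights in 2 bytes of
// fp16 scale + 16 bytes of nibbles = 18 bytes, i.e. 4.5 bits per weight; block_q5_0 is
// 2 + 4 + 16 = 22 bytes (5.5 bits/weight) and block_q8_0 is 2 + 32 = 34 bytes (8.5 bits/weight).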
  783. // reference implementation for deterministic creation of model files
  784. static void quantize_row_q4_0_reference(const float * restrict x, block_q4_0 * restrict y, int k) {
  785. static const int qk = QK4_0;
  786. assert(k % qk == 0);
  787. const int nb = k / qk;
  788. for (int i = 0; i < nb; i++) {
  789. float amax = 0.0f; // absolute max
  790. float max = 0.0f;
  791. for (int j = 0; j < qk; j++) {
  792. const float v = x[i*qk + j];
  793. if (amax < fabsf(v)) {
  794. amax = fabsf(v);
  795. max = v;
  796. }
  797. }
  798. const float d = max / -8;
  799. const float id = d ? 1.0f/d : 0.0f;
  800. y[i].d = GGML_FP32_TO_FP16(d);
  801. for (int j = 0; j < qk/2; ++j) {
  802. const float x0 = x[i*qk + 0 + j]*id;
  803. const float x1 = x[i*qk + qk/2 + j]*id;
  804. const uint8_t xi0 = MIN(15, (int8_t)(x0 + 8.5f));
  805. const uint8_t xi1 = MIN(15, (int8_t)(x1 + 8.5f));
  806. y[i].qs[j] = xi0;
  807. y[i].qs[j] |= xi1 << 4;
  808. }
  809. }
  810. }
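// Worked example (illustrative, added comment): if the largest-magnitude value in a block
// is max = 4.0f, then d = 4.0f / -8 = -0.5f and id = -2.0f. A source value of 4.0f maps to
// 4.0f * -2.0f + 8.5f = 0.5f -> nibble 0, which dequantizes as (0 - 8) * d = 4.0f;
// a value of -4.0f maps to 16.5f, is clamped to 15, and dequantizes as (15 - 8) * d = -3.5f.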
  811. static void quantize_row_q4_0(const float * restrict x, void * restrict y, int k) {
  812. quantize_row_q4_0_reference(x, y, k);
  813. }
  814. static void quantize_row_q4_1_reference(const float * restrict x, block_q4_1 * restrict y, int k) {
  815. const int qk = QK4_1;
  816. assert(k % qk == 0);
  817. const int nb = k / qk;
  818. for (int i = 0; i < nb; i++) {
  819. float min = FLT_MAX;
  820. float max = -FLT_MAX;
  821. for (int j = 0; j < qk; j++) {
  822. const float v = x[i*qk + j];
  823. if (v < min) min = v;
  824. if (v > max) max = v;
  825. }
  826. const float d = (max - min) / ((1 << 4) - 1);
  827. const float id = d ? 1.0f/d : 0.0f;
  828. y[i].d = GGML_FP32_TO_FP16(d);
  829. y[i].m = GGML_FP32_TO_FP16(min);
  830. for (int j = 0; j < qk/2; ++j) {
  831. const float x0 = (x[i*qk + 0 + j] - min)*id;
  832. const float x1 = (x[i*qk + qk/2 + j] - min)*id;
  833. const uint8_t xi0 = MIN(15, (int8_t)(x0 + 0.5f));
  834. const uint8_t xi1 = MIN(15, (int8_t)(x1 + 0.5f));
  835. y[i].qs[j] = xi0;
  836. y[i].qs[j] |= xi1 << 4;
  837. }
  838. }
  839. }
  840. static void quantize_row_q4_1(const float * restrict x, void * restrict y, int k) {
  841. quantize_row_q4_1_reference(x, y, k);
  842. }
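// Worked example (illustrative, added comment): with block min = -1.0f and max = 2.0f,
// d = 3.0f / 15 = 0.2f and id = 5.0f. A source value of 0.0f maps to
// (0.0f - (-1.0f)) * 5.0f + 0.5f = 5.5f -> quant 5, which dequantizes as 5 * 0.2f + (-1.0f) = 0.0f.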
  843. static void quantize_row_q5_0_reference(const float * restrict x, block_q5_0 * restrict y, int k) {
  844. static const int qk = QK5_0;
  845. assert(k % qk == 0);
  846. const int nb = k / qk;
  847. for (int i = 0; i < nb; i++) {
  848. float amax = 0.0f; // absolute max
  849. float max = 0.0f;
  850. for (int j = 0; j < qk; j++) {
  851. const float v = x[i*qk + j];
  852. if (amax < fabsf(v)) {
  853. amax = fabsf(v);
  854. max = v;
  855. }
  856. }
  857. const float d = max / -16;
  858. const float id = d ? 1.0f/d : 0.0f;
  859. y[i].d = GGML_FP32_TO_FP16(d);
  860. uint32_t qh = 0;
  861. for (int j = 0; j < qk/2; ++j) {
  862. const float x0 = x[i*qk + 0 + j]*id;
  863. const float x1 = x[i*qk + qk/2 + j]*id;
  864. const uint8_t xi0 = MIN(31, (int8_t)(x0 + 16.5f));
  865. const uint8_t xi1 = MIN(31, (int8_t)(x1 + 16.5f));
  866. y[i].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4);
  867. // get the 5-th bit and store it in qh at the right position
  868. qh |= ((xi0 & 0x10u) >> 4) << (j + 0);
  869. qh |= ((xi1 & 0x10u) >> 4) << (j + qk/2);
  870. }
  871. memcpy(&y[i].qh, &qh, sizeof(qh));
  872. }
  873. }
  874. static void quantize_row_q5_0(const float * restrict x, void * restrict y, int k) {
  875. quantize_row_q5_0_reference(x, y, k);
  876. }
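// Packing example (illustrative, added comment): a 5-bit quant such as xi0 = 19 (binary 10011)
// is split into its low nibble 0x3, stored in qs[j], and its 5th bit (0x10), stored as bit j
// of qh; the corresponding value from the second half of the block stores its 5th bit at
// bit (j + 16) of qh.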
  877. static void quantize_row_q5_1_reference(const float * restrict x, block_q5_1 * restrict y, int k) {
  878. const int qk = QK5_1;
  879. assert(k % qk == 0);
  880. const int nb = k / qk;
  881. for (int i = 0; i < nb; i++) {
  882. float min = FLT_MAX;
  883. float max = -FLT_MAX;
  884. for (int j = 0; j < qk; j++) {
  885. const float v = x[i*qk + j];
  886. if (v < min) min = v;
  887. if (v > max) max = v;
  888. }
  889. const float d = (max - min) / ((1 << 5) - 1);
  890. const float id = d ? 1.0f/d : 0.0f;
  891. y[i].d = GGML_FP32_TO_FP16(d);
  892. y[i].m = GGML_FP32_TO_FP16(min);
  893. uint32_t qh = 0;
  894. for (int j = 0; j < qk/2; ++j) {
  895. const float x0 = (x[i*qk + 0 + j] - min)*id;
  896. const float x1 = (x[i*qk + qk/2 + j] - min)*id;
  897. const uint8_t xi0 = (uint8_t)(x0 + 0.5f);
  898. const uint8_t xi1 = (uint8_t)(x1 + 0.5f);
  899. y[i].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4);
  900. // get the 5-th bit and store it in qh at the right position
  901. qh |= ((xi0 & 0x10u) >> 4) << (j + 0);
  902. qh |= ((xi1 & 0x10u) >> 4) << (j + qk/2);
  903. }
  904. memcpy(&y[i].qh, &qh, sizeof(y[i].qh));
  905. }
  906. }
  907. static void quantize_row_q5_1(const float * restrict x, void * restrict y, int k) {
  908. quantize_row_q5_1_reference(x, y, k);
  909. }
  910. // reference implementation for deterministic creation of model files
  911. static void quantize_row_q8_0_reference(const float * restrict x, block_q8_0 * restrict y, int k) {
  912. assert(k % QK8_0 == 0);
  913. const int nb = k / QK8_0;
  914. for (int i = 0; i < nb; i++) {
  915. float amax = 0.0f; // absolute max
  916. for (int j = 0; j < QK8_0; j++) {
  917. const float v = x[i*QK8_0 + j];
  918. amax = MAX(amax, fabsf(v));
  919. }
  920. const float d = amax / ((1 << 7) - 1);
  921. const float id = d ? 1.0f/d : 0.0f;
  922. y[i].d = GGML_FP32_TO_FP16(d);
  923. for (int j = 0; j < QK8_0; ++j) {
  924. const float x0 = x[i*QK8_0 + j]*id;
  925. y[i].qs[j] = roundf(x0);
  926. }
  927. }
  928. }
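// Worked example (illustrative, added comment): if amax = 2.54f for a block, then
// d = 2.54f / 127 is roughly 0.02f and id is roughly 50.0f; a value of 1.0f is stored as
// roundf(1.0f * 50.0f) = 50 and dequantizes as 50 * 0.02f = 1.0f.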
  929. static void quantize_row_q8_0(const float * restrict x, void * restrict vy, int k) {
  930. assert(QK8_0 == 32);
  931. assert(k % QK8_0 == 0);
  932. const int nb = k / QK8_0;
  933. block_q8_0 * restrict y = vy;
  934. #if defined(__ARM_NEON)
  935. for (int i = 0; i < nb; i++) {
  936. float32x4_t srcv [8];
  937. float32x4_t asrcv[8];
  938. float32x4_t amaxv[8];
  939. for (int j = 0; j < 8; j++) srcv[j] = vld1q_f32(x + i*32 + 4*j);
  940. for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[j]);
  941. for (int j = 0; j < 4; j++) amaxv[2*j] = vmaxq_f32(asrcv[2*j], asrcv[2*j+1]);
  942. for (int j = 0; j < 2; j++) amaxv[4*j] = vmaxq_f32(amaxv[4*j], amaxv[4*j+2]);
  943. for (int j = 0; j < 1; j++) amaxv[8*j] = vmaxq_f32(amaxv[8*j], amaxv[8*j+4]);
  944. const float amax = vmaxvq_f32(amaxv[0]);
  945. const float d = amax / ((1 << 7) - 1);
  946. const float id = d ? 1.0f/d : 0.0f;
  947. y[i].d = GGML_FP32_TO_FP16(d);
  948. for (int j = 0; j < 8; j++) {
  949. const float32x4_t v = vmulq_n_f32(srcv[j], id);
  950. const int32x4_t vi = vcvtnq_s32_f32(v);
  951. y[i].qs[4*j + 0] = vgetq_lane_s32(vi, 0);
  952. y[i].qs[4*j + 1] = vgetq_lane_s32(vi, 1);
  953. y[i].qs[4*j + 2] = vgetq_lane_s32(vi, 2);
  954. y[i].qs[4*j + 3] = vgetq_lane_s32(vi, 3);
  955. }
  956. }
  957. #elif defined(__wasm_simd128__)
  958. for (int i = 0; i < nb; i++) {
  959. v128_t srcv [8];
  960. v128_t asrcv[8];
  961. v128_t amaxv[8];
  962. for (int j = 0; j < 8; j++) srcv[j] = wasm_v128_load(x + i*32 + 4*j);
  963. for (int j = 0; j < 8; j++) asrcv[j] = wasm_f32x4_abs(srcv[j]);
  964. for (int j = 0; j < 4; j++) amaxv[2*j] = wasm_f32x4_max(asrcv[2*j], asrcv[2*j+1]);
  965. for (int j = 0; j < 2; j++) amaxv[4*j] = wasm_f32x4_max(amaxv[4*j], amaxv[4*j+2]);
  966. for (int j = 0; j < 1; j++) amaxv[8*j] = wasm_f32x4_max(amaxv[8*j], amaxv[8*j+4]);
  967. const float amax = MAX(MAX(wasm_f32x4_extract_lane(amaxv[0], 0),
  968. wasm_f32x4_extract_lane(amaxv[0], 1)),
  969. MAX(wasm_f32x4_extract_lane(amaxv[0], 2),
  970. wasm_f32x4_extract_lane(amaxv[0], 3)));
  971. const float d = amax / ((1 << 7) - 1);
  972. const float id = d ? 1.0f/d : 0.0f;
  973. y[i].d = GGML_FP32_TO_FP16(d);
  974. for (int j = 0; j < 8; j++) {
  975. const v128_t v = wasm_f32x4_mul(srcv[j], wasm_f32x4_splat(id));
  976. const v128_t vi = wasm_i32x4_trunc_sat_f32x4(v);
  977. y[i].qs[4*j + 0] = wasm_i32x4_extract_lane(vi, 0);
  978. y[i].qs[4*j + 1] = wasm_i32x4_extract_lane(vi, 1);
  979. y[i].qs[4*j + 2] = wasm_i32x4_extract_lane(vi, 2);
  980. y[i].qs[4*j + 3] = wasm_i32x4_extract_lane(vi, 3);
  981. }
  982. }
  983. #elif defined(__AVX2__) || defined(__AVX__)
  984. for (int i = 0; i < nb; i++) {
  985. // Load elements into 4 AVX vectors
  986. __m256 v0 = _mm256_loadu_ps( x );
  987. __m256 v1 = _mm256_loadu_ps( x + 8 );
  988. __m256 v2 = _mm256_loadu_ps( x + 16 );
  989. __m256 v3 = _mm256_loadu_ps( x + 24 );
  990. x += 32;
  991. // Compute max(abs(e)) for the block
  992. const __m256 signBit = _mm256_set1_ps( -0.0f );
  993. __m256 maxAbs = _mm256_andnot_ps( signBit, v0 );
  994. maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) );
  995. maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) );
  996. maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) );
  997. __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) );
  998. max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) );
  999. max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) );
  1000. const float maxScalar = _mm_cvtss_f32( max4 );
  1001. // Quantize these floats
  1002. const float d = maxScalar / 127.f;
  1003. y[i].d = GGML_FP32_TO_FP16(d);
  1004. const float id = ( maxScalar != 0.0f ) ? 127.f / maxScalar : 0.0f;
  1005. const __m256 mul = _mm256_set1_ps( id );
  1006. // Apply the multiplier
  1007. v0 = _mm256_mul_ps( v0, mul );
  1008. v1 = _mm256_mul_ps( v1, mul );
  1009. v2 = _mm256_mul_ps( v2, mul );
  1010. v3 = _mm256_mul_ps( v3, mul );
  1011. // Round to nearest integer
  1012. v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST );
  1013. v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST );
  1014. v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST );
  1015. v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST );
  1016. // Convert floats to integers
  1017. __m256i i0 = _mm256_cvtps_epi32( v0 );
  1018. __m256i i1 = _mm256_cvtps_epi32( v1 );
  1019. __m256i i2 = _mm256_cvtps_epi32( v2 );
  1020. __m256i i3 = _mm256_cvtps_epi32( v3 );
  1021. #if defined(__AVX2__)
  1022. // Convert int32 to int16
  1023. i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15
  1024. i2 = _mm256_packs_epi32( i2, i3 ); // 16, 17, 18, 19, 24, 25, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31
  1025. // Convert int16 to int8
  1026. i0 = _mm256_packs_epi16( i0, i2 ); // 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27, 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31
  1027. // We got our precious signed bytes, but the order is now wrong
  1028. // These AVX2 pack instructions process 16-byte pieces independently
1029. // The following instruction fixes the order
  1030. const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 );
  1031. i0 = _mm256_permutevar8x32_epi32( i0, perm );
  1032. _mm256_storeu_si256((__m256i *)y[i].qs, i0);
  1033. #else
1034. // Since AVX lacks some of the necessary integer instructions,
1035. // we split the registers in half and use the SSE equivalents of the AVX2 intrinsics
  1036. __m128i ni0 = _mm256_castsi256_si128( i0 );
  1037. __m128i ni1 = _mm256_extractf128_si256( i0, 1);
  1038. __m128i ni2 = _mm256_castsi256_si128( i1 );
  1039. __m128i ni3 = _mm256_extractf128_si256( i1, 1);
  1040. __m128i ni4 = _mm256_castsi256_si128( i2 );
  1041. __m128i ni5 = _mm256_extractf128_si256( i2, 1);
  1042. __m128i ni6 = _mm256_castsi256_si128( i3 );
  1043. __m128i ni7 = _mm256_extractf128_si256( i3, 1);
  1044. // Convert int32 to int16
  1045. ni0 = _mm_packs_epi32( ni0, ni1 );
  1046. ni2 = _mm_packs_epi32( ni2, ni3 );
  1047. ni4 = _mm_packs_epi32( ni4, ni5 );
  1048. ni6 = _mm_packs_epi32( ni6, ni7 );
  1049. // Convert int16 to int8
  1050. ni0 = _mm_packs_epi16( ni0, ni2 );
  1051. ni4 = _mm_packs_epi16( ni4, ni6 );
  1052. _mm_storeu_si128((__m128i *)(y[i].qs + 0), ni0);
  1053. _mm_storeu_si128((__m128i *)(y[i].qs + 16), ni4);
  1054. #endif
  1055. }
  1056. #elif defined(__riscv_v_intrinsic)
  1057. size_t vl = __riscv_vsetvl_e32m4(QK8_0);
  1058. for (int i = 0; i < nb; i++) {
  1059. // load elements
  1060. vfloat32m4_t v_x = __riscv_vle32_v_f32m4(x+i*QK8_0, vl);
  1061. vfloat32m4_t vfabs = __riscv_vfabs_v_f32m4(v_x, vl);
  1062. vfloat32m1_t tmp = __riscv_vfmv_v_f_f32m1(0.0f, vl);
  1063. vfloat32m1_t vmax = __riscv_vfredmax_vs_f32m4_f32m1(vfabs, tmp, vl);
  1064. float amax = __riscv_vfmv_f_s_f32m1_f32(vmax);
  1065. const float d = amax / ((1 << 7) - 1);
  1066. const float id = d ? 1.0f/d : 0.0f;
  1067. y[i].d = GGML_FP32_TO_FP16(d);
  1068. vfloat32m4_t x0 = __riscv_vfmul_vf_f32m4(v_x, id, vl);
  1069. // convert to integer
  1070. vint16m2_t vi = __riscv_vfncvt_x_f_w_i16m2(x0, vl);
  1071. vint8m1_t vs = __riscv_vncvt_x_x_w_i8m1(vi, vl);
  1072. // store result
  1073. __riscv_vse8_v_i8m1(y[i].qs , vs, vl);
  1074. }
  1075. #else
  1076. // scalar
  1077. quantize_row_q8_0_reference(x, y, k);
  1078. #endif
  1079. }
  1080. // reference implementation for deterministic creation of model files
  1081. static void quantize_row_q8_1_reference(const float * restrict x, block_q8_1 * restrict y, int k) {
  1082. assert(QK8_1 == 32);
  1083. assert(k % QK8_1 == 0);
  1084. const int nb = k / QK8_1;
  1085. for (int i = 0; i < nb; i++) {
  1086. float amax = 0.0f; // absolute max
  1087. for (int j = 0; j < QK8_1; j++) {
  1088. const float v = x[i*QK8_1 + j];
  1089. amax = MAX(amax, fabsf(v));
  1090. }
  1091. const float d = amax / ((1 << 7) - 1);
  1092. const float id = d ? 1.0f/d : 0.0f;
  1093. y[i].d = d;
  1094. int sum = 0;
  1095. for (int j = 0; j < QK8_1/2; ++j) {
  1096. const float v0 = x[i*QK8_1 + j]*id;
  1097. const float v1 = x[i*QK8_1 + QK8_1/2 + j]*id;
  1098. y[i].qs[ j] = roundf(v0);
  1099. y[i].qs[QK8_1/2 + j] = roundf(v1);
  1100. sum += y[i].qs[ j];
  1101. sum += y[i].qs[QK8_1/2 + j];
  1102. }
  1103. y[i].s = sum*d;
  1104. }
  1105. }
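// Note (illustrative, added comment): the precomputed s = d * sum(qs[i]) lets the affine
// dot products factor the per-block minimum out of the inner loop, e.g. for q4_1 x q8_1:
//   sum_j (qx[j]*dx + mx) * (qy[j]*dy) = dx*dy * sum_j qx[j]*qy[j] + mx * sy
// so the minimum only costs one multiply-add per block.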
  1106. static void quantize_row_q8_1(const float * restrict x, void * restrict vy, int k) {
  1107. assert(k % QK8_1 == 0);
  1108. const int nb = k / QK8_1;
  1109. block_q8_1 * restrict y = vy;
  1110. #if defined(__ARM_NEON)
  1111. for (int i = 0; i < nb; i++) {
  1112. float32x4_t srcv [8];
  1113. float32x4_t asrcv[8];
  1114. float32x4_t amaxv[8];
  1115. for (int j = 0; j < 8; j++) srcv[j] = vld1q_f32(x + i*32 + 4*j);
  1116. for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[j]);
  1117. for (int j = 0; j < 4; j++) amaxv[2*j] = vmaxq_f32(asrcv[2*j], asrcv[2*j+1]);
  1118. for (int j = 0; j < 2; j++) amaxv[4*j] = vmaxq_f32(amaxv[4*j], amaxv[4*j+2]);
  1119. for (int j = 0; j < 1; j++) amaxv[8*j] = vmaxq_f32(amaxv[8*j], amaxv[8*j+4]);
  1120. const float amax = vmaxvq_f32(amaxv[0]);
  1121. const float d = amax / ((1 << 7) - 1);
  1122. const float id = d ? 1.0f/d : 0.0f;
  1123. y[i].d = d;
  1124. int32x4_t accv = vdupq_n_s32(0);
  1125. for (int j = 0; j < 8; j++) {
  1126. const float32x4_t v = vmulq_n_f32(srcv[j], id);
  1127. const int32x4_t vi = vcvtnq_s32_f32(v);
  1128. y[i].qs[4*j + 0] = vgetq_lane_s32(vi, 0);
  1129. y[i].qs[4*j + 1] = vgetq_lane_s32(vi, 1);
  1130. y[i].qs[4*j + 2] = vgetq_lane_s32(vi, 2);
  1131. y[i].qs[4*j + 3] = vgetq_lane_s32(vi, 3);
  1132. accv = vaddq_s32(accv, vi);
  1133. }
  1134. y[i].s = d * vaddvq_s32(accv);
  1135. }
  1136. #elif defined(__wasm_simd128__)
  1137. for (int i = 0; i < nb; i++) {
  1138. v128_t srcv [8];
  1139. v128_t asrcv[8];
  1140. v128_t amaxv[8];
  1141. for (int j = 0; j < 8; j++) srcv[j] = wasm_v128_load(x + i*32 + 4*j);
  1142. for (int j = 0; j < 8; j++) asrcv[j] = wasm_f32x4_abs(srcv[j]);
  1143. for (int j = 0; j < 4; j++) amaxv[2*j] = wasm_f32x4_max(asrcv[2*j], asrcv[2*j+1]);
  1144. for (int j = 0; j < 2; j++) amaxv[4*j] = wasm_f32x4_max(amaxv[4*j], amaxv[4*j+2]);
  1145. for (int j = 0; j < 1; j++) amaxv[8*j] = wasm_f32x4_max(amaxv[8*j], amaxv[8*j+4]);
  1146. const float amax = MAX(MAX(wasm_f32x4_extract_lane(amaxv[0], 0),
  1147. wasm_f32x4_extract_lane(amaxv[0], 1)),
  1148. MAX(wasm_f32x4_extract_lane(amaxv[0], 2),
  1149. wasm_f32x4_extract_lane(amaxv[0], 3)));
  1150. const float d = amax / ((1 << 7) - 1);
  1151. const float id = d ? 1.0f/d : 0.0f;
  1152. y[i].d = d;
  1153. v128_t accv = wasm_i32x4_splat(0);
  1154. for (int j = 0; j < 8; j++) {
  1155. const v128_t v = wasm_f32x4_mul(srcv[j], wasm_f32x4_splat(id));
  1156. const v128_t vi = wasm_i32x4_trunc_sat_f32x4(v);
  1157. y[i].qs[4*j + 0] = wasm_i32x4_extract_lane(vi, 0);
  1158. y[i].qs[4*j + 1] = wasm_i32x4_extract_lane(vi, 1);
  1159. y[i].qs[4*j + 2] = wasm_i32x4_extract_lane(vi, 2);
  1160. y[i].qs[4*j + 3] = wasm_i32x4_extract_lane(vi, 3);
  1161. accv = wasm_i32x4_add(accv, vi);
  1162. }
  1163. y[i].s = d * (wasm_i32x4_extract_lane(accv, 0) +
  1164. wasm_i32x4_extract_lane(accv, 1) +
  1165. wasm_i32x4_extract_lane(accv, 2) +
  1166. wasm_i32x4_extract_lane(accv, 3));
  1167. }
  1168. #elif defined(__AVX2__) || defined(__AVX__)
  1169. for (int i = 0; i < nb; i++) {
  1170. // Load elements into 4 AVX vectors
  1171. __m256 v0 = _mm256_loadu_ps( x );
  1172. __m256 v1 = _mm256_loadu_ps( x + 8 );
  1173. __m256 v2 = _mm256_loadu_ps( x + 16 );
  1174. __m256 v3 = _mm256_loadu_ps( x + 24 );
  1175. x += 32;
  1176. // Compute max(abs(e)) for the block
  1177. const __m256 signBit = _mm256_set1_ps( -0.0f );
  1178. __m256 maxAbs = _mm256_andnot_ps( signBit, v0 );
  1179. maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) );
  1180. maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) );
  1181. maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) );
  1182. __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) );
  1183. max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) );
  1184. max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) );
  1185. const float maxScalar = _mm_cvtss_f32( max4 );
  1186. // Quantize these floats
  1187. const float d = maxScalar / 127.f;
  1188. y[i].d = d;
  1189. const float id = ( maxScalar != 0.0f ) ? 127.f / maxScalar : 0.0f;
  1190. const __m256 mul = _mm256_set1_ps( id );
  1191. // Apply the multiplier
  1192. v0 = _mm256_mul_ps( v0, mul );
  1193. v1 = _mm256_mul_ps( v1, mul );
  1194. v2 = _mm256_mul_ps( v2, mul );
  1195. v3 = _mm256_mul_ps( v3, mul );
  1196. // Round to nearest integer
  1197. v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST );
  1198. v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST );
  1199. v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST );
  1200. v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST );
  1201. // Convert floats to integers
  1202. __m256i i0 = _mm256_cvtps_epi32( v0 );
  1203. __m256i i1 = _mm256_cvtps_epi32( v1 );
  1204. __m256i i2 = _mm256_cvtps_epi32( v2 );
  1205. __m256i i3 = _mm256_cvtps_epi32( v3 );
  1206. #if defined(__AVX2__)
  1207. // Compute the sum of the quants and set y[i].s
  1208. y[i].s = d * hsum_i32_8(_mm256_add_epi32(_mm256_add_epi32(i0, i1), _mm256_add_epi32(i2, i3)));
  1209. // Convert int32 to int16
  1210. i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15
  1211. i2 = _mm256_packs_epi32( i2, i3 ); // 16, 17, 18, 19, 24, 25, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31
  1212. // Convert int16 to int8
  1213. i0 = _mm256_packs_epi16( i0, i2 ); // 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27, 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31
  1214. // We got our precious signed bytes, but the order is now wrong
  1215. // These AVX2 pack instructions process 16-byte pieces independently
1216. // The following instruction fixes the order
  1217. const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 );
  1218. i0 = _mm256_permutevar8x32_epi32( i0, perm );
  1219. _mm256_storeu_si256((__m256i *)y[i].qs, i0);
  1220. #else
1221. // Since AVX lacks some of the necessary integer instructions,
1222. // we split the registers in half and use the SSE equivalents of the AVX2 intrinsics
  1223. __m128i ni0 = _mm256_castsi256_si128( i0 );
  1224. __m128i ni1 = _mm256_extractf128_si256( i0, 1);
  1225. __m128i ni2 = _mm256_castsi256_si128( i1 );
  1226. __m128i ni3 = _mm256_extractf128_si256( i1, 1);
  1227. __m128i ni4 = _mm256_castsi256_si128( i2 );
  1228. __m128i ni5 = _mm256_extractf128_si256( i2, 1);
  1229. __m128i ni6 = _mm256_castsi256_si128( i3 );
  1230. __m128i ni7 = _mm256_extractf128_si256( i3, 1);
  1231. // Compute the sum of the quants and set y[i].s
  1232. const __m128i s0 = _mm_add_epi32(_mm_add_epi32(ni0, ni1), _mm_add_epi32(ni2, ni3));
  1233. const __m128i s1 = _mm_add_epi32(_mm_add_epi32(ni4, ni5), _mm_add_epi32(ni6, ni7));
  1234. y[i].s = d * hsum_i32_4(_mm_add_epi32(s0, s1));
  1235. // Convert int32 to int16
  1236. ni0 = _mm_packs_epi32( ni0, ni1 );
  1237. ni2 = _mm_packs_epi32( ni2, ni3 );
  1238. ni4 = _mm_packs_epi32( ni4, ni5 );
  1239. ni6 = _mm_packs_epi32( ni6, ni7 );
  1240. // Convert int16 to int8
  1241. ni0 = _mm_packs_epi16( ni0, ni2 );
  1242. ni4 = _mm_packs_epi16( ni4, ni6 );
  1243. _mm_storeu_si128((__m128i *)(y[i].qs + 0), ni0);
  1244. _mm_storeu_si128((__m128i *)(y[i].qs + 16), ni4);
  1245. #endif
  1246. }
  1247. #elif defined(__riscv_v_intrinsic)
  1248. size_t vl = __riscv_vsetvl_e32m4(QK8_1);
  1249. for (int i = 0; i < nb; i++) {
  1250. // load elements
  1251. vfloat32m4_t v_x = __riscv_vle32_v_f32m4(x+i*QK8_1, vl);
  1252. vfloat32m4_t vfabs = __riscv_vfabs_v_f32m4(v_x, vl);
  1253. vfloat32m1_t tmp = __riscv_vfmv_v_f_f32m1(0.0, vl);
  1254. vfloat32m1_t vmax = __riscv_vfredmax_vs_f32m4_f32m1(vfabs, tmp, vl);
  1255. float amax = __riscv_vfmv_f_s_f32m1_f32(vmax);
  1256. const float d = amax / ((1 << 7) - 1);
  1257. const float id = d ? 1.0f/d : 0.0f;
  1258. y[i].d = d;
  1259. vfloat32m4_t x0 = __riscv_vfmul_vf_f32m4(v_x, id, vl);
  1260. // convert to integer
  1261. vint16m2_t vi = __riscv_vfncvt_x_f_w_i16m2(x0, vl);
  1262. vint8m1_t vs = __riscv_vncvt_x_x_w_i8m1(vi, vl);
  1263. // store result
  1264. __riscv_vse8_v_i8m1(y[i].qs , vs, vl);
  1265. // compute sum for y[i].s
  1266. vint16m1_t tmp2 = __riscv_vmv_v_x_i16m1(0, vl);
  1267. vint16m1_t vwrs = __riscv_vwredsum_vs_i8m1_i16m1(vs, tmp2, vl);
  1268. // set y[i].s
  1269. int sum = __riscv_vmv_x_s_i16m1_i16(vwrs);
  1270. y[i].s = sum*d;
  1271. }
  1272. #else
  1273. // scalar
  1274. quantize_row_q8_1_reference(x, y, k);
  1275. #endif
  1276. }
  1277. static void dequantize_row_q4_0(const block_q4_0 * restrict x, float * restrict y, int k) {
  1278. static const int qk = QK4_0;
  1279. assert(k % qk == 0);
  1280. const int nb = k / qk;
  1281. for (int i = 0; i < nb; i++) {
  1282. const float d = GGML_FP16_TO_FP32(x[i].d);
  1283. for (int j = 0; j < qk/2; ++j) {
  1284. const int x0 = (x[i].qs[j] & 0x0F) - 8;
  1285. const int x1 = (x[i].qs[j] >> 4) - 8;
  1286. y[i*qk + j + 0 ] = x0*d;
  1287. y[i*qk + j + qk/2] = x1*d;
  1288. }
  1289. }
  1290. }
  1291. static void dequantize_row_q4_1(const block_q4_1 * restrict x, float * restrict y, int k) {
  1292. static const int qk = QK4_1;
  1293. assert(k % qk == 0);
  1294. const int nb = k / qk;
  1295. for (int i = 0; i < nb; i++) {
  1296. const float d = GGML_FP16_TO_FP32(x[i].d);
  1297. const float m = GGML_FP16_TO_FP32(x[i].m);
  1298. for (int j = 0; j < qk/2; ++j) {
  1299. const int x0 = (x[i].qs[j] & 0x0F);
  1300. const int x1 = (x[i].qs[j] >> 4);
  1301. y[i*qk + j + 0 ] = x0*d + m;
  1302. y[i*qk + j + qk/2] = x1*d + m;
  1303. }
  1304. }
  1305. }
  1306. static void dequantize_row_q5_0(const block_q5_0 * restrict x, float * restrict y, int k) {
  1307. static const int qk = QK5_0;
  1308. assert(k % qk == 0);
  1309. const int nb = k / qk;
  1310. for (int i = 0; i < nb; i++) {
  1311. const float d = GGML_FP16_TO_FP32(x[i].d);
  1312. uint32_t qh;
  1313. memcpy(&qh, x[i].qh, sizeof(qh));
  1314. for (int j = 0; j < qk/2; ++j) {
  1315. const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10;
  1316. const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10;
  1317. const int32_t x0 = ((x[i].qs[j] & 0x0F) | xh_0) - 16;
  1318. const int32_t x1 = ((x[i].qs[j] >> 4) | xh_1) - 16;
  1319. y[i*qk + j + 0 ] = x0*d;
  1320. y[i*qk + j + qk/2] = x1*d;
  1321. }
  1322. }
  1323. }
  1324. static void dequantize_row_q5_1(const block_q5_1 * restrict x, float * restrict y, int k) {
  1325. static const int qk = QK5_1;
  1326. assert(k % qk == 0);
  1327. const int nb = k / qk;
  1328. for (int i = 0; i < nb; i++) {
  1329. const float d = GGML_FP16_TO_FP32(x[i].d);
  1330. const float m = GGML_FP16_TO_FP32(x[i].m);
  1331. uint32_t qh;
  1332. memcpy(&qh, x[i].qh, sizeof(qh));
  1333. for (int j = 0; j < qk/2; ++j) {
  1334. const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10;
  1335. const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10;
  1336. const int x0 = (x[i].qs[j] & 0x0F) | xh_0;
  1337. const int x1 = (x[i].qs[j] >> 4) | xh_1;
  1338. y[i*qk + j + 0 ] = x0*d + m;
  1339. y[i*qk + j + qk/2] = x1*d + m;
  1340. }
  1341. }
  1342. }
  1343. static void dequantize_row_q8_0(const void * restrict vx, float * restrict y, int k) {
  1344. static const int qk = QK8_0;
  1345. assert(k % qk == 0);
  1346. const int nb = k / qk;
  1347. const block_q8_0 * restrict x = vx;
  1348. for (int i = 0; i < nb; i++) {
  1349. const float d = GGML_FP16_TO_FP32(x[i].d);
  1350. for (int j = 0; j < qk; ++j) {
  1351. y[i*qk + j] = x[i].qs[j]*d;
  1352. }
  1353. }
  1354. }
  1355. static void ggml_vec_dot_f32(const int n, float * restrict s, const float * restrict x, const float * restrict y);
  1356. static void ggml_vec_dot_f16(const int n, float * restrict s, ggml_fp16_t * restrict x, ggml_fp16_t * restrict y);
  1357. static void ggml_vec_dot_q4_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy);
  1358. static void ggml_vec_dot_q4_1_q8_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy);
  1359. static void ggml_vec_dot_q5_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy);
  1360. static void ggml_vec_dot_q5_1_q8_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy);
  1361. static void ggml_vec_dot_q8_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy);
  1362. static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = {
  1363. [GGML_TYPE_I8] = {
  1364. .type_name = "i8",
  1365. .blck_size = 1,
  1366. .type_size = sizeof(int8_t),
  1367. .is_quantized = false,
  1368. },
  1369. [GGML_TYPE_I16] = {
  1370. .type_name = "i16",
  1371. .blck_size = 1,
  1372. .type_size = sizeof(int16_t),
  1373. .is_quantized = false,
  1374. },
  1375. [GGML_TYPE_I32] = {
  1376. .type_name = "i32",
  1377. .blck_size = 1,
  1378. .type_size = sizeof(int32_t),
  1379. .is_quantized = false,
  1380. },
  1381. [GGML_TYPE_F32] = {
  1382. .type_name = "f32",
  1383. .blck_size = 1,
  1384. .type_size = sizeof(float),
  1385. .is_quantized = false,
  1386. .vec_dot = (ggml_vec_dot_t) ggml_vec_dot_f32,
  1387. .vec_dot_type = GGML_TYPE_F32,
  1388. },
  1389. [GGML_TYPE_F16] = {
  1390. .type_name = "f16",
  1391. .blck_size = 1,
  1392. .type_size = sizeof(ggml_fp16_t),
  1393. .is_quantized = false,
  1394. .to_float = (ggml_to_float_t) ggml_fp16_to_fp32_row,
  1395. .from_float = (ggml_from_float_t) ggml_fp32_to_fp16_row,
  1396. .from_float_reference = (ggml_from_float_t) ggml_fp32_to_fp16_row,
  1397. .vec_dot = (ggml_vec_dot_t) ggml_vec_dot_f16,
  1398. .vec_dot_type = GGML_TYPE_F16,
  1399. },
  1400. [GGML_TYPE_Q4_0] = {
  1401. .type_name = "q4_0",
  1402. .blck_size = QK4_0,
  1403. .type_size = sizeof(block_q4_0),
  1404. .is_quantized = true,
  1405. .to_float = (ggml_to_float_t) dequantize_row_q4_0,
  1406. .from_float = quantize_row_q4_0,
  1407. .from_float_reference = (ggml_from_float_t) quantize_row_q4_0_reference,
  1408. .vec_dot = ggml_vec_dot_q4_0_q8_0,
  1409. .vec_dot_type = GGML_TYPE_Q8_0,
  1410. },
  1411. [GGML_TYPE_Q4_1] = {
  1412. .type_name = "q4_1",
  1413. .blck_size = QK4_1,
  1414. .type_size = sizeof(block_q4_1),
  1415. .is_quantized = true,
  1416. .to_float = (ggml_to_float_t) dequantize_row_q4_1,
  1417. .from_float = quantize_row_q4_1,
  1418. .from_float_reference = (ggml_from_float_t) quantize_row_q4_1_reference,
  1419. .vec_dot = ggml_vec_dot_q4_1_q8_1,
  1420. .vec_dot_type = GGML_TYPE_Q8_1,
  1421. },
  1422. [GGML_TYPE_Q5_0] = {
  1423. .type_name = "q5_0",
  1424. .blck_size = QK5_0,
  1425. .type_size = sizeof(block_q5_0),
  1426. .is_quantized = true,
  1427. .to_float = (ggml_to_float_t) dequantize_row_q5_0,
  1428. .from_float = quantize_row_q5_0,
  1429. .from_float_reference = (ggml_from_float_t) quantize_row_q5_0_reference,
  1430. .vec_dot = ggml_vec_dot_q5_0_q8_0,
  1431. .vec_dot_type = GGML_TYPE_Q8_0,
  1432. },
  1433. [GGML_TYPE_Q5_1] = {
  1434. .type_name = "q5_1",
  1435. .blck_size = QK5_1,
  1436. .type_size = sizeof(block_q5_1),
  1437. .is_quantized = true,
  1438. .to_float = (ggml_to_float_t) dequantize_row_q5_1,
  1439. .from_float = quantize_row_q5_1,
  1440. .from_float_reference = (ggml_from_float_t) quantize_row_q5_1_reference,
  1441. .vec_dot = ggml_vec_dot_q5_1_q8_1,
  1442. .vec_dot_type = GGML_TYPE_Q8_1,
  1443. },
  1444. [GGML_TYPE_Q8_0] = {
  1445. .type_name = "q8_0",
  1446. .blck_size = QK8_0,
  1447. .type_size = sizeof(block_q8_0),
  1448. .is_quantized = true,
  1449. .to_float = dequantize_row_q8_0,
  1450. .from_float = quantize_row_q8_0,
  1451. .from_float_reference = (ggml_from_float_t) quantize_row_q8_0_reference,
  1452. .vec_dot = ggml_vec_dot_q8_0_q8_0,
  1453. .vec_dot_type = GGML_TYPE_Q8_0,
  1454. },
  1455. [GGML_TYPE_Q8_1] = {
  1456. .type_name = "q8_1",
  1457. .blck_size = QK8_1,
  1458. .type_size = sizeof(block_q8_1),
  1459. .is_quantized = true,
  1460. .from_float = quantize_row_q8_1,
  1461. .from_float_reference = (ggml_from_float_t) quantize_row_q8_1_reference,
  1462. .vec_dot_type = GGML_TYPE_Q8_1,
  1463. },
  1464. #ifdef GGML_USE_K_QUANTS
  1465. [GGML_TYPE_Q2_K] = {
  1466. .type_name = "q2_K",
  1467. .blck_size = QK_K,
  1468. .type_size = sizeof(block_q2_K),
  1469. .is_quantized = true,
  1470. .to_float = (ggml_to_float_t) dequantize_row_q2_K,
  1471. .from_float = quantize_row_q2_K,
  1472. .from_float_reference = (ggml_from_float_t) quantize_row_q2_K_reference,
  1473. .vec_dot = ggml_vec_dot_q2_K_q8_K,
  1474. .vec_dot_type = GGML_TYPE_Q8_K,
  1475. },
  1476. [GGML_TYPE_Q3_K] = {
  1477. .type_name = "q3_K",
  1478. .blck_size = QK_K,
  1479. .type_size = sizeof(block_q3_K),
  1480. .is_quantized = true,
  1481. .to_float = (ggml_to_float_t) dequantize_row_q3_K,
  1482. .from_float = quantize_row_q3_K,
  1483. .from_float_reference = (ggml_from_float_t) quantize_row_q3_K_reference,
  1484. .vec_dot = ggml_vec_dot_q3_K_q8_K,
  1485. .vec_dot_type = GGML_TYPE_Q8_K,
  1486. },
  1487. [GGML_TYPE_Q4_K] = {
  1488. .type_name = "q4_K",
  1489. .blck_size = QK_K,
  1490. .type_size = sizeof(block_q4_K),
  1491. .is_quantized = true,
  1492. .to_float = (ggml_to_float_t) dequantize_row_q4_K,
  1493. .from_float = quantize_row_q4_K,
  1494. .from_float_reference = (ggml_from_float_t) quantize_row_q4_K_reference,
  1495. .vec_dot = ggml_vec_dot_q4_K_q8_K,
  1496. .vec_dot_type = GGML_TYPE_Q8_K,
  1497. },
  1498. [GGML_TYPE_Q5_K] = {
  1499. .type_name = "q5_K",
  1500. .blck_size = QK_K,
  1501. .type_size = sizeof(block_q5_K),
  1502. .is_quantized = true,
  1503. .to_float = (ggml_to_float_t) dequantize_row_q5_K,
  1504. .from_float = quantize_row_q5_K,
  1505. .from_float_reference = (ggml_from_float_t) quantize_row_q5_K_reference,
  1506. .vec_dot = ggml_vec_dot_q5_K_q8_K,
  1507. .vec_dot_type = GGML_TYPE_Q8_K,
  1508. },
  1509. [GGML_TYPE_Q6_K] = {
  1510. .type_name = "q6_K",
  1511. .blck_size = QK_K,
  1512. .type_size = sizeof(block_q6_K),
  1513. .is_quantized = true,
  1514. .to_float = (ggml_to_float_t) dequantize_row_q6_K,
  1515. .from_float = quantize_row_q6_K,
  1516. .from_float_reference = (ggml_from_float_t) quantize_row_q6_K_reference,
  1517. .vec_dot = ggml_vec_dot_q6_K_q8_K,
  1518. .vec_dot_type = GGML_TYPE_Q8_K,
  1519. },
  1520. [GGML_TYPE_Q8_K] = {
  1521. .type_name = "q8_K",
  1522. .blck_size = QK_K,
  1523. .type_size = sizeof(block_q8_K),
  1524. .is_quantized = true,
  1525. .from_float = quantize_row_q8_K,
  1526. }
  1527. #endif
  1528. };
  1529. // For internal test use
  1530. ggml_type_traits_t ggml_internal_get_type_traits(enum ggml_type type) {
  1531. GGML_ASSERT(type < GGML_TYPE_COUNT);
  1532. return type_traits[type];
  1533. }
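// Usage sketch (illustrative, added comment): callers consult this table to pick matching
// kernels, e.g. type_traits[GGML_TYPE_Q4_0].vec_dot_type is GGML_TYPE_Q8_0, so a q4_0
// mat-mul first runs from_float (quantize_row_q8_0) on the f32 operand and then calls
// vec_dot (ggml_vec_dot_q4_0_q8_0) on the quantized rows.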
  1534. //
  1535. // simd mappings
  1536. //
  1537. // we define a common set of C macros which map to specific intrinsics based on the current architecture
  1538. // we then implement the fundamental computation operations below using only these macros
1539. // adding support for new architectures requires defining the corresponding SIMD macros
  1540. //
  1541. // GGML_F32_STEP / GGML_F16_STEP
  1542. // number of elements to process in a single step
  1543. //
  1544. // GGML_F32_EPR / GGML_F16_EPR
  1545. // number of elements to fit in a single register
  1546. //
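// Sketch of how these macros compose (illustrative, added comment; GGML_F32_ARR is assumed
// to be GGML_F32_STEP/GGML_F32_EPR, i.e. the number of accumulator registers):
//
//   GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO };
//   for (int i = 0; i < n; i += GGML_F32_STEP) {
//       for (int j = 0; j < GGML_F32_ARR; j++) {
//           GGML_F32_VEC ax = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR);
//           GGML_F32_VEC ay = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
//           sum[j] = GGML_F32_VEC_FMA(sum[j], ax, ay);
//       }
//   }
//   GGML_F32_VEC_REDUCE(result, sum);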
  1547. #if defined(__ARM_NEON) && defined(__ARM_FEATURE_FMA)
  1548. #define GGML_SIMD
  1549. // F32 NEON
  1550. #define GGML_F32_STEP 16
  1551. #define GGML_F32_EPR 4
  1552. #define GGML_F32x4 float32x4_t
  1553. #define GGML_F32x4_ZERO vdupq_n_f32(0.0f)
  1554. #define GGML_F32x4_SET1(x) vdupq_n_f32(x)
  1555. #define GGML_F32x4_LOAD vld1q_f32
  1556. #define GGML_F32x4_STORE vst1q_f32
  1557. #define GGML_F32x4_FMA(a, b, c) vfmaq_f32(a, b, c)
  1558. #define GGML_F32x4_ADD vaddq_f32
  1559. #define GGML_F32x4_MUL vmulq_f32
  1560. #define GGML_F32x4_REDUCE_ONE(x) vaddvq_f32(x)
  1561. #define GGML_F32x4_REDUCE(res, x) \
  1562. { \
  1563. int offset = GGML_F32_ARR >> 1; \
  1564. for (int i = 0; i < offset; ++i) { \
  1565. x[i] = vaddq_f32(x[i], x[offset+i]); \
  1566. } \
  1567. offset >>= 1; \
  1568. for (int i = 0; i < offset; ++i) { \
  1569. x[i] = vaddq_f32(x[i], x[offset+i]); \
  1570. } \
  1571. offset >>= 1; \
  1572. for (int i = 0; i < offset; ++i) { \
  1573. x[i] = vaddq_f32(x[i], x[offset+i]); \
  1574. } \
  1575. res = GGML_F32x4_REDUCE_ONE(x[0]); \
  1576. }
  1577. #define GGML_F32_VEC GGML_F32x4
  1578. #define GGML_F32_VEC_ZERO GGML_F32x4_ZERO
  1579. #define GGML_F32_VEC_SET1 GGML_F32x4_SET1
  1580. #define GGML_F32_VEC_LOAD GGML_F32x4_LOAD
  1581. #define GGML_F32_VEC_STORE GGML_F32x4_STORE
  1582. #define GGML_F32_VEC_FMA GGML_F32x4_FMA
  1583. #define GGML_F32_VEC_ADD GGML_F32x4_ADD
  1584. #define GGML_F32_VEC_MUL GGML_F32x4_MUL
  1585. #define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE
  1586. // F16 NEON
  1587. #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
  1588. #define GGML_F16_STEP 32
  1589. #define GGML_F16_EPR 8
  1590. #define GGML_F16x8 float16x8_t
  1591. #define GGML_F16x8_ZERO vdupq_n_f16(0.0f)
  1592. #define GGML_F16x8_SET1(x) vdupq_n_f16(x)
  1593. #define GGML_F16x8_LOAD vld1q_f16
  1594. #define GGML_F16x8_STORE vst1q_f16
  1595. #define GGML_F16x8_FMA(a, b, c) vfmaq_f16(a, b, c)
  1596. #define GGML_F16x8_ADD vaddq_f16
  1597. #define GGML_F16x8_MUL vmulq_f16
  1598. #define GGML_F16x8_REDUCE(res, x) \
  1599. do { \
  1600. int offset = GGML_F16_ARR >> 1; \
  1601. for (int i = 0; i < offset; ++i) { \
  1602. x[i] = vaddq_f16(x[i], x[offset+i]); \
  1603. } \
  1604. offset >>= 1; \
  1605. for (int i = 0; i < offset; ++i) { \
  1606. x[i] = vaddq_f16(x[i], x[offset+i]); \
  1607. } \
  1608. offset >>= 1; \
  1609. for (int i = 0; i < offset; ++i) { \
  1610. x[i] = vaddq_f16(x[i], x[offset+i]); \
  1611. } \
  1612. const float32x4_t t0 = vcvt_f32_f16(vget_low_f16 (x[0])); \
  1613. const float32x4_t t1 = vcvt_f32_f16(vget_high_f16(x[0])); \
  1614. res = (ggml_float) vaddvq_f32(vaddq_f32(t0, t1)); \
  1615. } while (0)
  1616. #define GGML_F16_VEC GGML_F16x8
  1617. #define GGML_F16_VEC_ZERO GGML_F16x8_ZERO
  1618. #define GGML_F16_VEC_SET1 GGML_F16x8_SET1
  1619. #define GGML_F16_VEC_LOAD(p, i) GGML_F16x8_LOAD(p)
  1620. #define GGML_F16_VEC_STORE(p, r, i) GGML_F16x8_STORE(p, r[i])
  1621. #define GGML_F16_VEC_FMA GGML_F16x8_FMA
  1622. #define GGML_F16_VEC_ADD GGML_F16x8_ADD
  1623. #define GGML_F16_VEC_MUL GGML_F16x8_MUL
  1624. #define GGML_F16_VEC_REDUCE GGML_F16x8_REDUCE
  1625. #else
  1626. // if FP16 vector arithmetic is not supported, we use FP32 instead
  1627. // and take advantage of the vcvt_ functions to convert to/from FP16
  1628. #define GGML_F16_STEP 16
  1629. #define GGML_F16_EPR 4
  1630. #define GGML_F32Cx4 float32x4_t
  1631. #define GGML_F32Cx4_ZERO vdupq_n_f32(0.0f)
  1632. #define GGML_F32Cx4_SET1(x) vdupq_n_f32(x)
  1633. #define GGML_F32Cx4_LOAD(x) vcvt_f32_f16(vld1_f16(x))
  1634. #define GGML_F32Cx4_STORE(x, y) vst1_f16(x, vcvt_f16_f32(y))
  1635. #define GGML_F32Cx4_FMA(a, b, c) vfmaq_f32(a, b, c)
  1636. #define GGML_F32Cx4_ADD vaddq_f32
  1637. #define GGML_F32Cx4_MUL vmulq_f32
  1638. #define GGML_F32Cx4_REDUCE GGML_F32x4_REDUCE
  1639. #define GGML_F16_VEC GGML_F32Cx4
  1640. #define GGML_F16_VEC_ZERO GGML_F32Cx4_ZERO
  1641. #define GGML_F16_VEC_SET1 GGML_F32Cx4_SET1
  1642. #define GGML_F16_VEC_LOAD(p, i) GGML_F32Cx4_LOAD(p)
  1643. #define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx4_STORE(p, r[i])
  1644. #define GGML_F16_VEC_FMA GGML_F32Cx4_FMA
  1645. #define GGML_F16_VEC_ADD GGML_F32Cx4_ADD
  1646. #define GGML_F16_VEC_MUL GGML_F32Cx4_MUL
  1647. #define GGML_F16_VEC_REDUCE GGML_F32Cx4_REDUCE
  1648. #endif
  1649. #elif defined(__AVX__)
  1650. #define GGML_SIMD
  1651. // F32 AVX
  1652. #define GGML_F32_STEP 32
  1653. #define GGML_F32_EPR 8
  1654. #define GGML_F32x8 __m256
  1655. #define GGML_F32x8_ZERO _mm256_setzero_ps()
  1656. #define GGML_F32x8_SET1(x) _mm256_set1_ps(x)
  1657. #define GGML_F32x8_LOAD _mm256_loadu_ps
  1658. #define GGML_F32x8_STORE _mm256_storeu_ps
  1659. #if defined(__FMA__)
  1660. #define GGML_F32x8_FMA(a, b, c) _mm256_fmadd_ps(b, c, a)
  1661. #else
  1662. #define GGML_F32x8_FMA(a, b, c) _mm256_add_ps(_mm256_mul_ps(b, c), a)
  1663. #endif
  1664. #define GGML_F32x8_ADD _mm256_add_ps
  1665. #define GGML_F32x8_MUL _mm256_mul_ps
  1666. #define GGML_F32x8_REDUCE(res, x) \
  1667. do { \
  1668. int offset = GGML_F32_ARR >> 1; \
  1669. for (int i = 0; i < offset; ++i) { \
  1670. x[i] = _mm256_add_ps(x[i], x[offset+i]); \
  1671. } \
  1672. offset >>= 1; \
  1673. for (int i = 0; i < offset; ++i) { \
  1674. x[i] = _mm256_add_ps(x[i], x[offset+i]); \
  1675. } \
  1676. offset >>= 1; \
  1677. for (int i = 0; i < offset; ++i) { \
  1678. x[i] = _mm256_add_ps(x[i], x[offset+i]); \
  1679. } \
  1680. const __m128 t0 = _mm_add_ps(_mm256_castps256_ps128(x[0]), \
  1681. _mm256_extractf128_ps(x[0], 1)); \
  1682. const __m128 t1 = _mm_hadd_ps(t0, t0); \
  1683. res = _mm_cvtss_f32(_mm_hadd_ps(t1, t1)); \
  1684. } while (0)
  1685. // TODO: is this optimal ?
  1686. #define GGML_F32_VEC GGML_F32x8
  1687. #define GGML_F32_VEC_ZERO GGML_F32x8_ZERO
  1688. #define GGML_F32_VEC_SET1 GGML_F32x8_SET1
  1689. #define GGML_F32_VEC_LOAD GGML_F32x8_LOAD
  1690. #define GGML_F32_VEC_STORE GGML_F32x8_STORE
  1691. #define GGML_F32_VEC_FMA GGML_F32x8_FMA
  1692. #define GGML_F32_VEC_ADD GGML_F32x8_ADD
  1693. #define GGML_F32_VEC_MUL GGML_F32x8_MUL
  1694. #define GGML_F32_VEC_REDUCE GGML_F32x8_REDUCE
  1695. // F16 AVX
  1696. #define GGML_F16_STEP 32
  1697. #define GGML_F16_EPR 8
  1698. // F16 arithmetic is not supported by AVX, so we use F32 instead
  1699. #define GGML_F32Cx8 __m256
  1700. #define GGML_F32Cx8_ZERO _mm256_setzero_ps()
  1701. #define GGML_F32Cx8_SET1(x) _mm256_set1_ps(x)
  1702. #if defined(__F16C__)
  1703. // the _mm256_cvt intrinsics require F16C
  1704. #define GGML_F32Cx8_LOAD(x) _mm256_cvtph_ps(_mm_loadu_si128((__m128i *)(x)))
  1705. #define GGML_F32Cx8_STORE(x, y) _mm_storeu_si128((__m128i *)(x), _mm256_cvtps_ph(y, 0))
  1706. #else
  1707. static inline __m256 __avx_f32cx8_load(ggml_fp16_t *x) {
  1708. float tmp[8];
  1709. for (int i = 0; i < 8; i++) {
  1710. tmp[i] = GGML_FP16_TO_FP32(x[i]);
  1711. }
  1712. return _mm256_loadu_ps(tmp);
  1713. }
  1714. static inline void __avx_f32cx8_store(ggml_fp16_t *x, __m256 y) {
  1715. float arr[8];
  1716. _mm256_storeu_ps(arr, y);
  1717. for (int i = 0; i < 8; i++)
  1718. x[i] = GGML_FP32_TO_FP16(arr[i]);
  1719. }
  1720. #define GGML_F32Cx8_LOAD(x) __avx_f32cx8_load(x)
  1721. #define GGML_F32Cx8_STORE(x, y) __avx_f32cx8_store(x, y)
  1722. #endif
  1723. #define GGML_F32Cx8_FMA GGML_F32x8_FMA
  1724. #define GGML_F32Cx8_ADD _mm256_add_ps
  1725. #define GGML_F32Cx8_MUL _mm256_mul_ps
  1726. #define GGML_F32Cx8_REDUCE GGML_F32x8_REDUCE
  1727. #define GGML_F16_VEC GGML_F32Cx8
  1728. #define GGML_F16_VEC_ZERO GGML_F32Cx8_ZERO
  1729. #define GGML_F16_VEC_SET1 GGML_F32Cx8_SET1
  1730. #define GGML_F16_VEC_LOAD(p, i) GGML_F32Cx8_LOAD(p)
  1731. #define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx8_STORE(p, r[i])
  1732. #define GGML_F16_VEC_FMA GGML_F32Cx8_FMA
  1733. #define GGML_F16_VEC_ADD GGML_F32Cx8_ADD
  1734. #define GGML_F16_VEC_MUL GGML_F32Cx8_MUL
  1735. #define GGML_F16_VEC_REDUCE GGML_F32Cx8_REDUCE
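// Note on the AVX F16 path above: when __F16C__ is available the loads/stores
// convert eight half-precision values per instruction via _mm256_cvtph_ps /
// _mm256_cvtps_ph; otherwise the __avx_f32cx8_load / __avx_f32cx8_store helpers
// fall back to element-wise GGML_FP16_TO_FP32 / GGML_FP32_TO_FP16 conversions
// through a small stack buffer.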
  1736. #elif defined(__POWER9_VECTOR__)
  1737. #define GGML_SIMD
  1738. // F32 POWER9
  1739. #define GGML_F32_STEP 32
  1740. #define GGML_F32_EPR 4
  1741. #define GGML_F32x4 vector float
  1742. #define GGML_F32x4_ZERO 0.0f
  1743. #define GGML_F32x4_SET1 vec_splats
  1744. #define GGML_F32x4_LOAD(p) vec_xl(0, p)
  1745. #define GGML_F32x4_STORE(p, r) vec_xst(r, 0, p)
  1746. #define GGML_F32x4_FMA(a, b, c) vec_madd(b, c, a)
  1747. #define GGML_F32x4_ADD vec_add
  1748. #define GGML_F32x4_MUL vec_mul
  1749. #define GGML_F32x4_REDUCE(res, x) \
  1750. { \
  1751. int offset = GGML_F32_ARR >> 1; \
  1752. for (int i = 0; i < offset; ++i) { \
  1753. x[i] = vec_add(x[i], x[offset+i]); \
  1754. } \
  1755. offset >>= 1; \
  1756. for (int i = 0; i < offset; ++i) { \
  1757. x[i] = vec_add(x[i], x[offset+i]); \
  1758. } \
  1759. offset >>= 1; \
  1760. for (int i = 0; i < offset; ++i) { \
  1761. x[i] = vec_add(x[i], x[offset+i]); \
  1762. } \
  1763. res = vec_extract(x[0], 0) + \
  1764. vec_extract(x[0], 1) + \
  1765. vec_extract(x[0], 2) + \
  1766. vec_extract(x[0], 3); \
  1767. }
  1768. #define GGML_F32_VEC GGML_F32x4
  1769. #define GGML_F32_VEC_ZERO GGML_F32x4_ZERO
  1770. #define GGML_F32_VEC_SET1 GGML_F32x4_SET1
  1771. #define GGML_F32_VEC_LOAD GGML_F32x4_LOAD
  1772. #define GGML_F32_VEC_STORE GGML_F32x4_STORE
  1773. #define GGML_F32_VEC_FMA GGML_F32x4_FMA
  1774. #define GGML_F32_VEC_ADD GGML_F32x4_ADD
  1775. #define GGML_F32_VEC_MUL GGML_F32x4_MUL
  1776. #define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE
  1777. // F16 POWER9
  1778. #define GGML_F16_STEP GGML_F32_STEP
  1779. #define GGML_F16_EPR GGML_F32_EPR
  1780. #define GGML_F16_VEC GGML_F32x4
  1781. #define GGML_F16_VEC_ZERO GGML_F32x4_ZERO
  1782. #define GGML_F16_VEC_SET1 GGML_F32x4_SET1
  1783. #define GGML_F16_VEC_FMA GGML_F32x4_FMA
  1784. #define GGML_F16_VEC_REDUCE GGML_F32x4_REDUCE
  1785. // Use vec_xl, not vec_ld, in case the load address is not aligned.
  1786. #define GGML_F16_VEC_LOAD(p, i) (i & 0x1) ? \
  1787. vec_extract_fp32_from_shorth(vec_xl(0, p - GGML_F16_EPR)) : \
  1788. vec_extract_fp32_from_shortl(vec_xl(0, p))
  1789. #define GGML_ENDIAN_BYTE(i) ((unsigned char *)&(uint16_t){1})[i]
  1790. #define GGML_F16_VEC_STORE(p, r, i) \
  1791. if (i & 0x1) \
  1792. vec_xst(vec_pack_to_short_fp32(r[i - GGML_ENDIAN_BYTE(1)], \
  1793. r[i - GGML_ENDIAN_BYTE(0)]), \
  1794. 0, p - GGML_F16_EPR)
  1795. #elif defined(__wasm_simd128__)
  1796. #define GGML_SIMD
  1797. // F32 WASM
  1798. #define GGML_F32_STEP 16
  1799. #define GGML_F32_EPR 4
  1800. #define GGML_F32x4 v128_t
  1801. #define GGML_F32x4_ZERO wasm_f32x4_splat(0.0f)
  1802. #define GGML_F32x4_SET1(x) wasm_f32x4_splat(x)
  1803. #define GGML_F32x4_LOAD wasm_v128_load
  1804. #define GGML_F32x4_STORE wasm_v128_store
  1805. #define GGML_F32x4_FMA(a, b, c) wasm_f32x4_add(wasm_f32x4_mul(b, c), a)
  1806. #define GGML_F32x4_ADD wasm_f32x4_add
  1807. #define GGML_F32x4_MUL wasm_f32x4_mul
  1808. #define GGML_F32x4_REDUCE(res, x) \
  1809. { \
  1810. int offset = GGML_F32_ARR >> 1; \
  1811. for (int i = 0; i < offset; ++i) { \
  1812. x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
  1813. } \
  1814. offset >>= 1; \
  1815. for (int i = 0; i < offset; ++i) { \
  1816. x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
  1817. } \
  1818. offset >>= 1; \
  1819. for (int i = 0; i < offset; ++i) { \
  1820. x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
  1821. } \
  1822. res = wasm_f32x4_extract_lane(x[0], 0) + \
  1823. wasm_f32x4_extract_lane(x[0], 1) + \
  1824. wasm_f32x4_extract_lane(x[0], 2) + \
  1825. wasm_f32x4_extract_lane(x[0], 3); \
  1826. }
  1827. #define GGML_F32_VEC GGML_F32x4
  1828. #define GGML_F32_VEC_ZERO GGML_F32x4_ZERO
  1829. #define GGML_F32_VEC_SET1 GGML_F32x4_SET1
  1830. #define GGML_F32_VEC_LOAD GGML_F32x4_LOAD
  1831. #define GGML_F32_VEC_STORE GGML_F32x4_STORE
  1832. #define GGML_F32_VEC_FMA GGML_F32x4_FMA
  1833. #define GGML_F32_VEC_ADD GGML_F32x4_ADD
  1834. #define GGML_F32_VEC_MUL GGML_F32x4_MUL
  1835. #define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE
  1836. // F16 WASM
  1837. #define GGML_F16_STEP 16
  1838. #define GGML_F16_EPR 4
  1839. inline static v128_t __wasm_f16x4_load(const ggml_fp16_t * p) {
  1840. float tmp[4];
  1841. tmp[0] = GGML_FP16_TO_FP32(p[0]);
  1842. tmp[1] = GGML_FP16_TO_FP32(p[1]);
  1843. tmp[2] = GGML_FP16_TO_FP32(p[2]);
  1844. tmp[3] = GGML_FP16_TO_FP32(p[3]);
  1845. return wasm_v128_load(tmp);
  1846. }
  1847. inline static void __wasm_f16x4_store(ggml_fp16_t * p, v128_t x) {
  1848. float tmp[4];
  1849. wasm_v128_store(tmp, x);
  1850. p[0] = GGML_FP32_TO_FP16(tmp[0]);
  1851. p[1] = GGML_FP32_TO_FP16(tmp[1]);
  1852. p[2] = GGML_FP32_TO_FP16(tmp[2]);
  1853. p[3] = GGML_FP32_TO_FP16(tmp[3]);
  1854. }
  1855. #define GGML_F16x4 v128_t
  1856. #define GGML_F16x4_ZERO wasm_f32x4_splat(0.0f)
  1857. #define GGML_F16x4_SET1(x) wasm_f32x4_splat(x)
  1858. #define GGML_F16x4_LOAD(x) __wasm_f16x4_load(x)
  1859. #define GGML_F16x4_STORE(x, y) __wasm_f16x4_store(x, y)
  1860. #define GGML_F16x4_FMA GGML_F32x4_FMA
  1861. #define GGML_F16x4_ADD wasm_f32x4_add
  1862. #define GGML_F16x4_MUL wasm_f32x4_mul
  1863. #define GGML_F16x4_REDUCE(res, x) \
  1864. { \
  1865. int offset = GGML_F16_ARR >> 1; \
  1866. for (int i = 0; i < offset; ++i) { \
  1867. x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
  1868. } \
  1869. offset >>= 1; \
  1870. for (int i = 0; i < offset; ++i) { \
  1871. x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
  1872. } \
  1873. offset >>= 1; \
  1874. for (int i = 0; i < offset; ++i) { \
  1875. x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
  1876. } \
  1877. res = wasm_f32x4_extract_lane(x[0], 0) + \
  1878. wasm_f32x4_extract_lane(x[0], 1) + \
  1879. wasm_f32x4_extract_lane(x[0], 2) + \
  1880. wasm_f32x4_extract_lane(x[0], 3); \
  1881. }
  1882. #define GGML_F16_VEC GGML_F16x4
  1883. #define GGML_F16_VEC_ZERO GGML_F16x4_ZERO
  1884. #define GGML_F16_VEC_SET1 GGML_F16x4_SET1
  1885. #define GGML_F16_VEC_LOAD(p, i) GGML_F16x4_LOAD(p)
  1886. #define GGML_F16_VEC_STORE(p, r, i) GGML_F16x4_STORE(p, r[i])
  1887. #define GGML_F16_VEC_FMA GGML_F16x4_FMA
  1888. #define GGML_F16_VEC_ADD GGML_F16x4_ADD
  1889. #define GGML_F16_VEC_MUL GGML_F16x4_MUL
  1890. #define GGML_F16_VEC_REDUCE GGML_F16x4_REDUCE
  1891. #elif defined(__SSE3__)
  1892. #define GGML_SIMD
  1893. // F32 SSE
  1894. #define GGML_F32_STEP 32
  1895. #define GGML_F32_EPR 4
  1896. #define GGML_F32x4 __m128
  1897. #define GGML_F32x4_ZERO _mm_setzero_ps()
  1898. #define GGML_F32x4_SET1(x) _mm_set1_ps(x)
  1899. #define GGML_F32x4_LOAD _mm_loadu_ps
  1900. #define GGML_F32x4_STORE _mm_storeu_ps
  1901. #if defined(__FMA__)
  1902. // TODO: Does this work?
  1903. #define GGML_F32x4_FMA(a, b, c) _mm_fmadd_ps(b, c, a)
  1904. #else
  1905. #define GGML_F32x4_FMA(a, b, c) _mm_add_ps(_mm_mul_ps(b, c), a)
  1906. #endif
  1907. #define GGML_F32x4_ADD _mm_add_ps
  1908. #define GGML_F32x4_MUL _mm_mul_ps
  1909. #define GGML_F32x4_REDUCE(res, x) \
  1910. { \
  1911. int offset = GGML_F32_ARR >> 1; \
  1912. for (int i = 0; i < offset; ++i) { \
  1913. x[i] = _mm_add_ps(x[i], x[offset+i]); \
  1914. } \
  1915. offset >>= 1; \
  1916. for (int i = 0; i < offset; ++i) { \
  1917. x[i] = _mm_add_ps(x[i], x[offset+i]); \
  1918. } \
  1919. offset >>= 1; \
  1920. for (int i = 0; i < offset; ++i) { \
  1921. x[i] = _mm_add_ps(x[i], x[offset+i]); \
  1922. } \
  1923. const __m128 t0 = _mm_hadd_ps(x[0], x[0]); \
  1924. res = _mm_cvtss_f32(_mm_hadd_ps(t0, t0)); \
  1925. }
1926. // TODO: is this optimal?
  1927. #define GGML_F32_VEC GGML_F32x4
  1928. #define GGML_F32_VEC_ZERO GGML_F32x4_ZERO
  1929. #define GGML_F32_VEC_SET1 GGML_F32x4_SET1
  1930. #define GGML_F32_VEC_LOAD GGML_F32x4_LOAD
  1931. #define GGML_F32_VEC_STORE GGML_F32x4_STORE
  1932. #define GGML_F32_VEC_FMA GGML_F32x4_FMA
  1933. #define GGML_F32_VEC_ADD GGML_F32x4_ADD
  1934. #define GGML_F32_VEC_MUL GGML_F32x4_MUL
  1935. #define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE
  1936. // F16 SSE
  1937. #define GGML_F16_STEP 32
  1938. #define GGML_F16_EPR 4
  1939. static inline __m128 __sse_f16x4_load(ggml_fp16_t *x) {
  1940. float tmp[4];
  1941. tmp[0] = GGML_FP16_TO_FP32(x[0]);
  1942. tmp[1] = GGML_FP16_TO_FP32(x[1]);
  1943. tmp[2] = GGML_FP16_TO_FP32(x[2]);
  1944. tmp[3] = GGML_FP16_TO_FP32(x[3]);
  1945. return _mm_loadu_ps(tmp);
  1946. }
  1947. static inline void __sse_f16x4_store(ggml_fp16_t *x, __m128 y) {
  1948. float arr[4];
  1949. _mm_storeu_ps(arr, y);
  1950. x[0] = GGML_FP32_TO_FP16(arr[0]);
  1951. x[1] = GGML_FP32_TO_FP16(arr[1]);
  1952. x[2] = GGML_FP32_TO_FP16(arr[2]);
  1953. x[3] = GGML_FP32_TO_FP16(arr[3]);
  1954. }
  1955. #define GGML_F32Cx4 __m128
  1956. #define GGML_F32Cx4_ZERO _mm_setzero_ps()
  1957. #define GGML_F32Cx4_SET1(x) _mm_set1_ps(x)
  1958. #define GGML_F32Cx4_LOAD(x) __sse_f16x4_load(x)
  1959. #define GGML_F32Cx4_STORE(x, y) __sse_f16x4_store(x, y)
  1960. #define GGML_F32Cx4_FMA GGML_F32x4_FMA
  1961. #define GGML_F32Cx4_ADD _mm_add_ps
  1962. #define GGML_F32Cx4_MUL _mm_mul_ps
  1963. #define GGML_F32Cx4_REDUCE GGML_F32x4_REDUCE
  1964. #define GGML_F16_VEC GGML_F32Cx4
  1965. #define GGML_F16_VEC_ZERO GGML_F32Cx4_ZERO
  1966. #define GGML_F16_VEC_SET1 GGML_F32Cx4_SET1
  1967. #define GGML_F16_VEC_LOAD(p, i) GGML_F32Cx4_LOAD(p)
  1968. #define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx4_STORE(p, r[i])
  1969. #define GGML_F16_VEC_FMA GGML_F32Cx4_FMA
  1970. #define GGML_F16_VEC_ADD GGML_F32Cx4_ADD
  1971. #define GGML_F16_VEC_MUL GGML_F32Cx4_MUL
  1972. #define GGML_F16_VEC_REDUCE GGML_F32Cx4_REDUCE
  1973. #endif
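// Summary of the block above: each architecture branch defines GGML_SIMD and maps
// the generic GGML_F32_VEC_* / GGML_F16_VEC_* names onto its native intrinsics, so
// the vector kernels below are written once against the generic macros. The F16
// variants fall back to F32 arithmetic (the GGML_F32Cx* wrappers) on targets
// without native half-precision math, and targets with no branch at all leave
// GGML_SIMD undefined, which selects the scalar fallbacks further down.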
  1974. // GGML_F32_ARR / GGML_F16_ARR
  1975. // number of registers to use per step
  1976. #ifdef GGML_SIMD
  1977. #define GGML_F32_ARR (GGML_F32_STEP/GGML_F32_EPR)
  1978. #define GGML_F16_ARR (GGML_F16_STEP/GGML_F16_EPR)
  1979. #endif
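// Worked example of the three constants, taken from the AVX branch above
// (GGML_F32_STEP == 32, GGML_F32_EPR == 8):
//   GGML_F32_ARR = 32 / 8 = 4
// i.e. one unrolled step consumes 32 floats spread across 4 independent 8-lane
// accumulators, presumably to keep several FMA chains in flight at once.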
  1980. //
  1981. // fundamental operations
  1982. //
  1983. inline static void ggml_vec_set_i8(const int n, int8_t * x, const int8_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
  1984. inline static void ggml_vec_set_i16(const int n, int16_t * x, const int16_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
  1985. inline static void ggml_vec_set_i32(const int n, int32_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
  1986. inline static void ggml_vec_set_f16(const int n, ggml_fp16_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
  1987. inline static void ggml_vec_add_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i] + y[i]; }
  1988. inline static void ggml_vec_add1_f32(const int n, float * z, const float * x, const float v) { for (int i = 0; i < n; ++i) z[i] = x[i] + v; }
  1989. inline static void ggml_vec_acc_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] += x[i]; }
  1990. inline static void ggml_vec_acc1_f32(const int n, float * y, const float v) { for (int i = 0; i < n; ++i) y[i] += v; }
  1991. inline static void ggml_vec_sub_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i] - y[i]; }
  1992. inline static void ggml_vec_set_f32 (const int n, float * x, const float v) { for (int i = 0; i < n; ++i) x[i] = v; }
  1993. inline static void ggml_vec_cpy_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i]; }
  1994. inline static void ggml_vec_neg_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = -x[i]; }
  1995. inline static void ggml_vec_mul_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]*y[i]; }
  1996. inline static void ggml_vec_div_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]/y[i]; }
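// The element-wise helpers above are plain scalar loops; the explicitly
// vectorized GGML_SIMD paths start with the dot products below.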
  1997. static void ggml_vec_dot_f32(const int n, float * restrict s, const float * restrict x, const float * restrict y) {
  1998. #ifdef GGML_SIMD
  1999. float sumf = 0.0f;
  2000. const int np = (n & ~(GGML_F32_STEP - 1));
  2001. GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO };
  2002. GGML_F32_VEC ax[GGML_F32_ARR];
  2003. GGML_F32_VEC ay[GGML_F32_ARR];
  2004. for (int i = 0; i < np; i += GGML_F32_STEP) {
  2005. for (int j = 0; j < GGML_F32_ARR; j++) {
  2006. ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR);
  2007. ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
  2008. sum[j] = GGML_F32_VEC_FMA(sum[j], ax[j], ay[j]);
  2009. }
  2010. }
  2011. // reduce sum0..sum3 to sum0
  2012. GGML_F32_VEC_REDUCE(sumf, sum);
  2013. // leftovers
  2014. for (int i = np; i < n; ++i) {
  2015. sumf += x[i]*y[i];
  2016. }
  2017. #else
  2018. // scalar
  2019. ggml_float sumf = 0.0;
  2020. for (int i = 0; i < n; ++i) {
  2021. sumf += (ggml_float)(x[i]*y[i]);
  2022. }
  2023. #endif
  2024. *s = sumf;
  2025. }
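// ggml_vec_dot_f32 above follows the unrolled-accumulator pattern used
// throughout this file:
//   np                   = n rounded down to a multiple of GGML_F32_STEP
//   sum[0..GGML_F32_ARR) accumulate partial dot products via GGML_F32_VEC_FMA
//   GGML_F32_VEC_REDUCE  folds the partial sums into a single float
//   a scalar tail loop   handles the remaining n - np elements
// The same structure is reused with the F16 macros in ggml_vec_dot_f16 below.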
  2026. static void ggml_vec_dot_f16(const int n, float * restrict s, ggml_fp16_t * restrict x, ggml_fp16_t * restrict y) {
  2027. ggml_float sumf = 0.0;
  2028. #if defined(GGML_SIMD)
  2029. const int np = (n & ~(GGML_F16_STEP - 1));
  2030. GGML_F16_VEC sum[GGML_F16_ARR] = { GGML_F16_VEC_ZERO };
  2031. GGML_F16_VEC ax[GGML_F16_ARR];
  2032. GGML_F16_VEC ay[GGML_F16_ARR];
  2033. for (int i = 0; i < np; i += GGML_F16_STEP) {
  2034. for (int j = 0; j < GGML_F16_ARR; j++) {
  2035. ax[j] = GGML_F16_VEC_LOAD(x + i + j*GGML_F16_EPR, j);
  2036. ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j);
  2037. sum[j] = GGML_F16_VEC_FMA(sum[j], ax[j], ay[j]);
  2038. }
  2039. }
  2040. // reduce sum0..sum3 to sum0
  2041. GGML_F16_VEC_REDUCE(sumf, sum);
  2042. // leftovers
  2043. for (int i = np; i < n; ++i) {
  2044. sumf += (ggml_float)(GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i]));
  2045. }
  2046. #else
  2047. for (int i = 0; i < n; ++i) {
  2048. sumf += (ggml_float)(GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i]));
  2049. }
  2050. #endif
  2051. *s = sumf;
  2052. }
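// In ggml_vec_dot_f16 the GGML_F16_VEC_* macros expand to the GGML_F32Cx*
// wrappers on targets without native FP16 arithmetic: elements are converted to
// FP32 on load, accumulated in FP32, and reduced to a float, which matches the
// scalar fallback that accumulates in ggml_float.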
  2053. static void ggml_vec_dot_q4_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
  2054. const int qk = QK8_0;
  2055. const int nb = n / qk;
  2056. assert(n % qk == 0);
  2057. const block_q4_0 * restrict x = vx;
  2058. const block_q8_0 * restrict y = vy;
  2059. #if defined(__ARM_NEON)
  2060. float32x4_t sumv0 = vdupq_n_f32(0.0f);
  2061. float32x4_t sumv1 = vdupq_n_f32(0.0f);
  2062. GGML_ASSERT(nb % 2 == 0); // TODO: handle odd nb
  2063. for (int i = 0; i < nb; i += 2) {
  2064. const block_q4_0 * restrict x0 = &x[i + 0];
  2065. const block_q4_0 * restrict x1 = &x[i + 1];
  2066. const block_q8_0 * restrict y0 = &y[i + 0];
  2067. const block_q8_0 * restrict y1 = &y[i + 1];
  2068. const uint8x16_t m4b = vdupq_n_u8(0x0F);
  2069. const int8x16_t s8b = vdupq_n_s8(0x8);
  2070. const uint8x16_t v0_0 = vld1q_u8(x0->qs);
  2071. const uint8x16_t v0_1 = vld1q_u8(x1->qs);
  2072. // 4-bit -> 8-bit
  2073. const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
  2074. const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
  2075. const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
  2076. const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
  2077. // sub 8
  2078. const int8x16_t v0_0ls = vsubq_s8(v0_0l, s8b);
  2079. const int8x16_t v0_0hs = vsubq_s8(v0_0h, s8b);
  2080. const int8x16_t v0_1ls = vsubq_s8(v0_1l, s8b);
  2081. const int8x16_t v0_1hs = vsubq_s8(v0_1h, s8b);
  2082. // load y
  2083. const int8x16_t v1_0l = vld1q_s8(y0->qs);
  2084. const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
  2085. const int8x16_t v1_1l = vld1q_s8(y1->qs);
  2086. const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);
  2087. #if defined(__ARM_FEATURE_DOTPROD)
  2088. // dot product into int32x4_t
  2089. const int32x4_t p_0 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_0ls, v1_0l), v0_0hs, v1_0h);
  2090. const int32x4_t p_1 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_1ls, v1_1l), v0_1hs, v1_1h);
  2091. sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
  2092. sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
  2093. #else
  2094. const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0ls), vget_low_s8 (v1_0l));
  2095. const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0ls), vget_high_s8(v1_0l));
  2096. const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hs), vget_low_s8 (v1_0h));
  2097. const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hs), vget_high_s8(v1_0h));
  2098. const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1ls), vget_low_s8 (v1_1l));
  2099. const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1ls), vget_high_s8(v1_1l));
  2100. const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1hs), vget_low_s8 (v1_1h));
  2101. const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1hs), vget_high_s8(v1_1h));
  2102. const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h));
  2103. const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h));
  2104. const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h));
  2105. const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h));
  2106. sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
  2107. sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
  2108. #endif
  2109. }
  2110. *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
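// On ARM the __ARM_FEATURE_DOTPROD path above multiply-accumulates 16 int8
// pairs per vdotq_s32 instruction; the fallback widens to int16 with vmull_s8
// and pairwise-adds (vpaddlq_s16 / vaddq_s32) to reach the same per-block int32
// sums before the combined FP16 scales are applied with vmlaq_n_f32.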
  2111. #elif defined(__AVX2__)
  2112. // Initialize accumulator with zeros
  2113. __m256 acc = _mm256_setzero_ps();
  2114. // Main loop
  2115. for (int i = 0; i < nb; ++i) {
  2116. /* Compute combined scale for the block */
  2117. const __m256 d = _mm256_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) );
  2118. __m256i bx = bytes_from_nibbles_32(x[i].qs);
2119. // Now we have a vector with bytes in the [ 0 .. 15 ] interval. Offset them into the [ -8 .. +7 ] interval.
  2120. const __m256i off = _mm256_set1_epi8( 8 );
  2121. bx = _mm256_sub_epi8( bx, off );
  2122. __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);
  2123. const __m256 q = mul_sum_i8_pairs_float(bx, by);
  2124. /* Multiply q with scale and accumulate */
  2125. acc = _mm256_fmadd_ps( d, q, acc );
  2126. }
  2127. *s = hsum_float_8(acc);
  2128. #elif defined(__AVX__)
  2129. // Initialize accumulator with zeros
  2130. __m256 acc = _mm256_setzero_ps();
  2131. // Main loop
  2132. for (int i = 0; i < nb; ++i) {
  2133. // Compute combined scale for the block
  2134. const __m256 d = _mm256_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) );
  2135. const __m128i lowMask = _mm_set1_epi8(0xF);
  2136. const __m128i off = _mm_set1_epi8(8);
  2137. const __m128i tmp = _mm_loadu_si128((const __m128i *)x[i].qs);
  2138. __m128i bx = _mm_and_si128(lowMask, tmp);
  2139. __m128i by = _mm_loadu_si128((const __m128i *)y[i].qs);
  2140. bx = _mm_sub_epi8(bx, off);
  2141. const __m128i i32_0 = mul_sum_i8_pairs(bx, by);
  2142. bx = _mm_and_si128(lowMask, _mm_srli_epi64(tmp, 4));
  2143. by = _mm_loadu_si128((const __m128i *)(y[i].qs + 16));
  2144. bx = _mm_sub_epi8(bx, off);
  2145. const __m128i i32_1 = mul_sum_i8_pairs(bx, by);
  2146. // Convert int32_t to float
  2147. __m256 p = _mm256_cvtepi32_ps(MM256_SET_M128I(i32_0, i32_1));
  2148. // Apply the scale, and accumulate
  2149. acc = _mm256_add_ps(_mm256_mul_ps( d, p ), acc);
  2150. }
  2151. *s = hsum_float_8(acc);
  2152. #elif defined(__SSSE3__)
  2153. // set constants
  2154. const __m128i lowMask = _mm_set1_epi8(0xF);
  2155. const __m128i off = _mm_set1_epi8(8);
  2156. // Initialize accumulator with zeros
  2157. __m128 acc_0 = _mm_setzero_ps();
  2158. __m128 acc_1 = _mm_setzero_ps();
  2159. __m128 acc_2 = _mm_setzero_ps();
  2160. __m128 acc_3 = _mm_setzero_ps();
  2161. // First round without accumulation
  2162. {
  2163. _mm_prefetch(&x[0] + sizeof(block_q4_0), _MM_HINT_T0);
  2164. _mm_prefetch(&y[0] + sizeof(block_q8_0), _MM_HINT_T0);
2165. // Compute combined scale for block 0 (shared by products 0 and 1)
  2166. const __m128 d_0_1 = _mm_set1_ps( GGML_FP16_TO_FP32(x[0].d) * GGML_FP16_TO_FP32(y[0].d) );
  2167. const __m128i tmp_0_1 = _mm_loadu_si128((const __m128i *)x[0].qs);
  2168. __m128i bx_0 = _mm_and_si128(lowMask, tmp_0_1);
  2169. __m128i by_0 = _mm_loadu_si128((const __m128i *)y[0].qs);
  2170. bx_0 = _mm_sub_epi8(bx_0, off);
  2171. const __m128i i32_0 = mul_sum_i8_pairs(bx_0, by_0);
  2172. __m128i bx_1 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_0_1, 4));
  2173. __m128i by_1 = _mm_loadu_si128((const __m128i *)(y[0].qs + 16));
  2174. bx_1 = _mm_sub_epi8(bx_1, off);
  2175. const __m128i i32_1 = mul_sum_i8_pairs(bx_1, by_1);
  2176. _mm_prefetch(&x[1] + sizeof(block_q4_0), _MM_HINT_T0);
  2177. _mm_prefetch(&y[1] + sizeof(block_q8_0), _MM_HINT_T0);
2178. // Compute combined scale for block 1 (shared by products 2 and 3)
  2179. const __m128 d_2_3 = _mm_set1_ps( GGML_FP16_TO_FP32(x[1].d) * GGML_FP16_TO_FP32(y[1].d) );
  2180. const __m128i tmp_2_3 = _mm_loadu_si128((const __m128i *)x[1].qs);
  2181. __m128i bx_2 = _mm_and_si128(lowMask, tmp_2_3);
  2182. __m128i by_2 = _mm_loadu_si128((const __m128i *)y[1].qs);
  2183. bx_2 = _mm_sub_epi8(bx_2, off);
  2184. const __m128i i32_2 = mul_sum_i8_pairs(bx_2, by_2);
  2185. __m128i bx_3 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_2_3, 4));
  2186. __m128i by_3 = _mm_loadu_si128((const __m128i *)(y[1].qs + 16));
  2187. bx_3 = _mm_sub_epi8(bx_3, off);
  2188. const __m128i i32_3 = mul_sum_i8_pairs(bx_3, by_3);
  2189. // Convert int32_t to float
  2190. __m128 p0 = _mm_cvtepi32_ps(i32_0);
  2191. __m128 p1 = _mm_cvtepi32_ps(i32_1);
  2192. __m128 p2 = _mm_cvtepi32_ps(i32_2);
  2193. __m128 p3 = _mm_cvtepi32_ps(i32_3);
  2194. // Apply the scale
  2195. acc_0 = _mm_mul_ps( d_0_1, p0 );
  2196. acc_1 = _mm_mul_ps( d_0_1, p1 );
  2197. acc_2 = _mm_mul_ps( d_2_3, p2 );
  2198. acc_3 = _mm_mul_ps( d_2_3, p3 );
  2199. }
  2200. // Main loop
  2201. GGML_ASSERT(nb % 2 == 0); // TODO: handle odd nb
  2202. for (int i = 2; i < nb; i+=2) {
  2203. _mm_prefetch(&x[i] + sizeof(block_q4_0), _MM_HINT_T0);
  2204. _mm_prefetch(&y[i] + sizeof(block_q8_0), _MM_HINT_T0);
2205. // Compute combined scale for block i (shared by products 0 and 1)
  2206. const __m128 d_0_1 = _mm_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) );
  2207. const __m128i tmp_0_1 = _mm_loadu_si128((const __m128i *)x[i].qs);
  2208. __m128i bx_0 = _mm_and_si128(lowMask, tmp_0_1);
  2209. __m128i by_0 = _mm_loadu_si128((const __m128i *)y[i].qs);
  2210. bx_0 = _mm_sub_epi8(bx_0, off);
  2211. const __m128i i32_0 = mul_sum_i8_pairs(bx_0, by_0);
  2212. __m128i bx_1 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_0_1, 4));
  2213. __m128i by_1 = _mm_loadu_si128((const __m128i *)(y[i].qs + 16));
  2214. bx_1 = _mm_sub_epi8(bx_1, off);
  2215. const __m128i i32_1 = mul_sum_i8_pairs(bx_1, by_1);
  2216. _mm_prefetch(&x[i] + 2 * sizeof(block_q4_0), _MM_HINT_T0);
  2217. _mm_prefetch(&y[i] + 2 * sizeof(block_q8_0), _MM_HINT_T0);
2218. // Compute combined scale for block i + 1 (shared by products 2 and 3)
  2219. const __m128 d_2_3 = _mm_set1_ps( GGML_FP16_TO_FP32(x[i + 1].d) * GGML_FP16_TO_FP32(y[i + 1].d) );
  2220. const __m128i tmp_2_3 = _mm_loadu_si128((const __m128i *)x[i + 1].qs);
  2221. __m128i bx_2 = _mm_and_si128(lowMask, tmp_2_3);
  2222. __m128i by_2 = _mm_loadu_si128((const __m128i *)y[i + 1].qs);
  2223. bx_2 = _mm_sub_epi8(bx_2, off);
  2224. const __m128i i32_2 = mul_sum_i8_pairs(bx_2, by_2);
  2225. __m128i bx_3 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_2_3, 4));
  2226. __m128i by_3 = _mm_loadu_si128((const __m128i *)(y[i + 1].qs + 16));
  2227. bx_3 = _mm_sub_epi8(bx_3, off);
  2228. const __m128i i32_3 = mul_sum_i8_pairs(bx_3, by_3);
  2229. // Convert int32_t to float
  2230. __m128 p0 = _mm_cvtepi32_ps(i32_0);
  2231. __m128 p1 = _mm_cvtepi32_ps(i32_1);
  2232. __m128 p2 = _mm_cvtepi32_ps(i32_2);
  2233. __m128 p3 = _mm_cvtepi32_ps(i32_3);
  2234. // Apply the scale
  2235. __m128 p0_d = _mm_mul_ps( d_0_1, p0 );
  2236. __m128 p1_d = _mm_mul_ps( d_0_1, p1 );
  2237. __m128 p2_d = _mm_mul_ps( d_2_3, p2 );
  2238. __m128 p3_d = _mm_mul_ps( d_2_3, p3 );
2239. // Accumulate
  2240. acc_0 = _mm_add_ps(p0_d, acc_0);
  2241. acc_1 = _mm_add_ps(p1_d, acc_1);
  2242. acc_2 = _mm_add_ps(p2_d, acc_2);
  2243. acc_3 = _mm_add_ps(p3_d, acc_3);
  2244. }
  2245. *s = hsum_float_4x4(acc_0, acc_1, acc_2, acc_3);
  2246. #elif defined(__riscv_v_intrinsic)
  2247. float sumf = 0.0;
  2248. size_t vl = __riscv_vsetvl_e8m1(qk/2);
  2249. for (int i = 0; i < nb; i++) {
  2250. // load elements
  2251. vuint8mf2_t tx = __riscv_vle8_v_u8mf2(x[i].qs, vl);
  2252. vint8mf2_t y0 = __riscv_vle8_v_i8mf2(y[i].qs, vl);
  2253. vint8mf2_t y1 = __riscv_vle8_v_i8mf2(y[i].qs+16, vl);
2254. // mask off the lower nibbles of x, then shift down the upper nibbles
  2255. vuint8mf2_t x_a = __riscv_vand_vx_u8mf2(tx, 0x0F, vl);
  2256. vuint8mf2_t x_l = __riscv_vsrl_vx_u8mf2(tx, 0x04, vl);
  2257. vint8mf2_t x_ai = __riscv_vreinterpret_v_u8mf2_i8mf2(x_a);
  2258. vint8mf2_t x_li = __riscv_vreinterpret_v_u8mf2_i8mf2(x_l);
  2259. // subtract offset
  2260. vint8mf2_t v0 = __riscv_vsub_vx_i8mf2(x_ai, 8, vl);
  2261. vint8mf2_t v1 = __riscv_vsub_vx_i8mf2(x_li, 8, vl);
  2262. vint16m1_t vec_mul1 = __riscv_vwmul_vv_i16m1(v0, y0, vl);
  2263. vint16m1_t vec_mul2 = __riscv_vwmul_vv_i16m1(v1, y1, vl);
  2264. vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl);
  2265. vint32m1_t vs1 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul1, vec_zero, vl);
  2266. vint32m1_t vs2 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul2, vs1, vl);
  2267. int sumi = __riscv_vmv_x_s_i32m1_i32(vs2);
  2268. sumf += sumi*GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d);
  2269. }
  2270. *s = sumf;
  2271. #else
  2272. // scalar
  2273. float sumf = 0.0;
  2274. for (int i = 0; i < nb; i++) {
  2275. int sumi = 0;
  2276. for (int j = 0; j < qk/2; ++j) {
  2277. const int v0 = (x[i].qs[j] & 0x0F) - 8;
  2278. const int v1 = (x[i].qs[j] >> 4) - 8;
  2279. sumi += (v0 * y[i].qs[j]) + (v1 * y[i].qs[j + qk/2]);
  2280. }
  2281. sumf += sumi*GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d);
  2282. }
  2283. *s = sumf;
  2284. #endif
  2285. }
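// Every branch of ggml_vec_dot_q4_0_q8_0 computes the same quantity as the
// scalar reference at the end of the function: for each block of qk values,
//   block_sum = d_x * d_y * sum_j ( (nibble_j - 8) * y_j )
// where d_x, d_y are the per-block FP16 scales and the 4-bit quants of x are
// packed two per byte (low nibble = element j, high nibble = element j + qk/2).
// The arch-specific branches only differ in how the nibble unpacking, the -8
// offset and the widening multiply-accumulate are expressed.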
  2286. static void ggml_vec_dot_q4_1_q8_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
  2287. const int qk = QK8_1;
  2288. const int nb = n / qk;
  2289. assert(n % qk == 0);
  2290. const block_q4_1 * restrict x = vx;
  2291. const block_q8_1 * restrict y = vy;
  2292. // TODO: add WASM SIMD
  2293. #if defined(__ARM_NEON)
  2294. float32x4_t sumv0 = vdupq_n_f32(0.0f);
  2295. float32x4_t sumv1 = vdupq_n_f32(0.0f);
  2296. float summs = 0;
  2297. GGML_ASSERT(nb % 2 == 0); // TODO: handle odd nb
  2298. for (int i = 0; i < nb; i += 2) {
  2299. const block_q4_1 * restrict x0 = &x[i + 0];
  2300. const block_q4_1 * restrict x1 = &x[i + 1];
  2301. const block_q8_1 * restrict y0 = &y[i + 0];
  2302. const block_q8_1 * restrict y1 = &y[i + 1];
  2303. summs += GGML_FP16_TO_FP32(x0->m) * y0->s + GGML_FP16_TO_FP32(x1->m) * y1->s;
  2304. const uint8x16_t m4b = vdupq_n_u8(0x0F);
  2305. const uint8x16_t v0_0 = vld1q_u8(x0->qs);
  2306. const uint8x16_t v0_1 = vld1q_u8(x1->qs);
  2307. // 4-bit -> 8-bit
  2308. const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
  2309. const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
  2310. const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
  2311. const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
  2312. // load y
  2313. const int8x16_t v1_0l = vld1q_s8(y0->qs);
  2314. const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
  2315. const int8x16_t v1_1l = vld1q_s8(y1->qs);
  2316. const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);
  2317. #if defined(__ARM_FEATURE_DOTPROD)
  2318. // dot product into int32x4_t
  2319. const int32x4_t p_0 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_0l, v1_0l), v0_0h, v1_0h);
  2320. const int32x4_t p_1 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_1l, v1_1l), v0_1h, v1_1h);
  2321. sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), GGML_FP16_TO_FP32(x0->d)*y0->d);
  2322. sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_FP16_TO_FP32(x1->d)*y1->d);
  2323. #else
  2324. const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0l), vget_low_s8 (v1_0l));
  2325. const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0l), vget_high_s8(v1_0l));
  2326. const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0h), vget_low_s8 (v1_0h));
  2327. const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0h), vget_high_s8(v1_0h));
  2328. const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1l), vget_low_s8 (v1_1l));
  2329. const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1l), vget_high_s8(v1_1l));
  2330. const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1h), vget_low_s8 (v1_1h));
  2331. const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1h), vget_high_s8(v1_1h));
  2332. const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h));
  2333. const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h));
  2334. const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h));
  2335. const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h));
  2336. sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), GGML_FP16_TO_FP32(x0->d)*y0->d);
  2337. sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), GGML_FP16_TO_FP32(x1->d)*y1->d);
  2338. #endif
  2339. }
  2340. *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs;
  2341. #elif defined(__AVX2__) || defined(__AVX__)
  2342. // Initialize accumulator with zeros
  2343. __m256 acc = _mm256_setzero_ps();
  2344. float summs = 0;
  2345. // Main loop
  2346. for (int i = 0; i < nb; ++i) {
  2347. const float d0 = GGML_FP16_TO_FP32(x[i].d);
  2348. const float d1 = y[i].d;
  2349. summs += GGML_FP16_TO_FP32(x[i].m) * y[i].s;
  2350. const __m256 d0v = _mm256_set1_ps( d0 );
  2351. const __m256 d1v = _mm256_set1_ps( d1 );
  2352. // Compute combined scales
  2353. const __m256 d0d1 = _mm256_mul_ps( d0v, d1v );
  2354. // Load 16 bytes, and unpack 4 bit fields into bytes, making 32 bytes
  2355. const __m256i bx = bytes_from_nibbles_32(x[i].qs);
  2356. const __m256i by = _mm256_loadu_si256( (const __m256i *)y[i].qs );
  2357. const __m256 xy = mul_sum_us8_pairs_float(bx, by);
  2358. // Accumulate d0*d1*x*y
  2359. #if defined(__AVX2__)
  2360. acc = _mm256_fmadd_ps( d0d1, xy, acc );
  2361. #else
  2362. acc = _mm256_add_ps( _mm256_mul_ps( d0d1, xy ), acc );
  2363. #endif
  2364. }
  2365. *s = hsum_float_8(acc) + summs;
  2366. #elif defined(__riscv_v_intrinsic)
  2367. float sumf = 0.0;
  2368. size_t vl = __riscv_vsetvl_e8m1(qk/2);
  2369. for (int i = 0; i < nb; i++) {
  2370. // load elements
  2371. vuint8mf2_t tx = __riscv_vle8_v_u8mf2(x[i].qs, vl);
  2372. vint8mf2_t y0 = __riscv_vle8_v_i8mf2(y[i].qs, vl);
  2373. vint8mf2_t y1 = __riscv_vle8_v_i8mf2(y[i].qs+16, vl);
2374. // mask off the lower nibbles of x, then shift down the upper nibbles
  2375. vuint8mf2_t x_a = __riscv_vand_vx_u8mf2(tx, 0x0F, vl);
  2376. vuint8mf2_t x_l = __riscv_vsrl_vx_u8mf2(tx, 0x04, vl);
  2377. vint8mf2_t v0 = __riscv_vreinterpret_v_u8mf2_i8mf2(x_a);
  2378. vint8mf2_t v1 = __riscv_vreinterpret_v_u8mf2_i8mf2(x_l);
  2379. vint16m1_t vec_mul1 = __riscv_vwmul_vv_i16m1(v0, y0, vl);
  2380. vint16m1_t vec_mul2 = __riscv_vwmul_vv_i16m1(v1, y1, vl);
  2381. vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl);
  2382. vint32m1_t vs1 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul1, vec_zero, vl);
  2383. vint32m1_t vs2 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul2, vs1, vl);
  2384. int sumi = __riscv_vmv_x_s_i32m1_i32(vs2);
  2385. sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s;
  2386. }
  2387. *s = sumf;
  2388. #else
  2389. // scalar
  2390. float sumf = 0.0;
  2391. for (int i = 0; i < nb; i++) {
  2392. int sumi = 0;
  2393. for (int j = 0; j < qk/2; ++j) {
  2394. const int v0 = (x[i].qs[j] & 0x0F);
  2395. const int v1 = (x[i].qs[j] >> 4);
  2396. sumi += (v0 * y[i].qs[j]) + (v1 * y[i].qs[j + qk/2]);
  2397. }
  2398. sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s;
  2399. }
  2400. *s = sumf;
  2401. #endif
  2402. }
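// q4_1 differs from q4_0 in that its 4-bit quants stay unsigned and each block
// carries a minimum m instead of the implicit -8 offset. Using the precomputed
// y[i].s term carried by each q8_1 block, the per-block contribution becomes
//   block_sum = d_x * d_y * sum_j ( nibble_j * y_j ) + m_x * s_y
// which is why every branch above keeps a separate `summs` accumulator.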
  2403. static void ggml_vec_dot_q5_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
  2404. const int qk = QK8_0;
  2405. const int nb = n / qk;
  2406. assert(n % qk == 0);
  2407. assert(qk == QK5_0);
  2408. const block_q5_0 * restrict x = vx;
  2409. const block_q8_0 * restrict y = vy;
  2410. #if defined(__ARM_NEON)
  2411. float32x4_t sumv0 = vdupq_n_f32(0.0f);
  2412. float32x4_t sumv1 = vdupq_n_f32(0.0f);
  2413. uint32_t qh0;
  2414. uint32_t qh1;
  2415. uint64_t tmp0[4];
  2416. uint64_t tmp1[4];
  2417. GGML_ASSERT(nb % 2 == 0); // TODO: handle odd nb
  2418. for (int i = 0; i < nb; i += 2) {
  2419. const block_q5_0 * restrict x0 = &x[i];
  2420. const block_q5_0 * restrict x1 = &x[i + 1];
  2421. const block_q8_0 * restrict y0 = &y[i];
  2422. const block_q8_0 * restrict y1 = &y[i + 1];
  2423. const uint8x16_t m4b = vdupq_n_u8(0x0F);
  2424. // extract the 5th bit via lookup table ((!b) << 4)
  2425. memcpy(&qh0, x0->qh, sizeof(qh0));
  2426. memcpy(&qh1, x1->qh, sizeof(qh1));
  2427. tmp0[0] = table_b2b_1[(qh0 >> 0) & 0xFF];
  2428. tmp0[1] = table_b2b_1[(qh0 >> 8) & 0xFF];
  2429. tmp0[2] = table_b2b_1[(qh0 >> 16) & 0xFF];
  2430. tmp0[3] = table_b2b_1[(qh0 >> 24) ];
  2431. tmp1[0] = table_b2b_1[(qh1 >> 0) & 0xFF];
  2432. tmp1[1] = table_b2b_1[(qh1 >> 8) & 0xFF];
  2433. tmp1[2] = table_b2b_1[(qh1 >> 16) & 0xFF];
  2434. tmp1[3] = table_b2b_1[(qh1 >> 24) ];
  2435. const int8x16_t qhl0 = vld1q_s8((const int8_t *)(tmp0 + 0));
  2436. const int8x16_t qhh0 = vld1q_s8((const int8_t *)(tmp0 + 2));
  2437. const int8x16_t qhl1 = vld1q_s8((const int8_t *)(tmp1 + 0));
  2438. const int8x16_t qhh1 = vld1q_s8((const int8_t *)(tmp1 + 2));
  2439. const uint8x16_t v0_0 = vld1q_u8(x0->qs);
  2440. const uint8x16_t v0_1 = vld1q_u8(x1->qs);
  2441. // 4-bit -> 8-bit
  2442. int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
  2443. int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
  2444. int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
  2445. int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
  2446. // add high bit and sub 16 (equivalent to sub 0x10 when bit is zero)
  2447. const int8x16_t v0_0lf = vsubq_s8(v0_0l, qhl0);
  2448. const int8x16_t v0_0hf = vsubq_s8(v0_0h, qhh0);
  2449. const int8x16_t v0_1lf = vsubq_s8(v0_1l, qhl1);
  2450. const int8x16_t v0_1hf = vsubq_s8(v0_1h, qhh1);
  2451. // load y
  2452. const int8x16_t v1_0l = vld1q_s8(y0->qs);
  2453. const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
  2454. const int8x16_t v1_1l = vld1q_s8(y1->qs);
  2455. const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);
  2456. #if defined(__ARM_FEATURE_DOTPROD)
  2457. sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(
  2458. vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l),
  2459. vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
  2460. sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(
  2461. vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l),
  2462. vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
  2463. #else
  2464. const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0lf), vget_low_s8 (v1_0l));
  2465. const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0lf), vget_high_s8(v1_0l));
  2466. const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hf), vget_low_s8 (v1_0h));
  2467. const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hf), vget_high_s8(v1_0h));
  2468. const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1lf), vget_low_s8 (v1_1l));
  2469. const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1lf), vget_high_s8(v1_1l));
  2470. const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1hf), vget_low_s8 (v1_1h));
  2471. const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1hf), vget_high_s8(v1_1h));
  2472. const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h));
  2473. const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h));
  2474. const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h));
  2475. const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h));
  2476. sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
  2477. sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
  2478. #endif
  2479. }
  2480. *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
  2481. #elif defined(__wasm_simd128__)
  2482. v128_t sumv = wasm_f32x4_splat(0.0f);
  2483. uint32_t qh;
  2484. uint64_t tmp[4];
  2485. // TODO: check if unrolling this is better
  2486. for (int i = 0; i < nb; ++i) {
  2487. const block_q5_0 * restrict x0 = &x[i];
  2488. const block_q8_0 * restrict y0 = &y[i];
  2489. const v128_t m4b = wasm_i8x16_splat(0x0F);
  2490. // extract the 5th bit
  2491. memcpy(&qh, x0->qh, sizeof(qh));
  2492. tmp[0] = table_b2b_1[(qh >> 0) & 0xFF];
  2493. tmp[1] = table_b2b_1[(qh >> 8) & 0xFF];
  2494. tmp[2] = table_b2b_1[(qh >> 16) & 0xFF];
  2495. tmp[3] = table_b2b_1[(qh >> 24) ];
  2496. const v128_t qhl = wasm_v128_load(tmp + 0);
  2497. const v128_t qhh = wasm_v128_load(tmp + 2);
  2498. const v128_t v0 = wasm_v128_load(x0->qs);
  2499. // 4-bit -> 8-bit
  2500. const v128_t v0l = wasm_v128_and (v0, m4b);
  2501. const v128_t v0h = wasm_u8x16_shr(v0, 4);
  2502. // add high bit and sub 16 (equivalent to sub 0x10 when bit is zero)
  2503. const v128_t v0lf = wasm_i8x16_sub(v0l, qhl);
  2504. const v128_t v0hf = wasm_i8x16_sub(v0h, qhh);
  2505. // load y
  2506. const v128_t v1l = wasm_v128_load(y0->qs);
  2507. const v128_t v1h = wasm_v128_load(y0->qs + 16);
  2508. // int8x16 -> int16x8
  2509. const v128_t v0lfl = wasm_i16x8_extend_low_i8x16 (v0lf);
  2510. const v128_t v0lfh = wasm_i16x8_extend_high_i8x16(v0lf);
  2511. const v128_t v0hfl = wasm_i16x8_extend_low_i8x16 (v0hf);
  2512. const v128_t v0hfh = wasm_i16x8_extend_high_i8x16(v0hf);
  2513. const v128_t v1ll = wasm_i16x8_extend_low_i8x16 (v1l);
  2514. const v128_t v1lh = wasm_i16x8_extend_high_i8x16(v1l);
  2515. const v128_t v1hl = wasm_i16x8_extend_low_i8x16 (v1h);
  2516. const v128_t v1hh = wasm_i16x8_extend_high_i8x16(v1h);
  2517. // dot product
  2518. sumv = wasm_f32x4_add(sumv, wasm_f32x4_mul(wasm_f32x4_convert_i32x4(
  2519. wasm_i32x4_add(
  2520. wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0lfl, v1ll),
  2521. wasm_i32x4_dot_i16x8(v0lfh, v1lh)),
  2522. wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0hfl, v1hl),
  2523. wasm_i32x4_dot_i16x8(v0hfh, v1hh)))),
  2524. wasm_f32x4_splat(GGML_FP16_TO_FP32(x0->d) * GGML_FP16_TO_FP32(y0->d))));
  2525. }
  2526. *s = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) +
  2527. wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3);
  2528. #elif defined(__AVX2__)
  2529. // Initialize accumulator with zeros
  2530. __m256 acc = _mm256_setzero_ps();
  2531. // Main loop
  2532. for (int i = 0; i < nb; i++) {
  2533. /* Compute combined scale for the block */
  2534. const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d));
  2535. __m256i bx = bytes_from_nibbles_32(x[i].qs);
  2536. __m256i bxhi = bytes_from_bits_32(x[i].qh);
  2537. bxhi = _mm256_andnot_si256(bxhi, _mm256_set1_epi8((char)0xF0));
  2538. bx = _mm256_or_si256(bx, bxhi);
  2539. __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);
  2540. const __m256 q = mul_sum_i8_pairs_float(bx, by);
  2541. /* Multiply q with scale and accumulate */
  2542. acc = _mm256_fmadd_ps(d, q, acc);
  2543. }
  2544. *s = hsum_float_8(acc);
  2545. #elif defined(__AVX__)
  2546. // Initialize accumulator with zeros
  2547. __m256 acc = _mm256_setzero_ps();
  2548. __m128i mask = _mm_set1_epi8((char)0xF0);
  2549. // Main loop
  2550. for (int i = 0; i < nb; i++) {
  2551. /* Compute combined scale for the block */
  2552. const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d));
  2553. __m256i bx = bytes_from_nibbles_32(x[i].qs);
  2554. const __m256i bxhi = bytes_from_bits_32(x[i].qh);
  2555. __m128i bxhil = _mm256_castsi256_si128(bxhi);
  2556. __m128i bxhih = _mm256_extractf128_si256(bxhi, 1);
  2557. bxhil = _mm_andnot_si128(bxhil, mask);
  2558. bxhih = _mm_andnot_si128(bxhih, mask);
  2559. __m128i bxl = _mm256_castsi256_si128(bx);
  2560. __m128i bxh = _mm256_extractf128_si256(bx, 1);
  2561. bxl = _mm_or_si128(bxl, bxhil);
  2562. bxh = _mm_or_si128(bxh, bxhih);
  2563. bx = MM256_SET_M128I(bxh, bxl);
  2564. const __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);
  2565. const __m256 q = mul_sum_i8_pairs_float(bx, by);
  2566. /* Multiply q with scale and accumulate */
  2567. acc = _mm256_add_ps(_mm256_mul_ps(d, q), acc);
  2568. }
  2569. *s = hsum_float_8(acc);
  2570. #elif defined(__riscv_v_intrinsic)
  2571. float sumf = 0.0;
  2572. uint32_t qh;
  2573. size_t vl = __riscv_vsetvl_e8m1(qk/2);
2574. // These temporary registers are for masking and shift operations
  2575. vuint32m2_t vt_1 = __riscv_vid_v_u32m2(vl);
  2576. vuint32m2_t vt_2 = __riscv_vsll_vv_u32m2(__riscv_vmv_v_x_u32m2(1, vl), vt_1, vl);
  2577. vuint32m2_t vt_3 = __riscv_vsll_vx_u32m2(vt_2, 16, vl);
  2578. vuint32m2_t vt_4 = __riscv_vadd_vx_u32m2(vt_1, 12, vl);
  2579. for (int i = 0; i < nb; i++) {
  2580. memcpy(&qh, x[i].qh, sizeof(uint32_t));
  2581. // ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4;
  2582. vuint32m2_t xha_0 = __riscv_vand_vx_u32m2(vt_2, qh, vl);
  2583. vuint32m2_t xhr_0 = __riscv_vsrl_vv_u32m2(xha_0, vt_1, vl);
  2584. vuint32m2_t xhl_0 = __riscv_vsll_vx_u32m2(xhr_0, 4, vl);
  2585. // ((qh & (1u << (j + 16))) >> (j + 12));
  2586. vuint32m2_t xha_1 = __riscv_vand_vx_u32m2(vt_3, qh, vl);
  2587. vuint32m2_t xhl_1 = __riscv_vsrl_vv_u32m2(xha_1, vt_4, vl);
  2588. // narrowing
  2589. vuint16m1_t xhc_0 = __riscv_vncvt_x_x_w_u16m1(xhl_0, vl);
  2590. vuint8mf2_t xh_0 = __riscv_vncvt_x_x_w_u8mf2(xhc_0, vl);
  2591. vuint16m1_t xhc_1 = __riscv_vncvt_x_x_w_u16m1(xhl_1, vl);
  2592. vuint8mf2_t xh_1 = __riscv_vncvt_x_x_w_u8mf2(xhc_1, vl);
  2593. // load
  2594. vuint8mf2_t tx = __riscv_vle8_v_u8mf2(x[i].qs, vl);
  2595. vint8mf2_t y0 = __riscv_vle8_v_i8mf2(y[i].qs, vl);
  2596. vint8mf2_t y1 = __riscv_vle8_v_i8mf2(y[i].qs+16, vl);
  2597. vuint8mf2_t x_at = __riscv_vand_vx_u8mf2(tx, 0x0F, vl);
  2598. vuint8mf2_t x_lt = __riscv_vsrl_vx_u8mf2(tx, 0x04, vl);
  2599. vuint8mf2_t x_a = __riscv_vor_vv_u8mf2(x_at, xh_0, vl);
  2600. vuint8mf2_t x_l = __riscv_vor_vv_u8mf2(x_lt, xh_1, vl);
  2601. vint8mf2_t x_ai = __riscv_vreinterpret_v_u8mf2_i8mf2(x_a);
  2602. vint8mf2_t x_li = __riscv_vreinterpret_v_u8mf2_i8mf2(x_l);
  2603. vint8mf2_t v0 = __riscv_vsub_vx_i8mf2(x_ai, 16, vl);
  2604. vint8mf2_t v1 = __riscv_vsub_vx_i8mf2(x_li, 16, vl);
  2605. vint16m1_t vec_mul1 = __riscv_vwmul_vv_i16m1(v0, y0, vl);
  2606. vint16m1_t vec_mul2 = __riscv_vwmul_vv_i16m1(v1, y1, vl);
  2607. vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl);
  2608. vint32m1_t vs1 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul1, vec_zero, vl);
  2609. vint32m1_t vs2 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul2, vs1, vl);
  2610. int sumi = __riscv_vmv_x_s_i32m1_i32(vs2);
  2611. sumf += (GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d)) * sumi;
  2612. }
  2613. *s = sumf;
  2614. #else
  2615. // scalar
  2616. float sumf = 0.0;
  2617. for (int i = 0; i < nb; i++) {
  2618. uint32_t qh;
  2619. memcpy(&qh, x[i].qh, sizeof(qh));
  2620. int sumi = 0;
  2621. for (int j = 0; j < qk/2; ++j) {
  2622. const uint8_t xh_0 = ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4;
  2623. const uint8_t xh_1 = ((qh & (1u << (j + 16))) >> (j + 12));
  2624. const int32_t x0 = ((x[i].qs[j] & 0x0F) | xh_0) - 16;
  2625. const int32_t x1 = ((x[i].qs[j] >> 4) | xh_1) - 16;
  2626. sumi += (x0 * y[i].qs[j]) + (x1 * y[i].qs[j + qk/2]);
  2627. }
  2628. sumf += (GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d)) * sumi;
  2629. }
  2630. *s = sumf;
  2631. #endif
  2632. }
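// q5_0 stores each element as a 4-bit low part in qs plus a 5th (high) bit in
// the 32-bit qh field; the scalar reference above reconstructs
//   x0 = ((qs & 0x0F) | xh_0) - 16,   xh_0 = ((qh >> j)        & 1) << 4
//   x1 = ((qs >>   4) | xh_1) - 16,   xh_1 = ((qh >> (j + 16)) & 1) << 4
// The NEON/WASM branches reach the same values via the table_b2b_1 lookup,
// which expands each byte of qh into eight bytes of ((!bit) << 4): subtracting
// that mask from the unpacked nibbles is equivalent to OR-ing in the high bit
// and then subtracting 16 (see the "add high bit and sub 16" comments above).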
  2633. static void ggml_vec_dot_q5_1_q8_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
  2634. const int qk = QK8_1;
  2635. const int nb = n / qk;
  2636. assert(n % qk == 0);
  2637. assert(qk == QK5_1);
  2638. const block_q5_1 * restrict x = vx;
  2639. const block_q8_1 * restrict y = vy;
  2640. #if defined(__ARM_NEON)
  2641. float32x4_t sumv0 = vdupq_n_f32(0.0f);
  2642. float32x4_t sumv1 = vdupq_n_f32(0.0f);
  2643. float summs0 = 0.0f;
  2644. float summs1 = 0.0f;
  2645. uint32_t qh0;
  2646. uint32_t qh1;
  2647. uint64_t tmp0[4];
  2648. uint64_t tmp1[4];
  2649. GGML_ASSERT(nb % 2 == 0); // TODO: handle odd nb
  2650. for (int i = 0; i < nb; i += 2) {
  2651. const block_q5_1 * restrict x0 = &x[i];
  2652. const block_q5_1 * restrict x1 = &x[i + 1];
  2653. const block_q8_1 * restrict y0 = &y[i];
  2654. const block_q8_1 * restrict y1 = &y[i + 1];
  2655. const uint8x16_t m4b = vdupq_n_u8(0x0F);
  2656. summs0 += GGML_FP16_TO_FP32(x0->m) * y0->s;
  2657. summs1 += GGML_FP16_TO_FP32(x1->m) * y1->s;
  2658. // extract the 5th bit via lookup table ((b) << 4)
  2659. memcpy(&qh0, x0->qh, sizeof(qh0));
  2660. memcpy(&qh1, x1->qh, sizeof(qh1));
  2661. tmp0[0] = table_b2b_0[(qh0 >> 0) & 0xFF];
  2662. tmp0[1] = table_b2b_0[(qh0 >> 8) & 0xFF];
  2663. tmp0[2] = table_b2b_0[(qh0 >> 16) & 0xFF];
  2664. tmp0[3] = table_b2b_0[(qh0 >> 24) ];
  2665. tmp1[0] = table_b2b_0[(qh1 >> 0) & 0xFF];
  2666. tmp1[1] = table_b2b_0[(qh1 >> 8) & 0xFF];
  2667. tmp1[2] = table_b2b_0[(qh1 >> 16) & 0xFF];
  2668. tmp1[3] = table_b2b_0[(qh1 >> 24) ];
  2669. const int8x16_t qhl0 = vld1q_s8((const int8_t *)(tmp0 + 0));
  2670. const int8x16_t qhh0 = vld1q_s8((const int8_t *)(tmp0 + 2));
  2671. const int8x16_t qhl1 = vld1q_s8((const int8_t *)(tmp1 + 0));
  2672. const int8x16_t qhh1 = vld1q_s8((const int8_t *)(tmp1 + 2));
  2673. const uint8x16_t v0_0 = vld1q_u8(x0->qs);
  2674. const uint8x16_t v0_1 = vld1q_u8(x1->qs);
  2675. // 4-bit -> 8-bit
  2676. const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
  2677. const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
  2678. const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
  2679. const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
  2680. // add high bit
  2681. const int8x16_t v0_0lf = vorrq_s8(v0_0l, qhl0);
  2682. const int8x16_t v0_0hf = vorrq_s8(v0_0h, qhh0);
  2683. const int8x16_t v0_1lf = vorrq_s8(v0_1l, qhl1);
  2684. const int8x16_t v0_1hf = vorrq_s8(v0_1h, qhh1);
  2685. // load y
  2686. const int8x16_t v1_0l = vld1q_s8(y0->qs);
  2687. const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
  2688. const int8x16_t v1_1l = vld1q_s8(y1->qs);
  2689. const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);
  2690. #if defined(__ARM_FEATURE_DOTPROD)
  2691. sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(
  2692. vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l),
  2693. vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), GGML_FP16_TO_FP32(x0->d)*y0->d);
  2694. sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(
  2695. vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l),
  2696. vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_FP16_TO_FP32(x1->d)*y1->d);
  2697. #else
  2698. const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0lf), vget_low_s8 (v1_0l));
  2699. const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0lf), vget_high_s8(v1_0l));
  2700. const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hf), vget_low_s8 (v1_0h));
  2701. const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hf), vget_high_s8(v1_0h));
  2702. const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1lf), vget_low_s8 (v1_1l));
  2703. const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1lf), vget_high_s8(v1_1l));
  2704. const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1hf), vget_low_s8 (v1_1h));
  2705. const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1hf), vget_high_s8(v1_1h));
  2706. const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h));
  2707. const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h));
  2708. const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h));
  2709. const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h));
  2710. sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), GGML_FP16_TO_FP32(x0->d)*y0->d);
  2711. sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), GGML_FP16_TO_FP32(x1->d)*y1->d);
  2712. #endif
  2713. }
  2714. *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs0 + summs1;
  2715. #elif defined(__wasm_simd128__)
  2716. v128_t sumv = wasm_f32x4_splat(0.0f);
  2717. float summs = 0.0f;
  2718. uint32_t qh;
  2719. uint64_t tmp[4];
  2720. // TODO: check if unrolling this is better
  2721. for (int i = 0; i < nb; ++i) {
  2722. const block_q5_1 * restrict x0 = &x[i];
  2723. const block_q8_1 * restrict y0 = &y[i];
  2724. summs += GGML_FP16_TO_FP32(x0->m) * y0->s;
  2725. const v128_t m4b = wasm_i8x16_splat(0x0F);
  2726. // extract the 5th bit
  2727. memcpy(&qh, x0->qh, sizeof(qh));
  2728. tmp[0] = table_b2b_0[(qh >> 0) & 0xFF];
  2729. tmp[1] = table_b2b_0[(qh >> 8) & 0xFF];
  2730. tmp[2] = table_b2b_0[(qh >> 16) & 0xFF];
  2731. tmp[3] = table_b2b_0[(qh >> 24) ];
  2732. const v128_t qhl = wasm_v128_load(tmp + 0);
  2733. const v128_t qhh = wasm_v128_load(tmp + 2);
  2734. const v128_t v0 = wasm_v128_load(x0->qs);
  2735. // 4-bit -> 8-bit
  2736. const v128_t v0l = wasm_v128_and (v0, m4b);
  2737. const v128_t v0h = wasm_u8x16_shr(v0, 4);
  2738. // add high bit
  2739. const v128_t v0lf = wasm_v128_or(v0l, qhl);
  2740. const v128_t v0hf = wasm_v128_or(v0h, qhh);
  2741. // load y
  2742. const v128_t v1l = wasm_v128_load(y0->qs);
  2743. const v128_t v1h = wasm_v128_load(y0->qs + 16);
  2744. // int8x16 -> int16x8
  2745. const v128_t v0lfl = wasm_i16x8_extend_low_i8x16 (v0lf);
  2746. const v128_t v0lfh = wasm_i16x8_extend_high_i8x16(v0lf);
  2747. const v128_t v0hfl = wasm_i16x8_extend_low_i8x16 (v0hf);
  2748. const v128_t v0hfh = wasm_i16x8_extend_high_i8x16(v0hf);
  2749. const v128_t v1ll = wasm_i16x8_extend_low_i8x16 (v1l);
  2750. const v128_t v1lh = wasm_i16x8_extend_high_i8x16(v1l);
  2751. const v128_t v1hl = wasm_i16x8_extend_low_i8x16 (v1h);
  2752. const v128_t v1hh = wasm_i16x8_extend_high_i8x16(v1h);
  2753. // dot product
  2754. sumv = wasm_f32x4_add(sumv,
  2755. wasm_f32x4_mul(wasm_f32x4_convert_i32x4(wasm_i32x4_add(
  2756. wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0lfl, v1ll),
  2757. wasm_i32x4_dot_i16x8(v0lfh, v1lh)),
  2758. wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0hfl, v1hl),
  2759. wasm_i32x4_dot_i16x8(v0hfh, v1hh)))),
  2760. wasm_f32x4_splat(GGML_FP16_TO_FP32(x0->d) * y0->d)));
  2761. }
  2762. *s = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) +
  2763. wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3) + summs;
  2764. #elif defined(__AVX2__)
  2765. // Initialize accumulator with zeros
  2766. __m256 acc = _mm256_setzero_ps();
  2767. float summs = 0.0f;
  2768. // Main loop
  2769. for (int i = 0; i < nb; i++) {
  2770. const __m256 dx = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d));
  2771. summs += GGML_FP16_TO_FP32(x[i].m) * y[i].s;
  2772. __m256i bx = bytes_from_nibbles_32(x[i].qs);
  2773. __m256i bxhi = bytes_from_bits_32(x[i].qh);
  2774. bxhi = _mm256_and_si256(bxhi, _mm256_set1_epi8(0x10));
  2775. bx = _mm256_or_si256(bx, bxhi);
  2776. const __m256 dy = _mm256_set1_ps(y[i].d);
  2777. const __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);
  2778. const __m256 q = mul_sum_us8_pairs_float(bx, by);
  2779. acc = _mm256_fmadd_ps(q, _mm256_mul_ps(dx, dy), acc);
  2780. }
  2781. *s = hsum_float_8(acc) + summs;
  2782. #elif defined(__AVX__)
  2783. // Initialize accumulator with zeros
  2784. __m256 acc = _mm256_setzero_ps();
  2785. __m128i mask = _mm_set1_epi8(0x10);
  2786. float summs = 0.0f;
  2787. // Main loop
  2788. for (int i = 0; i < nb; i++) {
  2789. const __m256 dx = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d));
  2790. summs += GGML_FP16_TO_FP32(x[i].m) * y[i].s;
  2791. __m256i bx = bytes_from_nibbles_32(x[i].qs);
  2792. const __m256i bxhi = bytes_from_bits_32(x[i].qh);
  2793. __m128i bxhil = _mm256_castsi256_si128(bxhi);
  2794. __m128i bxhih = _mm256_extractf128_si256(bxhi, 1);
  2795. bxhil = _mm_and_si128(bxhil, mask);
  2796. bxhih = _mm_and_si128(bxhih, mask);
  2797. __m128i bxl = _mm256_castsi256_si128(bx);
  2798. __m128i bxh = _mm256_extractf128_si256(bx, 1);
  2799. bxl = _mm_or_si128(bxl, bxhil);
  2800. bxh = _mm_or_si128(bxh, bxhih);
  2801. bx = MM256_SET_M128I(bxh, bxl);
  2802. const __m256 dy = _mm256_set1_ps(y[i].d);
  2803. const __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);
  2804. const __m256 q = mul_sum_us8_pairs_float(bx, by);
  2805. acc = _mm256_add_ps(_mm256_mul_ps(q, _mm256_mul_ps(dx, dy)), acc);
  2806. }
  2807. *s = hsum_float_8(acc) + summs;
  2808. #elif defined(__riscv_v_intrinsic)
  2809. float sumf = 0.0;
  2810. uint32_t qh;
  2811. size_t vl = __riscv_vsetvl_e8m1(qk/2);
  2812. // temporary registers for shift operations
  2813. vuint32m2_t vt_1 = __riscv_vid_v_u32m2(vl);
  2814. vuint32m2_t vt_2 = __riscv_vadd_vx_u32m2(vt_1, 12, vl);
  2815. for (int i = 0; i < nb; i++) {
  2816. memcpy(&qh, x[i].qh, sizeof(uint32_t));
  2817. // load qh
  2818. vuint32m2_t vqh = __riscv_vmv_v_x_u32m2(qh, vl);
  2819. // ((qh >> (j + 0)) << 4) & 0x10;
  2820. vuint32m2_t xhr_0 = __riscv_vsrl_vv_u32m2(vqh, vt_1, vl);
  2821. vuint32m2_t xhl_0 = __riscv_vsll_vx_u32m2(xhr_0, 4, vl);
  2822. vuint32m2_t xha_0 = __riscv_vand_vx_u32m2(xhl_0, 0x10, vl);
  2823. // ((qh >> (j + 12)) ) & 0x10;
  2824. vuint32m2_t xhr_1 = __riscv_vsrl_vv_u32m2(vqh, vt_2, vl);
  2825. vuint32m2_t xha_1 = __riscv_vand_vx_u32m2(xhr_1, 0x10, vl);
  2826. // narrowing
  2827. vuint16m1_t xhc_0 = __riscv_vncvt_x_x_w_u16m1(xha_0, vl);
  2828. vuint8mf2_t xh_0 = __riscv_vncvt_x_x_w_u8mf2(xhc_0, vl);
  2829. vuint16m1_t xhc_1 = __riscv_vncvt_x_x_w_u16m1(xha_1, vl);
  2830. vuint8mf2_t xh_1 = __riscv_vncvt_x_x_w_u8mf2(xhc_1, vl);
  2831. // load
  2832. vuint8mf2_t tx = __riscv_vle8_v_u8mf2(x[i].qs, vl);
  2833. vint8mf2_t y0 = __riscv_vle8_v_i8mf2(y[i].qs, vl);
  2834. vint8mf2_t y1 = __riscv_vle8_v_i8mf2(y[i].qs+16, vl);
  2835. vuint8mf2_t x_at = __riscv_vand_vx_u8mf2(tx, 0x0F, vl);
  2836. vuint8mf2_t x_lt = __riscv_vsrl_vx_u8mf2(tx, 0x04, vl);
  2837. vuint8mf2_t x_a = __riscv_vor_vv_u8mf2(x_at, xh_0, vl);
  2838. vuint8mf2_t x_l = __riscv_vor_vv_u8mf2(x_lt, xh_1, vl);
  2839. vint8mf2_t v0 = __riscv_vreinterpret_v_u8mf2_i8mf2(x_a);
  2840. vint8mf2_t v1 = __riscv_vreinterpret_v_u8mf2_i8mf2(x_l);
  2841. vint16m1_t vec_mul1 = __riscv_vwmul_vv_i16m1(v0, y0, vl);
  2842. vint16m1_t vec_mul2 = __riscv_vwmul_vv_i16m1(v1, y1, vl);
  2843. vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl);
  2844. vint32m1_t vs1 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul1, vec_zero, vl);
  2845. vint32m1_t vs2 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul2, vs1, vl);
  2846. int sumi = __riscv_vmv_x_s_i32m1_i32(vs2);
  2847. sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s;
  2848. }
  2849. *s = sumf;
  2850. #else
  2851. // scalar
  2852. float sumf = 0.0;
  2853. for (int i = 0; i < nb; i++) {
  2854. uint32_t qh;
  2855. memcpy(&qh, x[i].qh, sizeof(qh));
  2856. int sumi = 0;
  2857. for (int j = 0; j < qk/2; ++j) {
  2858. const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10;
  2859. const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10;
  2860. const int32_t x0 = (x[i].qs[j] & 0xF) | xh_0;
  2861. const int32_t x1 = (x[i].qs[j] >> 4) | xh_1;
  2862. sumi += (x0 * y[i].qs[j]) + (x1 * y[i].qs[j + qk/2]);
  2863. }
  2864. sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s;
  2865. }
  2866. *s = sumf;
  2867. #endif
  2868. }
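// NOTE: ggml_vec_dot_q8_0_q8_0 computes the dot product of two q8_0 rows. Each block
// stores an fp16 scale d and QK8_0 signed 8-bit quants, so per block the contribution is
//
//     dot += FP16_TO_FP32(x.d) * FP16_TO_FP32(y.d) * sum_j x.qs[j]*y.qs[j]
//
// which is exactly what the scalar fallback at the end of the function evaluates.
// The NEON/AVX/RISC-V paths compute the same sums with widening integer multiplies and
// horizontal reductions; only the floating-point accumulation order differs, so results
// may differ from the scalar path by rounding error.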
  2869. static void ggml_vec_dot_q8_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
  2870. const int qk = QK8_0;
  2871. const int nb = n / qk;
  2872. assert(n % qk == 0);
  2873. const block_q8_0 * restrict x = vx;
  2874. const block_q8_0 * restrict y = vy;
  2875. #if defined(__ARM_NEON)
  2876. float32x4_t sumv0 = vdupq_n_f32(0.0f);
  2877. float32x4_t sumv1 = vdupq_n_f32(0.0f);
  2878. GGML_ASSERT(nb % 2 == 0); // TODO: handle odd nb
  2879. for (int i = 0; i < nb; i += 2) {
  2880. const block_q8_0 * restrict x0 = &x[i + 0];
  2881. const block_q8_0 * restrict x1 = &x[i + 1];
  2882. const block_q8_0 * restrict y0 = &y[i + 0];
  2883. const block_q8_0 * restrict y1 = &y[i + 1];
  2884. const int8x16_t x0_0 = vld1q_s8(x0->qs);
  2885. const int8x16_t x0_1 = vld1q_s8(x0->qs + 16);
  2886. const int8x16_t x1_0 = vld1q_s8(x1->qs);
  2887. const int8x16_t x1_1 = vld1q_s8(x1->qs + 16);
  2888. // load y
  2889. const int8x16_t y0_0 = vld1q_s8(y0->qs);
  2890. const int8x16_t y0_1 = vld1q_s8(y0->qs + 16);
  2891. const int8x16_t y1_0 = vld1q_s8(y1->qs);
  2892. const int8x16_t y1_1 = vld1q_s8(y1->qs + 16);
  2893. #if defined(__ARM_FEATURE_DOTPROD)
  2894. sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(
  2895. vdotq_s32(vdupq_n_s32(0), x0_0, y0_0),
  2896. vdotq_s32(vdupq_n_s32(0), x0_1, y0_1))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
  2897. sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(
  2898. vdotq_s32(vdupq_n_s32(0), x1_0, y1_0),
  2899. vdotq_s32(vdupq_n_s32(0), x1_1, y1_1))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
  2900. #else
  2901. const int16x8_t p0_0 = vmull_s8(vget_low_s8 (x0_0), vget_low_s8 (y0_0));
  2902. const int16x8_t p0_1 = vmull_s8(vget_high_s8(x0_0), vget_high_s8(y0_0));
  2903. const int16x8_t p0_2 = vmull_s8(vget_low_s8 (x0_1), vget_low_s8 (y0_1));
  2904. const int16x8_t p0_3 = vmull_s8(vget_high_s8(x0_1), vget_high_s8(y0_1));
  2905. const int16x8_t p1_0 = vmull_s8(vget_low_s8 (x1_0), vget_low_s8 (y1_0));
  2906. const int16x8_t p1_1 = vmull_s8(vget_high_s8(x1_0), vget_high_s8(y1_0));
  2907. const int16x8_t p1_2 = vmull_s8(vget_low_s8 (x1_1), vget_low_s8 (y1_1));
  2908. const int16x8_t p1_3 = vmull_s8(vget_high_s8(x1_1), vget_high_s8(y1_1));
  2909. const int32x4_t p0 = vaddq_s32(vpaddlq_s16(p0_0), vpaddlq_s16(p0_1));
  2910. const int32x4_t p1 = vaddq_s32(vpaddlq_s16(p0_2), vpaddlq_s16(p0_3));
  2911. const int32x4_t p2 = vaddq_s32(vpaddlq_s16(p1_0), vpaddlq_s16(p1_1));
  2912. const int32x4_t p3 = vaddq_s32(vpaddlq_s16(p1_2), vpaddlq_s16(p1_3));
  2913. sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(p0, p1)), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
  2914. sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(p2, p3)), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
  2915. #endif
  2916. }
  2917. *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
  2918. #elif defined(__AVX2__) || defined(__AVX__)
  2919. // Initialize accumulator with zeros
  2920. __m256 acc = _mm256_setzero_ps();
  2921. // Main loop
  2922. for (int i = 0; i < nb; ++i) {
  2923. // Compute combined scale for the block
  2924. const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d));
  2925. __m256i bx = _mm256_loadu_si256((const __m256i *)x[i].qs);
  2926. __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);
  2927. const __m256 q = mul_sum_i8_pairs_float(bx, by);
  2928. // Multiply q with scale and accumulate
  2929. #if defined(__AVX2__)
  2930. acc = _mm256_fmadd_ps( d, q, acc );
  2931. #else
  2932. acc = _mm256_add_ps( _mm256_mul_ps( d, q ), acc );
  2933. #endif
  2934. }
  2935. *s = hsum_float_8(acc);
  2936. #elif defined(__riscv_v_intrinsic)
  2937. float sumf = 0.0;
  2938. size_t vl = __riscv_vsetvl_e8m1(qk);
  2939. for (int i = 0; i < nb; i++) {
  2940. // load elements
  2941. vint8m1_t bx = __riscv_vle8_v_i8m1(x[i].qs, vl);
  2942. vint8m1_t by = __riscv_vle8_v_i8m1(y[i].qs, vl);
  2943. vint16m2_t vw_mul = __riscv_vwmul_vv_i16m2(bx, by, vl);
  2944. vint32m1_t v_zero = __riscv_vmv_v_x_i32m1(0, vl);
  2945. vint32m1_t v_sum = __riscv_vwredsum_vs_i16m2_i32m1(vw_mul, v_zero, vl);
  2946. int sumi = __riscv_vmv_x_s_i32m1_i32(v_sum);
  2947. sumf += sumi*(GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d));
  2948. }
  2949. *s = sumf;
  2950. #else
  2951. // scalar
  2952. float sumf = 0.0;
  2953. for (int i = 0; i < nb; i++) {
  2954. int sumi = 0;
  2955. for (int j = 0; j < qk; j++) {
  2956. sumi += x[i].qs[j]*y[i].qs[j];
  2957. }
  2958. sumf += sumi*(GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d));
  2959. }
  2960. *s = sumf;
  2961. #endif
  2962. }
  2963. // compute GGML_VEC_DOT_UNROLL dot products at once
  2964. // xs - x row stride in bytes
  2965. inline static void ggml_vec_dot_f16_unroll(const int n, const int xs, float * restrict s, void * restrict xv, ggml_fp16_t * restrict y) {
  2966. ggml_float sumf[GGML_VEC_DOT_UNROLL] = { 0.0 };
  2967. ggml_fp16_t * restrict x[GGML_VEC_DOT_UNROLL];
  2968. for (int i = 0; i < GGML_VEC_DOT_UNROLL; ++i) {
  2969. x[i] = (ggml_fp16_t *) ((char *) xv + i*xs);
  2970. }
  2971. #if defined(GGML_SIMD)
  2972. const int np = (n & ~(GGML_F16_STEP - 1));
  2973. GGML_F16_VEC sum[GGML_VEC_DOT_UNROLL][GGML_F16_ARR] = { { GGML_F16_VEC_ZERO } };
  2974. GGML_F16_VEC ax[GGML_F16_ARR];
  2975. GGML_F16_VEC ay[GGML_F16_ARR];
  2976. for (int i = 0; i < np; i += GGML_F16_STEP) {
  2977. for (int j = 0; j < GGML_F16_ARR; j++) {
  2978. ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j);
  2979. for (int k = 0; k < GGML_VEC_DOT_UNROLL; ++k) {
  2980. ax[j] = GGML_F16_VEC_LOAD(x[k] + i + j*GGML_F16_EPR, j);
  2981. sum[k][j] = GGML_F16_VEC_FMA(sum[k][j], ax[j], ay[j]);
  2982. }
  2983. }
  2984. }
  2985. // reduce sum0..sum3 to sum0
  2986. for (int k = 0; k < GGML_VEC_DOT_UNROLL; ++k) {
  2987. GGML_F16_VEC_REDUCE(sumf[k], sum[k]);
  2988. }
  2989. // leftovers
  2990. for (int i = np; i < n; ++i) {
  2991. for (int j = 0; j < GGML_VEC_DOT_UNROLL; ++j) {
  2992. sumf[j] += (ggml_float)(GGML_FP16_TO_FP32(x[j][i])*GGML_FP16_TO_FP32(y[i]));
  2993. }
  2994. }
  2995. #else
  2996. for (int i = 0; i < n; ++i) {
  2997. for (int j = 0; j < GGML_VEC_DOT_UNROLL; ++j) {
  2998. sumf[j] += (ggml_float)(GGML_FP16_TO_FP32(x[j][i])*GGML_FP16_TO_FP32(y[i]));
  2999. }
  3000. }
  3001. #endif
  3002. for (int i = 0; i < GGML_VEC_DOT_UNROLL; ++i) {
  3003. s[i] = sumf[i];
  3004. }
  3005. }
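// NOTE: ggml_vec_mad_f32 is an axpy-style kernel, y[i] += x[i]*v for i in [0, n).
// With GGML_SIMD enabled it processes GGML_F32_STEP elements per iteration using fused
// multiply-adds and handles the remaining tail with the scalar loop, so it is equivalent
// to the scalar reference:
//
//     for (int i = 0; i < n; ++i) { y[i] += x[i]*v; }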
  3006. inline static void ggml_vec_mad_f32(const int n, float * restrict y, const float * restrict x, const float v) {
  3007. #if defined(GGML_SIMD)
  3008. const int np = (n & ~(GGML_F32_STEP - 1));
  3009. GGML_F32_VEC vx = GGML_F32_VEC_SET1(v);
  3010. GGML_F32_VEC ax[GGML_F32_ARR];
  3011. GGML_F32_VEC ay[GGML_F32_ARR];
  3012. for (int i = 0; i < np; i += GGML_F32_STEP) {
  3013. for (int j = 0; j < GGML_F32_ARR; j++) {
  3014. ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR);
  3015. ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
  3016. ay[j] = GGML_F32_VEC_FMA(ay[j], ax[j], vx);
  3017. GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]);
  3018. }
  3019. }
  3020. // leftovers
  3021. for (int i = np; i < n; ++i) {
  3022. y[i] += x[i]*v;
  3023. }
  3024. #else
  3025. // scalar
  3026. for (int i = 0; i < n; ++i) {
  3027. y[i] += x[i]*v;
  3028. }
  3029. #endif
  3030. }
  3031. // xs and vs are byte strides of x and v
  3032. inline static void ggml_vec_mad_f32_unroll(const int n, const int xs, const int vs, float * restrict y, const float * restrict xv, const float * restrict vv) {
  3033. const float * restrict x[GGML_VEC_MAD_UNROLL];
  3034. const float * restrict v[GGML_VEC_MAD_UNROLL];
  3035. for (int i = 0; i < GGML_VEC_MAD_UNROLL; ++i) {
  3036. x[i] = (const float *) ((const char *) xv + i*xs);
  3037. v[i] = (const float *) ((const char *) vv + i*vs);
  3038. }
  3039. #if defined(GGML_SIMD)
  3040. const int np = (n & ~(GGML_F32_STEP - 1));
  3041. GGML_F32_VEC vx[GGML_VEC_MAD_UNROLL];
  3042. for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) {
  3043. vx[k] = GGML_F32_VEC_SET1(v[k][0]);
  3044. }
  3045. GGML_F32_VEC ax[GGML_VEC_MAD_UNROLL][GGML_F32_ARR];
  3046. GGML_F32_VEC ay[GGML_F32_ARR];
  3047. for (int i = 0; i < np; i += GGML_F32_STEP) {
  3048. for (int j = 0; j < GGML_F32_ARR; j++) {
  3049. ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
  3050. for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) {
  3051. ax[k][j] = GGML_F32_VEC_LOAD(x[k] + i + j*GGML_F32_EPR);
  3052. ay[j] = GGML_F32_VEC_FMA(ay[j], ax[k][j], vx[k]);
  3053. }
  3054. GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]);
  3055. }
  3056. }
  3057. // leftovers
  3058. for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) {
  3059. for (int i = np; i < n; ++i) {
  3060. y[i] += x[k][i]*v[k][0];
  3061. }
  3062. }
  3063. #else
  3064. // scalar
  3065. for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) {
  3066. for (int i = 0; i < n; ++i) {
  3067. y[i] += x[k][i]*v[k][0];
  3068. }
  3069. }
  3070. #endif
  3071. }
  3072. //inline static void ggml_vec_scale_f32(const int n, float * y, const float v) { for (int i = 0; i < n; ++i) y[i] *= v; }
  3073. inline static void ggml_vec_scale_f32(const int n, float * y, const float v) {
  3074. #if defined(GGML_USE_ACCELERATE)
  3075. vDSP_vsmul(y, 1, &v, y, 1, n);
  3076. #elif defined(GGML_SIMD)
  3077. const int np = (n & ~(GGML_F32_STEP - 1));
  3078. GGML_F32_VEC vx = GGML_F32_VEC_SET1(v);
  3079. GGML_F32_VEC ay[GGML_F32_ARR];
  3080. for (int i = 0; i < np; i += GGML_F32_STEP) {
  3081. for (int j = 0; j < GGML_F32_ARR; j++) {
  3082. ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
  3083. ay[j] = GGML_F32_VEC_MUL(ay[j], vx);
  3084. GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]);
  3085. }
  3086. }
  3087. // leftovers
  3088. for (int i = np; i < n; ++i) {
  3089. y[i] *= v;
  3090. }
  3091. #else
  3092. // scalar
  3093. for (int i = 0; i < n; ++i) {
  3094. y[i] *= v;
  3095. }
  3096. #endif
  3097. }
  3098. inline static void ggml_vec_norm_f32 (const int n, float * s, const float * x) { ggml_vec_dot_f32(n, s, x, x); *s = sqrtf(*s); }
  3099. inline static void ggml_vec_sqr_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i]*x[i]; }
  3100. inline static void ggml_vec_sqrt_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = sqrtf(x[i]); }
  3101. inline static void ggml_vec_log_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = logf(x[i]); }
  3102. inline static void ggml_vec_abs_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = fabsf(x[i]); }
  3103. inline static void ggml_vec_sgn_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 1.f : ((x[i] < 0.f) ? -1.f : 0.f); }
  3104. inline static void ggml_vec_step_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 1.f : 0.f; }
  3105. inline static void ggml_vec_tanh_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = tanhf(x[i]); }
  3106. inline static void ggml_vec_elu_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? x[i] : expf(x[i])-1; }
  3107. inline static void ggml_vec_relu_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? x[i] : 0.f; }
  3108. static const float GELU_COEF_A = 0.044715f;
  3109. static const float GELU_QUICK_COEF = -1.702f;
  3110. static const float SQRT_2_OVER_PI = 0.79788456080286535587989211986876f;
  3111. inline static float ggml_gelu_f32(float x) {
  3112. return 0.5f*x*(1.0f + tanhf(SQRT_2_OVER_PI*x*(1.0f + GELU_COEF_A*x*x)));
  3113. }
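// NOTE: this is the standard tanh-based GELU approximation,
//
//     GELU(x) ~= 0.5*x*(1 + tanh( sqrt(2/pi) * (x + 0.044715*x^3) ))
//
// with SQRT_2_OVER_PI = sqrt(2/pi) and GELU_COEF_A = 0.044715; the code factors the inner
// term as sqrt(2/pi)*x*(1 + GELU_COEF_A*x*x), which is the same expression.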
  3114. inline static void ggml_vec_gelu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
  3115. const uint16_t * i16 = (const uint16_t *) x;
  3116. for (int i = 0; i < n; ++i) {
  3117. y[i] = table_gelu_f16[i16[i]];
  3118. }
  3119. }
  3120. #ifdef GGML_GELU_FP16
  3121. inline static void ggml_vec_gelu_f32(const int n, float * y, const float * x) {
  3122. uint16_t t;
  3123. for (int i = 0; i < n; ++i) {
  3124. ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
  3125. memcpy(&t, &fp16, sizeof(uint16_t));
  3126. y[i] = GGML_FP16_TO_FP32(table_gelu_f16[t]);
  3127. }
  3128. }
  3129. #else
  3130. inline static void ggml_vec_gelu_f32(const int n, float * y, const float * x) {
  3131. for (int i = 0; i < n; ++i) {
  3132. y[i] = ggml_gelu_f32(x[i]);
  3133. }
  3134. }
  3135. #endif
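// NOTE: when GGML_GELU_FP16 is defined, the input is rounded to fp16 and its 16-bit
// pattern indexes table_gelu_f16 (one entry per possible fp16 value, filled in ggml_init
// further down in this file). This trades fp16 input precision for a table lookup instead
// of a tanhf call, i.e. roughly:
//
//     y = FP16_TO_FP32( table_gelu_f16[ bits(FP32_TO_FP16(x)) ] )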
  3136. inline static float ggml_gelu_quick_f32(float x) {
  3137. return x*(1.0f/(1.0f+expf(GELU_QUICK_COEF*x)));
  3138. }
  3139. //inline static void ggml_vec_gelu_quick_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
  3140. // const uint16_t * i16 = (const uint16_t *) x;
  3141. // for (int i = 0; i < n; ++i) {
  3142. // y[i] = table_gelu_quick_f16[i16[i]];
  3143. // }
  3144. //}
  3145. #ifdef GGML_GELU_QUICK_FP16
  3146. inline static void ggml_vec_gelu_quick_f32(const int n, float * y, const float * x) {
  3147. uint16_t t;
  3148. for (int i = 0; i < n; ++i) {
  3149. ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
  3150. memcpy(&t, &fp16, sizeof(uint16_t));
  3151. y[i] = GGML_FP16_TO_FP32(table_gelu_quick_f16[t]);
  3152. }
  3153. }
  3154. #else
  3155. inline static void ggml_vec_gelu_quick_f32(const int n, float * y, const float * x) {
  3156. for (int i = 0; i < n; ++i) {
  3157. y[i] = ggml_gelu_quick_f32(x[i]);
  3158. }
  3159. }
  3160. #endif
  3161. // Sigmoid Linear Unit (SiLU) function
  3162. inline static float ggml_silu_f32(float x) {
  3163. return x/(1.0f + expf(-x));
  3164. }
  3165. //inline static void ggml_vec_silu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
  3166. // const uint16_t * i16 = (const uint16_t *) x;
  3167. // for (int i = 0; i < n; ++i) {
  3168. // y[i] = table_silu_f16[i16[i]];
  3169. // }
  3170. //}
  3171. #ifdef GGML_SILU_FP16
  3172. inline static void ggml_vec_silu_f32(const int n, float * y, const float * x) {
  3173. uint16_t t;
  3174. for (int i = 0; i < n; ++i) {
  3175. ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
  3176. memcpy(&t, &fp16, sizeof(uint16_t));
  3177. y[i] = GGML_FP16_TO_FP32(table_silu_f16[t]);
  3178. }
  3179. }
  3180. #else
  3181. inline static void ggml_vec_silu_f32(const int n, float * y, const float * x) {
  3182. for (int i = 0; i < n; ++i) {
  3183. y[i] = ggml_silu_f32(x[i]);
  3184. }
  3185. }
  3186. #endif
  3187. inline static float ggml_silu_backward_f32(float x, float dy) {
  3188. const float s = 1.0f/(1.0f + expf(-x));
  3189. return dy*s*(1.0f + x*(1.0f - s));
  3190. }
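// NOTE: with s = sigmoid(x) = 1/(1 + exp(-x)) and silu(x) = x*s, the derivative is
//
//     d/dx silu(x) = s + x*s*(1 - s) = s*(1 + x*(1 - s))
//
// so ggml_silu_backward_f32 returns dy*s*(1 + x*(1 - s)), matching the chain rule.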
  3191. #ifdef GGML_SILU_FP16
  3192. inline static void ggml_vec_silu_backward_f32(const int n, float * dx, const float * x, const float * dy) {
  3193. for (int i = 0; i < n; ++i) {
3194. // the forward pass did not use x[i] directly but its f16-rounded equivalent,
3195. // so take the derivative at the f16 value of x[i]:
  3196. ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
  3197. float usedx = GGML_FP16_TO_FP32(fp16);
  3198. dx[i] = ggml_silu_backward_f32(usedx, dy[i]);
  3199. }
  3200. }
  3201. #else
  3202. inline static void ggml_vec_silu_backward_f32(const int n, float * dx, const float * x, const float * dy) {
  3203. for (int i = 0; i < n; ++i) {
  3204. dx[i] = ggml_silu_backward_f32(x[i], dy[i]);
  3205. }
  3206. }
  3207. #endif
  3208. inline static void ggml_vec_sum_f32(const int n, float * s, const float * x) {
  3209. #ifndef GGML_USE_ACCELERATE
  3210. ggml_float sum = 0.0;
  3211. for (int i = 0; i < n; ++i) {
  3212. sum += (ggml_float)x[i];
  3213. }
  3214. *s = sum;
  3215. #else
  3216. vDSP_sve(x, 1, s, n);
  3217. #endif
  3218. }
  3219. inline static void ggml_vec_sum_f32_ggf(const int n, ggml_float * s, const float * x) {
  3220. ggml_float sum = 0.0;
  3221. for (int i = 0; i < n; ++i) {
  3222. sum += (ggml_float)x[i];
  3223. }
  3224. *s = sum;
  3225. }
  3226. inline static void ggml_vec_sum_f16_ggf(const int n, float * s, const ggml_fp16_t * x) {
  3227. float sum = 0.0f;
  3228. for (int i = 0; i < n; ++i) {
  3229. sum += GGML_FP16_TO_FP32(x[i]);
  3230. }
  3231. *s = sum;
  3232. }
  3233. inline static void ggml_vec_max_f32(const int n, float * s, const float * x) {
  3234. #ifndef GGML_USE_ACCELERATE
  3235. float max = -INFINITY;
  3236. for (int i = 0; i < n; ++i) {
  3237. max = MAX(max, x[i]);
  3238. }
  3239. *s = max;
  3240. #else
  3241. vDSP_maxv(x, 1, s, n);
  3242. #endif
  3243. }
  3244. inline static void ggml_vec_norm_inv_f32(const int n, float * s, const float * x) {
  3245. ggml_vec_norm_f32(n, s, x);
  3246. *s = 1.f/(*s);
  3247. }
  3248. inline static void ggml_vec_argmax_f32(const int n, int * s, const float * x) {
  3249. float max = -INFINITY;
  3250. int idx = 0;
  3251. for (int i = 0; i < n; ++i) {
  3252. max = MAX(max, x[i]);
  3253. if (max == x[i]) { idx = i; }
  3254. }
  3255. *s = idx;
  3256. }
  3257. //
  3258. // data types
  3259. //
  3260. static const char * GGML_OP_NAME[GGML_OP_COUNT] = {
  3261. "NONE",
  3262. "DUP",
  3263. "ADD",
  3264. "ADD1",
  3265. "ACC",
  3266. "SUB",
  3267. "MUL",
  3268. "DIV",
  3269. "SQR",
  3270. "SQRT",
  3271. "LOG",
  3272. "SUM",
  3273. "SUM_ROWS",
  3274. "MEAN",
  3275. "ARGMAX",
  3276. "REPEAT",
  3277. "REPEAT_BACK",
  3278. "CONCAT",
  3279. "SILU_BACK",
  3280. "NORM",
  3281. "RMS_NORM",
  3282. "RMS_NORM_BACK",
  3283. "GROUP_NORM",
  3284. "MUL_MAT",
  3285. "OUT_PROD",
  3286. "SCALE",
  3287. "SET",
  3288. "CPY",
  3289. "CONT",
  3290. "RESHAPE",
  3291. "VIEW",
  3292. "PERMUTE",
  3293. "TRANSPOSE",
  3294. "GET_ROWS",
  3295. "GET_ROWS_BACK",
  3296. "DIAG",
  3297. "DIAG_MASK_INF",
  3298. "DIAG_MASK_ZERO",
  3299. "SOFT_MAX",
  3300. "SOFT_MAX_BACK",
  3301. "ROPE",
  3302. "ROPE_BACK",
  3303. "ALIBI",
  3304. "CLAMP",
  3305. "CONV_1D",
  3306. "CONV_TRANSPOSE_1D",
  3307. "CONV_2D",
  3308. "CONV_TRANSPOSE_2D",
  3309. "POOL_1D",
  3310. "POOL_2D",
  3311. "UPSCALE",
  3312. "CONV_1D_STAGE_0",
  3313. "CONV_1D_STAGE_1",
  3314. "FLASH_ATTN",
  3315. "FLASH_FF",
  3316. "FLASH_ATTN_BACK",
  3317. "WIN_PART",
  3318. "WIN_UNPART",
  3319. "GET_REL_POS",
  3320. "ADD_REL_POS",
  3321. "UNARY",
  3322. "MAP_UNARY",
  3323. "MAP_BINARY",
  3324. "MAP_CUSTOM1_F32",
  3325. "MAP_CUSTOM2_F32",
  3326. "MAP_CUSTOM3_F32",
  3327. "MAP_CUSTOM1",
  3328. "MAP_CUSTOM2",
  3329. "MAP_CUSTOM3",
  3330. "CROSS_ENTROPY_LOSS",
  3331. "CROSS_ENTROPY_LOSS_BACK",
  3332. };
  3333. static_assert(GGML_OP_COUNT == 71, "GGML_OP_COUNT != 71");
  3334. static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = {
  3335. "none",
  3336. "x",
  3337. "x+y",
  3338. "x+y",
  3339. "view(x,nb,offset)+=y->x",
  3340. "x-y",
  3341. "x*y",
  3342. "x/y",
  3343. "x^2",
  3344. "√x",
  3345. "log(x)",
  3346. "Σx",
  3347. "Σx_k",
  3348. "Σx/n",
  3349. "argmax(x)",
  3350. "repeat(x)",
  3351. "repeat_back(x)",
  3352. "concat(x, y)",
  3353. "silu_back(x)",
  3354. "norm(x)",
  3355. "rms_norm(x)",
  3356. "rms_norm_back(x)",
  3357. "group_norm(x)",
  3358. "X*Y",
  3359. "X*Y",
  3360. "x*v",
  3361. "y-\\>view(x)",
  3362. "x-\\>y",
  3363. "cont(x)",
  3364. "reshape(x)",
  3365. "view(x)",
  3366. "permute(x)",
  3367. "transpose(x)",
  3368. "get_rows(x)",
  3369. "get_rows_back(x)",
  3370. "diag(x)",
  3371. "diag_mask_inf(x)",
  3372. "diag_mask_zero(x)",
  3373. "soft_max(x)",
  3374. "soft_max_back(x)",
  3375. "rope(x)",
  3376. "rope_back(x)",
  3377. "alibi(x)",
  3378. "clamp(x)",
  3379. "conv_1d(x)",
  3380. "conv_transpose_1d(x)",
  3381. "conv_2d(x)",
  3382. "conv_transpose_2d(x)",
  3383. "pool_1d(x)",
  3384. "pool_2d(x)",
  3385. "upscale(x)",
  3386. "conv_1d_stage_0(x)",
  3387. "conv_1d_stage_1(x)",
  3388. "flash_attn(x)",
  3389. "flash_ff(x)",
  3390. "flash_attn_back(x)",
  3391. "win_part(x)",
  3392. "win_unpart(x)",
  3393. "get_rel_pos(x)",
  3394. "add_rel_pos(x)",
  3395. "unary(x)",
  3396. "f(x)",
  3397. "f(x,y)",
  3398. "custom_f32(x)",
  3399. "custom_f32(x,y)",
  3400. "custom_f32(x,y,z)",
  3401. "custom(x)",
  3402. "custom(x,y)",
  3403. "custom(x,y,z)",
  3404. "cross_entropy_loss(x,y)",
  3405. "cross_entropy_loss_back(x,y)",
  3406. };
  3407. static_assert(GGML_OP_COUNT == 71, "GGML_OP_COUNT != 71");
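// NOTE: GGML_OP_NAME and GGML_OP_SYMBOL are indexed by enum ggml_op (see ggml_op_name and
// ggml_op_symbol below), so their entries must stay in the same order as the enum in
// ggml.h. The static_asserts on GGML_OP_COUNT catch the case where an op is added to the
// enum but a table is not updated; e.g. adding a hypothetical GGML_OP_FOO would require a
// "FOO" name entry, a symbol entry, and bumping both asserts.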
  3408. static_assert(GGML_OP_POOL_COUNT == 2, "GGML_OP_POOL_COUNT != 2");
  3409. static_assert(sizeof(struct ggml_object)%GGML_MEM_ALIGN == 0, "ggml_object size must be a multiple of GGML_MEM_ALIGN");
  3410. static_assert(sizeof(struct ggml_tensor)%GGML_MEM_ALIGN == 0, "ggml_tensor size must be a multiple of GGML_MEM_ALIGN");
3411. // WARN:
3412. // Misconfiguration can lead to problems that are hard to reason about:
3413. // * At best it crashes or talks nonsense.
3414. // * At worst the output is subtly wrong and hard to notice.
3415. //
3416. // An op has to enable INIT or FINALIZE when any of its branches needs that pass.
3417. // Take care with compile options (e.g., GGML_USE_xxx).
  3418. static bool GGML_OP_HAS_INIT [GGML_OP_COUNT] = { 0 };
  3419. static bool GGML_OP_HAS_FINALIZE[GGML_OP_COUNT] = { 0 };
  3420. static void ggml_setup_op_has_task_pass(void) {
  3421. { // INIT
  3422. bool * p = GGML_OP_HAS_INIT;
  3423. p[GGML_OP_ACC ] = true;
  3424. p[GGML_OP_MUL_MAT ] = true;
  3425. p[GGML_OP_OUT_PROD ] = true;
  3426. p[GGML_OP_SET ] = true;
  3427. p[GGML_OP_GET_ROWS_BACK ] = true;
  3428. p[GGML_OP_DIAG_MASK_INF ] = true;
  3429. p[GGML_OP_DIAG_MASK_ZERO ] = true;
  3430. p[GGML_OP_CONV_1D ] = true;
  3431. p[GGML_OP_CONV_1D_STAGE_0 ] = true;
  3432. p[GGML_OP_CONV_1D_STAGE_1 ] = true;
  3433. p[GGML_OP_CONV_2D ] = true;
  3434. p[GGML_OP_CONV_TRANSPOSE_1D ] = true;
  3435. p[GGML_OP_CONV_TRANSPOSE_2D ] = true;
  3436. p[GGML_OP_FLASH_ATTN_BACK ] = true;
  3437. p[GGML_OP_CROSS_ENTROPY_LOSS ] = true;
  3438. p[GGML_OP_ADD_REL_POS ] = true;
  3439. }
  3440. { // FINALIZE
  3441. bool * p = GGML_OP_HAS_FINALIZE;
  3442. p[GGML_OP_CROSS_ENTROPY_LOSS ] = true;
  3443. }
  3444. }
  3445. //
  3446. // ggml context
  3447. //
  3448. struct ggml_context {
  3449. size_t mem_size;
  3450. void * mem_buffer;
  3451. bool mem_buffer_owned;
  3452. bool no_alloc;
  3453. bool no_alloc_save; // this is used to save the no_alloc state when using scratch buffers
  3454. int n_objects;
  3455. struct ggml_object * objects_begin;
  3456. struct ggml_object * objects_end;
  3457. struct ggml_scratch scratch;
  3458. struct ggml_scratch scratch_save;
  3459. };
  3460. struct ggml_context_container {
  3461. bool used;
  3462. struct ggml_context context;
  3463. };
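// NOTE: a ggml_context is a simple linear (bump) allocator over mem_buffer:
// ggml_new_object appends object headers and payloads one after another and objects are
// never freed individually; ggml_free releases the whole pool at once. no_alloc creates
// tensor metadata without allocating data, while scratch/scratch_save allow
// ggml_scratch_save/ggml_scratch_load to temporarily redirect tensor data into a
// user-provided scratch buffer (see ggml_new_tensor_impl below).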
  3464. //
  3465. // NUMA support
  3466. //
  3467. #define GGML_NUMA_MAX_NODES 8
  3468. #define GGML_NUMA_MAX_CPUS 512
  3469. struct ggml_numa_node {
  3470. uint32_t cpus[GGML_NUMA_MAX_CPUS]; // hardware threads on this node
  3471. uint32_t n_cpus;
  3472. };
  3473. struct ggml_numa_nodes {
  3474. struct ggml_numa_node nodes[GGML_NUMA_MAX_NODES];
  3475. uint32_t n_nodes;
  3476. uint32_t total_cpus; // hardware threads on system
  3477. };
  3478. //
  3479. // ggml state
  3480. //
  3481. struct ggml_state {
  3482. struct ggml_context_container contexts[GGML_MAX_CONTEXTS];
  3483. struct ggml_numa_nodes numa;
  3484. };
  3485. // global state
  3486. static struct ggml_state g_state;
  3487. static atomic_int g_state_barrier = 0;
  3488. // barrier via spin lock
  3489. inline static void ggml_critical_section_start(void) {
  3490. int processing = atomic_fetch_add(&g_state_barrier, 1);
  3491. while (processing > 0) {
  3492. // wait for other threads to finish
  3493. atomic_fetch_sub(&g_state_barrier, 1);
  3494. sched_yield(); // TODO: reconsider this
  3495. processing = atomic_fetch_add(&g_state_barrier, 1);
  3496. }
  3497. }
3498. // TODO: make this execute automatically somehow,
3499. // e.g., via some sort of "sentry" mechanism
  3500. inline static void ggml_critical_section_end(void) {
  3501. atomic_fetch_sub(&g_state_barrier, 1);
  3502. }
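// NOTE: the pair above implements a simple spin lock. atomic_fetch_add returns the
// previous value of g_state_barrier, so a return value of 0 means the caller is alone and
// owns the critical section; otherwise it undoes its increment, yields, and retries.
// Typical usage (illustrative sketch):
//
//     ggml_critical_section_start();
//     // ... read/modify g_state ...
//     ggml_critical_section_end();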
  3503. void ggml_numa_init(void) {
  3504. if (g_state.numa.n_nodes > 0) {
  3505. fprintf(stderr, "ggml_numa_init: NUMA already initialized\n");
  3506. return;
  3507. }
  3508. #ifdef __linux__
  3509. struct stat st;
  3510. char path[256];
  3511. int rv;
  3512. // enumerate nodes
  3513. while (g_state.numa.n_nodes < GGML_NUMA_MAX_NODES) {
  3514. rv = snprintf(path, sizeof(path), "/sys/devices/system/node/node%u", g_state.numa.n_nodes);
  3515. GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path));
  3516. if (stat(path, &st) != 0) { break; }
  3517. ++g_state.numa.n_nodes;
  3518. }
  3519. // enumerate CPUs
  3520. while (g_state.numa.total_cpus < GGML_NUMA_MAX_CPUS) {
  3521. rv = snprintf(path, sizeof(path), "/sys/devices/system/cpu/cpu%u", g_state.numa.total_cpus);
  3522. GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path));
  3523. if (stat(path, &st) != 0) { break; }
  3524. ++g_state.numa.total_cpus;
  3525. }
  3526. GGML_PRINT_DEBUG("found %u numa nodes, %u CPUs\n", g_state.numa.n_nodes, g_state.numa.total_cpus);
  3527. if (g_state.numa.n_nodes < 1 || g_state.numa.total_cpus < 1) {
  3528. g_state.numa.n_nodes = 0;
  3529. return;
  3530. }
  3531. for (uint32_t n = 0; n < g_state.numa.n_nodes; ++n) {
  3532. struct ggml_numa_node * node = &g_state.numa.nodes[n];
  3533. GGML_PRINT_DEBUG("CPUs on node %u:", n);
  3534. node->n_cpus = 0;
  3535. for (uint32_t c = 0; c < g_state.numa.total_cpus; ++c) {
  3536. rv = snprintf(path, sizeof(path), "/sys/devices/system/node/node%u/cpu%u", n, c);
  3537. GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path));
  3538. if (stat(path, &st) == 0) {
  3539. node->cpus[node->n_cpus++] = c;
  3540. GGML_PRINT_DEBUG(" %u", c);
  3541. }
  3542. }
  3543. GGML_PRINT_DEBUG("\n");
  3544. }
  3545. if (ggml_is_numa()) {
  3546. FILE *fptr = fopen("/proc/sys/kernel/numa_balancing", "r");
  3547. if (fptr != NULL) {
  3548. char buf[42];
  3549. if (fgets(buf, sizeof(buf), fptr) && strncmp(buf, "0\n", sizeof(buf)) != 0) {
  3550. GGML_PRINT("WARNING: /proc/sys/kernel/numa_balancing is enabled, this has been observed to impair performance\n");
  3551. }
  3552. fclose(fptr);
  3553. }
  3554. }
  3555. #else
  3556. // TODO
  3557. #endif
  3558. }
  3559. bool ggml_is_numa(void) {
  3560. return g_state.numa.n_nodes > 1;
  3561. }
  3562. ////////////////////////////////////////////////////////////////////////////////
  3563. void ggml_print_object(const struct ggml_object * obj) {
  3564. GGML_PRINT(" - ggml_object: type = %d, offset = %zu, size = %zu, next = %p\n",
  3565. obj->type, obj->offs, obj->size, (const void *) obj->next);
  3566. }
  3567. void ggml_print_objects(const struct ggml_context * ctx) {
  3568. struct ggml_object * obj = ctx->objects_begin;
  3569. GGML_PRINT("%s: objects in context %p:\n", __func__, (const void *) ctx);
  3570. while (obj != NULL) {
  3571. ggml_print_object(obj);
  3572. obj = obj->next;
  3573. }
  3574. GGML_PRINT("%s: --- end ---\n", __func__);
  3575. }
  3576. int64_t ggml_nelements(const struct ggml_tensor * tensor) {
  3577. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  3578. return tensor->ne[0]*tensor->ne[1]*tensor->ne[2]*tensor->ne[3];
  3579. }
  3580. int64_t ggml_nrows(const struct ggml_tensor * tensor) {
  3581. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  3582. return tensor->ne[1]*tensor->ne[2]*tensor->ne[3];
  3583. }
  3584. size_t ggml_nbytes(const struct ggml_tensor * tensor) {
  3585. size_t nbytes;
  3586. size_t blck_size = ggml_blck_size(tensor->type);
  3587. if (blck_size == 1) {
  3588. nbytes = ggml_type_size(tensor->type);
  3589. for (int i = 0; i < GGML_MAX_DIMS; ++i) {
  3590. nbytes += (tensor->ne[i] - 1)*tensor->nb[i];
  3591. }
  3592. }
  3593. else {
  3594. nbytes = tensor->ne[0]*tensor->nb[0]/blck_size;
  3595. for (int i = 1; i < GGML_MAX_DIMS; ++i) {
  3596. nbytes += (tensor->ne[i] - 1)*tensor->nb[i];
  3597. }
  3598. }
  3599. return nbytes;
  3600. }
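// NOTE: for types with blck_size == 1 the result is type_size + sum_i (ne[i]-1)*nb[i],
// which also covers strided/padded views; for block-quantized types the innermost
// dimension contributes ne[0]*nb[0]/blck_size instead. E.g. a contiguous 4x3 F32 tensor
// has nb = {4, 16, 48, 48} and nbytes = 4 + 3*4 + 2*16 = 48 bytes, i.e. 12 floats.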
  3601. size_t ggml_nbytes_pad(const struct ggml_tensor * tensor) {
  3602. return GGML_PAD(ggml_nbytes(tensor), GGML_MEM_ALIGN);
  3603. }
  3604. size_t ggml_nbytes_split(const struct ggml_tensor * tensor, int nrows_split) {
  3605. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  3606. return (nrows_split*tensor->ne[0]*ggml_type_size(tensor->type))/ggml_blck_size(tensor->type);
  3607. }
  3608. int ggml_blck_size(enum ggml_type type) {
  3609. return type_traits[type].blck_size;
  3610. }
  3611. size_t ggml_type_size(enum ggml_type type) {
  3612. return type_traits[type].type_size;
  3613. }
  3614. float ggml_type_sizef(enum ggml_type type) {
  3615. return ((float)(type_traits[type].type_size))/type_traits[type].blck_size;
  3616. }
  3617. const char * ggml_type_name(enum ggml_type type) {
  3618. return type_traits[type].type_name;
  3619. }
  3620. bool ggml_is_quantized(enum ggml_type type) {
  3621. return type_traits[type].is_quantized;
  3622. }
  3623. const char * ggml_op_name(enum ggml_op op) {
  3624. return GGML_OP_NAME[op];
  3625. }
  3626. const char * ggml_op_symbol(enum ggml_op op) {
  3627. return GGML_OP_SYMBOL[op];
  3628. }
  3629. size_t ggml_element_size(const struct ggml_tensor * tensor) {
  3630. return ggml_type_size(tensor->type);
  3631. }
  3632. static inline bool ggml_is_scalar(const struct ggml_tensor * tensor) {
  3633. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  3634. return tensor->ne[0] == 1 && tensor->ne[1] == 1 && tensor->ne[2] == 1 && tensor->ne[3] == 1;
  3635. }
  3636. static inline bool ggml_is_vector(const struct ggml_tensor * tensor) {
  3637. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  3638. return tensor->ne[1] == 1 && tensor->ne[2] == 1 && tensor->ne[3] == 1;
  3639. }
  3640. static inline bool ggml_is_matrix(const struct ggml_tensor * tensor) {
  3641. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  3642. return tensor->ne[2] == 1 && tensor->ne[3] == 1;
  3643. }
  3644. static inline bool ggml_can_mul_mat(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
  3645. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  3646. return (t0->ne[0] == t1->ne[0]) &&
  3647. (t1->ne[2]%t0->ne[2] == 0) && // verify t0 is broadcastable
  3648. (t1->ne[3]%t0->ne[3] == 0);
  3649. }
  3650. static inline bool ggml_can_out_prod(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
  3651. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  3652. return (t0->ne[1] == t1->ne[1]) &&
  3653. (t1->ne[2]%t0->ne[2] == 0) && // verify t0 is broadcastable
  3654. (t1->ne[3]%t0->ne[3] == 0);
  3655. }
  3656. enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype) {
  3657. enum ggml_type wtype = GGML_TYPE_COUNT;
  3658. switch (ftype) {
  3659. case GGML_FTYPE_ALL_F32: wtype = GGML_TYPE_F32; break;
  3660. case GGML_FTYPE_MOSTLY_F16: wtype = GGML_TYPE_F16; break;
  3661. case GGML_FTYPE_MOSTLY_Q4_0: wtype = GGML_TYPE_Q4_0; break;
  3662. case GGML_FTYPE_MOSTLY_Q4_1: wtype = GGML_TYPE_Q4_1; break;
  3663. case GGML_FTYPE_MOSTLY_Q5_0: wtype = GGML_TYPE_Q5_0; break;
  3664. case GGML_FTYPE_MOSTLY_Q5_1: wtype = GGML_TYPE_Q5_1; break;
  3665. case GGML_FTYPE_MOSTLY_Q8_0: wtype = GGML_TYPE_Q8_0; break;
  3666. case GGML_FTYPE_MOSTLY_Q2_K: wtype = GGML_TYPE_Q2_K; break;
  3667. case GGML_FTYPE_MOSTLY_Q3_K: wtype = GGML_TYPE_Q3_K; break;
  3668. case GGML_FTYPE_MOSTLY_Q4_K: wtype = GGML_TYPE_Q4_K; break;
  3669. case GGML_FTYPE_MOSTLY_Q5_K: wtype = GGML_TYPE_Q5_K; break;
  3670. case GGML_FTYPE_MOSTLY_Q6_K: wtype = GGML_TYPE_Q6_K; break;
  3671. case GGML_FTYPE_UNKNOWN: wtype = GGML_TYPE_COUNT; break;
  3672. case GGML_FTYPE_MOSTLY_Q4_1_SOME_F16: wtype = GGML_TYPE_COUNT; break;
  3673. }
  3674. GGML_ASSERT(wtype != GGML_TYPE_COUNT);
  3675. return wtype;
  3676. }
  3677. size_t ggml_tensor_overhead(void) {
  3678. return GGML_OBJECT_SIZE + GGML_TENSOR_SIZE;
  3679. }
  3680. bool ggml_is_transposed(const struct ggml_tensor * tensor) {
  3681. return tensor->nb[0] > tensor->nb[1];
  3682. }
  3683. bool ggml_is_contiguous(const struct ggml_tensor * tensor) {
  3684. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  3685. return
  3686. tensor->nb[0] == ggml_type_size(tensor->type) &&
  3687. tensor->nb[1] == (tensor->nb[0]*tensor->ne[0])/ggml_blck_size(tensor->type) &&
  3688. tensor->nb[2] == tensor->nb[1]*tensor->ne[1] &&
  3689. tensor->nb[3] == tensor->nb[2]*tensor->ne[2];
  3690. }
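// NOTE: the nb[] strides are in bytes: nb[0] is the element (or block) size, nb[1] is the
// byte stride between rows (divided by the block size for quantized types, since ne[0]
// counts elements while storage is per block), and nb[2], nb[3] multiply up from there.
// A tensor is contiguous exactly when these relations hold; transposing or permuting a
// view only swaps strides, which is what ggml_is_transposed/ggml_is_permuted detect.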
  3691. static inline bool ggml_is_contiguous_except_dim_1(const struct ggml_tensor * tensor) {
  3692. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  3693. return
  3694. tensor->nb[0] == ggml_type_size(tensor->type) &&
  3695. tensor->nb[2] == tensor->nb[1]*tensor->ne[1] &&
  3696. tensor->nb[3] == tensor->nb[2]*tensor->ne[2];
  3697. }
  3698. bool ggml_is_permuted(const struct ggml_tensor * tensor) {
  3699. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  3700. return tensor->nb[0] > tensor->nb[1] || tensor->nb[1] > tensor->nb[2] || tensor->nb[2] > tensor->nb[3];
  3701. }
  3702. static inline bool ggml_is_padded_1d(const struct ggml_tensor * tensor) {
  3703. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  3704. return
  3705. tensor->nb[0] == ggml_type_size(tensor->type) &&
  3706. tensor->nb[2] == tensor->nb[1]*tensor->ne[1] &&
  3707. tensor->nb[3] == tensor->nb[2]*tensor->ne[2];
  3708. }
  3709. bool ggml_are_same_shape(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
  3710. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  3711. return
  3712. (t0->ne[0] == t1->ne[0] ) &&
  3713. (t0->ne[1] == t1->ne[1] ) &&
  3714. (t0->ne[2] == t1->ne[2] ) &&
  3715. (t0->ne[3] == t1->ne[3] );
  3716. }
3717. // check if t1 can be represented as a repetition of t0
  3718. static inline bool ggml_can_repeat(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
  3719. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  3720. return
  3721. (t1->ne[0]%t0->ne[0] == 0) &&
  3722. (t1->ne[1]%t0->ne[1] == 0) &&
  3723. (t1->ne[2]%t0->ne[2] == 0) &&
  3724. (t1->ne[3]%t0->ne[3] == 0);
  3725. }
  3726. static inline bool ggml_can_repeat_rows(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
  3727. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  3728. return (t0->ne[0] == t1->ne[0]) && ggml_can_repeat(t0, t1);
  3729. }
  3730. static inline int ggml_up32(int n) {
  3731. return (n + 31) & ~31;
  3732. }
  3733. //static inline int ggml_up64(int n) {
  3734. // return (n + 63) & ~63;
  3735. //}
  3736. static inline int ggml_up(int n, int m) {
  3737. // assert m is a power of 2
  3738. GGML_ASSERT((m & (m - 1)) == 0);
  3739. return (n + m - 1) & ~(m - 1);
  3740. }
  3741. // assert that pointer is aligned to GGML_MEM_ALIGN
  3742. #define ggml_assert_aligned(ptr) \
  3743. GGML_ASSERT(((uintptr_t) (ptr))%GGML_MEM_ALIGN == 0)
  3744. ////////////////////////////////////////////////////////////////////////////////
  3745. struct ggml_context * ggml_init(struct ggml_init_params params) {
  3746. // make this function thread safe
  3747. ggml_critical_section_start();
  3748. static bool is_first_call = true;
  3749. if (is_first_call) {
  3750. // initialize time system (required on Windows)
  3751. ggml_time_init();
  3752. // initialize GELU, Quick GELU, SILU and EXP F32 tables
  3753. {
  3754. const uint64_t t_start = ggml_time_us(); UNUSED(t_start);
  3755. ggml_fp16_t ii;
  3756. for (int i = 0; i < (1 << 16); ++i) {
  3757. uint16_t ui = i;
  3758. memcpy(&ii, &ui, sizeof(ii));
  3759. const float f = table_f32_f16[i] = GGML_COMPUTE_FP16_TO_FP32(ii);
  3760. table_gelu_f16[i] = GGML_FP32_TO_FP16(ggml_gelu_f32(f));
  3761. table_gelu_quick_f16[i] = GGML_FP32_TO_FP16(ggml_gelu_quick_f32(f));
  3762. table_silu_f16[i] = GGML_FP32_TO_FP16(ggml_silu_f32(f));
  3763. table_exp_f16[i] = GGML_FP32_TO_FP16(expf(f));
  3764. }
  3765. const uint64_t t_end = ggml_time_us(); UNUSED(t_end);
  3766. GGML_PRINT_DEBUG("%s: GELU, Quick GELU, SILU and EXP tables initialized in %f ms\n", __func__, (t_end - t_start)/1000.0f);
  3767. }
  3768. // initialize g_state
  3769. {
  3770. const uint64_t t_start = ggml_time_us(); UNUSED(t_start);
  3771. g_state = (struct ggml_state) {
  3772. /*.contexts =*/ { { 0 } },
  3773. /*.numa =*/ {
  3774. .n_nodes = 0,
  3775. .total_cpus = 0,
  3776. },
  3777. };
  3778. for (int i = 0; i < GGML_MAX_CONTEXTS; ++i) {
  3779. g_state.contexts[i].used = false;
  3780. }
  3781. const uint64_t t_end = ggml_time_us(); UNUSED(t_end);
  3782. GGML_PRINT_DEBUG("%s: g_state initialized in %f ms\n", __func__, (t_end - t_start)/1000.0f);
  3783. }
  3784. #if defined(GGML_USE_CUBLAS)
  3785. ggml_init_cublas();
  3786. #elif defined(GGML_USE_CLBLAST)
  3787. ggml_cl_init();
  3788. #endif
  3789. ggml_setup_op_has_task_pass();
  3790. is_first_call = false;
  3791. }
  3792. // find non-used context in g_state
  3793. struct ggml_context * ctx = NULL;
  3794. for (int i = 0; i < GGML_MAX_CONTEXTS; i++) {
  3795. if (!g_state.contexts[i].used) {
  3796. g_state.contexts[i].used = true;
  3797. ctx = &g_state.contexts[i].context;
  3798. GGML_PRINT_DEBUG("%s: found unused context %d\n", __func__, i);
  3799. break;
  3800. }
  3801. }
  3802. if (ctx == NULL) {
  3803. GGML_PRINT_DEBUG("%s: no unused context found\n", __func__);
  3804. ggml_critical_section_end();
  3805. return NULL;
  3806. }
3807. // allow calling ggml_init with 0 size
  3808. if (params.mem_size == 0) {
  3809. params.mem_size = GGML_MEM_ALIGN;
  3810. }
  3811. const size_t mem_size = params.mem_buffer ? params.mem_size : GGML_PAD(params.mem_size, GGML_MEM_ALIGN);
  3812. *ctx = (struct ggml_context) {
  3813. /*.mem_size =*/ mem_size,
  3814. /*.mem_buffer =*/ params.mem_buffer ? params.mem_buffer : GGML_ALIGNED_MALLOC(mem_size),
  3815. /*.mem_buffer_owned =*/ params.mem_buffer ? false : true,
  3816. /*.no_alloc =*/ params.no_alloc,
  3817. /*.no_alloc_save =*/ params.no_alloc,
  3818. /*.n_objects =*/ 0,
  3819. /*.objects_begin =*/ NULL,
  3820. /*.objects_end =*/ NULL,
  3821. /*.scratch =*/ { 0, 0, NULL, },
  3822. /*.scratch_save =*/ { 0, 0, NULL, },
  3823. };
  3824. GGML_ASSERT(ctx->mem_buffer != NULL);
  3825. ggml_assert_aligned(ctx->mem_buffer);
  3826. GGML_PRINT_DEBUG("%s: context initialized\n", __func__);
  3827. ggml_critical_section_end();
  3828. return ctx;
  3829. }
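// NOTE: typical usage of the context API, as an illustrative sketch only (the size is
// arbitrary and the ggml_init_params fields are per ggml.h):
//
//     struct ggml_init_params params = {
//         .mem_size   = 16*1024*1024,
//         .mem_buffer = NULL,    // let ggml allocate and own the pool
//         .no_alloc   = false,
//     };
//     struct ggml_context * ctx = ggml_init(params);
//     struct ggml_tensor  * a   = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 4, 3);
//     // ... build and compute a graph ...
//     ggml_free(ctx);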
  3830. void ggml_free(struct ggml_context * ctx) {
  3831. // make this function thread safe
  3832. ggml_critical_section_start();
  3833. bool found = false;
  3834. for (int i = 0; i < GGML_MAX_CONTEXTS; i++) {
  3835. if (&g_state.contexts[i].context == ctx) {
  3836. g_state.contexts[i].used = false;
  3837. GGML_PRINT_DEBUG("%s: context %d has been freed. memory used = %zu\n",
  3838. __func__, i, ggml_used_mem(ctx));
  3839. if (ctx->mem_buffer_owned) {
  3840. GGML_ALIGNED_FREE(ctx->mem_buffer);
  3841. }
  3842. found = true;
  3843. break;
  3844. }
  3845. }
  3846. if (!found) {
  3847. GGML_PRINT_DEBUG("%s: context not found\n", __func__);
  3848. }
  3849. ggml_critical_section_end();
  3850. }
  3851. size_t ggml_used_mem(const struct ggml_context * ctx) {
  3852. return ctx->objects_end == NULL ? 0 : ctx->objects_end->offs + ctx->objects_end->size;
  3853. }
  3854. size_t ggml_set_scratch(struct ggml_context * ctx, struct ggml_scratch scratch) {
  3855. const size_t result = ctx->scratch.data ? ctx->scratch.offs : 0;
  3856. ctx->scratch = scratch;
  3857. return result;
  3858. }
  3859. bool ggml_get_no_alloc(struct ggml_context * ctx) {
  3860. return ctx->no_alloc;
  3861. }
  3862. void ggml_set_no_alloc(struct ggml_context * ctx, bool no_alloc) {
  3863. ctx->no_alloc = no_alloc;
  3864. }
  3865. void * ggml_get_mem_buffer(const struct ggml_context * ctx) {
  3866. return ctx->mem_buffer;
  3867. }
  3868. size_t ggml_get_mem_size(const struct ggml_context * ctx) {
  3869. return ctx->mem_size;
  3870. }
  3871. size_t ggml_get_max_tensor_size(const struct ggml_context * ctx) {
  3872. size_t max_size = 0;
  3873. struct ggml_object * obj = ctx->objects_begin;
  3874. while (obj != NULL) {
  3875. if (obj->type == GGML_OBJECT_TENSOR) {
  3876. struct ggml_tensor * tensor = (struct ggml_tensor *) ((char *) ctx->mem_buffer + obj->offs);
  3877. const size_t size = ggml_nbytes(tensor);
  3878. if (max_size < size) {
  3879. max_size = size;
  3880. }
  3881. }
  3882. obj = obj->next;
  3883. }
  3884. return max_size;
  3885. }
  3886. // IMPORTANT:
  3887. // when creating "opt" tensors, always save and load the scratch buffer
3888. // this is an error-prone process, but it is necessary to support inplace
  3889. // operators when using scratch buffers
  3890. // TODO: implement a better way
  3891. static void ggml_scratch_save(struct ggml_context * ctx) {
  3892. // this is needed to allow opt tensors to store their data
  3893. // TODO: again, need to find a better way
  3894. ctx->no_alloc_save = ctx->no_alloc;
  3895. ctx->no_alloc = false;
  3896. ctx->scratch_save = ctx->scratch;
  3897. ctx->scratch.data = NULL;
  3898. }
  3899. static void ggml_scratch_load(struct ggml_context * ctx) {
  3900. ctx->no_alloc = ctx->no_alloc_save;
  3901. ctx->scratch = ctx->scratch_save;
  3902. }
  3903. ////////////////////////////////////////////////////////////////////////////////
  3904. static struct ggml_object * ggml_new_object(struct ggml_context * ctx, enum ggml_object_type type, size_t size) {
  3905. // always insert objects at the end of the context's memory pool
  3906. struct ggml_object * obj_cur = ctx->objects_end;
  3907. const size_t cur_offs = obj_cur == NULL ? 0 : obj_cur->offs;
  3908. const size_t cur_size = obj_cur == NULL ? 0 : obj_cur->size;
  3909. const size_t cur_end = cur_offs + cur_size;
  3910. // align to GGML_MEM_ALIGN
  3911. size_t size_needed = GGML_PAD(size, GGML_MEM_ALIGN);
  3912. char * const mem_buffer = ctx->mem_buffer;
  3913. struct ggml_object * const obj_new = (struct ggml_object *)(mem_buffer + cur_end);
  3914. if (cur_end + size_needed + GGML_OBJECT_SIZE > ctx->mem_size) {
  3915. GGML_PRINT("%s: not enough space in the context's memory pool (needed %zu, available %zu)\n",
  3916. __func__, cur_end + size_needed, ctx->mem_size);
  3917. assert(false);
  3918. return NULL;
  3919. }
  3920. *obj_new = (struct ggml_object) {
  3921. .offs = cur_end + GGML_OBJECT_SIZE,
  3922. .size = size_needed,
  3923. .next = NULL,
  3924. .type = type,
  3925. };
  3926. ggml_assert_aligned(mem_buffer + obj_new->offs);
  3927. if (obj_cur != NULL) {
  3928. obj_cur->next = obj_new;
  3929. } else {
  3930. // this is the first object in this context
  3931. ctx->objects_begin = obj_new;
  3932. }
  3933. ctx->objects_end = obj_new;
  3934. //printf("%s: inserted new object at %zu, size = %zu\n", __func__, cur_end, obj_new->size);
  3935. return obj_new;
  3936. }
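// NOTE: objects are laid out back to back in the context pool as
// [struct ggml_object header][payload of size_needed bytes], with offs measured from
// mem_buffer and pointing just past the header. The headers form a singly linked list
// from objects_begin to objects_end, which is how ggml_print_objects and
// ggml_get_max_tensor_size walk the pool; running out of space asserts instead of
// growing the buffer.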
  3937. static struct ggml_tensor * ggml_new_tensor_impl(
  3938. struct ggml_context * ctx,
  3939. enum ggml_type type,
  3940. int n_dims,
  3941. const int64_t * ne,
  3942. struct ggml_tensor * view_src,
  3943. size_t view_offs) {
  3944. assert(n_dims >= 1 && n_dims <= GGML_MAX_DIMS);
  3945. // find the base tensor and absolute offset
  3946. if (view_src != NULL && view_src->view_src != NULL) {
  3947. view_offs += view_src->view_offs;
  3948. view_src = view_src->view_src;
  3949. }
  3950. size_t data_size = ggml_type_size(type)*(ne[0]/ggml_blck_size(type));
  3951. for (int i = 1; i < n_dims; i++) {
  3952. data_size *= ne[i];
  3953. }
  3954. GGML_ASSERT(view_src == NULL || data_size + view_offs <= ggml_nbytes(view_src));
  3955. void * data = view_src != NULL ? view_src->data : NULL;
  3956. if (data != NULL) {
  3957. data = (char *) data + view_offs;
  3958. }
  3959. size_t obj_alloc_size = 0;
  3960. if (view_src == NULL && !ctx->no_alloc) {
  3961. if (ctx->scratch.data != NULL) {
  3962. // allocate tensor data in the scratch buffer
  3963. if (ctx->scratch.offs + data_size > ctx->scratch.size) {
  3964. GGML_PRINT("%s: not enough space in the scratch memory pool (needed %zu, available %zu)\n",
  3965. __func__, ctx->scratch.offs + data_size, ctx->scratch.size);
  3966. assert(false);
  3967. return NULL;
  3968. }
  3969. data = (char * const) ctx->scratch.data + ctx->scratch.offs;
  3970. ctx->scratch.offs += data_size;
  3971. } else {
  3972. // allocate tensor data in the context's memory pool
  3973. obj_alloc_size = data_size;
  3974. }
  3975. }
  3976. struct ggml_object * const obj_new = ggml_new_object(ctx, GGML_OBJECT_TENSOR, GGML_TENSOR_SIZE + obj_alloc_size);
  3977. // TODO: for recoverable errors, we would need to free the data allocated from the scratch buffer here
  3978. struct ggml_tensor * const result = (struct ggml_tensor *)((char *)ctx->mem_buffer + obj_new->offs);
  3979. *result = (struct ggml_tensor) {
  3980. /*.type =*/ type,
  3981. /*.backend =*/ GGML_BACKEND_CPU,
  3982. /*.n_dims =*/ n_dims,
  3983. /*.ne =*/ { 1, 1, 1, 1 },
  3984. /*.nb =*/ { 0, 0, 0, 0 },
  3985. /*.op =*/ GGML_OP_NONE,
  3986. /*.op_params =*/ { 0 },
  3987. /*.is_param =*/ false,
  3988. /*.grad =*/ NULL,
  3989. /*.src =*/ { NULL },
  3990. /*.perf_runs =*/ 0,
  3991. /*.perf_cycles =*/ 0,
  3992. /*.perf_time_us =*/ 0,
  3993. /*.view_src =*/ view_src,
  3994. /*.view_offs =*/ view_offs,
  3995. /*.data =*/ obj_alloc_size > 0 ? (void *)(result + 1) : data,
  3996. /*.name =*/ { 0 },
  3997. /*.extra =*/ NULL,
  3998. /*.padding =*/ { 0 },
  3999. };
  4000. // TODO: this should not be needed as long as we don't rely on aligned SIMD loads
  4001. //ggml_assert_aligned(result->data);
  4002. for (int i = 0; i < n_dims; i++) {
  4003. result->ne[i] = ne[i];
  4004. }
  4005. result->nb[0] = ggml_type_size(type);
  4006. result->nb[1] = result->nb[0]*(result->ne[0]/ggml_blck_size(type));
  4007. for (int i = 2; i < GGML_MAX_DIMS; i++) {
  4008. result->nb[i] = result->nb[i - 1]*result->ne[i - 1];
  4009. }
  4010. ctx->n_objects++;
  4011. return result;
  4012. }
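// NOTE: when view_src is given, the chain of views is collapsed to the ultimate base
// tensor and the offsets are accumulated, so every view references base data directly
// (data = view_src->data + view_offs) and no new storage is allocated. For owning
// tensors the strides follow from the shape: nb[0] = type_size,
// nb[1] = nb[0]*ne[0]/blck_size, and nb[i] = nb[i-1]*ne[i-1] for the remaining
// dimensions, i.e. a contiguous layout in ne-order.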
  4013. struct ggml_tensor * ggml_new_tensor(
  4014. struct ggml_context * ctx,
  4015. enum ggml_type type,
  4016. int n_dims,
  4017. const int64_t * ne) {
  4018. return ggml_new_tensor_impl(ctx, type, n_dims, ne, NULL, 0);
  4019. }
  4020. struct ggml_tensor * ggml_new_tensor_1d(
  4021. struct ggml_context * ctx,
  4022. enum ggml_type type,
  4023. int64_t ne0) {
  4024. return ggml_new_tensor(ctx, type, 1, &ne0);
  4025. }
  4026. struct ggml_tensor * ggml_new_tensor_2d(
  4027. struct ggml_context * ctx,
  4028. enum ggml_type type,
  4029. int64_t ne0,
  4030. int64_t ne1) {
  4031. const int64_t ne[2] = { ne0, ne1 };
  4032. return ggml_new_tensor(ctx, type, 2, ne);
  4033. }
  4034. struct ggml_tensor * ggml_new_tensor_3d(
  4035. struct ggml_context * ctx,
  4036. enum ggml_type type,
  4037. int64_t ne0,
  4038. int64_t ne1,
  4039. int64_t ne2) {
  4040. const int64_t ne[3] = { ne0, ne1, ne2 };
  4041. return ggml_new_tensor(ctx, type, 3, ne);
  4042. }
  4043. struct ggml_tensor * ggml_new_tensor_4d(
  4044. struct ggml_context * ctx,
  4045. enum ggml_type type,
  4046. int64_t ne0,
  4047. int64_t ne1,
  4048. int64_t ne2,
  4049. int64_t ne3) {
  4050. const int64_t ne[4] = { ne0, ne1, ne2, ne3 };
  4051. return ggml_new_tensor(ctx, type, 4, ne);
  4052. }
  4053. struct ggml_tensor * ggml_new_i32(struct ggml_context * ctx, int32_t value) {
  4054. ggml_scratch_save(ctx);
  4055. struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 1);
  4056. ggml_scratch_load(ctx);
  4057. ggml_set_i32(result, value);
  4058. return result;
  4059. }
  4060. struct ggml_tensor * ggml_new_f32(struct ggml_context * ctx, float value) {
  4061. ggml_scratch_save(ctx);
  4062. struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
  4063. ggml_scratch_load(ctx);
  4064. ggml_set_f32(result, value);
  4065. return result;
  4066. }
  4067. struct ggml_tensor * ggml_dup_tensor(struct ggml_context * ctx, const struct ggml_tensor * src) {
  4068. return ggml_new_tensor(ctx, src->type, src->n_dims, src->ne);
  4069. }
  4070. static void ggml_set_op_params(struct ggml_tensor * tensor, const void * params, size_t params_size) {
  4071. GGML_ASSERT(tensor != NULL); // silence -Warray-bounds warnings
  4072. assert(params_size <= GGML_MAX_OP_PARAMS);
  4073. memcpy(tensor->op_params, params, params_size);
  4074. }
  4075. static int32_t ggml_get_op_params_i32(const struct ggml_tensor * tensor, uint32_t i) {
  4076. assert(i < GGML_MAX_OP_PARAMS / sizeof(int32_t));
  4077. return ((const int32_t *)(tensor->op_params))[i];
  4078. }
  4079. static void ggml_set_op_params_i32(struct ggml_tensor * tensor, uint32_t i, int32_t value) {
  4080. assert(i < GGML_MAX_OP_PARAMS / sizeof(int32_t));
  4081. ((int32_t *)(tensor->op_params))[i] = value;
  4082. }
  4083. struct ggml_tensor * ggml_set_zero(struct ggml_tensor * tensor) {
  4084. memset(tensor->data, 0, ggml_nbytes(tensor));
  4085. return tensor;
  4086. }
  4087. struct ggml_tensor * ggml_set_i32 (struct ggml_tensor * tensor, int32_t value) {
  4088. const int n = ggml_nrows(tensor);
  4089. const int nc = tensor->ne[0];
  4090. const size_t n1 = tensor->nb[1];
  4091. char * const data = tensor->data;
  4092. switch (tensor->type) {
  4093. case GGML_TYPE_I8:
  4094. {
  4095. assert(tensor->nb[0] == sizeof(int8_t));
  4096. for (int i = 0; i < n; i++) {
  4097. ggml_vec_set_i8(nc, (int8_t *)(data + i*n1), value);
  4098. }
  4099. } break;
  4100. case GGML_TYPE_I16:
  4101. {
  4102. assert(tensor->nb[0] == sizeof(int16_t));
  4103. for (int i = 0; i < n; i++) {
  4104. ggml_vec_set_i16(nc, (int16_t *)(data + i*n1), value);
  4105. }
  4106. } break;
  4107. case GGML_TYPE_I32:
  4108. {
  4109. assert(tensor->nb[0] == sizeof(int32_t));
  4110. for (int i = 0; i < n; i++) {
  4111. ggml_vec_set_i32(nc, (int32_t *)(data + i*n1), value);
  4112. }
  4113. } break;
  4114. case GGML_TYPE_F16:
  4115. {
  4116. assert(tensor->nb[0] == sizeof(ggml_fp16_t));
  4117. for (int i = 0; i < n; i++) {
  4118. ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), GGML_FP32_TO_FP16(value));
  4119. }
  4120. } break;
  4121. case GGML_TYPE_F32:
  4122. {
  4123. assert(tensor->nb[0] == sizeof(float));
  4124. for (int i = 0; i < n; i++) {
  4125. ggml_vec_set_f32(nc, (float *)(data + i*n1), value);
  4126. }
  4127. } break;
  4128. default:
  4129. {
  4130. GGML_ASSERT(false);
  4131. } break;
  4132. }
  4133. return tensor;
  4134. }
  4135. struct ggml_tensor * ggml_set_f32(struct ggml_tensor * tensor, float value) {
  4136. const int n = ggml_nrows(tensor);
  4137. const int nc = tensor->ne[0];
  4138. const size_t n1 = tensor->nb[1];
  4139. char * const data = tensor->data;
  4140. switch (tensor->type) {
  4141. case GGML_TYPE_I8:
  4142. {
  4143. assert(tensor->nb[0] == sizeof(int8_t));
  4144. for (int i = 0; i < n; i++) {
  4145. ggml_vec_set_i8(nc, (int8_t *)(data + i*n1), value);
  4146. }
  4147. } break;
  4148. case GGML_TYPE_I16:
  4149. {
  4150. assert(tensor->nb[0] == sizeof(int16_t));
  4151. for (int i = 0; i < n; i++) {
  4152. ggml_vec_set_i16(nc, (int16_t *)(data + i*n1), value);
  4153. }
  4154. } break;
  4155. case GGML_TYPE_I32:
  4156. {
  4157. assert(tensor->nb[0] == sizeof(int32_t));
  4158. for (int i = 0; i < n; i++) {
  4159. ggml_vec_set_i32(nc, (int32_t *)(data + i*n1), value);
  4160. }
  4161. } break;
  4162. case GGML_TYPE_F16:
  4163. {
  4164. assert(tensor->nb[0] == sizeof(ggml_fp16_t));
  4165. for (int i = 0; i < n; i++) {
  4166. ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), GGML_FP32_TO_FP16(value));
  4167. }
  4168. } break;
  4169. case GGML_TYPE_F32:
  4170. {
  4171. assert(tensor->nb[0] == sizeof(float));
  4172. for (int i = 0; i < n; i++) {
  4173. ggml_vec_set_f32(nc, (float *)(data + i*n1), value);
  4174. }
  4175. } break;
  4176. default:
  4177. {
  4178. GGML_ASSERT(false);
  4179. } break;
  4180. }
  4181. return tensor;
  4182. }
  4183. void ggml_unravel_index(const struct ggml_tensor * tensor, int64_t i, int64_t * i0, int64_t * i1, int64_t * i2, int64_t * i3) {
  4184. const int64_t ne2 = tensor->ne[2];
  4185. const int64_t ne1 = tensor->ne[1];
  4186. const int64_t ne0 = tensor->ne[0];
  4187. const int64_t i3_ = (i/(ne2*ne1*ne0));
  4188. const int64_t i2_ = (i - i3_*ne2*ne1*ne0)/(ne1*ne0);
  4189. const int64_t i1_ = (i - i3_*ne2*ne1*ne0 - i2_*ne1*ne0)/ne0;
  4190. const int64_t i0_ = (i - i3_*ne2*ne1*ne0 - i2_*ne1*ne0 - i1_*ne0);
  4191. if (i0) {
  4192. * i0 = i0_;
  4193. }
  4194. if (i1) {
  4195. * i1 = i1_;
  4196. }
  4197. if (i2) {
  4198. * i2 = i2_;
  4199. }
  4200. if (i3) {
  4201. * i3 = i3_;
  4202. }
  4203. }
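// NOTE: ggml_unravel_index inverts the flattening
// i = ((i3*ne[2] + i2)*ne[1] + i1)*ne[0] + i0 of a contiguous tensor. E.g. with
// ne = {4, 3, 2, 1} and i = 17: i3 = 17/24 = 0, i2 = 17/12 = 1, i1 = (17 - 12)/4 = 1,
// i0 = 17 - 12 - 4 = 1, i.e. (i0, i1, i2, i3) = (1, 1, 1, 0).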
  4204. int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i) {
  4205. if (!ggml_is_contiguous(tensor)) {
  4206. int64_t id[4] = { 0, 0, 0, 0 };
  4207. ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]);
  4208. return ggml_get_i32_nd(tensor, id[0], id[1], id[2], id[3]);
  4209. }
  4210. switch (tensor->type) {
  4211. case GGML_TYPE_I8:
  4212. {
  4213. GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
  4214. return ((int8_t *)(tensor->data))[i];
  4215. }
  4216. case GGML_TYPE_I16:
  4217. {
  4218. GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
  4219. return ((int16_t *)(tensor->data))[i];
  4220. }
  4221. case GGML_TYPE_I32:
  4222. {
  4223. GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
  4224. return ((int32_t *)(tensor->data))[i];
  4225. }
  4226. case GGML_TYPE_F16:
  4227. {
  4228. GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
  4229. return GGML_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]);
  4230. }
  4231. case GGML_TYPE_F32:
  4232. {
  4233. GGML_ASSERT(tensor->nb[0] == sizeof(float));
  4234. return ((float *)(tensor->data))[i];
  4235. }
  4236. default:
  4237. {
  4238. GGML_ASSERT(false);
  4239. }
  4240. }
  4241. return 0.0f;
  4242. }
  4243. void ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value) {
  4244. if (!ggml_is_contiguous(tensor)) {
  4245. int64_t id[4] = { 0, 0, 0, 0 };
  4246. ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]);
  4247. ggml_set_i32_nd(tensor, id[0], id[1], id[2], id[3], value);
  4248. return;
  4249. }
  4250. switch (tensor->type) {
  4251. case GGML_TYPE_I8:
  4252. {
  4253. GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
  4254. ((int8_t *)(tensor->data))[i] = value;
  4255. } break;
  4256. case GGML_TYPE_I16:
  4257. {
  4258. GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
  4259. ((int16_t *)(tensor->data))[i] = value;
  4260. } break;
  4261. case GGML_TYPE_I32:
  4262. {
  4263. GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
  4264. ((int32_t *)(tensor->data))[i] = value;
  4265. } break;
  4266. case GGML_TYPE_F16:
  4267. {
  4268. GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
  4269. ((ggml_fp16_t *)(tensor->data))[i] = GGML_FP32_TO_FP16(value);
  4270. } break;
  4271. case GGML_TYPE_F32:
  4272. {
  4273. GGML_ASSERT(tensor->nb[0] == sizeof(float));
  4274. ((float *)(tensor->data))[i] = value;
  4275. } break;
  4276. default:
  4277. {
  4278. GGML_ASSERT(false);
  4279. } break;
  4280. }
  4281. }
  4282. int32_t ggml_get_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3) {
  4283. void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3];
  4284. switch (tensor->type) {
  4285. case GGML_TYPE_I8:
  4286. return ((int8_t *) data)[0];
  4287. case GGML_TYPE_I16:
  4288. return ((int16_t *) data)[0];
  4289. case GGML_TYPE_I32:
  4290. return ((int32_t *) data)[0];
  4291. case GGML_TYPE_F16:
  4292. return GGML_FP16_TO_FP32(((ggml_fp16_t *) data)[0]);
  4293. case GGML_TYPE_F32:
  4294. return ((float *) data)[0];
  4295. default:
  4296. GGML_ASSERT(false);
  4297. }
4298. return 0;
  4299. }
  4300. void ggml_set_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, int32_t value) {
  4301. void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3];
  4302. switch (tensor->type) {
  4303. case GGML_TYPE_I8:
  4304. {
  4305. ((int8_t *)(data))[0] = value;
  4306. } break;
  4307. case GGML_TYPE_I16:
  4308. {
  4309. ((int16_t *)(data))[0] = value;
  4310. } break;
  4311. case GGML_TYPE_I32:
  4312. {
  4313. ((int32_t *)(data))[0] = value;
  4314. } break;
  4315. case GGML_TYPE_F16:
  4316. {
  4317. ((ggml_fp16_t *)(data))[0] = GGML_FP32_TO_FP16(value);
  4318. } break;
  4319. case GGML_TYPE_F32:
  4320. {
  4321. ((float *)(data))[0] = value;
  4322. } break;
  4323. default:
  4324. {
  4325. GGML_ASSERT(false);
  4326. } break;
  4327. }
  4328. }
  4329. float ggml_get_f32_1d(const struct ggml_tensor * tensor, int i) {
  4330. if (!ggml_is_contiguous(tensor)) {
  4331. int64_t id[4] = { 0, 0, 0, 0 };
  4332. ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]);
  4333. return ggml_get_f32_nd(tensor, id[0], id[1], id[2], id[3]);
  4334. }
  4335. switch (tensor->type) {
  4336. case GGML_TYPE_I8:
  4337. {
  4338. GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
  4339. return ((int8_t *)(tensor->data))[i];
  4340. }
  4341. case GGML_TYPE_I16:
  4342. {
  4343. GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
  4344. return ((int16_t *)(tensor->data))[i];
  4345. }
  4346. case GGML_TYPE_I32:
  4347. {
  4348. GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
  4349. return ((int32_t *)(tensor->data))[i];
  4350. }
  4351. case GGML_TYPE_F16:
  4352. {
  4353. GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
  4354. return GGML_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]);
  4355. }
  4356. case GGML_TYPE_F32:
  4357. {
  4358. GGML_ASSERT(tensor->nb[0] == sizeof(float));
  4359. return ((float *)(tensor->data))[i];
  4360. }
  4361. default:
  4362. {
  4363. GGML_ASSERT(false);
  4364. }
  4365. }
  4366. return 0.0f;
  4367. }
  4368. void ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value) {
  4369. if (!ggml_is_contiguous(tensor)) {
  4370. int64_t id[4] = { 0, 0, 0, 0 };
  4371. ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]);
  4372. ggml_set_f32_nd(tensor, id[0], id[1], id[2], id[3], value);
  4373. return;
  4374. }
  4375. switch (tensor->type) {
  4376. case GGML_TYPE_I8:
  4377. {
  4378. GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
  4379. ((int8_t *)(tensor->data))[i] = value;
  4380. } break;
  4381. case GGML_TYPE_I16:
  4382. {
  4383. GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
  4384. ((int16_t *)(tensor->data))[i] = value;
  4385. } break;
  4386. case GGML_TYPE_I32:
  4387. {
  4388. GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
  4389. ((int32_t *)(tensor->data))[i] = value;
  4390. } break;
  4391. case GGML_TYPE_F16:
  4392. {
  4393. GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
  4394. ((ggml_fp16_t *)(tensor->data))[i] = GGML_FP32_TO_FP16(value);
  4395. } break;
  4396. case GGML_TYPE_F32:
  4397. {
  4398. GGML_ASSERT(tensor->nb[0] == sizeof(float));
  4399. ((float *)(tensor->data))[i] = value;
  4400. } break;
  4401. default:
  4402. {
  4403. GGML_ASSERT(false);
  4404. } break;
  4405. }
  4406. }
  4407. float ggml_get_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3) {
  4408. void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3];
  4409. switch (tensor->type) {
  4410. case GGML_TYPE_I8:
  4411. return ((int8_t *) data)[0];
  4412. case GGML_TYPE_I16:
  4413. return ((int16_t *) data)[0];
  4414. case GGML_TYPE_I32:
  4415. return ((int32_t *) data)[0];
  4416. case GGML_TYPE_F16:
  4417. return GGML_FP16_TO_FP32(((ggml_fp16_t *) data)[0]);
  4418. case GGML_TYPE_F32:
  4419. return ((float *) data)[0];
  4420. default:
  4421. GGML_ASSERT(false);
  4422. }
  4423. return 0.0f;
  4424. }
  4425. void ggml_set_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, float value) {
  4426. void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3];
  4427. switch (tensor->type) {
  4428. case GGML_TYPE_I8:
  4429. {
  4430. ((int8_t *)(data))[0] = value;
  4431. } break;
  4432. case GGML_TYPE_I16:
  4433. {
  4434. ((int16_t *)(data))[0] = value;
  4435. } break;
  4436. case GGML_TYPE_I32:
  4437. {
  4438. ((int32_t *)(data))[0] = value;
  4439. } break;
  4440. case GGML_TYPE_F16:
  4441. {
  4442. ((ggml_fp16_t *)(data))[0] = GGML_FP32_TO_FP16(value);
  4443. } break;
  4444. case GGML_TYPE_F32:
  4445. {
  4446. ((float *)(data))[0] = value;
  4447. } break;
  4448. default:
  4449. {
  4450. GGML_ASSERT(false);
  4451. } break;
  4452. }
  4453. }
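// Usage sketch for the element accessors above (illustrative; `ctx` is assumed to be a
// valid context created elsewhere with ggml_init):
//     struct ggml_tensor * t = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 4, 3);
//     ggml_set_f32_nd(t, 1, 2, 0, 0, 7.5f);     // element at i0 = 1, i1 = 2
//     float v = ggml_get_f32_nd(t, 1, 2, 0, 0); // v == 7.5f
// The *_1d variants take a flat element index and fall back to ggml_unravel_index()
// when the tensor is not contiguous.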
  4454. void * ggml_get_data(const struct ggml_tensor * tensor) {
  4455. return tensor->data;
  4456. }
  4457. float * ggml_get_data_f32(const struct ggml_tensor * tensor) {
  4458. assert(tensor->type == GGML_TYPE_F32);
  4459. return (float *)(tensor->data);
  4460. }
  4461. enum ggml_unary_op ggml_get_unary_op(const struct ggml_tensor * tensor) {
  4462. GGML_ASSERT(tensor->op == GGML_OP_UNARY);
  4463. return (enum ggml_unary_op) ggml_get_op_params_i32(tensor, 0);
  4464. }
  4465. const char * ggml_get_name(const struct ggml_tensor * tensor) {
  4466. return tensor->name;
  4467. }
  4468. struct ggml_tensor * ggml_set_name(struct ggml_tensor * tensor, const char * name) {
  4469. strncpy(tensor->name, name, sizeof(tensor->name));
  4470. tensor->name[sizeof(tensor->name) - 1] = '\0';
  4471. return tensor;
  4472. }
  4473. struct ggml_tensor * ggml_format_name(struct ggml_tensor * tensor, const char * fmt, ...) {
  4474. va_list args;
  4475. va_start(args, fmt);
  4476. vsnprintf(tensor->name, sizeof(tensor->name), fmt, args);
  4477. va_end(args);
  4478. return tensor;
  4479. }
  4480. struct ggml_tensor * ggml_view_tensor(
  4481. struct ggml_context * ctx,
  4482. struct ggml_tensor * src) {
  4483. struct ggml_tensor * result = ggml_new_tensor_impl(ctx, src->type, src->n_dims, src->ne, src, 0);
  4484. ggml_format_name(result, "%s (view)", src->name);
  4485. for (int i = 0; i < GGML_MAX_DIMS; i++) {
  4486. result->nb[i] = src->nb[i];
  4487. }
  4488. return result;
  4489. }
  4490. struct ggml_tensor * ggml_get_tensor(struct ggml_context * ctx, const char * name) {
  4491. struct ggml_object * obj = ctx->objects_begin;
  4492. char * const mem_buffer = ctx->mem_buffer;
  4493. while (obj != NULL) {
  4494. if (obj->type == GGML_OBJECT_TENSOR) {
  4495. struct ggml_tensor * cur = (struct ggml_tensor *)(mem_buffer + obj->offs);
  4496. if (strcmp(cur->name, name) == 0) {
  4497. return cur;
  4498. }
  4499. }
  4500. obj = obj->next;
  4501. }
  4502. return NULL;
  4503. }
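// Usage sketch (illustrative; the tensor name is an arbitrary example): tensors are found
// by the name assigned via ggml_set_name()/ggml_format_name():
//     ggml_set_name(t, "token_embd.weight");
//     struct ggml_tensor * t2 = ggml_get_tensor(ctx, "token_embd.weight"); // t2 == t
// The lookup walks the context's object list, so it is linear in the number of
// allocated objects.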
  4504. ////////////////////////////////////////////////////////////////////////////////
  4505. // ggml_dup
  4506. static struct ggml_tensor * ggml_dup_impl(
  4507. struct ggml_context * ctx,
  4508. struct ggml_tensor * a,
  4509. bool inplace) {
  4510. bool is_node = false;
  4511. if (!inplace && (a->grad)) {
  4512. is_node = true;
  4513. }
  4514. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4515. result->op = GGML_OP_DUP;
  4516. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4517. result->src[0] = a;
  4518. return result;
  4519. }
  4520. struct ggml_tensor * ggml_dup(
  4521. struct ggml_context * ctx,
  4522. struct ggml_tensor * a) {
  4523. return ggml_dup_impl(ctx, a, false);
  4524. }
  4525. struct ggml_tensor * ggml_dup_inplace(
  4526. struct ggml_context * ctx,
  4527. struct ggml_tensor * a) {
  4528. return ggml_dup_impl(ctx, a, true);
  4529. }
  4530. // ggml_add
  4531. static struct ggml_tensor * ggml_add_impl(
  4532. struct ggml_context * ctx,
  4533. struct ggml_tensor * a,
  4534. struct ggml_tensor * b,
  4535. bool inplace) {
  4536. // TODO: support less-strict constraint
  4537. // GGML_ASSERT(ggml_can_repeat(b, a));
  4538. GGML_ASSERT(ggml_can_repeat_rows(b, a));
  4539. bool is_node = false;
  4540. if (!inplace && (a->grad || b->grad)) {
  4541. // TODO: support backward pass for broadcasting
  4542. GGML_ASSERT(ggml_are_same_shape(a, b));
  4543. is_node = true;
  4544. }
  4545. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4546. result->op = GGML_OP_ADD;
  4547. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4548. result->src[0] = a;
  4549. result->src[1] = b;
  4550. return result;
  4551. }
  4552. struct ggml_tensor * ggml_add(
  4553. struct ggml_context * ctx,
  4554. struct ggml_tensor * a,
  4555. struct ggml_tensor * b) {
  4556. return ggml_add_impl(ctx, a, b, false);
  4557. }
  4558. struct ggml_tensor * ggml_add_inplace(
  4559. struct ggml_context * ctx,
  4560. struct ggml_tensor * a,
  4561. struct ggml_tensor * b) {
  4562. return ggml_add_impl(ctx, a, b, true);
  4563. }
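// Illustrative note: ggml_add() only records a node; nothing is computed until the graph
// is evaluated (e.g. ggml_build_forward_expand() followed by a graph compute call). The
// ggml_can_repeat_rows(b, a) check permits row-wise broadcasting of b into a, e.g. adding
// a bias with ne = {n} to activations with ne = {n, n_tokens}.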
  4564. // ggml_add_cast
  4565. static struct ggml_tensor * ggml_add_cast_impl(
  4566. struct ggml_context * ctx,
  4567. struct ggml_tensor * a,
  4568. struct ggml_tensor * b,
  4569. enum ggml_type type) {
  4570. // TODO: support less-strict constraint
  4571. // GGML_ASSERT(ggml_can_repeat(b, a));
  4572. GGML_ASSERT(ggml_can_repeat_rows(b, a));
  4573. GGML_ASSERT(ggml_is_quantized(a->type)); // currently only supported for quantized input
  4574. bool is_node = false;
  4575. if (a->grad || b->grad) {
  4576. // TODO: support backward pass for broadcasting
  4577. GGML_ASSERT(ggml_are_same_shape(a, b));
  4578. is_node = true;
  4579. }
  4580. struct ggml_tensor * result = ggml_new_tensor(ctx, type, a->n_dims, a->ne);
  4581. result->op = GGML_OP_ADD;
  4582. result->grad = is_node ? ggml_new_tensor(ctx, GGML_TYPE_F32, a->n_dims, a->ne) : NULL;
  4583. result->src[0] = a;
  4584. result->src[1] = b;
  4585. return result;
  4586. }
  4587. struct ggml_tensor * ggml_add_cast(
  4588. struct ggml_context * ctx,
  4589. struct ggml_tensor * a,
  4590. struct ggml_tensor * b,
  4591. enum ggml_type type) {
  4592. return ggml_add_cast_impl(ctx, a, b, type);
  4593. }
  4594. // ggml_add1
  4595. static struct ggml_tensor * ggml_add1_impl(
  4596. struct ggml_context * ctx,
  4597. struct ggml_tensor * a,
  4598. struct ggml_tensor * b,
  4599. bool inplace) {
  4600. GGML_ASSERT(ggml_is_scalar(b));
  4601. GGML_ASSERT(ggml_is_padded_1d(a));
  4602. bool is_node = false;
  4603. if (a->grad || b->grad) {
  4604. is_node = true;
  4605. }
  4606. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4607. result->op = GGML_OP_ADD1;
  4608. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4609. result->src[0] = a;
  4610. result->src[1] = b;
  4611. return result;
  4612. }
  4613. struct ggml_tensor * ggml_add1(
  4614. struct ggml_context * ctx,
  4615. struct ggml_tensor * a,
  4616. struct ggml_tensor * b) {
  4617. return ggml_add1_impl(ctx, a, b, false);
  4618. }
  4619. struct ggml_tensor * ggml_add1_inplace(
  4620. struct ggml_context * ctx,
  4621. struct ggml_tensor * a,
  4622. struct ggml_tensor * b) {
  4623. return ggml_add1_impl(ctx, a, b, true);
  4624. }
  4625. // ggml_acc
  4626. static struct ggml_tensor * ggml_acc_impl(
  4627. struct ggml_context * ctx,
  4628. struct ggml_tensor * a,
  4629. struct ggml_tensor * b,
  4630. size_t nb1,
  4631. size_t nb2,
  4632. size_t nb3,
  4633. size_t offset,
  4634. bool inplace) {
  4635. GGML_ASSERT(ggml_nelements(b) <= ggml_nelements(a));
  4636. GGML_ASSERT(ggml_is_contiguous(a));
  4637. GGML_ASSERT(a->type == GGML_TYPE_F32);
  4638. GGML_ASSERT(b->type == GGML_TYPE_F32);
  4639. bool is_node = false;
  4640. if (!inplace && (a->grad || b->grad)) {
  4641. is_node = true;
  4642. }
  4643. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4644. int32_t params[] = { nb1, nb2, nb3, offset, inplace ? 1 : 0 };
  4645. ggml_set_op_params(result, params, sizeof(params));
  4646. result->op = GGML_OP_ACC;
  4647. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4648. result->src[0] = a;
  4649. result->src[1] = b;
  4650. return result;
  4651. }
  4652. struct ggml_tensor * ggml_acc(
  4653. struct ggml_context * ctx,
  4654. struct ggml_tensor * a,
  4655. struct ggml_tensor * b,
  4656. size_t nb1,
  4657. size_t nb2,
  4658. size_t nb3,
  4659. size_t offset) {
  4660. return ggml_acc_impl(ctx, a, b, nb1, nb2, nb3, offset, false);
  4661. }
  4662. struct ggml_tensor * ggml_acc_inplace(
  4663. struct ggml_context * ctx,
  4664. struct ggml_tensor * a,
  4665. struct ggml_tensor * b,
  4666. size_t nb1,
  4667. size_t nb2,
  4668. size_t nb3,
  4669. size_t offset) {
  4670. return ggml_acc_impl(ctx, a, b, nb1, nb2, nb3, offset, true);
  4671. }
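// Illustrative sketch (a, b and ctx are assumed to exist; both tensors must be F32 per the
// asserts above): ggml_acc() adds b into the view of a described by the byte strides
// nb1/nb2/nb3 and the byte offset, roughly "a[view] += b":
//     struct ggml_tensor * c = ggml_acc(ctx, a, b,
//             a->nb[1], a->nb[2], a->nb[3], 5*sizeof(float)); // start at element 5 of a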
  4672. // ggml_sub
  4673. static struct ggml_tensor * ggml_sub_impl(
  4674. struct ggml_context * ctx,
  4675. struct ggml_tensor * a,
  4676. struct ggml_tensor * b,
  4677. bool inplace) {
  4678. GGML_ASSERT(ggml_are_same_shape(a, b));
  4679. bool is_node = false;
  4680. if (!inplace && (a->grad || b->grad)) {
  4681. is_node = true;
  4682. }
  4683. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4684. result->op = GGML_OP_SUB;
  4685. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4686. result->src[0] = a;
  4687. result->src[1] = b;
  4688. return result;
  4689. }
  4690. struct ggml_tensor * ggml_sub(
  4691. struct ggml_context * ctx,
  4692. struct ggml_tensor * a,
  4693. struct ggml_tensor * b) {
  4694. return ggml_sub_impl(ctx, a, b, false);
  4695. }
  4696. struct ggml_tensor * ggml_sub_inplace(
  4697. struct ggml_context * ctx,
  4698. struct ggml_tensor * a,
  4699. struct ggml_tensor * b) {
  4700. return ggml_sub_impl(ctx, a, b, true);
  4701. }
  4702. // ggml_mul
  4703. static struct ggml_tensor * ggml_mul_impl(
  4704. struct ggml_context * ctx,
  4705. struct ggml_tensor * a,
  4706. struct ggml_tensor * b,
  4707. bool inplace) {
  4708. // TODO: support less-strict constraint
  4709. // GGML_ASSERT(ggml_can_repeat(b, a));
  4710. GGML_ASSERT(ggml_can_repeat_rows(b, a));
  4711. bool is_node = false;
  4712. if (!inplace && (a->grad || b->grad)) {
  4713. // TODO: support backward pass for broadcasting
  4714. GGML_ASSERT(ggml_are_same_shape(a, b));
  4715. is_node = true;
  4716. }
  4717. if (inplace) {
  4718. GGML_ASSERT(!is_node);
  4719. }
  4720. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4721. result->op = GGML_OP_MUL;
  4722. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4723. result->src[0] = a;
  4724. result->src[1] = b;
  4725. return result;
  4726. }
  4727. struct ggml_tensor * ggml_mul(
  4728. struct ggml_context * ctx,
  4729. struct ggml_tensor * a,
  4730. struct ggml_tensor * b) {
  4731. return ggml_mul_impl(ctx, a, b, false);
  4732. }
  4733. struct ggml_tensor * ggml_mul_inplace(
  4734. struct ggml_context * ctx,
  4735. struct ggml_tensor * a,
  4736. struct ggml_tensor * b) {
  4737. return ggml_mul_impl(ctx, a, b, true);
  4738. }
  4739. // ggml_div
  4740. static struct ggml_tensor * ggml_div_impl(
  4741. struct ggml_context * ctx,
  4742. struct ggml_tensor * a,
  4743. struct ggml_tensor * b,
  4744. bool inplace) {
  4745. GGML_ASSERT(ggml_are_same_shape(a, b));
  4746. bool is_node = false;
  4747. if (!inplace && (a->grad || b->grad)) {
  4748. is_node = true;
  4749. }
  4750. if (inplace) {
  4751. GGML_ASSERT(!is_node);
  4752. }
  4753. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4754. result->op = GGML_OP_DIV;
  4755. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4756. result->src[0] = a;
  4757. result->src[1] = b;
  4758. return result;
  4759. }
  4760. struct ggml_tensor * ggml_div(
  4761. struct ggml_context * ctx,
  4762. struct ggml_tensor * a,
  4763. struct ggml_tensor * b) {
  4764. return ggml_div_impl(ctx, a, b, false);
  4765. }
  4766. struct ggml_tensor * ggml_div_inplace(
  4767. struct ggml_context * ctx,
  4768. struct ggml_tensor * a,
  4769. struct ggml_tensor * b) {
  4770. return ggml_div_impl(ctx, a, b, true);
  4771. }
  4772. // ggml_sqr
  4773. static struct ggml_tensor * ggml_sqr_impl(
  4774. struct ggml_context * ctx,
  4775. struct ggml_tensor * a,
  4776. bool inplace) {
  4777. bool is_node = false;
  4778. if (!inplace && (a->grad)) {
  4779. is_node = true;
  4780. }
  4781. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4782. result->op = GGML_OP_SQR;
  4783. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4784. result->src[0] = a;
  4785. return result;
  4786. }
  4787. struct ggml_tensor * ggml_sqr(
  4788. struct ggml_context * ctx,
  4789. struct ggml_tensor * a) {
  4790. return ggml_sqr_impl(ctx, a, false);
  4791. }
  4792. struct ggml_tensor * ggml_sqr_inplace(
  4793. struct ggml_context * ctx,
  4794. struct ggml_tensor * a) {
  4795. return ggml_sqr_impl(ctx, a, true);
  4796. }
  4797. // ggml_sqrt
  4798. static struct ggml_tensor * ggml_sqrt_impl(
  4799. struct ggml_context * ctx,
  4800. struct ggml_tensor * a,
  4801. bool inplace) {
  4802. bool is_node = false;
  4803. if (!inplace && (a->grad)) {
  4804. is_node = true;
  4805. }
  4806. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4807. result->op = GGML_OP_SQRT;
  4808. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4809. result->src[0] = a;
  4810. return result;
  4811. }
  4812. struct ggml_tensor * ggml_sqrt(
  4813. struct ggml_context * ctx,
  4814. struct ggml_tensor * a) {
  4815. return ggml_sqrt_impl(ctx, a, false);
  4816. }
  4817. struct ggml_tensor * ggml_sqrt_inplace(
  4818. struct ggml_context * ctx,
  4819. struct ggml_tensor * a) {
  4820. return ggml_sqrt_impl(ctx, a, true);
  4821. }
  4822. // ggml_log
  4823. static struct ggml_tensor * ggml_log_impl(
  4824. struct ggml_context * ctx,
  4825. struct ggml_tensor * a,
  4826. bool inplace) {
  4827. bool is_node = false;
  4828. if (!inplace && (a->grad)) {
  4829. is_node = true;
  4830. }
  4831. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4832. result->op = GGML_OP_LOG;
  4833. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4834. result->src[0] = a;
  4835. return result;
  4836. }
  4837. struct ggml_tensor * ggml_log(
  4838. struct ggml_context * ctx,
  4839. struct ggml_tensor * a) {
  4840. return ggml_log_impl(ctx, a, false);
  4841. }
  4842. struct ggml_tensor * ggml_log_inplace(
  4843. struct ggml_context * ctx,
  4844. struct ggml_tensor * a) {
  4845. return ggml_log_impl(ctx, a, true);
  4846. }
  4847. // ggml_sum
  4848. struct ggml_tensor * ggml_sum(
  4849. struct ggml_context * ctx,
  4850. struct ggml_tensor * a) {
  4851. bool is_node = false;
  4852. if (a->grad) {
  4853. is_node = true;
  4854. }
  4855. struct ggml_tensor * result = ggml_new_tensor_1d(ctx, a->type, 1);
  4856. result->op = GGML_OP_SUM;
  4857. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4858. result->src[0] = a;
  4859. return result;
  4860. }
  4861. // ggml_sum_rows
  4862. struct ggml_tensor * ggml_sum_rows(
  4863. struct ggml_context * ctx,
  4864. struct ggml_tensor * a) {
  4865. bool is_node = false;
  4866. if (a->grad) {
  4867. is_node = true;
  4868. }
  4869. int64_t ne[4] = {1,1,1,1};
  4870. for (int i=1; i<a->n_dims; ++i) {
  4871. ne[i] = a->ne[i];
  4872. }
  4873. struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, a->n_dims, ne);
  4874. result->op = GGML_OP_SUM_ROWS;
  4875. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4876. result->src[0] = a;
  4877. return result;
  4878. }
  4879. // ggml_mean
  4880. struct ggml_tensor * ggml_mean(
  4881. struct ggml_context * ctx,
  4882. struct ggml_tensor * a) {
  4883. bool is_node = false;
  4884. if (a->grad) {
  4885. GGML_ASSERT(false); // TODO: implement
  4886. is_node = true;
  4887. }
  4888. int64_t ne[GGML_MAX_DIMS] = { 1, a->ne[1], a->ne[2], a->ne[3] };
  4889. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, a->n_dims, ne);
  4890. result->op = GGML_OP_MEAN;
  4891. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4892. result->src[0] = a;
  4893. return result;
  4894. }
  4895. // ggml_argmax
  4896. struct ggml_tensor * ggml_argmax(
  4897. struct ggml_context * ctx,
  4898. struct ggml_tensor * a) {
  4899. GGML_ASSERT(ggml_is_matrix(a));
  4900. bool is_node = false;
  4901. if (a->grad) {
  4902. GGML_ASSERT(false);
  4903. is_node = true;
  4904. }
  4905. int64_t ne[GGML_MAX_DIMS] = { a->ne[1], 1, 1, 1 };
  4906. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_I32, a->n_dims, ne);
  4907. result->op = GGML_OP_ARGMAX;
  4908. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4909. result->src[0] = a;
  4910. return result;
  4911. }
  4912. // ggml_repeat
  4913. struct ggml_tensor * ggml_repeat(
  4914. struct ggml_context * ctx,
  4915. struct ggml_tensor * a,
  4916. struct ggml_tensor * b) {
  4917. GGML_ASSERT(ggml_can_repeat(a, b));
  4918. bool is_node = false;
  4919. if (a->grad) {
  4920. is_node = true;
  4921. }
  4922. struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, b->n_dims, b->ne);
  4923. result->op = GGML_OP_REPEAT;
  4924. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4925. result->src[0] = a;
  4926. return result;
  4927. }
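// Illustrative example: ggml_repeat(ctx, a, b) tiles a up to the shape of b, e.g. a bias
// with ne = {n, 1} can be repeated to ne = {n, n_tokens} before an element-wise op that
// does not broadcast on its own.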
  4928. // ggml_repeat_back
  4929. struct ggml_tensor * ggml_repeat_back(
  4930. struct ggml_context * ctx,
  4931. struct ggml_tensor * a,
  4932. struct ggml_tensor * b) {
  4933. GGML_ASSERT(ggml_can_repeat(b, a));
  4934. bool is_node = false;
  4935. if (a->grad) {
  4936. is_node = true;
  4937. }
  4938. if (ggml_are_same_shape(a, b) && !is_node) {
  4939. return a;
  4940. }
  4941. struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, b->n_dims, b->ne);
  4942. result->op = GGML_OP_REPEAT_BACK;
  4943. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4944. result->src[0] = a;
  4945. return result;
  4946. }
  4947. // ggml_concat
  4948. struct ggml_tensor * ggml_concat(
  4949. struct ggml_context* ctx,
  4950. struct ggml_tensor* a,
  4951. struct ggml_tensor* b) {
  4952. GGML_ASSERT(a->ne[0] == b->ne[0] && a->ne[1] == b->ne[1] && a->ne[3] == b->ne[3]);
  4953. bool is_node = false;
  4954. if (a->grad || b->grad) {
  4955. is_node = true;
  4956. }
  4957. struct ggml_tensor * result = ggml_new_tensor_4d(ctx, a->type, a->ne[0], a->ne[1], a->ne[2] + b->ne[2], a->ne[3]);
  4958. result->op = GGML_OP_CONCAT;
  4959. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4960. result->src[0] = a;
  4961. result->src[1] = b;
  4962. return result;
  4963. }
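// Illustrative note: as the assert above encodes, ggml_concat() joins a and b along
// dimension 2 only, e.g. ne = {64, 64, 8, 1} and ne = {64, 64, 4, 1} concatenate to
// ne = {64, 64, 12, 1}.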
  4964. // ggml_abs
  4965. struct ggml_tensor * ggml_abs(
  4966. struct ggml_context * ctx,
  4967. struct ggml_tensor * a) {
  4968. return ggml_unary(ctx, a, GGML_UNARY_OP_ABS);
  4969. }
  4970. struct ggml_tensor * ggml_abs_inplace(
  4971. struct ggml_context * ctx,
  4972. struct ggml_tensor * a) {
  4973. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_ABS);
  4974. }
  4975. // ggml_sgn
  4976. struct ggml_tensor * ggml_sgn(
  4977. struct ggml_context * ctx,
  4978. struct ggml_tensor * a) {
  4979. return ggml_unary(ctx, a, GGML_UNARY_OP_SGN);
  4980. }
  4981. struct ggml_tensor * ggml_sgn_inplace(
  4982. struct ggml_context * ctx,
  4983. struct ggml_tensor * a) {
  4984. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_SGN);
  4985. }
  4986. // ggml_neg
  4987. struct ggml_tensor * ggml_neg(
  4988. struct ggml_context * ctx,
  4989. struct ggml_tensor * a) {
  4990. return ggml_unary(ctx, a, GGML_UNARY_OP_NEG);
  4991. }
  4992. struct ggml_tensor * ggml_neg_inplace(
  4993. struct ggml_context * ctx,
  4994. struct ggml_tensor * a) {
  4995. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_NEG);
  4996. }
  4997. // ggml_step
  4998. struct ggml_tensor * ggml_step(
  4999. struct ggml_context * ctx,
  5000. struct ggml_tensor * a) {
  5001. return ggml_unary(ctx, a, GGML_UNARY_OP_STEP);
  5002. }
  5003. struct ggml_tensor * ggml_step_inplace(
  5004. struct ggml_context * ctx,
  5005. struct ggml_tensor * a) {
  5006. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_STEP);
  5007. }
  5008. // ggml_tanh
  5009. struct ggml_tensor * ggml_tanh(
  5010. struct ggml_context * ctx,
  5011. struct ggml_tensor * a) {
  5012. return ggml_unary(ctx, a, GGML_UNARY_OP_TANH);
  5013. }
  5014. struct ggml_tensor * ggml_tanh_inplace(
  5015. struct ggml_context * ctx,
  5016. struct ggml_tensor * a) {
  5017. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_TANH);
  5018. }
  5019. // ggml_elu
  5020. struct ggml_tensor * ggml_elu(
  5021. struct ggml_context * ctx,
  5022. struct ggml_tensor * a) {
  5023. return ggml_unary(ctx, a, GGML_UNARY_OP_ELU);
  5024. }
  5025. struct ggml_tensor * ggml_elu_inplace(
  5026. struct ggml_context * ctx,
  5027. struct ggml_tensor * a) {
  5028. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_ELU);
  5029. }
  5030. // ggml_relu
  5031. struct ggml_tensor * ggml_relu(
  5032. struct ggml_context * ctx,
  5033. struct ggml_tensor * a) {
  5034. return ggml_unary(ctx, a, GGML_UNARY_OP_RELU);
  5035. }
  5036. struct ggml_tensor * ggml_relu_inplace(
  5037. struct ggml_context * ctx,
  5038. struct ggml_tensor * a) {
  5039. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_RELU);
  5040. }
  5041. // ggml_gelu
  5042. struct ggml_tensor * ggml_gelu(
  5043. struct ggml_context * ctx,
  5044. struct ggml_tensor * a) {
  5045. return ggml_unary(ctx, a, GGML_UNARY_OP_GELU);
  5046. }
  5047. struct ggml_tensor * ggml_gelu_inplace(
  5048. struct ggml_context * ctx,
  5049. struct ggml_tensor * a) {
  5050. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_GELU);
  5051. }
  5052. // ggml_gelu_quick
  5053. struct ggml_tensor * ggml_gelu_quick(
  5054. struct ggml_context * ctx,
  5055. struct ggml_tensor * a) {
  5056. return ggml_unary(ctx, a, GGML_UNARY_OP_GELU_QUICK);
  5057. }
  5058. struct ggml_tensor * ggml_gelu_quick_inplace(
  5059. struct ggml_context * ctx,
  5060. struct ggml_tensor * a) {
  5061. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_GELU_QUICK);
  5062. }
  5063. // ggml_silu
  5064. struct ggml_tensor * ggml_silu(
  5065. struct ggml_context * ctx,
  5066. struct ggml_tensor * a) {
  5067. return ggml_unary(ctx, a, GGML_UNARY_OP_SILU);
  5068. }
  5069. struct ggml_tensor * ggml_silu_inplace(
  5070. struct ggml_context * ctx,
  5071. struct ggml_tensor * a) {
  5072. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_SILU);
  5073. }
  5074. // ggml_silu_back
  5075. struct ggml_tensor * ggml_silu_back(
  5076. struct ggml_context * ctx,
  5077. struct ggml_tensor * a,
  5078. struct ggml_tensor * b) {
  5079. bool is_node = false;
  5080. if (a->grad || b->grad) {
  5081. // TODO: implement backward
  5082. is_node = true;
  5083. }
  5084. struct ggml_tensor * result = ggml_dup_tensor(ctx, a);
  5085. result->op = GGML_OP_SILU_BACK;
  5086. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5087. result->src[0] = a;
  5088. result->src[1] = b;
  5089. return result;
  5090. }
  5091. // ggml_norm
  5092. static struct ggml_tensor * ggml_norm_impl(
  5093. struct ggml_context * ctx,
  5094. struct ggml_tensor * a,
  5095. float eps,
  5096. bool inplace) {
  5097. bool is_node = false;
  5098. if (!inplace && (a->grad)) {
  5099. GGML_ASSERT(false); // TODO: implement backward
  5100. is_node = true;
  5101. }
  5102. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  5103. ggml_set_op_params(result, &eps, sizeof(eps));
  5104. result->op = GGML_OP_NORM;
  5105. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5106. result->src[0] = a;
  5107. return result;
  5108. }
  5109. struct ggml_tensor * ggml_norm(
  5110. struct ggml_context * ctx,
  5111. struct ggml_tensor * a,
  5112. float eps) {
  5113. return ggml_norm_impl(ctx, a, eps, false);
  5114. }
  5115. struct ggml_tensor * ggml_norm_inplace(
  5116. struct ggml_context * ctx,
  5117. struct ggml_tensor * a,
  5118. float eps) {
  5119. return ggml_norm_impl(ctx, a, eps, true);
  5120. }
  5121. // ggml_rms_norm
  5122. static struct ggml_tensor * ggml_rms_norm_impl(
  5123. struct ggml_context * ctx,
  5124. struct ggml_tensor * a,
  5125. float eps,
  5126. bool inplace) {
  5127. bool is_node = false;
  5128. if (!inplace && (a->grad)) {
  5129. is_node = true;
  5130. }
  5131. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  5132. ggml_set_op_params(result, &eps, sizeof(eps));
  5133. result->op = GGML_OP_RMS_NORM;
  5134. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5135. result->src[0] = a;
  5136. return result;
  5137. }
  5138. struct ggml_tensor * ggml_rms_norm(
  5139. struct ggml_context * ctx,
  5140. struct ggml_tensor * a,
  5141. float eps) {
  5142. return ggml_rms_norm_impl(ctx, a, eps, false);
  5143. }
  5144. struct ggml_tensor * ggml_rms_norm_inplace(
  5145. struct ggml_context * ctx,
  5146. struct ggml_tensor * a,
  5147. float eps) {
  5148. return ggml_rms_norm_impl(ctx, a, eps, true);
  5149. }
  5150. // ggml_rms_norm_back
  5151. struct ggml_tensor * ggml_rms_norm_back(
  5152. struct ggml_context * ctx,
  5153. struct ggml_tensor * a,
  5154. struct ggml_tensor * b,
  5155. float eps) {
  5156. bool is_node = false;
  5157. if (a->grad) {
  5158. // TODO: implement backward
  5159. is_node = true;
  5160. }
  5161. struct ggml_tensor * result = ggml_dup_tensor(ctx, a);
  5162. ggml_set_op_params(result, &eps, sizeof(eps));
  5163. result->op = GGML_OP_RMS_NORM_BACK;
  5164. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5165. result->src[0] = a;
  5166. result->src[1] = b;
  5167. return result;
  5168. }
  5169. // ggml_group_norm
  5170. static struct ggml_tensor * ggml_group_norm_impl(
  5171. struct ggml_context * ctx,
  5172. struct ggml_tensor * a,
  5173. int n_groups,
  5174. bool inplace) {
  5175. bool is_node = false;
  5176. if (!inplace && (a->grad)) {
  5177. GGML_ASSERT(false); // TODO: implement backward
  5178. is_node = true;
  5179. }
  5180. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  5181. result->op = GGML_OP_GROUP_NORM;
  5182. result->op_params[0] = n_groups;
  5183. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5184. result->src[0] = a;
  5185. result->src[1] = NULL; // TODO: maybe store epsilon here?
  5186. return result;
  5187. }
  5188. struct ggml_tensor * ggml_group_norm(
  5189. struct ggml_context * ctx,
  5190. struct ggml_tensor * a,
  5191. int n_groups) {
  5192. return ggml_group_norm_impl(ctx, a, n_groups, false);
  5193. }
  5194. struct ggml_tensor * ggml_group_norm_inplace(
  5195. struct ggml_context * ctx,
  5196. struct ggml_tensor * a,
  5197. int n_groups) {
  5198. return ggml_group_norm_impl(ctx, a, n_groups, true);
  5199. }
  5200. // ggml_mul_mat
  5201. struct ggml_tensor * ggml_mul_mat(
  5202. struct ggml_context * ctx,
  5203. struct ggml_tensor * a,
  5204. struct ggml_tensor * b) {
  5205. GGML_ASSERT(ggml_can_mul_mat(a, b));
  5206. GGML_ASSERT(!ggml_is_transposed(a));
  5207. bool is_node = false;
  5208. if (a->grad || b->grad) {
  5209. is_node = true;
  5210. }
  5211. const int64_t ne[4] = { a->ne[1], b->ne[1], b->ne[2], b->ne[3] };
  5212. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, MAX(a->n_dims, b->n_dims), ne);
  5213. result->op = GGML_OP_MUL_MAT;
  5214. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5215. result->src[0] = a;
  5216. result->src[1] = b;
  5217. return result;
  5218. }
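// Shape rule (follows from the ne[] initializer above): with a of shape {K, M, ...} and
// b of shape {K, N, b2, b3}, ggml_mul_mat(ctx, a, b) yields an F32 tensor of shape
// {M, N, b2, b3}, i.e. rows of a are dotted with rows of b. Illustrative sketch, with
// `w` and `x` standing in for a weight matrix and an activation batch:
//     // w: {4096, 4096}, x: {4096, n_tokens}  ->  y: {4096, n_tokens}
//     struct ggml_tensor * y = ggml_mul_mat(ctx, w, x);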
  5219. // ggml_out_prod
  5220. struct ggml_tensor * ggml_out_prod(
  5221. struct ggml_context * ctx,
  5222. struct ggml_tensor * a,
  5223. struct ggml_tensor * b) {
  5224. GGML_ASSERT(ggml_can_out_prod(a, b));
  5225. GGML_ASSERT(!ggml_is_transposed(a));
  5226. bool is_node = false;
  5227. if (a->grad || b->grad) {
  5228. is_node = true;
  5229. }
  5230. // a is broadcastable to b for ne[2] and ne[3] -> use b->ne[2] and b->ne[3]
  5231. const int64_t ne[4] = { a->ne[0], b->ne[0], b->ne[2], b->ne[3] };
  5232. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, MAX(a->n_dims, b->n_dims), ne);
  5233. result->op = GGML_OP_OUT_PROD;
  5234. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5235. result->src[0] = a;
  5236. result->src[1] = b;
  5237. return result;
  5238. }
  5239. // ggml_scale
  5240. static struct ggml_tensor * ggml_scale_impl(
  5241. struct ggml_context * ctx,
  5242. struct ggml_tensor * a,
  5243. struct ggml_tensor * b,
  5244. bool inplace) {
  5245. GGML_ASSERT(ggml_is_scalar(b));
  5246. GGML_ASSERT(ggml_is_padded_1d(a));
  5247. bool is_node = false;
  5248. if (a->grad || b->grad) {
  5249. is_node = true;
  5250. }
  5251. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  5252. result->op = GGML_OP_SCALE;
  5253. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5254. result->src[0] = a;
  5255. result->src[1] = b;
  5256. return result;
  5257. }
  5258. struct ggml_tensor * ggml_scale(
  5259. struct ggml_context * ctx,
  5260. struct ggml_tensor * a,
  5261. struct ggml_tensor * b) {
  5262. return ggml_scale_impl(ctx, a, b, false);
  5263. }
  5264. struct ggml_tensor * ggml_scale_inplace(
  5265. struct ggml_context * ctx,
  5266. struct ggml_tensor * a,
  5267. struct ggml_tensor * b) {
  5268. return ggml_scale_impl(ctx, a, b, true);
  5269. }
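// Usage sketch (illustrative; `cur` and `n_embd_head` are placeholder names): b must be a
// scalar tensor, e.g. the usual 1/sqrt(d) attention scaling
//     struct ggml_tensor * s = ggml_new_f32(ctx, 1.0f/sqrtf((float) n_embd_head));
//     cur = ggml_scale_inplace(ctx, cur, s);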
  5270. // ggml_set
  5271. static struct ggml_tensor * ggml_set_impl(
  5272. struct ggml_context * ctx,
  5273. struct ggml_tensor * a,
  5274. struct ggml_tensor * b,
  5275. size_t nb1,
  5276. size_t nb2,
  5277. size_t nb3,
  5278. size_t offset,
  5279. bool inplace) {
  5280. GGML_ASSERT(ggml_nelements(a) >= ggml_nelements(b));
  5281. bool is_node = false;
  5282. if (a->grad || b->grad) {
  5283. is_node = true;
  5284. }
  5285. // make a view of the destination
  5286. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  5287. int32_t params[] = { nb1, nb2, nb3, offset, inplace ? 1 : 0 };
  5288. ggml_set_op_params(result, params, sizeof(params));
  5289. result->op = GGML_OP_SET;
  5290. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5291. result->src[0] = a;
  5292. result->src[1] = b;
  5293. return result;
  5294. }
  5295. struct ggml_tensor * ggml_set(
  5296. struct ggml_context * ctx,
  5297. struct ggml_tensor * a,
  5298. struct ggml_tensor * b,
  5299. size_t nb1,
  5300. size_t nb2,
  5301. size_t nb3,
  5302. size_t offset) {
  5303. return ggml_set_impl(ctx, a, b, nb1, nb2, nb3, offset, false);
  5304. }
  5305. struct ggml_tensor * ggml_set_inplace(
  5306. struct ggml_context * ctx,
  5307. struct ggml_tensor * a,
  5308. struct ggml_tensor * b,
  5309. size_t nb1,
  5310. size_t nb2,
  5311. size_t nb3,
  5312. size_t offset) {
  5313. return ggml_set_impl(ctx, a, b, nb1, nb2, nb3, offset, true);
  5314. }
  5315. struct ggml_tensor * ggml_set_1d(
  5316. struct ggml_context * ctx,
  5317. struct ggml_tensor * a,
  5318. struct ggml_tensor * b,
  5319. size_t offset) {
  5320. return ggml_set_impl(ctx, a, b, a->nb[1], a->nb[2], a->nb[3], offset, false);
  5321. }
  5322. struct ggml_tensor * ggml_set_1d_inplace(
  5323. struct ggml_context * ctx,
  5324. struct ggml_tensor * a,
  5325. struct ggml_tensor * b,
  5326. size_t offset) {
  5327. return ggml_set_impl(ctx, a, b, a->nb[1], a->nb[2], a->nb[3], offset, true);
  5328. }
  5329. struct ggml_tensor * ggml_set_2d(
  5330. struct ggml_context * ctx,
  5331. struct ggml_tensor * a,
  5332. struct ggml_tensor * b,
  5333. size_t nb1,
  5334. size_t offset) {
  5335. return ggml_set_impl(ctx, a, b, nb1, a->nb[2], a->nb[3], offset, false);
  5336. }
  5337. struct ggml_tensor * ggml_set_2d_inplace(
  5338. struct ggml_context * ctx,
  5339. struct ggml_tensor * a,
  5340. struct ggml_tensor * b,
  5341. size_t nb1,
  5342. size_t offset) {
5343. return ggml_set_impl(ctx, a, b, nb1, a->nb[2], a->nb[3], offset, true);
  5344. }
  5345. // ggml_cpy
  5346. static struct ggml_tensor * ggml_cpy_impl(
  5347. struct ggml_context * ctx,
  5348. struct ggml_tensor * a,
  5349. struct ggml_tensor * b,
  5350. bool inplace) {
  5351. GGML_ASSERT(ggml_nelements(a) == ggml_nelements(b));
  5352. bool is_node = false;
  5353. if (!inplace && (a->grad || b->grad)) {
  5354. is_node = true;
  5355. }
  5356. // make a view of the destination
  5357. struct ggml_tensor * result = ggml_view_tensor(ctx, b);
  5358. if (strlen(b->name) > 0) {
  5359. ggml_format_name(result, "%s (copy of %s)", b->name, a->name);
  5360. } else {
  5361. ggml_format_name(result, "%s (copy)", a->name);
  5362. }
  5363. result->op = GGML_OP_CPY;
  5364. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5365. result->src[0] = a;
  5366. result->src[1] = b;
  5367. return result;
  5368. }
  5369. struct ggml_tensor * ggml_cpy(
  5370. struct ggml_context * ctx,
  5371. struct ggml_tensor * a,
  5372. struct ggml_tensor * b) {
  5373. return ggml_cpy_impl(ctx, a, b, false);
  5374. }
  5375. struct ggml_tensor * ggml_cpy_inplace(
  5376. struct ggml_context * ctx,
  5377. struct ggml_tensor * a,
  5378. struct ggml_tensor * b) {
  5379. return ggml_cpy_impl(ctx, a, b, true);
  5380. }
  5381. // ggml_cont
  5382. static struct ggml_tensor * ggml_cont_impl(
  5383. struct ggml_context * ctx,
  5384. struct ggml_tensor * a,
  5385. bool inplace) {
  5386. bool is_node = false;
  5387. if (!inplace && a->grad) {
  5388. is_node = true;
  5389. }
  5390. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  5391. ggml_format_name(result, "%s (cont)", a->name);
  5392. result->op = GGML_OP_CONT;
  5393. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5394. result->src[0] = a;
  5395. return result;
  5396. }
  5397. struct ggml_tensor * ggml_cont(
  5398. struct ggml_context * ctx,
  5399. struct ggml_tensor * a) {
  5400. return ggml_cont_impl(ctx, a, false);
  5401. }
  5402. struct ggml_tensor * ggml_cont_inplace(
  5403. struct ggml_context * ctx,
  5404. struct ggml_tensor * a) {
  5405. return ggml_cont_impl(ctx, a, true);
  5406. }
  5407. // make contiguous, with new shape
  5408. GGML_API struct ggml_tensor * ggml_cont_1d(
  5409. struct ggml_context * ctx,
  5410. struct ggml_tensor * a,
  5411. int64_t ne0) {
  5412. return ggml_cont_4d(ctx, a, ne0, 1, 1, 1);
  5413. }
  5414. GGML_API struct ggml_tensor * ggml_cont_2d(
  5415. struct ggml_context * ctx,
  5416. struct ggml_tensor * a,
  5417. int64_t ne0,
  5418. int64_t ne1) {
  5419. return ggml_cont_4d(ctx, a, ne0, ne1, 1, 1);
  5420. }
  5421. GGML_API struct ggml_tensor * ggml_cont_3d(
  5422. struct ggml_context * ctx,
  5423. struct ggml_tensor * a,
  5424. int64_t ne0,
  5425. int64_t ne1,
  5426. int64_t ne2) {
  5427. return ggml_cont_4d(ctx, a, ne0, ne1, ne2, 1);
  5428. }
  5429. struct ggml_tensor * ggml_cont_4d(
  5430. struct ggml_context * ctx,
  5431. struct ggml_tensor * a,
  5432. int64_t ne0,
  5433. int64_t ne1,
  5434. int64_t ne2,
  5435. int64_t ne3) {
  5436. GGML_ASSERT(ggml_nelements(a) == (ne0*ne1*ne2*ne3));
  5437. bool is_node = false;
  5438. struct ggml_tensor * result = ggml_new_tensor_4d(ctx, a->type, ne0, ne1, ne2, ne3);
  5439. ggml_format_name(result, "%s (cont)", a->name);
  5440. result->op = GGML_OP_CONT;
  5441. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5442. result->src[0] = a;
  5443. return result;
  5444. }
  5445. // ggml_reshape
  5446. struct ggml_tensor * ggml_reshape(
  5447. struct ggml_context * ctx,
  5448. struct ggml_tensor * a,
  5449. struct ggml_tensor * b) {
  5450. GGML_ASSERT(ggml_is_contiguous(a));
5451. // only the shape of b is relevant, not its memory layout, so b is allowed to be non-contiguous
  5452. GGML_ASSERT(ggml_nelements(a) == ggml_nelements(b));
  5453. bool is_node = false;
  5454. if (a->grad) {
  5455. is_node = true;
  5456. }
  5457. if (b->grad) {
  5458. // gradient propagation is not supported
  5459. //GGML_ASSERT(false);
  5460. }
  5461. struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, b->n_dims, b->ne, a, 0);
  5462. ggml_format_name(result, "%s (reshaped)", a->name);
  5463. result->op = GGML_OP_RESHAPE;
  5464. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5465. result->src[0] = a;
  5466. return result;
  5467. }
  5468. struct ggml_tensor * ggml_reshape_1d(
  5469. struct ggml_context * ctx,
  5470. struct ggml_tensor * a,
  5471. int64_t ne0) {
  5472. GGML_ASSERT(ggml_is_contiguous(a));
  5473. GGML_ASSERT(ggml_nelements(a) == ne0);
  5474. bool is_node = false;
  5475. if (a->grad) {
  5476. is_node = true;
  5477. }
  5478. const int64_t ne[1] = { ne0 };
  5479. struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 1, ne, a, 0);
  5480. ggml_format_name(result, "%s (reshaped)", a->name);
  5481. result->op = GGML_OP_RESHAPE;
  5482. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5483. result->src[0] = a;
  5484. return result;
  5485. }
  5486. struct ggml_tensor * ggml_reshape_2d(
  5487. struct ggml_context * ctx,
  5488. struct ggml_tensor * a,
  5489. int64_t ne0,
  5490. int64_t ne1) {
  5491. GGML_ASSERT(ggml_is_contiguous(a));
  5492. GGML_ASSERT(ggml_nelements(a) == ne0*ne1);
  5493. bool is_node = false;
  5494. if (a->grad) {
  5495. is_node = true;
  5496. }
  5497. const int64_t ne[2] = { ne0, ne1 };
  5498. struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 2, ne, a, 0);
  5499. ggml_format_name(result, "%s (reshaped)", a->name);
  5500. result->op = GGML_OP_RESHAPE;
  5501. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5502. result->src[0] = a;
  5503. return result;
  5504. }
  5505. struct ggml_tensor * ggml_reshape_3d(
  5506. struct ggml_context * ctx,
  5507. struct ggml_tensor * a,
  5508. int64_t ne0,
  5509. int64_t ne1,
  5510. int64_t ne2) {
  5511. GGML_ASSERT(ggml_is_contiguous(a));
  5512. GGML_ASSERT(ggml_nelements(a) == ne0*ne1*ne2);
  5513. bool is_node = false;
  5514. if (a->grad) {
  5515. is_node = true;
  5516. }
  5517. const int64_t ne[3] = { ne0, ne1, ne2 };
  5518. struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 3, ne, a, 0);
  5519. ggml_format_name(result, "%s (reshaped)", a->name);
  5520. result->op = GGML_OP_RESHAPE;
  5521. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5522. result->src[0] = a;
  5523. return result;
  5524. }
  5525. struct ggml_tensor * ggml_reshape_4d(
  5526. struct ggml_context * ctx,
  5527. struct ggml_tensor * a,
  5528. int64_t ne0,
  5529. int64_t ne1,
  5530. int64_t ne2,
  5531. int64_t ne3) {
  5532. GGML_ASSERT(ggml_is_contiguous(a));
  5533. GGML_ASSERT(ggml_nelements(a) == ne0*ne1*ne2*ne3);
  5534. bool is_node = false;
  5535. if (a->grad) {
  5536. is_node = true;
  5537. }
  5538. const int64_t ne[4] = { ne0, ne1, ne2, ne3 };
  5539. struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 4, ne, a, 0);
  5540. ggml_format_name(result, "%s (reshaped)", a->name);
  5541. result->op = GGML_OP_RESHAPE;
  5542. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5543. result->src[0] = a;
  5544. return result;
  5545. }
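// Illustrative note: every ggml_reshape_*() variant above requires a contiguous source and
// returns a tensor that aliases a's data (ggml_new_tensor_impl() is called with a as the
// view source), so no data is copied; only the shape/stride metadata changes.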
  5546. static struct ggml_tensor * ggml_view_impl(
  5547. struct ggml_context * ctx,
  5548. struct ggml_tensor * a,
  5549. int n_dims,
  5550. const int64_t * ne,
  5551. size_t offset) {
  5552. bool is_node = false;
  5553. if (a->grad) {
  5554. is_node = true;
  5555. }
  5556. struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, n_dims, ne, a, offset);
  5557. ggml_format_name(result, "%s (view)", a->name);
  5558. ggml_set_op_params(result, &offset, sizeof(offset));
  5559. result->op = GGML_OP_VIEW;
  5560. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5561. result->src[0] = a;
  5562. return result;
  5563. }
  5564. // ggml_view_1d
  5565. struct ggml_tensor * ggml_view_1d(
  5566. struct ggml_context * ctx,
  5567. struct ggml_tensor * a,
  5568. int64_t ne0,
  5569. size_t offset) {
  5570. struct ggml_tensor * result = ggml_view_impl(ctx, a, 1, &ne0, offset);
  5571. return result;
  5572. }
  5573. // ggml_view_2d
  5574. struct ggml_tensor * ggml_view_2d(
  5575. struct ggml_context * ctx,
  5576. struct ggml_tensor * a,
  5577. int64_t ne0,
  5578. int64_t ne1,
  5579. size_t nb1,
  5580. size_t offset) {
  5581. const int64_t ne[2] = { ne0, ne1 };
  5582. struct ggml_tensor * result = ggml_view_impl(ctx, a, 2, ne, offset);
  5583. result->nb[1] = nb1;
  5584. result->nb[2] = result->nb[1]*ne1;
  5585. result->nb[3] = result->nb[2];
  5586. return result;
  5587. }
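// Illustrative example: viewing the top-left 4x4 block of a contiguous F32 matrix a with
// ne = {8, 8} keeps a's row stride; nb1 and offset are both in bytes:
//     struct ggml_tensor * blk = ggml_view_2d(ctx, a, 4, 4, a->nb[1], 0);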
  5588. // ggml_view_3d
  5589. struct ggml_tensor * ggml_view_3d(
  5590. struct ggml_context * ctx,
  5591. struct ggml_tensor * a,
  5592. int64_t ne0,
  5593. int64_t ne1,
  5594. int64_t ne2,
  5595. size_t nb1,
  5596. size_t nb2,
  5597. size_t offset) {
  5598. const int64_t ne[3] = { ne0, ne1, ne2 };
  5599. struct ggml_tensor * result = ggml_view_impl(ctx, a, 3, ne, offset);
  5600. result->nb[1] = nb1;
  5601. result->nb[2] = nb2;
  5602. result->nb[3] = result->nb[2]*ne2;
  5603. return result;
  5604. }
  5605. // ggml_view_4d
  5606. struct ggml_tensor * ggml_view_4d(
  5607. struct ggml_context * ctx,
  5608. struct ggml_tensor * a,
  5609. int64_t ne0,
  5610. int64_t ne1,
  5611. int64_t ne2,
  5612. int64_t ne3,
  5613. size_t nb1,
  5614. size_t nb2,
  5615. size_t nb3,
  5616. size_t offset) {
  5617. const int64_t ne[4] = { ne0, ne1, ne2, ne3 };
  5618. struct ggml_tensor * result = ggml_view_impl(ctx, a, 4, ne, offset);
  5619. result->nb[1] = nb1;
  5620. result->nb[2] = nb2;
  5621. result->nb[3] = nb3;
  5622. return result;
  5623. }
  5624. // ggml_permute
  5625. struct ggml_tensor * ggml_permute(
  5626. struct ggml_context * ctx,
  5627. struct ggml_tensor * a,
  5628. int axis0,
  5629. int axis1,
  5630. int axis2,
  5631. int axis3) {
  5632. GGML_ASSERT(axis0 >= 0 && axis0 < GGML_MAX_DIMS);
  5633. GGML_ASSERT(axis1 >= 0 && axis1 < GGML_MAX_DIMS);
  5634. GGML_ASSERT(axis2 >= 0 && axis2 < GGML_MAX_DIMS);
  5635. GGML_ASSERT(axis3 >= 0 && axis3 < GGML_MAX_DIMS);
  5636. GGML_ASSERT(axis0 != axis1);
  5637. GGML_ASSERT(axis0 != axis2);
  5638. GGML_ASSERT(axis0 != axis3);
  5639. GGML_ASSERT(axis1 != axis2);
  5640. GGML_ASSERT(axis1 != axis3);
  5641. GGML_ASSERT(axis2 != axis3);
  5642. bool is_node = false;
  5643. if (a->grad) {
  5644. is_node = true;
  5645. }
  5646. struct ggml_tensor * result = ggml_view_tensor(ctx, a);
  5647. ggml_format_name(result, "%s (permuted)", a->name);
5648. int64_t ne[GGML_MAX_DIMS];
5649. size_t  nb[GGML_MAX_DIMS];
  5650. ne[axis0] = a->ne[0];
  5651. ne[axis1] = a->ne[1];
  5652. ne[axis2] = a->ne[2];
  5653. ne[axis3] = a->ne[3];
  5654. nb[axis0] = a->nb[0];
  5655. nb[axis1] = a->nb[1];
  5656. nb[axis2] = a->nb[2];
  5657. nb[axis3] = a->nb[3];
  5658. result->ne[0] = ne[0];
  5659. result->ne[1] = ne[1];
  5660. result->ne[2] = ne[2];
  5661. result->ne[3] = ne[3];
  5662. result->nb[0] = nb[0];
  5663. result->nb[1] = nb[1];
  5664. result->nb[2] = nb[2];
  5665. result->nb[3] = nb[3];
  5666. result->op = GGML_OP_PERMUTE;
  5667. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5668. result->src[0] = a;
  5669. int32_t params[] = { axis0, axis1, axis2, axis3 };
  5670. ggml_set_op_params(result, params, sizeof(params));
  5671. return result;
  5672. }
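// Illustrative example: each axis argument gives the destination position of the
// corresponding source dimension, so
//     struct ggml_tensor * p = ggml_permute(ctx, a, 2, 0, 1, 3);
// places a->ne[0] at p->ne[2], a->ne[1] at p->ne[0] and a->ne[2] at p->ne[1]. The result
// is a strided view; follow it with ggml_cont() if a contiguous layout is needed.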
  5673. // ggml_transpose
  5674. struct ggml_tensor * ggml_transpose(
  5675. struct ggml_context * ctx,
  5676. struct ggml_tensor * a) {
  5677. bool is_node = false;
  5678. if (a->grad) {
  5679. is_node = true;
  5680. }
  5681. struct ggml_tensor * result = ggml_view_tensor(ctx, a);
  5682. ggml_format_name(result, "%s (transposed)", a->name);
  5683. result->ne[0] = a->ne[1];
  5684. result->ne[1] = a->ne[0];
  5685. result->nb[0] = a->nb[1];
  5686. result->nb[1] = a->nb[0];
  5687. result->op = GGML_OP_TRANSPOSE;
  5688. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5689. result->src[0] = a;
  5690. return result;
  5691. }
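// Illustrative note: ggml_transpose() swaps ne[0]/ne[1] and nb[0]/nb[1] in a view without
// moving data; it produces the same layout as ggml_permute(ctx, a, 1, 0, 2, 3).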
  5692. // ggml_get_rows
  5693. struct ggml_tensor * ggml_get_rows(
  5694. struct ggml_context * ctx,
  5695. struct ggml_tensor * a,
  5696. struct ggml_tensor * b) {
  5697. GGML_ASSERT(ggml_is_matrix(a) && ggml_is_vector(b) && b->type == GGML_TYPE_I32);
  5698. bool is_node = false;
  5699. if (a->grad || b->grad) {
  5700. is_node = true;
  5701. }
  5702. // TODO: implement non F32 return
  5703. //struct ggml_tensor * result = ggml_new_tensor_2d(ctx, a->type, a->ne[0], b->ne[0]);
  5704. struct ggml_tensor * result = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, a->ne[0], b->ne[0]);
  5705. result->op = GGML_OP_GET_ROWS;
  5706. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5707. result->src[0] = a;
  5708. result->src[1] = b;
  5709. return result;
  5710. }
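// Usage sketch (illustrative; `tok_embeddings` and `n_tokens` are placeholder names): the
// typical use is an embedding lookup where b holds the token ids:
//     struct ggml_tensor * ids = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, n_tokens);
//     struct ggml_tensor * emb = ggml_get_rows(ctx, tok_embeddings, ids); // {n_embd, n_tokens}
// Note that the result is always F32 in this version (see the TODO above).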
  5711. // ggml_get_rows_back
  5712. struct ggml_tensor * ggml_get_rows_back(
  5713. struct ggml_context * ctx,
  5714. struct ggml_tensor * a,
  5715. struct ggml_tensor * b,
  5716. struct ggml_tensor * c) {
  5717. GGML_ASSERT(ggml_is_matrix(a) && ggml_is_vector(b) && b->type == GGML_TYPE_I32);
  5718. GGML_ASSERT(ggml_is_matrix(c) && (a->ne[0] == c->ne[0]));
  5719. bool is_node = false;
  5720. if (a->grad || b->grad) {
  5721. is_node = true;
  5722. }
  5723. // TODO: implement non F32 return
  5724. //struct ggml_tensor * result = ggml_new_tensor_2d(ctx, a->type, a->ne[0], b->ne[0]);
  5725. struct ggml_tensor * result = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, c->ne[0], c->ne[1]);
  5726. result->op = GGML_OP_GET_ROWS_BACK;
  5727. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5728. result->src[0] = a;
  5729. result->src[1] = b;
  5730. return result;
  5731. }
  5732. // ggml_diag
  5733. struct ggml_tensor * ggml_diag(
  5734. struct ggml_context * ctx,
  5735. struct ggml_tensor * a) {
  5736. GGML_ASSERT(a->ne[1] == 1);
  5737. bool is_node = false;
  5738. if (a->grad) {
  5739. is_node = true;
  5740. }
  5741. const int64_t ne[4] = { a->ne[0], a->ne[0], a->ne[2], a->ne[3] };
  5742. struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, MAX(a->n_dims, 2), ne);
  5743. result->op = GGML_OP_DIAG;
  5744. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5745. result->src[0] = a;
  5746. return result;
  5747. }
  5748. // ggml_diag_mask_inf
  5749. static struct ggml_tensor * ggml_diag_mask_inf_impl(
  5750. struct ggml_context * ctx,
  5751. struct ggml_tensor * a,
  5752. int n_past,
  5753. bool inplace) {
  5754. bool is_node = false;
  5755. if (a->grad) {
  5756. is_node = true;
  5757. }
  5758. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  5759. int32_t params[] = { n_past };
  5760. ggml_set_op_params(result, params, sizeof(params));
  5761. result->op = GGML_OP_DIAG_MASK_INF;
  5762. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5763. result->src[0] = a;
  5764. return result;
  5765. }
  5766. struct ggml_tensor * ggml_diag_mask_inf(
  5767. struct ggml_context * ctx,
  5768. struct ggml_tensor * a,
  5769. int n_past) {
  5770. return ggml_diag_mask_inf_impl(ctx, a, n_past, false);
  5771. }
  5772. struct ggml_tensor * ggml_diag_mask_inf_inplace(
  5773. struct ggml_context * ctx,
  5774. struct ggml_tensor * a,
  5775. int n_past) {
  5776. return ggml_diag_mask_inf_impl(ctx, a, n_past, true);
  5777. }
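// Illustrative note: ggml_diag_mask_inf() masks the entries above the diagonal (shifted by
// n_past) with -INFINITY, the standard causal mask applied to attention scores before
// ggml_soft_max().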
  5778. // ggml_diag_mask_zero
  5779. static struct ggml_tensor * ggml_diag_mask_zero_impl(
  5780. struct ggml_context * ctx,
  5781. struct ggml_tensor * a,
  5782. int n_past,
  5783. bool inplace) {
  5784. bool is_node = false;
  5785. if (a->grad) {
  5786. is_node = true;
  5787. }
  5788. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  5789. int32_t params[] = { n_past };
  5790. ggml_set_op_params(result, params, sizeof(params));
  5791. result->op = GGML_OP_DIAG_MASK_ZERO;
  5792. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5793. result->src[0] = a;
  5794. return result;
  5795. }
  5796. struct ggml_tensor * ggml_diag_mask_zero(
  5797. struct ggml_context * ctx,
  5798. struct ggml_tensor * a,
  5799. int n_past) {
  5800. return ggml_diag_mask_zero_impl(ctx, a, n_past, false);
  5801. }
  5802. struct ggml_tensor * ggml_diag_mask_zero_inplace(
  5803. struct ggml_context * ctx,
  5804. struct ggml_tensor * a,
  5805. int n_past) {
  5806. return ggml_diag_mask_zero_impl(ctx, a, n_past, true);
  5807. }
  5808. // ggml_soft_max
  5809. static struct ggml_tensor * ggml_soft_max_impl(
  5810. struct ggml_context * ctx,
  5811. struct ggml_tensor * a,
  5812. bool inplace) {
  5813. bool is_node = false;
  5814. if (a->grad) {
  5815. is_node = true;
  5816. }
  5817. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  5818. result->op = GGML_OP_SOFT_MAX;
  5819. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5820. result->src[0] = a;
  5821. return result;
  5822. }
  5823. struct ggml_tensor * ggml_soft_max(
  5824. struct ggml_context * ctx,
  5825. struct ggml_tensor * a) {
  5826. return ggml_soft_max_impl(ctx, a, false);
  5827. }
  5828. struct ggml_tensor * ggml_soft_max_inplace(
  5829. struct ggml_context * ctx,
  5830. struct ggml_tensor * a) {
  5831. return ggml_soft_max_impl(ctx, a, true);
  5832. }
  5833. // ggml_soft_max_back
  5834. static struct ggml_tensor * ggml_soft_max_back_impl(
  5835. struct ggml_context * ctx,
  5836. struct ggml_tensor * a,
  5837. struct ggml_tensor * b,
  5838. bool inplace) {
  5839. bool is_node = false;
  5840. if (a->grad || b->grad) {
5841. is_node = true; // TODO: implement backward pass
  5842. }
  5843. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  5844. result->op = GGML_OP_SOFT_MAX_BACK;
  5845. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5846. result->src[0] = a;
  5847. result->src[1] = b;
  5848. return result;
  5849. }
  5850. struct ggml_tensor * ggml_soft_max_back(
  5851. struct ggml_context * ctx,
  5852. struct ggml_tensor * a,
  5853. struct ggml_tensor * b) {
  5854. return ggml_soft_max_back_impl(ctx, a, b, false);
  5855. }
  5856. struct ggml_tensor * ggml_soft_max_back_inplace(
  5857. struct ggml_context * ctx,
  5858. struct ggml_tensor * a,
  5859. struct ggml_tensor * b) {
  5860. return ggml_soft_max_back_impl(ctx, a, b, true);
  5861. }
  5862. // ggml_rope
  5863. static struct ggml_tensor * ggml_rope_impl(
  5864. struct ggml_context * ctx,
  5865. struct ggml_tensor * a,
  5866. struct ggml_tensor * b,
  5867. int n_dims,
  5868. int mode,
  5869. int n_ctx,
  5870. float freq_base,
  5871. float freq_scale,
  5872. float xpos_base,
  5873. bool xpos_down,
  5874. bool inplace) {
  5875. GGML_ASSERT(ggml_is_vector(b));
  5876. GGML_ASSERT(b->type == GGML_TYPE_I32);
  5877. GGML_ASSERT(a->ne[2] == b->ne[0]);
  5878. bool is_node = false;
  5879. if (a->grad) {
  5880. is_node = true;
  5881. }
  5882. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  5883. int32_t params[8] = { /*n_past*/ 0, n_dims, mode, n_ctx };
  5884. memcpy(params + 4, &freq_base, sizeof(float));
  5885. memcpy(params + 5, &freq_scale, sizeof(float));
  5886. memcpy(params + 6, &xpos_base, sizeof(float));
  5887. memcpy(params + 7, &xpos_down, sizeof(bool));
  5888. ggml_set_op_params(result, params, sizeof(params));
  5889. result->op = GGML_OP_ROPE;
  5890. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5891. result->src[0] = a;
  5892. result->src[1] = b;
  5893. return result;
  5894. }
  5895. struct ggml_tensor * ggml_rope(
  5896. struct ggml_context * ctx,
  5897. struct ggml_tensor * a,
  5898. struct ggml_tensor * b,
  5899. int n_dims,
  5900. int mode,
  5901. int n_ctx) {
  5902. return ggml_rope_impl(ctx, a, b, n_dims, mode, n_ctx, 10000.0f, 1.0f, 0.0f, false, false);
  5903. }
  5904. struct ggml_tensor * ggml_rope_inplace(
  5905. struct ggml_context * ctx,
  5906. struct ggml_tensor * a,
  5907. struct ggml_tensor * b,
  5908. int n_dims,
  5909. int mode,
  5910. int n_ctx) {
  5911. return ggml_rope_impl(ctx, a, b, n_dims, mode, n_ctx, 10000.0f, 1.0f, 0.0f, false, true);
  5912. }
  5913. struct ggml_tensor * ggml_rope_custom(
  5914. struct ggml_context * ctx,
  5915. struct ggml_tensor * a,
  5916. struct ggml_tensor * b,
  5917. int n_dims,
  5918. int mode,
  5919. int n_ctx,
  5920. float freq_base,
  5921. float freq_scale) {
  5922. return ggml_rope_impl(ctx, a, b, n_dims, mode, n_ctx, freq_base, freq_scale, 0.0f, false, false);
  5923. }
  5924. struct ggml_tensor * ggml_rope_custom_inplace(
  5925. struct ggml_context * ctx,
  5926. struct ggml_tensor * a,
  5927. struct ggml_tensor * b,
  5928. int n_dims,
  5929. int mode,
  5930. int n_ctx,
  5931. float freq_base,
  5932. float freq_scale) {
  5933. return ggml_rope_impl(ctx, a, b, n_dims, mode, n_ctx, freq_base, freq_scale, 0.0f, false, true);
  5934. }
  5935. struct ggml_tensor * ggml_rope_xpos_inplace(
  5936. struct ggml_context * ctx,
  5937. struct ggml_tensor * a,
  5938. struct ggml_tensor * b,
  5939. int n_dims,
  5940. float base,
  5941. bool down) {
  5942. return ggml_rope_impl(ctx, a, b, n_dims, 0, 0, 10000.0f, 1.0f, base, down, true);
  5943. }
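// A minimal sketch of calling the custom-frequency wrapper above (hypothetical
// names; `cur` must satisfy cur->ne[2] == pos->ne[0], as asserted in
// ggml_rope_impl, so here cur is [n_embd_head, n_head, n_tokens]):
//
//   struct ggml_tensor * pos = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, n_tokens);
//   // ... fill pos with the absolute position of each token ...
//   struct ggml_tensor * cur_rot = ggml_rope_custom(ctx, cur, pos, n_rot, /*mode=*/0,
//                                                   /*n_ctx=*/0, 10000.0f, 1.0f);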
  5944. // ggml_rope_back
  5945. struct ggml_tensor * ggml_rope_back(
  5946. struct ggml_context * ctx,
  5947. struct ggml_tensor * a,
  5948. struct ggml_tensor * b,
  5949. int n_dims,
  5950. int mode,
  5951. int n_ctx,
  5952. float freq_base,
  5953. float freq_scale,
  5954. float xpos_base,
  5955. bool xpos_down) {
  5956. GGML_ASSERT(ggml_is_vector(b));
  5957. GGML_ASSERT(b->type == GGML_TYPE_I32);
  5958. GGML_ASSERT(a->ne[2] == b->ne[0]);
  5959. GGML_ASSERT((mode & 4) == 0 && "ggml_rope_back() for ChatGLM not implemented yet");
  5960. bool is_node = false;
  5961. if (a->grad) {
  5962. is_node = false; // TODO: implement backward
  5963. }
  5964. struct ggml_tensor * result = ggml_dup_tensor(ctx, a);
  5965. int32_t params[8] = { /*n_past*/ 0, n_dims, mode, n_ctx };
  5966. memcpy(params + 4, &freq_base, sizeof(float));
  5967. memcpy(params + 5, &freq_scale, sizeof(float));
  5968. memcpy(params + 6, &xpos_base, sizeof(float));
  5969. memcpy(params + 7, &xpos_down, sizeof(bool));
  5970. ggml_set_op_params(result, params, sizeof(params));
  5971. result->op = GGML_OP_ROPE_BACK;
  5972. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5973. result->src[0] = a;
  5974. result->src[1] = b;
  5975. return result;
  5976. }
  5977. // ggml_alibi
  5978. struct ggml_tensor * ggml_alibi(
  5979. struct ggml_context * ctx,
  5980. struct ggml_tensor * a,
  5981. int n_past,
  5982. int n_head,
  5983. float bias_max) {
  5984. GGML_ASSERT(n_past >= 0);
  5985. bool is_node = false;
  5986. if (a->grad) {
  5987. GGML_ASSERT(false); // TODO: implement backward
  5988. is_node = true;
  5989. }
5990. // TODO: when implementing backward, fix this:
  5991. //struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  5992. struct ggml_tensor * result = ggml_view_tensor(ctx, a);
  5993. int32_t op_params[3] = { n_past, n_head };
  5994. memcpy(op_params + 2, &bias_max, sizeof(float));
  5995. ggml_set_op_params(result, op_params, sizeof(op_params));
  5996. result->op = GGML_OP_ALIBI;
  5997. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5998. result->src[0] = a;
  5999. return result;
  6000. }
  6001. // ggml_clamp
  6002. struct ggml_tensor * ggml_clamp(
  6003. struct ggml_context * ctx,
  6004. struct ggml_tensor * a,
  6005. float min,
  6006. float max) {
  6007. bool is_node = false;
  6008. if (a->grad) {
  6009. GGML_ASSERT(false); // TODO: implement backward
  6010. is_node = true;
  6011. }
6012. // TODO: when implementing backward, fix this:
  6013. struct ggml_tensor * result = ggml_view_tensor(ctx, a);
  6014. float params[] = { min, max };
  6015. ggml_set_op_params(result, params, sizeof(params));
  6016. result->op = GGML_OP_CLAMP;
  6017. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  6018. result->src[0] = a;
  6019. return result;
  6020. }
  6021. // ggml_conv_1d
  6022. static int64_t ggml_calc_conv_output_size(int64_t ins, int64_t ks, int s, int p, int d) {
  6023. return (ins + 2 * p - d * (ks - 1) - 1) / s + 1;
  6024. }
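// Worked example of the output-size formula above: an input of length ins = 10
// with kernel ks = 3, stride s = 1, padding p = 1 and dilation d = 1 gives
// (10 + 2*1 - 1*(3 - 1) - 1)/1 + 1 = 10, i.e. the length is preserved.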
  6025. // im2col: [N, IC, IL] => [N, OL, IC*K]
  6026. // a: [OC,IC, K]
  6027. // b: [N, IC, IL]
  6028. // result: [N, OL, IC*K]
  6029. static struct ggml_tensor * ggml_conv_1d_stage_0(
  6030. struct ggml_context * ctx,
  6031. struct ggml_tensor * a,
  6032. struct ggml_tensor * b,
  6033. int s0,
  6034. int p0,
  6035. int d0) {
  6036. GGML_ASSERT(a->ne[1] == b->ne[1]);
  6037. bool is_node = false;
  6038. if (a->grad || b->grad) {
  6039. GGML_ASSERT(false); // TODO: implement backward
  6040. is_node = true;
  6041. }
  6042. const int64_t OL = ggml_calc_conv_output_size(b->ne[0], a->ne[0], s0, p0, d0);
  6043. const int64_t ne[4] = {
  6044. a->ne[1] * a->ne[0],
  6045. OL,
  6046. b->ne[2],
  6047. 1,
  6048. };
  6049. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F16, 4, ne);
  6050. int32_t params[] = { s0, p0, d0 };
  6051. ggml_set_op_params(result, params, sizeof(params));
  6052. result->op = GGML_OP_CONV_1D_STAGE_0;
  6053. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  6054. result->src[0] = a;
  6055. result->src[1] = b;
  6056. return result;
  6057. }
  6058. // ggml_conv_1d_stage_1
  6059. // gemm: [N, OC, OL] = [OC, IC * K] x [N*OL, IC * K]
  6060. // a: [OC, IC, K]
  6061. // b: [N, OL, IC * K]
  6062. // result: [N, OC, OL]
  6063. static struct ggml_tensor * ggml_conv_1d_stage_1(
  6064. struct ggml_context * ctx,
  6065. struct ggml_tensor * a,
  6066. struct ggml_tensor * b) {
  6067. bool is_node = false;
  6068. if (a->grad || b->grad) {
  6069. GGML_ASSERT(false); // TODO: implement backward
  6070. is_node = true;
  6071. }
  6072. const int64_t ne[4] = {
  6073. b->ne[1],
  6074. a->ne[2],
  6075. b->ne[2],
  6076. 1,
  6077. };
  6078. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);
  6079. result->op = GGML_OP_CONV_1D_STAGE_1;
  6080. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  6081. result->src[0] = a;
  6082. result->src[1] = b;
  6083. return result;
  6084. }
  6085. // ggml_conv_1d
  6086. GGML_API struct ggml_tensor * ggml_conv_1d(
  6087. struct ggml_context * ctx,
  6088. struct ggml_tensor * a,
  6089. struct ggml_tensor * b,
  6090. int s0,
  6091. int p0,
  6092. int d0) {
  6093. struct ggml_tensor * result = ggml_conv_1d_stage_0(ctx, a, b, s0, p0, d0);
  6094. result = ggml_conv_1d_stage_1(ctx, a, result);
  6095. return result;
  6096. }
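// Shape sketch for the two-stage decomposition used by ggml_conv_1d above
// (a sketch with hypothetical sizes; the kernel is created as f16 here, which
// matches the f16 im2col buffer produced by stage 0):
//
//   struct ggml_tensor * w = ggml_new_tensor_3d(ctx, GGML_TYPE_F16, /*K =*/ 3, /*IC=*/ 8, /*OC=*/16);
//   struct ggml_tensor * x = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, /*IL=*/64, /*IC=*/ 8, /*N =*/ 1);
//   struct ggml_tensor * y = ggml_conv_1d(ctx, w, x, /*s0=*/1, /*p0=*/1, /*d0=*/1);
//   // stage 0 builds an im2col buffer [N, OL, IC*K] = [1, 64, 24],
//   // stage 1 contracts it with w to give y = [N, OC, OL] = [1, 16, 64]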
  6097. // GGML_API struct ggml_tensor * ggml_conv_1d(
  6098. // struct ggml_context * ctx,
  6099. // struct ggml_tensor * a,
  6100. // struct ggml_tensor * b,
  6101. // int s0,
  6102. // int p0,
  6103. // int d0) {
  6104. // GGML_ASSERT(ggml_is_matrix(b));
  6105. // GGML_ASSERT(a->ne[1] == b->ne[1]);
  6106. // bool is_node = false;
  6107. // if (a->grad || b->grad) {
  6108. // GGML_ASSERT(false); // TODO: implement backward
  6109. // is_node = true;
  6110. // }
  6111. // const int64_t ne[4] = {
  6112. // ggml_calc_conv_output_size(b->ne[0], a->ne[0], s0, p0, d0),
  6113. // a->ne[2], 1, 1,
  6114. // };
  6115. // struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 2, ne);
  6116. // int32_t params[] = { s0, p0, d0 };
  6117. // ggml_set_op_params(result, params, sizeof(params));
  6118. // result->op = GGML_OP_CONV_1D;
  6119. // result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  6120. // result->src[0] = a;
  6121. // result->src[1] = b;
  6122. // return result;
  6123. // }
  6124. // ggml_conv_1d_ph
  6125. struct ggml_tensor* ggml_conv_1d_ph(
  6126. struct ggml_context * ctx,
  6127. struct ggml_tensor * a,
  6128. struct ggml_tensor * b,
  6129. int s,
  6130. int d) {
  6131. return ggml_conv_1d(ctx, a, b, s, a->ne[0] / 2, d);
  6132. }
  6133. // ggml_conv_transpose_1d
  6134. static int64_t ggml_calc_conv_transpose_1d_output_size(int64_t ins, int64_t ks, int s, int p, int d) {
  6135. return (ins - 1) * s - 2 * p + d * (ks - 1) + 1;
  6136. }
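// Worked example of the transposed-convolution length formula above:
// ins = 10, ks = 3, s = 2, p = 0, d = 1 gives (10 - 1)*2 - 0 + 1*(3 - 1) + 1 = 21.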
  6137. GGML_API struct ggml_tensor * ggml_conv_transpose_1d(
  6138. struct ggml_context * ctx,
  6139. struct ggml_tensor * a,
  6140. struct ggml_tensor * b,
  6141. int s0,
  6142. int p0,
  6143. int d0) {
  6144. GGML_ASSERT(ggml_is_matrix(b));
  6145. GGML_ASSERT(a->ne[2] == b->ne[1]);
  6146. GGML_ASSERT(a->ne[3] == 1);
  6147. GGML_ASSERT(p0 == 0);
  6148. GGML_ASSERT(d0 == 1);
  6149. bool is_node = false;
  6150. if (a->grad || b->grad) {
  6151. GGML_ASSERT(false); // TODO: implement backward
  6152. is_node = true;
  6153. }
  6154. const int64_t ne[4] = {
  6155. ggml_calc_conv_transpose_1d_output_size(b->ne[0], a->ne[0], s0, 0 /*p0*/, 1 /*d0*/),
  6156. a->ne[1], b->ne[2], 1,
  6157. };
  6158. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);
  6159. int32_t params[] = { s0, p0, d0 };
  6160. ggml_set_op_params(result, params, sizeof(params));
  6161. result->op = GGML_OP_CONV_TRANSPOSE_1D;
  6162. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  6163. result->src[0] = a;
  6164. result->src[1] = b;
  6165. return result;
  6166. }
  6167. // ggml_conv_2d
  6168. struct ggml_tensor * ggml_conv_2d(
  6169. struct ggml_context * ctx,
  6170. struct ggml_tensor * a,
  6171. struct ggml_tensor * b,
  6172. int s0,
  6173. int s1,
  6174. int p0,
  6175. int p1,
  6176. int d0,
  6177. int d1) {
  6178. GGML_ASSERT(a->ne[2] == b->ne[2]);
  6179. bool is_node = false;
  6180. if (a->grad || b->grad) {
  6181. GGML_ASSERT(false); // TODO: implement backward
  6182. is_node = true;
  6183. }
  6184. const int64_t ne[4] = {
  6185. ggml_calc_conv_output_size(b->ne[0], a->ne[0], s0, p0, d0),
  6186. ggml_calc_conv_output_size(b->ne[1], a->ne[1], s1, p1, d1),
  6187. a->ne[3], b->ne[3],
  6188. };
  6189. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);
  6190. int32_t params[] = { s0, s1, p0, p1, d0, d1 };
  6191. ggml_set_op_params(result, params, sizeof(params));
  6192. result->op = GGML_OP_CONV_2D;
  6193. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  6194. result->src[0] = a;
  6195. result->src[1] = b;
  6196. return result;
  6197. }
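// Shape sketch for the 2-D convolution above (hypothetical sizes; the
// ggml_new_tensor_4d arguments are ne[0]..ne[3]: width, height, channels,
// batch for the data and KW, KH, IC, OC for the kernel, which is typically f16):
//
//   struct ggml_tensor * w = ggml_new_tensor_4d(ctx, GGML_TYPE_F16,  3,  3, 4, 8);
//   struct ggml_tensor * x = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, 32, 32, 4, 1);
//   struct ggml_tensor * y = ggml_conv_2d(ctx, w, x, 1, 1, 1, 1, 1, 1); // y: [32, 32, 8, 1]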
  6198. // ggml_conv_2d_sk_p0
  6199. struct ggml_tensor * ggml_conv_2d_sk_p0(
  6200. struct ggml_context * ctx,
  6201. struct ggml_tensor * a,
  6202. struct ggml_tensor * b) {
  6203. return ggml_conv_2d(ctx, a, b, a->ne[0], a->ne[1], 0, 0, 1, 1);
  6204. }
  6205. // ggml_conv_2d_s1_ph
  6206. struct ggml_tensor * ggml_conv_2d_s1_ph(
  6207. struct ggml_context * ctx,
  6208. struct ggml_tensor * a,
  6209. struct ggml_tensor * b) {
  6210. return ggml_conv_2d(ctx, a, b, 1, 1, a->ne[0] / 2, a->ne[1] / 2, 1, 1);
  6211. }
  6212. // ggml_conv_transpose_2d_p0
  6213. static int64_t ggml_calc_conv_transpose_output_size(int64_t ins, int64_t ks, int s, int p) {
  6214. return (ins - 1) * s - 2 * p + ks;
  6215. }
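// Worked example of the formula above: ins = 16, ks = 2, s = 2, p = 0 gives
// (16 - 1)*2 - 0 + 2 = 32, i.e. a clean 2x spatial upsampling.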
  6216. struct ggml_tensor * ggml_conv_transpose_2d_p0(
  6217. struct ggml_context * ctx,
  6218. struct ggml_tensor * a,
  6219. struct ggml_tensor * b,
  6220. int stride) {
  6221. GGML_ASSERT(a->ne[3] == b->ne[2]);
  6222. bool is_node = false;
  6223. if (a->grad || b->grad) {
  6224. GGML_ASSERT(false); // TODO: implement backward
  6225. is_node = true;
  6226. }
  6227. const int64_t ne[4] = {
  6228. ggml_calc_conv_transpose_output_size(b->ne[0], a->ne[0], stride, 0 /*p0*/),
  6229. ggml_calc_conv_transpose_output_size(b->ne[1], a->ne[1], stride, 0 /*p1*/),
  6230. a->ne[2], b->ne[3],
  6231. };
  6232. struct ggml_tensor* result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);
  6233. ggml_set_op_params_i32(result, 0, stride);
  6234. result->op = GGML_OP_CONV_TRANSPOSE_2D;
  6235. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  6236. result->src[0] = a;
  6237. result->src[1] = b;
  6238. return result;
  6239. }
  6240. // ggml_pool_*
  6241. static int64_t ggml_calc_pool_output_size(int64_t ins, int ks, int s, int p) {
  6242. return (ins + 2 * p - ks) / s + 1;
  6243. }
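// Worked example of the pooling size formula above: ins = 32, ks = 2, s = 2,
// p = 0 gives (32 + 0 - 2)/2 + 1 = 16, the usual 2x2/stride-2 downsampling.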
  6244. // ggml_pool_1d
  6245. struct ggml_tensor * ggml_pool_1d(
  6246. struct ggml_context * ctx,
  6247. struct ggml_tensor * a,
  6248. enum ggml_op_pool op,
  6249. int k0,
  6250. int s0,
  6251. int p0) {
  6252. bool is_node = false;
  6253. if (a->grad) {
  6254. GGML_ASSERT(false); // TODO: implement backward
  6255. is_node = true;
  6256. }
  6257. const int64_t ne[3] = {
  6258. ggml_calc_pool_output_size(a->ne[0], k0, s0, p0),
  6259. a->ne[1],
  6260. };
  6261. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 2, ne);
  6262. int32_t params[] = { op, k0, s0, p0 };
  6263. ggml_set_op_params(result, params, sizeof(params));
  6264. result->op = GGML_OP_POOL_1D;
  6265. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  6266. result->src[0] = a;
  6267. return result;
  6268. }
  6269. // ggml_pool_2d
  6270. struct ggml_tensor * ggml_pool_2d(
  6271. struct ggml_context * ctx,
  6272. struct ggml_tensor * a,
  6273. enum ggml_op_pool op,
  6274. int k0,
  6275. int k1,
  6276. int s0,
  6277. int s1,
  6278. int p0,
  6279. int p1) {
  6280. bool is_node = false;
  6281. if (a->grad) {
  6282. GGML_ASSERT(false); // TODO: implement backward
  6283. is_node = true;
  6284. }
  6285. const int64_t ne[3] = {
  6286. ggml_calc_pool_output_size(a->ne[0], k0, s0, p0),
  6287. ggml_calc_pool_output_size(a->ne[1], k1, s1, p1),
  6288. a->ne[2],
  6289. };
  6290. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 3, ne);
  6291. int32_t params[] = { op, k0, k1, s0, s1, p0, p1 };
  6292. ggml_set_op_params(result, params, sizeof(params));
  6293. result->op = GGML_OP_POOL_2D;
  6294. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  6295. result->src[0] = a;
  6296. return result;
  6297. }
  6298. // ggml_upscale
  6299. static struct ggml_tensor * ggml_upscale_impl(
  6300. struct ggml_context * ctx,
  6301. struct ggml_tensor * a,
  6302. int scale_factor) {
  6303. bool is_node = false;
  6304. if (a->grad) {
  6305. GGML_ASSERT(false); // TODO: implement backward
  6306. is_node = true;
  6307. }
  6308. struct ggml_tensor * result = ggml_new_tensor_4d(ctx, a->type,
  6309. a->ne[0] * scale_factor,
  6310. a->ne[1] * scale_factor,
  6311. a->ne[2], a->ne[3]);
  6312. result->op = GGML_OP_UPSCALE;
  6313. result->op_params[0] = scale_factor;
  6314. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  6315. result->src[0] = a;
  6316. result->src[1] = NULL;
  6317. return result;
  6318. }
  6319. struct ggml_tensor * ggml_upscale(
  6320. struct ggml_context * ctx,
  6321. struct ggml_tensor * a,
  6322. int scale_factor) {
  6323. return ggml_upscale_impl(ctx, a, scale_factor);
  6324. }
  6325. // ggml_flash_attn
  6326. struct ggml_tensor * ggml_flash_attn(
  6327. struct ggml_context * ctx,
  6328. struct ggml_tensor * q,
  6329. struct ggml_tensor * k,
  6330. struct ggml_tensor * v,
  6331. bool masked) {
  6332. GGML_ASSERT(ggml_can_mul_mat(k, q));
  6333. // TODO: check if vT can be multiplied by (k*qT)
  6334. bool is_node = false;
  6335. if (q->grad || k->grad || v->grad) {
  6336. is_node = true;
  6337. }
  6338. //struct ggml_tensor * result = ggml_dup_tensor(ctx, q);
  6339. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, q->n_dims, q->ne);
  6340. int32_t t = masked ? 1 : 0;
  6341. ggml_set_op_params(result, &t, sizeof(t));
  6342. result->op = GGML_OP_FLASH_ATTN;
  6343. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  6344. result->src[0] = q;
  6345. result->src[1] = k;
  6346. result->src[2] = v;
  6347. return result;
  6348. }
  6349. // ggml_flash_ff
  6350. struct ggml_tensor * ggml_flash_ff(
  6351. struct ggml_context * ctx,
  6352. struct ggml_tensor * a,
  6353. struct ggml_tensor * b0,
  6354. struct ggml_tensor * b1,
  6355. struct ggml_tensor * c0,
  6356. struct ggml_tensor * c1) {
  6357. GGML_ASSERT(ggml_can_mul_mat(b0, a));
  6358. // TODO: more checks
  6359. bool is_node = false;
  6360. if (a->grad || b0->grad || b1->grad || c0->grad || c1->grad) {
  6361. is_node = true;
  6362. }
  6363. //struct ggml_tensor * result = ggml_dup_tensor(ctx, a);
  6364. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, a->n_dims, a->ne);
  6365. result->op = GGML_OP_FLASH_FF;
  6366. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  6367. result->src[0] = a;
  6368. result->src[1] = b0;
  6369. result->src[2] = b1;
  6370. result->src[3] = c0;
  6371. result->src[4] = c1;
  6372. return result;
  6373. }
  6374. // ggml_flash_attn_back
  6375. struct ggml_tensor * ggml_flash_attn_back(
  6376. struct ggml_context * ctx,
  6377. struct ggml_tensor * q,
  6378. struct ggml_tensor * k,
  6379. struct ggml_tensor * v,
  6380. struct ggml_tensor * d,
  6381. bool masked) {
  6382. GGML_ASSERT(ggml_can_mul_mat(k, q));
  6383. // TODO: check if vT can be multiplied by (k*qT)
  6384. // d shape [D,N,ne2,ne3]
  6385. // q shape [D,N,ne2,ne3]
  6386. // k shape [D,M,kvne2,ne3]
  6387. // v shape [M,D,kvne2,ne3]
  6388. const int64_t D = q->ne[0];
  6389. const int64_t N = q->ne[1];
  6390. const int64_t M = k->ne[1];
  6391. const int64_t ne2 = q->ne[2];
  6392. const int64_t ne3 = q->ne[3];
  6393. const int64_t kvne2 = k->ne[2];
  6394. GGML_ASSERT(k->ne[0] == D);
  6395. GGML_ASSERT(v->ne[0] == M);
  6396. GGML_ASSERT(v->ne[1] == D);
  6397. GGML_ASSERT(d->ne[0] == D);
  6398. GGML_ASSERT(d->ne[1] == N);
  6399. GGML_ASSERT(k->ne[2] == kvne2);
  6400. GGML_ASSERT(k->ne[3] == ne3);
  6401. GGML_ASSERT(v->ne[2] == kvne2);
  6402. GGML_ASSERT(v->ne[3] == ne3);
  6403. GGML_ASSERT(d->ne[2] == ne2);
  6404. GGML_ASSERT(d->ne[3] == ne3);
  6405. GGML_ASSERT(ne2 % kvne2 == 0);
  6406. bool is_node = false;
  6407. if (q->grad || k->grad || v->grad) {
6408. // when this operation is used (in the backward pass) these grads are already set;
6409. // we don't want to create a (big) grad of our result, so is_node stays false.
  6410. is_node = false;
  6411. }
6412. // store the gradients of q, k and v as contiguous tensors concatenated in result.
  6413. // note: v and gradv are actually transposed, i.e. v->ne[0] != D.
  6414. const int64_t elem_q = ggml_nelements(q);
  6415. const int64_t elem_k = ggml_nelements(k);
  6416. const int64_t elem_v = ggml_nelements(v);
  6417. enum ggml_type result_type = GGML_TYPE_F32;
  6418. GGML_ASSERT(ggml_blck_size(result_type) == 1);
  6419. const size_t tsize = ggml_type_size(result_type);
  6420. const size_t offs_q = 0;
  6421. const size_t offs_k = offs_q + GGML_PAD(elem_q * tsize, GGML_MEM_ALIGN);
  6422. const size_t offs_v = offs_k + GGML_PAD(elem_k * tsize, GGML_MEM_ALIGN);
  6423. const size_t end = offs_v + GGML_PAD(elem_v * tsize, GGML_MEM_ALIGN);
  6424. const size_t nelements = (end + tsize - 1)/tsize;
  6425. struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nelements);
  6426. int32_t masked_i = masked ? 1 : 0;
  6427. ggml_set_op_params(result, &masked_i, sizeof(masked_i));
  6428. result->op = GGML_OP_FLASH_ATTN_BACK;
  6429. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  6430. result->src[0] = q;
  6431. result->src[1] = k;
  6432. result->src[2] = v;
  6433. result->src[3] = d;
  6434. return result;
  6435. }
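// The result above packs grad(q), grad(k) and grad(v) back to back, each
// padded to GGML_MEM_ALIGN. A minimal sketch of how a caller can recover the
// three gradients with 1-D views (mirroring offs_q/offs_k/offs_v computed above):
//
//   const size_t offs_q = 0;
//   const size_t offs_k = offs_q + GGML_PAD(ggml_nelements(q)*sizeof(float), GGML_MEM_ALIGN);
//   const size_t offs_v = offs_k + GGML_PAD(ggml_nelements(k)*sizeof(float), GGML_MEM_ALIGN);
//   struct ggml_tensor * grad_q = ggml_view_1d(ctx, result, ggml_nelements(q), offs_q);
//   struct ggml_tensor * grad_k = ggml_view_1d(ctx, result, ggml_nelements(k), offs_k);
//   struct ggml_tensor * grad_v = ggml_view_1d(ctx, result, ggml_nelements(v), offs_v);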
  6436. // ggml_win_part
  6437. struct ggml_tensor * ggml_win_part(
  6438. struct ggml_context * ctx,
  6439. struct ggml_tensor * a,
  6440. int w) {
  6441. GGML_ASSERT(a->ne[3] == 1);
  6442. GGML_ASSERT(a->type == GGML_TYPE_F32);
  6443. bool is_node = false;
  6444. if (a->grad) {
  6445. GGML_ASSERT(false); // TODO: implement backward
  6446. is_node = true;
  6447. }
  6448. // padding
  6449. const int px = (w - a->ne[1]%w)%w;
  6450. const int py = (w - a->ne[2]%w)%w;
  6451. const int npx = (px + a->ne[1])/w;
  6452. const int npy = (py + a->ne[2])/w;
  6453. const int np = npx*npy;
  6454. const int64_t ne[4] = { a->ne[0], w, w, np, };
  6455. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);
  6456. int32_t params[] = { npx, npy, w };
  6457. ggml_set_op_params(result, params, sizeof(params));
  6458. result->op = GGML_OP_WIN_PART;
  6459. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  6460. result->src[0] = a;
  6461. return result;
  6462. }
  6463. // ggml_win_unpart
  6464. struct ggml_tensor * ggml_win_unpart(
  6465. struct ggml_context * ctx,
  6466. struct ggml_tensor * a,
  6467. int w0,
  6468. int h0,
  6469. int w) {
  6470. GGML_ASSERT(a->type == GGML_TYPE_F32);
  6471. bool is_node = false;
  6472. if (a->grad) {
  6473. GGML_ASSERT(false); // TODO: implement backward
  6474. is_node = true;
  6475. }
  6476. const int64_t ne[4] = { a->ne[0], w0, h0, 1, };
  6477. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 3, ne);
  6478. int32_t params[] = { w };
  6479. ggml_set_op_params(result, params, sizeof(params));
  6480. result->op = GGML_OP_WIN_UNPART;
  6481. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  6482. result->src[0] = a;
  6483. return result;
  6484. }
  6485. // ggml_get_rel_pos
  6486. struct ggml_tensor * ggml_get_rel_pos(
  6487. struct ggml_context * ctx,
  6488. struct ggml_tensor * a,
  6489. int qh,
  6490. int kh) {
  6491. GGML_ASSERT(qh == kh);
  6492. GGML_ASSERT(2*MAX(qh, kh) - 1 == a->ne[1]);
  6493. bool is_node = false;
  6494. if (a->grad) {
  6495. GGML_ASSERT(false); // TODO: implement backward
  6496. is_node = true;
  6497. }
  6498. const int64_t ne[4] = { a->ne[0], kh, qh, 1, };
  6499. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F16, 3, ne);
  6500. result->op = GGML_OP_GET_REL_POS;
  6501. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  6502. result->src[0] = a;
  6503. result->src[1] = NULL;
  6504. return result;
  6505. }
  6506. // ggml_add_rel_pos
  6507. static struct ggml_tensor * ggml_add_rel_pos_impl(
  6508. struct ggml_context * ctx,
  6509. struct ggml_tensor * a,
  6510. struct ggml_tensor * pw,
  6511. struct ggml_tensor * ph,
  6512. bool inplace) {
  6513. GGML_ASSERT(ggml_are_same_shape(pw, ph));
  6514. GGML_ASSERT(ggml_is_contiguous(a));
  6515. GGML_ASSERT(ggml_is_contiguous(pw));
  6516. GGML_ASSERT(ggml_is_contiguous(ph));
  6517. GGML_ASSERT(ph->type == GGML_TYPE_F32);
  6518. GGML_ASSERT(pw->type == GGML_TYPE_F32);
  6519. GGML_ASSERT(pw->ne[3] == a->ne[2]);
  6520. GGML_ASSERT(pw->ne[0]*pw->ne[0] == a->ne[0]);
  6521. GGML_ASSERT(pw->ne[1]*pw->ne[2] == a->ne[1]);
  6522. bool is_node = false;
  6523. if (!inplace && (a->grad || pw->grad || ph->grad)) {
  6524. is_node = true;
  6525. }
  6526. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  6527. ggml_set_op_params_i32(result, 0, inplace ? 1 : 0);
  6528. result->op = GGML_OP_ADD_REL_POS;
  6529. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  6530. result->src[0] = a;
  6531. result->src[1] = pw;
  6532. result->src[2] = ph;
  6533. return result;
  6534. }
  6535. struct ggml_tensor * ggml_add_rel_pos(
  6536. struct ggml_context * ctx,
  6537. struct ggml_tensor * a,
  6538. struct ggml_tensor * pw,
  6539. struct ggml_tensor * ph) {
  6540. return ggml_add_rel_pos_impl(ctx, a, pw, ph, false);
  6541. }
  6542. struct ggml_tensor * ggml_add_rel_pos_inplace(
  6543. struct ggml_context * ctx,
  6544. struct ggml_tensor * a,
  6545. struct ggml_tensor * pw,
  6546. struct ggml_tensor * ph) {
  6547. return ggml_add_rel_pos_impl(ctx, a, pw, ph, true);
  6548. }
6549. // ggml_unary
  6550. static struct ggml_tensor * ggml_unary_impl(
  6551. struct ggml_context * ctx,
  6552. struct ggml_tensor * a,
  6553. enum ggml_unary_op op,
  6554. bool inplace) {
  6555. bool is_node = false;
  6556. if (!inplace && (a->grad)) {
  6557. is_node = true;
  6558. }
  6559. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  6560. ggml_set_op_params_i32(result, 0, (int32_t) op);
  6561. result->op = GGML_OP_UNARY;
  6562. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  6563. result->src[0] = a;
  6564. return result;
  6565. }
  6566. struct ggml_tensor * ggml_unary(
  6567. struct ggml_context * ctx,
  6568. struct ggml_tensor * a,
  6569. enum ggml_unary_op op) {
  6570. return ggml_unary_impl(ctx, a, op, false);
  6571. }
  6572. struct ggml_tensor * ggml_unary_inplace(
  6573. struct ggml_context * ctx,
  6574. struct ggml_tensor * a,
  6575. enum ggml_unary_op op) {
  6576. return ggml_unary_impl(ctx, a, op, true);
  6577. }
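// Illustrative sketch: ggml_unary/ggml_unary_inplace are the generic entry
// points behind the named unary helpers, e.g. (GGML_UNARY_OP_GELU is the enum
// value declared in ggml.h):
//
//   struct ggml_tensor * y = ggml_unary(ctx, x, GGML_UNARY_OP_GELU); // same op as ggml_gelu(ctx, x)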
  6578. // ggml_map_unary
  6579. static struct ggml_tensor * ggml_map_unary_impl_f32(
  6580. struct ggml_context * ctx,
  6581. struct ggml_tensor * a,
  6582. const ggml_unary_op_f32_t fun,
  6583. bool inplace) {
  6584. bool is_node = false;
  6585. if (!inplace && a->grad) {
  6586. is_node = true;
  6587. }
  6588. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  6589. ggml_set_op_params(result, (const void *) &fun, sizeof(fun));
  6590. result->op = GGML_OP_MAP_UNARY;
  6591. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  6592. result->src[0] = a;
  6593. return result;
  6594. }
  6595. struct ggml_tensor * ggml_map_unary_f32(
  6596. struct ggml_context * ctx,
  6597. struct ggml_tensor * a,
  6598. const ggml_unary_op_f32_t fun) {
  6599. return ggml_map_unary_impl_f32(ctx, a, fun, false);
  6600. }
  6601. struct ggml_tensor * ggml_map_unary_inplace_f32(
  6602. struct ggml_context * ctx,
  6603. struct ggml_tensor * a,
  6604. const ggml_unary_op_f32_t fun) {
  6605. return ggml_map_unary_impl_f32(ctx, a, fun, true);
  6606. }
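// A minimal sketch of a ggml_unary_op_f32_t callback for the wrappers above
// (hypothetical function; the callback receives (n, dst, src) and is applied
// row by row at compute time):
//
//   static void my_relu_f32(const int n, float * dst, const float * src) {
//       for (int i = 0; i < n; ++i) {
//           dst[i] = src[i] > 0.0f ? src[i] : 0.0f;
//       }
//   }
//   ...
//   struct ggml_tensor * y = ggml_map_unary_f32(ctx, x, my_relu_f32);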
  6607. // ggml_map_binary
  6608. static struct ggml_tensor * ggml_map_binary_impl_f32(
  6609. struct ggml_context * ctx,
  6610. struct ggml_tensor * a,
  6611. struct ggml_tensor * b,
  6612. const ggml_binary_op_f32_t fun,
  6613. bool inplace) {
  6614. GGML_ASSERT(ggml_are_same_shape(a, b));
  6615. bool is_node = false;
  6616. if (!inplace && (a->grad || b->grad)) {
  6617. is_node = true;
  6618. }
  6619. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  6620. ggml_set_op_params(result, (const void *) &fun, sizeof(fun));
  6621. result->op = GGML_OP_MAP_BINARY;
  6622. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  6623. result->src[0] = a;
  6624. result->src[1] = b;
  6625. return result;
  6626. }
  6627. struct ggml_tensor * ggml_map_binary_f32(
  6628. struct ggml_context * ctx,
  6629. struct ggml_tensor * a,
  6630. struct ggml_tensor * b,
  6631. const ggml_binary_op_f32_t fun) {
  6632. return ggml_map_binary_impl_f32(ctx, a, b, fun, false);
  6633. }
  6634. struct ggml_tensor * ggml_map_binary_inplace_f32(
  6635. struct ggml_context * ctx,
  6636. struct ggml_tensor * a,
  6637. struct ggml_tensor * b,
  6638. const ggml_binary_op_f32_t fun) {
  6639. return ggml_map_binary_impl_f32(ctx, a, b, fun, true);
  6640. }
  6641. // ggml_map_custom1_f32
  6642. static struct ggml_tensor * ggml_map_custom1_impl_f32(
  6643. struct ggml_context * ctx,
  6644. struct ggml_tensor * a,
  6645. const ggml_custom1_op_f32_t fun,
  6646. bool inplace) {
  6647. bool is_node = false;
  6648. if (!inplace && a->grad) {
  6649. is_node = true;
  6650. }
  6651. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  6652. ggml_set_op_params(result, (const void *) &fun, sizeof(fun));
  6653. result->op = GGML_OP_MAP_CUSTOM1_F32;
  6654. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  6655. result->src[0] = a;
  6656. return result;
  6657. }
  6658. struct ggml_tensor * ggml_map_custom1_f32(
  6659. struct ggml_context * ctx,
  6660. struct ggml_tensor * a,
  6661. const ggml_custom1_op_f32_t fun) {
  6662. return ggml_map_custom1_impl_f32(ctx, a, fun, false);
  6663. }
  6664. struct ggml_tensor * ggml_map_custom1_inplace_f32(
  6665. struct ggml_context * ctx,
  6666. struct ggml_tensor * a,
  6667. const ggml_custom1_op_f32_t fun) {
  6668. return ggml_map_custom1_impl_f32(ctx, a, fun, true);
  6669. }
  6670. // ggml_map_custom2_f32
  6671. static struct ggml_tensor * ggml_map_custom2_impl_f32(
  6672. struct ggml_context * ctx,
  6673. struct ggml_tensor * a,
  6674. struct ggml_tensor * b,
  6675. const ggml_custom2_op_f32_t fun,
  6676. bool inplace) {
  6677. bool is_node = false;
  6678. if (!inplace && (a->grad || b->grad)) {
  6679. is_node = true;
  6680. }
  6681. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  6682. ggml_set_op_params(result, (const void *) &fun, sizeof(fun));
  6683. result->op = GGML_OP_MAP_CUSTOM2_F32;
  6684. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  6685. result->src[0] = a;
  6686. result->src[1] = b;
  6687. return result;
  6688. }
  6689. struct ggml_tensor * ggml_map_custom2_f32(
  6690. struct ggml_context * ctx,
  6691. struct ggml_tensor * a,
  6692. struct ggml_tensor * b,
  6693. const ggml_custom2_op_f32_t fun) {
  6694. return ggml_map_custom2_impl_f32(ctx, a, b, fun, false);
  6695. }
  6696. struct ggml_tensor * ggml_map_custom2_inplace_f32(
  6697. struct ggml_context * ctx,
  6698. struct ggml_tensor * a,
  6699. struct ggml_tensor * b,
  6700. const ggml_custom2_op_f32_t fun) {
  6701. return ggml_map_custom2_impl_f32(ctx, a, b, fun, true);
  6702. }
  6703. // ggml_map_custom3_f32
  6704. static struct ggml_tensor * ggml_map_custom3_impl_f32(
  6705. struct ggml_context * ctx,
  6706. struct ggml_tensor * a,
  6707. struct ggml_tensor * b,
  6708. struct ggml_tensor * c,
  6709. const ggml_custom3_op_f32_t fun,
  6710. bool inplace) {
  6711. bool is_node = false;
  6712. if (!inplace && (a->grad || b->grad || c->grad)) {
  6713. is_node = true;
  6714. }
  6715. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  6716. ggml_set_op_params(result, (const void *) &fun, sizeof(fun));
  6717. result->op = GGML_OP_MAP_CUSTOM3_F32;
  6718. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  6719. result->src[0] = a;
  6720. result->src[1] = b;
  6721. result->src[2] = c;
  6722. return result;
  6723. }
  6724. struct ggml_tensor * ggml_map_custom3_f32(
  6725. struct ggml_context * ctx,
  6726. struct ggml_tensor * a,
  6727. struct ggml_tensor * b,
  6728. struct ggml_tensor * c,
  6729. const ggml_custom3_op_f32_t fun) {
  6730. return ggml_map_custom3_impl_f32(ctx, a, b, c, fun, false);
  6731. }
  6732. struct ggml_tensor * ggml_map_custom3_inplace_f32(
  6733. struct ggml_context * ctx,
  6734. struct ggml_tensor * a,
  6735. struct ggml_tensor * b,
  6736. struct ggml_tensor * c,
  6737. const ggml_custom3_op_f32_t fun) {
  6738. return ggml_map_custom3_impl_f32(ctx, a, b, c, fun, true);
  6739. }
  6740. // ggml_map_custom1
  6741. struct ggml_map_custom1_op_params {
  6742. ggml_custom1_op_t fun;
  6743. int n_tasks;
  6744. void * userdata;
  6745. };
  6746. static struct ggml_tensor * ggml_map_custom1_impl(
  6747. struct ggml_context * ctx,
  6748. struct ggml_tensor * a,
  6749. const ggml_custom1_op_t fun,
  6750. int n_tasks,
  6751. void * userdata,
  6752. bool inplace) {
  6753. GGML_ASSERT(n_tasks == GGML_N_TASKS_MAX || n_tasks > 0);
  6754. bool is_node = false;
  6755. if (!inplace && a->grad) {
  6756. is_node = true;
  6757. }
  6758. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  6759. struct ggml_map_custom1_op_params params = {
  6760. /*.fun =*/ fun,
  6761. /*.n_tasks =*/ n_tasks,
  6762. /*.userdata =*/ userdata
  6763. };
  6764. ggml_set_op_params(result, (const void *) &params, sizeof(params));
  6765. result->op = GGML_OP_MAP_CUSTOM1;
  6766. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  6767. result->src[0] = a;
  6768. return result;
  6769. }
  6770. struct ggml_tensor * ggml_map_custom1(
  6771. struct ggml_context * ctx,
  6772. struct ggml_tensor * a,
  6773. const ggml_custom1_op_t fun,
  6774. int n_tasks,
  6775. void * userdata) {
  6776. return ggml_map_custom1_impl(ctx, a, fun, n_tasks, userdata, false);
  6777. }
  6778. struct ggml_tensor * ggml_map_custom1_inplace(
  6779. struct ggml_context * ctx,
  6780. struct ggml_tensor * a,
  6781. const ggml_custom1_op_t fun,
  6782. int n_tasks,
  6783. void * userdata) {
  6784. return ggml_map_custom1_impl(ctx, a, fun, n_tasks, userdata, true);
  6785. }
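// A minimal sketch of a ggml_custom1_op_t callback for ggml_map_custom1 above.
// The callback is invoked once per worker thread with (ith, nth) and should
// only touch its own slice (hypothetical helper; assumes contiguous f32 tensors):
//
//   static void my_scale_op(struct ggml_tensor * dst, const struct ggml_tensor * src,
//                           int ith, int nth, void * userdata) {
//       const float factor = *(const float *) userdata;
//       const int64_t n   = ggml_nelements(dst);
//       const int64_t per = (n + nth - 1)/nth;
//       const int64_t i0  = per*ith;
//       const int64_t i1  = MIN(i0 + per, n);
//       const float * x = (const float *) src->data;
//       float       * y = (float       *) dst->data;
//       for (int64_t i = i0; i < i1; ++i) {
//           y[i] = factor*x[i];
//       }
//   }
//   ...
//   static float factor = 0.5f;
//   struct ggml_tensor * y = ggml_map_custom1(ctx, x, my_scale_op, GGML_N_TASKS_MAX, &factor);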
  6786. // ggml_map_custom2
  6787. struct ggml_map_custom2_op_params {
  6788. ggml_custom2_op_t fun;
  6789. int n_tasks;
  6790. void * userdata;
  6791. };
  6792. static struct ggml_tensor * ggml_map_custom2_impl(
  6793. struct ggml_context * ctx,
  6794. struct ggml_tensor * a,
  6795. struct ggml_tensor * b,
  6796. const ggml_custom2_op_t fun,
  6797. int n_tasks,
  6798. void * userdata,
  6799. bool inplace) {
  6800. GGML_ASSERT(n_tasks == GGML_N_TASKS_MAX || n_tasks > 0);
  6801. bool is_node = false;
  6802. if (!inplace && (a->grad || b->grad)) {
  6803. is_node = true;
  6804. }
  6805. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  6806. struct ggml_map_custom2_op_params params = {
  6807. /*.fun =*/ fun,
  6808. /*.n_tasks =*/ n_tasks,
  6809. /*.userdata =*/ userdata
  6810. };
  6811. ggml_set_op_params(result, (const void *) &params, sizeof(params));
  6812. result->op = GGML_OP_MAP_CUSTOM2;
  6813. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  6814. result->src[0] = a;
  6815. result->src[1] = b;
  6816. return result;
  6817. }
  6818. struct ggml_tensor * ggml_map_custom2(
  6819. struct ggml_context * ctx,
  6820. struct ggml_tensor * a,
  6821. struct ggml_tensor * b,
  6822. const ggml_custom2_op_t fun,
  6823. int n_tasks,
  6824. void * userdata) {
  6825. return ggml_map_custom2_impl(ctx, a, b, fun, n_tasks, userdata, false);
  6826. }
  6827. struct ggml_tensor * ggml_map_custom2_inplace(
  6828. struct ggml_context * ctx,
  6829. struct ggml_tensor * a,
  6830. struct ggml_tensor * b,
  6831. const ggml_custom2_op_t fun,
  6832. int n_tasks,
  6833. void * userdata) {
  6834. return ggml_map_custom2_impl(ctx, a, b, fun, n_tasks, userdata, true);
  6835. }
  6836. // ggml_map_custom3
  6837. struct ggml_map_custom3_op_params {
  6838. ggml_custom3_op_t fun;
  6839. int n_tasks;
  6840. void * userdata;
  6841. };
  6842. static struct ggml_tensor * ggml_map_custom3_impl(
  6843. struct ggml_context * ctx,
  6844. struct ggml_tensor * a,
  6845. struct ggml_tensor * b,
  6846. struct ggml_tensor * c,
  6847. const ggml_custom3_op_t fun,
  6848. int n_tasks,
  6849. void * userdata,
  6850. bool inplace) {
  6851. GGML_ASSERT(n_tasks == GGML_N_TASKS_MAX || n_tasks > 0);
  6852. bool is_node = false;
  6853. if (!inplace && (a->grad || b->grad || c->grad)) {
  6854. is_node = true;
  6855. }
  6856. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  6857. struct ggml_map_custom3_op_params params = {
  6858. /*.fun =*/ fun,
  6859. /*.n_tasks =*/ n_tasks,
  6860. /*.userdata =*/ userdata
  6861. };
  6862. ggml_set_op_params(result, (const void *) &params, sizeof(params));
  6863. result->op = GGML_OP_MAP_CUSTOM3;
  6864. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  6865. result->src[0] = a;
  6866. result->src[1] = b;
  6867. result->src[2] = c;
  6868. return result;
  6869. }
  6870. struct ggml_tensor * ggml_map_custom3(
  6871. struct ggml_context * ctx,
  6872. struct ggml_tensor * a,
  6873. struct ggml_tensor * b,
  6874. struct ggml_tensor * c,
  6875. const ggml_custom3_op_t fun,
  6876. int n_tasks,
  6877. void * userdata) {
  6878. return ggml_map_custom3_impl(ctx, a, b, c, fun, n_tasks, userdata, false);
  6879. }
  6880. struct ggml_tensor * ggml_map_custom3_inplace(
  6881. struct ggml_context * ctx,
  6882. struct ggml_tensor * a,
  6883. struct ggml_tensor * b,
  6884. struct ggml_tensor * c,
  6885. const ggml_custom3_op_t fun,
  6886. int n_tasks,
  6887. void * userdata) {
  6888. return ggml_map_custom3_impl(ctx, a, b, c, fun, n_tasks, userdata, true);
  6889. }
  6890. // ggml_cross_entropy_loss
  6891. struct ggml_tensor * ggml_cross_entropy_loss(
  6892. struct ggml_context * ctx,
  6893. struct ggml_tensor * a,
  6894. struct ggml_tensor * b) {
  6895. GGML_ASSERT(ggml_are_same_shape(a, b));
  6896. bool is_node = false;
  6897. if (a->grad || b->grad) {
  6898. is_node = true;
  6899. }
  6900. struct ggml_tensor * result = ggml_new_tensor_1d(ctx, a->type, 1);
  6901. result->op = GGML_OP_CROSS_ENTROPY_LOSS;
  6902. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  6903. result->src[0] = a;
  6904. result->src[1] = b;
  6905. return result;
  6906. }
  6907. // ggml_cross_entropy_loss_back
  6908. struct ggml_tensor * ggml_cross_entropy_loss_back(
  6909. struct ggml_context * ctx,
  6910. struct ggml_tensor * a,
  6911. struct ggml_tensor * b,
  6912. struct ggml_tensor * c) {
  6913. GGML_ASSERT(ggml_are_same_shape(a, b));
  6914. GGML_ASSERT(ggml_is_scalar(c));
  6915. struct ggml_tensor * result = ggml_dup_tensor(ctx, a);
  6916. result->op = GGML_OP_CROSS_ENTROPY_LOSS_BACK;
  6917. result->grad = NULL;
  6918. result->src[0] = a;
  6919. result->src[1] = b;
  6920. result->src[2] = c;
  6921. return result;
  6922. }
  6923. ////////////////////////////////////////////////////////////////////////////////
  6924. void ggml_set_param(
  6925. struct ggml_context * ctx,
  6926. struct ggml_tensor * tensor) {
  6927. tensor->is_param = true;
  6928. GGML_ASSERT(tensor->grad == NULL);
  6929. tensor->grad = ggml_dup_tensor(ctx, tensor);
  6930. }
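// A minimal usage sketch (hypothetical sizes): parameters are marked before
// the backward graph is built (e.g. with ggml_build_backward) so gradients can
// be propagated into them:
//
//   struct ggml_tensor * w = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_in, n_out);
//   ggml_set_param(ctx, w); // marks w as trainable and allocates w->grad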
  6931. // ggml_compute_forward_dup
  6932. static void ggml_compute_forward_dup_same_cont(
  6933. const struct ggml_compute_params * params,
  6934. const struct ggml_tensor * src0,
  6935. struct ggml_tensor * dst) {
  6936. GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));
  6937. GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));
  6938. GGML_ASSERT(src0->type == dst->type);
  6939. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  6940. return;
  6941. }
  6942. const size_t nb00 = src0->nb[0];
  6943. const size_t nb0 = dst->nb[0];
  6944. const int ith = params->ith; // thread index
  6945. const int nth = params->nth; // number of threads
  6946. // parallelize by elements
  6947. const int ne = ggml_nelements(dst);
  6948. const int dr = (ne + nth - 1) / nth;
  6949. const int ie0 = dr * ith;
  6950. const int ie1 = MIN(ie0 + dr, ne);
  6951. if (ie0 < ie1) {
  6952. memcpy(
  6953. ((char *) dst->data + ie0*nb0),
  6954. ((char *) src0->data + ie0*nb00),
  6955. (ie1 - ie0) * ggml_type_size(src0->type));
  6956. }
  6957. }
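// Worked example of the element split above: ne = 1000 elements and nth = 4
// threads give dr = 250, so thread 0 copies [0, 250), thread 1 [250, 500),
// thread 2 [500, 750) and thread 3 [750, 1000).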
  6958. static void ggml_compute_forward_dup_f16(
  6959. const struct ggml_compute_params * params,
  6960. const struct ggml_tensor * src0,
  6961. struct ggml_tensor * dst) {
  6962. GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));
  6963. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  6964. return;
  6965. }
  6966. GGML_TENSOR_UNARY_OP_LOCALS
  6967. const int ith = params->ith; // thread index
  6968. const int nth = params->nth; // number of threads
  6969. if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst) && src0->type == dst->type) {
  6970. ggml_compute_forward_dup_same_cont(params, src0, dst);
  6971. return;
  6972. }
  6973. // parallelize by rows
  6974. const int nr = ne01;
  6975. // number of rows per thread
  6976. const int dr = (nr + nth - 1) / nth;
  6977. // row range for this thread
  6978. const int ir0 = dr * ith;
  6979. const int ir1 = MIN(ir0 + dr, nr);
  6980. if (src0->type == dst->type &&
  6981. ne00 == ne0 &&
  6982. nb00 == ggml_type_size(src0->type) && nb0 == ggml_type_size(dst->type)) {
  6983. // copy by rows
  6984. const size_t rs = ne00*nb00;
  6985. for (int64_t i03 = 0; i03 < ne03; i03++) {
  6986. for (int64_t i02 = 0; i02 < ne02; i02++) {
  6987. for (int64_t i01 = ir0; i01 < ir1; i01++) {
  6988. memcpy(
  6989. ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3),
  6990. ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03),
  6991. rs);
  6992. }
  6993. }
  6994. }
  6995. return;
  6996. }
  6997. // TODO: add more special-case implementations for tensor shapes/strides that can benefit from memcpy
  6998. if (ggml_is_contiguous(dst)) {
  6999. if (nb00 == sizeof(ggml_fp16_t)) {
  7000. if (dst->type == GGML_TYPE_F16) {
  7001. size_t id = 0;
  7002. const size_t rs = ne00 * nb00;
  7003. char * dst_ptr = (char *) dst->data;
  7004. for (int i03 = 0; i03 < ne03; i03++) {
  7005. for (int i02 = 0; i02 < ne02; i02++) {
  7006. id += rs * ir0;
  7007. for (int i01 = ir0; i01 < ir1; i01++) {
  7008. const char * src0_ptr = (char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03;
  7009. memcpy(dst_ptr + id, src0_ptr, rs);
  7010. id += rs;
  7011. }
  7012. id += rs * (ne01 - ir1);
  7013. }
  7014. }
  7015. } else if (dst->type == GGML_TYPE_F32) {
  7016. size_t id = 0;
  7017. float * dst_ptr = (float *) dst->data;
  7018. for (int i03 = 0; i03 < ne03; i03++) {
  7019. for (int i02 = 0; i02 < ne02; i02++) {
  7020. id += ne00 * ir0;
  7021. for (int i01 = ir0; i01 < ir1; i01++) {
  7022. const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
  7023. for (int i00 = 0; i00 < ne00; i00++) {
  7024. dst_ptr[id] = GGML_FP16_TO_FP32(src0_ptr[i00]);
  7025. id++;
  7026. }
  7027. }
  7028. id += ne00 * (ne01 - ir1);
  7029. }
  7030. }
  7031. } else if (type_traits[dst->type].from_float) {
  7032. ggml_from_float_t const quantize_row_q = type_traits[dst->type].from_float;
  7033. float * src0_f32 = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith;
  7034. size_t id = 0;
  7035. size_t rs = nb0 * (ne00 / ggml_blck_size(dst->type));
  7036. char * dst_ptr = (char *) dst->data;
  7037. for (int i03 = 0; i03 < ne03; i03++) {
  7038. for (int i02 = 0; i02 < ne02; i02++) {
  7039. id += rs * ir0;
  7040. for (int i01 = ir0; i01 < ir1; i01++) {
  7041. const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
  7042. for (int i00 = 0; i00 < ne00; i00++) {
  7043. src0_f32[i00] = GGML_FP16_TO_FP32(src0_ptr[i00]);
  7044. }
  7045. quantize_row_q(src0_f32, dst_ptr + id, ne00);
  7046. id += rs;
  7047. }
  7048. id += rs * (ne01 - ir1);
  7049. }
  7050. }
  7051. } else {
  7052. GGML_ASSERT(false); // TODO: implement
  7053. }
  7054. } else {
  7055. //printf("%s: this is not optimal - fix me\n", __func__);
  7056. if (dst->type == GGML_TYPE_F32) {
  7057. size_t id = 0;
  7058. float * dst_ptr = (float *) dst->data;
  7059. for (int i03 = 0; i03 < ne03; i03++) {
  7060. for (int i02 = 0; i02 < ne02; i02++) {
  7061. id += ne00 * ir0;
  7062. for (int i01 = ir0; i01 < ir1; i01++) {
  7063. for (int i00 = 0; i00 < ne00; i00++) {
  7064. const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  7065. dst_ptr[id] = GGML_FP16_TO_FP32(*src0_ptr);
  7066. id++;
  7067. }
  7068. }
  7069. id += ne00 * (ne01 - ir1);
  7070. }
  7071. }
  7072. } else if (dst->type == GGML_TYPE_F16) {
  7073. size_t id = 0;
  7074. ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data;
  7075. for (int i03 = 0; i03 < ne03; i03++) {
  7076. for (int i02 = 0; i02 < ne02; i02++) {
  7077. id += ne00 * ir0;
  7078. for (int i01 = ir0; i01 < ir1; i01++) {
  7079. for (int i00 = 0; i00 < ne00; i00++) {
  7080. const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  7081. dst_ptr[id] = *src0_ptr;
  7082. id++;
  7083. }
  7084. }
  7085. id += ne00 * (ne01 - ir1);
  7086. }
  7087. }
  7088. } else {
  7089. GGML_ASSERT(false); // TODO: implement
  7090. }
  7091. }
  7092. return;
  7093. }
  7094. // dst counters
  7095. int64_t i10 = 0;
  7096. int64_t i11 = 0;
  7097. int64_t i12 = 0;
  7098. int64_t i13 = 0;
  7099. if (dst->type == GGML_TYPE_F16) {
  7100. for (int64_t i03 = 0; i03 < ne03; i03++) {
  7101. for (int64_t i02 = 0; i02 < ne02; i02++) {
  7102. i10 += ne00 * ir0;
  7103. while (i10 >= ne0) {
  7104. i10 -= ne0;
  7105. if (++i11 == ne1) {
  7106. i11 = 0;
  7107. if (++i12 == ne2) {
  7108. i12 = 0;
  7109. if (++i13 == ne3) {
  7110. i13 = 0;
  7111. }
  7112. }
  7113. }
  7114. }
  7115. for (int64_t i01 = ir0; i01 < ir1; i01++) {
  7116. for (int64_t i00 = 0; i00 < ne00; i00++) {
  7117. const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  7118. char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3);
  7119. memcpy(dst_ptr, src0_ptr, sizeof(ggml_fp16_t));
7120. if (++i10 == ne0) { // wrap the dst counters on the dst dims (ne0..ne3), matching the f32 branch
7121. i10 = 0;
7122. if (++i11 == ne1) {
7123. i11 = 0;
7124. if (++i12 == ne2) {
7125. i12 = 0;
7126. if (++i13 == ne3) {
  7127. i13 = 0;
  7128. }
  7129. }
  7130. }
  7131. }
  7132. }
  7133. }
  7134. i10 += ne00 * (ne01 - ir1);
  7135. while (i10 >= ne0) {
  7136. i10 -= ne0;
  7137. if (++i11 == ne1) {
  7138. i11 = 0;
  7139. if (++i12 == ne2) {
  7140. i12 = 0;
  7141. if (++i13 == ne3) {
  7142. i13 = 0;
  7143. }
  7144. }
  7145. }
  7146. }
  7147. }
  7148. }
  7149. } else if (dst->type == GGML_TYPE_F32) {
  7150. for (int64_t i03 = 0; i03 < ne03; i03++) {
  7151. for (int64_t i02 = 0; i02 < ne02; i02++) {
  7152. i10 += ne00 * ir0;
  7153. while (i10 >= ne0) {
  7154. i10 -= ne0;
  7155. if (++i11 == ne1) {
  7156. i11 = 0;
  7157. if (++i12 == ne2) {
  7158. i12 = 0;
  7159. if (++i13 == ne3) {
  7160. i13 = 0;
  7161. }
  7162. }
  7163. }
  7164. }
  7165. for (int64_t i01 = ir0; i01 < ir1; i01++) {
  7166. for (int64_t i00 = 0; i00 < ne00; i00++) {
  7167. const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  7168. char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3);
  7169. *(float *) dst_ptr = GGML_FP16_TO_FP32(*(const ggml_fp16_t *) src0_ptr);
  7170. if (++i10 == ne0) {
  7171. i10 = 0;
  7172. if (++i11 == ne1) {
  7173. i11 = 0;
  7174. if (++i12 == ne2) {
  7175. i12 = 0;
  7176. if (++i13 == ne3) {
  7177. i13 = 0;
  7178. }
  7179. }
  7180. }
  7181. }
  7182. }
  7183. }
  7184. i10 += ne00 * (ne01 - ir1);
  7185. while (i10 >= ne0) {
  7186. i10 -= ne0;
  7187. if (++i11 == ne1) {
  7188. i11 = 0;
  7189. if (++i12 == ne2) {
  7190. i12 = 0;
  7191. if (++i13 == ne3) {
  7192. i13 = 0;
  7193. }
  7194. }
  7195. }
  7196. }
  7197. }
  7198. }
  7199. } else {
  7200. GGML_ASSERT(false); // TODO: implement
  7201. }
  7202. }
  7203. static void ggml_compute_forward_dup_f32(
  7204. const struct ggml_compute_params * params,
  7205. const struct ggml_tensor * src0,
  7206. struct ggml_tensor * dst) {
  7207. GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));
  7208. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7209. return;
  7210. }
  7211. GGML_TENSOR_UNARY_OP_LOCALS
  7212. const int ith = params->ith; // thread index
  7213. const int nth = params->nth; // number of threads
  7214. if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst) && src0->type == dst->type) {
  7215. ggml_compute_forward_dup_same_cont(params, src0, dst);
  7216. return;
  7217. }
  7218. // parallelize by rows
  7219. const int nr = ne01;
  7220. // number of rows per thread
  7221. const int dr = (nr + nth - 1) / nth;
  7222. // row range for this thread
  7223. const int ir0 = dr * ith;
  7224. const int ir1 = MIN(ir0 + dr, nr);
  7225. if (src0->type == dst->type &&
  7226. ne00 == ne0 &&
  7227. nb00 == ggml_type_size(src0->type) && nb0 == ggml_type_size(dst->type)) {
  7228. // copy by rows
  7229. const size_t rs = ne00*nb00;
  7230. for (int64_t i03 = 0; i03 < ne03; i03++) {
  7231. for (int64_t i02 = 0; i02 < ne02; i02++) {
  7232. for (int64_t i01 = ir0; i01 < ir1; i01++) {
  7233. memcpy(
  7234. ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3),
  7235. ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03),
  7236. rs);
  7237. }
  7238. }
  7239. }
  7240. return;
  7241. }
  7242. if (ggml_is_contiguous(dst)) {
  7243. // TODO: simplify
  7244. if (nb00 == sizeof(float)) {
  7245. if (dst->type == GGML_TYPE_F32) {
  7246. size_t id = 0;
  7247. const size_t rs = ne00 * nb00;
  7248. char * dst_ptr = (char *) dst->data;
  7249. for (int i03 = 0; i03 < ne03; i03++) {
  7250. for (int i02 = 0; i02 < ne02; i02++) {
  7251. id += rs * ir0;
  7252. for (int i01 = ir0; i01 < ir1; i01++) {
  7253. const char * src0_ptr = (char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03;
  7254. memcpy(dst_ptr + id, src0_ptr, rs);
  7255. id += rs;
  7256. }
  7257. id += rs * (ne01 - ir1);
  7258. }
  7259. }
  7260. } else if (type_traits[dst->type].from_float) {
  7261. ggml_from_float_t const quantize_row_q = type_traits[dst->type].from_float;
  7262. size_t id = 0;
  7263. size_t rs = nb0 * (ne00 / ggml_blck_size(dst->type));
  7264. char * dst_ptr = (char *) dst->data;
  7265. for (int i03 = 0; i03 < ne03; i03++) {
  7266. for (int i02 = 0; i02 < ne02; i02++) {
  7267. id += rs * ir0;
  7268. for (int i01 = ir0; i01 < ir1; i01++) {
  7269. const float * src0_ptr = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
  7270. quantize_row_q(src0_ptr, dst_ptr + id, ne00);
  7271. id += rs;
  7272. }
  7273. id += rs * (ne01 - ir1);
  7274. }
  7275. }
  7276. } else {
  7277. GGML_ASSERT(false); // TODO: implement
  7278. }
  7279. } else {
  7280. //printf("%s: this is not optimal - fix me\n", __func__);
  7281. if (dst->type == GGML_TYPE_F32) {
  7282. size_t id = 0;
  7283. float * dst_ptr = (float *) dst->data;
  7284. for (int i03 = 0; i03 < ne03; i03++) {
  7285. for (int i02 = 0; i02 < ne02; i02++) {
  7286. id += ne00 * ir0;
  7287. for (int i01 = ir0; i01 < ir1; i01++) {
  7288. for (int i00 = 0; i00 < ne00; i00++) {
  7289. const float * src0_ptr = (float *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  7290. dst_ptr[id] = *src0_ptr;
  7291. id++;
  7292. }
  7293. }
  7294. id += ne00 * (ne01 - ir1);
  7295. }
  7296. }
  7297. } else if (dst->type == GGML_TYPE_F16) {
  7298. size_t id = 0;
  7299. ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data;
  7300. for (int i03 = 0; i03 < ne03; i03++) {
  7301. for (int i02 = 0; i02 < ne02; i02++) {
  7302. id += ne00 * ir0;
  7303. for (int i01 = ir0; i01 < ir1; i01++) {
  7304. for (int i00 = 0; i00 < ne00; i00++) {
  7305. const float * src0_ptr = (float *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  7306. dst_ptr[id] = GGML_FP32_TO_FP16(*src0_ptr);
  7307. id++;
  7308. }
  7309. }
  7310. id += ne00 * (ne01 - ir1);
  7311. }
  7312. }
  7313. } else {
  7314. GGML_ASSERT(false); // TODO: implement
  7315. }
  7316. }
  7317. return;
  7318. }
  7319. // dst counters
  7320. int64_t i10 = 0;
  7321. int64_t i11 = 0;
  7322. int64_t i12 = 0;
  7323. int64_t i13 = 0;
  7324. if (dst->type == GGML_TYPE_F32) {
  7325. for (int64_t i03 = 0; i03 < ne03; i03++) {
  7326. for (int64_t i02 = 0; i02 < ne02; i02++) {
  7327. i10 += ne00 * ir0;
  7328. while (i10 >= ne0) {
  7329. i10 -= ne0;
  7330. if (++i11 == ne1) {
  7331. i11 = 0;
  7332. if (++i12 == ne2) {
  7333. i12 = 0;
  7334. if (++i13 == ne3) {
  7335. i13 = 0;
  7336. }
  7337. }
  7338. }
  7339. }
  7340. for (int64_t i01 = ir0; i01 < ir1; i01++) {
  7341. for (int64_t i00 = 0; i00 < ne00; i00++) {
  7342. const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  7343. char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3);
  7344. memcpy(dst_ptr, src0_ptr, sizeof(float));
  7345. if (++i10 == ne0) {
  7346. i10 = 0;
  7347. if (++i11 == ne1) {
  7348. i11 = 0;
  7349. if (++i12 == ne2) {
  7350. i12 = 0;
  7351. if (++i13 == ne3) {
  7352. i13 = 0;
  7353. }
  7354. }
  7355. }
  7356. }
  7357. }
  7358. }
  7359. i10 += ne00 * (ne01 - ir1);
  7360. while (i10 >= ne0) {
  7361. i10 -= ne0;
  7362. if (++i11 == ne1) {
  7363. i11 = 0;
  7364. if (++i12 == ne2) {
  7365. i12 = 0;
  7366. if (++i13 == ne3) {
  7367. i13 = 0;
  7368. }
  7369. }
  7370. }
  7371. }
  7372. }
  7373. }
  7374. } else if (dst->type == GGML_TYPE_F16) {
  7375. for (int64_t i03 = 0; i03 < ne03; i03++) {
  7376. for (int64_t i02 = 0; i02 < ne02; i02++) {
  7377. i10 += ne00 * ir0;
  7378. while (i10 >= ne0) {
  7379. i10 -= ne0;
  7380. if (++i11 == ne1) {
  7381. i11 = 0;
  7382. if (++i12 == ne2) {
  7383. i12 = 0;
  7384. if (++i13 == ne3) {
  7385. i13 = 0;
  7386. }
  7387. }
  7388. }
  7389. }
  7390. for (int64_t i01 = ir0; i01 < ir1; i01++) {
  7391. for (int64_t i00 = 0; i00 < ne00; i00++) {
  7392. const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  7393. char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3);
  7394. *(ggml_fp16_t *) dst_ptr = GGML_FP32_TO_FP16(*(const float *) src0_ptr);
  7395. if (++i10 == ne0) {
  7396. i10 = 0;
  7397. if (++i11 == ne1) {
  7398. i11 = 0;
  7399. if (++i12 == ne2) {
  7400. i12 = 0;
  7401. if (++i13 == ne3) {
  7402. i13 = 0;
  7403. }
  7404. }
  7405. }
  7406. }
  7407. }
  7408. }
  7409. i10 += ne00 * (ne01 - ir1);
  7410. while (i10 >= ne0) {
  7411. i10 -= ne0;
  7412. if (++i11 == ne1) {
  7413. i11 = 0;
  7414. if (++i12 == ne2) {
  7415. i12 = 0;
  7416. if (++i13 == ne3) {
  7417. i13 = 0;
  7418. }
  7419. }
  7420. }
  7421. }
  7422. }
  7423. }
  7424. } else {
  7425. GGML_ASSERT(false); // TODO: implement
  7426. }
  7427. }
  7428. static void ggml_compute_forward_dup(
  7429. const struct ggml_compute_params * params,
  7430. const struct ggml_tensor * src0,
  7431. struct ggml_tensor * dst) {
  7432. if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst) && src0->type == dst->type) {
  7433. ggml_compute_forward_dup_same_cont(params, src0, dst);
  7434. return;
  7435. }
  7436. switch (src0->type) {
  7437. case GGML_TYPE_F16:
  7438. {
  7439. ggml_compute_forward_dup_f16(params, src0, dst);
  7440. } break;
  7441. case GGML_TYPE_F32:
  7442. {
  7443. ggml_compute_forward_dup_f32(params, src0, dst);
  7444. } break;
  7445. default:
  7446. {
  7447. GGML_ASSERT(false);
  7448. } break;
  7449. }
  7450. }
  7451. // ggml_compute_forward_add
  7452. static void ggml_compute_forward_add_f32(
  7453. const struct ggml_compute_params * params,
  7454. const struct ggml_tensor * src0,
  7455. const struct ggml_tensor * src1,
  7456. struct ggml_tensor * dst) {
  7457. GGML_ASSERT(ggml_can_repeat_rows(src1, src0) && ggml_are_same_shape(src0, dst));
  7458. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7459. return;
  7460. }
  7461. const int ith = params->ith;
  7462. const int nth = params->nth;
  7463. const int nr = ggml_nrows(src0);
  7464. GGML_TENSOR_BINARY_OP_LOCALS
  7465. GGML_ASSERT( nb0 == sizeof(float));
  7466. GGML_ASSERT(nb00 == sizeof(float));
  7467. // rows per thread
  7468. const int dr = (nr + nth - 1)/nth;
  7469. // row range for this thread
  7470. const int ir0 = dr*ith;
  7471. const int ir1 = MIN(ir0 + dr, nr);
  7472. if (nb10 == sizeof(float)) {
  7473. for (int ir = ir0; ir < ir1; ++ir) {
  7474. // src1 is broadcastable across src0 and dst in i1, i2, i3
  7475. const int64_t i03 = ir/(ne02*ne01);
  7476. const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
  7477. const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);
  7478. const int64_t i13 = i03 % ne13;
  7479. const int64_t i12 = i02 % ne12;
  7480. const int64_t i11 = i01 % ne11;
  7481. float * dst_ptr = (float *) ((char *) dst->data + i03*nb3 + i02*nb2 + i01*nb1 );
  7482. float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);
  7483. float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11);
  7484. #ifdef GGML_USE_ACCELERATE
  7485. vDSP_vadd(src0_ptr, 1, src1_ptr, 1, dst_ptr, 1, ne00);
  7486. #else
  7487. ggml_vec_add_f32(ne00, dst_ptr, src0_ptr, src1_ptr);
  7488. #endif
  7489. }
  7490. } else {
  7491. // src1 is not contiguous
  7492. for (int ir = ir0; ir < ir1; ++ir) {
  7493. // src1 is broadcastable across src0 and dst in i1, i2, i3
  7494. const int64_t i03 = ir/(ne02*ne01);
  7495. const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
  7496. const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);
  7497. const int64_t i13 = i03 % ne13;
  7498. const int64_t i12 = i02 % ne12;
  7499. const int64_t i11 = i01 % ne11;
  7500. float * dst_ptr = (float *) ((char *) dst->data + i03*nb3 + i02*nb2 + i01*nb1 );
  7501. float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);
  7502. for (int i0 = 0; i0 < ne0; i0++) {
  7503. float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + i0*nb10);
  7504. dst_ptr[i0] = src0_ptr[i0] + *src1_ptr;
  7505. }
  7506. }
  7507. }
  7508. }
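// Worked example (illustrative values) for the row -> index decomposition used above:
// with src0 ne = [ne00, 4, 3, 2] there are nr = 4*3*2 = 24 rows; row ir = 17 gives
//   i03 = 17/(3*4)     = 1
//   i02 = (17 - 12)/4  = 1
//   i01 =  17 - 12 - 4 = 1
// The src1 indices are then taken modulo its (possibly smaller) ne1x, so a src1 with,
// say, ne = [ne00, 1, 1, 1] is broadcast (repeated) across every row of src0.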
  7509. static void ggml_compute_forward_add_f16_f32(
  7510. const struct ggml_compute_params * params,
  7511. const struct ggml_tensor * src0,
  7512. const struct ggml_tensor * src1,
  7513. struct ggml_tensor * dst) {
  7514. GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
  7515. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7516. return;
  7517. }
  7518. const int ith = params->ith;
  7519. const int nth = params->nth;
  7520. const int nr = ggml_nrows(src0);
  7521. GGML_TENSOR_BINARY_OP_LOCALS
  7522. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  7523. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  7524. GGML_ASSERT(dst->type == GGML_TYPE_F16);
  7525. GGML_ASSERT( nb0 == sizeof(ggml_fp16_t));
  7526. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  7527. // rows per thread
  7528. const int dr = (nr + nth - 1)/nth;
  7529. // row range for this thread
  7530. const int ir0 = dr*ith;
  7531. const int ir1 = MIN(ir0 + dr, nr);
  7532. if (nb10 == sizeof(float)) {
  7533. for (int ir = ir0; ir < ir1; ++ir) {
  7534. // src0, src1 and dst are same shape => same indices
  7535. const int i3 = ir/(ne2*ne1);
  7536. const int i2 = (ir - i3*ne2*ne1)/ne1;
  7537. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  7538. ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1);
  7539. ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
  7540. float * src1_ptr = (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11);
  7541. for (int i = 0; i < ne0; i++) {
  7542. dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + src1_ptr[i]);
  7543. }
  7544. }
  7545. }
  7546. else {
  7547. // src1 is not contiguous
  7548. GGML_ASSERT(false);
  7549. }
  7550. }
  7551. static void ggml_compute_forward_add_f16_f16(
  7552. const struct ggml_compute_params * params,
  7553. const struct ggml_tensor * src0,
  7554. const struct ggml_tensor * src1,
  7555. struct ggml_tensor * dst) {
  7556. GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
  7557. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7558. return;
  7559. }
  7560. const int ith = params->ith;
  7561. const int nth = params->nth;
  7562. const int nr = ggml_nrows(src0);
  7563. GGML_TENSOR_BINARY_OP_LOCALS
  7564. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  7565. GGML_ASSERT(src1->type == GGML_TYPE_F16);
  7566. GGML_ASSERT(dst->type == GGML_TYPE_F16);
  7567. GGML_ASSERT( nb0 == sizeof(ggml_fp16_t));
  7568. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  7569. // rows per thread
  7570. const int dr = (nr + nth - 1)/nth;
  7571. // row range for this thread
  7572. const int ir0 = dr*ith;
  7573. const int ir1 = MIN(ir0 + dr, nr);
  7574. if (nb10 == sizeof(ggml_fp16_t)) {
  7575. for (int ir = ir0; ir < ir1; ++ir) {
  7576. // src0, src1 and dst are same shape => same indices
  7577. const int i3 = ir/(ne2*ne1);
  7578. const int i2 = (ir - i3*ne2*ne1)/ne1;
  7579. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  7580. ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1);
  7581. ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
  7582. ggml_fp16_t * src1_ptr = (ggml_fp16_t *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11);
  7583. for (int i = 0; i < ne0; i++) {
  7584. dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + GGML_FP16_TO_FP32(src1_ptr[i]));
  7585. }
  7586. }
  7587. }
  7588. else {
  7589. // src1 is not contiguous
  7590. GGML_ASSERT(false);
  7591. }
  7592. }
static void ggml_compute_forward_add_q_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }
    const int nr = ggml_nrows(src0);
    GGML_TENSOR_BINARY_OP_LOCALS
    const int ith = params->ith;
    const int nth = params->nth;
    const enum ggml_type type  = src0->type;
    const enum ggml_type dtype = dst->type;
    ggml_to_float_t   const dequantize_row_q = type_traits[type].to_float;
    ggml_from_float_t const quantize_row_q   = type_traits[dtype].from_float;
    // we don't support permuted src0 or src1
    GGML_ASSERT(nb00 == ggml_type_size(type));
    GGML_ASSERT(nb10 == sizeof(float));
    // dst cannot be transposed or permuted
    GGML_ASSERT(nb0 <= nb1);
    GGML_ASSERT(nb1 <= nb2);
    GGML_ASSERT(nb2 <= nb3);
    GGML_ASSERT(ggml_is_quantized(src0->type));
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    // rows per thread
    const int dr = (nr + nth - 1)/nth;
    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);
    float * wdata = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith;
    for (int ir = ir0; ir < ir1; ++ir) {
        // src0 indices
        const int i03 = ir/(ne02*ne01);
        const int i02 = (ir - i03*ne02*ne01)/ne01;
        const int i01 = (ir - i03*ne02*ne01 - i02*ne01);
        // src1 and dst are same shape as src0 => same indices
        const int i13 = i03;
        const int i12 = i02;
        const int i11 = i01;
        const int i3 = i03;
        const int i2 = i02;
        const int i1 = i01;
        void  * src0_row = (void *) ((char *) src0->data + (i01*nb01 + i02*nb02 + i03*nb03));
        float * src1_row = (float *)((char *) src1->data + (i11*nb11 + i12*nb12 + i13*nb13));
        void  * dst_row  = (void *) ((char *)  dst->data + ( i1*nb1  +  i2*nb2  +  i3*nb3));
        assert(ne00 % 32 == 0);
        // dequantize row from src0 to temp buffer
        dequantize_row_q(src0_row, wdata, ne00);
        // add src1
        ggml_vec_acc_f32(ne00, wdata, src1_row);
        // quantize row to dst
        if (quantize_row_q != NULL) {
            quantize_row_q(wdata, dst_row, ne00);
        } else {
            memcpy(dst_row, wdata, ne0*nb0);
        }
    }
}
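// The temporary row above lives in params->wdata: each thread uses its own slice of
// ne00 floats, spaced apart by an extra CACHE_LINE_SIZE_F32 floats. The padding is
// presumably there so that neighbouring threads do not write to the same cache line
// while filling their scratch rows (an assumption about intent, consistent with how
// the per-thread offset is computed).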
  7653. static void ggml_compute_forward_add(
  7654. const struct ggml_compute_params * params,
  7655. const struct ggml_tensor * src0,
  7656. const struct ggml_tensor * src1,
  7657. struct ggml_tensor * dst) {
  7658. switch (src0->type) {
  7659. case GGML_TYPE_F32:
  7660. {
  7661. ggml_compute_forward_add_f32(params, src0, src1, dst);
  7662. } break;
  7663. case GGML_TYPE_F16:
  7664. {
  7665. if (src1->type == GGML_TYPE_F16) {
  7666. ggml_compute_forward_add_f16_f16(params, src0, src1, dst);
  7667. }
  7668. else if (src1->type == GGML_TYPE_F32) {
  7669. ggml_compute_forward_add_f16_f32(params, src0, src1, dst);
  7670. }
  7671. else {
  7672. GGML_ASSERT(false);
  7673. }
  7674. } break;
  7675. case GGML_TYPE_Q4_0:
  7676. case GGML_TYPE_Q4_1:
  7677. case GGML_TYPE_Q5_0:
  7678. case GGML_TYPE_Q5_1:
  7679. case GGML_TYPE_Q8_0:
  7680. case GGML_TYPE_Q2_K:
  7681. case GGML_TYPE_Q3_K:
  7682. case GGML_TYPE_Q4_K:
  7683. case GGML_TYPE_Q5_K:
  7684. case GGML_TYPE_Q6_K:
  7685. {
  7686. ggml_compute_forward_add_q_f32(params, src0, src1, dst);
  7687. } break;
  7688. default:
  7689. {
  7690. GGML_ASSERT(false);
  7691. } break;
  7692. }
  7693. }
  7694. // ggml_compute_forward_add1
  7695. static void ggml_compute_forward_add1_f32(
  7696. const struct ggml_compute_params * params,
  7697. const struct ggml_tensor * src0,
  7698. const struct ggml_tensor * src1,
  7699. struct ggml_tensor * dst) {
  7700. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  7701. GGML_ASSERT(ggml_is_scalar(src1));
  7702. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7703. return;
  7704. }
  7705. const int ith = params->ith;
  7706. const int nth = params->nth;
  7707. const int nr = ggml_nrows(src0);
  7708. GGML_TENSOR_UNARY_OP_LOCALS
  7709. GGML_ASSERT( nb0 == sizeof(float));
  7710. GGML_ASSERT(nb00 == sizeof(float));
  7711. // rows per thread
  7712. const int dr = (nr + nth - 1)/nth;
  7713. // row range for this thread
  7714. const int ir0 = dr*ith;
  7715. const int ir1 = MIN(ir0 + dr, nr);
  7716. for (int ir = ir0; ir < ir1; ++ir) {
  7717. // src0 and dst are same shape => same indices
  7718. const int i3 = ir/(ne2*ne1);
  7719. const int i2 = (ir - i3*ne2*ne1)/ne1;
  7720. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  7721. #ifdef GGML_USE_ACCELERATE
  7722. UNUSED(ggml_vec_add1_f32);
  7723. vDSP_vadd(
  7724. (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01), 1,
  7725. (float *) ((char *) src1->data), 0,
  7726. (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ), 1,
  7727. ne0);
  7728. #else
  7729. ggml_vec_add1_f32(ne0,
  7730. (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ),
  7731. (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01),
  7732. *(float *) src1->data);
  7733. #endif
  7734. }
  7735. }
  7736. static void ggml_compute_forward_add1_f16_f32(
  7737. const struct ggml_compute_params * params,
  7738. const struct ggml_tensor * src0,
  7739. const struct ggml_tensor * src1,
  7740. struct ggml_tensor * dst) {
  7741. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  7742. GGML_ASSERT(ggml_is_scalar(src1));
  7743. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7744. return;
  7745. }
  7746. // scalar to add
  7747. const float v = *(float *) src1->data;
  7748. const int ith = params->ith;
  7749. const int nth = params->nth;
  7750. const int nr = ggml_nrows(src0);
  7751. GGML_TENSOR_UNARY_OP_LOCALS
  7752. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  7753. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  7754. GGML_ASSERT(dst->type == GGML_TYPE_F16);
  7755. GGML_ASSERT( nb0 == sizeof(ggml_fp16_t));
  7756. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  7757. // rows per thread
  7758. const int dr = (nr + nth - 1)/nth;
  7759. // row range for this thread
  7760. const int ir0 = dr*ith;
  7761. const int ir1 = MIN(ir0 + dr, nr);
  7762. for (int ir = ir0; ir < ir1; ++ir) {
  7763. // src0 and dst are same shape => same indices
  7764. const int i3 = ir/(ne2*ne1);
  7765. const int i2 = (ir - i3*ne2*ne1)/ne1;
  7766. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  7767. ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 );
  7768. ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
  7769. for (int i = 0; i < ne0; i++) {
  7770. dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + v);
  7771. }
  7772. }
  7773. }
  7774. static void ggml_compute_forward_add1_f16_f16(
  7775. const struct ggml_compute_params * params,
  7776. const struct ggml_tensor * src0,
  7777. const struct ggml_tensor * src1,
  7778. struct ggml_tensor * dst) {
  7779. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  7780. GGML_ASSERT(ggml_is_scalar(src1));
  7781. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7782. return;
  7783. }
  7784. // scalar to add
  7785. const float v = GGML_FP16_TO_FP32(*(ggml_fp16_t *) src1->data);
  7786. const int ith = params->ith;
  7787. const int nth = params->nth;
  7788. const int nr = ggml_nrows(src0);
  7789. GGML_TENSOR_UNARY_OP_LOCALS
  7790. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  7791. GGML_ASSERT(src1->type == GGML_TYPE_F16);
  7792. GGML_ASSERT(dst->type == GGML_TYPE_F16);
  7793. GGML_ASSERT( nb0 == sizeof(ggml_fp16_t));
  7794. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  7795. // rows per thread
  7796. const int dr = (nr + nth - 1)/nth;
  7797. // row range for this thread
  7798. const int ir0 = dr*ith;
  7799. const int ir1 = MIN(ir0 + dr, nr);
  7800. for (int ir = ir0; ir < ir1; ++ir) {
  7801. // src0 and dst are same shape => same indices
  7802. const int i3 = ir/(ne2*ne1);
  7803. const int i2 = (ir - i3*ne2*ne1)/ne1;
  7804. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  7805. ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 );
  7806. ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
  7807. for (int i = 0; i < ne0; i++) {
  7808. dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + v);
  7809. }
  7810. }
  7811. }
static void ggml_compute_forward_add1_q_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, dst));
    GGML_ASSERT(ggml_is_scalar(src1));
    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }
    // scalar to add
    const float v = *(float *) src1->data;
    const int ith = params->ith;
    const int nth = params->nth;
    const int nr = ggml_nrows(src0);
    GGML_TENSOR_UNARY_OP_LOCALS
    const enum ggml_type type = src0->type;
    ggml_to_float_t   const dequantize_row_q = type_traits[type].to_float;
    ggml_from_float_t const quantize_row_q   = type_traits[type].from_float;
    // we don't support permuted src0
    GGML_ASSERT(nb00 == ggml_type_size(type));
    // dst cannot be transposed or permuted
    GGML_ASSERT(nb0 <= nb1);
    GGML_ASSERT(nb1 <= nb2);
    GGML_ASSERT(nb2 <= nb3);
    GGML_ASSERT(ggml_is_quantized(src0->type));
    GGML_ASSERT(dst->type == src0->type);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    // rows per thread
    const int dr = (nr + nth - 1)/nth;
    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);
    float * wdata = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32) * ith;
    for (int ir = ir0; ir < ir1; ++ir) {
        // src0 and dst are same shape => same indices
        const int i3 = ir/(ne2*ne1);
        const int i2 = (ir - i3*ne2*ne1)/ne1;
        const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
        void * src0_row = (void *) ((char *) src0->data + (i1*nb01 + i2*nb02 + i3*nb03));
        void * dst_row  = (void *) ((char *)  dst->data + (i1*nb1  + i2*nb2  + i3*nb3 ));
        assert(ne0 % 32 == 0);
        // dequantize row from src0 to temp buffer
        dequantize_row_q(src0_row, wdata, ne0);
        // add the scalar taken from src1
        ggml_vec_acc1_f32(ne0, wdata, v);
        // quantize row to dst
        quantize_row_q(wdata, dst_row, ne0);
    }
}
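// add1 on a quantized tensor is a dequantize -> add -> requantize round trip per row,
// so the result carries the quantization error of the storage type: the scalar is
// folded into freshly re-quantized blocks rather than stored exactly.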
  7862. static void ggml_compute_forward_add1(
  7863. const struct ggml_compute_params * params,
  7864. const struct ggml_tensor * src0,
  7865. const struct ggml_tensor * src1,
  7866. struct ggml_tensor * dst) {
  7867. switch (src0->type) {
  7868. case GGML_TYPE_F32:
  7869. {
  7870. ggml_compute_forward_add1_f32(params, src0, src1, dst);
  7871. } break;
  7872. case GGML_TYPE_F16:
  7873. {
  7874. if (src1->type == GGML_TYPE_F16) {
  7875. ggml_compute_forward_add1_f16_f16(params, src0, src1, dst);
  7876. }
  7877. else if (src1->type == GGML_TYPE_F32) {
  7878. ggml_compute_forward_add1_f16_f32(params, src0, src1, dst);
  7879. }
  7880. else {
  7881. GGML_ASSERT(false);
  7882. }
  7883. } break;
  7884. case GGML_TYPE_Q4_0:
  7885. case GGML_TYPE_Q4_1:
  7886. case GGML_TYPE_Q5_0:
  7887. case GGML_TYPE_Q5_1:
  7888. case GGML_TYPE_Q8_0:
  7889. case GGML_TYPE_Q8_1:
  7890. case GGML_TYPE_Q2_K:
  7891. case GGML_TYPE_Q3_K:
  7892. case GGML_TYPE_Q4_K:
  7893. case GGML_TYPE_Q5_K:
  7894. case GGML_TYPE_Q6_K:
  7895. {
  7896. ggml_compute_forward_add1_q_f32(params, src0, src1, dst);
  7897. } break;
  7898. default:
  7899. {
  7900. GGML_ASSERT(false);
  7901. } break;
  7902. }
  7903. }
  7904. // ggml_compute_forward_acc
static void ggml_compute_forward_acc_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, dst));
    GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));
    // view src0 and dst with these strides and data offset in bytes during acc
    // nb0 is implicitly element_size because src0 and dst are contiguous
    size_t nb1     = ((int32_t *) dst->op_params)[0];
    size_t nb2     = ((int32_t *) dst->op_params)[1];
    size_t nb3     = ((int32_t *) dst->op_params)[2];
    size_t offset  = ((int32_t *) dst->op_params)[3];
    bool   inplace = (bool) ((int32_t *) dst->op_params)[4];
    if (!inplace && (params->type == GGML_TASK_INIT)) {
        // memcpy needs to be synchronized across threads to avoid race conditions.
        // => do it in INIT phase
        memcpy(
            ((char *)  dst->data),
            ((char *) src0->data),
            ggml_nbytes(dst));
    }
    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }
    const int ith = params->ith;
    const int nth = params->nth;
    const int nr = ggml_nrows(src1);
    const int nc = src1->ne[0];
    GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne)
    GGML_TENSOR_LOCALS(size_t,  nb1, src1, nb)
    // src0 and dst as viewed during acc
    const size_t nb0 = ggml_element_size(src0);
    const size_t nb00 = nb0;
    const size_t nb01 = nb1;
    const size_t nb02 = nb2;
    const size_t nb03 = nb3;
    GGML_ASSERT(offset + (ne10 == 0 ? 0 : ne10-1)*nb0  + (ne11 == 0 ? 0 : ne11-1)*nb1  + (ne12 == 0 ? 0 : ne12-1)*nb2  + (ne13 == 0 ? 0 : ne13-1)*nb3  < ggml_nbytes(dst));
    GGML_ASSERT(offset + (ne10 == 0 ? 0 : ne10-1)*nb00 + (ne11 == 0 ? 0 : ne11-1)*nb01 + (ne12 == 0 ? 0 : ne12-1)*nb02 + (ne13 == 0 ? 0 : ne13-1)*nb03 < ggml_nbytes(src0));
    GGML_ASSERT(nb10 == sizeof(float));
    // rows per thread
    const int dr = (nr + nth - 1)/nth;
    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);
    for (int ir = ir0; ir < ir1; ++ir) {
        // src0 and dst are viewed with shape of src1 and offset
        // => same indices
        const int i3 = ir/(ne12*ne11);
        const int i2 = (ir - i3*ne12*ne11)/ne11;
        const int i1 = (ir - i3*ne12*ne11 - i2*ne11);
#ifdef GGML_USE_ACCELERATE
        vDSP_vadd(
                (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + offset), 1,
                (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11), 1,
                (float *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1  + offset), 1, nc);
#else
        ggml_vec_add_f32(nc,
                (float *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1  + offset),
                (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + offset),
                (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11));
#endif
    }
}
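// Layout of dst->op_params as consumed above:
//   [0] nb1, [1] nb2, [2] nb3 : byte strides of the view into src0/dst
//   [3] offset                : byte offset of the view into src0/dst
//   [4] inplace               : when false, src0 is first copied into dst during INIT
// i.e. acc computes dst = src0, then view(dst, shape of src1, offset) += src1.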
  7969. static void ggml_compute_forward_acc(
  7970. const struct ggml_compute_params * params,
  7971. const struct ggml_tensor * src0,
  7972. const struct ggml_tensor * src1,
  7973. struct ggml_tensor * dst) {
  7974. switch (src0->type) {
  7975. case GGML_TYPE_F32:
  7976. {
  7977. ggml_compute_forward_acc_f32(params, src0, src1, dst);
  7978. } break;
  7979. case GGML_TYPE_F16:
  7980. case GGML_TYPE_Q4_0:
  7981. case GGML_TYPE_Q4_1:
  7982. case GGML_TYPE_Q5_0:
  7983. case GGML_TYPE_Q5_1:
  7984. case GGML_TYPE_Q8_0:
  7985. case GGML_TYPE_Q8_1:
  7986. case GGML_TYPE_Q2_K:
  7987. case GGML_TYPE_Q3_K:
  7988. case GGML_TYPE_Q4_K:
  7989. case GGML_TYPE_Q5_K:
  7990. case GGML_TYPE_Q6_K:
  7991. default:
  7992. {
  7993. GGML_ASSERT(false);
  7994. } break;
  7995. }
  7996. }
  7997. // ggml_compute_forward_sub
  7998. static void ggml_compute_forward_sub_f32(
  7999. const struct ggml_compute_params * params,
  8000. const struct ggml_tensor * src0,
  8001. const struct ggml_tensor * src1,
  8002. struct ggml_tensor * dst) {
  8003. assert(params->ith == 0);
  8004. assert(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
  8005. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8006. return;
  8007. }
  8008. const int nr = ggml_nrows(src0);
  8009. GGML_TENSOR_BINARY_OP_LOCALS
  8010. GGML_ASSERT( nb0 == sizeof(float));
  8011. GGML_ASSERT(nb00 == sizeof(float));
  8012. if (nb10 == sizeof(float)) {
  8013. for (int ir = 0; ir < nr; ++ir) {
  8014. // src0, src1 and dst are same shape => same indices
  8015. const int i3 = ir/(ne2*ne1);
  8016. const int i2 = (ir - i3*ne2*ne1)/ne1;
  8017. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  8018. #ifdef GGML_USE_ACCELERATE
  8019. vDSP_vsub(
  8020. (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11), 1,
  8021. (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01), 1,
  8022. (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ), 1,
  8023. ne0);
  8024. #else
  8025. ggml_vec_sub_f32(ne0,
  8026. (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ),
  8027. (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01),
  8028. (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11));
  8029. #endif
  8032. }
  8033. } else {
  8034. // src1 is not contiguous
  8035. for (int ir = 0; ir < nr; ++ir) {
  8036. // src0, src1 and dst are same shape => same indices
  8037. const int i3 = ir/(ne2*ne1);
  8038. const int i2 = (ir - i3*ne2*ne1)/ne1;
  8039. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  8040. float * dst_ptr = (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 );
  8041. float * src0_ptr = (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
  8042. for (int i0 = 0; i0 < ne0; i0++) {
  8043. float * src1_ptr = (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11 + i0*nb10);
  8044. dst_ptr[i0] = src0_ptr[i0] - *src1_ptr;
  8045. }
  8046. }
  8047. }
  8048. }
  8049. static void ggml_compute_forward_sub(
  8050. const struct ggml_compute_params * params,
  8051. const struct ggml_tensor * src0,
  8052. const struct ggml_tensor * src1,
  8053. struct ggml_tensor * dst) {
  8054. switch (src0->type) {
  8055. case GGML_TYPE_F32:
  8056. {
  8057. ggml_compute_forward_sub_f32(params, src0, src1, dst);
  8058. } break;
  8059. default:
  8060. {
  8061. GGML_ASSERT(false);
  8062. } break;
  8063. }
  8064. }
  8065. // ggml_compute_forward_mul
  8066. static void ggml_compute_forward_mul_f32(
  8067. const struct ggml_compute_params * params,
  8068. const struct ggml_tensor * src0,
  8069. const struct ggml_tensor * src1,
  8070. struct ggml_tensor * dst) {
  8071. GGML_ASSERT(ggml_can_repeat_rows(src1, src0) && ggml_are_same_shape(src0, dst));
  8072. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8073. return;
  8074. }
  8075. const int ith = params->ith;
  8076. const int nth = params->nth;
  8077. #ifdef GGML_USE_CLBLAST
  8078. if (src1->backend == GGML_BACKEND_GPU) {
  8079. if (ith == 0) {
  8080. ggml_cl_mul(src0, src1, dst);
  8081. }
  8082. return;
  8083. }
  8084. #endif
  8085. const int64_t nr = ggml_nrows(src0);
  8086. GGML_TENSOR_BINARY_OP_LOCALS
  8087. GGML_ASSERT( nb0 == sizeof(float));
  8088. GGML_ASSERT(nb00 == sizeof(float));
  8089. GGML_ASSERT(ne00 == ne10);
  8090. if (nb10 == sizeof(float)) {
  8091. for (int64_t ir = ith; ir < nr; ir += nth) {
  8092. // src0 and dst are same shape => same indices
  8093. const int64_t i03 = ir/(ne02*ne01);
  8094. const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
  8095. const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);
  8096. const int64_t i13 = i03 % ne13;
  8097. const int64_t i12 = i02 % ne12;
  8098. const int64_t i11 = i01 % ne11;
  8099. float * dst_ptr = (float *) ((char *) dst->data + i03*nb3 + i02*nb2 + i01*nb1 );
  8100. float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);
  8101. float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11);
  8102. #ifdef GGML_USE_ACCELERATE
  8103. UNUSED(ggml_vec_mul_f32);
  8104. vDSP_vmul( src0_ptr, 1, src1_ptr, 1, dst_ptr, 1, ne00);
  8105. #else
  8106. ggml_vec_mul_f32(ne00, dst_ptr, src0_ptr, src1_ptr);
  8107. #endif
  8110. }
  8111. } else {
  8112. // src1 is not contiguous
  8113. for (int64_t ir = ith; ir < nr; ir += nth) {
  8114. // src0 and dst are same shape => same indices
  8115. // src1 is broadcastable across src0 and dst in i1, i2, i3
  8116. const int64_t i03 = ir/(ne02*ne01);
  8117. const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
  8118. const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);
  8119. const int64_t i13 = i03 % ne13;
  8120. const int64_t i12 = i02 % ne12;
  8121. const int64_t i11 = i01 % ne11;
  8122. float * dst_ptr = (float *) ((char *) dst->data + i03*nb3 + i02*nb2 + i01*nb1 );
  8123. float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);
  8124. for (int64_t i0 = 0; i0 < ne00; i0++) {
  8125. float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + i0*nb10);
  8126. dst_ptr[i0] = src0_ptr[i0] * (*src1_ptr);
  8127. }
  8128. }
  8129. }
  8130. }
  8131. static void ggml_compute_forward_mul(
  8132. const struct ggml_compute_params * params,
  8133. const struct ggml_tensor * src0,
  8134. const struct ggml_tensor * src1,
  8135. struct ggml_tensor * dst) {
  8136. GGML_ASSERT(src1->type == GGML_TYPE_F32 && "only f32 src1 supported for now");
  8137. switch (src0->type) {
  8138. case GGML_TYPE_F32:
  8139. {
  8140. ggml_compute_forward_mul_f32(params, src0, src1, dst);
  8141. } break;
  8142. default:
  8143. {
  8144. GGML_ASSERT(false);
  8145. } break;
  8146. }
  8147. }
  8148. // ggml_compute_forward_div
  8149. static void ggml_compute_forward_div_f32(
  8150. const struct ggml_compute_params * params,
  8151. const struct ggml_tensor * src0,
  8152. const struct ggml_tensor * src1,
  8153. struct ggml_tensor * dst) {
  8154. assert(params->ith == 0);
  8155. assert(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
  8156. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8157. return;
  8158. }
  8159. const int nr = ggml_nrows(src0);
  8160. GGML_TENSOR_BINARY_OP_LOCALS
  8161. GGML_ASSERT( nb0 == sizeof(float));
  8162. GGML_ASSERT(nb00 == sizeof(float));
  8163. if (nb10 == sizeof(float)) {
  8164. for (int ir = 0; ir < nr; ++ir) {
  8165. // src0, src1 and dst are same shape => same indices
  8166. const int i3 = ir/(ne2*ne1);
  8167. const int i2 = (ir - i3*ne2*ne1)/ne1;
  8168. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  8169. #ifdef GGML_USE_ACCELERATE
  8170. UNUSED(ggml_vec_div_f32);
  8171. vDSP_vdiv(
  8172. (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11), 1,
  8173. (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01), 1,
  8174. (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ), 1,
  8175. ne0);
  8176. #else
  8177. ggml_vec_div_f32(ne0,
  8178. (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ),
  8179. (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01),
  8180. (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11));
  8181. #endif
  8184. }
  8185. } else {
  8186. // src1 is not contiguous
  8187. for (int ir = 0; ir < nr; ++ir) {
  8188. // src0, src1 and dst are same shape => same indices
  8189. const int i3 = ir/(ne2*ne1);
  8190. const int i2 = (ir - i3*ne2*ne1)/ne1;
  8191. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  8192. float * dst_ptr = (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 );
  8193. float * src0_ptr = (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
  8194. for (int i0 = 0; i0 < ne0; i0++) {
  8195. float * src1_ptr = (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11 + i0*nb10);
  8196. dst_ptr[i0] = src0_ptr[i0] / (*src1_ptr);
  8197. }
  8198. }
  8199. }
  8200. }
  8201. static void ggml_compute_forward_div(
  8202. const struct ggml_compute_params * params,
  8203. const struct ggml_tensor * src0,
  8204. const struct ggml_tensor * src1,
  8205. struct ggml_tensor * dst) {
  8206. switch (src0->type) {
  8207. case GGML_TYPE_F32:
  8208. {
  8209. ggml_compute_forward_div_f32(params, src0, src1, dst);
  8210. } break;
  8211. default:
  8212. {
  8213. GGML_ASSERT(false);
  8214. } break;
  8215. }
  8216. }
  8217. // ggml_compute_forward_sqr
  8218. static void ggml_compute_forward_sqr_f32(
  8219. const struct ggml_compute_params * params,
  8220. const struct ggml_tensor * src0,
  8221. struct ggml_tensor * dst) {
  8222. assert(params->ith == 0);
  8223. assert(ggml_are_same_shape(src0, dst));
  8224. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8225. return;
  8226. }
  8227. const int n = ggml_nrows(src0);
  8228. const int nc = src0->ne[0];
  8229. assert( dst->nb[0] == sizeof(float));
  8230. assert(src0->nb[0] == sizeof(float));
  8231. for (int i = 0; i < n; i++) {
  8232. ggml_vec_sqr_f32(nc,
  8233. (float *) ((char *) dst->data + i*( dst->nb[1])),
  8234. (float *) ((char *) src0->data + i*(src0->nb[1])));
  8235. }
  8236. }
  8237. static void ggml_compute_forward_sqr(
  8238. const struct ggml_compute_params * params,
  8239. const struct ggml_tensor * src0,
  8240. struct ggml_tensor * dst) {
  8241. switch (src0->type) {
  8242. case GGML_TYPE_F32:
  8243. {
  8244. ggml_compute_forward_sqr_f32(params, src0, dst);
  8245. } break;
  8246. default:
  8247. {
  8248. GGML_ASSERT(false);
  8249. } break;
  8250. }
  8251. }
  8252. // ggml_compute_forward_sqrt
  8253. static void ggml_compute_forward_sqrt_f32(
  8254. const struct ggml_compute_params * params,
  8255. const struct ggml_tensor * src0,
  8256. struct ggml_tensor * dst) {
  8257. assert(params->ith == 0);
  8258. assert(ggml_are_same_shape(src0, dst));
  8259. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8260. return;
  8261. }
  8262. const int n = ggml_nrows(src0);
  8263. const int nc = src0->ne[0];
  8264. assert( dst->nb[0] == sizeof(float));
  8265. assert(src0->nb[0] == sizeof(float));
  8266. for (int i = 0; i < n; i++) {
  8267. ggml_vec_sqrt_f32(nc,
  8268. (float *) ((char *) dst->data + i*( dst->nb[1])),
  8269. (float *) ((char *) src0->data + i*(src0->nb[1])));
  8270. }
  8271. }
  8272. static void ggml_compute_forward_sqrt(
  8273. const struct ggml_compute_params * params,
  8274. const struct ggml_tensor * src0,
  8275. struct ggml_tensor * dst) {
  8276. switch (src0->type) {
  8277. case GGML_TYPE_F32:
  8278. {
  8279. ggml_compute_forward_sqrt_f32(params, src0, dst);
  8280. } break;
  8281. default:
  8282. {
  8283. GGML_ASSERT(false);
  8284. } break;
  8285. }
  8286. }
  8287. // ggml_compute_forward_log
  8288. static void ggml_compute_forward_log_f32(
  8289. const struct ggml_compute_params * params,
  8290. const struct ggml_tensor * src0,
  8291. struct ggml_tensor * dst) {
  8292. GGML_ASSERT(params->ith == 0);
  8293. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  8294. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8295. return;
  8296. }
  8297. const int n = ggml_nrows(src0);
  8298. const int nc = src0->ne[0];
  8299. GGML_ASSERT( dst->nb[0] == sizeof(float));
  8300. GGML_ASSERT(src0->nb[0] == sizeof(float));
  8301. for (int i = 0; i < n; i++) {
  8302. ggml_vec_log_f32(nc,
  8303. (float *) ((char *) dst->data + i*( dst->nb[1])),
  8304. (float *) ((char *) src0->data + i*(src0->nb[1])));
  8305. }
  8306. }
  8307. static void ggml_compute_forward_log(
  8308. const struct ggml_compute_params * params,
  8309. const struct ggml_tensor * src0,
  8310. struct ggml_tensor * dst) {
  8311. switch (src0->type) {
  8312. case GGML_TYPE_F32:
  8313. {
  8314. ggml_compute_forward_log_f32(params, src0, dst);
  8315. } break;
  8316. default:
  8317. {
  8318. GGML_ASSERT(false);
  8319. } break;
  8320. }
  8321. }
  8322. // ggml_compute_forward_sum
  8323. static void ggml_compute_forward_sum_f32(
  8324. const struct ggml_compute_params * params,
  8325. const struct ggml_tensor * src0,
  8326. struct ggml_tensor * dst) {
  8327. assert(params->ith == 0);
  8328. assert(ggml_is_scalar(dst));
  8329. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8330. return;
  8331. }
  8333. assert(src0->nb[0] == sizeof(float));
  8334. GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne)
  8335. GGML_TENSOR_LOCALS(size_t, nb0, src0, nb)
  8336. ggml_float sum = 0;
  8337. ggml_float row_sum = 0;
  8338. for (int64_t i03 = 0; i03 < ne03; i03++) {
  8339. for (int64_t i02 = 0; i02 < ne02; i02++) {
  8340. for (int64_t i01 = 0; i01 < ne01; i01++) {
  8341. ggml_vec_sum_f32_ggf(ne00,
  8342. &row_sum,
  8343. (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03));
  8344. sum += row_sum;
  8345. }
  8346. }
  8347. }
  8348. ((float *) dst->data)[0] = sum;
  8349. }
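// The reduction above accumulates in ggml_float (typically defined as double earlier
// in this file), which keeps the global sum accurate for large tensors; only the
// final value is narrowed back to f32 when it is stored into dst.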
  8350. static void ggml_compute_forward_sum_f16(
  8351. const struct ggml_compute_params * params,
  8352. const struct ggml_tensor * src0,
  8353. struct ggml_tensor * dst) {
  8354. assert(params->ith == 0);
  8355. assert(ggml_is_scalar(dst));
  8356. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8357. return;
  8358. }
  8359. assert(src0->nb[0] == sizeof(ggml_fp16_t));
  8360. GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne)
  8361. GGML_TENSOR_LOCALS(size_t, nb0, src0, nb)
  8362. float sum = 0;
  8363. float row_sum = 0;
  8364. for (int64_t i03 = 0; i03 < ne03; i03++) {
  8365. for (int64_t i02 = 0; i02 < ne02; i02++) {
  8366. for (int64_t i01 = 0; i01 < ne01; i01++) {
  8367. ggml_vec_sum_f16_ggf(ne00,
  8368. &row_sum,
  8369. (ggml_fp16_t *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03));
  8370. sum += row_sum;
  8371. }
  8372. }
  8373. }
  8374. ((ggml_fp16_t *) dst->data)[0] = GGML_FP32_TO_FP16(sum);
  8375. }
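// In contrast to the f32 path, the f16 sum accumulates in plain float before the
// result is converted back to fp16, so very large reductions can lose more precision
// here than in the f32 case.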
  8376. static void ggml_compute_forward_sum(
  8377. const struct ggml_compute_params * params,
  8378. const struct ggml_tensor * src0,
  8379. struct ggml_tensor * dst) {
  8380. switch (src0->type) {
  8381. case GGML_TYPE_F32:
  8382. {
  8383. ggml_compute_forward_sum_f32(params, src0, dst);
  8384. } break;
  8385. case GGML_TYPE_F16:
  8386. {
  8387. ggml_compute_forward_sum_f16(params, src0, dst);
  8388. } break;
  8389. default:
  8390. {
  8391. GGML_ASSERT(false);
  8392. } break;
  8393. }
  8394. }
  8395. // ggml_compute_forward_sum_rows
  8396. static void ggml_compute_forward_sum_rows_f32(
  8397. const struct ggml_compute_params * params,
  8398. const struct ggml_tensor * src0,
  8399. struct ggml_tensor * dst) {
  8400. GGML_ASSERT(params->ith == 0);
  8401. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8402. return;
  8403. }
  8404. GGML_ASSERT(src0->nb[0] == sizeof(float));
  8405. GGML_ASSERT(dst->nb[0] == sizeof(float));
  8406. GGML_TENSOR_UNARY_OP_LOCALS
  8407. GGML_ASSERT(ne0 == 1);
  8408. GGML_ASSERT(ne1 == ne01);
  8409. GGML_ASSERT(ne2 == ne02);
  8410. GGML_ASSERT(ne3 == ne03);
  8411. for (int64_t i3 = 0; i3 < ne03; i3++) {
  8412. for (int64_t i2 = 0; i2 < ne02; i2++) {
  8413. for (int64_t i1 = 0; i1 < ne01; i1++) {
  8414. float * src_row = (float *) ((char *) src0->data + i1*nb01 + i2*nb02 + i3*nb03);
  8415. float * dst_row = (float *) ((char *) dst->data + i1*nb1 + i2*nb2 + i3*nb3);
  8416. float row_sum = 0;
  8417. ggml_vec_sum_f32(ne00, &row_sum, src_row);
  8418. dst_row[0] = row_sum;
  8419. }
  8420. }
  8421. }
  8422. }
  8423. static void ggml_compute_forward_sum_rows(
  8424. const struct ggml_compute_params * params,
  8425. const struct ggml_tensor * src0,
  8426. struct ggml_tensor * dst) {
  8427. switch (src0->type) {
  8428. case GGML_TYPE_F32:
  8429. {
  8430. ggml_compute_forward_sum_rows_f32(params, src0, dst);
  8431. } break;
  8432. default:
  8433. {
  8434. GGML_ASSERT(false);
  8435. } break;
  8436. }
  8437. }
  8438. // ggml_compute_forward_mean
  8439. static void ggml_compute_forward_mean_f32(
  8440. const struct ggml_compute_params * params,
  8441. const struct ggml_tensor * src0,
  8442. struct ggml_tensor * dst) {
  8443. assert(params->ith == 0);
  8444. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8445. return;
  8446. }
  8447. assert(src0->nb[0] == sizeof(float));
  8448. GGML_TENSOR_UNARY_OP_LOCALS
  8449. assert(ne0 == 1);
  8450. assert(ne1 == ne01);
  8451. assert(ne2 == ne02);
  8452. assert(ne3 == ne03);
  8453. UNUSED(ne0);
  8454. UNUSED(ne1);
  8455. UNUSED(ne2);
  8456. UNUSED(ne3);
  8457. for (int64_t i03 = 0; i03 < ne03; i03++) {
  8458. for (int64_t i02 = 0; i02 < ne02; i02++) {
  8459. for (int64_t i01 = 0; i01 < ne01; i01++) {
  8460. ggml_vec_sum_f32(ne00,
  8461. (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3),
  8462. (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03));
  8463. *(float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3) /= (float) ne00;
  8464. }
  8465. }
  8466. }
  8467. }
  8468. static void ggml_compute_forward_mean(
  8469. const struct ggml_compute_params * params,
  8470. const struct ggml_tensor * src0,
  8471. struct ggml_tensor * dst) {
  8472. switch (src0->type) {
  8473. case GGML_TYPE_F32:
  8474. {
  8475. ggml_compute_forward_mean_f32(params, src0, dst);
  8476. } break;
  8477. default:
  8478. {
  8479. GGML_ASSERT(false);
  8480. } break;
  8481. }
  8482. }
  8483. // ggml_compute_forward_argmax
  8484. static void ggml_compute_forward_argmax_f32(
  8485. const struct ggml_compute_params * params,
  8486. const struct ggml_tensor * src0,
  8487. struct ggml_tensor * dst) {
  8488. assert(params->ith == 0);
  8489. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8490. return;
  8491. }
  8492. assert(src0->nb[0] == sizeof(float));
  8493. assert(dst->nb[0] == sizeof(float));
  8494. const int64_t ne00 = src0->ne[0];
  8495. const int64_t ne01 = src0->ne[1];
  8496. const size_t nb01 = src0->nb[1];
  8497. const size_t nb0 = dst->nb[0];
  8498. for (int64_t i1 = 0; i1 < ne01; i1++) {
  8499. float * src = (float *) ((char *) src0->data + i1*nb01);
  8500. int32_t * dst_ = (int32_t *) ((char *) dst->data + i1*nb0);
  8501. int v = 0;
  8502. ggml_vec_argmax_f32(ne00, &v, src);
  8503. dst_[0] = v;
  8504. }
  8505. }
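// Note: although dst->nb[0] is checked against sizeof(float) above, the values
// written are int32_t row-wise argmax indices; the check only relies on the two
// types having the same size.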
  8506. static void ggml_compute_forward_argmax(
  8507. const struct ggml_compute_params * params,
  8508. const struct ggml_tensor * src0,
  8509. struct ggml_tensor * dst) {
  8510. switch (src0->type) {
  8511. case GGML_TYPE_F32:
  8512. {
  8513. ggml_compute_forward_argmax_f32(params, src0, dst);
  8514. } break;
  8515. default:
  8516. {
  8517. GGML_ASSERT(false);
  8518. } break;
  8519. }
  8520. }
  8521. // ggml_compute_forward_repeat
  8522. static void ggml_compute_forward_repeat_f32(
  8523. const struct ggml_compute_params * params,
  8524. const struct ggml_tensor * src0,
  8525. struct ggml_tensor * dst) {
  8526. GGML_ASSERT(params->ith == 0);
  8527. GGML_ASSERT(ggml_can_repeat(src0, dst));
  8528. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8529. return;
  8530. }
  8531. GGML_TENSOR_UNARY_OP_LOCALS
  8532. // guaranteed to be an integer due to the check in ggml_can_repeat
  8533. const int nr0 = (int)(ne0/ne00);
  8534. const int nr1 = (int)(ne1/ne01);
  8535. const int nr2 = (int)(ne2/ne02);
  8536. const int nr3 = (int)(ne3/ne03);
  8537. // TODO: support for transposed / permuted tensors
  8538. GGML_ASSERT(nb0 == sizeof(float));
  8539. GGML_ASSERT(nb00 == sizeof(float));
  8540. // TODO: maybe this is not optimal?
  8541. for (int i3 = 0; i3 < nr3; i3++) {
  8542. for (int k3 = 0; k3 < ne03; k3++) {
  8543. for (int i2 = 0; i2 < nr2; i2++) {
  8544. for (int k2 = 0; k2 < ne02; k2++) {
  8545. for (int i1 = 0; i1 < nr1; i1++) {
  8546. for (int k1 = 0; k1 < ne01; k1++) {
  8547. for (int i0 = 0; i0 < nr0; i0++) {
  8548. ggml_vec_cpy_f32(ne00,
  8549. (float *) ((char *) dst->data + (i3*ne03 + k3)*nb3 + (i2*ne02 + k2)*nb2 + (i1*ne01 + k1)*nb1 + (i0*ne00)*nb0),
  8550. (float *) ((char *) src0->data + ( k3)*nb03 + ( k2)*nb02 + ( k1)*nb01));
  8551. }
  8552. }
  8553. }
  8554. }
  8555. }
  8556. }
  8557. }
  8558. }
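// Illustrative example of the repeat tiling above: src0 ne = [2, 3, 1, 1] repeated
// into dst ne = [6, 3, 2, 1] gives nr0 = 6/2 = 3, nr1 = 1, nr2 = 2, nr3 = 1, so each
// source row of 2 floats is copied 3 times along dim 0 and the whole plane is
// duplicated twice along dim 2.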
  8559. static void ggml_compute_forward_repeat_f16(
  8560. const struct ggml_compute_params * params,
  8561. const struct ggml_tensor * src0,
  8562. struct ggml_tensor * dst) {
  8563. GGML_ASSERT(params->ith == 0);
  8564. GGML_ASSERT(ggml_can_repeat(src0, dst));
  8565. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8566. return;
  8567. }
  8568. GGML_TENSOR_UNARY_OP_LOCALS;
  8569. // guaranteed to be an integer due to the check in ggml_can_repeat
  8570. const int nr0 = (int)(ne0/ne00);
  8571. const int nr1 = (int)(ne1/ne01);
  8572. const int nr2 = (int)(ne2/ne02);
  8573. const int nr3 = (int)(ne3/ne03);
  8574. // TODO: support for transposed / permuted tensors
  8575. GGML_ASSERT(nb0 == sizeof(ggml_fp16_t));
  8576. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  8577. // TODO: maybe this is not optimal?
  8578. for (int i3 = 0; i3 < nr3; i3++) {
  8579. for (int k3 = 0; k3 < ne03; k3++) {
  8580. for (int i2 = 0; i2 < nr2; i2++) {
  8581. for (int k2 = 0; k2 < ne02; k2++) {
  8582. for (int i1 = 0; i1 < nr1; i1++) {
  8583. for (int k1 = 0; k1 < ne01; k1++) {
  8584. for (int i0 = 0; i0 < nr0; i0++) {
  8585. ggml_fp16_t * y = (ggml_fp16_t *) ((char *) dst->data + (i3*ne03 + k3)*nb3 + (i2*ne02 + k2)*nb2 + (i1*ne01 + k1)*nb1 + (i0*ne00)*nb0);
  8586. ggml_fp16_t * x = (ggml_fp16_t *) ((char *) src0->data + ( k3)*nb03 + ( k2)*nb02 + ( k1)*nb01);
  8587. // ggml_vec_cpy_f16(ne00, y, x)
  8588. for (int i = 0; i < ne00; ++i) {
  8589. y[i] = x[i];
  8590. }
  8591. }
  8592. }
  8593. }
  8594. }
  8595. }
  8596. }
  8597. }
  8598. }
  8599. static void ggml_compute_forward_repeat(
  8600. const struct ggml_compute_params * params,
  8601. const struct ggml_tensor * src0,
  8602. struct ggml_tensor * dst) {
  8603. switch (src0->type) {
  8604. case GGML_TYPE_F16:
  8605. {
  8606. ggml_compute_forward_repeat_f16(params, src0, dst);
  8607. } break;
  8608. case GGML_TYPE_F32:
  8609. {
  8610. ggml_compute_forward_repeat_f32(params, src0, dst);
  8611. } break;
  8612. default:
  8613. {
  8614. GGML_ASSERT(false);
  8615. } break;
  8616. }
  8617. }
  8618. // ggml_compute_forward_repeat_back
  8619. static void ggml_compute_forward_repeat_back_f32(
  8620. const struct ggml_compute_params * params,
  8621. const struct ggml_tensor * src0,
  8622. struct ggml_tensor * dst) {
  8623. GGML_ASSERT(params->ith == 0);
  8624. GGML_ASSERT(ggml_can_repeat(dst, src0));
  8625. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8626. return;
  8627. }
  8628. GGML_TENSOR_UNARY_OP_LOCALS
  8629. // guaranteed to be an integer due to the check in ggml_can_repeat
  8630. const int nr0 = (int)(ne00/ne0);
  8631. const int nr1 = (int)(ne01/ne1);
  8632. const int nr2 = (int)(ne02/ne2);
  8633. const int nr3 = (int)(ne03/ne3);
  8634. // TODO: support for transposed / permuted tensors
  8635. GGML_ASSERT(nb0 == sizeof(float));
  8636. GGML_ASSERT(nb00 == sizeof(float));
  8637. if (ggml_is_contiguous(dst)) {
  8638. ggml_vec_set_f32(ne0*ne1*ne2*ne3, dst->data, 0);
  8639. } else {
  8640. for (int k3 = 0; k3 < ne3; k3++) {
  8641. for (int k2 = 0; k2 < ne2; k2++) {
  8642. for (int k1 = 0; k1 < ne1; k1++) {
  8643. ggml_vec_set_f32(ne0,
  8644. (float *) ((char *) dst->data + k1*nb1 + k2*nb2 + k3*nb3),
  8645. 0);
  8646. }
  8647. }
  8648. }
  8649. }
  8650. // TODO: maybe this is not optimal?
  8651. for (int i3 = 0; i3 < nr3; i3++) {
  8652. for (int k3 = 0; k3 < ne3; k3++) {
  8653. for (int i2 = 0; i2 < nr2; i2++) {
  8654. for (int k2 = 0; k2 < ne2; k2++) {
  8655. for (int i1 = 0; i1 < nr1; i1++) {
  8656. for (int k1 = 0; k1 < ne1; k1++) {
  8657. for (int i0 = 0; i0 < nr0; i0++) {
  8658. ggml_vec_acc_f32(ne0,
  8659. (float *) ((char *) dst->data + ( k3)*nb3 + ( k2)*nb2 + ( k1)*nb1),
  8660. (float *) ((char *) src0->data + (i3*ne3 + k3)*nb03 + (i2*ne2 + k2)*nb02 + (i1*ne1 + k1)*nb01 + (i0*ne0)*nb00));
  8661. }
  8662. }
  8663. }
  8664. }
  8665. }
  8666. }
  8667. }
  8668. }
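// repeat_back is the reverse of repeat (used for gradients): every tile that repeat
// would have written is accumulated back into the single smaller dst, which is why
// dst is zeroed first and ggml_vec_acc_f32 (add-accumulate) is used instead of a copy.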
  8669. static void ggml_compute_forward_repeat_back(
  8670. const struct ggml_compute_params * params,
  8671. const struct ggml_tensor * src0,
  8672. struct ggml_tensor * dst) {
  8673. switch (src0->type) {
  8674. case GGML_TYPE_F32:
  8675. {
  8676. ggml_compute_forward_repeat_back_f32(params, src0, dst);
  8677. } break;
  8678. default:
  8679. {
  8680. GGML_ASSERT(false);
  8681. } break;
  8682. }
  8683. }
  8684. // ggml_compute_forward_concat
  8685. static void ggml_compute_forward_concat_f32(
  8686. const struct ggml_compute_params * params,
  8687. const struct ggml_tensor * src0,
  8688. const struct ggml_tensor * src1,
  8689. struct ggml_tensor * dst) {
  8690. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8691. return;
  8692. }
  8693. GGML_ASSERT(src0->nb[0] == sizeof(float));
  8694. const int ith = params->ith;
  8695. GGML_TENSOR_BINARY_OP_LOCALS
  8696. // TODO: support for transposed / permuted tensors
  8697. GGML_ASSERT(nb0 == sizeof(float));
  8698. GGML_ASSERT(nb00 == sizeof(float));
  8699. GGML_ASSERT(nb10 == sizeof(float));
  8700. for (int i3 = 0; i3 < ne3; i3++) {
  8701. for (int i2 = ith; i2 < ne2; i2++) {
  8702. if (i2 < ne02) { // src0
  8703. for (int i1 = 0; i1 < ne1; i1++) {
  8704. for (int i0 = 0; i0 < ne0; i0++) {
  8705. const float * x = (float *)((char *) src0->data + i0 * nb00 + i1 * nb01 + i2 * nb02 + i3 * nb03);
  8706. float * y = (float *)((char *)dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3);
  8707. *y = *x;
  8708. }
  8709. }
  8710. } // src1
  8711. else {
  8712. for (int i1 = 0; i1 < ne1; i1++) {
  8713. for (int i0 = 0; i0 < ne0; i0++) {
  8714. const float * x = (float *)((char *) src1->data + i0 * nb10 + i1 * nb11 + (i2 - ne02) * nb12 + i3 * nb13);
  8715. float * y = (float *)((char *)dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3);
  8716. *y = *x;
  8717. }
  8718. }
  8719. }
  8720. }
  8721. }
  8722. }
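// As implemented above, concatenation happens along dimension 2: planes with
// i2 < ne02 are taken from src0 and the remaining i2 planes are taken from src1
// (with the index shifted down by ne02).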
  8723. static void ggml_compute_forward_concat(
  8724. const struct ggml_compute_params* params,
  8725. const struct ggml_tensor* src0,
  8726. const struct ggml_tensor* src1,
  8727. struct ggml_tensor* dst) {
  8728. switch (src0->type) {
  8729. case GGML_TYPE_F32:
  8730. {
  8731. ggml_compute_forward_concat_f32(params, src0, src1, dst);
  8732. } break;
  8733. default:
  8734. {
  8735. GGML_ASSERT(false);
  8736. } break;
  8737. }
  8738. }
  8739. // ggml_compute_forward_abs
  8740. static void ggml_compute_forward_abs_f32(
  8741. const struct ggml_compute_params * params,
  8742. const struct ggml_tensor * src0,
  8743. struct ggml_tensor * dst) {
  8744. assert(params->ith == 0);
  8745. assert(ggml_are_same_shape(src0, dst));
  8746. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8747. return;
  8748. }
  8749. const int n = ggml_nrows(src0);
  8750. const int nc = src0->ne[0];
  8751. assert(dst->nb[0] == sizeof(float));
  8752. assert(src0->nb[0] == sizeof(float));
  8753. for (int i = 0; i < n; i++) {
  8754. ggml_vec_abs_f32(nc,
  8755. (float *) ((char *) dst->data + i*( dst->nb[1])),
  8756. (float *) ((char *) src0->data + i*(src0->nb[1])));
  8757. }
  8758. }
  8759. static void ggml_compute_forward_abs(
  8760. const struct ggml_compute_params * params,
  8761. const struct ggml_tensor * src0,
  8762. struct ggml_tensor * dst) {
  8763. switch (src0->type) {
  8764. case GGML_TYPE_F32:
  8765. {
  8766. ggml_compute_forward_abs_f32(params, src0, dst);
  8767. } break;
  8768. default:
  8769. {
  8770. GGML_ASSERT(false);
  8771. } break;
  8772. }
  8773. }
  8774. // ggml_compute_forward_sgn
  8775. static void ggml_compute_forward_sgn_f32(
  8776. const struct ggml_compute_params * params,
  8777. const struct ggml_tensor * src0,
  8778. struct ggml_tensor * dst) {
  8779. assert(params->ith == 0);
  8780. assert(ggml_are_same_shape(src0, dst));
  8781. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8782. return;
  8783. }
  8784. const int n = ggml_nrows(src0);
  8785. const int nc = src0->ne[0];
  8786. assert(dst->nb[0] == sizeof(float));
  8787. assert(src0->nb[0] == sizeof(float));
  8788. for (int i = 0; i < n; i++) {
  8789. ggml_vec_sgn_f32(nc,
  8790. (float *) ((char *) dst->data + i*( dst->nb[1])),
  8791. (float *) ((char *) src0->data + i*(src0->nb[1])));
  8792. }
  8793. }
  8794. static void ggml_compute_forward_sgn(
  8795. const struct ggml_compute_params * params,
  8796. const struct ggml_tensor * src0,
  8797. struct ggml_tensor * dst) {
  8798. switch (src0->type) {
  8799. case GGML_TYPE_F32:
  8800. {
  8801. ggml_compute_forward_sgn_f32(params, src0, dst);
  8802. } break;
  8803. default:
  8804. {
  8805. GGML_ASSERT(false);
  8806. } break;
  8807. }
  8808. }
  8809. // ggml_compute_forward_neg
  8810. static void ggml_compute_forward_neg_f32(
  8811. const struct ggml_compute_params * params,
  8812. const struct ggml_tensor * src0,
  8813. struct ggml_tensor * dst) {
  8814. assert(params->ith == 0);
  8815. assert(ggml_are_same_shape(src0, dst));
  8816. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8817. return;
  8818. }
  8819. const int n = ggml_nrows(src0);
  8820. const int nc = src0->ne[0];
  8821. assert(dst->nb[0] == sizeof(float));
  8822. assert(src0->nb[0] == sizeof(float));
  8823. for (int i = 0; i < n; i++) {
  8824. ggml_vec_neg_f32(nc,
  8825. (float *) ((char *) dst->data + i*( dst->nb[1])),
  8826. (float *) ((char *) src0->data + i*(src0->nb[1])));
  8827. }
  8828. }
  8829. static void ggml_compute_forward_neg(
  8830. const struct ggml_compute_params * params,
  8831. const struct ggml_tensor * src0,
  8832. struct ggml_tensor * dst) {
  8833. switch (src0->type) {
  8834. case GGML_TYPE_F32:
  8835. {
  8836. ggml_compute_forward_neg_f32(params, src0, dst);
  8837. } break;
  8838. default:
  8839. {
  8840. GGML_ASSERT(false);
  8841. } break;
  8842. }
  8843. }
  8844. // ggml_compute_forward_step
  8845. static void ggml_compute_forward_step_f32(
  8846. const struct ggml_compute_params * params,
  8847. const struct ggml_tensor * src0,
  8848. struct ggml_tensor * dst) {
  8849. assert(params->ith == 0);
  8850. assert(ggml_are_same_shape(src0, dst));
  8851. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8852. return;
  8853. }
  8854. const int n = ggml_nrows(src0);
  8855. const int nc = src0->ne[0];
  8856. assert(dst->nb[0] == sizeof(float));
  8857. assert(src0->nb[0] == sizeof(float));
  8858. for (int i = 0; i < n; i++) {
  8859. ggml_vec_step_f32(nc,
  8860. (float *) ((char *) dst->data + i*( dst->nb[1])),
  8861. (float *) ((char *) src0->data + i*(src0->nb[1])));
  8862. }
  8863. }
  8864. static void ggml_compute_forward_step(
  8865. const struct ggml_compute_params * params,
  8866. const struct ggml_tensor * src0,
  8867. struct ggml_tensor * dst) {
  8868. switch (src0->type) {
  8869. case GGML_TYPE_F32:
  8870. {
  8871. ggml_compute_forward_step_f32(params, src0, dst);
  8872. } break;
  8873. default:
  8874. {
  8875. GGML_ASSERT(false);
  8876. } break;
  8877. }
  8878. }
  8879. // ggml_compute_forward_tanh
  8880. static void ggml_compute_forward_tanh_f32(
  8881. const struct ggml_compute_params * params,
  8882. const struct ggml_tensor * src0,
  8883. struct ggml_tensor * dst) {
  8884. assert(params->ith == 0);
  8885. assert(ggml_are_same_shape(src0, dst));
  8886. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8887. return;
  8888. }
  8889. const int n = ggml_nrows(src0);
  8890. const int nc = src0->ne[0];
  8891. assert(dst->nb[0] == sizeof(float));
  8892. assert(src0->nb[0] == sizeof(float));
  8893. for (int i = 0; i < n; i++) {
  8894. ggml_vec_tanh_f32(nc,
  8895. (float *) ((char *) dst->data + i*( dst->nb[1])),
  8896. (float *) ((char *) src0->data + i*(src0->nb[1])));
  8897. }
  8898. }
  8899. static void ggml_compute_forward_tanh(
  8900. const struct ggml_compute_params * params,
  8901. const struct ggml_tensor * src0,
  8902. struct ggml_tensor * dst) {
  8903. switch (src0->type) {
  8904. case GGML_TYPE_F32:
  8905. {
  8906. ggml_compute_forward_tanh_f32(params, src0, dst);
  8907. } break;
  8908. default:
  8909. {
  8910. GGML_ASSERT(false);
  8911. } break;
  8912. }
  8913. }
  8914. // ggml_compute_forward_elu
  8915. static void ggml_compute_forward_elu_f32(
  8916. const struct ggml_compute_params * params,
  8917. const struct ggml_tensor * src0,
  8918. struct ggml_tensor * dst) {
  8919. assert(params->ith == 0);
  8920. assert(ggml_are_same_shape(src0, dst));
  8921. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8922. return;
  8923. }
  8924. const int n = ggml_nrows(src0);
  8925. const int nc = src0->ne[0];
  8926. assert(dst->nb[0] == sizeof(float));
  8927. assert(src0->nb[0] == sizeof(float));
  8928. for (int i = 0; i < n; i++) {
  8929. ggml_vec_elu_f32(nc,
  8930. (float *) ((char *) dst->data + i*( dst->nb[1])),
  8931. (float *) ((char *) src0->data + i*(src0->nb[1])));
  8932. }
  8933. }
  8934. static void ggml_compute_forward_elu(
  8935. const struct ggml_compute_params * params,
  8936. const struct ggml_tensor * src0,
  8937. struct ggml_tensor * dst) {
  8938. switch (src0->type) {
  8939. case GGML_TYPE_F32:
  8940. {
  8941. ggml_compute_forward_elu_f32(params, src0, dst);
  8942. } break;
  8943. default:
  8944. {
  8945. GGML_ASSERT(false);
  8946. } break;
  8947. }
  8948. }
  8949. // ggml_compute_forward_relu
  8950. static void ggml_compute_forward_relu_f32(
  8951. const struct ggml_compute_params * params,
  8952. const struct ggml_tensor * src0,
  8953. struct ggml_tensor * dst) {
  8954. assert(params->ith == 0);
  8955. assert(ggml_are_same_shape(src0, dst));
  8956. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8957. return;
  8958. }
  8959. const int n = ggml_nrows(src0);
  8960. const int nc = src0->ne[0];
  8961. assert(dst->nb[0] == sizeof(float));
  8962. assert(src0->nb[0] == sizeof(float));
  8963. for (int i = 0; i < n; i++) {
  8964. ggml_vec_relu_f32(nc,
  8965. (float *) ((char *) dst->data + i*( dst->nb[1])),
  8966. (float *) ((char *) src0->data + i*(src0->nb[1])));
  8967. }
  8968. }
  8969. static void ggml_compute_forward_relu(
  8970. const struct ggml_compute_params * params,
  8971. const struct ggml_tensor * src0,
  8972. struct ggml_tensor * dst) {
  8973. switch (src0->type) {
  8974. case GGML_TYPE_F32:
  8975. {
  8976. ggml_compute_forward_relu_f32(params, src0, dst);
  8977. } break;
  8978. default:
  8979. {
  8980. GGML_ASSERT(false);
  8981. } break;
  8982. }
  8983. }
  8984. // ggml_compute_forward_gelu
  8985. static void ggml_compute_forward_gelu_f32(
  8986. const struct ggml_compute_params * params,
  8987. const struct ggml_tensor * src0,
  8988. struct ggml_tensor * dst) {
  8989. GGML_ASSERT(ggml_is_contiguous_except_dim_1(src0));
  8990. GGML_ASSERT(ggml_is_contiguous_except_dim_1(dst));
  8991. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  8992. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8993. return;
  8994. }
  8995. const int ith = params->ith;
  8996. const int nth = params->nth;
  8997. const int nc = src0->ne[0];
  8998. const int nr = ggml_nrows(src0);
  8999. // rows per thread
  9000. const int dr = (nr + nth - 1)/nth;
  9001. // row range for this thread
  9002. const int ir0 = dr*ith;
  9003. const int ir1 = MIN(ir0 + dr, nr);
  9004. for (int i1 = ir0; i1 < ir1; i1++) {
  9005. ggml_vec_gelu_f32(nc,
  9006. (float *) ((char *) dst->data + i1*( dst->nb[1])),
  9007. (float *) ((char *) src0->data + i1*(src0->nb[1])));
  9008. #ifndef NDEBUG
  9009. for (int k = 0; k < nc; k++) {
  9010. const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k];
  9011. UNUSED(x);
  9012. assert(!isnan(x));
  9013. assert(!isinf(x));
  9014. }
  9015. #endif
  9016. }
  9017. }
  9018. static void ggml_compute_forward_gelu(
  9019. const struct ggml_compute_params * params,
  9020. const struct ggml_tensor * src0,
  9021. struct ggml_tensor * dst) {
  9022. switch (src0->type) {
  9023. case GGML_TYPE_F32:
  9024. {
  9025. ggml_compute_forward_gelu_f32(params, src0, dst);
  9026. } break;
  9027. default:
  9028. {
  9029. GGML_ASSERT(false);
  9030. } break;
  9031. }
  9032. }
  9033. // ggml_compute_forward_gelu_quick
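// "Quick" GELU variant: x * sigmoid(c*x), a cheaper approximation of GELU
// (c is usually 1.702; the exact coefficient is assumed to live in ggml_vec_gelu_quick_f32).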
  9034. static void ggml_compute_forward_gelu_quick_f32(
  9035. const struct ggml_compute_params * params,
  9036. const struct ggml_tensor * src0,
  9037. struct ggml_tensor * dst) {
  9038. GGML_ASSERT(ggml_is_contiguous_except_dim_1(src0));
  9039. GGML_ASSERT(ggml_is_contiguous_except_dim_1(dst));
  9040. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  9041. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9042. return;
  9043. }
  9044. const int ith = params->ith;
  9045. const int nth = params->nth;
  9046. const int nc = src0->ne[0];
  9047. const int nr = ggml_nrows(src0);
  9048. // rows per thread
  9049. const int dr = (nr + nth - 1)/nth;
  9050. // row range for this thread
  9051. const int ir0 = dr*ith;
  9052. const int ir1 = MIN(ir0 + dr, nr);
  9053. for (int i1 = ir0; i1 < ir1; i1++) {
  9054. ggml_vec_gelu_quick_f32(nc,
  9055. (float *) ((char *) dst->data + i1*( dst->nb[1])),
  9056. (float *) ((char *) src0->data + i1*(src0->nb[1])));
  9057. #ifndef NDEBUG
  9058. for (int k = 0; k < nc; k++) {
  9059. const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k];
  9060. UNUSED(x);
  9061. assert(!isnan(x));
  9062. assert(!isinf(x));
  9063. }
  9064. #endif
  9065. }
  9066. }
  9067. static void ggml_compute_forward_gelu_quick(
  9068. const struct ggml_compute_params * params,
  9069. const struct ggml_tensor * src0,
  9070. struct ggml_tensor * dst) {
  9071. switch (src0->type) {
  9072. case GGML_TYPE_F32:
  9073. {
  9074. ggml_compute_forward_gelu_quick_f32(params, src0, dst);
  9075. } break;
  9076. default:
  9077. {
  9078. GGML_ASSERT(false);
  9079. } break;
  9080. }
  9081. }
  9082. // ggml_compute_forward_silu
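// SiLU (a.k.a. swish): SiLU(x) = x * sigmoid(x), applied row-wise and split across threads.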
  9083. static void ggml_compute_forward_silu_f32(
  9084. const struct ggml_compute_params * params,
  9085. const struct ggml_tensor * src0,
  9086. struct ggml_tensor * dst) {
  9087. GGML_ASSERT(ggml_is_contiguous_except_dim_1(src0));
  9088. GGML_ASSERT(ggml_is_contiguous_except_dim_1(dst));
  9089. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  9090. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9091. return;
  9092. }
  9093. const int ith = params->ith;
  9094. const int nth = params->nth;
  9095. const int nc = src0->ne[0];
  9096. const int nr = ggml_nrows(src0);
  9097. // rows per thread
  9098. const int dr = (nr + nth - 1)/nth;
  9099. // row range for this thread
  9100. const int ir0 = dr*ith;
  9101. const int ir1 = MIN(ir0 + dr, nr);
  9102. for (int i1 = ir0; i1 < ir1; i1++) {
  9103. ggml_vec_silu_f32(nc,
  9104. (float *) ((char *) dst->data + i1*( dst->nb[1])),
  9105. (float *) ((char *) src0->data + i1*(src0->nb[1])));
  9106. #ifndef NDEBUG
  9107. for (int k = 0; k < nc; k++) {
  9108. const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k];
  9109. UNUSED(x);
  9110. assert(!isnan(x));
  9111. assert(!isinf(x));
  9112. }
  9113. #endif
  9114. }
  9115. }
  9116. static void ggml_compute_forward_silu(
  9117. const struct ggml_compute_params * params,
  9118. const struct ggml_tensor * src0,
  9119. struct ggml_tensor * dst) {
  9120. switch (src0->type) {
  9121. case GGML_TYPE_F32:
  9122. {
  9123. ggml_compute_forward_silu_f32(params, src0, dst);
  9124. } break;
  9125. default:
  9126. {
  9127. GGML_ASSERT(false);
  9128. } break;
  9129. }
  9130. }
  9131. // ggml_compute_forward_silu_back
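// Backward pass of SiLU. With s = sigmoid(x):
//   d/dx [x*s] = s * (1 + x*(1 - s))
// so dst = grad * s * (1 + x*(1 - s)), computed per row by ggml_vec_silu_backward_f32.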
  9132. static void ggml_compute_forward_silu_back_f32(
  9133. const struct ggml_compute_params * params,
  9134. const struct ggml_tensor * src0,
  9135. const struct ggml_tensor * grad,
  9136. struct ggml_tensor * dst) {
  9137. GGML_ASSERT(ggml_is_contiguous_except_dim_1(grad));
  9138. GGML_ASSERT(ggml_is_contiguous_except_dim_1(src0));
  9139. GGML_ASSERT(ggml_is_contiguous_except_dim_1(dst));
  9140. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  9141. GGML_ASSERT(ggml_are_same_shape(src0, grad));
  9142. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9143. return;
  9144. }
  9145. const int ith = params->ith;
  9146. const int nth = params->nth;
  9147. const int nc = src0->ne[0];
  9148. const int nr = ggml_nrows(src0);
  9149. // rows per thread
  9150. const int dr = (nr + nth - 1)/nth;
  9151. // row range for this thread
  9152. const int ir0 = dr*ith;
  9153. const int ir1 = MIN(ir0 + dr, nr);
  9154. for (int i1 = ir0; i1 < ir1; i1++) {
  9155. ggml_vec_silu_backward_f32(nc,
  9156. (float *) ((char *) dst->data + i1*( dst->nb[1])),
  9157. (float *) ((char *) src0->data + i1*(src0->nb[1])),
  9158. (float *) ((char *) grad->data + i1*(grad->nb[1])));
  9159. #ifndef NDEBUG
  9160. for (int k = 0; k < nc; k++) {
  9161. const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k];
  9162. UNUSED(x);
  9163. assert(!isnan(x));
  9164. assert(!isinf(x));
  9165. }
  9166. #endif
  9167. }
  9168. }
  9169. static void ggml_compute_forward_silu_back(
  9170. const struct ggml_compute_params * params,
  9171. const struct ggml_tensor * src0,
  9172. const struct ggml_tensor * grad,
  9173. struct ggml_tensor * dst) {
  9174. switch (src0->type) {
  9175. case GGML_TYPE_F32:
  9176. {
  9177. ggml_compute_forward_silu_back_f32(params, src0, grad, dst);
  9178. } break;
  9179. default:
  9180. {
  9181. GGML_ASSERT(false);
  9182. } break;
  9183. }
  9184. }
  9185. // ggml_compute_forward_norm
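// Layer normalization over the innermost dimension (ne00):
//   y = (x - mean(x)) / sqrt(var(x) + eps)
// rows are distributed across threads via the i01 += nth stride below.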
  9186. static void ggml_compute_forward_norm_f32(
  9187. const struct ggml_compute_params * params,
  9188. const struct ggml_tensor * src0,
  9189. struct ggml_tensor * dst) {
  9190. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  9191. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9192. return;
  9193. }
  9194. GGML_ASSERT(src0->nb[0] == sizeof(float));
  9195. const int ith = params->ith;
  9196. const int nth = params->nth;
  9197. GGML_TENSOR_UNARY_OP_LOCALS
  9198. float eps;
  9199. memcpy(&eps, dst->op_params, sizeof(float));
  9200. // TODO: optimize
  9201. for (int64_t i03 = 0; i03 < ne03; i03++) {
  9202. for (int64_t i02 = 0; i02 < ne02; i02++) {
  9203. for (int64_t i01 = ith; i01 < ne01; i01 += nth) {
  9204. const float * x = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
  9205. ggml_float sum = 0.0;
  9206. for (int64_t i00 = 0; i00 < ne00; i00++) {
  9207. sum += (ggml_float)x[i00];
  9208. }
  9209. float mean = sum/ne00;
  9210. float * y = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3);
  9211. ggml_float sum2 = 0.0;
  9212. for (int64_t i00 = 0; i00 < ne00; i00++) {
  9213. float v = x[i00] - mean;
  9214. y[i00] = v;
  9215. sum2 += (ggml_float)(v*v);
  9216. }
  9217. float variance = sum2/ne00;
  9218. const float scale = 1.0f/sqrtf(variance + eps);
  9219. ggml_vec_scale_f32(ne00, y, scale);
  9220. }
  9221. }
  9222. }
  9223. }
  9224. static void ggml_compute_forward_norm(
  9225. const struct ggml_compute_params * params,
  9226. const struct ggml_tensor * src0,
  9227. struct ggml_tensor * dst) {
  9228. switch (src0->type) {
  9229. case GGML_TYPE_F32:
  9230. {
  9231. ggml_compute_forward_norm_f32(params, src0, dst);
  9232. } break;
  9233. default:
  9234. {
  9235. GGML_ASSERT(false);
  9236. } break;
  9237. }
  9238. }
// ggml_compute_forward_rms_norm
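// RMS normalization over the innermost dimension:
//   y = x / sqrt(mean(x^2) + eps)
// i.e. the same as ggml_compute_forward_norm_f32 but without subtracting the mean.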
  9240. static void ggml_compute_forward_rms_norm_f32(
  9241. const struct ggml_compute_params * params,
  9242. const struct ggml_tensor * src0,
  9243. struct ggml_tensor * dst) {
  9244. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  9245. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9246. return;
  9247. }
  9248. GGML_ASSERT(src0->nb[0] == sizeof(float));
  9249. const int ith = params->ith;
  9250. const int nth = params->nth;
  9251. GGML_TENSOR_UNARY_OP_LOCALS
  9252. float eps;
  9253. memcpy(&eps, dst->op_params, sizeof(float));
  9254. // TODO: optimize
  9255. for (int64_t i03 = 0; i03 < ne03; i03++) {
  9256. for (int64_t i02 = 0; i02 < ne02; i02++) {
  9257. for (int64_t i01 = ith; i01 < ne01; i01 += nth) {
  9258. const float * x = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
  9259. ggml_float sum = 0.0;
  9260. for (int64_t i00 = 0; i00 < ne00; i00++) {
  9261. sum += (ggml_float)(x[i00] * x[i00]);
  9262. }
  9263. const float mean = sum/ne00;
  9264. float * y = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3);
  9265. memcpy(y, x, ne00 * sizeof(float));
  9266. // for (int i00 = 0; i00 < ne00; i00++) {
  9267. // y[i00] = x[i00];
  9268. // }
  9269. const float scale = 1.0f/sqrtf(mean + eps);
  9270. ggml_vec_scale_f32(ne00, y, scale);
  9271. }
  9272. }
  9273. }
  9274. }
  9275. static void ggml_compute_forward_rms_norm(
  9276. const struct ggml_compute_params * params,
  9277. const struct ggml_tensor * src0,
  9278. struct ggml_tensor * dst) {
  9279. switch (src0->type) {
  9280. case GGML_TYPE_F32:
  9281. {
  9282. ggml_compute_forward_rms_norm_f32(params, src0, dst);
  9283. } break;
  9284. default:
  9285. {
  9286. GGML_ASSERT(false);
  9287. } break;
  9288. }
  9289. }
  9290. static void ggml_compute_forward_rms_norm_back_f32(
  9291. const struct ggml_compute_params * params,
  9292. const struct ggml_tensor * src0,
  9293. const struct ggml_tensor * src1,
  9294. struct ggml_tensor * dst) {
  9295. GGML_ASSERT(ggml_are_same_shape(src0, dst) && ggml_are_same_shape(src0, src1));
  9296. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9297. return;
  9298. }
  9299. GGML_ASSERT(src0->nb[0] == sizeof(float));
  9300. const int ith = params->ith;
  9301. const int nth = params->nth;
  9302. GGML_TENSOR_BINARY_OP_LOCALS
  9303. float eps;
  9304. memcpy(&eps, dst->op_params, sizeof(float));
  9305. // TODO: optimize
  9306. for (int64_t i03 = 0; i03 < ne03; i03++) {
  9307. for (int64_t i02 = 0; i02 < ne02; i02++) {
  9308. for (int64_t i01 = ith; i01 < ne01; i01 += nth) {
  9309. // src1 is same shape as src0 => same indices
  9310. const int64_t i11 = i01;
  9311. const int64_t i12 = i02;
  9312. const int64_t i13 = i03;
  9313. const float * x = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
  9314. const float * dz = (float *) ((char *) src1->data + i11*nb11 + i12*nb12 + i13*nb13);
  9315. ggml_float sum_xx = 0.0;
  9316. ggml_float sum_xdz = 0.0;
  9317. for (int64_t i00 = 0; i00 < ne00; i00++) {
  9318. sum_xx += (ggml_float)(x[i00] * x[i00]);
  9319. sum_xdz += (ggml_float)(x[i00] * dz[i00]);
  9320. }
  9321. //const float mean = (float)(sum_xx)/ne00;
  9322. const float mean_eps = (float)(sum_xx)/ne00 + eps;
  9323. const float sum_eps = (float)(sum_xx) + eps*ne00;
  9324. //const float mean_xdz = (float)(sum_xdz)/ne00;
  9325. // we could cache rms from forward pass to improve performance.
  9326. // to do this implement ggml_rms and compose ggml_rms_norm using ggml_rms.
  9327. //const float rms = sqrtf(mean_eps);
  9328. const float rrms = 1.0f / sqrtf(mean_eps);
  9329. //const float scale = -rrms/(ne00 * mean_eps); // -1/(n*rms**3)
  9330. {
  9331. // z = rms_norm(x)
  9332. //
  9333. // rms_norm(src0) =
  9334. // scale(
  9335. // src0,
  9336. // div(
  9337. // 1,
  9338. // sqrt(
  9339. // add(
  9340. // scale(
  9341. // sum(
  9342. // sqr(
  9343. // src0)),
  9344. // (1.0/N)),
  9345. // eps))));
  9346. // postorder:
  9347. // ## op args grad
  9348. // 00 param src0 grad[#00]
  9349. // 01 const 1
  9350. // 02 sqr (#00) grad[#02]
  9351. // 03 sum (#02) grad[#03]
  9352. // 04 const 1/N
  9353. // 05 scale (#03, #04) grad[#05]
  9354. // 06 const eps
  9355. // 07 add (#05, #06) grad[#07]
  9356. // 08 sqrt (#07) grad[#08]
  9357. // 09 div (#01,#08) grad[#09]
  9358. // 10 scale (#00,#09) grad[#10]
  9359. //
  9360. // backward pass, given grad[#10]
  9361. // #10: scale
  9362. // grad[#00] += scale(grad[#10],#09)
  9363. // grad[#09] += sum(mul(grad[#10],#00))
  9364. // #09: div
  9365. // grad[#08] += neg(mul(grad[#09], div(#09,#08)))
  9366. // #08: sqrt
  9367. // grad[#07] += mul(grad[#08], div(0.5, #08))
  9368. // #07: add
  9369. // grad[#05] += grad[#07]
  9370. // #05: scale
  9371. // grad[#03] += scale(grad[#05],#04)
  9372. // #03: sum
  9373. // grad[#02] += repeat(grad[#03], #02)
// #02: sqr
  9375. // grad[#00] += scale(mul(#00, grad[#02]), 2.0)
  9376. //
  9377. // substitute and simplify:
  9378. // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, grad[#02]), 2.0)
  9379. // grad[#02] = repeat(grad[#03], #02)
  9380. // grad[#02] = repeat(scale(grad[#05],#04), #02)
  9381. // grad[#02] = repeat(scale(grad[#07],#04), #02)
  9382. // grad[#02] = repeat(scale(mul(grad[#08], div(0.5, #08)),#04), #02)
  9383. // grad[#02] = repeat(scale(mul(neg(mul(grad[#09], div(#09,#08))), div(0.5, #08)),#04), #02)
  9384. // grad[#02] = repeat(scale(mul(neg(mul(sum(mul(grad[#10],#00)), div(#09,#08))), div(0.5, #08)),#04), #02)
  9385. // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(#09,#08) * div(0.5, #08) * (1/N)), #02)
  9386. // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(div(#01,#08),#08) * div(0.5, #08) * (1/N)), #02)
  9387. // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(1,#08*#08) * div(0.5, #08) * (1/N)), #02)
  9388. // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N)), #02)
  9389. // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, grad[#02]), 2.0)
  9390. // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, repeat(-(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N)), #02)), 2.0)
  9391. // grad[#00] = scale(grad(#10), #09) + scale(scale(#00, -(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N))), 2.0)
  9392. // grad[#00] = scale(grad(#10), #09) + scale(#00, -(sum(mul(grad[#10],#00)) * div(1,#07) * div(1,#08) * (1/N)))
// grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(1,#07*#08) * (-1/N))
  9395. // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(1,mean_eps*rms) * (-1/N))
  9396. // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*mean_eps))
  9397. // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*(sum_xx/N+eps)))
  9398. // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*sum_xx+rms*N*eps))
  9399. // grad[#00] = scale(dz, rrms) + scale(x, sum(mul(dz,x)) * div(-1,rms*N*mean_eps))
  9400. // grad[#00] = scale(dz, rrms) + scale(x, sum_xdz * div(-1,rms*N*mean_eps))
  9401. // a = b*c + d*e
  9402. // a = b*c*f/f + d*e*f/f
  9403. // a = (b*c*f + d*e*f)*(1/f)
  9404. // a = (b*c*(1/c) + d*e*(1/c))*(1/(1/c))
  9405. // a = (b + d*e/c)*c
  9406. // b = dz, c = rrms, d = x, e = sum_xdz * div(-1,rms*N*mean_eps)
  9407. // a = (dz + x*sum_xdz * div(-1,rms*N*mean_eps)/rrms)*rrms
  9408. // a = (dz + x*sum_xdz * div(-1,rms*N*mean_eps)*rms)*rrms
  9409. // a = (dz + x*sum_xdz * div(-rms,rms*N*mean_eps))*rrms
  9410. // a = (dz + x*sum_xdz * div(-1,N*mean_eps))*rrms
  9411. // a = (dz + x*div(-sum_xdz,N*mean_eps))*rrms
  9412. // a = (dz + x*div(-mean_xdz,mean_eps))*rrms
  9413. // grad[#00] = scale(dz + scale(x, div(-mean_xdz,mean_eps)),rrms)
  9414. // grad[#00] = scale(dz + scale(x, -mean_xdz/mean_eps),rrms)
  9415. // dx = scale(dz + scale(x, -mean_xdz/mean_eps),rrms)
  9416. }
  9417. // dx = scale(dz + scale(x, -mean_xdz/mean_eps),rrms)
  9418. // post-order:
  9419. // dx := x
  9420. // dx := scale(dx,-mean_xdz/mean_eps)
  9421. // dx := add(dx, dz)
  9422. // dx := scale(dx, rrms)
  9423. float * dx = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3);
  9424. ggml_vec_cpy_f32 (ne00, dx, x);
  9425. // ggml_vec_scale_f32(ne00, dx, -mean_xdz/mean_eps);
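// note: -sum_xdz/sum_eps == -mean_xdz/mean_eps (numerator and denominator are both
// scaled by ne00), so the per-row division by the row length can be skipped here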
  9426. ggml_vec_scale_f32(ne00, dx, (float)(-sum_xdz)/sum_eps);
  9427. ggml_vec_acc_f32 (ne00, dx, dz);
  9428. ggml_vec_scale_f32(ne00, dx, rrms);
  9429. }
  9430. }
  9431. }
  9432. }
  9433. static void ggml_compute_forward_rms_norm_back(
  9434. const struct ggml_compute_params * params,
  9435. const struct ggml_tensor * src0,
  9436. const struct ggml_tensor * src1,
  9437. struct ggml_tensor * dst) {
  9438. switch (src0->type) {
  9439. case GGML_TYPE_F32:
  9440. {
  9441. ggml_compute_forward_rms_norm_back_f32(params, src0, src1, dst);
  9442. } break;
  9443. default:
  9444. {
  9445. GGML_ASSERT(false);
  9446. } break;
  9447. }
  9448. }
  9449. // ggml_compute_forward_group_norm
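// Group normalization: the channel dimension (ne[2]) is split into n_groups groups and each
// group is normalized to zero mean / unit variance over its ne00 x ne01 x channels-in-group
// elements, independently for every sample along ne[3].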
  9450. static void ggml_compute_forward_group_norm_f32(
  9451. const struct ggml_compute_params * params,
  9452. const struct ggml_tensor * src0,
  9453. struct ggml_tensor * dst) {
  9454. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  9455. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9456. return;
  9457. }
  9458. GGML_ASSERT(src0->nb[0] == sizeof(float));
  9459. const int ith = params->ith;
  9460. const int nth = params->nth;
  9461. GGML_TENSOR_UNARY_OP_LOCALS
  9462. const float eps = 1e-6f; // TODO: make this a parameter
  9463. // TODO: optimize
  9464. int n_channels = src0->ne[2];
  9465. int n_groups = dst->op_params[0];
  9466. int n_channels_per_group = (n_channels + n_groups - 1) / n_groups;
  9467. for (int i = ith; i < n_groups; i+=nth) {
  9468. int start = i * n_channels_per_group;
  9469. int end = start + n_channels_per_group;
  9470. if (end > n_channels) {
  9471. end = n_channels;
  9472. }
  9473. int step = end - start;
  9474. for (int64_t i03 = 0; i03 < ne03; i03++) {
  9475. ggml_float sum = 0.0;
  9476. for (int64_t i02 = start; i02 < end; i02++) {
  9477. for (int64_t i01 = 0; i01 < ne01; i01++) {
  9478. const float * x = (float *)((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03);
  9479. for (int64_t i00 = 0; i00 < ne00; i00++) {
  9480. sum += (ggml_float)x[i00];
  9481. }
  9482. }
  9483. }
  9484. float mean = sum / (ne00 * ne01 * step);
  9485. ggml_float sum2 = 0.0;
  9486. for (int64_t i02 = start; i02 < end; i02++) {
  9487. for (int64_t i01 = 0; i01 < ne01; i01++) {
  9488. const float * x = (float *)((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03);
  9489. float * y = (float *)((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3);
  9490. for (int64_t i00 = 0; i00 < ne00; i00++) {
  9491. float v = x[i00] - mean;
  9492. y[i00] = v;
  9493. sum2 += (ggml_float)(v * v);
  9494. }
  9495. }
  9496. }
  9497. float variance = sum2 / (ne00 * ne01 * step);
  9498. const float scale = 1.0f / sqrtf(variance + eps);
  9499. for (int64_t i02 = start; i02 < end; i02++) {
  9500. for (int64_t i01 = 0; i01 < ne01; i01++) {
  9501. float * y = (float *)((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3);
  9502. ggml_vec_scale_f32(ne00, y, scale);
  9503. }
  9504. }
  9505. }
  9506. }
  9507. }
  9508. static void ggml_compute_forward_group_norm(
  9509. const struct ggml_compute_params * params,
  9510. const struct ggml_tensor * src0,
  9511. struct ggml_tensor * dst) {
  9512. switch (src0->type) {
  9513. case GGML_TYPE_F32:
  9514. {
  9515. ggml_compute_forward_group_norm_f32(params, src0, dst);
  9516. } break;
  9517. default:
  9518. {
  9519. GGML_ASSERT(false);
  9520. } break;
  9521. }
  9522. }
  9523. // ggml_compute_forward_mul_mat
  9524. #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
  9525. // helper function to determine if it is better to use BLAS or not
  9526. // for large matrices, BLAS is faster
  9527. static bool ggml_compute_forward_mul_mat_use_blas(
  9528. const struct ggml_tensor * src0,
  9529. const struct ggml_tensor * src1,
  9530. struct ggml_tensor * dst) {
  9531. //const int64_t ne00 = src0->ne[0];
  9532. //const int64_t ne01 = src0->ne[1];
  9533. const int64_t ne10 = src1->ne[0];
  9534. const int64_t ne0 = dst->ne[0];
  9535. const int64_t ne1 = dst->ne[1];
  9536. // TODO: find the optimal values for these
  9537. if (ggml_is_contiguous(src0) &&
  9538. ggml_is_contiguous(src1) &&
  9539. (ne0 >= 32 && ne1 >= 32 && ne10 >= 32)) {
  9540. /*printf("BLAS: %d %d %d %d %d\n", ne0, ne1, ne10, ne00, ne01);*/
  9541. return true;
  9542. }
  9543. return false;
  9544. }
  9545. #endif
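// dst[i1][i0] = dot(src0 row i0, src1 row i1), i.e. dst = src1 * src0^T in row-major terms.
// src0 stays in its storage type (possibly quantized); when the types differ, src1 is converted
// to the matching vec_dot_type during the INIT phase below.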
  9546. static void ggml_compute_forward_mul_mat(
  9547. const struct ggml_compute_params * params,
  9548. const struct ggml_tensor * src0,
  9549. const struct ggml_tensor * src1,
  9550. struct ggml_tensor * dst) {
  9551. int64_t t0 = ggml_perf_time_us();
  9552. UNUSED(t0);
  9553. GGML_TENSOR_BINARY_OP_LOCALS
  9554. const int ith = params->ith;
  9555. const int nth = params->nth;
  9556. const enum ggml_type type = src0->type;
  9557. const bool src1_cont = ggml_is_contiguous(src1);
  9558. ggml_vec_dot_t const vec_dot = type_traits[type].vec_dot;
  9559. enum ggml_type const vec_dot_type = type_traits[type].vec_dot_type;
  9560. ggml_from_float_t const from_float_to_vec_dot = type_traits[vec_dot_type].from_float;
  9561. GGML_ASSERT(ne0 == ne01);
  9562. GGML_ASSERT(ne1 == ne11);
  9563. GGML_ASSERT(ne2 == ne12);
  9564. GGML_ASSERT(ne3 == ne13);
  9565. // we don't support permuted src0 or src1
  9566. GGML_ASSERT(nb00 == ggml_type_size(type));
  9567. GGML_ASSERT(nb10 == sizeof(float));
  9568. // dst cannot be transposed or permuted
  9569. GGML_ASSERT(nb0 == sizeof(float));
  9570. GGML_ASSERT(nb0 <= nb1);
  9571. GGML_ASSERT(nb1 <= nb2);
  9572. GGML_ASSERT(nb2 <= nb3);
  9573. // broadcast factors
  9574. const int64_t r2 = ne12/ne02;
  9575. const int64_t r3 = ne13/ne03;
  9576. // nb01 >= nb00 - src0 is not transposed
  9577. // compute by src0 rows
  9578. #if defined(GGML_USE_CLBLAST)
  9579. if (ggml_cl_can_mul_mat(src0, src1, dst)) {
  9580. if (params->ith == 0 && params->type == GGML_TASK_COMPUTE) {
  9581. ggml_cl_mul_mat(src0, src1, dst, params->wdata, params->wsize);
  9582. }
  9583. return;
  9584. }
  9585. #endif
  9586. #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
  9587. if (ggml_compute_forward_mul_mat_use_blas(src0, src1, dst)) {
  9588. if (params->ith != 0) {
  9589. return;
  9590. }
  9591. if (params->type == GGML_TASK_INIT) {
  9592. return;
  9593. }
  9594. if (params->type == GGML_TASK_FINALIZE) {
  9595. return;
  9596. }
  9597. for (int64_t i13 = 0; i13 < ne13; i13++) {
  9598. for (int64_t i12 = 0; i12 < ne12; i12++) {
// broadcast src0 into src1 across the 2nd and 3rd dimensions
  9600. const int64_t i03 = i13/r3;
  9601. const int64_t i02 = i12/r2;
  9602. const void * x = (char *) src0->data + i02*nb02 + i03*nb03;
  9603. const float * y = (float *) ((char *) src1->data + i12*nb12 + i13*nb13);
  9604. float * d = (float *) ((char *) dst->data + i12*nb2 + i13*nb3);
  9605. if (type != GGML_TYPE_F32) {
  9606. float * const wdata = params->wdata;
  9607. ggml_to_float_t const to_float = type_traits[type].to_float;
  9608. size_t id = 0;
  9609. for (int64_t i01 = 0; i01 < ne01; ++i01) {
  9610. to_float((const char *) x + i01*nb01, wdata + id, ne00);
  9611. id += ne00;
  9612. }
  9613. assert(id*sizeof(float) <= params->wsize);
  9614. x = wdata;
  9615. }
  9616. cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans,
  9617. ne11, ne01, ne10,
  9618. 1.0f, y, ne10,
  9619. x, ne00,
  9620. 0.0f, d, ne01);
  9621. }
  9622. }
  9623. //printf("CBLAS = %f ms, %d x %d x %d x %d\n", (ggml_perf_time_us() - t0)/1000.0, ne0, ne1, ne2, ne3);
  9624. return;
  9625. }
  9626. #endif
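// INIT phase: if src1 is not already stored in vec_dot_type, convert its rows into wdata so the
// compute phase can use the type's optimized dot-product kernel (for the quantized weight types
// vec_dot_type is typically a Q8 format; the exact mapping comes from type_traits).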
  9627. if (params->type == GGML_TASK_INIT) {
  9628. if (src1->type != vec_dot_type) {
  9629. char * wdata = params->wdata;
  9630. const size_t row_size = ne10*ggml_type_size(vec_dot_type)/ggml_blck_size(vec_dot_type);
  9631. for (int64_t i13 = 0; i13 < ne13; ++i13) {
  9632. for (int64_t i12 = 0; i12 < ne12; ++i12) {
  9633. for (int64_t i11 = 0; i11 < ne11; ++i11) {
  9634. from_float_to_vec_dot((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11), (void *) wdata, ne10);
  9635. wdata += row_size;
  9636. }
  9637. }
  9638. }
  9639. }
  9640. return;
  9641. }
  9642. if (params->type == GGML_TASK_FINALIZE) {
  9643. return;
  9644. }
  9645. const void * wdata = (src1->type == vec_dot_type) ? src1->data : params->wdata;
  9646. const size_t row_size = ne10*ggml_type_size(vec_dot_type)/ggml_blck_size(vec_dot_type);
  9647. const int64_t nr0 = ne01; // src0 rows
  9648. const int64_t nr1 = ne11*ne12*ne13; // src1 rows
  9649. //printf("nr0 = %lld, nr1 = %lld\n", nr0, nr1);
  9650. // distribute the thread work across the inner or outer loop based on which one is larger
  9651. const int64_t nth0 = nr0 > nr1 ? nth : 1; // parallelize by src0 rows
  9652. const int64_t nth1 = nr0 > nr1 ? 1 : nth; // parallelize by src1 rows
  9653. const int64_t ith0 = ith % nth0;
  9654. const int64_t ith1 = ith / nth0;
  9655. const int64_t dr0 = (nr0 + nth0 - 1)/nth0;
  9656. const int64_t dr1 = (nr1 + nth1 - 1)/nth1;
  9657. const int64_t ir010 = dr0*ith0;
  9658. const int64_t ir011 = MIN(ir010 + dr0, nr0);
  9659. const int64_t ir110 = dr1*ith1;
  9660. const int64_t ir111 = MIN(ir110 + dr1, nr1);
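// example (hypothetical numbers): nr0 = 512 src0 rows, nr1 = 32 src1 rows, nth = 4 threads
// => nth0 = 4, nth1 = 1, dr0 = 128, dr1 = 32; thread ith handles src0 rows
// [128*ith, MIN(128*ith + 128, 512)) and all 32 src1 rows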
  9661. //printf("ir010 = %6lld, ir011 = %6lld, ir110 = %6lld, ir111 = %6lld\n", ir010, ir011, ir110, ir111);
  9662. // threads with no work simply yield (not sure if it helps)
  9663. if (ir010 >= ir011 || ir110 >= ir111) {
  9664. sched_yield();
  9665. return;
  9666. }
  9667. assert(ne12 % ne02 == 0);
  9668. assert(ne13 % ne03 == 0);
  9669. // block-tiling attempt
  9670. const int64_t blck_0 = 16;
  9671. const int64_t blck_1 = 16;
  9672. // attempt to reduce false-sharing (does not seem to make a difference)
  9673. float tmp[16];
  9674. for (int64_t iir1 = ir110; iir1 < ir111; iir1 += blck_1) {
  9675. for (int64_t iir0 = ir010; iir0 < ir011; iir0 += blck_0) {
  9676. for (int64_t ir1 = iir1; ir1 < iir1 + blck_1 && ir1 < ir111; ++ir1) {
  9677. const int64_t i13 = (ir1/(ne12*ne11));
  9678. const int64_t i12 = (ir1 - i13*ne12*ne11)/ne11;
  9679. const int64_t i11 = (ir1 - i13*ne12*ne11 - i12*ne11);
  9680. // broadcast src0 into src1
  9681. const int64_t i03 = i13/r3;
  9682. const int64_t i02 = i12/r2;
  9683. const int64_t i1 = i11;
  9684. const int64_t i2 = i12;
  9685. const int64_t i3 = i13;
  9686. const char * src0_row = (const char *) src0->data + (0 + i02*nb02 + i03*nb03);
// when src1 is not contiguous and has not been converted, the offset has to be computed from its strides;
// otherwise the data has either been copied into params->wdata (and made contiguous) or the original
// contiguous src1 pointer is used, so plain row indexing is sufficient
  9690. // TODO: this is a bit of a hack, we should probably have a better way to handle this
  9691. const char * src1_col = (const char *) wdata +
  9692. (src1_cont || src1->type != vec_dot_type
  9693. ? (i11 + i12*ne11 + i13*ne12*ne11)*row_size
  9694. : (i11*nb11 + i12*nb12 + i13*nb13));
  9695. float * dst_col = (float *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3));
  9696. //for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir011; ++ir0) {
  9697. // vec_dot(ne00, &dst_col[ir0], src0_row + ir0*nb01, src1_col);
  9698. //}
  9699. for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir011; ++ir0) {
  9700. vec_dot(ne00, &tmp[ir0 - iir0], src0_row + ir0*nb01, src1_col);
  9701. }
  9702. memcpy(&dst_col[iir0], tmp, (MIN(iir0 + blck_0, ir011) - iir0)*sizeof(float));
  9703. }
  9704. }
  9705. }
  9706. }
  9707. // ggml_compute_forward_out_prod
  9708. static void ggml_compute_forward_out_prod_f32(
  9709. const struct ggml_compute_params * params,
  9710. const struct ggml_tensor * src0,
  9711. const struct ggml_tensor * src1,
  9712. struct ggml_tensor * dst) {
  9713. // int64_t t0 = ggml_perf_time_us();
  9714. // UNUSED(t0);
  9715. GGML_TENSOR_BINARY_OP_LOCALS
  9716. const int ith = params->ith;
  9717. const int nth = params->nth;
  9718. GGML_ASSERT(ne02 == ne12);
  9719. GGML_ASSERT(ne03 == ne13);
  9720. GGML_ASSERT(ne2 == ne12);
  9721. GGML_ASSERT(ne3 == ne13);
  9722. // we don't support permuted src0 or src1
  9723. GGML_ASSERT(nb00 == sizeof(float));
  9724. // dst cannot be transposed or permuted
  9725. GGML_ASSERT(nb0 == sizeof(float));
  9726. // GGML_ASSERT(nb0 <= nb1);
  9727. // GGML_ASSERT(nb1 <= nb2);
  9728. // GGML_ASSERT(nb2 <= nb3);
  9729. GGML_ASSERT(ne0 == ne00);
  9730. GGML_ASSERT(ne1 == ne10);
  9731. GGML_ASSERT(ne2 == ne02);
  9732. GGML_ASSERT(ne3 == ne03);
  9733. // nb01 >= nb00 - src0 is not transposed
  9734. // compute by src0 rows
  9735. // TODO: #if defined(GGML_USE_CUBLAS) ggml_cuda_out_prod
  9736. // TODO: #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) || defined(GGML_USE_CLBLAST)
  9737. if (params->type == GGML_TASK_INIT) {
  9738. ggml_vec_set_f32(ne0*ne1*ne2*ne3, dst->data, 0);
  9739. return;
  9740. }
  9741. if (params->type == GGML_TASK_FINALIZE) {
  9742. return;
  9743. }
  9744. // dst[:,:,:,:] = 0
  9745. // for i2,i3:
  9746. // for i1:
  9747. // for i01:
  9748. // for i0:
  9749. // dst[i0,i1,i2,i3] += src0[i0,i01,i2,i3] * src1[i1,i01,i2,i3]
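// equivalently: for each i01, the src0 row (i01, i2, i3) scaled by the scalar src1[i1, i01, i2, i3]
// is accumulated into the dst row (i1, i2, i3), an outer-product style accumulation implemented
// with ggml_vec_mad_f32 below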
  9750. // parallelize by last three dimensions
  9751. // total rows in dst
  9752. const int64_t nr = ne1*ne2*ne3;
  9753. // rows per thread
  9754. const int64_t dr = (nr + nth - 1)/nth;
  9755. // row range for this thread
  9756. const int64_t ir0 = dr*ith;
  9757. const int64_t ir1 = MIN(ir0 + dr, nr);
  9758. // block-tiling attempt
  9759. const int64_t blck_0 = MAX(GGML_VEC_MAD_UNROLL, 32);
  9760. const int64_t blck_1 = 16;
  9761. for (int64_t bir = ir0; bir < ir1; bir += blck_1) {
  9762. const int64_t bir1 = MIN(bir + blck_1, ir1);
  9763. for (int64_t bi01 = 0; bi01 < ne01; bi01 += blck_0) {
  9764. const int64_t bne01 = MIN(bi01 + blck_0, ne01);
  9765. for (int64_t ir = bir; ir < bir1; ++ir) {
  9766. // dst indices
  9767. const int64_t i3 = ir/(ne2*ne1);
  9768. const int64_t i2 = (ir - i3*ne2*ne1)/ne1;
  9769. const int64_t i1 = (ir - i3*ne2*ne1 - i2*ne1);
  9770. const int64_t i02 = i2;
  9771. const int64_t i03 = i3;
  9772. //const int64_t i10 = i1;
  9773. const int64_t i12 = i2;
  9774. const int64_t i13 = i3;
  9775. #if GGML_VEC_MAD_UNROLL > 2
  9776. const int64_t bne01_unroll = bne01 - (bne01 % GGML_VEC_MAD_UNROLL);
  9777. for (int64_t i01 = bi01; i01 < bne01_unroll; i01 += GGML_VEC_MAD_UNROLL) {
  9778. const int64_t i11 = i01;
  9779. float * s0 = (float *) ((char *) src0->data + ( i01*nb01 + i02*nb02 + i03*nb03));
  9780. float * s1 = (float *) ((char *) src1->data + (i1*nb10 + i11*nb11 + i12*nb12 + i13*nb13));
  9781. float * d = (float *) ((char *) dst->data + ( i1*nb1 + i2*nb2 + i3*nb3));
  9782. ggml_vec_mad_f32_unroll(ne0, nb01, nb11, d, s0, s1);
  9783. }
  9784. for (int64_t i01 = bne01_unroll; i01 < bne01; ++i01) {
  9785. const int64_t i11 = i01;
  9786. float * s0 = (float *) ((char *) src0->data + ( i01*nb01 + i02*nb02 + i03*nb03));
  9787. float * s1 = (float *) ((char *) src1->data + (i1*nb10 + i11*nb11 + i12*nb12 + i13*nb13));
  9788. float * d = (float *) ((char *) dst->data + ( i1*nb1 + i2*nb2 + i3*nb3));
  9789. ggml_vec_mad_f32(ne0, d, s0, *s1);
  9790. }
  9791. #else
  9792. for (int64_t i01 = bi01; i01 < bne01; ++i01) {
  9793. const int64_t i11 = i01;
  9794. float * s0 = (float *) ((char *) src0->data + ( i01*nb01 + i02*nb02 + i03*nb03));
  9795. float * s1 = (float *) ((char *) src1->data + (i1*nb10 + i11*nb11 + i12*nb12 + i13*nb13));
  9796. float * d = (float *) ((char *) dst->data + ( i1*nb1 + i2*nb2 + i3*nb3));
  9797. ggml_vec_mad_f32(ne0, d, s0, *s1);
  9798. }
  9799. #endif
  9800. }
  9801. }
  9802. }
  9803. //int64_t t1 = ggml_perf_time_us();
  9804. //static int64_t acc = 0;
  9805. //acc += t1 - t0;
  9806. //if (t1 - t0 > 10) {
  9807. // printf("\n");
  9808. // printf("ne00 = %5d, ne01 = %5d, ne02 = %5d, ne03 = %5d\n", ne00, ne01, ne02, ne03);
  9809. // printf("nb00 = %5d, nb01 = %5d, nb02 = %5d, nb03 = %5d\n", nb00, nb01, nb02, nb03);
  9810. // printf("ne10 = %5d, ne11 = %5d, ne12 = %5d, ne13 = %5d\n", ne10, ne11, ne12, ne13);
  9811. // printf("nb10 = %5d, nb11 = %5d, nb12 = %5d, nb13 = %5d\n", nb10, nb11, nb12, nb13);
  9812. // printf("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX task %d/%d: %d us, acc = %d\n", ith, nth, (int) (t1 - t0), (int) acc);
  9813. //}
  9814. }
  9815. static void ggml_compute_forward_out_prod_q_f32(
  9816. const struct ggml_compute_params * params,
  9817. const struct ggml_tensor * src0,
  9818. const struct ggml_tensor * src1,
  9819. struct ggml_tensor * dst) {
  9820. // int64_t t0 = ggml_perf_time_us();
  9821. // UNUSED(t0);
  9822. GGML_TENSOR_BINARY_OP_LOCALS;
  9823. const int ith = params->ith;
  9824. const int nth = params->nth;
  9825. const enum ggml_type type = src0->type;
  9826. ggml_to_float_t const dequantize_row_q = type_traits[type].to_float;
  9827. GGML_ASSERT(ne02 == ne12);
  9828. GGML_ASSERT(ne03 == ne13);
  9829. GGML_ASSERT(ne2 == ne12);
  9830. GGML_ASSERT(ne3 == ne13);
  9831. // we don't support permuted src0 dim0
  9832. GGML_ASSERT(nb00 == ggml_type_size(type));
  9833. // dst dim0 cannot be transposed or permuted
  9834. GGML_ASSERT(nb0 == sizeof(float));
  9835. // GGML_ASSERT(nb0 <= nb1);
  9836. // GGML_ASSERT(nb1 <= nb2);
  9837. // GGML_ASSERT(nb2 <= nb3);
  9838. GGML_ASSERT(ne0 == ne00);
  9839. GGML_ASSERT(ne1 == ne10);
  9840. GGML_ASSERT(ne2 == ne02);
  9841. GGML_ASSERT(ne3 == ne03);
  9842. // nb01 >= nb00 - src0 is not transposed
  9843. // compute by src0 rows
  9844. // TODO: #if defined(GGML_USE_CUBLAS) ggml_cuda_out_prod
  9845. // TODO: #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) || defined(GGML_USE_CLBLAST)
  9846. if (params->type == GGML_TASK_INIT) {
  9847. ggml_vec_set_f32(ne0*ne1*ne2*ne3, dst->data, 0);
  9848. return;
  9849. }
  9850. if (params->type == GGML_TASK_FINALIZE) {
  9851. return;
  9852. }
  9853. // parallelize by last three dimensions
  9854. // total rows in dst
  9855. const int64_t nr = ne1*ne2*ne3;
  9856. // rows per thread
  9857. const int64_t dr = (nr + nth - 1)/nth;
  9858. // row range for this thread
  9859. const int64_t ir0 = dr*ith;
  9860. const int64_t ir1 = MIN(ir0 + dr, nr);
  9861. // dst[:,:,:,:] = 0
  9862. // for i2,i3:
  9863. // for i1:
  9864. // for i01:
  9865. // for i0:
  9866. // dst[i0,i1,i2,i3] += src0[i0,i01,i2,i3] * src1[i1,i01,i2,i3]
  9867. float * wdata = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32) * ith;
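// each thread dequantizes one src0 row at a time into its own ne0-float slice of wdata
// (padded by CACHE_LINE_SIZE_F32 floats, presumably to keep threads off each other's cache lines)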
  9868. for (int64_t ir = ir0; ir < ir1; ++ir) {
  9869. // dst indices
  9870. const int64_t i3 = ir/(ne2*ne1);
  9871. const int64_t i2 = (ir - i3*ne2*ne1)/ne1;
  9872. const int64_t i1 = (ir - i3*ne2*ne1 - i2*ne1);
  9873. const int64_t i02 = i2;
  9874. const int64_t i03 = i3;
  9875. //const int64_t i10 = i1;
  9876. const int64_t i12 = i2;
  9877. const int64_t i13 = i3;
  9878. for (int64_t i01 = 0; i01 < ne01; ++i01) {
  9879. const int64_t i11 = i01;
  9880. float * s0 = (float *) ((char *) src0->data + ( i01*nb01 + i02*nb02 + i03*nb03));
  9881. float * s1 = (float *) ((char *) src1->data + (i1*nb10 + i11*nb11 + i12*nb12 + i13*nb13));
  9882. float * d = (float *) ((char *) dst->data + ( i1*nb1 + i2*nb2 + i3*nb3));
  9883. dequantize_row_q(s0, wdata, ne0);
  9884. ggml_vec_mad_f32(ne0, d, wdata, *s1);
  9885. }
  9886. }
  9887. //int64_t t1 = ggml_perf_time_us();
  9888. //static int64_t acc = 0;
  9889. //acc += t1 - t0;
  9890. //if (t1 - t0 > 10) {
  9891. // printf("\n");
  9892. // printf("ne00 = %5d, ne01 = %5d, ne02 = %5d, ne03 = %5d\n", ne00, ne01, ne02, ne03);
  9893. // printf("nb00 = %5d, nb01 = %5d, nb02 = %5d, nb03 = %5d\n", nb00, nb01, nb02, nb03);
  9894. // printf("ne10 = %5d, ne11 = %5d, ne12 = %5d, ne13 = %5d\n", ne10, ne11, ne12, ne13);
  9895. // printf("nb10 = %5d, nb11 = %5d, nb12 = %5d, nb13 = %5d\n", nb10, nb11, nb12, nb13);
  9896. // printf("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX task %d/%d: %d us, acc = %d\n", ith, nth, (int) (t1 - t0), (int) acc);
  9897. //}
  9898. }
  9899. static void ggml_compute_forward_out_prod(
  9900. const struct ggml_compute_params * params,
  9901. const struct ggml_tensor * src0,
  9902. const struct ggml_tensor * src1,
  9903. struct ggml_tensor * dst) {
  9904. switch (src0->type) {
  9905. case GGML_TYPE_Q4_0:
  9906. case GGML_TYPE_Q4_1:
  9907. case GGML_TYPE_Q5_0:
  9908. case GGML_TYPE_Q5_1:
  9909. case GGML_TYPE_Q8_0:
  9910. case GGML_TYPE_Q2_K:
  9911. case GGML_TYPE_Q3_K:
  9912. case GGML_TYPE_Q4_K:
  9913. case GGML_TYPE_Q5_K:
  9914. case GGML_TYPE_Q6_K:
  9915. {
  9916. ggml_compute_forward_out_prod_q_f32(params, src0, src1, dst);
  9917. } break;
  9918. case GGML_TYPE_F16:
  9919. {
  9920. GGML_ASSERT(false); // todo
  9921. // ggml_compute_forward_out_prod_f16_f32(params, src0, src1, dst);
  9922. } break;
  9923. case GGML_TYPE_F32:
  9924. {
  9925. ggml_compute_forward_out_prod_f32(params, src0, src1, dst);
  9926. } break;
  9927. default:
  9928. {
  9929. GGML_ASSERT(false);
  9930. } break;
  9931. }
  9932. }
  9933. // ggml_compute_forward_scale
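// dst = src0 * v, where v is the single float held by src1 (ggml_is_scalar(src1) is asserted below).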
  9934. static void ggml_compute_forward_scale_f32(
  9935. const struct ggml_compute_params * params,
  9936. const struct ggml_tensor * src0,
  9937. const struct ggml_tensor * src1,
  9938. struct ggml_tensor * dst) {
  9939. GGML_ASSERT(ggml_is_contiguous(src0));
  9940. GGML_ASSERT(ggml_is_contiguous(dst));
  9941. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  9942. GGML_ASSERT(ggml_is_scalar(src1));
  9943. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9944. return;
  9945. }
  9946. // scale factor
  9947. const float v = *(float *) src1->data;
  9948. const int ith = params->ith;
  9949. const int nth = params->nth;
  9950. const int nc = src0->ne[0];
  9951. const int nr = ggml_nrows(src0);
  9952. // rows per thread
  9953. const int dr = (nr + nth - 1)/nth;
  9954. // row range for this thread
  9955. const int ir0 = dr*ith;
  9956. const int ir1 = MIN(ir0 + dr, nr);
  9957. const size_t nb01 = src0->nb[1];
  9958. const size_t nb1 = dst->nb[1];
  9959. for (int i1 = ir0; i1 < ir1; i1++) {
  9960. if (dst->data != src0->data) {
  9961. // src0 is same shape as dst => same indices
  9962. memcpy((char *)dst->data + i1*nb1, (char *)src0->data + i1*nb01, nc * sizeof(float));
  9963. }
  9964. ggml_vec_scale_f32(nc, (float *) ((char *) dst->data + i1*nb1), v);
  9965. }
  9966. }
  9967. static void ggml_compute_forward_scale(
  9968. const struct ggml_compute_params * params,
  9969. const struct ggml_tensor * src0,
  9970. const struct ggml_tensor * src1,
  9971. struct ggml_tensor * dst) {
  9972. switch (src0->type) {
  9973. case GGML_TYPE_F32:
  9974. {
  9975. ggml_compute_forward_scale_f32(params, src0, src1, dst);
  9976. } break;
  9977. default:
  9978. {
  9979. GGML_ASSERT(false);
  9980. } break;
  9981. }
  9982. }
  9983. // ggml_compute_forward_set
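// Copies src1 into a strided view of dst described by op_params
// (nb1, nb2, nb3, byte offset, inplace flag); outside that view dst keeps the values of src0.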
  9984. static void ggml_compute_forward_set_f32(
  9985. const struct ggml_compute_params * params,
  9986. const struct ggml_tensor * src0,
  9987. const struct ggml_tensor * src1,
  9988. struct ggml_tensor * dst) {
  9989. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  9990. GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));
// view src0 and dst with these strides and data offset in bytes during set
// nb0 is implicitly element_size because src0 and dst are contiguous
  9993. size_t nb1 = ((int32_t *) dst->op_params)[0];
  9994. size_t nb2 = ((int32_t *) dst->op_params)[1];
  9995. size_t nb3 = ((int32_t *) dst->op_params)[2];
  9996. size_t offset = ((int32_t *) dst->op_params)[3];
  9997. bool inplace = (bool) ((int32_t *) dst->op_params)[4];
  9998. if (!inplace && (params->type == GGML_TASK_INIT)) {
  9999. // memcpy needs to be synchronized across threads to avoid race conditions.
  10000. // => do it in INIT phase
  10001. memcpy(
  10002. ((char *) dst->data),
  10003. ((char *) src0->data),
  10004. ggml_nbytes(dst));
  10005. }
  10006. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  10007. return;
  10008. }
  10009. const int ith = params->ith;
  10010. const int nth = params->nth;
  10011. const int nr = ggml_nrows(src1);
  10012. const int nc = src1->ne[0];
  10013. GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne)
  10014. GGML_TENSOR_LOCALS(size_t, nb1, src1, nb)
  10015. // src0 and dst as viewed during set
  10016. const size_t nb0 = ggml_element_size(src0);
  10017. const int im0 = (ne10 == 0 ? 0 : ne10-1);
  10018. const int im1 = (ne11 == 0 ? 0 : ne11-1);
  10019. const int im2 = (ne12 == 0 ? 0 : ne12-1);
  10020. const int im3 = (ne13 == 0 ? 0 : ne13-1);
  10021. GGML_ASSERT(offset + im0*nb0 + im1*nb1 + im2*nb2 + im3*nb3 <= ggml_nbytes(dst));
  10022. GGML_ASSERT(nb10 == sizeof(float));
  10023. // rows per thread
  10024. const int dr = (nr + nth - 1)/nth;
  10025. // row range for this thread
  10026. const int ir0 = dr*ith;
  10027. const int ir1 = MIN(ir0 + dr, nr);
  10028. for (int ir = ir0; ir < ir1; ++ir) {
  10029. // src0 and dst are viewed with shape of src1 and offset
  10030. // => same indices
  10031. const int i3 = ir/(ne12*ne11);
  10032. const int i2 = (ir - i3*ne12*ne11)/ne11;
  10033. const int i1 = (ir - i3*ne12*ne11 - i2*ne11);
  10034. ggml_vec_cpy_f32(nc,
  10035. (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + offset),
  10036. (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11));
  10037. }
  10038. }
  10039. static void ggml_compute_forward_set(
  10040. const struct ggml_compute_params * params,
  10041. const struct ggml_tensor * src0,
  10042. const struct ggml_tensor * src1,
  10043. struct ggml_tensor * dst) {
  10044. switch (src0->type) {
  10045. case GGML_TYPE_F32:
  10046. {
  10047. ggml_compute_forward_set_f32(params, src0, src1, dst);
  10048. } break;
  10049. case GGML_TYPE_F16:
  10050. case GGML_TYPE_Q4_0:
  10051. case GGML_TYPE_Q4_1:
  10052. case GGML_TYPE_Q5_0:
  10053. case GGML_TYPE_Q5_1:
  10054. case GGML_TYPE_Q8_0:
  10055. case GGML_TYPE_Q8_1:
  10056. case GGML_TYPE_Q2_K:
  10057. case GGML_TYPE_Q3_K:
  10058. case GGML_TYPE_Q4_K:
  10059. case GGML_TYPE_Q5_K:
  10060. case GGML_TYPE_Q6_K:
  10061. default:
  10062. {
  10063. GGML_ASSERT(false);
  10064. } break;
  10065. }
  10066. }
  10067. // ggml_compute_forward_cpy
  10068. static void ggml_compute_forward_cpy(
  10069. const struct ggml_compute_params * params,
  10070. const struct ggml_tensor * src0,
  10071. struct ggml_tensor * dst) {
  10072. ggml_compute_forward_dup(params, src0, dst);
  10073. }
  10074. // ggml_compute_forward_cont
  10075. static void ggml_compute_forward_cont(
  10076. const struct ggml_compute_params * params,
  10077. const struct ggml_tensor * src0,
  10078. struct ggml_tensor * dst) {
  10079. ggml_compute_forward_dup(params, src0, dst);
  10080. }
  10081. // ggml_compute_forward_reshape
  10082. static void ggml_compute_forward_reshape(
  10083. const struct ggml_compute_params * params,
  10084. const struct ggml_tensor * src0,
  10085. struct ggml_tensor * dst) {
  10086. // NOP
  10087. UNUSED(params);
  10088. UNUSED(src0);
  10089. UNUSED(dst);
  10090. }
  10091. // ggml_compute_forward_view
  10092. static void ggml_compute_forward_view(
  10093. const struct ggml_compute_params * params,
  10094. const struct ggml_tensor * src0) {
  10095. // NOP
  10096. UNUSED(params);
  10097. UNUSED(src0);
  10098. }
  10099. // ggml_compute_forward_permute
  10100. static void ggml_compute_forward_permute(
  10101. const struct ggml_compute_params * params,
  10102. const struct ggml_tensor * src0) {
  10103. // NOP
  10104. UNUSED(params);
  10105. UNUSED(src0);
  10106. }
  10107. // ggml_compute_forward_transpose
  10108. static void ggml_compute_forward_transpose(
  10109. const struct ggml_compute_params * params,
  10110. const struct ggml_tensor * src0) {
  10111. // NOP
  10112. UNUSED(params);
  10113. UNUSED(src0);
  10114. }
  10115. // ggml_compute_forward_get_rows
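// Row gather: for each index i, dst row i is a copy of src0 row src1[i]
// (dequantized to f32 when src0 is a quantized type, converted when it is f16).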
  10116. static void ggml_compute_forward_get_rows_q(
  10117. const struct ggml_compute_params * params,
  10118. const struct ggml_tensor * src0,
  10119. const struct ggml_tensor * src1,
  10120. struct ggml_tensor * dst) {
  10121. assert(params->ith == 0);
  10122. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  10123. return;
  10124. }
  10125. const int nc = src0->ne[0];
  10126. const int nr = ggml_nelements(src1);
  10127. const enum ggml_type type = src0->type;
  10128. ggml_to_float_t const dequantize_row_q = type_traits[type].to_float;
  10129. assert( dst->ne[0] == nc);
  10130. assert( dst->ne[1] == nr);
  10131. assert(src0->nb[0] == ggml_type_size(type));
  10132. for (int i = 0; i < nr; ++i) {
  10133. const int r = ((int32_t *) src1->data)[i];
  10134. dequantize_row_q(
  10135. (const void *) ((char *) src0->data + r*src0->nb[1]),
  10136. (float *) ((char *) dst->data + i*dst->nb[1]), nc);
  10137. }
  10138. }
  10139. static void ggml_compute_forward_get_rows_f16(
  10140. const struct ggml_compute_params * params,
  10141. const struct ggml_tensor * src0,
  10142. const struct ggml_tensor * src1,
  10143. struct ggml_tensor * dst) {
  10144. assert(params->ith == 0);
  10145. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  10146. return;
  10147. }
  10148. const int nc = src0->ne[0];
  10149. const int nr = ggml_nelements(src1);
  10150. assert( dst->ne[0] == nc);
  10151. assert( dst->ne[1] == nr);
  10152. assert(src0->nb[0] == sizeof(ggml_fp16_t));
  10153. for (int i = 0; i < nr; ++i) {
  10154. const int r = ((int32_t *) src1->data)[i];
  10155. for (int j = 0; j < nc; ++j) {
  10156. ggml_fp16_t v = ((ggml_fp16_t *) ((char *) src0->data + r*src0->nb[1]))[j];
  10157. ((float *) ((char *) dst->data + i*dst->nb[1]))[j] = GGML_FP16_TO_FP32(v);
  10158. }
  10159. }
  10160. }
  10161. static void ggml_compute_forward_get_rows_f32(
  10162. const struct ggml_compute_params * params,
  10163. const struct ggml_tensor * src0,
  10164. const struct ggml_tensor * src1,
  10165. struct ggml_tensor * dst) {
  10166. assert(params->ith == 0);
  10167. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  10168. return;
  10169. }
  10170. const int nc = src0->ne[0];
  10171. const int nr = ggml_nelements(src1);
  10172. assert( dst->ne[0] == nc);
  10173. assert( dst->ne[1] == nr);
  10174. assert(src0->nb[0] == sizeof(float));
  10175. for (int i = 0; i < nr; ++i) {
  10176. const int r = ((int32_t *) src1->data)[i];
  10177. ggml_vec_cpy_f32(nc,
  10178. (float *) ((char *) dst->data + i*dst->nb[1]),
  10179. (float *) ((char *) src0->data + r*src0->nb[1]));
  10180. }
  10181. }
  10182. static void ggml_compute_forward_get_rows(
  10183. const struct ggml_compute_params * params,
  10184. const struct ggml_tensor * src0,
  10185. const struct ggml_tensor * src1,
  10186. struct ggml_tensor * dst) {
  10187. switch (src0->type) {
  10188. case GGML_TYPE_Q4_0:
  10189. case GGML_TYPE_Q4_1:
  10190. case GGML_TYPE_Q5_0:
  10191. case GGML_TYPE_Q5_1:
  10192. case GGML_TYPE_Q8_0:
  10193. case GGML_TYPE_Q8_1:
  10194. case GGML_TYPE_Q2_K:
  10195. case GGML_TYPE_Q3_K:
  10196. case GGML_TYPE_Q4_K:
  10197. case GGML_TYPE_Q5_K:
  10198. case GGML_TYPE_Q6_K:
  10199. {
  10200. ggml_compute_forward_get_rows_q(params, src0, src1, dst);
  10201. } break;
  10202. case GGML_TYPE_F16:
  10203. {
  10204. ggml_compute_forward_get_rows_f16(params, src0, src1, dst);
  10205. } break;
  10206. case GGML_TYPE_F32:
  10207. {
  10208. ggml_compute_forward_get_rows_f32(params, src0, src1, dst);
  10209. } break;
  10210. default:
  10211. {
  10212. GGML_ASSERT(false);
  10213. } break;
  10214. }
  10215. //static bool first = true;
  10216. //printf("ne0 = %d, ne1 = %d, ne2 = %d\n", dst->ne[0], dst->ne[1], dst->ne[2]);
  10217. //if (first) {
  10218. // first = false;
  10219. //} else {
  10220. // for (int k = 0; k < dst->ne[1]; ++k) {
  10221. // for (int j = 0; j < dst->ne[0]/16; ++j) {
  10222. // for (int i = 0; i < 16; ++i) {
  10223. // printf("%8.4f ", ((float *) dst->data)[k*dst->ne[0] + j*16 + i]);
  10224. // }
  10225. // printf("\n");
  10226. // }
  10227. // printf("\n");
  10228. // }
  10229. // printf("\n");
  10230. // exit(0);
  10231. //}
  10232. }
  10233. // ggml_compute_forward_get_rows_back
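// Gradient of get_rows: a scatter-add. dst starts at zero and row src1[i] of dst
// accumulates row i of src0, so repeated indices sum their contributions.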
  10234. static void ggml_compute_forward_get_rows_back_f32_f16(
  10235. const struct ggml_compute_params * params,
  10236. const struct ggml_tensor * src0,
  10237. const struct ggml_tensor * src1,
  10238. struct ggml_tensor * dst) {
  10239. GGML_ASSERT(params->ith == 0);
  10240. GGML_ASSERT(ggml_is_contiguous(dst));
  10241. // ggml_compute_forward_dup_same_cont(params, opt0, dst);
  10242. if (params->type == GGML_TASK_INIT) {
  10243. memset(dst->data, 0, ggml_nbytes(dst));
  10244. }
  10245. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  10246. return;
  10247. }
  10248. const int nc = src0->ne[0];
  10249. const int nr = ggml_nelements(src1);
  10250. GGML_ASSERT( dst->ne[0] == nc);
  10251. GGML_ASSERT(src0->nb[0] == sizeof(ggml_fp16_t));
  10252. for (int i = 0; i < nr; ++i) {
  10253. const int r = ((int32_t *) src1->data)[i];
  10254. for (int j = 0; j < nc; ++j) {
  10255. ggml_fp16_t v = ((ggml_fp16_t *) ((char *) src0->data + i*src0->nb[1]))[j];
  10256. ((float *) ((char *) dst->data + r*dst->nb[1]))[j] += GGML_FP16_TO_FP32(v);
  10257. }
  10258. }
  10259. }
  10260. static void ggml_compute_forward_get_rows_back_f32(
  10261. const struct ggml_compute_params * params,
  10262. const struct ggml_tensor * src0,
  10263. const struct ggml_tensor * src1,
  10264. struct ggml_tensor * dst) {
  10265. GGML_ASSERT(params->ith == 0);
  10266. GGML_ASSERT(ggml_is_contiguous(dst));
  10267. // ggml_compute_forward_dup_same_cont(params, opt0, dst);
  10268. if (params->type == GGML_TASK_INIT) {
  10269. memset(dst->data, 0, ggml_nbytes(dst));
  10270. }
  10271. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  10272. return;
  10273. }
  10274. const int nc = src0->ne[0];
  10275. const int nr = ggml_nelements(src1);
  10276. GGML_ASSERT( dst->ne[0] == nc);
  10277. GGML_ASSERT(src0->nb[0] == sizeof(float));
  10278. for (int i = 0; i < nr; ++i) {
  10279. const int r = ((int32_t *) src1->data)[i];
  10280. ggml_vec_add_f32(nc,
  10281. (float *) ((char *) dst->data + r*dst->nb[1]),
  10282. (float *) ((char *) dst->data + r*dst->nb[1]),
  10283. (float *) ((char *) src0->data + i*src0->nb[1]));
  10284. }
  10285. }
  10286. static void ggml_compute_forward_get_rows_back(
  10287. const struct ggml_compute_params * params,
  10288. const struct ggml_tensor * src0,
  10289. const struct ggml_tensor * src1,
  10290. struct ggml_tensor * dst) {
  10291. switch (src0->type) {
  10292. case GGML_TYPE_F16:
  10293. {
  10294. ggml_compute_forward_get_rows_back_f32_f16(params, src0, src1, dst);
  10295. } break;
  10296. case GGML_TYPE_F32:
  10297. {
  10298. ggml_compute_forward_get_rows_back_f32(params, src0, src1, dst);
  10299. } break;
  10300. default:
  10301. {
  10302. GGML_ASSERT(false);
  10303. } break;
  10304. }
  10305. //static bool first = true;
  10306. //printf("ne0 = %d, ne1 = %d, ne2 = %d\n", dst->ne[0], dst->ne[1], dst->ne[2]);
  10307. //if (first) {
  10308. // first = false;
  10309. //} else {
  10310. // for (int k = 0; k < dst->ne[1]; ++k) {
  10311. // for (int j = 0; j < dst->ne[0]/16; ++j) {
  10312. // for (int i = 0; i < 16; ++i) {
  10313. // printf("%8.4f ", ((float *) dst->data)[k*dst->ne[0] + j*16 + i]);
  10314. // }
  10315. // printf("\n");
  10316. // }
  10317. // printf("\n");
  10318. // }
  10319. // printf("\n");
  10320. // exit(0);
  10321. //}
  10322. }
  10323. // ggml_compute_forward_diag
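// Builds a diagonal matrix from a row vector: dst[i1][i0] = src0[i0] if i0 == i1, else 0
// (src0 must have ne[1] == 1 and dst is square along ne0/ne1; see the asserts below).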
  10324. static void ggml_compute_forward_diag_f32(
  10325. const struct ggml_compute_params * params,
  10326. const struct ggml_tensor * src0,
  10327. struct ggml_tensor * dst) {
  10328. GGML_ASSERT(params->ith == 0);
  10329. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  10330. return;
  10331. }
  10332. // TODO: handle transposed/permuted matrices
  10333. GGML_TENSOR_UNARY_OP_LOCALS
  10334. GGML_ASSERT(ne00 == ne0);
  10335. GGML_ASSERT(ne00 == ne1);
  10336. GGML_ASSERT(ne01 == 1);
  10337. GGML_ASSERT(ne02 == ne2);
  10338. GGML_ASSERT(ne03 == ne3);
  10339. GGML_ASSERT(nb00 == sizeof(float));
  10340. GGML_ASSERT(nb0 == sizeof(float));
  10341. for (int i3 = 0; i3 < ne3; i3++) {
  10342. for (int i2 = 0; i2 < ne2; i2++) {
  10343. for (int i1 = 0; i1 < ne1; i1++) {
  10344. float * d = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1);
  10345. float * s = (float *)((char *) src0->data + i3*nb03 + i2*nb02);
  10346. for (int i0 = 0; i0 < i1; i0++) {
  10347. d[i0] = 0;
  10348. }
  10349. d[i1] = s[i1];
  10350. for (int i0 = i1+1; i0 < ne0; i0++) {
  10351. d[i0] = 0;
  10352. }
  10353. }
  10354. }
  10355. }
  10356. }
  10357. static void ggml_compute_forward_diag(
  10358. const struct ggml_compute_params * params,
  10359. const struct ggml_tensor * src0,
  10360. struct ggml_tensor * dst) {
  10361. switch (src0->type) {
  10362. case GGML_TYPE_F32:
  10363. {
  10364. ggml_compute_forward_diag_f32(params, src0, dst);
  10365. } break;
  10366. default:
  10367. {
  10368. GGML_ASSERT(false);
  10369. } break;
  10370. }
  10371. }
  10372. // ggml_compute_forward_diag_mask_inf
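// Writes `value` into every element strictly above the diagonal shifted by n_past
// (i > n_past + j); with value = -INFINITY this is the causal attention mask,
// with value = 0 it is the _zero variant further below.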
  10373. static void ggml_compute_forward_diag_mask_f32(
  10374. const struct ggml_compute_params * params,
  10375. const struct ggml_tensor * src0,
  10376. struct ggml_tensor * dst,
  10377. const float value) {
  10378. const int ith = params->ith;
  10379. const int nth = params->nth;
  10380. const int n_past = ((int32_t *) dst->op_params)[0];
  10381. const bool inplace = src0->data == dst->data;
  10382. GGML_ASSERT(n_past >= 0);
  10383. if (!inplace && (params->type == GGML_TASK_INIT)) {
  10384. // memcpy needs to be synchronized across threads to avoid race conditions.
  10385. // => do it in INIT phase
  10386. GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));
  10387. GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));
  10388. memcpy(
  10389. ((char *) dst->data),
  10390. ((char *) src0->data),
  10391. ggml_nbytes(dst));
  10392. }
  10393. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  10394. return;
  10395. }
  10396. // TODO: handle transposed/permuted matrices
  10397. const int n = ggml_nrows(src0);
  10398. const int nc = src0->ne[0];
  10399. const int nr = src0->ne[1];
  10400. const int nz = n/nr;
  10401. GGML_ASSERT( dst->nb[0] == sizeof(float));
  10402. GGML_ASSERT(src0->nb[0] == sizeof(float));
  10403. for (int k = 0; k < nz; k++) {
  10404. for (int j = ith; j < nr; j += nth) {
  10405. for (int i = n_past; i < nc; i++) {
  10406. if (i > n_past + j) {
  10407. *(float *)((char *) dst->data + k*dst->nb[2] + j*dst->nb[1] + i*dst->nb[0]) = value;
  10408. }
  10409. }
  10410. }
  10411. }
  10412. }
  10413. static void ggml_compute_forward_diag_mask_inf(
  10414. const struct ggml_compute_params * params,
  10415. const struct ggml_tensor * src0,
  10416. struct ggml_tensor * dst) {
  10417. switch (src0->type) {
  10418. case GGML_TYPE_F32:
  10419. {
  10420. ggml_compute_forward_diag_mask_f32(params, src0, dst, -INFINITY);
  10421. } break;
  10422. default:
  10423. {
  10424. GGML_ASSERT(false);
  10425. } break;
  10426. }
  10427. }
  10428. static void ggml_compute_forward_diag_mask_zero(
  10429. const struct ggml_compute_params * params,
  10430. const struct ggml_tensor * src0,
  10431. struct ggml_tensor * dst) {
  10432. switch (src0->type) {
  10433. case GGML_TYPE_F32:
  10434. {
  10435. ggml_compute_forward_diag_mask_f32(params, src0, dst, 0);
  10436. } break;
  10437. default:
  10438. {
  10439. GGML_ASSERT(false);
  10440. } break;
  10441. }
  10442. }
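// [Editor's note] ggml_compute_forward_diag_mask_f32 above implements the causal
// attention mask: in row j, every column i with i > n_past + j is overwritten with
// `value` (-INFINITY for diag_mask_inf, 0 for diag_mask_zero). An illustrative
// sketch for one 2-D slice follows; the function name is the editor's own.
static void example_causal_mask_f32(float * x, int n_rows, int n_cols, int n_past, float value) {
    for (int j = 0; j < n_rows; j++) {
        for (int i = n_past + j + 1; i < n_cols; i++) {
            x[j*n_cols + i] = value;
        }
    }
}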
  10443. // ggml_compute_forward_soft_max
  10444. static void ggml_compute_forward_soft_max_f32(
  10445. const struct ggml_compute_params * params,
  10446. const struct ggml_tensor * src0,
  10447. struct ggml_tensor * dst) {
  10448. GGML_ASSERT(ggml_is_contiguous(src0));
  10449. GGML_ASSERT(ggml_is_contiguous(dst));
  10450. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  10451. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  10452. return;
  10453. }
  10454. // TODO: handle transposed/permuted matrices
  10455. const int ith = params->ith;
  10456. const int nth = params->nth;
  10457. const int nc = src0->ne[0];
  10458. const int nr = ggml_nrows(src0);
  10459. // rows per thread
  10460. const int dr = (nr + nth - 1)/nth;
  10461. // row range for this thread
  10462. const int ir0 = dr*ith;
  10463. const int ir1 = MIN(ir0 + dr, nr);
  10464. for (int i1 = ir0; i1 < ir1; i1++) {
  10465. float *sp = (float *)((char *) src0->data + i1*src0->nb[1]);
  10466. float *dp = (float *)((char *) dst->data + i1*dst->nb[1]);
  10467. #ifndef NDEBUG
  10468. for (int i = 0; i < nc; ++i) {
  10469. //printf("p[%d] = %f\n", i, p[i]);
  10470. assert(!isnan(sp[i]));
  10471. }
  10472. #endif
  10473. float max = -INFINITY;
  10474. ggml_vec_max_f32(nc, &max, sp);
  10475. ggml_float sum = 0.0;
  10476. uint16_t scvt;
  10477. for (int i = 0; i < nc; i++) {
  10478. if (sp[i] == -INFINITY) {
  10479. dp[i] = 0.0f;
  10480. } else {
  10481. // const float val = (sp[i] == -INFINITY) ? 0.0 : exp(sp[i] - max);
  10482. ggml_fp16_t s = GGML_FP32_TO_FP16(sp[i] - max);
  10483. memcpy(&scvt, &s, sizeof(scvt));
  10484. const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt]);
  10485. sum += (ggml_float)val;
  10486. dp[i] = val;
  10487. }
  10488. }
  10489. assert(sum > 0.0);
  10490. sum = 1.0/sum;
  10491. ggml_vec_scale_f32(nc, dp, sum);
  10492. #ifndef NDEBUG
  10493. for (int i = 0; i < nc; ++i) {
  10494. assert(!isnan(dp[i]));
  10495. assert(!isinf(dp[i]));
  10496. }
  10497. #endif
  10498. }
  10499. }
  10500. static void ggml_compute_forward_soft_max(
  10501. const struct ggml_compute_params * params,
  10502. const struct ggml_tensor * src0,
  10503. struct ggml_tensor * dst) {
  10504. switch (src0->type) {
  10505. case GGML_TYPE_F32:
  10506. {
  10507. ggml_compute_forward_soft_max_f32(params, src0, dst);
  10508. } break;
  10509. default:
  10510. {
  10511. GGML_ASSERT(false);
  10512. } break;
  10513. }
  10514. }
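// [Editor's note] The forward soft_max above subtracts the row maximum for
// numerical stability and evaluates exp() through the fp16 lookup table
// table_exp_f16. A plain-float reference for a single row, assuming nothing about
// that table, might look like the sketch below (expf/INFINITY come from <math.h>,
// which ggml.c already includes; the function name is the editor's own).
static void example_soft_max_row_f32(const float * x, float * y, int n) {
    float max = -INFINITY;
    for (int i = 0; i < n; i++) {
        if (x[i] > max) max = x[i];
    }
    double sum = 0.0;
    for (int i = 0; i < n; i++) {
        // -INFINITY inputs (fully masked positions) map to exactly 0
        y[i] = (x[i] == -INFINITY) ? 0.0f : expf(x[i] - max);
        sum += y[i];
    }
    const float inv_sum = (float) (1.0/sum);
    for (int i = 0; i < n; i++) {
        y[i] *= inv_sum;
    }
}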
  10515. // ggml_compute_forward_soft_max_back
  10516. static void ggml_compute_forward_soft_max_back_f32(
  10517. const struct ggml_compute_params * params,
  10518. const struct ggml_tensor * src0,
  10519. const struct ggml_tensor * src1,
  10520. struct ggml_tensor * dst) {
  10521. GGML_ASSERT(ggml_is_contiguous(src0));
  10522. GGML_ASSERT(ggml_is_contiguous(src1));
  10523. GGML_ASSERT(ggml_is_contiguous(dst));
  10524. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  10525. GGML_ASSERT(ggml_are_same_shape(src1, dst));
  10526. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  10527. return;
  10528. }
  10529. // TODO: handle transposed/permuted matrices
  10530. const int ith = params->ith;
  10531. const int nth = params->nth;
  10532. const int nc = src0->ne[0];
  10533. const int nr = ggml_nrows(src0);
  10534. // rows per thread
  10535. const int dr = (nr + nth - 1)/nth;
  10536. // row range for this thread
  10537. const int ir0 = dr*ith;
  10538. const int ir1 = MIN(ir0 + dr, nr);
  10539. for (int i1 = ir0; i1 < ir1; i1++) {
  10540. float *dy = (float *)((char *) src0->data + i1*src0->nb[1]);
  10541. float *y = (float *)((char *) src1->data + i1*src1->nb[1]);
  10542. float *dx = (float *)((char *) dst->data + i1*dst->nb[1]);
  10543. #ifndef NDEBUG
  10544. for (int i = 0; i < nc; ++i) {
  10545. //printf("p[%d] = %f\n", i, p[i]);
  10546. assert(!isnan(dy[i]));
  10547. assert(!isnan(y[i]));
  10548. }
  10549. #endif
  10550. // Jii = yi - yi*yi
  10551. // Jij = -yi*yj
  10552. // J = diag(y)-y.T*y
  10553. // dx = J * dy
  10554. // dxk = sum_i(Jki * dyi)
  10555. // dxk = sum_i(-yk*yi * dyi) - (-yk*yk)*dyk + (yk - yk*yk)*dyk
  10556. // dxk = sum_i(-yk*yi * dyi) + yk*yk*dyk + yk*dyk - yk*yk*dyk
  10557. // dxk = sum_i(-yk*yi * dyi) + yk*dyk
  10558. // dxk = -yk * sum_i(yi * dyi) + yk*dyk
  10559. // dxk = -yk * dot(y, dy) + yk*dyk
  10560. // dxk = yk * (- dot(y, dy) + dyk)
  10561. // dxk = yk * (dyk - dot(y, dy))
  10562. //
  10563. // post-order:
  10564. // dot_y_dy := dot(y, dy)
  10565. // dx := dy
  10566. // dx := dx - dot_y_dy
  10567. // dx := dx * y
  10568. // linear runtime, no additional memory
  10569. float dot_y_dy = 0;
  10570. ggml_vec_dot_f32 (nc, &dot_y_dy, y, dy);
  10571. ggml_vec_cpy_f32 (nc, dx, dy);
  10572. ggml_vec_acc1_f32(nc, dx, -dot_y_dy);
  10573. ggml_vec_mul_f32 (nc, dx, dx, y);
  10574. #ifndef NDEBUG
  10575. for (int i = 0; i < nc; ++i) {
  10576. assert(!isnan(dx[i]));
  10577. assert(!isinf(dx[i]));
  10578. }
  10579. #endif
  10580. }
  10581. }
  10582. static void ggml_compute_forward_soft_max_back(
  10583. const struct ggml_compute_params * params,
  10584. const struct ggml_tensor * src0,
  10585. const struct ggml_tensor * src1,
  10586. struct ggml_tensor * dst) {
  10587. switch (src0->type) {
  10588. case GGML_TYPE_F32:
  10589. {
  10590. ggml_compute_forward_soft_max_back_f32(params, src0, src1, dst);
  10591. } break;
  10592. default:
  10593. {
  10594. GGML_ASSERT(false);
  10595. } break;
  10596. }
  10597. }
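// [Editor's note] The comment block inside soft_max_back_f32 derives
// dx_k = y_k * (dy_k - dot(y, dy)); the ggml_vec_dot/cpy/acc1/mul sequence above is
// that expression vectorized. Written out per element (illustrative only, the
// function name is the editor's own):
static void example_soft_max_back_row_f32(const float * y, const float * dy, float * dx, int n) {
    float dot_y_dy = 0.0f;
    for (int i = 0; i < n; i++) {
        dot_y_dy += y[i]*dy[i];
    }
    for (int i = 0; i < n; i++) {
        dx[i] = y[i]*(dy[i] - dot_y_dy);
    }
}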
  10598. // ggml_compute_forward_alibi
  10599. static void ggml_compute_forward_alibi_f32(
  10600. const struct ggml_compute_params * params,
  10601. const struct ggml_tensor * src0,
  10602. struct ggml_tensor * dst) {
  10603. assert(params->ith == 0);
  10604. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  10605. return;
  10606. }
  10607. const int n_head = ((int32_t *) dst->op_params)[1];
  10608. float max_bias;
  10609. memcpy(&max_bias, (int32_t *) dst->op_params + 2, sizeof(float));
10610. const int n_past = ((int32_t *) dst->op_params)[0]; assert(n_past >= 0); UNUSED(n_past);
  10611. const int ne0 = src0->ne[0]; // all_seq_len = n_past + ne1
  10612. const int ne1 = src0->ne[1]; // seq_len_without_past
  10613. const int ne2 = src0->ne[2]; // n_head -> this is k
  10614. //const int ne3 = src0->ne[3]; // 1 -> bsz
  10615. const int n = ggml_nrows(src0);
  10616. const int ne2_ne3 = n/ne1; // ne2*ne3
  10617. const int nb0 = src0->nb[0];
  10618. const int nb1 = src0->nb[1];
  10619. const int nb2 = src0->nb[2];
  10620. //const int nb3 = src0->nb[3];
  10621. GGML_ASSERT(nb0 == sizeof(float));
  10622. GGML_ASSERT(n_head == ne2);
  10623. // add alibi to src0 (KQ_scaled)
  10624. const int n_heads_log2_floor = 1 << (int) floor(log2(n_head));
  10625. const float m0 = powf(2.0f, -(max_bias) / n_heads_log2_floor);
  10626. const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_heads_log2_floor);
  10627. for (int i = 0; i < ne0; i++) {
  10628. for (int j = 0; j < ne1; j++) {
  10629. for (int k = 0; k < ne2_ne3; k++) {
  10630. float * const src = (float *)((char *) src0->data + i*nb0 + j*nb1 + k*nb2);
  10631. float * pdst = (float *)((char *) dst->data + i*nb0 + j*nb1 + k*nb2);
  10632. // TODO: k*nb2 or k*nb3
  10633. float m_k;
  10634. if (k < n_heads_log2_floor) {
  10635. m_k = powf(m0, k + 1);
  10636. } else {
  10637. m_k = powf(m1, 2 * (k - n_heads_log2_floor) + 1);
  10638. }
  10639. pdst[0] = i * m_k + src[0];
  10640. }
  10641. }
  10642. }
  10643. }
  10644. static void ggml_compute_forward_alibi_f16(
  10645. const struct ggml_compute_params * params,
  10646. const struct ggml_tensor * src0,
  10647. struct ggml_tensor * dst) {
  10648. assert(params->ith == 0);
  10649. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  10650. return;
  10651. }
  10652. //const int n_past = ((int32_t *) dst->op_params)[0];
  10653. const int n_head = ((int32_t *) dst->op_params)[1];
  10654. float max_bias;
  10655. memcpy(&max_bias, (int32_t *) dst->op_params + 2, sizeof(float));
  10656. const int ne0 = src0->ne[0]; // all_seq_len = n_past + ne1
  10657. const int ne1 = src0->ne[1]; // seq_len_without_past
  10658. const int ne2 = src0->ne[2]; // n_head -> this is k
  10659. //const int ne3 = src0->ne[3]; // 1 -> bsz
  10660. const int n = ggml_nrows(src0);
  10661. const int ne2_ne3 = n/ne1; // ne2*ne3
  10662. const int nb0 = src0->nb[0];
  10663. const int nb1 = src0->nb[1];
  10664. const int nb2 = src0->nb[2];
  10665. //const int nb3 = src0->nb[3];
  10666. GGML_ASSERT(nb0 == sizeof(ggml_fp16_t));
  10667. //GGML_ASSERT(ne1 + n_past == ne0); (void) n_past;
  10668. GGML_ASSERT(n_head == ne2);
  10669. // add alibi to src0 (KQ_scaled)
  10670. const int n_heads_log2_floor = 1 << (int) floor(log2(n_head));
  10671. const float m0 = powf(2.0f, -(max_bias) / n_heads_log2_floor);
  10672. const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_heads_log2_floor);
  10673. for (int i = 0; i < ne0; i++) {
  10674. for (int j = 0; j < ne1; j++) {
  10675. for (int k = 0; k < ne2_ne3; k++) {
  10676. ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i*nb0 + j*nb1 + k*nb2);
  10677. float * pdst = (float *)((char *) dst->data + i*nb0 + j*nb1 + k*nb2);
  10678. // TODO: k*nb2 or k*nb3
  10679. float m_k;
  10680. if (k < n_heads_log2_floor) {
  10681. m_k = powf(m0, k + 1);
  10682. } else {
  10683. m_k = powf(m1, 2 * (k - n_heads_log2_floor) + 1);
  10684. }
  10685. // we return F32
  10686. pdst[0] = i * m_k + GGML_FP16_TO_FP32(src[0]);
  10687. }
  10688. }
  10689. }
  10690. }
  10691. static void ggml_compute_forward_alibi(
  10692. const struct ggml_compute_params * params,
  10693. const struct ggml_tensor * src0,
  10694. struct ggml_tensor * dst) {
  10695. switch (src0->type) {
  10696. case GGML_TYPE_F16:
  10697. {
  10698. ggml_compute_forward_alibi_f16(params, src0, dst);
  10699. } break;
  10700. case GGML_TYPE_F32:
  10701. {
  10702. ggml_compute_forward_alibi_f32(params, src0, dst);
  10703. } break;
  10704. case GGML_TYPE_Q4_0:
  10705. case GGML_TYPE_Q4_1:
  10706. case GGML_TYPE_Q5_0:
  10707. case GGML_TYPE_Q5_1:
  10708. case GGML_TYPE_Q8_0:
  10709. case GGML_TYPE_Q8_1:
  10710. case GGML_TYPE_Q2_K:
  10711. case GGML_TYPE_Q3_K:
  10712. case GGML_TYPE_Q4_K:
  10713. case GGML_TYPE_Q5_K:
  10714. case GGML_TYPE_Q6_K:
  10715. case GGML_TYPE_Q8_K:
  10716. case GGML_TYPE_I8:
  10717. case GGML_TYPE_I16:
  10718. case GGML_TYPE_I32:
  10719. case GGML_TYPE_COUNT:
  10720. {
  10721. GGML_ASSERT(false);
  10722. } break;
  10723. }
  10724. }
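// [Editor's note] Both alibi kernels above add a per-head linear bias i * m_k to
// the attention scores, where the slope m_k depends only on the head index and
// max_bias. The sketch below isolates that slope computation; for n_head = 8 and
// max_bias = 8.0 it reproduces the canonical ALiBi slopes 1/2, 1/4, ..., 1/256.
// The function name is the editor's own and not part of ggml.
static float example_alibi_slope(int k, int n_head, float max_bias) {
    const int n_heads_log2_floor = 1 << (int) floor(log2(n_head));
    const float m0 = powf(2.0f, -(max_bias)        / n_heads_log2_floor);
    const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_heads_log2_floor);
    return (k < n_heads_log2_floor) ? powf(m0, k + 1)
                                    : powf(m1, 2*(k - n_heads_log2_floor) + 1);
}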
  10725. // ggml_compute_forward_clamp
  10726. static void ggml_compute_forward_clamp_f32(
  10727. const struct ggml_compute_params * params,
  10728. const struct ggml_tensor * src0,
  10729. struct ggml_tensor * dst) {
  10730. assert(params->ith == 0);
  10731. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  10732. return;
  10733. }
  10734. float min;
  10735. float max;
  10736. memcpy(&min, (float *) dst->op_params + 0, sizeof(float));
  10737. memcpy(&max, (float *) dst->op_params + 1, sizeof(float));
  10738. const int ith = params->ith;
  10739. const int nth = params->nth;
  10740. const int n = ggml_nrows(src0);
  10741. const int nc = src0->ne[0];
  10742. const size_t nb00 = src0->nb[0];
  10743. const size_t nb01 = src0->nb[1];
  10744. const size_t nb0 = dst->nb[0];
  10745. const size_t nb1 = dst->nb[1];
  10746. GGML_ASSERT( nb0 == sizeof(float));
  10747. GGML_ASSERT(nb00 == sizeof(float));
  10748. for (int j = ith; j < n; j += nth) {
  10749. float * dst_ptr = (float *) ((char *) dst->data + j*nb1);
  10750. float * src0_ptr = (float *) ((char *) src0->data + j*nb01);
  10751. for (int i = 0; i < nc; i++) {
  10752. dst_ptr[i] = MAX(MIN(src0_ptr[i], max), min);
  10753. }
  10754. }
  10755. }
  10756. static void ggml_compute_forward_clamp(
  10757. const struct ggml_compute_params * params,
  10758. const struct ggml_tensor * src0,
  10759. struct ggml_tensor * dst) {
  10760. switch (src0->type) {
  10761. case GGML_TYPE_F32:
  10762. {
  10763. ggml_compute_forward_clamp_f32(params, src0, dst);
  10764. } break;
  10765. case GGML_TYPE_F16:
  10766. case GGML_TYPE_Q4_0:
  10767. case GGML_TYPE_Q4_1:
  10768. case GGML_TYPE_Q5_0:
  10769. case GGML_TYPE_Q5_1:
  10770. case GGML_TYPE_Q8_0:
  10771. case GGML_TYPE_Q8_1:
  10772. case GGML_TYPE_Q2_K:
  10773. case GGML_TYPE_Q3_K:
  10774. case GGML_TYPE_Q4_K:
  10775. case GGML_TYPE_Q5_K:
  10776. case GGML_TYPE_Q6_K:
  10777. case GGML_TYPE_Q8_K:
  10778. case GGML_TYPE_I8:
  10779. case GGML_TYPE_I16:
  10780. case GGML_TYPE_I32:
  10781. case GGML_TYPE_COUNT:
  10782. {
  10783. GGML_ASSERT(false);
  10784. } break;
  10785. }
  10786. }
  10787. // ggml_compute_forward_rope
  10788. static void ggml_compute_forward_rope_f32(
  10789. const struct ggml_compute_params * params,
  10790. const struct ggml_tensor * src0,
  10791. const struct ggml_tensor * src1,
  10792. struct ggml_tensor * dst) {
  10793. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  10794. return;
  10795. }
  10796. float freq_base;
  10797. float freq_scale;
  10798. // these two only relevant for xPos RoPE:
  10799. float xpos_base;
  10800. bool xpos_down;
  10801. //const int n_past = ((int32_t *) dst->op_params)[0];
  10802. const int n_dims = ((int32_t *) dst->op_params)[1];
  10803. const int mode = ((int32_t *) dst->op_params)[2];
  10804. const int n_ctx = ((int32_t *) dst->op_params)[3];
  10805. memcpy(&freq_base, (int32_t *) dst->op_params + 4, sizeof(float));
  10806. memcpy(&freq_scale, (int32_t *) dst->op_params + 5, sizeof(float));
  10807. memcpy(&xpos_base, (int32_t *) dst->op_params + 6, sizeof(float));
  10808. memcpy(&xpos_down, (int32_t *) dst->op_params + 7, sizeof(bool));
  10809. GGML_TENSOR_UNARY_OP_LOCALS
  10810. //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
  10811. //printf("n_past = %d, ne2 = %d\n", n_past, ne2);
  10812. GGML_ASSERT(nb00 == sizeof(float));
  10813. const int ith = params->ith;
  10814. const int nth = params->nth;
  10815. const int nr = ggml_nrows(dst);
  10816. GGML_ASSERT(n_dims <= ne0);
  10817. GGML_ASSERT(n_dims % 2 == 0);
  10818. // rows per thread
  10819. const int dr = (nr + nth - 1)/nth;
  10820. // row range for this thread
  10821. const int ir0 = dr*ith;
  10822. const int ir1 = MIN(ir0 + dr, nr);
  10823. // row index used to determine which thread to use
  10824. int ir = 0;
  10825. const float theta_scale = powf(freq_base, -2.0f/n_dims);
  10826. const bool is_neox = mode & 2;
  10827. const bool is_glm = mode & 4;
  10828. const int32_t * pos = (const int32_t *) src1->data;
  10829. for (int64_t i3 = 0; i3 < ne3; i3++) {
  10830. for (int64_t i2 = 0; i2 < ne2; i2++) {
  10831. const int64_t p = pos[i2];
  10832. for (int64_t i1 = 0; i1 < ne1; i1++) {
  10833. if (ir++ < ir0) continue;
  10834. if (ir > ir1) break;
  10835. float theta = freq_scale * (float)p;
  10836. if (is_glm) {
  10837. theta = MIN(p, n_ctx - 2);
  10838. float block_theta = MAX(p - (n_ctx - 2), 0);
  10839. for (int64_t i0 = 0; i0 < ne0 / 4; i0++) {
  10840. const float cos_theta = cosf(theta);
  10841. const float sin_theta = sinf(theta);
  10842. const float cos_block_theta = cosf(block_theta);
  10843. const float sin_block_theta = sinf(block_theta);
  10844. theta *= theta_scale;
  10845. block_theta *= theta_scale;
  10846. const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  10847. float * dst_data = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  10848. const float x0 = src[0];
  10849. const float x1 = src[n_dims/2];
  10850. const float x2 = src[n_dims];
  10851. const float x3 = src[n_dims/2*3];
  10852. dst_data[0] = x0*cos_theta - x1*sin_theta;
  10853. dst_data[n_dims/2] = x0*sin_theta + x1*cos_theta;
  10854. dst_data[n_dims] = x2*cos_block_theta - x3*sin_block_theta;
  10855. dst_data[n_dims/2*3] = x2*sin_block_theta + x3*cos_block_theta;
  10856. }
  10857. } else if (!is_neox) {
  10858. for (int64_t i0 = 0; i0 < ne0; i0 += 2) {
  10859. const float cos_theta = cosf(theta);
  10860. const float sin_theta = sinf(theta);
  10861. // zeta scaling for xPos only:
  10862. float zeta = xpos_base != 0.0f ? powf((i0 + 0.4f * ne0) / (1.4f * ne0), p / xpos_base) : 1.0f;
  10863. if (xpos_down) zeta = 1.0f / zeta;
  10864. theta *= theta_scale;
  10865. const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  10866. float * dst_data = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  10867. const float x0 = src[0];
  10868. const float x1 = src[1];
  10869. dst_data[0] = x0*cos_theta*zeta - x1*sin_theta*zeta;
  10870. dst_data[1] = x0*sin_theta*zeta + x1*cos_theta*zeta;
  10871. }
  10872. } else {
  10873. // TODO: this might be wrong for ne0 != n_dims - need double check
  10874. // ref: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt_neox/modeling_gpt_neox.py#LL251C1-L294C28
  10875. for (int64_t ib = 0; ib < ne0/n_dims; ++ib) {
  10876. for (int64_t ic = 0; ic < n_dims; ic += 2) {
  10877. const float cos_theta = cosf(theta);
  10878. const float sin_theta = sinf(theta);
  10879. theta *= theta_scale;
  10880. const int64_t i0 = ib*n_dims + ic/2;
  10881. const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  10882. float * dst_data = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  10883. const float x0 = src[0];
  10884. const float x1 = src[n_dims/2];
  10885. dst_data[0] = x0*cos_theta - x1*sin_theta;
  10886. dst_data[n_dims/2] = x0*sin_theta + x1*cos_theta;
  10887. }
  10888. }
  10889. }
  10890. }
  10891. }
  10892. }
  10893. }
  10894. static void ggml_compute_forward_rope_f16(
  10895. const struct ggml_compute_params * params,
  10896. const struct ggml_tensor * src0,
  10897. const struct ggml_tensor * src1,
  10898. struct ggml_tensor * dst) {
  10899. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  10900. return;
  10901. }
  10902. float freq_base;
  10903. float freq_scale;
  10904. //const int n_past = ((int32_t *) dst->op_params)[0];
  10905. const int n_dims = ((int32_t *) dst->op_params)[1];
  10906. const int mode = ((int32_t *) dst->op_params)[2];
  10907. const int n_ctx = ((int32_t *) dst->op_params)[3];
  10908. memcpy(&freq_base, (int32_t *) dst->op_params + 4, sizeof(float));
  10909. memcpy(&freq_scale, (int32_t *) dst->op_params + 5, sizeof(float));
  10910. GGML_TENSOR_UNARY_OP_LOCALS
  10911. //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
  10912. //printf("n_past = %d, ne2 = %d\n", n_past, ne2);
  10913. GGML_ASSERT(nb0 == sizeof(ggml_fp16_t));
  10914. const int ith = params->ith;
  10915. const int nth = params->nth;
  10916. const int nr = ggml_nrows(dst);
  10917. GGML_ASSERT(n_dims <= ne0);
  10918. GGML_ASSERT(n_dims % 2 == 0);
  10919. // rows per thread
  10920. const int dr = (nr + nth - 1)/nth;
  10921. // row range for this thread
  10922. const int ir0 = dr*ith;
  10923. const int ir1 = MIN(ir0 + dr, nr);
  10924. // row index used to determine which thread to use
  10925. int ir = 0;
  10926. const float theta_scale = powf(freq_base, -2.0f/n_dims);
  10927. const bool is_neox = mode & 2;
  10928. const bool is_glm = mode & 4;
  10929. const int32_t * pos = (const int32_t *) src1->data;
  10930. for (int64_t i3 = 0; i3 < ne3; i3++) {
  10931. for (int64_t i2 = 0; i2 < ne2; i2++) {
  10932. const int64_t p = pos[i2];
  10933. for (int64_t i1 = 0; i1 < ne1; i1++) {
  10934. if (ir++ < ir0) continue;
  10935. if (ir > ir1) break;
  10936. float theta = freq_scale * (float)p;
  10937. if (is_glm) {
  10938. theta = MIN(p, n_ctx - 2);
  10939. float block_theta = MAX(p - (n_ctx - 2), 0);
  10940. for (int64_t i0 = 0; i0 < ne0 / 4; i0++) {
  10941. const float cos_theta = cosf(theta);
  10942. const float sin_theta = sinf(theta);
  10943. const float cos_block_theta = cosf(block_theta);
  10944. const float sin_block_theta = sinf(block_theta);
  10945. theta *= theta_scale;
  10946. block_theta *= theta_scale;
  10947. const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  10948. ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  10949. const float x0 = GGML_FP16_TO_FP32(src[0]);
  10950. const float x1 = GGML_FP16_TO_FP32(src[n_dims/2]);
  10951. const float x2 = GGML_FP16_TO_FP32(src[n_dims]);
  10952. const float x3 = GGML_FP16_TO_FP32(src[n_dims/2*3]);
  10953. dst_data[0] = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta);
  10954. dst_data[n_dims/2] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta);
  10955. dst_data[n_dims] = GGML_FP32_TO_FP16(x2*cos_block_theta - x3*sin_block_theta);
  10956. dst_data[n_dims/2*3] = GGML_FP32_TO_FP16(x2*sin_block_theta + x3*cos_block_theta);
  10957. }
10958. } else if (!is_neox) {
  10959. for (int64_t i0 = 0; i0 < ne0; i0 += 2) {
  10960. const float cos_theta = cosf(theta);
  10961. const float sin_theta = sinf(theta);
  10962. theta *= theta_scale;
  10963. const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  10964. ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  10965. const float x0 = GGML_FP16_TO_FP32(src[0]);
  10966. const float x1 = GGML_FP16_TO_FP32(src[1]);
  10967. dst_data[0] = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta);
  10968. dst_data[1] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta);
  10969. }
  10970. } else {
  10971. // TODO: this might be wrong for ne0 != n_dims - need double check
  10972. // ref: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt_neox/modeling_gpt_neox.py#LL251C1-L294C28
  10973. for (int64_t ib = 0; ib < ne0/n_dims; ++ib) {
  10974. for (int64_t ic = 0; ic < n_dims; ic += 2) {
  10975. const float cos_theta = cosf(theta);
  10976. const float sin_theta = sinf(theta);
  10977. theta *= theta_scale;
  10978. const int64_t i0 = ib*n_dims + ic/2;
  10979. const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  10980. ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  10981. const float x0 = GGML_FP16_TO_FP32(src[0]);
  10982. const float x1 = GGML_FP16_TO_FP32(src[n_dims/2]);
  10983. dst_data[0] = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta);
  10984. dst_data[n_dims/2] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta);
  10985. }
  10986. }
  10987. }
  10988. }
  10989. }
  10990. }
  10991. }
  10992. static void ggml_compute_forward_rope(
  10993. const struct ggml_compute_params * params,
  10994. const struct ggml_tensor * src0,
  10995. const struct ggml_tensor * src1,
  10996. struct ggml_tensor * dst) {
  10997. switch (src0->type) {
  10998. case GGML_TYPE_F16:
  10999. {
  11000. ggml_compute_forward_rope_f16(params, src0, src1, dst);
  11001. } break;
  11002. case GGML_TYPE_F32:
  11003. {
  11004. ggml_compute_forward_rope_f32(params, src0, src1, dst);
  11005. } break;
  11006. default:
  11007. {
  11008. GGML_ASSERT(false);
  11009. } break;
  11010. }
  11011. }
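// [Editor's note] In the default (non-neox, non-glm) path above, RoPE rotates each
// consecutive pair (x[2i], x[2i+1]) by its own angle; the angle starts at
// freq_scale * pos and is multiplied by theta_scale = powf(freq_base, -2.0f/n_dims)
// from one pair to the next. A minimal sketch for one row with the xPos factor
// disabled (zeta == 1); the function name is the editor's own.
static void example_rope_row_f32(float * x, int n_dims, int pos, float freq_base, float freq_scale) {
    const float theta_scale = powf(freq_base, -2.0f/n_dims);
    float theta = freq_scale * (float) pos;
    for (int i0 = 0; i0 < n_dims; i0 += 2) {
        const float cos_theta = cosf(theta);
        const float sin_theta = sinf(theta);
        theta *= theta_scale;
        const float x0 = x[i0 + 0];
        const float x1 = x[i0 + 1];
        x[i0 + 0] = x0*cos_theta - x1*sin_theta;
        x[i0 + 1] = x0*sin_theta + x1*cos_theta;
    }
}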
  11012. // ggml_compute_forward_rope_back
  11013. static void ggml_compute_forward_rope_back_f32(
  11014. const struct ggml_compute_params * params,
  11015. const struct ggml_tensor * src0,
  11016. const struct ggml_tensor * src1,
  11017. struct ggml_tensor * dst) {
  11018. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  11019. return;
  11020. }
  11021. // y = rope(x, src1)
  11022. // dx = rope_back(dy, src1)
  11023. // src0 is dy, src1 contains options
  11024. float freq_base;
  11025. float freq_scale;
  11026. // these two only relevant for xPos RoPE:
  11027. float xpos_base;
  11028. bool xpos_down;
  11029. //const int n_past = ((int32_t *) dst->op_params)[0];
  11030. const int n_dims = ((int32_t *) dst->op_params)[1];
  11031. const int mode = ((int32_t *) dst->op_params)[2];
  11032. const int n_ctx = ((int32_t *) dst->op_params)[3]; UNUSED(n_ctx);
  11033. memcpy(&freq_base, (int32_t *) dst->op_params + 4, sizeof(float));
  11034. memcpy(&freq_scale, (int32_t *) dst->op_params + 5, sizeof(float));
  11035. memcpy(&xpos_base, (int32_t *) dst->op_params + 6, sizeof(float));
  11036. memcpy(&xpos_down, (int32_t *) dst->op_params + 7, sizeof(bool));
  11037. GGML_TENSOR_UNARY_OP_LOCALS
  11038. //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
  11039. //printf("n_past = %d, ne2 = %d\n", n_past, ne2);
  11040. assert(nb0 == sizeof(float));
  11041. const int ith = params->ith;
  11042. const int nth = params->nth;
  11043. const int nr = ggml_nrows(dst);
  11044. // rows per thread
  11045. const int dr = (nr + nth - 1)/nth;
  11046. // row range for this thread
  11047. const int ir0 = dr*ith;
  11048. const int ir1 = MIN(ir0 + dr, nr);
  11049. // row index used to determine which thread to use
  11050. int ir = 0;
  11051. const float theta_scale = powf(freq_base, -2.0f/n_dims);
  11052. const bool is_neox = mode & 2;
  11053. const int32_t * pos = (const int32_t *) src1->data;
  11054. for (int64_t i3 = 0; i3 < ne3; i3++) {
  11055. for (int64_t i2 = 0; i2 < ne2; i2++) {
  11056. const int64_t p = pos[i2];
  11057. for (int64_t i1 = 0; i1 < ne1; i1++) {
  11058. if (ir++ < ir0) continue;
  11059. if (ir > ir1) break;
  11060. float theta = freq_scale * (float)p;
  11061. if (!is_neox) {
  11062. for (int64_t i0 = 0; i0 < ne0; i0 += 2) {
  11063. const float cos_theta = cosf(theta);
  11064. const float sin_theta = sinf(theta);
  11065. // zeta scaling for xPos only:
  11066. float zeta = xpos_base != 0.0f ? powf((i0 + 0.4f * ne0) / (1.4f * ne0), p / xpos_base) : 1.0f;
  11067. if (xpos_down) zeta = 1.0f / zeta;
  11068. theta *= theta_scale;
  11069. const float * const dy = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  11070. float * dx = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  11071. const float dy0 = dy[0];
  11072. const float dy1 = dy[1];
  11073. dx[0] = dy0*cos_theta*zeta + dy1*sin_theta*zeta;
  11074. dx[1] = - dy0*sin_theta*zeta + dy1*cos_theta*zeta;
  11075. }
  11076. } else {
  11077. for (int64_t ib = 0; ib < ne0/n_dims; ++ib) {
  11078. for (int64_t ic = 0; ic < n_dims; ic += 2) {
  11079. const float cos_theta = cosf(theta);
  11080. const float sin_theta = sinf(theta);
  11081. theta *= theta_scale;
  11082. const int64_t i0 = ib*n_dims + ic/2;
  11083. const float * const dy = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  11084. float * dx = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  11085. const float dy0 = dy[0];
  11086. const float dy1 = dy[n_dims/2];
  11087. dx[0] = dy0*cos_theta + dy1*sin_theta;
  11088. dx[n_dims/2] = - dy0*sin_theta + dy1*cos_theta;
  11089. }
  11090. }
  11091. }
  11092. }
  11093. }
  11094. }
  11095. }
  11096. static void ggml_compute_forward_rope_back_f16(
  11097. const struct ggml_compute_params * params,
  11098. const struct ggml_tensor * src0,
  11099. const struct ggml_tensor * src1,
  11100. struct ggml_tensor * dst) {
  11101. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  11102. return;
  11103. }
  11104. // y = rope(x, src1)
  11105. // dx = rope_back(dy, src1)
  11106. // src0 is dy, src1 contains options
  11107. //const int n_past = ((int32_t *) dst->op_params)[0];
  11108. const int n_dims = ((int32_t *) dst->op_params)[1];
  11109. const int mode = ((int32_t *) dst->op_params)[2];
  11110. GGML_TENSOR_UNARY_OP_LOCALS
  11111. //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
  11112. //printf("n_past = %d, ne2 = %d\n", n_past, ne2);
  11113. assert(nb0 == sizeof(ggml_fp16_t));
  11114. const int ith = params->ith;
  11115. const int nth = params->nth;
  11116. const int nr = ggml_nrows(dst);
  11117. // rows per thread
  11118. const int dr = (nr + nth - 1)/nth;
  11119. // row range for this thread
  11120. const int ir0 = dr*ith;
  11121. const int ir1 = MIN(ir0 + dr, nr);
  11122. // row index used to determine which thread to use
  11123. int ir = 0;
  11124. const float theta_scale = powf(10000.0, -2.0f/n_dims);
  11125. const bool is_neox = mode & 2;
  11126. const int32_t * pos = (const int32_t *) src1->data;
  11127. for (int64_t i3 = 0; i3 < ne3; i3++) {
  11128. for (int64_t i2 = 0; i2 < ne2; i2++) {
  11129. const int64_t p = pos[i2];
  11130. for (int64_t i1 = 0; i1 < ne1; i1++) {
  11131. if (ir++ < ir0) continue;
  11132. if (ir > ir1) break;
  11133. float theta = (float)p;
  11134. if (!is_neox) {
  11135. for (int64_t i0 = 0; i0 < ne0; i0 += 2) {
  11136. const float cos_theta = cosf(theta);
  11137. const float sin_theta = sinf(theta);
  11138. theta *= theta_scale;
  11139. const ggml_fp16_t * const dy = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  11140. ggml_fp16_t * dx = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  11141. const float dy0 = GGML_FP16_TO_FP32(dy[0]);
  11142. const float dy1 = GGML_FP16_TO_FP32(dy[1]);
  11143. dx[0] = GGML_FP32_TO_FP16( dy0*cos_theta + dy1*sin_theta);
  11144. dx[1] = GGML_FP32_TO_FP16(-dy0*sin_theta + dy1*cos_theta);
  11145. }
  11146. } else {
  11147. for (int64_t ib = 0; ib < ne0/n_dims; ++ib) {
  11148. for (int64_t ic = 0; ic < n_dims; ic += 2) {
  11149. const float cos_theta = cosf(theta);
  11150. const float sin_theta = sinf(theta);
  11151. theta *= theta_scale;
  11152. const int64_t i0 = ib*n_dims + ic/2;
  11153. const ggml_fp16_t * const dy = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  11154. ggml_fp16_t * dx = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  11155. const float dy0 = GGML_FP16_TO_FP32(dy[0]);
  11156. const float dy1 = GGML_FP16_TO_FP32(dy[n_dims/2]);
  11157. dx[0] = GGML_FP32_TO_FP16( dy0*cos_theta + dy1*sin_theta);
  11158. dx[n_dims/2] = GGML_FP32_TO_FP16(-dy0*sin_theta + dy1*cos_theta);
  11159. }
  11160. }
  11161. }
  11162. }
  11163. }
  11164. }
  11165. }
  11166. static void ggml_compute_forward_rope_back(
  11167. const struct ggml_compute_params * params,
  11168. const struct ggml_tensor * src0,
  11169. const struct ggml_tensor * src1,
  11170. struct ggml_tensor * dst) {
  11171. switch (src0->type) {
  11172. case GGML_TYPE_F16:
  11173. {
  11174. ggml_compute_forward_rope_back_f16(params, src0, src1, dst);
  11175. } break;
  11176. case GGML_TYPE_F32:
  11177. {
  11178. ggml_compute_forward_rope_back_f32(params, src0, src1, dst);
  11179. } break;
  11180. default:
  11181. {
  11182. GGML_ASSERT(false);
  11183. } break;
  11184. }
  11185. }
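// [Editor's note] The rope_back kernels above apply the transposed rotation: the
// forward pass multiplies each pair by R(theta) = [[cos, -sin], [sin, cos]], while
// the backward pass uses R(theta)^T = R(theta)^-1 = [[cos, sin], [-sin, cos]]:
//
//     dx0 =  dy0*cos_theta + dy1*sin_theta
//     dx1 = -dy0*sin_theta + dy1*cos_theta
//
// so (up to the xPos zeta factor) rope_back exactly undoes rope for the same
// positions and parameters.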
  11186. // ggml_compute_forward_conv_1d
  11187. static void ggml_compute_forward_conv_1d_f16_f32(
  11188. const struct ggml_compute_params * params,
  11189. const struct ggml_tensor * src0,
  11190. const struct ggml_tensor * src1,
  11191. struct ggml_tensor * dst) {
  11192. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  11193. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  11194. GGML_ASSERT( dst->type == GGML_TYPE_F32);
  11195. int64_t t0 = ggml_perf_time_us();
  11196. UNUSED(t0);
  11197. GGML_TENSOR_BINARY_OP_LOCALS
  11198. const int ith = params->ith;
  11199. const int nth = params->nth;
  11200. const int nk = ne00;
  11201. // size of the convolution row - the kernel size unrolled across all input channels
  11202. const int ew0 = nk*ne01;
  11203. const int32_t s0 = ((const int32_t*)(dst->op_params))[0];
  11204. const int32_t p0 = ((const int32_t*)(dst->op_params))[1];
  11205. const int32_t d0 = ((const int32_t*)(dst->op_params))[2];
  11206. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  11207. GGML_ASSERT(nb10 == sizeof(float));
  11208. if (params->type == GGML_TASK_INIT) {
  11209. memset(params->wdata, 0, params->wsize);
  11210. ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;
  11211. for (int64_t i11 = 0; i11 < ne11; i11++) {
  11212. const float * const src = (float *)((char *) src1->data + i11*nb11);
  11213. ggml_fp16_t * dst_data = wdata;
  11214. for (int64_t i0 = 0; i0 < ne0; i0++) {
  11215. for (int64_t ik = 0; ik < nk; ik++) {
  11216. const int idx0 = i0*s0 + ik*d0 - p0;
  11217. if(!(idx0 < 0 || idx0 >= ne10)) {
  11218. dst_data[i0*ew0 + i11*nk + ik] = GGML_FP32_TO_FP16(src[idx0]);
  11219. }
  11220. }
  11221. }
  11222. }
  11223. return;
  11224. }
  11225. if (params->type == GGML_TASK_FINALIZE) {
  11226. return;
  11227. }
  11228. // total rows in dst
11229. const int nr = ne02;
  11230. // rows per thread
  11231. const int dr = (nr + nth - 1)/nth;
  11232. // row range for this thread
  11233. const int ir0 = dr*ith;
  11234. const int ir1 = MIN(ir0 + dr, nr);
  11235. ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;
  11236. for (int i2 = 0; i2 < ne2; i2++) {
  11237. for (int i1 = ir0; i1 < ir1; i1++) {
  11238. float * dst_data = (float *)((char *) dst->data + i2*nb2 + i1*nb1);
  11239. for (int i0 = 0; i0 < ne0; i0++) {
  11240. ggml_vec_dot_f16(ew0, dst_data + i0,
  11241. (ggml_fp16_t *) ((char *) src0->data + i1*nb02),
  11242. (ggml_fp16_t *) wdata + i2*nb2 + i0*ew0);
  11243. }
  11244. }
  11245. }
  11246. }
  11247. static void ggml_compute_forward_conv_1d_f32(
  11248. const struct ggml_compute_params * params,
  11249. const struct ggml_tensor * src0,
  11250. const struct ggml_tensor * src1,
  11251. struct ggml_tensor * dst) {
  11252. GGML_ASSERT(src0->type == GGML_TYPE_F32);
  11253. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  11254. GGML_ASSERT( dst->type == GGML_TYPE_F32);
  11255. int64_t t0 = ggml_perf_time_us();
  11256. UNUSED(t0);
  11257. GGML_TENSOR_BINARY_OP_LOCALS
  11258. const int ith = params->ith;
  11259. const int nth = params->nth;
  11260. const int nk = ne00;
  11261. const int ew0 = nk*ne01;
  11262. const int32_t s0 = ((const int32_t*)(dst->op_params))[0];
  11263. const int32_t p0 = ((const int32_t*)(dst->op_params))[1];
  11264. const int32_t d0 = ((const int32_t*)(dst->op_params))[2];
  11265. GGML_ASSERT(nb00 == sizeof(float));
  11266. GGML_ASSERT(nb10 == sizeof(float));
  11267. if (params->type == GGML_TASK_INIT) {
  11268. memset(params->wdata, 0, params->wsize);
  11269. float * const wdata = (float *) params->wdata + 0;
  11270. for (int64_t i11 = 0; i11 < ne11; i11++) {
  11271. const float * const src = (float *)((char *) src1->data + i11*nb11);
  11272. float * dst_data = wdata;
  11273. for (int64_t i0 = 0; i0 < ne0; i0++) {
  11274. for (int64_t ik = 0; ik < nk; ik++) {
  11275. const int idx0 = i0*s0 + ik*d0 - p0;
  11276. if(!(idx0 < 0 || idx0 >= ne10)) {
  11277. dst_data[i0*ew0 + i11*nk + ik] = src[idx0];
  11278. }
  11279. }
  11280. }
  11281. }
  11282. return;
  11283. }
  11284. if (params->type == GGML_TASK_FINALIZE) {
  11285. return;
  11286. }
  11287. // total rows in dst
  11288. const int nr = ne02;
  11289. // rows per thread
  11290. const int dr = (nr + nth - 1)/nth;
  11291. // row range for this thread
  11292. const int ir0 = dr*ith;
  11293. const int ir1 = MIN(ir0 + dr, nr);
  11294. float * const wdata = (float *) params->wdata + 0;
  11295. for (int i2 = 0; i2 < ne2; i2++) {
  11296. for (int i1 = ir0; i1 < ir1; i1++) {
  11297. float * dst_data = (float *)((char *) dst->data + i2*nb2 + i1*nb1);
  11298. for (int i0 = 0; i0 < ne0; i0++) {
  11299. ggml_vec_dot_f32(ew0, dst_data + i0,
  11300. (float *) ((char *) src0->data + i1*nb02),
  11301. (float *) wdata + i2*nb2 + i0*ew0);
  11302. }
  11303. }
  11304. }
  11305. }
  11306. static void gemm_f16_out_f32(int64_t m, int64_t n, int64_t k,
  11307. ggml_fp16_t * A,
  11308. ggml_fp16_t * B,
  11309. float * C,
  11310. const int ith, const int nth) {
  11311. // does not seem to make a difference
  11312. int64_t m0, m1, n0, n1;
  11313. // patches per thread
  11314. if (m > n) {
  11315. n0 = 0;
  11316. n1 = n;
  11317. // total patches in dst
  11318. const int np = m;
  11319. // patches per thread
  11320. const int dp = (np + nth - 1)/nth;
  11321. // patch range for this thread
  11322. m0 = dp*ith;
  11323. m1 = MIN(m0 + dp, np);
  11324. } else {
  11325. m0 = 0;
  11326. m1 = m;
  11327. // total patches in dst
  11328. const int np = n;
  11329. // patches per thread
  11330. const int dp = (np + nth - 1)/nth;
  11331. // patch range for this thread
  11332. n0 = dp*ith;
  11333. n1 = MIN(n0 + dp, np);
  11334. }
  11335. // block-tiling attempt
  11336. int64_t blck_n = 16;
  11337. int64_t blck_m = 16;
  11338. // int64_t CACHE_SIZE = 2 * 1024 * 1024; // 2MB
  11339. // int64_t blck_size = CACHE_SIZE / (sizeof(float) + 2 * sizeof(ggml_fp16_t) * K);
  11340. // if (blck_size > 0) {
  11341. // blck_0 = 4;
  11342. // blck_1 = blck_size / blck_0;
  11343. // if (blck_1 < 0) {
  11344. // blck_1 = 1;
  11345. // }
  11346. // // blck_0 = (int64_t)sqrt(blck_size);
  11347. // // blck_1 = blck_0;
  11348. // }
  11349. // // printf("%zd %zd %zd %zd\n", blck_size, K, blck_0, blck_1);
  11350. for (int j = n0; j < n1; j+=blck_n) {
  11351. for (int i = m0; i < m1; i+=blck_m) {
  11352. // printf("i j k => %d %d %d\n", i, j, K);
  11353. for (int ii = i; ii < i + blck_m && ii < m1; ii++) {
  11354. for (int jj = j; jj < j + blck_n && jj < n1; jj++) {
  11355. ggml_vec_dot_f16(k,
  11356. C + ii*n + jj,
  11357. A + ii * k,
  11358. B + jj * k);
  11359. }
  11360. }
  11361. }
  11362. }
  11363. }
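// [Editor's note] gemm_f16_out_f32 above computes C = A * B^T with row-major
// operands: C[i][j] is the dot product of row i of A and row j of B, evaluated via
// ggml_vec_dot_f16 on 16x16 tiles with the larger of the m/n dimensions split
// across threads. The scalar reference below states the same contraction for
// clarity only; the function name is the editor's own.
static void example_gemm_f16_out_f32_ref(int64_t m, int64_t n, int64_t k,
                                         const ggml_fp16_t * A,   // [m, k]
                                         const ggml_fp16_t * B,   // [n, k]
                                         float             * C) { // [m, n]
    for (int64_t i = 0; i < m; i++) {
        for (int64_t j = 0; j < n; j++) {
            float sum = 0.0f;
            for (int64_t l = 0; l < k; l++) {
                sum += GGML_FP16_TO_FP32(A[i*k + l]) * GGML_FP16_TO_FP32(B[j*k + l]);
            }
            C[i*n + j] = sum;
        }
    }
}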
  11364. // src0: kernel [OC, IC, K]
  11365. // src1: signal [N, IC, IL]
  11366. // dst: result [N, OL, IC*K]
  11367. static void ggml_compute_forward_conv_1d_stage_0_f32(
  11368. const struct ggml_compute_params * params,
  11369. const struct ggml_tensor * src0,
  11370. const struct ggml_tensor * src1,
  11371. struct ggml_tensor * dst) {
  11372. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  11373. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  11374. GGML_ASSERT( dst->type == GGML_TYPE_F16);
  11375. int64_t t0 = ggml_perf_time_us();
  11376. UNUSED(t0);
  11377. GGML_TENSOR_BINARY_OP_LOCALS;
  11378. const int64_t N = ne12;
  11379. const int64_t IC = ne11;
  11380. const int64_t IL = ne10;
  11381. const int64_t K = ne00;
  11382. const int64_t OL = ne1;
  11383. const int ith = params->ith;
  11384. const int nth = params->nth;
  11385. const int32_t s0 = ((const int32_t*)(dst->op_params))[0];
  11386. const int32_t p0 = ((const int32_t*)(dst->op_params))[1];
  11387. const int32_t d0 = ((const int32_t*)(dst->op_params))[2];
  11388. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  11389. GGML_ASSERT(nb10 == sizeof(float));
  11390. if (params->type == GGML_TASK_INIT) {
  11391. memset(dst->data, 0, ggml_nbytes(dst));
  11392. return;
  11393. }
  11394. if (params->type == GGML_TASK_FINALIZE) {
  11395. return;
  11396. }
  11397. // im2col: [N, IC, IL] => [N, OL, IC*K]
  11398. {
  11399. ggml_fp16_t * const wdata = (ggml_fp16_t *) dst->data;
  11400. for (int64_t in = 0; in < N; in++) {
  11401. for (int64_t iol = 0; iol < OL; iol++) {
  11402. for (int64_t iic = ith; iic < IC; iic+=nth) {
  11403. // micro kernel
  11404. ggml_fp16_t * dst_data = wdata + (in*OL + iol)*(IC*K); // [IC, K]
  11405. const float * const src_data = (float *)((char *) src1->data + in*nb12 + iic*nb11); // [IL]
  11406. for (int64_t ik = 0; ik < K; ik++) {
  11407. const int64_t iil = iol*s0 + ik*d0 - p0;
  11408. if (!(iil < 0 || iil >= IL)) {
  11409. dst_data[iic*K + ik] = GGML_FP32_TO_FP16(src_data[iil]);
  11410. }
  11411. }
  11412. }
  11413. }
  11414. }
  11415. }
  11416. }
  11417. // gemm: [N, OC, OL] = [OC, IC * K] x [N*OL, IC * K]
  11418. // src0: [OC, IC, K]
  11419. // src1: [N, OL, IC * K]
  11420. // result: [N, OC, OL]
  11421. static void ggml_compute_forward_conv_1d_stage_1_f16(
  11422. const struct ggml_compute_params * params,
  11423. const struct ggml_tensor * src0,
  11424. const struct ggml_tensor * src1,
  11425. struct ggml_tensor * dst) {
  11426. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  11427. GGML_ASSERT(src1->type == GGML_TYPE_F16);
  11428. GGML_ASSERT( dst->type == GGML_TYPE_F32);
  11429. int64_t t0 = ggml_perf_time_us();
  11430. UNUSED(t0);
  11431. if (params->type == GGML_TASK_INIT) {
  11432. return;
  11433. }
  11434. if (params->type == GGML_TASK_FINALIZE) {
  11435. return;
  11436. }
  11437. GGML_TENSOR_BINARY_OP_LOCALS;
  11438. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  11439. GGML_ASSERT(nb10 == sizeof(ggml_fp16_t));
  11440. GGML_ASSERT(nb0 == sizeof(float));
  11441. const int N = ne12;
  11442. const int OL = ne11;
  11443. const int OC = ne02;
  11444. const int IC = ne01;
  11445. const int K = ne00;
  11446. const int ith = params->ith;
  11447. const int nth = params->nth;
  11448. int64_t m = OC;
  11449. int64_t n = OL;
  11450. int64_t k = IC * K;
  11451. // [N, OC, OL] = [OC, IC * K] x [N*OL, IC * K]
  11452. for (int i = 0; i < N; i++) {
  11453. ggml_fp16_t * A = (ggml_fp16_t *)src0->data; // [m, k]
11454. ggml_fp16_t * B = (ggml_fp16_t *)src1->data + i * n * k; // [n, k]
  11455. float * C = (float *)dst->data + i * m * n; // [m, n]
  11456. gemm_f16_out_f32(m, n, k, A, B, C, ith, nth);
  11457. }
  11458. }
  11459. static void ggml_compute_forward_conv_1d(
  11460. const struct ggml_compute_params * params,
  11461. const struct ggml_tensor * src0,
  11462. const struct ggml_tensor * src1,
  11463. struct ggml_tensor * dst) {
  11464. switch(src0->type) {
  11465. case GGML_TYPE_F16:
  11466. {
  11467. ggml_compute_forward_conv_1d_f16_f32(params, src0, src1, dst);
  11468. } break;
  11469. case GGML_TYPE_F32:
  11470. {
  11471. ggml_compute_forward_conv_1d_f32(params, src0, src1, dst);
  11472. } break;
  11473. default:
  11474. {
  11475. GGML_ASSERT(false);
  11476. } break;
  11477. }
  11478. }
  11479. static void ggml_compute_forward_conv_1d_stage_0(
  11480. const struct ggml_compute_params * params,
  11481. const struct ggml_tensor * src0,
  11482. const struct ggml_tensor * src1,
  11483. struct ggml_tensor * dst) {
  11484. switch(src0->type) {
  11485. case GGML_TYPE_F16:
  11486. {
  11487. ggml_compute_forward_conv_1d_stage_0_f32(params, src0, src1, dst);
  11488. } break;
  11489. default:
  11490. {
  11491. GGML_ASSERT(false);
  11492. } break;
  11493. }
  11494. }
  11495. static void ggml_compute_forward_conv_1d_stage_1(
  11496. const struct ggml_compute_params * params,
  11497. const struct ggml_tensor * src0,
  11498. const struct ggml_tensor * src1,
  11499. struct ggml_tensor * dst) {
  11500. switch(src0->type) {
  11501. case GGML_TYPE_F16:
  11502. {
  11503. ggml_compute_forward_conv_1d_stage_1_f16(params, src0, src1, dst);
  11504. } break;
  11505. default:
  11506. {
  11507. GGML_ASSERT(false);
  11508. } break;
  11509. }
  11510. }
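// [Editor's note] conv_1d_stage_0 above is an im2col: it unfolds the signal
// [N, IC, IL] into rows of IC*K taps per output position, zero where a tap falls
// outside the signal, giving [N, OL, IC*K]. conv_1d_stage_1 then multiplies the
// flattened kernel [OC, IC*K] against those rows to produce [N, OC, OL]. The naive
// direct convolution the two stages are together equivalent to is sketched below
// for one batch element (illustrative only, the function name is the editor's own).
static void example_conv_1d_ref_f32(const float * w,   // kernel  [OC, IC, K]
                                    const float * x,   // signal  [IC, IL]
                                    float       * y,   // result  [OC, OL]
                                    int OC, int IC, int K, int IL, int OL,
                                    int s0, int p0, int d0) {
    for (int oc = 0; oc < OC; oc++) {
        for (int ol = 0; ol < OL; ol++) {
            float sum = 0.0f;
            for (int ic = 0; ic < IC; ic++) {
                for (int ik = 0; ik < K; ik++) {
                    const int il = ol*s0 + ik*d0 - p0; // input index for this tap
                    if (il >= 0 && il < IL) {          // out-of-range taps contribute 0
                        sum += w[(oc*IC + ic)*K + ik] * x[ic*IL + il];
                    }
                }
            }
            y[oc*OL + ol] = sum;
        }
    }
}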
  11511. // ggml_compute_forward_conv_transpose_1d
  11512. static void ggml_compute_forward_conv_transpose_1d_f16_f32(
  11513. const struct ggml_compute_params * params,
  11514. const struct ggml_tensor * src0,
  11515. const struct ggml_tensor * src1,
  11516. struct ggml_tensor * dst) {
  11517. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  11518. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  11519. GGML_ASSERT( dst->type == GGML_TYPE_F32);
  11520. int64_t t0 = ggml_perf_time_us();
  11521. UNUSED(t0);
  11522. GGML_TENSOR_BINARY_OP_LOCALS
  11523. const int ith = params->ith;
  11524. const int nth = params->nth;
  11525. const int nk = ne00*ne01*ne02;
  11526. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  11527. GGML_ASSERT(nb10 == sizeof(float));
  11528. if (params->type == GGML_TASK_INIT) {
  11529. memset(params->wdata, 0, params->wsize);
  11530. // permute kernel data (src0) from (K x Cout x Cin) to (Cin x K x Cout)
  11531. {
  11532. ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;
  11533. for (int64_t i02 = 0; i02 < ne02; i02++) {
  11534. for (int64_t i01 = 0; i01 < ne01; i01++) {
  11535. const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i02*nb02 + i01*nb01);
  11536. ggml_fp16_t * dst_data = wdata + i01*ne00*ne02;
  11537. for (int64_t i00 = 0; i00 < ne00; i00++) {
  11538. dst_data[i00*ne02 + i02] = src[i00];
  11539. }
  11540. }
  11541. }
  11542. }
  11543. // permute source data (src1) from (L x Cin) to (Cin x L)
  11544. {
  11545. ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + nk;
  11546. ggml_fp16_t * dst_data = wdata;
  11547. for (int64_t i11 = 0; i11 < ne11; i11++) {
  11548. const float * const src = (float *)((char *) src1->data + i11*nb11);
  11549. for (int64_t i10 = 0; i10 < ne10; i10++) {
  11550. dst_data[i10*ne11 + i11] = GGML_FP32_TO_FP16(src[i10]);
  11551. }
  11552. }
  11553. }
  11554. return;
  11555. }
  11556. if (params->type == GGML_TASK_FINALIZE) {
  11557. return;
  11558. }
  11559. const int32_t s0 = ((const int32_t*)(dst->op_params))[0];
  11560. // total rows in dst
  11561. const int nr = ne1;
  11562. // rows per thread
  11563. const int dr = (nr + nth - 1)/nth;
  11564. // row range for this thread
  11565. const int ir0 = dr*ith;
  11566. const int ir1 = MIN(ir0 + dr, nr);
  11567. ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;
  11568. ggml_fp16_t * const wdata_src = wdata + nk;
  11569. for (int i1 = ir0; i1 < ir1; i1++) {
  11570. float * dst_data = (float *)((char *) dst->data + i1*nb1);
  11571. ggml_fp16_t * wdata_kernel = wdata + i1*ne02*ne00;
  11572. for (int i10 = 0; i10 < ne10; i10++) {
  11573. const int i1n = i10*ne11;
  11574. for (int i00 = 0; i00 < ne00; i00++) {
  11575. float v = 0;
  11576. ggml_vec_dot_f16(ne02, &v,
  11577. (ggml_fp16_t *) wdata_src + i1n,
  11578. (ggml_fp16_t *) wdata_kernel + i00*ne02);
  11579. dst_data[i10*s0 + i00] += v;
  11580. }
  11581. }
  11582. }
  11583. }
  11584. static void ggml_compute_forward_conv_transpose_1d_f32(
  11585. const struct ggml_compute_params * params,
  11586. const struct ggml_tensor * src0,
  11587. const struct ggml_tensor * src1,
  11588. struct ggml_tensor * dst) {
  11589. GGML_ASSERT(src0->type == GGML_TYPE_F32);
  11590. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  11591. GGML_ASSERT( dst->type == GGML_TYPE_F32);
  11592. int64_t t0 = ggml_perf_time_us();
  11593. UNUSED(t0);
  11594. GGML_TENSOR_BINARY_OP_LOCALS
  11595. const int ith = params->ith;
  11596. const int nth = params->nth;
  11597. const int nk = ne00*ne01*ne02;
  11598. GGML_ASSERT(nb00 == sizeof(float));
  11599. GGML_ASSERT(nb10 == sizeof(float));
  11600. if (params->type == GGML_TASK_INIT) {
  11601. memset(params->wdata, 0, params->wsize);
  11602. // prepare kernel data (src0) from (K x Cout x Cin) to (Cin x K x Cout)
  11603. {
  11604. float * const wdata = (float *) params->wdata + 0;
  11605. for (int64_t i02 = 0; i02 < ne02; i02++) {
  11606. for (int64_t i01 = 0; i01 < ne01; i01++) {
  11607. const float * const src = (float *)((char *) src0->data + i02*nb02 + i01*nb01);
  11608. float * dst_data = wdata + i01*ne00*ne02;
  11609. for (int64_t i00 = 0; i00 < ne00; i00++) {
11610. dst_data[i00*ne02 + i02] = src[i00];
  11611. }
  11612. }
  11613. }
  11614. }
  11615. // prepare source data (src1)
  11616. {
  11617. float * const wdata = (float *) params->wdata + nk;
  11618. float * dst_data = wdata;
  11619. for (int64_t i11 = 0; i11 < ne11; i11++) {
  11620. const float * const src = (float *)((char *) src1->data + i11*nb11);
  11621. for (int64_t i10 = 0; i10 < ne10; i10++) {
  11622. dst_data[i10*ne11 + i11] = src[i10];
  11623. }
  11624. }
  11625. }
  11626. return;
  11627. }
  11628. if (params->type == GGML_TASK_FINALIZE) {
  11629. return;
  11630. }
  11631. const int32_t s0 = ((const int32_t*)(dst->op_params))[0];
  11632. // total rows in dst
  11633. const int nr = ne1;
  11634. // rows per thread
  11635. const int dr = (nr + nth - 1)/nth;
  11636. // row range for this thread
  11637. const int ir0 = dr*ith;
  11638. const int ir1 = MIN(ir0 + dr, nr);
  11639. float * const wdata = (float *) params->wdata + 0;
  11640. float * const wdata_src = wdata + nk;
  11641. for (int i1 = ir0; i1 < ir1; i1++) {
  11642. float * dst_data = (float *)((char *) dst->data + i1*nb1);
  11643. float * wdata_kernel = wdata + i1*ne02*ne00;
  11644. for (int i10 = 0; i10 < ne10; i10++) {
  11645. const int i1n = i10*ne11;
  11646. for (int i00 = 0; i00 < ne00; i00++) {
  11647. float v = 0;
  11648. ggml_vec_dot_f32(ne02, &v,
  11649. wdata_src + i1n,
  11650. wdata_kernel + i00*ne02);
  11651. dst_data[i10*s0 + i00] += v;
  11652. }
  11653. }
  11654. }
  11655. }
  11656. static void ggml_compute_forward_conv_transpose_1d(
  11657. const struct ggml_compute_params * params,
  11658. const struct ggml_tensor * src0,
  11659. const struct ggml_tensor * src1,
  11660. struct ggml_tensor * dst) {
  11661. switch (src0->type) {
  11662. case GGML_TYPE_F16:
  11663. {
  11664. ggml_compute_forward_conv_transpose_1d_f16_f32(params, src0, src1, dst);
  11665. } break;
  11666. case GGML_TYPE_F32:
  11667. {
  11668. ggml_compute_forward_conv_transpose_1d_f32(params, src0, src1, dst);
  11669. } break;
  11670. default:
  11671. {
  11672. GGML_ASSERT(false);
  11673. } break;
  11674. }
  11675. }
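// [Editor's note] In both conv_transpose_1d kernels above the accumulation
// dst_data[i10*s0 + i00] += v scatters input position i10 to output positions
// i10*s0 .. i10*s0 + K - 1, so with no padding the output length is
// (IL - 1)*s0 + K. A single-channel sketch of that scatter (illustrative only,
// the function name is the editor's own):
static void example_conv_transpose_1d_ref_f32(const float * w, // kernel [K]
                                              const float * x, // signal [IL]
                                              float       * y, // result [(IL-1)*s0 + K]
                                              int K, int IL, int s0) {
    const int OL = (IL - 1)*s0 + K;
    for (int i = 0; i < OL; i++) {
        y[i] = 0.0f;
    }
    for (int il = 0; il < IL; il++) {
        for (int ik = 0; ik < K; ik++) {
            y[il*s0 + ik] += x[il] * w[ik];
        }
    }
}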
  11676. // ggml_compute_forward_conv_2d
  11677. static void ggml_compute_forward_conv_2d_f16_f32(
  11678. const struct ggml_compute_params * params,
  11679. const struct ggml_tensor * src0,
  11680. const struct ggml_tensor * src1,
  11681. struct ggml_tensor * dst) {
  11682. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  11683. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  11684. GGML_ASSERT( dst->type == GGML_TYPE_F32);
  11685. int64_t t0 = ggml_perf_time_us();
  11686. UNUSED(t0);
  11687. GGML_TENSOR_BINARY_OP_LOCALS
  11688. const int ith = params->ith;
  11689. const int nth = params->nth;
  11690. const int nk0 = ne00;
  11691. const int nk1 = ne01;
  11692. // size of the convolution row - the kernel size unrolled across all channels
  11693. const int ew0 = nk0*nk1*ne02;
  11694. const int32_t s0 = ((const int32_t*)(dst->op_params))[0];
  11695. const int32_t s1 = ((const int32_t*)(dst->op_params))[1];
  11696. const int32_t p0 = ((const int32_t*)(dst->op_params))[2];
  11697. const int32_t p1 = ((const int32_t*)(dst->op_params))[3];
  11698. const int32_t d0 = ((const int32_t*)(dst->op_params))[4];
  11699. const int32_t d1 = ((const int32_t*)(dst->op_params))[5];
  11700. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  11701. GGML_ASSERT(nb10 == sizeof(float));
  11702. if (params->type == GGML_TASK_INIT) {
  11703. memset(params->wdata, 0, params->wsize);
  11704. // prepare source data (src1)
  11705. {
  11706. ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;
  11707. for (int i13 = 0; i13 < ne13; i13++) {
  11708. for (int i12 = 0; i12 < ne12; i12++) {
  11709. const float * const src = (float *)((char *) src1->data + i13*nb13 + i12*nb12);
  11710. ggml_fp16_t * dst_data = wdata + i13*(ne1*ne0*ew0);
  11711. for (int i1 = 0; i1 < ne1; i1++) {
  11712. for (int i0 = 0; i0 < ne0; i0++) {
  11713. for (int ik1 = 0; ik1 < nk1; ik1++) {
  11714. for (int ik0 = 0; ik0 < nk0; ik0++) {
  11715. const int idx0 = i0*s0 + ik0*d0 - p0;
  11716. const int idx1 = i1*s1 + ik1*d1 - p1;
  11717. if (!(idx1 < 0 || idx1 >= ne11 || idx0 < 0 || idx0 >= ne10)) {
  11718. dst_data[(i1*ne0 + i0)*ew0 + i12*(nk0*nk1) + ik1*nk0 + ik0] =
  11719. GGML_FP32_TO_FP16(src[idx1*ne10 + idx0]);
  11720. }
  11721. }
  11722. }
  11723. }
  11724. }
  11725. }
  11726. }
  11727. }
  11728. return;
  11729. }
  11730. if (params->type == GGML_TASK_FINALIZE) {
  11731. return;
  11732. }
  11733. // total patches in dst
  11734. const int np = ne2;
  11735. // patches per thread
  11736. const int dp = (np + nth - 1)/nth;
  11737. // patch range for this thread
  11738. const int ip0 = dp*ith;
  11739. const int ip1 = MIN(ip0 + dp, np);
  11740. ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;
  11741. for (int i3 = 0; i3 < ne3; i3++) {
  11742. for (int i2 = ip0; i2 < ip1; i2++) {
  11743. float * dst_data = (float *)((char *) dst->data + i3*nb3 + i2*nb2);
  11744. for (int i1 = 0; i1 < ne1; ++i1) {
  11745. for (int i0 = 0; i0 < ne0; ++i0) {
  11746. ggml_vec_dot_f16(ew0, dst_data + i1*ne0 + i0,
  11747. (ggml_fp16_t *) ((char *) src0->data + i2*nb03),
  11748. (ggml_fp16_t *) wdata + i3*nb3 + (i1*ne0 + i0)*ew0);
  11749. }
  11750. }
  11751. }
  11752. }
  11753. }
  11754. static void ggml_compute_forward_conv_2d(
  11755. const struct ggml_compute_params * params,
  11756. const struct ggml_tensor * src0,
  11757. const struct ggml_tensor * src1,
  11758. struct ggml_tensor * dst) {
  11759. switch (src0->type) {
  11760. case GGML_TYPE_F16:
  11761. {
  11762. ggml_compute_forward_conv_2d_f16_f32(params, src0, src1, dst);
  11763. } break;
  11764. case GGML_TYPE_F32:
  11765. {
  11766. //ggml_compute_forward_conv_2d_f32(params, src0, src1, dst);
  11767. GGML_ASSERT(false);
  11768. } break;
  11769. default:
  11770. {
  11771. GGML_ASSERT(false);
  11772. } break;
  11773. }
  11774. }

// ggml_compute_forward_conv_transpose_2d

static void ggml_compute_forward_conv_transpose_2d(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    GGML_TENSOR_BINARY_OP_LOCALS

    const int ith = params->ith;
    const int nth = params->nth;

    const int nk = ne00*ne01*ne02*ne03;

    GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nb10 == sizeof(float));

    if (params->type == GGML_TASK_INIT) {
        memset(params->wdata, 0, params->wsize);

        // permute kernel data (src0) from (Kw x Kh x Cout x Cin) to (Cin x Kw x Kh x Cout)
        {
            ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;

            for (int64_t i03 = 0; i03 < ne03; i03++) {
                for (int64_t i02 = 0; i02 < ne02; i02++) {
                    const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i03*nb03 + i02*nb02);
                    ggml_fp16_t * dst_data = wdata + i02*ne01*ne00*ne03;
                    for (int64_t i01 = 0; i01 < ne01; i01++) {
                        for (int64_t i00 = 0; i00 < ne00; i00++) {
                            dst_data[i01*ne00*ne03 + i00*ne03 + i03] = src[i01 * ne00 + i00];
                        }
                    }
                }
            }
        }

        // permute source data (src1) from (Sw x Sh x Cin) to (Cin x Sw x Sh)
        {
            ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + nk;

            for (int i12 = 0; i12 < ne12; i12++) {
                for (int i11 = 0; i11 < ne11; i11++) {
                    const float * const src = (float *)((char *) src1->data + i12*nb12 + i11*nb11);
                    ggml_fp16_t * dst_data = wdata + i11*ne10*ne12;
                    for (int i10 = 0; i10 < ne10; i10++) {
                        dst_data[i10*ne12 + i12] = GGML_FP32_TO_FP16(src[i10]);
                    }
                }
            }
        }

        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int32_t stride = ggml_get_op_params_i32(dst, 0);

    // total patches in dst
    const int np = ne2;

    // patches per thread
    const int dp = (np + nth - 1)/nth;

    // patch range for this thread
    const int ip0 = dp*ith;
    const int ip1 = MIN(ip0 + dp, np);

    ggml_fp16_t * const wdata     = (ggml_fp16_t *) params->wdata + 0;
    ggml_fp16_t * const wdata_src = wdata + nk;

    for (int i2 = ip0; i2 < ip1; i2++) { // Cout
        float * dst_data = (float *)((char *) dst->data + i2*nb2);
        ggml_fp16_t * wdata_kernel = wdata + i2*ne01*ne00*ne03;
        for (int i11 = 0; i11 < ne11; i11++) {
            for (int i10 = 0; i10 < ne10; i10++) {
                const int i1n = i11*ne10*ne12 + i10*ne12;
                for (int i01 = 0; i01 < ne01; i01++) {
                    for (int i00 = 0; i00 < ne00; i00++) {
                        float v = 0;
                        ggml_vec_dot_f16(ne03, &v,
                                wdata_src + i1n,
                                wdata_kernel + i01*ne00*ne03 + i00*ne03);
                        dst_data[(i11*stride + i01)*ne0 + i10*stride + i00] += v;
                    }
                }
            }
        }
    }
}
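
// note on ggml_compute_forward_conv_transpose_2d:
// - src0 is the kernel with ne = (Kw, Kh, Cout, Cin) and src1 is the input with ne = (Sw, Sh, Cin)
// - INIT permutes both into wdata so the channel (Cin) dimension is contiguous, which lets the inner
//   loop reduce over Cin with a single ggml_vec_dot_f16 per (kernel element, input pixel) pair
// - COMPUTE scatters each product into dst at row i11*stride + i01 and column i10*stride + i00, i.e. the
//   usual zero-padding transposed-convolution relation out = (in - 1)*stride + K, one dst plane per Cout
// - illustrative example (not from this file): Sw = Sh = 2, Kw = Kh = 3, stride = 2 gives a
//   (2 - 1)*2 + 3 = 5 x 5 output plane, and input pixel (1,1) contributes to output rows/cols 2..4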

// ggml_compute_forward_pool_1d_sk_p0

static void ggml_compute_forward_pool_1d_sk_p0(
        const struct ggml_compute_params * params,
        const enum ggml_op_pool op,
        const struct ggml_tensor * src,
        const int k,
        struct ggml_tensor * dst) {
    assert(src->type == GGML_TYPE_F32);
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const char * cdata = (const char *)src->data;
    const char * const data_end = cdata + ggml_nbytes(src);
    float * drow = (float *)dst->data;

    const int64_t rs = dst->ne[0];

    while (cdata < data_end) {
        const float * const srow = (const float *)cdata;

        int j = 0;

        for (int64_t i = 0; i < rs; ++i) {
            switch (op) {
                case GGML_OP_POOL_AVG:   drow[i] = 0;        break;
                case GGML_OP_POOL_MAX:   drow[i] = -FLT_MAX; break;
                case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
            }
            for (int ki = 0; ki < k; ++ki) {
                switch (op) {
                    case GGML_OP_POOL_AVG: drow[i] += srow[j]; break;
                    case GGML_OP_POOL_MAX: if (srow[j] > drow[i]) drow[i] = srow[j]; break;
                    case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
                }
                ++j;
            }
            switch (op) {
                case GGML_OP_POOL_AVG:   drow[i] /= k;        break;
                case GGML_OP_POOL_MAX:                        break;
                case GGML_OP_POOL_COUNT: GGML_ASSERT(false);  break;
            }
        }

        cdata += src->nb[1];
        drow  += rs;
    }
}

// ggml_compute_forward_pool_1d

static void ggml_compute_forward_pool_1d(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    const int32_t * opts = (const int32_t *)dst->op_params;
    enum ggml_op_pool op = opts[0];
    const int k0 = opts[1];
    const int s0 = opts[2];
    const int p0 = opts[3];
    GGML_ASSERT(p0 == 0); // padding not supported
    GGML_ASSERT(k0 == s0); // only s = k supported

    ggml_compute_forward_pool_1d_sk_p0(params, op, src0, k0, dst);
}
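
// note on ggml_compute_forward_pool_1d:
// - dst->op_params holds (as int32): [0] = op (GGML_OP_POOL_AVG/MAX), [1] = k0, [2] = s0, [3] = p0;
//   only s0 == k0 and p0 == 0 are supported here, so each output element covers one non-overlapping
//   window of k0 inputs and dst->ne[0] == src->ne[0]/k0
// - a minimal usage sketch (assuming the ggml_pool_1d() builder declared in ggml.h fills these params):
//       struct ggml_tensor * y = ggml_pool_1d(ctx, x, GGML_OP_POOL_AVG, /*k0*/ 2, /*s0*/ 2, /*p0*/ 0);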

// ggml_compute_forward_pool_2d_sk_p0

static void ggml_compute_forward_pool_2d_sk_p0(
        const struct ggml_compute_params * params,
        const enum ggml_op_pool op,
        const struct ggml_tensor * src,
        const int k0,
        const int k1,
        struct ggml_tensor * dst) {
    assert(src->type == GGML_TYPE_F32);
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const char * cdata = (const char*)src->data;
    const char * const data_end = cdata + ggml_nbytes(src);

    const int64_t px = dst->ne[0];
    const int64_t py = dst->ne[1];
    const int64_t pa = px * py;

    float * dplane = (float *)dst->data;

    const int ka = k0 * k1;

    while (cdata < data_end) {
        for (int oy = 0; oy < py; ++oy) {
            float * const drow = dplane + oy * px;
            for (int ox = 0; ox < px; ++ox) {
                float * const out = drow + ox;
                switch (op) {
                    case GGML_OP_POOL_AVG:   *out = 0;           break;
                    case GGML_OP_POOL_MAX:   *out = -FLT_MAX;    break;
                    case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
                }

                const int ix = ox * k0;
                const int iy = oy * k1;

                for (int ky = 0; ky < k1; ++ky) {
                    const float * const srow = (const float *)(cdata + src->nb[1] * (iy + ky));
                    for (int kx = 0; kx < k0; ++kx) {
                        int j = ix + kx;
                        switch (op) {
                            case GGML_OP_POOL_AVG: *out += srow[j]; break;
                            case GGML_OP_POOL_MAX: if (srow[j] > *out) *out = srow[j]; break;
                            case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
                        }
                    }
                }
                switch (op) {
                    case GGML_OP_POOL_AVG:   *out /= ka;          break;
                    case GGML_OP_POOL_MAX:                        break;
                    case GGML_OP_POOL_COUNT: GGML_ASSERT(false);  break;
                }
            }
        }

        cdata  += src->nb[2];
        dplane += pa;
    }
}

// ggml_compute_forward_pool_2d

static void ggml_compute_forward_pool_2d(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    const int32_t * opts = (const int32_t *)dst->op_params;
    enum ggml_op_pool op = opts[0];
    const int k0 = opts[1];
    const int k1 = opts[2];
    const int s0 = opts[3];
    const int s1 = opts[4];
    const int p0 = opts[5];
    const int p1 = opts[6];
    GGML_ASSERT(p0 == 0);
    GGML_ASSERT(p1 == 0); // padding not supported
    GGML_ASSERT(k0 == s0);
    GGML_ASSERT(k1 == s1); // only s = k supported

    ggml_compute_forward_pool_2d_sk_p0(params, op, src0, k0, k1, dst);
}
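
// note on ggml_compute_forward_pool_2d:
// - dst->op_params holds (as int32): [0] = op, [1] = k0, [2] = k1, [3] = s0, [4] = s1, [5] = p0, [6] = p1;
//   as in the 1d case only stride == kernel and zero padding are supported, so each output element
//   (ox, oy) pools the k0 x k1 input block starting at (ox*k0, oy*k1) and AVG divides by ka = k0*k1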

// ggml_compute_forward_upscale

static void ggml_compute_forward_upscale_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    GGML_ASSERT(src0->nb[0] == sizeof(float));

    const int ith = params->ith;

    GGML_TENSOR_UNARY_OP_LOCALS

    const int scale_factor = dst->op_params[0];

    // TODO: optimize
    for (int i03 = 0; i03 < ne03; i03++) {
        for (int i02 = ith; i02 < ne02; i02++) {
            for (int m = 0; m < dst->ne[1]; m++) {
                int i01 = m / scale_factor;
                for (int n = 0; n < dst->ne[0]; n++) {
                    int i00 = n / scale_factor;

                    const float * x = (float *)((char *) src0->data + i00 * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03);

                    float * y = (float *)((char *) dst->data + n * dst->nb[0] + m * dst->nb[1] + i02 * dst->nb[2] + i03 * dst->nb[3]);

                    *y = *x;
                }
            }
        }
    }
}

static void ggml_compute_forward_upscale(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_upscale_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
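
// note on ggml_compute_forward_upscale_f32:
// - nearest-neighbor upscaling by the integer factor stored in dst->op_params[0]:
//   dst element (n, m) copies src0 element (n / scale_factor, m / scale_factor) of the same
//   (i02, i03) plane; e.g. with scale_factor = 2 a 4x4 plane becomes 8x8 and each source value
//   is replicated into a 2x2 block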
  12028. // ggml_compute_forward_flash_attn
  12029. static void ggml_compute_forward_flash_attn_f32(
  12030. const struct ggml_compute_params * params,
  12031. const struct ggml_tensor * q,
  12032. const struct ggml_tensor * k,
  12033. const struct ggml_tensor * v,
  12034. const bool masked,
  12035. struct ggml_tensor * dst) {
  12036. int64_t t0 = ggml_perf_time_us();
  12037. UNUSED(t0);
  12038. GGML_TENSOR_LOCALS(int64_t, neq, q, ne)
  12039. GGML_TENSOR_LOCALS(size_t, nbq, q, nb)
  12040. GGML_TENSOR_LOCALS(int64_t, nek, k, ne)
  12041. GGML_TENSOR_LOCALS(size_t, nbk, k, nb)
  12042. GGML_TENSOR_LOCALS(int64_t, nev, v, ne)
  12043. GGML_TENSOR_LOCALS(size_t, nbv, v, nb)
  12044. GGML_TENSOR_LOCALS(int64_t, ne, dst, ne)
  12045. GGML_TENSOR_LOCALS(size_t, nb, dst, nb)
  12046. const int ith = params->ith;
  12047. const int nth = params->nth;
  12048. const int64_t D = neq0;
  12049. const int64_t N = neq1;
  12050. const int64_t P = nek1 - N;
  12051. const int64_t M = P + N;
  12052. const int Mup = ggml_up(M, GGML_SOFT_MAX_UNROLL);
  12053. GGML_ASSERT(ne0 == D);
  12054. GGML_ASSERT(ne1 == N);
  12055. GGML_ASSERT(P >= 0);
  12056. GGML_ASSERT(nbq0 == sizeof(float));
  12057. GGML_ASSERT(nbk0 == sizeof(float));
  12058. GGML_ASSERT(nbv0 == sizeof(float));
  12059. GGML_ASSERT(neq0 == D);
  12060. GGML_ASSERT(nek0 == D);
  12061. GGML_ASSERT(nev1 == D);
  12062. GGML_ASSERT(neq1 == N);
  12063. GGML_ASSERT(nek1 == N + P);
  12064. GGML_ASSERT(nev1 == D);
  12065. // dst cannot be transposed or permuted
  12066. GGML_ASSERT(nb0 == sizeof(float));
  12067. GGML_ASSERT(nb0 <= nb1);
  12068. GGML_ASSERT(nb1 <= nb2);
  12069. GGML_ASSERT(nb2 <= nb3);
  12070. if (params->type == GGML_TASK_INIT) {
  12071. return;
  12072. }
  12073. if (params->type == GGML_TASK_FINALIZE) {
  12074. return;
  12075. }
  12076. // parallelize by q rows using ggml_vec_dot_f32
  12077. // total rows in q
  12078. const int nr = neq1*neq2*neq3;
  12079. // rows per thread
  12080. const int dr = (nr + nth - 1)/nth;
  12081. // row range for this thread
  12082. const int ir0 = dr*ith;
  12083. const int ir1 = MIN(ir0 + dr, nr);
  12084. const float scale = 1.0f/sqrtf(D);
  12085. //printf("P=%d N=%d D=%d ir0=%d ir1=%d scale = %f\n", P, N, D, ir0, ir1, scale);
  12086. for (int ir = ir0; ir < ir1; ++ir) {
  12087. // q indices
  12088. const int iq3 = ir/(neq2*neq1);
  12089. const int iq2 = (ir - iq3*neq2*neq1)/neq1;
  12090. const int iq1 = (ir - iq3*neq2*neq1 - iq2*neq1);
  12091. float * S = (float *) params->wdata + ith*(Mup + CACHE_LINE_SIZE_F32);
  12092. for (int i = M; i < Mup; ++i) {
  12093. S[i] = -INFINITY;
  12094. }
  12095. const int64_t masked_begin = masked ? (P + iq1 + 1) : M;
  12096. for (int64_t ic = 0; ic < masked_begin; ++ic) {
  12097. // k indices
  12098. const int ik3 = iq3;
  12099. const int ik2 = iq2 % nek2;
  12100. const int ik1 = ic;
  12101. // S indices
  12102. const int i1 = ik1;
  12103. ggml_vec_dot_f32(neq0,
  12104. S + i1,
  12105. (float *) ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)),
  12106. (float *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)));
  12107. }
  12108. // scale
  12109. ggml_vec_scale_f32(masked_begin, S, scale);
  12110. for (int64_t i = masked_begin; i < M; i++) {
  12111. S[i] = -INFINITY;
  12112. }
  12113. // softmax
  12114. // exclude known -INF S[..] values from max and loop
  12115. // dont forget to set their SW values to zero
  12116. {
  12117. float max = -INFINITY;
  12118. ggml_vec_max_f32(masked_begin, &max, S);
  12119. ggml_float sum = 0.0;
  12120. {
  12121. #ifdef GGML_SOFT_MAX_ACCELERATE
  12122. max = -max;
  12123. vDSP_vsadd(S, 1, &max, S, 1, Mup);
  12124. vvexpf(S, S, &Mup);
  12125. ggml_vec_sum_f32(Mup, &sum, S);
  12126. #else
  12127. uint16_t scvt[GGML_SOFT_MAX_UNROLL]; UNUSED(scvt);
  12128. ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 };
  12129. for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) {
  12130. if (i >= masked_begin) {
  12131. break;
  12132. }
  12133. float * SS = S + i;
  12134. for (int j = 0; j < GGML_SOFT_MAX_UNROLL; ++j) {
  12135. if (i + j >= masked_begin) {
  12136. break;
  12137. } else if (SS[j] == -INFINITY) {
  12138. SS[j] = 0.0f;
  12139. } else {
  12140. #ifndef GGML_FLASH_ATTN_EXP_FP16
  12141. const float val = expf(SS[j] - max);
  12142. #else
  12143. ggml_fp16_t s = GGML_FP32_TO_FP16(SS[j] - max);
  12144. memcpy(&scvt[j], &s, sizeof(uint16_t));
  12145. const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt[j]]);
  12146. #endif
  12147. sump[j] += (ggml_float)val;
  12148. SS[j] = val;
  12149. }
  12150. }
  12151. }
  12152. for (int i = 0; i < GGML_SOFT_MAX_UNROLL; i++) {
  12153. sum += sump[i];
  12154. }
  12155. #endif
  12156. }
  12157. assert(sum > 0.0);
  12158. sum = 1.0/sum;
  12159. ggml_vec_scale_f32(masked_begin, S, sum);
  12160. #ifndef NDEBUG
  12161. for (int i = 0; i < masked_begin; ++i) {
  12162. assert(!isnan(S[i]));
  12163. assert(!isinf(S[i]));
  12164. }
  12165. #endif
  12166. }
  12167. for (int64_t ic = 0; ic < nev1; ++ic) {
  12168. // dst indices
  12169. const int i1 = iq1;
  12170. const int i2 = iq2;
  12171. const int i3 = iq3;
  12172. // v indices
  12173. const int iv2 = iq2 % nev2;
  12174. const int iv3 = iq3;
  12175. ggml_vec_dot_f32(masked_begin,
  12176. (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)),
  12177. (float *) ((char *) v->data + ( ic*nbv1 + iv2*nbv2 + iv3*nbv3)),
  12178. S);
  12179. }
  12180. }
  12181. }
  12182. static void ggml_compute_forward_flash_attn_f16(
  12183. const struct ggml_compute_params * params,
  12184. const struct ggml_tensor * q,
  12185. const struct ggml_tensor * k,
  12186. const struct ggml_tensor * v,
  12187. const bool masked,
  12188. struct ggml_tensor * dst) {
  12189. int64_t t0 = ggml_perf_time_us();
  12190. UNUSED(t0);
  12191. GGML_TENSOR_LOCALS(int64_t, neq, q, ne)
  12192. GGML_TENSOR_LOCALS(size_t, nbq, q, nb)
  12193. GGML_TENSOR_LOCALS(int64_t, nek, k, ne)
  12194. GGML_TENSOR_LOCALS(size_t, nbk, k, nb)
  12195. GGML_TENSOR_LOCALS(int64_t, nev, v, ne)
  12196. GGML_TENSOR_LOCALS(size_t, nbv, v, nb)
  12197. GGML_TENSOR_LOCALS(int64_t, ne, dst, ne)
  12198. GGML_TENSOR_LOCALS(size_t, nb, dst, nb)
  12199. const int ith = params->ith;
  12200. const int nth = params->nth;
  12201. const int64_t D = neq0;
  12202. const int64_t N = neq1;
  12203. const int64_t P = nek1 - N;
  12204. const int64_t M = P + N;
  12205. const int Mup = ggml_up(M, GGML_SOFT_MAX_UNROLL);
  12206. GGML_ASSERT(ne0 == D);
  12207. GGML_ASSERT(ne1 == N);
  12208. GGML_ASSERT(P >= 0);
  12209. GGML_ASSERT(nbq0 == sizeof(ggml_fp16_t));
  12210. GGML_ASSERT(nbk0 == sizeof(ggml_fp16_t));
  12211. GGML_ASSERT(nbv0 == sizeof(ggml_fp16_t));
  12212. GGML_ASSERT(neq0 == D);
  12213. GGML_ASSERT(nek0 == D);
  12214. GGML_ASSERT(nev1 == D);
  12215. GGML_ASSERT(neq1 == N);
  12216. GGML_ASSERT(nek1 == N + P);
  12217. GGML_ASSERT(nev1 == D);
  12218. // dst cannot be transposed or permuted
  12219. GGML_ASSERT(nb0 == sizeof(float));
  12220. GGML_ASSERT(nb0 <= nb1);
  12221. GGML_ASSERT(nb1 <= nb2);
  12222. GGML_ASSERT(nb2 <= nb3);
  12223. if (params->type == GGML_TASK_INIT) {
  12224. return;
  12225. }
  12226. if (params->type == GGML_TASK_FINALIZE) {
  12227. return;
  12228. }
  12229. // parallelize by q rows using ggml_vec_dot_f32
  12230. // total rows in q
  12231. const int nr = neq1*neq2*neq3;
  12232. // rows per thread
  12233. const int dr = (nr + nth - 1)/nth;
  12234. // row range for this thread
  12235. const int ir0 = dr*ith;
  12236. const int ir1 = MIN(ir0 + dr, nr);
  12237. const float scale = 1.0f/sqrtf(D);
  12238. //printf("P=%d N=%d D=%d ir0=%d ir1=%d scale = %f\n", P, N, D, ir0, ir1, scale);
  12239. for (int ir = ir0; ir < ir1; ++ir) {
  12240. // q indices
  12241. const int iq3 = ir/(neq2*neq1);
  12242. const int iq2 = (ir - iq3*neq2*neq1)/neq1;
  12243. const int iq1 = (ir - iq3*neq2*neq1 - iq2*neq1);
  12244. float * S = (float *) params->wdata + ith*(2*Mup + CACHE_LINE_SIZE_F32);
  12245. for (int i = M; i < Mup; ++i) {
  12246. S[i] = -INFINITY;
  12247. }
  12248. if (GGML_VEC_DOT_UNROLL > 2 || nek1 % GGML_VEC_DOT_UNROLL != 0) {
  12249. for (int64_t ic = 0; ic < nek1; ++ic) {
  12250. // k indices
  12251. const int ik3 = iq3;
  12252. const int ik2 = iq2 % nek2;
  12253. const int ik1 = ic;
  12254. // S indices
  12255. const int i1 = ik1;
  12256. ggml_vec_dot_f16(neq0,
  12257. S + i1,
  12258. (ggml_fp16_t *) ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)),
  12259. (ggml_fp16_t *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)));
  12260. }
  12261. } else {
  12262. for (int64_t ic = 0; ic < nek1; ic += GGML_VEC_DOT_UNROLL) {
  12263. // k indices
  12264. const int ik3 = iq3;
  12265. const int ik2 = iq2 % nek2;
  12266. const int ik1 = ic;
  12267. // S indices
  12268. const int i1 = ik1;
  12269. ggml_vec_dot_f16_unroll(neq0, nbk1,
  12270. S + i1,
  12271. ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)),
  12272. (ggml_fp16_t *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)));
  12273. }
  12274. }
  12275. // scale
  12276. ggml_vec_scale_f32(nek1, S, scale);
  12277. if (masked) {
  12278. for (int64_t i = P; i < M; i++) {
  12279. if (i > P + iq1) {
  12280. S[i] = -INFINITY;
  12281. }
  12282. }
  12283. }
  12284. // softmax
  12285. // todo: exclude known -INF S[..] values from max and loop, assuming their results to be zero.
  12286. // dont forget to set their S values to zero
  12287. {
  12288. float max = -INFINITY;
  12289. ggml_vec_max_f32(M, &max, S);
  12290. ggml_float sum = 0.0;
  12291. {
  12292. #ifdef GGML_SOFT_MAX_ACCELERATE
  12293. max = -max;
  12294. vDSP_vsadd(S, 1, &max, S, 1, Mup);
  12295. vvexpf(S, S, &Mup);
  12296. ggml_vec_sum_f32(Mup, &sum, S);
  12297. #else
  12298. uint16_t scvt[GGML_SOFT_MAX_UNROLL];
  12299. ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 };
  12300. for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) {
  12301. float * SS = S + i;
  12302. for (int j = 0; j < GGML_SOFT_MAX_UNROLL; ++j) {
  12303. if (SS[j] == -INFINITY) {
  12304. SS[j] = 0.0f;
  12305. } else {
  12306. ggml_fp16_t s = GGML_FP32_TO_FP16(SS[j] - max);
  12307. memcpy(&scvt[j], &s, sizeof(uint16_t));
  12308. const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt[j]]);
  12309. sump[j] += (ggml_float)val;
  12310. SS[j] = val;
  12311. }
  12312. }
  12313. }
  12314. for (int i = 0; i < GGML_SOFT_MAX_UNROLL; i++) {
  12315. sum += sump[i];
  12316. }
  12317. #endif
  12318. }
  12319. assert(sum > 0.0);
  12320. sum = 1.0/sum;
  12321. ggml_vec_scale_f32(M, S, sum);
  12322. #ifndef NDEBUG
  12323. for (int i = 0; i < M; ++i) {
  12324. assert(!isnan(S[i]));
  12325. assert(!isinf(S[i]));
  12326. }
  12327. #endif
  12328. }
  12329. ggml_fp16_t * S16 = (ggml_fp16_t *) ((float *) params->wdata + ith*(2*Mup + CACHE_LINE_SIZE_F32) + Mup);
  12330. for (int64_t i = 0; i < M; i++) {
  12331. S16[i] = GGML_FP32_TO_FP16(S[i]);
  12332. }
  12333. // todo: exclude known zero S[..] values from dot (reducing nev0 and increasing begin of v and S16).
  12334. if (GGML_VEC_DOT_UNROLL == 1 || (nev1 % GGML_VEC_DOT_UNROLL != 0)) {
  12335. for (int64_t ic = 0; ic < nev1; ++ic) {
  12336. // dst indices
  12337. const int i1 = iq1;
  12338. const int i2 = iq2;
  12339. const int i3 = iq3;
  12340. // v indices
  12341. const int iv2 = iq2 % nev2;
  12342. const int iv3 = iq3;
  12343. ggml_vec_dot_f16(nev0,
  12344. (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)),
  12345. (ggml_fp16_t *) ((char *) v->data + ( ic*nbv1 + iv2*nbv2 + iv3*nbv3)),
  12346. S16);
  12347. }
  12348. } else {
  12349. for (int64_t ic = 0; ic < nev1; ic += GGML_VEC_DOT_UNROLL) {
  12350. // dst indices
  12351. const int i1 = iq1;
  12352. const int i2 = iq2;
  12353. const int i3 = iq3;
  12354. // v indices
  12355. const int iv2 = iq2 % nev2;
  12356. const int iv3 = iq3;
  12357. ggml_vec_dot_f16_unroll(nev0, nbv1,
  12358. (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)),
  12359. ((char *) v->data + ( ic*nbv1 + iv2*nbv2 + iv3*nbv3)),
  12360. S16);
  12361. }
  12362. }
  12363. }
  12364. }

static void ggml_compute_forward_flash_attn(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * q,
        const struct ggml_tensor * k,
        const struct ggml_tensor * v,
        const bool masked,
        struct ggml_tensor * dst) {
    switch (q->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_flash_attn_f16(params, q, k, v, masked, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_flash_attn_f32(params, q, k, v, masked, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
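
// note on ggml_compute_forward_flash_attn_{f32,f16}:
// - both variants compute, per q row, S = softmax(mask(q @ K^T * 1/sqrt(D))) and then dst = S @ V,
//   subtracting the row max before exponentiation for numerical stability
// - P = nek1 - neq1 is the number of "past" positions; with masked == true, row iq1 only attends to
//   the first P + iq1 + 1 key positions (causal masking), the remaining scores are -INFINITY / skipped
// - the per-thread scratch S lives in params->wdata; the f16 variant additionally keeps an fp16 copy
//   (S16) of the softmax result so the final S @ V reduction can use ggml_vec_dot_f16 / _unroll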
  12387. // ggml_compute_forward_flash_ff
  12388. static void ggml_compute_forward_flash_ff_f16(
  12389. const struct ggml_compute_params * params,
  12390. const struct ggml_tensor * a, // F16
  12391. const struct ggml_tensor * b0, // F16 fc_w
  12392. const struct ggml_tensor * b1, // F32 fc_b
  12393. const struct ggml_tensor * c0, // F16 proj_w
  12394. const struct ggml_tensor * c1, // F32 proj_b
  12395. struct ggml_tensor * dst) {
  12396. int64_t t0 = ggml_perf_time_us();
  12397. UNUSED(t0);
  12398. GGML_TENSOR_LOCALS(int64_t, nea, a, ne)
  12399. GGML_TENSOR_LOCALS(size_t, nba, a, nb)
  12400. GGML_TENSOR_LOCALS(int64_t, neb0, b0, ne)
  12401. GGML_TENSOR_LOCALS(size_t, nbb0, b0, nb)
  12402. GGML_TENSOR_LOCALS(int64_t, neb1, b1, ne)
  12403. GGML_TENSOR_LOCALS(size_t, nbb1, b1, nb)
  12404. GGML_TENSOR_LOCALS(int64_t, nec0, c0, ne)
  12405. GGML_TENSOR_LOCALS(size_t, nbc0, c0, nb)
  12406. GGML_TENSOR_LOCALS(int64_t, nec1, c1, ne)
  12407. GGML_TENSOR_LOCALS(size_t, nbc1, c1, nb)
  12408. GGML_TENSOR_LOCALS(int64_t, ne, dst, ne)
  12409. GGML_TENSOR_LOCALS(size_t, nb, dst, nb)
  12410. const int ith = params->ith;
  12411. const int nth = params->nth;
  12412. const int64_t D = nea0;
  12413. //const int64_t N = nea1;
  12414. const int64_t M = neb01;
  12415. GGML_ASSERT(ne0 == nea0);
  12416. GGML_ASSERT(ne1 == nea1);
  12417. GGML_ASSERT(ne2 == nea2);
  12418. GGML_ASSERT(nba0 == sizeof(ggml_fp16_t));
  12419. GGML_ASSERT(nbb00 == sizeof(ggml_fp16_t));
  12420. GGML_ASSERT(nbb10 == sizeof(float));
  12421. GGML_ASSERT(nbc00 == sizeof(ggml_fp16_t));
  12422. GGML_ASSERT(nbc10 == sizeof(float));
  12423. GGML_ASSERT(neb00 == D);
  12424. GGML_ASSERT(neb01 == M);
  12425. GGML_ASSERT(neb10 == M);
  12426. GGML_ASSERT(neb11 == 1);
  12427. GGML_ASSERT(nec00 == M);
  12428. GGML_ASSERT(nec01 == D);
  12429. GGML_ASSERT(nec10 == D);
  12430. GGML_ASSERT(nec11 == 1);
  12431. // dst cannot be transposed or permuted
  12432. GGML_ASSERT(nb0 == sizeof(float));
  12433. GGML_ASSERT(nb0 <= nb1);
  12434. GGML_ASSERT(nb1 <= nb2);
  12435. GGML_ASSERT(nb2 <= nb3);
  12436. if (params->type == GGML_TASK_INIT) {
  12437. return;
  12438. }
  12439. if (params->type == GGML_TASK_FINALIZE) {
  12440. return;
  12441. }
  12442. // parallelize by a rows using ggml_vec_dot_f32
  12443. // total rows in a
  12444. const int nr = nea1*nea2*nea3;
  12445. // rows per thread
  12446. const int dr = (nr + nth - 1)/nth;
  12447. // row range for this thread
  12448. const int ir0 = dr*ith;
  12449. const int ir1 = MIN(ir0 + dr, nr);
  12450. for (int ir = ir0; ir < ir1; ++ir) {
  12451. // a indices
  12452. const int ia3 = ir/(nea2*nea1);
  12453. const int ia2 = (ir - ia3*nea2*nea1)/nea1;
  12454. const int ia1 = (ir - ia3*nea2*nea1 - ia2*nea1);
  12455. float * S = (float *) params->wdata + ith*(2*M + CACHE_LINE_SIZE_F32);
  12456. for (int64_t ic = 0; ic < neb01; ++ic) {
  12457. // b0 indices
  12458. const int ib03 = ia3;
  12459. const int ib02 = ia2;
  12460. const int ib01 = ic;
  12461. // S indices
  12462. const int i1 = ib01;
  12463. ggml_vec_dot_f16(nea0,
  12464. S + i1,
  12465. (ggml_fp16_t *) ((char *) b0->data + (ib01*nbb01 + ib02*nbb02 + ib03*nbb03)),
  12466. (ggml_fp16_t *) ((char *) a->data + ( ia1*nba1 + ia2*nba2 + ia3*nba3)));
  12467. }
  12468. ggml_vec_add_f32(neb01, S, S, (float *) b1->data);
  12469. //ggml_vec_gelu_f32(neb01, S, S);
  12470. ggml_fp16_t * S16 = (ggml_fp16_t *) ((float *) params->wdata + ith*(2*M + CACHE_LINE_SIZE_F32) + M);
  12471. for (int64_t i = 0; i < M; i++) {
  12472. S16[i] = GGML_FP32_TO_FP16(S[i]);
  12473. }
  12474. ggml_vec_gelu_f16(neb01, S16, S16);
  12475. {
  12476. // dst indices
  12477. const int i1 = ia1;
  12478. const int i2 = ia2;
  12479. const int i3 = ia3;
  12480. for (int64_t ic = 0; ic < nec01; ++ic) {
  12481. ggml_vec_dot_f16(neb01,
  12482. (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)),
  12483. (ggml_fp16_t *) ((char *) c0->data + ( ic*nbc01 + i2*nbc02 + i3*nbc03)),
  12484. S16);
  12485. }
  12486. ggml_vec_add_f32(nec01,
  12487. (float *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3)),
  12488. (float *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3)),
  12489. (float *) c1->data);
  12490. }
  12491. }
  12492. }

static void ggml_compute_forward_flash_ff(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * a,
        const struct ggml_tensor * b0,
        const struct ggml_tensor * b1,
        const struct ggml_tensor * c0,
        const struct ggml_tensor * c1,
        struct ggml_tensor * dst) {
    switch (b0->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_flash_ff_f16(params, a, b0, b1, c0, c1, dst);
            } break;
        case GGML_TYPE_F32:
            {
                GGML_ASSERT(false); // TODO
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
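
// note on ggml_compute_forward_flash_ff_f16:
// - fused feed-forward for one row a[:,ia1,ia2,ia3]:
//       S   = b0^T @ a + b1        (fc_w, fc_b; length M = neb01)
//       S16 = gelu(S) in fp16
//       dst = c0^T @ S16 + c1      (proj_w, proj_b; length D = nec01)
//   i.e. dst = proj(gelu(fc(a))), with the intermediate kept in per-thread wdata scratch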
  12516. // ggml_compute_forward_flash_attn_back
  12517. static void ggml_compute_forward_flash_attn_back_f32(
  12518. const struct ggml_compute_params * params,
  12519. const struct ggml_tensor * q,
  12520. const struct ggml_tensor * k,
  12521. const struct ggml_tensor * v,
  12522. const struct ggml_tensor * d,
  12523. const bool masked,
  12524. struct ggml_tensor * dst) {
  12525. int64_t t0 = ggml_perf_time_us();
  12526. UNUSED(t0);
  12527. GGML_TENSOR_LOCALS(int64_t, neq, q, ne)
  12528. GGML_TENSOR_LOCALS(size_t, nbq, q, nb)
  12529. GGML_TENSOR_LOCALS(int64_t, nek, k, ne)
  12530. GGML_TENSOR_LOCALS(size_t, nbk, k, nb)
  12531. GGML_TENSOR_LOCALS(int64_t, nev, v, ne)
  12532. GGML_TENSOR_LOCALS(size_t, nbv, v, nb)
  12533. GGML_TENSOR_LOCALS(int64_t, ned, d, ne)
  12534. GGML_TENSOR_LOCALS(size_t, nbd, d, nb)
  12535. GGML_TENSOR_LOCALS(int64_t, ne, dst, ne)
  12536. GGML_TENSOR_LOCALS(size_t, nb, dst, nb)
  12537. const int ith = params->ith;
  12538. const int nth = params->nth;
  12539. const int64_t D = neq0;
  12540. const int64_t N = neq1;
  12541. const int64_t P = nek1 - N;
  12542. const int64_t M = P + N;
  12543. const int Mup = ggml_up(M, GGML_SOFT_MAX_UNROLL);
  12544. const int mxDM = MAX(D, Mup);
  12545. // GGML_ASSERT(ne0 == D);
  12546. // GGML_ASSERT(ne1 == N);
  12547. GGML_ASSERT(P >= 0);
  12548. GGML_ASSERT(nbq0 == sizeof(float));
  12549. GGML_ASSERT(nbk0 == sizeof(float));
  12550. GGML_ASSERT(nbv0 == sizeof(float));
  12551. GGML_ASSERT(neq0 == D);
  12552. GGML_ASSERT(nek0 == D);
  12553. GGML_ASSERT(nev1 == D);
  12554. GGML_ASSERT(ned0 == D);
  12555. GGML_ASSERT(neq1 == N);
  12556. GGML_ASSERT(nek1 == N + P);
  12557. GGML_ASSERT(nev1 == D);
  12558. GGML_ASSERT(ned1 == N);
  12559. // dst cannot be transposed or permuted
  12560. GGML_ASSERT(nb0 == sizeof(float));
  12561. GGML_ASSERT(nb0 <= nb1);
  12562. GGML_ASSERT(nb1 <= nb2);
  12563. GGML_ASSERT(nb2 <= nb3);
  12564. if (params->type == GGML_TASK_INIT) {
  12565. if (ith == 0) {
  12566. memset(dst->data, 0, nb0*ne0*ne1*ne2*ne3);
  12567. }
  12568. return;
  12569. }
  12570. if (params->type == GGML_TASK_FINALIZE) {
  12571. return;
  12572. }
  12573. const int64_t elem_q = ggml_nelements(q);
  12574. const int64_t elem_k = ggml_nelements(k);
  12575. enum ggml_type result_type = dst->type;
  12576. GGML_ASSERT(ggml_blck_size(result_type) == 1);
  12577. const size_t tsize = ggml_type_size(result_type);
  12578. const size_t offs_q = 0;
  12579. const size_t offs_k = offs_q + GGML_PAD(elem_q * tsize, GGML_MEM_ALIGN);
  12580. const size_t offs_v = offs_k + GGML_PAD(elem_k * tsize, GGML_MEM_ALIGN);
  12581. void * grad_q = (char *) dst->data;
  12582. void * grad_k = (char *) dst->data + offs_k;
  12583. void * grad_v = (char *) dst->data + offs_v;
  12584. const size_t nbgq1 = nb0*neq0;
  12585. const size_t nbgq2 = nb0*neq0*neq1;
  12586. const size_t nbgq3 = nb0*neq0*neq1*neq2;
  12587. const size_t nbgk1 = nb0*nek0;
  12588. const size_t nbgk2 = nb0*nek0*nek1;
  12589. const size_t nbgk3 = nb0*nek0*nek1*neq2;
  12590. const size_t nbgv1 = nb0*nev0;
  12591. const size_t nbgv2 = nb0*nev0*nev1;
  12592. const size_t nbgv3 = nb0*nev0*nev1*neq2;
  12593. // parallelize by k rows using ggml_vec_dot_f32
  12594. // total rows in k
  12595. const int nr = nek2*nek3;
  12596. // rows per thread
  12597. const int dr = (nr + nth - 1)/nth;
  12598. // row range for this thread
  12599. const int ir0 = dr*ith;
  12600. const int ir1 = MIN(ir0 + dr, nr);
  12601. const float scale = 1.0f/sqrtf(D);
  12602. //printf("P=%d N=%d D=%d ir0=%d ir1=%d scale = %f\n", P, N, D, ir0, ir1, scale);
  12603. // how often k2 (and v2) is repeated in q2
  12604. int nrep = neq2/nek2;
  12605. for (int ir = ir0; ir < ir1; ++ir) {
  12606. // q indices
  12607. const int ik3 = ir/(nek2);
  12608. const int ik2 = ir - ik3*nek2;
  12609. const int iq3 = ik3;
  12610. const int id3 = ik3;
  12611. const int iv3 = ik3;
  12612. const int iv2 = ik2;
  12613. for (int irep = 0; irep < nrep; ++irep) {
  12614. const int iq2 = ik2 + irep*nek2;
  12615. const int id2 = iq2;
  12616. // (ik2 + irep*nek2) % nek2 == ik2
  12617. for (int iq1 = 0; iq1 < neq1; ++iq1) {
  12618. const int id1 = iq1;
  12619. // not sure about CACHE_LINE_SIZE_F32..
  12620. // - maybe it must not be multiplied by 2 and excluded from .. in SM 1*(..) offset?
  12621. float * S = (float *) params->wdata + ith*2*(mxDM + CACHE_LINE_SIZE_F32) + 0*(mxDM+CACHE_LINE_SIZE_F32);
  12622. float * SM = (float *) params->wdata + ith*2*(mxDM + CACHE_LINE_SIZE_F32) + 1*(mxDM+CACHE_LINE_SIZE_F32);
  12623. for (int i = M; i < Mup; ++i) {
  12624. S[i] = -INFINITY;
  12625. }
  12626. const int64_t masked_begin = masked ? (P + iq1 + 1) : M;
  12627. for (int64_t ic = 0; ic < masked_begin; ++ic) {
  12628. // k indices
  12629. const int ik1 = ic;
  12630. // S indices
  12631. const int i1 = ik1;
  12632. ggml_vec_dot_f32(neq0,
  12633. S + i1,
  12634. (float *) ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)),
  12635. (float *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)));
  12636. }
  12637. // scale
  12638. ggml_vec_scale_f32(masked_begin, S, scale);
  12639. for (int64_t i = masked_begin; i < M; i++) {
  12640. S[i] = -INFINITY;
  12641. }
  12642. // softmax
  12643. // exclude known -INF S[..] values from max and loop
  12644. // dont forget to set their SM values to zero
  12645. {
  12646. float max = -INFINITY;
  12647. ggml_vec_max_f32(masked_begin, &max, S);
  12648. ggml_float sum = 0.0;
  12649. {
  12650. #ifdef GGML_SOFT_MAX_ACCELERATE
  12651. max = -max;
  12652. vDSP_vsadd(SM, 1, &max, SM, 1, Mup);
  12653. vvexpf(SM, SM, &Mup);
  12654. ggml_vec_sum_f32(Mup, &sum, SM);
  12655. #else
  12656. uint16_t scvt[GGML_SOFT_MAX_UNROLL]; UNUSED(scvt);
  12657. ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 };
  12658. for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) {
  12659. if (i >= masked_begin) {
  12660. break;
  12661. }
  12662. float * SR = S + i;
  12663. float * SW = SM + i;
  12664. for (int j = 0; j < GGML_SOFT_MAX_UNROLL; ++j) {
  12665. if (i + j >= masked_begin) {
  12666. break;
  12667. } else if (SR[j] == -INFINITY) {
  12668. SW[j] = 0.0f;
  12669. } else {
  12670. #ifndef GGML_FLASH_ATTN_EXP_FP16
  12671. const float val = expf(SR[j] - max);
  12672. #else
  12673. ggml_fp16_t s = GGML_FP32_TO_FP16(SR[j] - max);
  12674. memcpy(&scvt[j], &s, sizeof(uint16_t));
  12675. const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt[j]]);
  12676. #endif
  12677. sump[j] += (ggml_float)val;
  12678. SW[j] = val;
  12679. }
  12680. }
  12681. }
  12682. for (int i = 0; i < GGML_SOFT_MAX_UNROLL; i++) {
  12683. sum += sump[i];
  12684. }
  12685. #endif
  12686. }
  12687. assert(sum > 0.0);
  12688. sum = 1.0/sum;
  12689. ggml_vec_scale_f32(masked_begin, SM, sum);
  12690. }
  12691. // step-by-step explanation
  12692. {
  12693. // forward-process shape grads from backward process
  12694. // parallel_for ik2,ik3:
  12695. // for irep:
  12696. // iq2 = ik2 + irep*nek2
  12697. // k[:D,:M,:,:] [D,M,:,:] grad[k][:D,:M,ik2,ik3] += grad[kcur]
  12698. // q[:D,:N,:,:] [D,N,:,:] grad[q][:D,iq1,iq2,iq3] += grad[qcur]
  12699. // v[:M,:D,:,:] [M,D,:,:] grad[v][:M,:D,iv2,iv3] += grad[vcur]
  12700. // for iq1:
  12701. // kcur = k[:D,:M,ik2,ik3] [D,M,1,1] grad[kcur] = grad[S1].T @ qcur
  12702. // qcur = q[:D,iq1,iq2,iq3] [D,1,1,1] grad[qcur] = grad[S1] @ kcur
  12703. // vcur = v[:M,:D,iv2,iv3] [M,D,1,1] grad[vcur] = grad[S5].T @ S4
  12704. // S0 = -Inf [D,1,1,1]
  12705. // ~S1[i] = dot(kcur[:D,i], qcur)
  12706. // S1 = qcur @ kcur.T [M,1,1,1] grad[S1] = grad[S2] * scale
  12707. // S2 = S1 * scale [M,1,1,1] grad[S2] = diag_mask_zero(grad[S3], P)
  12708. // S3 = diag_mask_inf(S2, P) [M,1,1,1] grad[S3] = S4 * (grad[S4] - dot(S4, grad[S4]))
  12709. // S4 = softmax(S3) [M,1,1,1] grad[S4] = grad[S5] @ vcur
  12710. // ~S5[i] = dot(vcur[:,i], S4)
  12711. // S5 = S4 @ vcur.T [D,1,1,1] grad[S5] = d[:D,id1,id2,id3]
  12712. // ~dst[i,iq1,iq2,iq3] = S5[i] ^
  12713. // dst[:D,iq1,iq2,iq3] = S5 | grad[dst[:D,iq1,iq2,iq3]] = d[:D,id1,id2,id3]
  12714. // dst backward-/ grad[dst] = d
  12715. //
  12716. // output gradients with their dependencies:
  12717. //
  12718. // grad[kcur] = grad[S1].T @ qcur
  12719. // grad[S1] = diag_mask_zero(grad[S3], P) * scale
  12720. // grad[S3] = S4 * (grad[S4] - dot(S4, grad[S4]))
  12721. // grad[S4] = grad[S5] @ vcur
  12722. // grad[S4] = d[:D,id1,id2,id3] @ vcur
  12723. // grad[qcur] = grad[S1] @ kcur
  12724. // grad[vcur] = grad[S5].T @ S4
  12725. // grad[vcur] = d[:D,id1,id2,id3].T @ S4
  12726. //
  12727. // in post-order:
  12728. //
  12729. // S1 = qcur @ kcur.T
  12730. // S2 = S1 * scale
  12731. // S3 = diag_mask_inf(S2, P)
  12732. // S4 = softmax(S3)
  12733. // grad[S4] = d[:D,id1,id2,id3] @ vcur
  12734. // grad[S3] = S4 * (grad[S4] - dot(S4, grad[S4]))
  12735. // grad[S1] = diag_mask_zero(grad[S3], P) * scale
  12736. // grad[qcur] = grad[S1] @ kcur
  12737. // grad[kcur] = grad[S1].T @ qcur
  12738. // grad[vcur] = d[:D,id1,id2,id3].T @ S4
  12739. //
  12740. // using less variables (SM=S4):
  12741. //
  12742. // S = diag_mask_inf(qcur @ kcur.T * scale, P)
  12743. // SM = softmax(S)
  12744. // S = d[:D,iq1,iq2,iq3] @ vcur
  12745. // dot_SM_gradSM = dot(SM, S)
  12746. // S = SM * (S - dot(SM, S))
  12747. // S = diag_mask_zero(S, P) * scale
  12748. //
  12749. // grad[q][:D,iq1,iq2,iq3] += S @ kcur
  12750. // grad[k][:D,:M,ik2,ik3] += S.T @ qcur
  12751. // grad[v][:M,:D,iv2,iv3] += d[:D,id1,id2,id3].T @ SM
  12752. }
  12753. // S = gradSM = d[:D,id1,id2,id3] @ vcur[:,:,iv2,iv3]
  12754. // S = d[:D,id1,id2,id3] @ vcur[:,:,iv2,iv3]
  12755. // for ic:
  12756. // S[:M] += vcur[:M,ic,iv2,iv3] * d[ic,id1,id2,id3]
  12757. // exclude known future zero S[..] values from operation
  12758. ggml_vec_set_f32(masked_begin, S, 0);
  12759. for (int64_t ic = 0; ic < D; ++ic) {
  12760. ggml_vec_mad_f32(masked_begin,
  12761. S,
  12762. (float *) ((char *) v->data + ( ic*nbv1 + iv2*nbv2 + iv3*nbv3)),
  12763. *(float *) ((char *) d->data + (ic*nbd0 + id1*nbd1 + id2*nbd2 + id3*nbd3)));
  12764. }
  12765. // S = SM * (S - dot(SM, S))
  12766. float dot_SM_gradSM = 0;
  12767. ggml_vec_dot_f32 (masked_begin, &dot_SM_gradSM, SM, S);
  12768. ggml_vec_acc1_f32(M, S, -dot_SM_gradSM);
  12769. ggml_vec_mul_f32 (masked_begin, S, S, SM);
  12770. // S = diag_mask_zero(S, P) * scale
  12771. // already done by above ggml_vec_set_f32
  12772. // exclude known zero S[..] values from operation
  12773. ggml_vec_scale_f32(masked_begin, S, scale);
  12774. // S shape [M,1]
  12775. // SM shape [M,1]
  12776. // kcur shape [D,M]
  12777. // qcur shape [D,1]
  12778. // vcur shape [M,D]
  12779. // grad[q][:D,iq1,iq2,iq3] += S @ kcur
  12780. // grad[q][:D,iq1,iq2,iq3] += shape[M,1] @ shape[D,M]
  12781. // for ic:
  12782. // grad[q][:D,iq1,iq2,iq3] += S[ic] * kcur[:D,ic,ik2,ik3]
  12783. // exclude known zero S[..] values from loop
  12784. for (int64_t ic = 0; ic < masked_begin; ++ic) {
  12785. ggml_vec_mad_f32(D,
  12786. (float *) ((char *) grad_q + (iq1*nbgq1 + iq2*nbgq2 + iq3*nbgq3)),
  12787. (float *) ((char *) k->data + (ic*nbk1 + ik2*nbk2 + ik3*nbk3)),
  12788. S[ic]);
  12789. }
  12790. // grad[k][:D,:M,iq2,iq3] += S.T @ qcur
  12791. // for ic:
  12792. // grad[k][:D,ic,iq2,iq3] += S.T[0,ic] * qcur[:D,0]
  12793. // grad[k][:D,ic,iq2,iq3] += S[ic] * qcur[:D,0]
  12794. // exclude known zero S[..] values from loop
  12795. for (int64_t ic = 0; ic < masked_begin; ++ic) {
  12796. ggml_vec_mad_f32(D,
  12797. (float *) ((char *) grad_k + (ic*nbgk1 + ik2*nbgk2 + ik3*nbgk3)),
  12798. (float *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)),
  12799. S[ic]);
  12800. }
  12801. // grad[v][:M,:D,iv2,iv3] += d[:D,id1,id2,id3].T @ SM
  12802. // for ic:
  12803. // grad[v][:M,ic,iv2,iv3] += d[:D,id1,id2,id3].T[0,ic] * SM[:M]
  12804. // grad[v][:M,ic,iv2,iv3] += d[ic,id1,id2,id3] * SM[:M]
  12805. // exclude known zero SM[..] values from mad
  12806. for (int64_t ic = 0; ic < D; ++ic) {
  12807. ggml_vec_mad_f32(masked_begin,
  12808. (float *) ((char *) grad_v + ( ic*nbgv1 + iv2*nbgv2 + iv3*nbgv3)),
  12809. SM,
  12810. *(float *) ((char *) d->data + (ic*nbd0 + id1*nbd1 + id2*nbd2 + id3*nbd3)));
  12811. }
  12812. }
  12813. }
  12814. }
  12815. }

static void ggml_compute_forward_flash_attn_back(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * q,
        const struct ggml_tensor * k,
        const struct ggml_tensor * v,
        const struct ggml_tensor * d,
        const bool masked,
        struct ggml_tensor * dst) {
    switch (q->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_flash_attn_back_f32(params, q, k, v, d, masked, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
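
// note on ggml_compute_forward_flash_attn_back_f32:
// - dst is a single flat buffer holding all three input gradients back to back:
//   grad_q at offset 0, grad_k at offs_k = GGML_PAD(nelements(q)*tsize, GGML_MEM_ALIGN),
//   grad_v at offs_v = offs_k + GGML_PAD(nelements(k)*tsize, GGML_MEM_ALIGN);
//   it is zeroed once by thread 0 during INIT and accumulated with ggml_vec_mad_f32 during COMPUTE
// - the recomputation of SM = softmax(mask(q @ k^T * scale)) and the gradient formulas used here are
//   spelled out in the "step-by-step explanation" comment block inside the function above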

// ggml_compute_forward_win_part

static void ggml_compute_forward_win_part_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne)
    GGML_TENSOR_LOCALS(int64_t, ne,  dst,  ne)

    const int32_t nep0 = ((const int32_t *)(dst->op_params))[0];
    const int32_t nep1 = ((const int32_t *)(dst->op_params))[1];
    const int32_t w    = ((const int32_t *)(dst->op_params))[2];

    assert(ne00 == ne0);
    assert(ne3 == nep0*nep1);

    // TODO: optimize / multi-thread
    for (int py = 0; py < nep1; ++py) {
        for (int px = 0; px < nep0; ++px) {
            const int64_t i3 = py*nep0 + px;
            for (int64_t i2 = 0; i2 < ne2; ++i2) {
                for (int64_t i1 = 0; i1 < ne1; ++i1) {
                    for (int64_t i0 = 0; i0 < ne0; ++i0) {
                        const int64_t i02 = py*w + i2;
                        const int64_t i01 = px*w + i1;
                        const int64_t i00 = i0;

                        const int64_t i = i3*ne2*ne1*ne0 + i2*ne1*ne0 + i1*ne0 + i0;
                        const int64_t j = i02*ne01*ne00 + i01*ne00 + i00;

                        if (py*w + i2 >= ne02 || px*w + i1 >= ne01) {
                            ((float *) dst->data)[i] = 0.0f;
                        } else {
                            ((float *) dst->data)[i] = ((float *) src0->data)[j];
                        }
                    }
                }
            }
        }
    }
}

static void ggml_compute_forward_win_part(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_win_part_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
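
// note on ggml_compute_forward_win_part_f32:
// - splits the ne01 x ne02 spatial grid of src0 into nep0 x nep1 non-overlapping w x w windows
//   (window count and size come from dst->op_params[0..2]); dst stores one window per i3 slice,
//   and positions that fall past the right/bottom edge of src0 are zero-filled, so
//   ggml_compute_forward_win_unpart below effectively inverts it on the valid region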

// ggml_compute_forward_win_unpart

static void ggml_compute_forward_win_unpart_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne)
    GGML_TENSOR_LOCALS(int64_t, ne,  dst,  ne)

    const int32_t w = ((const int32_t *)(dst->op_params))[0];

    // padding
    const int px = (w - ne1%w)%w;
    //const int py = (w - ne2%w)%w;

    const int npx = (px + ne1)/w;
    //const int npy = (py + ne2)/w;

    assert(ne0 == ne00);

    // TODO: optimize / multi-thread
    for (int64_t i2 = 0; i2 < ne2; ++i2) {
        for (int64_t i1 = 0; i1 < ne1; ++i1) {
            for (int64_t i0 = 0; i0 < ne0; ++i0) {
                const int ip2 = i2/w;
                const int ip1 = i1/w;

                const int64_t i02 = i2%w;
                const int64_t i01 = i1%w;
                const int64_t i00 = i0;

                const int64_t i = (ip2*npx + ip1)*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00 + i00;
                const int64_t j = i2*ne1*ne0 + i1*ne0 + i0;

                ((float *) dst->data)[j] = ((float *) src0->data)[i];
            }
        }
    }
}

static void ggml_compute_forward_win_unpart(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_win_unpart_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_unary

static void ggml_compute_forward_unary(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    const enum ggml_unary_op op = ggml_get_unary_op(dst);

    switch (op) {
        case GGML_UNARY_OP_ABS:
            {
                ggml_compute_forward_abs(params, src0, dst);
            } break;
        case GGML_UNARY_OP_SGN:
            {
                ggml_compute_forward_sgn(params, src0, dst);
            } break;
        case GGML_UNARY_OP_NEG:
            {
                ggml_compute_forward_neg(params, src0, dst);
            } break;
        case GGML_UNARY_OP_STEP:
            {
                ggml_compute_forward_step(params, src0, dst);
            } break;
        case GGML_UNARY_OP_TANH:
            {
                ggml_compute_forward_tanh(params, src0, dst);
            } break;
        case GGML_UNARY_OP_ELU:
            {
                ggml_compute_forward_elu(params, src0, dst);
            } break;
        case GGML_UNARY_OP_RELU:
            {
                ggml_compute_forward_relu(params, src0, dst);
            } break;
        case GGML_UNARY_OP_GELU:
            {
                ggml_compute_forward_gelu(params, src0, dst);
            } break;
        case GGML_UNARY_OP_GELU_QUICK:
            {
                ggml_compute_forward_gelu_quick(params, src0, dst);
            } break;
        case GGML_UNARY_OP_SILU:
            {
                ggml_compute_forward_silu(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_get_rel_pos

static void ggml_compute_forward_get_rel_pos_f16(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // ref: https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/modeling/image_encoder.py#L292-L322

    GGML_TENSOR_UNARY_OP_LOCALS

    const int64_t w = ne1;

    ggml_fp16_t * src0_data = (ggml_fp16_t *) src0->data;
    ggml_fp16_t * dst_data  = (ggml_fp16_t *) dst->data;

    for (int64_t i2 = 0; i2 < ne2; ++i2) {
        for (int64_t i1 = 0; i1 < ne1; ++i1) {
            const int64_t pos = (w - i1 - 1) + i2;
            for (int64_t i0 = 0; i0 < ne0; ++i0) {
                dst_data[i2*ne1*ne0 + i1*ne0 + i0] = src0_data[pos*ne00 + i0];
            }
        }
    }
}

static void ggml_compute_forward_get_rel_pos(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_get_rel_pos_f16(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_add_rel_pos

static void ggml_compute_forward_add_rel_pos_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        const struct ggml_tensor * src2,
        struct ggml_tensor * dst) {
    const bool inplace = (bool) ((int32_t *) dst->op_params)[0];
    if (!inplace && params->type == GGML_TASK_INIT) {
        memcpy((char *) dst->data, (char *) src0->data, ggml_nbytes(dst));
        return;
    }
    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    // ref: https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/modeling/image_encoder.py#L357-L359

    float * src1_data = (float *) src1->data;
    float * src2_data = (float *) src2->data;
    float * dst_data  = (float *) dst->data;

    const int64_t ne10 = src1->ne[0];
    const int64_t ne11 = src1->ne[1];
    const int64_t ne12 = src1->ne[2];
    const int64_t ne13 = src1->ne[3];

    const int ith = params->ith;
    const int nth = params->nth;

    // total patches in dst
    const int np = ne13;

    // patches per thread
    const int dp = (np + nth - 1)/nth;

    // patch range for this thread
    const int ip0 = dp*ith;
    const int ip1 = MIN(ip0 + dp, np);

    for (int64_t i13 = ip0; i13 < ip1; ++i13) {
        for (int64_t i12 = 0; i12 < ne12; ++i12) {
            for (int64_t i11 = 0; i11 < ne11; ++i11) {
                const int64_t jp1 = i13*ne12*ne11*ne10 + i12*ne11*ne10 + i11*ne10;
                for (int64_t i10 = 0; i10 < ne10; ++i10) {
                    const int64_t jp0 = jp1 + i10;
                    const float src1_e = src1_data[jp0];
                    const float src2_e = src2_data[jp0];

                    const int64_t jdh = jp0 * ne10;
                    const int64_t jdw = jdh - (ne10 - 1) * i10;

                    for (int64_t j = 0; j < ne10; ++j) {
                        dst_data[jdh + j     ] += src2_e;
                        dst_data[jdw + j*ne10] += src1_e;
                    }
                }
            }
        }
    }
}

static void ggml_compute_forward_add_rel_pos(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        const struct ggml_tensor * src2,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_add_rel_pos_f32(params, src0, src1, src2, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
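
// note on ggml_compute_forward_add_rel_pos_f32:
// - adds decomposed relative-position scores (see the segment-anything reference above) into the
//   attention matrix: for each query position the two precomputed rel-pos terms (src1, src2) are
//   broadcast along the two spatial axes of the dst scores and accumulated
// - dst->op_params[0] selects the in-place variant; otherwise src0 is copied into dst during INIT
//   and the rel-pos terms are added on top during COMPUTE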

// ggml_compute_forward_map_unary

static void ggml_compute_forward_map_unary_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst,
        const ggml_unary_op_f32_t fun) {
    GGML_ASSERT(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert( dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        fun(nc,
                (float *) ((char *) dst->data  + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

static void ggml_compute_forward_map_unary(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst,
        const ggml_unary_op_f32_t fun) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_map_unary_f32(params, src0, dst, fun);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_map_binary

static void ggml_compute_forward_map_binary_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst,
        const ggml_binary_op_f32_t fun) {
    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert( dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));
    assert(src1->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        fun(nc,
                (float *) ((char *) dst->data  + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])),
                (float *) ((char *) src1->data + i*(src1->nb[1])));
    }
}

static void ggml_compute_forward_map_binary(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst,
        const ggml_binary_op_f32_t fun) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_map_binary_f32(params, src0, src1, dst, fun);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_map_custom1

static void ggml_compute_forward_map_custom1_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * a,
        struct ggml_tensor * dst,
        const ggml_custom1_op_f32_t fun) {
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    fun(dst, a);
}

// ggml_compute_forward_map_custom2

static void ggml_compute_forward_map_custom2_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * a,
        const struct ggml_tensor * b,
        struct ggml_tensor * dst,
        const ggml_custom2_op_f32_t fun) {
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    fun(dst, a, b);
}

// ggml_compute_forward_map_custom3

static void ggml_compute_forward_map_custom3_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * a,
        const struct ggml_tensor * b,
        const struct ggml_tensor * c,
        struct ggml_tensor * dst,
        const ggml_custom3_op_f32_t fun) {
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    fun(dst, a, b, c);
}

// ggml_compute_forward_map_custom1

static void ggml_compute_forward_map_custom1(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * a,
        struct ggml_tensor * dst) {
    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    struct ggml_map_custom1_op_params * p = (struct ggml_map_custom1_op_params *) dst->op_params;

    p->fun(dst, a, params->ith, params->nth, p->userdata);
}

// ggml_compute_forward_map_custom2

static void ggml_compute_forward_map_custom2(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * a,
        const struct ggml_tensor * b,
        struct ggml_tensor * dst) {
    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    struct ggml_map_custom2_op_params * p = (struct ggml_map_custom2_op_params *) dst->op_params;

    p->fun(dst, a, b, params->ith, params->nth, p->userdata);
}

// ggml_compute_forward_map_custom3

static void ggml_compute_forward_map_custom3(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * a,
        const struct ggml_tensor * b,
        const struct ggml_tensor * c,
        struct ggml_tensor * dst) {
    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    struct ggml_map_custom3_op_params * p = (struct ggml_map_custom3_op_params *) dst->op_params;

    p->fun(dst, a, b, c, params->ith, params->nth, p->userdata);
}
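
// note on the map_custom ops:
// - the *_f32 variants above are the single-threaded hooks: the user callback only receives
//   (dst, srcs) and runs on thread 0 (assert(params->ith == 0))
// - the newer variants read a ggml_map_customN_op_params struct (callback pointer + userdata) out of
//   dst->op_params and invoke it on every thread as fun(dst, srcs..., ith, nth, userdata), so the
//   callback is expected to partition the work itself, e.g. processing rows [ith*nr/nth, (ith+1)*nr/nth)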

// ggml_compute_forward_cross_entropy_loss

static void ggml_compute_forward_cross_entropy_loss_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_is_contiguous(src0));
    GGML_ASSERT(ggml_is_contiguous(src1));
    GGML_ASSERT(ggml_is_scalar(dst));
    GGML_ASSERT(ggml_are_same_shape(src0, src1));

    const int ith = params->ith;
    const int nth = params->nth;

    float * sums = (float *) params->wdata;

    // TODO: handle transposed/permuted matrices
    const int nc = src0->ne[0];
    const int nr = ggml_nrows(src0);

    GGML_ASSERT(params->wsize >= sizeof(float) * (nth + nth * nc));

    if (params->type == GGML_TASK_INIT) {
        if (ith == 0) {
            memset(sums, 0, sizeof(float) * (nth + nth * nc));
        }
        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        if (ith == 0) {
            float * dp = (float *) dst->data;
            ggml_vec_sum_f32(nth, dp, sums);
            dp[0] *= -1.0f / (float) nr;
        }
        return;
    }

    const double eps = 1e-9;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        float * s0 = (float *)((char *) src0->data + i1*src0->nb[1]);
        float * s1 = (float *)((char *) src1->data + i1*src1->nb[1]);
        float * st = ((float *) params->wdata) + nth + ith*nc;

#ifndef NDEBUG
        for (int i = 0; i < nc; ++i) {
            //printf("p[%d] = %f\n", i, p[i]);
            assert(!isnan(s0[i]));
            assert(!isnan(s1[i]));
        }
#endif
        // soft_max
        ggml_float sum = 0.0;
        {
            float max = -INFINITY;
            ggml_vec_max_f32(nc, &max, s0);

            uint16_t scvt; UNUSED(scvt);
            for (int i = 0; i < nc; i++) {
                if (s0[i] == -INFINITY) {
                    st[i] = 0.0f;
                } else {
#ifndef GGML_CROSS_ENTROPY_EXP_FP16
                    const float s = s0[i] - max;
                    const float val = expf(s);
#else
                    ggml_fp16_t s = GGML_FP32_TO_FP16(s0[i] - max);
                    memcpy(&scvt, &s, sizeof(scvt));
                    const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt]);
#endif
                    sum += (ggml_float)val;
                    st[i] = val;
                }
            }

            assert(sum > 0.0);
            // sum = 1.0/sum;
        }
        // avoid log(0) by rescaling from [0..1] to [eps..1]
        sum = (1.0 - eps) / sum;
        ggml_vec_scale_f32(nc, st, sum);
        ggml_vec_add1_f32(nc, st, st, eps);
        ggml_vec_log_f32(nc, st, st);
        ggml_vec_mul_f32(nc, st, st, s1);

        float st_sum = 0;
        ggml_vec_sum_f32(nc, &st_sum, st);
        sums[ith] += st_sum;

#ifndef NDEBUG
        for (int i = 0; i < nc; ++i) {
            assert(!isnan(st[i]));
            assert(!isinf(st[i]));
        }
#endif
    }
}

static void ggml_compute_forward_cross_entropy_loss(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_cross_entropy_loss_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
  13354. // ggml_compute_forward_cross_entropy_loss_back
  13355. static void ggml_compute_forward_cross_entropy_loss_back_f32(
  13356. const struct ggml_compute_params * params,
  13357. const struct ggml_tensor * src0,
  13358. const struct ggml_tensor * src1,
  13359. const struct ggml_tensor * opt0,
  13360. struct ggml_tensor * dst) {
  13361. GGML_ASSERT(ggml_is_contiguous(dst));
  13362. GGML_ASSERT(ggml_is_contiguous(src0));
  13363. GGML_ASSERT(ggml_is_contiguous(src1));
  13364. GGML_ASSERT(ggml_is_contiguous(opt0));
  13365. GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
  13366. const int64_t ith = params->ith;
  13367. const int64_t nth = params->nth;
  13368. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  13369. return;
  13370. }
  13371. const double eps = 1e-9;
  13372. // TODO: handle transposed/permuted matrices
  13373. const int64_t nc = src0->ne[0];
  13374. const int64_t nr = ggml_nrows(src0);
  13375. // rows per thread
  13376. const int64_t dr = (nr + nth - 1)/nth;
  13377. // row range for this thread
  13378. const int64_t ir0 = dr*ith;
  13379. const int64_t ir1 = MIN(ir0 + dr, nr);
  13380. float * d = (float *) opt0->data;
  13381. for (int64_t i1 = ir0; i1 < ir1; i1++) {
  13382. float * ds0 = (float *)((char *) dst->data + i1*dst->nb[1]);
  13383. float * s0 = (float *)((char *) src0->data + i1*src0->nb[1]);
  13384. float * s1 = (float *)((char *) src1->data + i1*src1->nb[1]);
  13385. #ifndef NDEBUG
  13386. for (int i = 0; i < nc; ++i) {
  13387. //printf("p[%d] = %f\n", i, p[i]);
  13388. assert(!isnan(s0[i]));
  13389. assert(!isnan(s1[i]));
  13390. }
  13391. #endif
  13392. // soft_max
  13393. ggml_float sum = 0.0;
  13394. {
  13395. float max = -INFINITY;
  13396. ggml_vec_max_f32(nc, &max, s0);
  13397. uint16_t scvt; UNUSED(scvt);
  13398. for (int i = 0; i < nc; i++) {
  13399. if (s0[i] == -INFINITY) {
  13400. ds0[i] = 0.0f;
  13401. } else {
  13402. #ifndef GGML_CROSS_ENTROPY_EXP_FP16
  13403. const float s = s0[i] - max;
  13404. const float val = expf(s);
  13405. #else
  13406. ggml_fp16_t s = GGML_FP32_TO_FP16(s0[i] - max);
  13407. memcpy(&scvt, &s, sizeof(scvt));
  13408. const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt]);
  13409. #endif
  13410. sum += (ggml_float)val;
  13411. ds0[i] = val;
  13412. }
  13413. }
  13414. assert(sum > 0.0);
  13415. sum = (1.0 - eps)/sum;
  13416. }
  13417. // grad(src0) = (softmax(src0) - src1) * grad(cross_entropy_loss(src0, src1)) / nr
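// sketch of the derivation behind the formula above (per row, p = softmax(s0), y = s1):
//   L        = -sum_i y_i * log(p_i)
//   dL/ds0_i = p_i * (sum_j y_j) - y_i, which reduces to p_i - y_i when y sums to 1
// d[0] is the incoming gradient of the scalar loss, and the division by nr mirrors
// the mean taken in the forward pass; the eps added below is not part of the exact
// derivative, it only keeps this pass numerically consistent with the forward one.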
  13418. ggml_vec_scale_f32(nc, ds0, sum);
  13419. ggml_vec_add1_f32(nc, ds0, ds0, eps);
  13420. ggml_vec_sub_f32(nc, ds0, ds0, s1);
  13421. ggml_vec_scale_f32(nc, ds0, d[0] / (float) nr);
  13422. #ifndef NDEBUG
  13423. for (int i = 0; i < nc; ++i) {
  13424. assert(!isnan(ds0[i]));
  13425. assert(!isinf(ds0[i]));
  13426. }
  13427. #endif
  13428. }
  13429. }
  13430. static void ggml_compute_forward_cross_entropy_loss_back(
  13431. const struct ggml_compute_params * params,
  13432. const struct ggml_tensor * src0,
  13433. const struct ggml_tensor * src1,
  13434. const struct ggml_tensor * opt0,
  13435. struct ggml_tensor * dst) {
  13436. switch (src0->type) {
  13437. case GGML_TYPE_F32:
  13438. {
  13439. ggml_compute_forward_cross_entropy_loss_back_f32(params, src0, src1, opt0, dst);
  13440. } break;
  13441. default:
  13442. {
  13443. GGML_ASSERT(false);
  13444. } break;
  13445. }
  13446. }
  13447. /////////////////////////////////
  13448. static void ggml_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor) {
  13449. GGML_ASSERT(params);
  13450. #ifdef GGML_USE_CUBLAS
  13451. bool skip_cpu = ggml_cuda_compute_forward(params, tensor);
  13452. if (skip_cpu) {
  13453. return;
  13454. }
  13455. GGML_ASSERT(tensor->src[0] == NULL || tensor->src[0]->backend == GGML_BACKEND_CPU);
  13456. GGML_ASSERT(tensor->src[1] == NULL || tensor->src[1]->backend == GGML_BACKEND_CPU);
  13457. #endif // GGML_USE_CUBLAS
  13458. switch (tensor->op) {
  13459. case GGML_OP_DUP:
  13460. {
  13461. ggml_compute_forward_dup(params, tensor->src[0], tensor);
  13462. } break;
  13463. case GGML_OP_ADD:
  13464. {
  13465. ggml_compute_forward_add(params, tensor->src[0], tensor->src[1], tensor);
  13466. } break;
  13467. case GGML_OP_ADD1:
  13468. {
  13469. ggml_compute_forward_add1(params, tensor->src[0], tensor->src[1], tensor);
  13470. } break;
  13471. case GGML_OP_ACC:
  13472. {
  13473. ggml_compute_forward_acc(params, tensor->src[0], tensor->src[1], tensor);
  13474. } break;
  13475. case GGML_OP_SUB:
  13476. {
  13477. ggml_compute_forward_sub(params, tensor->src[0], tensor->src[1], tensor);
  13478. } break;
  13479. case GGML_OP_MUL:
  13480. {
  13481. ggml_compute_forward_mul(params, tensor->src[0], tensor->src[1], tensor);
  13482. } break;
  13483. case GGML_OP_DIV:
  13484. {
  13485. ggml_compute_forward_div(params, tensor->src[0], tensor->src[1], tensor);
  13486. } break;
  13487. case GGML_OP_SQR:
  13488. {
  13489. ggml_compute_forward_sqr(params, tensor->src[0], tensor);
  13490. } break;
  13491. case GGML_OP_SQRT:
  13492. {
  13493. ggml_compute_forward_sqrt(params, tensor->src[0], tensor);
  13494. } break;
  13495. case GGML_OP_LOG:
  13496. {
  13497. ggml_compute_forward_log(params, tensor->src[0], tensor);
  13498. } break;
  13499. case GGML_OP_SUM:
  13500. {
  13501. ggml_compute_forward_sum(params, tensor->src[0], tensor);
  13502. } break;
  13503. case GGML_OP_SUM_ROWS:
  13504. {
  13505. ggml_compute_forward_sum_rows(params, tensor->src[0], tensor);
  13506. } break;
  13507. case GGML_OP_MEAN:
  13508. {
  13509. ggml_compute_forward_mean(params, tensor->src[0], tensor);
  13510. } break;
  13511. case GGML_OP_ARGMAX:
  13512. {
  13513. ggml_compute_forward_argmax(params, tensor->src[0], tensor);
  13514. } break;
  13515. case GGML_OP_REPEAT:
  13516. {
  13517. ggml_compute_forward_repeat(params, tensor->src[0], tensor);
  13518. } break;
  13519. case GGML_OP_REPEAT_BACK:
  13520. {
  13521. ggml_compute_forward_repeat_back(params, tensor->src[0], tensor);
  13522. } break;
  13523. case GGML_OP_CONCAT:
  13524. {
  13525. ggml_compute_forward_concat(params, tensor->src[0], tensor->src[1], tensor);
  13526. } break;
  13527. case GGML_OP_SILU_BACK:
  13528. {
  13529. ggml_compute_forward_silu_back(params, tensor->src[0], tensor->src[1], tensor);
  13530. } break;
  13531. case GGML_OP_NORM:
  13532. {
  13533. ggml_compute_forward_norm(params, tensor->src[0], tensor);
  13534. } break;
  13535. case GGML_OP_RMS_NORM:
  13536. {
  13537. ggml_compute_forward_rms_norm(params, tensor->src[0], tensor);
  13538. } break;
  13539. case GGML_OP_RMS_NORM_BACK:
  13540. {
  13541. ggml_compute_forward_rms_norm_back(params, tensor->src[0], tensor->src[1], tensor);
  13542. } break;
  13543. case GGML_OP_GROUP_NORM:
  13544. {
  13545. ggml_compute_forward_group_norm(params, tensor->src[0], tensor);
  13546. } break;
  13547. case GGML_OP_MUL_MAT:
  13548. {
  13549. ggml_compute_forward_mul_mat(params, tensor->src[0], tensor->src[1], tensor);
  13550. } break;
  13551. case GGML_OP_OUT_PROD:
  13552. {
  13553. ggml_compute_forward_out_prod(params, tensor->src[0], tensor->src[1], tensor);
  13554. } break;
  13555. case GGML_OP_SCALE:
  13556. {
  13557. ggml_compute_forward_scale(params, tensor->src[0], tensor->src[1], tensor);
  13558. } break;
  13559. case GGML_OP_SET:
  13560. {
  13561. ggml_compute_forward_set(params, tensor->src[0], tensor->src[1], tensor);
  13562. } break;
  13563. case GGML_OP_CPY:
  13564. {
  13565. ggml_compute_forward_cpy(params, tensor->src[0], tensor);
  13566. } break;
  13567. case GGML_OP_CONT:
  13568. {
  13569. ggml_compute_forward_cont(params, tensor->src[0], tensor);
  13570. } break;
  13571. case GGML_OP_RESHAPE:
  13572. {
  13573. ggml_compute_forward_reshape(params, tensor->src[0], tensor);
  13574. } break;
  13575. case GGML_OP_VIEW:
  13576. {
  13577. ggml_compute_forward_view(params, tensor->src[0]);
  13578. } break;
  13579. case GGML_OP_PERMUTE:
  13580. {
  13581. ggml_compute_forward_permute(params, tensor->src[0]);
  13582. } break;
  13583. case GGML_OP_TRANSPOSE:
  13584. {
  13585. ggml_compute_forward_transpose(params, tensor->src[0]);
  13586. } break;
  13587. case GGML_OP_GET_ROWS:
  13588. {
  13589. ggml_compute_forward_get_rows(params, tensor->src[0], tensor->src[1], tensor);
  13590. } break;
  13591. case GGML_OP_GET_ROWS_BACK:
  13592. {
  13593. ggml_compute_forward_get_rows_back(params, tensor->src[0], tensor->src[1], tensor);
  13594. } break;
  13595. case GGML_OP_DIAG:
  13596. {
  13597. ggml_compute_forward_diag(params, tensor->src[0], tensor);
  13598. } break;
  13599. case GGML_OP_DIAG_MASK_INF:
  13600. {
  13601. ggml_compute_forward_diag_mask_inf(params, tensor->src[0], tensor);
  13602. } break;
  13603. case GGML_OP_DIAG_MASK_ZERO:
  13604. {
  13605. ggml_compute_forward_diag_mask_zero(params, tensor->src[0], tensor);
  13606. } break;
  13607. case GGML_OP_SOFT_MAX:
  13608. {
  13609. ggml_compute_forward_soft_max(params, tensor->src[0], tensor);
  13610. } break;
  13611. case GGML_OP_SOFT_MAX_BACK:
  13612. {
  13613. ggml_compute_forward_soft_max_back(params, tensor->src[0], tensor->src[1], tensor);
  13614. } break;
  13615. case GGML_OP_ROPE:
  13616. {
  13617. ggml_compute_forward_rope(params, tensor->src[0], tensor->src[1], tensor);
  13618. } break;
  13619. case GGML_OP_ROPE_BACK:
  13620. {
  13621. ggml_compute_forward_rope_back(params, tensor->src[0], tensor->src[1], tensor);
  13622. } break;
  13623. case GGML_OP_ALIBI:
  13624. {
  13625. ggml_compute_forward_alibi(params, tensor->src[0], tensor);
  13626. } break;
  13627. case GGML_OP_CLAMP:
  13628. {
  13629. ggml_compute_forward_clamp(params, tensor->src[0], tensor);
  13630. } break;
  13631. case GGML_OP_CONV_1D:
  13632. {
  13633. ggml_compute_forward_conv_1d(params, tensor->src[0], tensor->src[1], tensor);
  13634. } break;
  13635. case GGML_OP_CONV_1D_STAGE_0:
  13636. {
  13637. ggml_compute_forward_conv_1d_stage_0(params, tensor->src[0], tensor->src[1], tensor);
  13638. } break;
  13639. case GGML_OP_CONV_1D_STAGE_1:
  13640. {
  13641. ggml_compute_forward_conv_1d_stage_1(params, tensor->src[0], tensor->src[1], tensor);
  13642. } break;
  13643. case GGML_OP_CONV_TRANSPOSE_1D:
  13644. {
  13645. ggml_compute_forward_conv_transpose_1d(params, tensor->src[0], tensor->src[1], tensor);
  13646. } break;
  13647. case GGML_OP_CONV_2D:
  13648. {
  13649. ggml_compute_forward_conv_2d(params, tensor->src[0], tensor->src[1], tensor);
  13650. } break;
  13651. case GGML_OP_CONV_TRANSPOSE_2D:
  13652. {
  13653. ggml_compute_forward_conv_transpose_2d(params, tensor->src[0], tensor->src[1], tensor);
  13654. } break;
  13655. case GGML_OP_POOL_1D:
  13656. {
  13657. ggml_compute_forward_pool_1d(params, tensor->src[0], tensor);
  13658. } break;
  13659. case GGML_OP_POOL_2D:
  13660. {
  13661. ggml_compute_forward_pool_2d(params, tensor->src[0], tensor);
  13662. } break;
  13663. case GGML_OP_UPSCALE:
  13664. {
  13665. ggml_compute_forward_upscale(params, tensor->src[0], tensor);
  13666. } break;
  13667. case GGML_OP_FLASH_ATTN:
  13668. {
  13669. const int32_t t = ggml_get_op_params_i32(tensor, 0);
  13670. GGML_ASSERT(t == 0 || t == 1);
  13671. const bool masked = t != 0;
  13672. ggml_compute_forward_flash_attn(params, tensor->src[0], tensor->src[1], tensor->src[2], masked, tensor);
  13673. } break;
  13674. case GGML_OP_FLASH_FF:
  13675. {
  13676. ggml_compute_forward_flash_ff(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor->src[3], tensor->src[4], tensor);
  13677. } break;
  13678. case GGML_OP_FLASH_ATTN_BACK:
  13679. {
  13680. int32_t t = ggml_get_op_params_i32(tensor, 0);
  13681. GGML_ASSERT(t == 0 || t == 1);
  13682. bool masked = t != 0;
  13683. ggml_compute_forward_flash_attn_back(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor->src[3], masked, tensor);
  13684. } break;
  13685. case GGML_OP_WIN_PART:
  13686. {
  13687. ggml_compute_forward_win_part(params, tensor->src[0], tensor);
  13688. } break;
  13689. case GGML_OP_WIN_UNPART:
  13690. {
  13691. ggml_compute_forward_win_unpart(params, tensor->src[0], tensor);
  13692. } break;
  13693. case GGML_OP_UNARY:
  13694. {
  13695. ggml_compute_forward_unary(params, tensor->src[0], tensor);
  13696. } break;
  13697. case GGML_OP_GET_REL_POS:
  13698. {
  13699. ggml_compute_forward_get_rel_pos(params, tensor->src[0], tensor);
  13700. } break;
  13701. case GGML_OP_ADD_REL_POS:
  13702. {
  13703. ggml_compute_forward_add_rel_pos(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor);
  13704. } break;
  13705. case GGML_OP_MAP_UNARY:
  13706. {
  13707. ggml_unary_op_f32_t fun;
  13708. memcpy(&fun, tensor->op_params, sizeof(fun));
  13709. ggml_compute_forward_map_unary(params, tensor->src[0], tensor, fun);
  13710. }
  13711. break;
  13712. case GGML_OP_MAP_BINARY:
  13713. {
  13714. ggml_binary_op_f32_t fun;
  13715. memcpy(&fun, tensor->op_params, sizeof(fun));
  13716. ggml_compute_forward_map_binary(params, tensor->src[0], tensor->src[1], tensor, fun);
  13717. }
  13718. break;
  13719. case GGML_OP_MAP_CUSTOM1_F32:
  13720. {
  13721. ggml_custom1_op_f32_t fun;
  13722. memcpy(&fun, tensor->op_params, sizeof(fun));
  13723. ggml_compute_forward_map_custom1_f32(params, tensor->src[0], tensor, fun);
  13724. }
  13725. break;
  13726. case GGML_OP_MAP_CUSTOM2_F32:
  13727. {
  13728. ggml_custom2_op_f32_t fun;
  13729. memcpy(&fun, tensor->op_params, sizeof(fun));
  13730. ggml_compute_forward_map_custom2_f32(params, tensor->src[0], tensor->src[1], tensor, fun);
  13731. }
  13732. break;
  13733. case GGML_OP_MAP_CUSTOM3_F32:
  13734. {
  13735. ggml_custom3_op_f32_t fun;
  13736. memcpy(&fun, tensor->op_params, sizeof(fun));
  13737. ggml_compute_forward_map_custom3_f32(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor, fun);
  13738. }
  13739. break;
  13740. case GGML_OP_MAP_CUSTOM1:
  13741. {
  13742. ggml_compute_forward_map_custom1(params, tensor->src[0], tensor);
  13743. }
  13744. break;
  13745. case GGML_OP_MAP_CUSTOM2:
  13746. {
  13747. ggml_compute_forward_map_custom2(params, tensor->src[0], tensor->src[1], tensor);
  13748. }
  13749. break;
  13750. case GGML_OP_MAP_CUSTOM3:
  13751. {
  13752. ggml_compute_forward_map_custom3(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor);
  13753. }
  13754. break;
  13755. case GGML_OP_CROSS_ENTROPY_LOSS:
  13756. {
  13757. ggml_compute_forward_cross_entropy_loss(params, tensor->src[0], tensor->src[1], tensor);
  13758. }
  13759. break;
  13760. case GGML_OP_CROSS_ENTROPY_LOSS_BACK:
  13761. {
  13762. ggml_compute_forward_cross_entropy_loss_back(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor);
  13763. }
  13764. break;
  13765. case GGML_OP_NONE:
  13766. {
  13767. // nop
  13768. } break;
  13769. case GGML_OP_COUNT:
  13770. {
  13771. GGML_ASSERT(false);
  13772. } break;
  13773. }
  13774. }
  13775. ////////////////////////////////////////////////////////////////////////////////
13776. static_assert(GGML_GRAPH_HASHTABLE_SIZE > GGML_MAX_NODES * 2, "GGML_GRAPH_HASHTABLE_SIZE is too small");
  13777. static size_t hash(void * p) {
  13778. return (size_t)p % GGML_GRAPH_HASHTABLE_SIZE;
  13779. }
  13780. static size_t hash_find(void * hash_table[], void * p) {
  13781. size_t h = hash(p);
  13782. // linear probing
  13783. size_t i = h;
  13784. while (hash_table[i] != NULL && hash_table[i] != p) {
  13785. i = (i + 1) % GGML_GRAPH_HASHTABLE_SIZE;
  13786. if (i == h) {
  13787. // visited all hash table entries -> not found
  13788. return GGML_GRAPH_HASHTABLE_SIZE;
  13789. }
  13790. }
  13791. return i;
  13792. }
  13793. static bool hash_insert(void * hash_table[], void * p) {
  13794. size_t i = hash_find(hash_table, p);
  13795. GGML_ASSERT(i < GGML_GRAPH_HASHTABLE_SIZE); // assert that not full
  13796. if (hash_table[i] == p) {
  13797. return true;
  13798. }
  13799. // insert
  13800. GGML_ASSERT(hash_table[i] == NULL);
  13801. hash_table[i] = p;
  13802. return false;
  13803. }
  13804. static bool hash_contains(void * hash_table[], void * p) {
  13805. size_t i = hash_find(hash_table, p);
  13806. return (i < GGML_GRAPH_HASHTABLE_SIZE) && (hash_table[i] == p);
  13807. }
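// illustrative use of the open-addressing helpers above (hypothetical snippet,
// not code from this file):
//
//   void * visited[GGML_GRAPH_HASHTABLE_SIZE] = { NULL };
//   if (!hash_insert(visited, (void *) node)) {
//       // key was not present before -> first visit, process the node
//   }
//   bool seen = hash_contains(visited, (void *) node); // true from now on
//
// hash_insert returns true if the pointer was already in the table and false if
// it was inserted by this call.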
  13808. struct hash_map {
  13809. void * keys[GGML_GRAPH_HASHTABLE_SIZE];
  13810. void * vals[GGML_GRAPH_HASHTABLE_SIZE];
  13811. };
  13812. static struct hash_map * new_hash_map(void) {
  13813. struct hash_map * result = malloc(sizeof(struct hash_map));
  13814. for (int i=0; i<GGML_GRAPH_HASHTABLE_SIZE; ++i) {
  13815. result->keys[i] = NULL;
  13816. result->vals[i] = NULL;
  13817. }
  13818. return result;
  13819. }
  13820. static void free_hash_map(struct hash_map * map) {
  13821. free(map);
  13822. }
  13823. // gradient checkpointing
  13824. static struct ggml_tensor * ggml_recompute_graph_node(
  13825. struct ggml_context * ctx,
  13826. struct ggml_cgraph * graph,
  13827. struct hash_map * replacements,
  13828. struct ggml_tensor * node) {
  13829. if (node == NULL) {
  13830. return NULL;
  13831. }
  13832. if (node->is_param) {
  13833. return node;
  13834. }
  13835. if (!hash_contains(graph->visited_hash_table, node)) {
  13836. return node;
  13837. }
  13838. int count_children = 0;
  13839. for (int k = 0; k < GGML_MAX_SRC; ++k) {
  13840. if (node->src[k]) {
  13841. ++count_children;
  13842. }
  13843. }
  13844. if (count_children == 0) {
  13845. return node;
  13846. }
  13847. size_t i = hash_find(replacements->keys, node);
  13848. GGML_ASSERT(i < GGML_GRAPH_HASHTABLE_SIZE); // assert that not full
  13849. if (replacements->keys[i] == node) {
  13850. return (struct ggml_tensor *) replacements->vals[i];
  13851. }
  13852. struct ggml_tensor * clone = ggml_new_tensor(ctx, node->type, node->n_dims, node->ne);
  13853. // insert clone into replacements
  13854. GGML_ASSERT(replacements->keys[i] == NULL); // assert that we don't overwrite
  13855. replacements->keys[i] = node;
  13856. replacements->vals[i] = clone;
  13857. clone->op = node->op;
  13858. clone->grad = node->grad;
  13859. clone->is_param = node->is_param;
  13860. clone->extra = node->extra;
  13861. for (int k = 0; k < GGML_MAX_DIMS; ++k) {
  13862. clone->nb[k] = node->nb[k];
  13863. }
  13864. for (int k = 0; k < GGML_MAX_SRC; ++k) {
  13865. clone->src[k] = ggml_recompute_graph_node(ctx, graph, replacements, node->src[k]);
  13866. }
  13867. if (node->view_src != NULL) {
  13868. clone->data = (node->view_src->data == NULL)
  13869. ? NULL // view_src not yet allocated
  13870. : (char *) node->view_src->data // view_src already allocated
  13871. + node->view_offs;
  13872. clone->view_src = node->view_src;
  13873. clone->view_offs = node->view_offs;
  13874. }
  13875. GGML_ASSERT(sizeof(node->op_params) == sizeof(int32_t) * (GGML_MAX_OP_PARAMS / sizeof(int32_t)));
  13876. GGML_ASSERT(sizeof(node->name) == GGML_MAX_NAME);
  13877. memcpy(clone->op_params, node->op_params, sizeof(node->op_params));
  13878. ggml_format_name(clone, "%s (clone)", ggml_get_name(node));
  13879. return clone;
  13880. }
  13881. void ggml_build_backward_gradient_checkpointing(
  13882. struct ggml_context * ctx,
  13883. struct ggml_cgraph * gf,
  13884. struct ggml_cgraph * gb,
  13885. struct ggml_cgraph * gb_tmp,
  13886. struct ggml_tensor * * checkpoints,
  13887. int n_checkpoints) {
  13888. *gb_tmp = *gf;
  13889. ggml_build_backward_expand(ctx, gf, gb_tmp, true);
  13890. if (n_checkpoints <= 0) {
  13891. *gb = *gb_tmp;
  13892. return;
  13893. }
  13894. struct hash_map * replacements = new_hash_map();
  13895. // insert checkpoints in replacements
  13896. for (int i = 0; i < n_checkpoints; ++i) {
  13897. size_t k = hash_find(replacements->keys, checkpoints[i]);
  13898. GGML_ASSERT(k < GGML_GRAPH_HASHTABLE_SIZE); // assert that not full
  13899. GGML_ASSERT(replacements->keys[k] == NULL); // assert that we don't overwrite
  13900. replacements->keys[k] = checkpoints[i];
  13901. replacements->vals[k] = checkpoints[i];
  13902. }
  13903. *gb = *gf;
  13904. // rewrite gb_tmp->nodes[gf->n_nodes:gb_tmp->n_nodes],
  13905. // replacing references to gb_tmp->nodes[0:gf->n_nodes] ( == gf->nodes[0:gf->n_nodes]),
  13906. // by recomputing them from checkpoints
  13907. for (int i = gf->n_nodes; i<gb_tmp->n_nodes; ++i) {
  13908. struct ggml_tensor * node = gb_tmp->nodes[i];
  13909. for (int k = 0; k < GGML_MAX_SRC; ++k) {
  13910. // insert new tensors recomputing src, reusing already made replacements,
13911. // remember replacements: map the corresponding gf nodes to the newly created tensors
  13912. // recurse for input tensors,
13913. // unless (i.e. terminating when) input tensors are replacements (like checkpoints)
  13914. node->src[k] = ggml_recompute_graph_node(ctx, gf, replacements, node->src[k]);
  13915. }
  13916. // insert rewritten backward node with replacements made into resulting backward graph gb
  13917. ggml_build_forward_expand(gb, node);
  13918. }
  13919. free_hash_map(replacements);
  13920. }
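// minimal usage sketch for the function above (hypothetical caller code; the
// names loss, checkpoints and n_checkpoints are assumptions, not part of this file):
//
//   struct ggml_cgraph * gf     = ggml_new_graph(ctx);
//   struct ggml_cgraph * gb     = ggml_new_graph(ctx);
//   struct ggml_cgraph * gb_tmp = ggml_new_graph(ctx);
//   ggml_build_forward_expand(gf, loss);
//   // checkpoints[] lists tensors to keep in memory (e.g. each layer's input);
//   // everything between checkpoints is recomputed inside the backward graph gb
//   ggml_build_backward_gradient_checkpointing(ctx, gf, gb, gb_tmp, checkpoints, n_checkpoints);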
13921. // functions to change gradients, handling the case where input a might be the initial gradient with zero value
  13922. static struct ggml_tensor * ggml_add_or_set(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, void * zero_table[]) {
  13923. if (hash_contains(zero_table, a)) {
  13924. return b;
  13925. } else {
  13926. return ggml_add_impl(ctx, a, b, false);
  13927. }
  13928. }
  13929. static struct ggml_tensor * ggml_acc_or_set(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, size_t nb1, size_t nb2, size_t nb3, size_t offset, void * zero_table[]) {
  13930. if (hash_contains(zero_table, a)) {
  13931. struct ggml_tensor * a_zero = ggml_scale(ctx, a, ggml_new_f32(ctx, 0));
  13932. return ggml_acc_impl(ctx, a_zero, b, nb1, nb2, nb3, offset, false);
  13933. } else {
  13934. return ggml_acc_impl(ctx, a, b, nb1, nb2, nb3, offset, false);
  13935. }
  13936. }
  13937. static struct ggml_tensor * ggml_add1_or_set(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, void * zero_table[]) {
  13938. if (hash_contains(zero_table, a)) {
  13939. return ggml_repeat(ctx, b, a);
  13940. } else {
  13941. return ggml_add1_impl(ctx, a, b, false);
  13942. }
  13943. }
  13944. static struct ggml_tensor * ggml_sub_or_set(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, void * zero_table[]) {
  13945. if (hash_contains(zero_table, a)) {
  13946. return ggml_neg(ctx, b);
  13947. } else {
  13948. return ggml_sub_impl(ctx, a, b, false);
  13949. }
  13950. }
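// what these *_or_set helpers buy us: the first time a gradient flows into a
// tensor whose grad is still the zero-initialized placeholder recorded in
// zero_table, ggml_add_or_set(ctx, a, b, zero_table) simply returns b instead of
// emitting an add-with-zero node; later contributions accumulate with a regular
// ggml_add (and analogously for acc/add1/sub above).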
  13951. static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor * tensor, void * zero_table[]) {
  13952. struct ggml_tensor * src0 = tensor->src[0];
  13953. struct ggml_tensor * src1 = tensor->src[1];
  13954. switch (tensor->op) {
  13955. case GGML_OP_DUP:
  13956. {
  13957. if (src0->grad) {
  13958. src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
  13959. }
  13960. } break;
  13961. case GGML_OP_ADD:
  13962. {
  13963. if (src0->grad) {
  13964. src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
  13965. }
  13966. if (src1->grad) {
  13967. src1->grad = ggml_add_or_set(ctx, src1->grad, tensor->grad, zero_table);
  13968. }
  13969. } break;
  13970. case GGML_OP_ADD1:
  13971. {
  13972. if (src0->grad) {
  13973. src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
  13974. }
  13975. if (src1->grad) {
  13976. src1->grad = ggml_add_or_set(ctx,
  13977. src1->grad,
  13978. ggml_mean(ctx, tensor->grad), // TODO: should probably be sum instead of mean
  13979. zero_table);
  13980. }
  13981. } break;
  13982. case GGML_OP_ACC:
  13983. {
  13984. if (src0->grad) {
  13985. src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
  13986. }
  13987. if (src1->grad) {
  13988. const size_t nb1 = ((int32_t *) tensor->op_params)[0];
  13989. const size_t nb2 = ((int32_t *) tensor->op_params)[1];
  13990. const size_t nb3 = ((int32_t *) tensor->op_params)[2];
  13991. const size_t offset = ((int32_t *) tensor->op_params)[3];
  13992. struct ggml_tensor * tensor_grad_view = ggml_view_4d(ctx,
  13993. tensor->grad,
  13994. src1->grad->ne[0],
  13995. src1->grad->ne[1],
  13996. src1->grad->ne[2],
  13997. src1->grad->ne[3],
  13998. nb1, nb2, nb3, offset);
  13999. src1->grad =
  14000. ggml_add_or_set(ctx,
  14001. src1->grad,
  14002. ggml_reshape(ctx,
  14003. ggml_cont(ctx, tensor_grad_view),
  14004. src1->grad),
  14005. zero_table);
  14006. }
  14007. } break;
  14008. case GGML_OP_SUB:
  14009. {
  14010. if (src0->grad) {
  14011. src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
  14012. }
  14013. if (src1->grad) {
  14014. src1->grad = ggml_sub_or_set(ctx, src1->grad, tensor->grad, zero_table);
  14015. }
  14016. } break;
  14017. case GGML_OP_MUL:
  14018. {
  14019. if (src0->grad) {
  14020. src0->grad =
  14021. ggml_add_or_set(ctx,
  14022. src0->grad,
  14023. ggml_mul(ctx, src1, tensor->grad),
  14024. zero_table);
  14025. }
  14026. if (src1->grad) {
  14027. src1->grad =
  14028. ggml_add_or_set(ctx,
  14029. src1->grad,
  14030. ggml_mul(ctx, src0, tensor->grad),
  14031. zero_table);
  14032. }
  14033. } break;
  14034. case GGML_OP_DIV:
  14035. {
  14036. if (src0->grad) {
  14037. src0->grad =
  14038. ggml_add_or_set(ctx,
  14039. src0->grad,
  14040. ggml_div(ctx, tensor->grad, src1),
  14041. zero_table);
  14042. }
  14043. if (src1->grad) {
  14044. src1->grad =
  14045. ggml_sub_or_set(ctx,
  14046. src1->grad,
  14047. ggml_mul(ctx,
  14048. tensor->grad,
  14049. ggml_div(ctx, tensor, src1)),
  14050. zero_table);
  14051. }
  14052. } break;
  14053. case GGML_OP_SQR:
  14054. {
  14055. if (src0->grad) {
  14056. src0->grad =
  14057. ggml_add_or_set(ctx,
  14058. src0->grad,
  14059. ggml_scale(ctx,
  14060. ggml_mul(ctx, src0, tensor->grad),
  14061. ggml_new_f32(ctx, 2.0f)),
  14062. zero_table);
  14063. }
  14064. } break;
  14065. case GGML_OP_SQRT:
  14066. {
  14067. if (src0->grad) {
  14068. src0->grad =
  14069. ggml_add_or_set(ctx,
  14070. src0->grad,
  14071. ggml_scale(ctx,
  14072. ggml_div(ctx,
  14073. tensor->grad,
  14074. tensor),
  14075. ggml_new_f32(ctx, 0.5f)),
  14076. zero_table);
  14077. }
  14078. } break;
  14079. case GGML_OP_LOG:
  14080. {
  14081. if (src0->grad) {
  14082. src0->grad =
  14083. ggml_add_or_set(ctx,
  14084. src0->grad,
  14085. ggml_div(ctx,
  14086. tensor->grad,
  14087. src0),
  14088. zero_table);
  14089. }
  14090. } break;
  14091. case GGML_OP_SUM:
  14092. {
  14093. if (src0->grad) {
  14094. src0->grad =
  14095. ggml_add1_or_set(ctx,
  14096. src0->grad,
  14097. tensor->grad,
  14098. zero_table);
  14099. }
  14100. } break;
  14101. case GGML_OP_SUM_ROWS:
  14102. {
  14103. if (src0->grad) {
  14104. src0->grad =
  14105. ggml_add_or_set(ctx,
  14106. src0->grad,
  14107. ggml_repeat(ctx,
  14108. tensor->grad,
  14109. src0->grad),
  14110. zero_table);
  14111. }
  14112. } break;
  14113. case GGML_OP_MEAN:
  14114. case GGML_OP_ARGMAX:
  14115. {
  14116. GGML_ASSERT(false); // TODO: implement
  14117. } break;
  14118. case GGML_OP_REPEAT:
  14119. {
  14120. // necessary for llama
  14121. if (src0->grad) {
  14122. src0->grad = ggml_add_or_set(ctx,
  14123. src0->grad,
  14124. ggml_repeat_back(ctx, tensor->grad, src0->grad),
  14125. zero_table);
  14126. }
  14127. } break;
  14128. case GGML_OP_REPEAT_BACK:
  14129. {
  14130. if (src0->grad) {
  14131. // TODO: test this
  14132. src0->grad = ggml_add_or_set(ctx,
  14133. src0->grad,
  14134. ggml_repeat(ctx, tensor->grad, src0->grad),
  14135. zero_table);
  14136. }
  14137. } break;
  14138. case GGML_OP_CONCAT:
  14139. {
  14140. GGML_ASSERT(false); // TODO: implement
  14141. } break;
  14142. case GGML_OP_SILU_BACK:
  14143. {
  14144. GGML_ASSERT(false); // TODO: not implemented
  14145. } break;
  14146. case GGML_OP_NORM:
  14147. {
  14148. GGML_ASSERT(false); // TODO: not implemented
  14149. } break;
  14150. case GGML_OP_RMS_NORM:
  14151. {
  14152. // necessary for llama
  14153. if (src0->grad) {
  14154. float eps;
  14155. memcpy(&eps, tensor->op_params, sizeof(float));
  14156. src0->grad = ggml_add_or_set(ctx,
  14157. src0->grad,
  14158. ggml_rms_norm_back(ctx, src0, tensor->grad, eps),
  14159. zero_table);
  14160. }
  14161. } break;
  14162. case GGML_OP_RMS_NORM_BACK:
  14163. {
  14164. GGML_ASSERT(false); // TODO: not implemented
  14165. } break;
  14166. case GGML_OP_GROUP_NORM:
  14167. {
  14168. GGML_ASSERT(false); // TODO: not implemented
  14169. } break;
  14170. case GGML_OP_MUL_MAT:
  14171. {
  14172. // https://cs231n.github.io/optimization-2/#staged
  14173. // # forward pass
  14174. // s0 = np.random.randn(5, 10)
  14175. // s1 = np.random.randn(10, 3)
  14176. // t = s0.dot(s1)
  14177. // # now suppose we had the gradient on t from above in the circuit
  14178. // dt = np.random.randn(*t.shape) # same shape as t
  14179. // ds0 = dt.dot(s1.T) #.T gives the transpose of the matrix
14180. // ds1 = s0.T.dot(dt)
  14181. // tensor.shape [m,p,qq,rr]
  14182. // src0.shape [n,m,q1,r1]
  14183. // src1.shape [n,p,qq,rr]
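// concrete example with assumed sizes, only to make the shape bookkeeping explicit:
//   src0 [n=4096, m=4096, q1=1, r1=1]   (weights)
//   src1 [n=4096, p=32,   qq=1, rr=1]   (activations)
//   tensor = mul_mat(src0, src1)             -> [m=4096, p=32, 1, 1]
//   ds0 = out_prod(src1, tensor->grad)       -> [n=4096, m=4096, 1, 1]  (shape of src0)
//   ds1 = out_prod(src0, (tensor->grad)^T)   -> [n=4096, p=32, 1, 1]    (shape of src1)
// when qq > q1 or rr > r1, the broadcast repetitions in ds0 are summed back into
// src0's shape via ggml_repeat_back, as done below.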
  14184. // necessary for llama
  14185. if (src0->grad) {
  14186. struct ggml_tensor * s1_tg =
  14187. ggml_out_prod(ctx, // [n,m,qq,rr]
  14188. src1, // [n,p,qq,rr]
  14189. tensor->grad); // [m,p,qq,rr]
  14190. const int64_t qq = s1_tg->ne[2];
  14191. const int64_t rr = s1_tg->ne[3];
  14192. const int64_t q1 = src0->ne[2];
  14193. const int64_t r1 = src0->ne[3];
  14194. const bool ne2_broadcasted = qq > q1;
  14195. const bool ne3_broadcasted = rr > r1;
  14196. if (ne2_broadcasted || ne3_broadcasted) {
  14197. // sum broadcast repetitions of s1_tg into shape of src0
  14198. s1_tg = ggml_repeat_back(ctx, s1_tg, src0);
  14199. }
  14200. src0->grad =
  14201. ggml_add_or_set(ctx,
  14202. src0->grad, // [n,m,q1,r1]
  14203. s1_tg, // [n,m,q1,r1]
  14204. zero_table);
  14205. }
  14206. if (src1->grad) {
  14207. src1->grad =
  14208. ggml_add_or_set(ctx,
  14209. src1->grad, // [n,p,qq,rr]
  14210. // ggml_mul_mat(ctx, // [n,p,qq,rr]
  14211. // ggml_cont(ctx, // [m,n,q1,r1]
  14212. // ggml_transpose(ctx, src0)), // [m,n,q1,r1]
  14213. // tensor->grad), // [m,p,qq,rr]
  14214. // // when src0 is bigger than tensor->grad (this is mostly the case in llama),
14215. // // avoid transposing src0; instead transpose the smaller tensor->grad
  14216. // // and then use ggml_out_prod
  14217. ggml_out_prod(ctx, // [n,p,qq,rr]
  14218. src0, // [n,m,q1,r1]
  14219. ggml_transpose(ctx, // [p,m,qq,rr]
  14220. tensor->grad)), // [m,p,qq,rr]
  14221. zero_table);
  14222. }
  14223. } break;
  14224. case GGML_OP_OUT_PROD:
  14225. {
  14226. GGML_ASSERT(false); // TODO: not implemented
  14227. } break;
  14228. case GGML_OP_SCALE:
  14229. {
  14230. // necessary for llama
  14231. if (src0->grad) {
  14232. src0->grad =
  14233. ggml_add_or_set(ctx,
  14234. src0->grad,
  14235. ggml_scale_impl(ctx, tensor->grad, src1, false),
  14236. zero_table);
  14237. }
  14238. if (src1->grad) {
  14239. src1->grad =
  14240. ggml_add_or_set(ctx,
  14241. src1->grad,
  14242. ggml_sum(ctx, ggml_mul_impl(ctx, tensor->grad, src0, false)),
  14243. zero_table);
  14244. }
  14245. } break;
  14246. case GGML_OP_SET:
  14247. {
  14248. const size_t nb1 = ((int32_t *) tensor->op_params)[0];
  14249. const size_t nb2 = ((int32_t *) tensor->op_params)[1];
  14250. const size_t nb3 = ((int32_t *) tensor->op_params)[2];
  14251. const size_t offset = ((int32_t *) tensor->op_params)[3];
  14252. struct ggml_tensor * tensor_grad_view = NULL;
  14253. if (src0->grad || src1->grad) {
  14254. GGML_ASSERT(src0->type == tensor->type);
  14255. GGML_ASSERT(tensor->grad->type == tensor->type);
  14256. GGML_ASSERT(tensor->grad->type == src1->grad->type);
  14257. tensor_grad_view = ggml_view_4d(ctx,
  14258. tensor->grad,
  14259. src1->grad->ne[0],
  14260. src1->grad->ne[1],
  14261. src1->grad->ne[2],
  14262. src1->grad->ne[3],
  14263. nb1, nb2, nb3, offset);
  14264. }
  14265. if (src0->grad) {
  14266. src0->grad = ggml_add_or_set(ctx,
  14267. src0->grad,
  14268. ggml_acc_impl(ctx,
  14269. tensor->grad,
  14270. ggml_neg(ctx, tensor_grad_view),
  14271. nb1, nb2, nb3, offset, false),
  14272. zero_table);
  14273. }
  14274. if (src1->grad) {
  14275. src1->grad =
  14276. ggml_add_or_set(ctx,
  14277. src1->grad,
  14278. ggml_reshape(ctx,
  14279. ggml_cont(ctx, tensor_grad_view),
  14280. src1->grad),
  14281. zero_table);
  14282. }
  14283. } break;
  14284. case GGML_OP_CPY:
  14285. {
  14286. // necessary for llama
  14287. // cpy overwrites value of src1 by src0 and returns view(src1)
  14288. // the overwriting is mathematically equivalent to:
  14289. // tensor = src0 * 1 + src1 * 0
  14290. if (src0->grad) {
  14291. // dsrc0 = dtensor * 1
  14292. src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
  14293. }
  14294. if (src1->grad) {
  14295. // dsrc1 = dtensor * 0 -> noop
  14296. }
  14297. } break;
  14298. case GGML_OP_CONT:
  14299. {
  14300. // same as cpy
  14301. if (src0->grad) {
  14302. GGML_ASSERT(ggml_is_contiguous(src0->grad));
  14303. GGML_ASSERT(ggml_is_contiguous(tensor->grad));
  14304. src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
  14305. }
  14306. } break;
  14307. case GGML_OP_RESHAPE:
  14308. {
  14309. // necessary for llama
  14310. if (src0->grad) {
  14311. src0->grad =
  14312. ggml_add_or_set(ctx, src0->grad,
  14313. ggml_reshape(ctx,
  14314. ggml_is_contiguous(tensor->grad)
  14315. ? tensor->grad
  14316. : ggml_cont(ctx, tensor->grad),
  14317. src0->grad),
  14318. zero_table);
  14319. }
  14320. } break;
  14321. case GGML_OP_VIEW:
  14322. {
  14323. // necessary for llama
  14324. if (src0->grad) {
  14325. size_t offset;
  14326. memcpy(&offset, tensor->op_params, sizeof(offset));
  14327. size_t nb1 = tensor->nb[1];
  14328. size_t nb2 = tensor->nb[2];
  14329. size_t nb3 = tensor->nb[3];
  14330. if (src0->type != src0->grad->type) {
  14331. // gradient is typically F32, but src0 could be other type
  14332. size_t ng = ggml_element_size(src0->grad);
  14333. size_t n0 = ggml_element_size(src0);
  14334. GGML_ASSERT(offset % n0 == 0);
  14335. GGML_ASSERT(nb1 % n0 == 0);
  14336. GGML_ASSERT(nb2 % n0 == 0);
  14337. GGML_ASSERT(nb3 % n0 == 0);
  14338. offset = (offset / n0) * ng;
  14339. nb1 = (nb1 / n0) * ng;
  14340. nb2 = (nb2 / n0) * ng;
  14341. nb3 = (nb3 / n0) * ng;
  14342. }
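// e.g. if src0 is F16 (n0 = 2 bytes) while its gradient is F32 (ng = 4 bytes),
// the offset and strides taken from the F16 view are doubled so that they address
// the same logical elements inside the F32 gradient tensor.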
  14343. src0->grad = ggml_acc_or_set(ctx, src0->grad, tensor->grad, nb1, nb2, nb3, offset, zero_table);
  14344. }
  14345. } break;
  14346. case GGML_OP_PERMUTE:
  14347. {
  14348. // necessary for llama
  14349. if (src0->grad) {
  14350. int32_t * axes = (int32_t *) tensor->op_params;
  14351. int axis0 = axes[0] & 0x3;
  14352. int axis1 = axes[1] & 0x3;
  14353. int axis2 = axes[2] & 0x3;
  14354. int axis3 = axes[3] & 0x3;
  14355. int axes_backward[4] = {0,0,0,0};
  14356. axes_backward[axis0] = 0;
  14357. axes_backward[axis1] = 1;
  14358. axes_backward[axis2] = 2;
  14359. axes_backward[axis3] = 3;
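// example: a forward ggml_permute(ctx, x, 2, 0, 1, 3) stores axes = {2,0,1,3};
// the assignments above then produce axes_backward = {1,2,0,3}, i.e. the inverse
// permutation that maps tensor->grad back to the layout of x.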
  14360. src0->grad =
  14361. ggml_add_or_set(ctx, src0->grad,
  14362. ggml_permute(ctx,
  14363. tensor->grad,
  14364. axes_backward[0],
  14365. axes_backward[1],
  14366. axes_backward[2],
  14367. axes_backward[3]),
  14368. zero_table);
  14369. }
  14370. } break;
  14371. case GGML_OP_TRANSPOSE:
  14372. {
  14373. // necessary for llama
  14374. if (src0->grad) {
  14375. src0->grad =
  14376. ggml_add_or_set(ctx, src0->grad,
  14377. ggml_transpose(ctx, tensor->grad),
  14378. zero_table);
  14379. }
  14380. } break;
  14381. case GGML_OP_GET_ROWS:
  14382. {
  14383. // necessary for llama (only for tokenizer)
  14384. if (src0->grad) {
  14385. src0->grad =
  14386. ggml_add_or_set(ctx, src0->grad,
  14387. // last ggml_get_rows_back argument src0->grad is only
  14388. // necessary to setup correct output shape
  14389. ggml_get_rows_back(ctx, tensor->grad, src1, src0->grad),
  14390. zero_table);
  14391. }
  14392. if (src1->grad) {
  14393. // noop
  14394. }
  14395. } break;
  14396. case GGML_OP_GET_ROWS_BACK:
  14397. {
  14398. GGML_ASSERT(false); // TODO: not implemented
  14399. } break;
  14400. case GGML_OP_DIAG:
  14401. {
  14402. GGML_ASSERT(false); // TODO: not implemented
  14403. } break;
  14404. case GGML_OP_DIAG_MASK_INF:
  14405. {
  14406. // necessary for llama
  14407. if (src0->grad) {
  14408. const int n_past = ((int32_t *) tensor->op_params)[0];
  14409. src0->grad =
  14410. ggml_add_or_set(ctx, src0->grad,
  14411. ggml_diag_mask_zero_impl(ctx, tensor->grad, n_past, false),
  14412. zero_table);
  14413. }
  14414. } break;
  14415. case GGML_OP_DIAG_MASK_ZERO:
  14416. {
  14417. // necessary for llama
  14418. if (src0->grad) {
  14419. const int n_past = ((int32_t *) tensor->op_params)[0];
  14420. src0->grad =
  14421. ggml_add_or_set(ctx, src0->grad,
  14422. ggml_diag_mask_zero_impl(ctx, tensor->grad, n_past, false),
  14423. zero_table);
  14424. }
  14425. } break;
  14426. case GGML_OP_SOFT_MAX:
  14427. {
  14428. // necessary for llama
  14429. if (src0->grad) {
  14430. src0->grad =
  14431. ggml_add_or_set(ctx, src0->grad,
  14432. ggml_soft_max_back(ctx, tensor->grad, tensor),
  14433. zero_table);
  14434. }
  14435. } break;
  14436. case GGML_OP_SOFT_MAX_BACK:
  14437. {
  14438. GGML_ASSERT(false); // TODO: not implemented
  14439. } break;
  14440. case GGML_OP_ROPE:
  14441. {
  14442. // necessary for llama
  14443. if (src0->grad) {
  14444. //const int n_past = ((int32_t *) tensor->op_params)[0];
  14445. const int n_dims = ((int32_t *) tensor->op_params)[1];
  14446. const int mode = ((int32_t *) tensor->op_params)[2];
  14447. const int n_ctx = ((int32_t *) tensor->op_params)[3];
  14448. float freq_base;
  14449. float freq_scale;
  14450. float xpos_base;
  14451. bool xpos_down;
  14452. memcpy(&freq_base, (int32_t *) tensor->op_params + 4, sizeof(float));
  14453. memcpy(&freq_scale, (int32_t *) tensor->op_params + 5, sizeof(float));
  14454. memcpy(&xpos_base, (int32_t *) tensor->op_params + 6, sizeof(float));
  14455. memcpy(&xpos_down, (int32_t *) tensor->op_params + 7, sizeof(bool));
  14456. src0->grad = ggml_add_or_set(ctx,
  14457. src0->grad,
  14458. ggml_rope_back(ctx,
  14459. tensor->grad,
  14460. src1,
  14461. n_dims,
  14462. mode,
  14463. n_ctx,
  14464. freq_base,
  14465. freq_scale,
  14466. xpos_base,
  14467. xpos_down),
  14468. zero_table);
  14469. }
  14470. } break;
  14471. case GGML_OP_ROPE_BACK:
  14472. {
  14473. if (src0->grad) {
  14474. //const int n_past = ((int32_t *) tensor->op_params)[0];
  14475. const int n_dims = ((int32_t *) tensor->op_params)[1];
  14476. const int mode = ((int32_t *) tensor->op_params)[2];
  14477. const int n_ctx = ((int32_t *) tensor->op_params)[3];
  14478. float freq_base;
  14479. float freq_scale;
  14480. float xpos_base;
  14481. bool xpos_down;
  14482. memcpy(&freq_base, (int32_t *) tensor->op_params + 4, sizeof(float));
  14483. memcpy(&freq_scale, (int32_t *) tensor->op_params + 5, sizeof(float));
  14484. memcpy(&xpos_base, (int32_t *) tensor->op_params + 6, sizeof(float));
  14485. memcpy(&xpos_down, (int32_t *) tensor->op_params + 7, sizeof(bool));
  14486. src0->grad = ggml_add_or_set(ctx,
  14487. src0->grad,
  14488. ggml_rope_impl(ctx,
  14489. tensor->grad,
  14490. src1,
  14491. n_dims,
  14492. mode,
  14493. n_ctx,
  14494. freq_base,
  14495. freq_scale,
  14496. xpos_base,
  14497. xpos_down,
  14498. false),
  14499. zero_table);
  14500. }
  14501. } break;
  14502. case GGML_OP_ALIBI:
  14503. {
  14504. GGML_ASSERT(false); // TODO: not implemented
  14505. } break;
  14506. case GGML_OP_CLAMP:
  14507. {
  14508. GGML_ASSERT(false); // TODO: not implemented
  14509. } break;
  14510. case GGML_OP_CONV_1D:
  14511. {
  14512. GGML_ASSERT(false); // TODO: not implemented
  14513. } break;
  14514. case GGML_OP_CONV_1D_STAGE_0:
  14515. {
  14516. GGML_ASSERT(false); // TODO: not implemented
  14517. } break;
  14518. case GGML_OP_CONV_1D_STAGE_1:
  14519. {
  14520. GGML_ASSERT(false); // TODO: not implemented
  14521. } break;
  14522. case GGML_OP_CONV_2D:
  14523. {
  14524. GGML_ASSERT(false); // TODO: not implemented
  14525. } break;
  14526. case GGML_OP_CONV_TRANSPOSE_1D:
  14527. {
  14528. GGML_ASSERT(false); // TODO: not implemented
  14529. } break;
  14530. case GGML_OP_CONV_TRANSPOSE_2D:
  14531. {
  14532. GGML_ASSERT(false); // TODO: not implemented
  14533. } break;
  14534. case GGML_OP_POOL_1D:
  14535. {
  14536. GGML_ASSERT(false); // TODO: not implemented
  14537. } break;
  14538. case GGML_OP_POOL_2D:
  14539. {
  14540. GGML_ASSERT(false); // TODO: not implemented
  14541. } break;
  14542. case GGML_OP_UPSCALE:
  14543. {
  14544. GGML_ASSERT(false); // TODO: not implemented
  14545. } break;
  14546. case GGML_OP_FLASH_ATTN:
  14547. {
  14548. struct ggml_tensor * flash_grad = NULL;
  14549. if (src0->grad || src1->grad || tensor->src[2]->grad) {
  14550. int32_t t = ggml_get_op_params_i32(tensor, 0);
  14551. GGML_ASSERT(t == 0 || t == 1);
  14552. bool masked = t != 0;
  14553. flash_grad =
  14554. ggml_flash_attn_back(ctx,
  14555. src0,
  14556. src1,
  14557. tensor->src[2],
  14558. tensor->grad,
  14559. masked);
  14560. }
  14561. struct ggml_tensor * src2 = tensor->src[2];
  14562. const int64_t elem_q = ggml_nelements(src0);
  14563. const int64_t elem_k = ggml_nelements(src1);
  14564. const int64_t elem_v = ggml_nelements(src2);
  14565. enum ggml_type result_type = flash_grad->type;
  14566. GGML_ASSERT(ggml_blck_size(result_type) == 1);
  14567. const size_t tsize = ggml_type_size(result_type);
  14568. const size_t offs_q = 0;
  14569. const size_t offs_k = offs_q + GGML_PAD(elem_q * tsize, GGML_MEM_ALIGN);
  14570. const size_t offs_v = offs_k + GGML_PAD(elem_k * tsize, GGML_MEM_ALIGN);
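// flash_grad packs dq, dk and dv back to back in one flat buffer: offs_q/k/v are
// byte offsets of each section (each padded to GGML_MEM_ALIGN), and the 1d views
// below slice the sections out and reshape them to the shapes of q, k and v.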
  14571. if (src0->grad) {
  14572. struct ggml_tensor * view_q = ggml_view_1d(ctx, flash_grad, elem_q, offs_q);
  14573. struct ggml_tensor * grad_q = ggml_reshape(ctx, view_q, src0);
  14574. src0->grad = ggml_add_or_set(ctx,
  14575. src0->grad,
  14576. grad_q,
  14577. zero_table);
  14578. }
  14579. if (src1->grad) {
  14580. struct ggml_tensor * view_k = ggml_view_1d(ctx, flash_grad, elem_k, offs_k);
  14581. struct ggml_tensor * grad_k = ggml_reshape(ctx, view_k, src1);
  14582. src1->grad = ggml_add_or_set(ctx,
  14583. src1->grad,
  14584. grad_k,
  14585. zero_table);
  14586. }
  14587. if (src2->grad) {
  14588. struct ggml_tensor * view_v = ggml_view_1d(ctx, flash_grad, elem_v, offs_v);
  14589. struct ggml_tensor * grad_v = ggml_reshape(ctx, view_v, src2);
  14590. src2->grad = ggml_add_or_set(ctx,
  14591. src2->grad,
  14592. grad_v,
  14593. zero_table);
  14594. }
  14595. } break;
  14596. case GGML_OP_FLASH_FF:
  14597. {
  14598. GGML_ASSERT(false); // not supported
  14599. } break;
  14600. case GGML_OP_FLASH_ATTN_BACK:
  14601. {
  14602. GGML_ASSERT(false); // not supported
  14603. } break;
  14604. case GGML_OP_WIN_PART:
  14605. case GGML_OP_WIN_UNPART:
  14606. case GGML_OP_UNARY:
  14607. {
  14608. switch (ggml_get_unary_op(tensor)) {
  14609. case GGML_UNARY_OP_ABS:
  14610. {
  14611. if (src0->grad) {
  14612. src0->grad =
  14613. ggml_add_or_set(ctx,
  14614. src0->grad,
  14615. ggml_mul(ctx,
  14616. ggml_sgn(ctx, src0),
  14617. tensor->grad),
  14618. zero_table);
  14619. }
  14620. } break;
  14621. case GGML_UNARY_OP_SGN:
  14622. {
  14623. if (src0->grad) {
  14624. // noop
  14625. }
  14626. } break;
  14627. case GGML_UNARY_OP_NEG:
  14628. {
  14629. if (src0->grad) {
  14630. src0->grad = ggml_sub_or_set(ctx, src0->grad, tensor->grad, zero_table);
  14631. }
  14632. } break;
  14633. case GGML_UNARY_OP_STEP:
  14634. {
  14635. if (src0->grad) {
  14636. // noop
  14637. }
  14638. } break;
  14639. case GGML_UNARY_OP_TANH:
  14640. {
  14641. GGML_ASSERT(false); // TODO: not implemented
  14642. } break;
  14643. case GGML_UNARY_OP_ELU:
  14644. {
  14645. GGML_ASSERT(false); // TODO: not implemented
  14646. } break;
  14647. case GGML_UNARY_OP_RELU:
  14648. {
  14649. if (src0->grad) {
  14650. src0->grad = ggml_add_or_set(ctx,
  14651. src0->grad,
  14652. ggml_mul(ctx,
  14653. ggml_step(ctx, src0),
  14654. tensor->grad),
  14655. zero_table);
  14656. }
  14657. } break;
  14658. case GGML_UNARY_OP_GELU:
  14659. {
  14660. GGML_ASSERT(false); // TODO: not implemented
  14661. } break;
  14662. case GGML_UNARY_OP_GELU_QUICK:
  14663. {
  14664. GGML_ASSERT(false); // TODO: not implemented
  14665. } break;
  14666. case GGML_UNARY_OP_SILU:
  14667. {
  14668. // necessary for llama
  14669. if (src0->grad) {
  14670. src0->grad = ggml_add_or_set(ctx,
  14671. src0->grad,
  14672. ggml_silu_back(ctx, src0, tensor->grad),
  14673. zero_table);
  14674. }
  14675. } break;
  14676. default:
  14677. GGML_ASSERT(false);
  14678. }
  14679. } break;
  14680. case GGML_OP_GET_REL_POS:
  14681. case GGML_OP_ADD_REL_POS:
  14682. case GGML_OP_MAP_UNARY:
  14683. case GGML_OP_MAP_BINARY:
  14684. case GGML_OP_MAP_CUSTOM1_F32:
  14685. case GGML_OP_MAP_CUSTOM2_F32:
  14686. case GGML_OP_MAP_CUSTOM3_F32:
  14687. case GGML_OP_MAP_CUSTOM1:
  14688. case GGML_OP_MAP_CUSTOM2:
  14689. case GGML_OP_MAP_CUSTOM3:
  14690. {
  14691. GGML_ASSERT(false); // not supported
  14692. } break;
  14693. case GGML_OP_CROSS_ENTROPY_LOSS:
  14694. {
  14695. if (src0->grad) {
  14696. src0->grad = ggml_add_or_set(ctx,
  14697. src0->grad,
  14698. ggml_cross_entropy_loss_back(ctx,
  14699. src0,
  14700. src1,
  14701. tensor->grad),
  14702. zero_table);
  14703. }
  14704. } break;
  14705. case GGML_OP_CROSS_ENTROPY_LOSS_BACK:
  14706. {
  14707. GGML_ASSERT(false); // not supported
  14708. } break;
  14709. case GGML_OP_NONE:
  14710. {
  14711. // nop
  14712. } break;
  14713. case GGML_OP_COUNT:
  14714. {
  14715. GGML_ASSERT(false);
  14716. } break;
  14717. }
  14718. for (int i = 0; i < GGML_MAX_SRC; ++i) {
  14719. if (tensor->src[i] && tensor->src[i]->grad) {
  14720. GGML_ASSERT(ggml_are_same_shape(tensor->src[i], tensor->src[i]->grad));
  14721. }
  14722. }
  14723. }
  14724. static void ggml_visit_parents(struct ggml_cgraph * cgraph, struct ggml_tensor * node) {
  14725. if (node->grad == NULL) {
  14726. // this usually happens when we generate intermediate nodes from constants in the backward pass
14727. // it can also happen during the forward pass, if the user performs computations with constants
  14728. if (node->op != GGML_OP_NONE) {
  14729. //GGML_PRINT_DEBUG("%s: warning: node %p has no grad, but op %d\n", __func__, (void *) node, node->op);
  14730. }
  14731. }
  14732. // check if already visited
  14733. if (hash_insert(cgraph->visited_hash_table, node)) {
  14734. return;
  14735. }
  14736. for (int i = 0; i < GGML_MAX_SRC; ++i) {
  14737. const int k =
  14738. (cgraph->order == GGML_CGRAPH_EVAL_ORDER_LEFT_TO_RIGHT) ? i :
  14739. (cgraph->order == GGML_CGRAPH_EVAL_ORDER_RIGHT_TO_LEFT) ? (GGML_MAX_SRC-1-i) :
14740. /* unknown order, just fall back to using i */ i;
  14741. if (node->src[k]) {
  14742. ggml_visit_parents(cgraph, node->src[k]);
  14743. }
  14744. }
  14745. if (node->op == GGML_OP_NONE && node->grad == NULL) {
  14746. // reached a leaf node, not part of the gradient graph (e.g. a constant)
  14747. GGML_ASSERT(cgraph->n_leafs < GGML_MAX_NODES);
  14748. if (strlen(node->name) == 0) {
  14749. ggml_format_name(node, "leaf_%d", cgraph->n_leafs);
  14750. }
  14751. cgraph->leafs[cgraph->n_leafs] = node;
  14752. cgraph->n_leafs++;
  14753. } else {
  14754. GGML_ASSERT(cgraph->n_nodes < GGML_MAX_NODES);
  14755. if (strlen(node->name) == 0) {
  14756. ggml_format_name(node, "node_%d", cgraph->n_nodes);
  14757. }
  14758. cgraph->nodes[cgraph->n_nodes] = node;
  14759. cgraph->grads[cgraph->n_nodes] = node->grad;
  14760. cgraph->n_nodes++;
  14761. }
  14762. }
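// because all parents are visited before the node itself, cgraph->nodes ends up in
// topological order and can be evaluated front to back. tiny illustrative case
// (hypothetical tensors): with c = ggml_mul(ctx, a, b) and d = ggml_add(ctx, c, a),
// visiting d records leafs {a, b} and nodes {c, d} in that order.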
  14763. static void ggml_build_forward_impl(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor, bool expand) {
  14764. if (!expand) {
  14765. cgraph->n_nodes = 0;
  14766. cgraph->n_leafs = 0;
  14767. }
  14768. const int n0 = cgraph->n_nodes;
  14769. UNUSED(n0);
  14770. ggml_visit_parents(cgraph, tensor);
  14771. const int n_new = cgraph->n_nodes - n0;
  14772. GGML_PRINT_DEBUG("%s: visited %d new nodes\n", __func__, n_new);
  14773. if (n_new > 0) {
14774. // the last added node should always be the starting point
  14775. GGML_ASSERT(cgraph->nodes[cgraph->n_nodes - 1] == tensor);
  14776. }
  14777. }
  14778. void ggml_build_forward_expand(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor) {
  14779. ggml_build_forward_impl(cgraph, tensor, true);
  14780. }
  14781. struct ggml_cgraph ggml_build_forward(struct ggml_tensor * tensor) {
  14782. struct ggml_cgraph result = {
  14783. /*.n_nodes =*/ 0,
  14784. /*.n_leafs =*/ 0,
  14785. /*.nodes =*/ { NULL },
  14786. /*.grads =*/ { NULL },
  14787. /*.leafs =*/ { NULL },
  14788. /*.hash_table =*/ { NULL },
  14789. /*.order =*/ GGML_CGRAPH_EVAL_ORDER_LEFT_TO_RIGHT,
  14790. /*.perf_runs =*/ 0,
  14791. /*.perf_cycles =*/ 0,
  14792. /*.perf_time_us =*/ 0,
  14793. };
  14794. ggml_build_forward_impl(&result, tensor, false);
  14795. return result;
  14796. }
  14797. void ggml_build_backward_expand(struct ggml_context * ctx, struct ggml_cgraph * gf, struct ggml_cgraph * gb, bool keep) {
  14798. GGML_ASSERT(gf->n_nodes > 0);
  14799. // if we are keeping the gradient graph, we have to detach the gradient nodes from the original graph
  14800. if (keep) {
  14801. for (int i = 0; i < gf->n_nodes; i++) {
  14802. struct ggml_tensor * node = gf->nodes[i];
  14803. if (node->grad) {
  14804. node->grad = ggml_dup_tensor(ctx, node);
  14805. gf->grads[i] = node->grad;
  14806. }
  14807. }
  14808. }
  14809. // remember original gradients which start with zero values
  14810. void ** zero_table = malloc(sizeof(void *) * GGML_GRAPH_HASHTABLE_SIZE);
  14811. memset(zero_table, 0, sizeof(void*) * GGML_GRAPH_HASHTABLE_SIZE);
  14812. for (int i = 0; i < gf->n_nodes; i++) {
  14813. if (gf->grads[i]) {
  14814. hash_insert(zero_table, gf->grads[i]);
  14815. }
  14816. }
  14817. for (int i = gf->n_nodes - 1; i >= 0; i--) {
  14818. struct ggml_tensor * node = gf->nodes[i];
  14819. // inplace operations to add gradients are not created by ggml_compute_backward
14820. // use the allocator to automatically make these operations inplace
  14821. if (node->grad) {
  14822. ggml_compute_backward(ctx, node, zero_table);
  14823. }
  14824. }
  14825. for (int i = 0; i < gf->n_nodes; i++) {
  14826. struct ggml_tensor * node = gf->nodes[i];
  14827. if (node->is_param) {
  14828. GGML_PRINT_DEBUG("%s: found root node %p\n", __func__, (void *) node);
  14829. ggml_build_forward_expand(gb, node->grad);
  14830. }
  14831. }
  14832. free(zero_table);
  14833. }
  14834. struct ggml_cgraph ggml_build_backward(struct ggml_context * ctx, struct ggml_cgraph * gf, bool keep) {
  14835. struct ggml_cgraph result = *gf;
  14836. ggml_build_backward_expand(ctx, gf, &result, keep);
  14837. return result;
  14838. }
  14839. struct ggml_cgraph * ggml_new_graph(struct ggml_context * ctx) {
  14840. struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_GRAPH, GGML_GRAPH_SIZE);
  14841. struct ggml_cgraph * cgraph = (struct ggml_cgraph *) ((char *) ctx->mem_buffer + obj->offs);
  14842. *cgraph = (struct ggml_cgraph) {
  14843. /*.n_nodes =*/ 0,
  14844. /*.n_leafs =*/ 0,
  14845. /*.nodes =*/ { NULL },
  14846. /*.grads =*/ { NULL },
  14847. /*.leafs =*/ { NULL },
  14848. /*.hash_table =*/ { NULL },
  14849. /*.order =*/ GGML_CGRAPH_EVAL_ORDER_LEFT_TO_RIGHT,
  14850. /*.perf_runs =*/ 0,
  14851. /*.perf_cycles =*/ 0,
  14852. /*.perf_time_us =*/ 0,
  14853. };
  14854. return cgraph;
  14855. }
  14856. struct ggml_cgraph * ggml_build_forward_ctx(struct ggml_context * ctx, struct ggml_tensor * tensor) {
  14857. struct ggml_cgraph * cgraph = ggml_new_graph(ctx);
  14858. ggml_build_forward_impl(cgraph, tensor, false);
  14859. return cgraph;
  14860. }
  14861. size_t ggml_graph_overhead(void) {
  14862. return GGML_OBJECT_SIZE + GGML_PAD(GGML_GRAPH_SIZE, GGML_MEM_ALIGN);
  14863. }
  14864. //
  14865. // thread data
  14866. //
  14867. // synchronization is done via busy loops
14868. // I tried using spin locks, but I'm not sure how to use them correctly - the things I tried were slower than busy loops
  14869. //
  14870. #ifdef __APPLE__
  14871. //#include <os/lock.h>
  14872. //
  14873. //typedef os_unfair_lock ggml_lock_t;
  14874. //
  14875. //#define ggml_lock_init(x) UNUSED(x)
  14876. //#define ggml_lock_destroy(x) UNUSED(x)
  14877. //#define ggml_lock_lock os_unfair_lock_lock
  14878. //#define ggml_lock_unlock os_unfair_lock_unlock
  14879. //
  14880. //#define GGML_LOCK_INITIALIZER OS_UNFAIR_LOCK_INIT
  14881. typedef int ggml_lock_t;
  14882. #define ggml_lock_init(x) UNUSED(x)
  14883. #define ggml_lock_destroy(x) UNUSED(x)
  14884. #define ggml_lock_lock(x) UNUSED(x)
  14885. #define ggml_lock_unlock(x) UNUSED(x)
  14886. #define GGML_LOCK_INITIALIZER 0
  14887. typedef pthread_t ggml_thread_t;
  14888. #define ggml_thread_create pthread_create
  14889. #define ggml_thread_join pthread_join
  14890. #else
  14891. //typedef pthread_spinlock_t ggml_lock_t;
  14892. //#define ggml_lock_init(x) pthread_spin_init(x, PTHREAD_PROCESS_PRIVATE)
  14893. //#define ggml_lock_destroy pthread_spin_destroy
  14894. //#define ggml_lock_lock pthread_spin_lock
  14895. //#define ggml_lock_unlock pthread_spin_unlock
  14896. typedef int ggml_lock_t;
  14897. #define ggml_lock_init(x) UNUSED(x)
  14898. #define ggml_lock_destroy(x) UNUSED(x)
  14899. #if defined(__x86_64__) || (defined(_MSC_VER) && defined(_M_AMD64))
  14900. #define ggml_lock_lock(x) _mm_pause()
  14901. #else
  14902. #define ggml_lock_lock(x) UNUSED(x)
  14903. #endif
  14904. #define ggml_lock_unlock(x) UNUSED(x)
  14905. #define GGML_LOCK_INITIALIZER 0
  14906. typedef pthread_t ggml_thread_t;
  14907. #define ggml_thread_create pthread_create
  14908. #define ggml_thread_join pthread_join
  14909. #endif
  14910. // Android's libc implementation "bionic" does not support setting affinity
  14911. #if defined(__linux__) && !defined(__BIONIC__)
  14912. static void set_numa_thread_affinity(int thread_n, int n_threads) {
  14913. if (!ggml_is_numa()) {
  14914. return;
  14915. }
14916. // run this thread on NUMA node node_num = thread_n / (threads per node)
  14917. const int node_num = thread_n / ((n_threads + g_state.numa.n_nodes - 1) / g_state.numa.n_nodes);
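// example: with 2 NUMA nodes and 8 threads, threads per node = ceil(8/2) = 4, so
// threads 0-3 are pinned to node 0 and threads 4-7 to node 1.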
  14918. struct ggml_numa_node * node = &g_state.numa.nodes[node_num];
  14919. size_t setsize = CPU_ALLOC_SIZE(g_state.numa.total_cpus);
  14920. cpu_set_t * cpus = CPU_ALLOC(g_state.numa.total_cpus);
  14921. CPU_ZERO_S(setsize, cpus);
  14922. for (size_t i = 0; i < node->n_cpus; ++i) {
  14923. CPU_SET_S(node->cpus[i], setsize, cpus);
  14924. }
  14925. int rv = pthread_setaffinity_np(pthread_self(), setsize, cpus);
  14926. if (rv) {
  14927. fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n",
  14928. strerror(rv));
  14929. }
  14930. CPU_FREE(cpus);
  14931. }
  14932. static void clear_numa_thread_affinity(void) {
  14933. if (!ggml_is_numa()) {
  14934. return;
  14935. }
  14936. size_t setsize = CPU_ALLOC_SIZE(g_state.numa.total_cpus);
  14937. cpu_set_t * cpus = CPU_ALLOC(g_state.numa.total_cpus);
  14938. CPU_ZERO_S(setsize, cpus);
  14939. for (unsigned i = 0; i < g_state.numa.total_cpus; ++i) {
  14940. CPU_SET_S(i, setsize, cpus);
  14941. }
  14942. int rv = pthread_setaffinity_np(pthread_self(), setsize, cpus);
  14943. if (rv) {
  14944. fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n",
  14945. strerror(rv));
  14946. }
  14947. CPU_FREE(cpus);
  14948. }
  14949. #else
  14950. // TODO: Windows etc.
  14951. // (the linux implementation may also work on BSD, someone should test)
  14952. static void set_numa_thread_affinity(int thread_n, int n_threads) { UNUSED(thread_n); UNUSED(n_threads); }
  14953. static void clear_numa_thread_affinity(void) {}
  14954. #endif
  14955. struct ggml_compute_state_shared {
  14956. const struct ggml_cgraph * cgraph;
  14957. const struct ggml_cplan * cplan;
  14958. int64_t perf_node_start_cycles;
  14959. int64_t perf_node_start_time_us;
  14960. const int n_threads;
  14961. // synchronization primitives
  14962. atomic_int n_active; // num active threads
  14963. atomic_int node_n; // active graph node
  14964. bool (*abort_callback)(void * data); // abort ggml_graph_compute when true
  14965. void * abort_callback_data;
  14966. };
  14967. struct ggml_compute_state {
  14968. ggml_thread_t thrd;
  14969. int ith;
  14970. struct ggml_compute_state_shared * shared;
  14971. };
  14972. static void ggml_graph_compute_perf_stats_node(struct ggml_tensor * node, const struct ggml_compute_state_shared * st) {
  14973. int64_t cycles_cur = ggml_perf_cycles() - st->perf_node_start_cycles;
  14974. int64_t time_us_cur = ggml_perf_time_us() - st->perf_node_start_time_us;
  14975. node->perf_runs++;
  14976. node->perf_cycles += cycles_cur;
  14977. node->perf_time_us += time_us_cur;
  14978. }
  14979. static thread_ret_t ggml_graph_compute_thread(void * data) {
  14980. struct ggml_compute_state * state = (struct ggml_compute_state *) data;
  14981. const struct ggml_cgraph * cgraph = state->shared->cgraph;
  14982. const struct ggml_cplan * cplan = state->shared->cplan;
  14983. const int * n_tasks_arr = cplan->n_tasks;
  14984. const int n_threads = state->shared->n_threads;
  14985. set_numa_thread_affinity(state->ith, n_threads);
  14986. int node_n = -1;
  14987. while (true) {
  14988. if (cplan->abort_callback && cplan->abort_callback(cplan->abort_callback_data)) {
  14989. state->shared->node_n += 1;
  14990. return (thread_ret_t) GGML_EXIT_ABORTED;
  14991. }
  14992. if (atomic_fetch_sub(&state->shared->n_active, 1) == 1) {
  14993. // all other threads are finished and spinning
14994. // do finalize and init here so we don't have to synchronize again
  14995. struct ggml_compute_params params = {
  14996. /*.type =*/ GGML_TASK_FINALIZE,
  14997. /*.ith =*/ 0,
  14998. /*.nth =*/ 0,
  14999. /*.wsize =*/ cplan->work_size,
  15000. /*.wdata =*/ cplan->work_data,
  15001. };
  15002. if (node_n != -1) {
  15003. /* FINALIZE */
  15004. struct ggml_tensor * node = state->shared->cgraph->nodes[node_n];
  15005. if (GGML_OP_HAS_FINALIZE[node->op]) {
  15006. params.nth = n_tasks_arr[node_n];
  15007. ggml_compute_forward(&params, node);
  15008. }
  15009. ggml_graph_compute_perf_stats_node(node, state->shared);
  15010. }
15011. // distribute new work, or execute it directly if it only needs a single task
  15012. while (++node_n < cgraph->n_nodes) {
  15013. GGML_PRINT_DEBUG_5("%s: %d/%d\n", __func__, node_n, cgraph->n_nodes);
  15014. struct ggml_tensor * node = cgraph->nodes[node_n];
  15015. const int n_tasks = n_tasks_arr[node_n];
  15016. state->shared->perf_node_start_cycles = ggml_perf_cycles();
  15017. state->shared->perf_node_start_time_us = ggml_perf_time_us();
  15018. params.nth = n_tasks;
  15019. /* INIT */
  15020. if (GGML_OP_HAS_INIT[node->op]) {
  15021. params.type = GGML_TASK_INIT;
  15022. ggml_compute_forward(&params, node);
  15023. }
  15024. if (n_tasks == 1) {
  15025. // TODO: maybe push node_n to the atomic but if other threads see n_tasks is 1,
  15026. // they do something more efficient than spinning (?)
  15027. params.type = GGML_TASK_COMPUTE;
  15028. ggml_compute_forward(&params, node);
  15029. if (GGML_OP_HAS_FINALIZE[node->op]) {
  15030. params.type = GGML_TASK_FINALIZE;
  15031. ggml_compute_forward(&params, node);
  15032. }
  15033. ggml_graph_compute_perf_stats_node(node, state->shared);
  15034. } else {
  15035. break;
  15036. }
  15037. if (cplan->abort_callback && cplan->abort_callback(cplan->abort_callback_data)) {
  15038. break;
  15039. }
  15040. }
  15041. atomic_store(&state->shared->n_active, n_threads);
  15042. atomic_store(&state->shared->node_n, node_n);
  15043. } else {
  15044. // wait for other threads to finish
  15045. const int last = node_n;
  15046. while (true) {
  15047. // TODO: this sched_yield can have significant impact on the performance - either positive or negative
  15048. // depending on the workload and the operating system.
15049. // since it is not clear what the best approach is, it should potentially become user-configurable
  15050. // ref: https://github.com/ggerganov/ggml/issues/291
  15051. #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
  15052. sched_yield();
  15053. #endif
  15054. node_n = atomic_load(&state->shared->node_n);
  15055. if (node_n != last) break;
15056. }
  15057. }
  15058. // check if we should stop
  15059. if (node_n >= cgraph->n_nodes) break;
  15060. /* COMPUTE */
  15061. struct ggml_tensor * node = cgraph->nodes[node_n];
  15062. const int n_tasks = n_tasks_arr[node_n];
  15063. struct ggml_compute_params params = {
  15064. /*.type =*/ GGML_TASK_COMPUTE,
  15065. /*.ith =*/ state->ith,
  15066. /*.nth =*/ n_tasks,
  15067. /*.wsize =*/ cplan->work_size,
  15068. /*.wdata =*/ cplan->work_data,
  15069. };
  15070. if (state->ith < n_tasks) {
  15071. ggml_compute_forward(&params, node);
  15072. }
  15073. }
  15074. return GGML_EXIT_SUCCESS;
  15075. }
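// How the worker threads coordinate (summary of the loop above):
//  - every thread decrements n_active; the last one to arrive (the "leader") FINALIZEs the
//    previous node, INITs the next one, resets n_active and publishes the new node index
//    in node_n
//  - the other threads spin on node_n until it changes; then every thread whose index ith
//    is < n_tasks runs the COMPUTE pass of that node
//  - nodes with n_tasks == 1 are executed entirely by the leader while the others keep
//    spinning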
  15076. struct ggml_cplan ggml_graph_plan(struct ggml_cgraph * cgraph, int n_threads) {
  15077. if (n_threads <= 0) {
  15078. n_threads = GGML_DEFAULT_N_THREADS;
  15079. }
  15080. size_t work_size = 0;
  15081. struct ggml_cplan cplan;
  15082. memset(&cplan, 0, sizeof(struct ggml_cplan));
  15083. // thread scheduling for the different operations + work buffer size estimation
  15084. for (int i = 0; i < cgraph->n_nodes; i++) {
  15085. int n_tasks = 1;
  15086. struct ggml_tensor * node = cgraph->nodes[i];
  15087. switch (node->op) {
  15088. case GGML_OP_CPY:
  15089. case GGML_OP_DUP:
  15090. {
  15091. n_tasks = n_threads;
  15092. size_t cur = 0;
  15093. if (ggml_is_quantized(node->type)) {
  15094. cur = ggml_type_size(GGML_TYPE_F32) * node->ne[0] * n_tasks;
  15095. }
  15096. work_size = MAX(work_size, cur);
  15097. } break;
  15098. case GGML_OP_ADD:
  15099. case GGML_OP_ADD1:
  15100. {
  15101. n_tasks = n_threads;
  15102. size_t cur = 0;
  15103. if (ggml_is_quantized(node->src[0]->type)) {
  15104. cur = ggml_type_size(GGML_TYPE_F32) * node->src[0]->ne[0] * n_tasks;
  15105. }
  15106. work_size = MAX(work_size, cur);
  15107. } break;
  15108. case GGML_OP_ACC:
  15109. {
  15110. n_tasks = n_threads;
  15111. size_t cur = 0;
  15112. if (ggml_is_quantized(node->src[0]->type)) {
  15113. cur = ggml_type_size(GGML_TYPE_F32) * node->src[1]->ne[0] * n_tasks;
  15114. }
  15115. work_size = MAX(work_size, cur);
  15116. } break;
  15117. case GGML_OP_SUB:
  15118. case GGML_OP_DIV:
  15119. case GGML_OP_SQR:
  15120. case GGML_OP_SQRT:
  15121. case GGML_OP_LOG:
  15122. case GGML_OP_SUM:
  15123. case GGML_OP_SUM_ROWS:
  15124. case GGML_OP_MEAN:
  15125. case GGML_OP_ARGMAX:
  15126. case GGML_OP_REPEAT:
  15127. case GGML_OP_REPEAT_BACK:
  15128. {
  15129. n_tasks = 1;
  15130. } break;
  15131. case GGML_OP_UNARY:
  15132. {
  15133. switch (ggml_get_unary_op(node)) {
  15134. case GGML_UNARY_OP_ABS:
  15135. case GGML_UNARY_OP_SGN:
  15136. case GGML_UNARY_OP_NEG:
  15137. case GGML_UNARY_OP_STEP:
  15138. case GGML_UNARY_OP_TANH:
  15139. case GGML_UNARY_OP_ELU:
  15140. case GGML_UNARY_OP_RELU:
  15141. {
  15142. n_tasks = 1;
  15143. } break;
  15144. case GGML_UNARY_OP_GELU:
  15145. case GGML_UNARY_OP_GELU_QUICK:
  15146. case GGML_UNARY_OP_SILU:
  15147. {
  15148. n_tasks = n_threads;
  15149. } break;
  15150. }
  15151. } break;
  15152. case GGML_OP_SILU_BACK:
  15153. case GGML_OP_MUL:
  15154. case GGML_OP_NORM:
  15155. case GGML_OP_RMS_NORM:
  15156. case GGML_OP_RMS_NORM_BACK:
  15157. case GGML_OP_GROUP_NORM:
  15158. {
  15159. n_tasks = n_threads;
  15160. } break;
  15161. case GGML_OP_CONCAT:
  15162. case GGML_OP_MUL_MAT:
  15163. {
  15164. n_tasks = n_threads;
  15165. // TODO: use different scheduling for different matrix sizes
  15166. //const int nr0 = ggml_nrows(node->src[0]);
  15167. //const int nr1 = ggml_nrows(node->src[1]);
  15168. //n_tasks = MIN(n_threads, MAX(1, nr0/128));
  15169. //printf("nr0 = %8d, nr1 = %8d, nr0*nr1 = %8d, n_tasks%d\n", nr0, nr1, nr0*nr1, n_tasks);
  15170. size_t cur = 0;
  15171. const enum ggml_type vec_dot_type = type_traits[node->src[0]->type].vec_dot_type;
  15172. #if defined(GGML_USE_CUBLAS)
  15173. if (ggml_cuda_can_mul_mat(node->src[0], node->src[1], node)) {
15174. n_tasks = 1; // TODO: setting this actually does nothing here -
15175. // the other threads are still spinning
  15176. } else
  15177. #elif defined(GGML_USE_CLBLAST)
  15178. if (ggml_cl_can_mul_mat(node->src[0], node->src[1], node)) {
15179. n_tasks = 1; // TODO: setting this actually does nothing here -
15180. // the other threads are still spinning
  15181. cur = ggml_cl_mul_mat_get_wsize(node->src[0], node->src[1], node);
  15182. } else
  15183. #endif
  15184. #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
  15185. if (ggml_compute_forward_mul_mat_use_blas(node->src[0], node->src[1], node)) {
15186. n_tasks = 1; // TODO: setting this actually does nothing here -
15187. // the other threads are still spinning
  15188. if (node->src[0]->type != GGML_TYPE_F32) {
  15189. // here we need memory just for single 2D matrix from src0
  15190. cur = ggml_type_size(GGML_TYPE_F32)*(node->src[0]->ne[0]*node->src[0]->ne[1]);
  15191. }
  15192. } else
  15193. #endif
  15194. if (node->src[1]->type != vec_dot_type) {
  15195. cur = ggml_type_size(vec_dot_type)*ggml_nelements(node->src[1])/ggml_blck_size(vec_dot_type);
  15196. } else {
  15197. cur = 0;
  15198. }
  15199. work_size = MAX(work_size, cur);
  15200. } break;
  15201. case GGML_OP_OUT_PROD:
  15202. {
  15203. n_tasks = n_threads;
  15204. size_t cur = 0;
  15205. if (ggml_is_quantized(node->src[0]->type)) {
  15206. cur = ggml_type_size(GGML_TYPE_F32) * node->src[0]->ne[0] * n_tasks;
  15207. }
  15208. work_size = MAX(work_size, cur);
  15209. } break;
  15210. case GGML_OP_SCALE:
  15211. {
  15212. n_tasks = 1;
  15213. } break;
  15214. case GGML_OP_SET:
  15215. case GGML_OP_CONT:
  15216. case GGML_OP_RESHAPE:
  15217. case GGML_OP_VIEW:
  15218. case GGML_OP_PERMUTE:
  15219. case GGML_OP_TRANSPOSE:
  15220. case GGML_OP_GET_ROWS:
  15221. case GGML_OP_GET_ROWS_BACK:
  15222. case GGML_OP_DIAG:
  15223. {
  15224. n_tasks = 1;
  15225. } break;
  15226. case GGML_OP_DIAG_MASK_ZERO:
  15227. case GGML_OP_DIAG_MASK_INF:
  15228. case GGML_OP_SOFT_MAX:
  15229. case GGML_OP_SOFT_MAX_BACK:
  15230. case GGML_OP_ROPE:
  15231. case GGML_OP_ROPE_BACK:
  15232. case GGML_OP_ADD_REL_POS:
  15233. {
  15234. n_tasks = n_threads;
  15235. } break;
  15236. case GGML_OP_ALIBI:
  15237. {
  15238. n_tasks = 1; //TODO
  15239. } break;
  15240. case GGML_OP_CLAMP:
  15241. {
  15242. n_tasks = 1; //TODO
  15243. } break;
  15244. case GGML_OP_CONV_1D:
  15245. {
  15246. n_tasks = n_threads;
  15247. GGML_ASSERT(node->src[0]->ne[3] == 1);
  15248. GGML_ASSERT(node->src[1]->ne[2] == 1);
  15249. GGML_ASSERT(node->src[1]->ne[3] == 1);
  15250. const int64_t ne00 = node->src[0]->ne[0];
  15251. const int64_t ne01 = node->src[0]->ne[1];
  15252. const int64_t ne02 = node->src[0]->ne[2];
  15253. const int64_t ne10 = node->src[1]->ne[0];
  15254. const int64_t ne11 = node->src[1]->ne[1];
  15255. const int64_t ne0 = node->ne[0];
  15256. const int64_t ne1 = node->ne[1];
  15257. const int64_t nk = ne00;
  15258. const int64_t ew0 = nk * ne01;
  15259. UNUSED(ne02);
  15260. UNUSED(ne10);
  15261. UNUSED(ne11);
  15262. size_t cur = 0;
  15263. if (node->src[0]->type == GGML_TYPE_F16 &&
  15264. node->src[1]->type == GGML_TYPE_F32) {
  15265. cur = sizeof(ggml_fp16_t)*(ne0*ne1*ew0);
  15266. } else if (node->src[0]->type == GGML_TYPE_F32 &&
  15267. node->src[1]->type == GGML_TYPE_F32) {
  15268. cur = sizeof(float)*(ne0*ne1*ew0);
  15269. } else {
  15270. GGML_ASSERT(false);
  15271. }
  15272. work_size = MAX(work_size, cur);
  15273. } break;
  15274. case GGML_OP_CONV_1D_STAGE_0:
  15275. {
  15276. n_tasks = n_threads;
  15277. } break;
  15278. case GGML_OP_CONV_1D_STAGE_1:
  15279. {
  15280. n_tasks = n_threads;
  15281. } break;
  15282. case GGML_OP_CONV_TRANSPOSE_1D:
  15283. {
  15284. n_tasks = n_threads;
  15285. GGML_ASSERT(node->src[0]->ne[3] == 1);
  15286. GGML_ASSERT(node->src[1]->ne[2] == 1);
  15287. GGML_ASSERT(node->src[1]->ne[3] == 1);
  15288. const int64_t ne00 = node->src[0]->ne[0]; // K
  15289. const int64_t ne01 = node->src[0]->ne[1]; // Cout
  15290. const int64_t ne02 = node->src[0]->ne[2]; // Cin
  15291. const int64_t ne10 = node->src[1]->ne[0]; // L
  15292. const int64_t ne11 = node->src[1]->ne[1]; // Cin
  15293. size_t cur = 0;
  15294. if (node->src[0]->type == GGML_TYPE_F16 &&
  15295. node->src[1]->type == GGML_TYPE_F32) {
  15296. cur += sizeof(ggml_fp16_t)*ne00*ne01*ne02;
  15297. cur += sizeof(ggml_fp16_t)*ne10*ne11;
  15298. } else if (node->src[0]->type == GGML_TYPE_F32 &&
  15299. node->src[1]->type == GGML_TYPE_F32) {
  15300. cur += sizeof(float)*ne00*ne01*ne02;
  15301. cur += sizeof(float)*ne10*ne11;
  15302. } else {
  15303. GGML_ASSERT(false);
  15304. }
  15305. work_size = MAX(work_size, cur);
  15306. } break;
  15307. case GGML_OP_CONV_2D:
  15308. {
  15309. n_tasks = n_threads;
  15310. const int64_t ne00 = node->src[0]->ne[0]; // W
  15311. const int64_t ne01 = node->src[0]->ne[1]; // H
  15312. const int64_t ne02 = node->src[0]->ne[2]; // C
  15313. const int64_t ne03 = node->src[0]->ne[3]; // N
  15314. const int64_t ne10 = node->src[1]->ne[0]; // W
  15315. const int64_t ne11 = node->src[1]->ne[1]; // H
  15316. const int64_t ne12 = node->src[1]->ne[2]; // C
  15317. const int64_t ne0 = node->ne[0];
  15318. const int64_t ne1 = node->ne[1];
  15319. const int64_t ne2 = node->ne[2];
  15320. const int64_t nk = ne00*ne01;
  15321. const int64_t ew0 = nk * ne02;
  15322. UNUSED(ne03);
  15323. UNUSED(ne2);
  15324. size_t cur = 0;
  15325. if (node->src[0]->type == GGML_TYPE_F16 &&
  15326. node->src[1]->type == GGML_TYPE_F32) {
  15327. cur = sizeof(ggml_fp16_t)*(ne0*ne1*ew0);
  15328. } else if (node->src[0]->type == GGML_TYPE_F32 &&
  15329. node->src[1]->type == GGML_TYPE_F32) {
  15330. cur = sizeof(float)* (ne10*ne11*ne12);
  15331. } else {
  15332. GGML_ASSERT(false);
  15333. }
  15334. work_size = MAX(work_size, cur);
  15335. } break;
  15336. case GGML_OP_CONV_TRANSPOSE_2D:
  15337. {
  15338. n_tasks = n_threads;
  15339. const int64_t ne00 = node->src[0]->ne[0]; // W
  15340. const int64_t ne01 = node->src[0]->ne[1]; // H
  15341. const int64_t ne02 = node->src[0]->ne[2]; // Channels Out
  15342. const int64_t ne03 = node->src[0]->ne[3]; // Channels In
  15343. const int64_t ne10 = node->src[1]->ne[0]; // W
  15344. const int64_t ne11 = node->src[1]->ne[1]; // H
  15345. const int64_t ne12 = node->src[1]->ne[2]; // Channels In
  15346. size_t cur = 0;
  15347. cur += sizeof(ggml_fp16_t)*ne00*ne01*ne02*ne03;
  15348. cur += sizeof(ggml_fp16_t)*ne10*ne11*ne12;
  15349. work_size = MAX(work_size, cur);
  15350. } break;
  15351. case GGML_OP_POOL_1D:
  15352. case GGML_OP_POOL_2D:
  15353. {
  15354. n_tasks = 1;
  15355. } break;
  15356. case GGML_OP_UPSCALE:
  15357. {
  15358. n_tasks = n_threads;
  15359. } break;
  15360. case GGML_OP_FLASH_ATTN:
  15361. {
  15362. n_tasks = n_threads;
  15363. size_t cur = 0;
  15364. const int64_t ne11 = ggml_up(node->src[1]->ne[1], GGML_SOFT_MAX_UNROLL);
  15365. if (node->src[1]->type == GGML_TYPE_F32) {
  15366. cur = sizeof(float)*ne11*n_tasks; // TODO: this can become (n_tasks-1)
  15367. cur += sizeof(float)*ne11*n_tasks; // this is overestimated by x2
  15368. }
  15369. if (node->src[1]->type == GGML_TYPE_F16) {
  15370. cur = sizeof(float)*ne11*n_tasks; // TODO: this can become (n_tasks-1)
  15371. cur += sizeof(float)*ne11*n_tasks; // this is overestimated by x2
  15372. }
  15373. work_size = MAX(work_size, cur);
  15374. } break;
  15375. case GGML_OP_FLASH_FF:
  15376. {
  15377. n_tasks = n_threads;
  15378. size_t cur = 0;
  15379. if (node->src[1]->type == GGML_TYPE_F32) {
  15380. cur = sizeof(float)*node->src[1]->ne[1]*n_tasks; // TODO: this can become (n_tasks-1)
  15381. cur += sizeof(float)*node->src[1]->ne[1]*n_tasks; // this is overestimated by x2
  15382. }
  15383. if (node->src[1]->type == GGML_TYPE_F16) {
  15384. cur = sizeof(float)*node->src[1]->ne[1]*n_tasks; // TODO: this can become (n_tasks-1)
  15385. cur += sizeof(float)*node->src[1]->ne[1]*n_tasks; // this is overestimated by x2
  15386. }
  15387. work_size = MAX(work_size, cur);
  15388. } break;
  15389. case GGML_OP_FLASH_ATTN_BACK:
  15390. {
  15391. n_tasks = n_threads;
  15392. size_t cur = 0;
  15393. const int64_t D = node->src[0]->ne[0];
  15394. const int64_t ne11 = ggml_up(node->src[1]->ne[1], GGML_SOFT_MAX_UNROLL);
  15395. const int64_t mxDn = MAX(D, ne11) * 2; // *2 because of S and SM in ggml_compute_forward_flash_attn_back
  15396. if (node->src[1]->type == GGML_TYPE_F32) {
  15397. cur = sizeof(float)*mxDn*n_tasks; // TODO: this can become (n_tasks-1)
  15398. cur += sizeof(float)*mxDn*n_tasks; // this is overestimated by x2
  15399. }
  15400. if (node->src[1]->type == GGML_TYPE_F16) {
  15401. cur = sizeof(float)*mxDn*n_tasks; // TODO: this can become (n_tasks-1)
  15402. cur += sizeof(float)*mxDn*n_tasks; // this is overestimated by x2
  15403. }
  15404. work_size = MAX(work_size, cur);
  15405. } break;
  15406. case GGML_OP_WIN_PART:
  15407. case GGML_OP_WIN_UNPART:
  15408. case GGML_OP_GET_REL_POS:
  15409. case GGML_OP_MAP_UNARY:
  15410. case GGML_OP_MAP_BINARY:
  15411. case GGML_OP_MAP_CUSTOM1_F32:
  15412. case GGML_OP_MAP_CUSTOM2_F32:
  15413. case GGML_OP_MAP_CUSTOM3_F32:
  15414. {
  15415. n_tasks = 1;
  15416. } break;
  15417. case GGML_OP_MAP_CUSTOM1:
  15418. {
  15419. struct ggml_map_custom1_op_params * p = (struct ggml_map_custom1_op_params *) node->op_params;
  15420. if (p->n_tasks == GGML_N_TASKS_MAX) {
  15421. n_tasks = n_threads;
  15422. } else {
  15423. n_tasks = MIN(p->n_tasks, n_threads);
  15424. }
  15425. } break;
  15426. case GGML_OP_MAP_CUSTOM2:
  15427. {
  15428. struct ggml_map_custom2_op_params * p = (struct ggml_map_custom2_op_params *) node->op_params;
  15429. if (p->n_tasks == GGML_N_TASKS_MAX) {
  15430. n_tasks = n_threads;
  15431. } else {
  15432. n_tasks = MIN(p->n_tasks, n_threads);
  15433. }
  15434. } break;
  15435. case GGML_OP_MAP_CUSTOM3:
  15436. {
  15437. struct ggml_map_custom3_op_params * p = (struct ggml_map_custom3_op_params *) node->op_params;
  15438. if (p->n_tasks == GGML_N_TASKS_MAX) {
  15439. n_tasks = n_threads;
  15440. } else {
  15441. n_tasks = MIN(p->n_tasks, n_threads);
  15442. }
  15443. } break;
  15444. case GGML_OP_CROSS_ENTROPY_LOSS:
  15445. {
  15446. n_tasks = n_threads;
  15447. size_t cur = ggml_type_size(node->type)*(n_tasks + node->src[0]->ne[0]*n_tasks);
  15448. work_size = MAX(work_size, cur);
  15449. } break;
  15450. case GGML_OP_CROSS_ENTROPY_LOSS_BACK:
  15451. {
  15452. n_tasks = n_threads;
  15453. } break;
  15454. case GGML_OP_NONE:
  15455. {
  15456. n_tasks = 1;
  15457. } break;
  15458. case GGML_OP_COUNT:
  15459. {
  15460. GGML_ASSERT(false);
  15461. } break;
  15462. }
  15463. cplan.n_tasks[i] = n_tasks;
  15464. }
  15465. if (work_size > 0) {
  15466. work_size += CACHE_LINE_SIZE*(n_threads - 1);
  15467. }
  15468. cplan.n_threads = n_threads;
  15469. cplan.work_size = work_size;
  15470. cplan.work_data = NULL;
  15471. return cplan;
  15472. }
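// Minimal usage sketch (illustrative only - "gf" and the thread count are assumptions,
// and the work buffer is allocated with malloc here purely for the example; see
// ggml_graph_compute_with_ctx() below for the context-backed variant):
//
//     struct ggml_cplan cplan = ggml_graph_plan(&gf, /*n_threads =*/ 4);
//     uint8_t * work = NULL;
//     if (cplan.work_size > 0) {
//         work = malloc(cplan.work_size);
//         cplan.work_data = work;
//     }
//     ggml_graph_compute(&gf, &cplan);
//     free(work);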
  15473. int ggml_graph_compute(struct ggml_cgraph * cgraph, struct ggml_cplan * cplan) {
  15474. {
  15475. GGML_ASSERT(cplan);
  15476. GGML_ASSERT(cplan->n_threads > 0);
  15477. if (cplan->work_size > 0) {
  15478. GGML_ASSERT(cplan->work_data);
  15479. }
  15480. for (int i = 0; i < cgraph->n_nodes; ++i) {
  15481. if (cgraph->nodes[i]->op != GGML_OP_NONE) {
  15482. GGML_ASSERT(cplan->n_tasks[i] > 0);
  15483. }
  15484. }
  15485. }
  15486. const int n_threads = cplan->n_threads;
  15487. struct ggml_compute_state_shared state_shared = {
  15488. /*.cgraph =*/ cgraph,
15489. /*.cplan =*/ cplan,
  15490. /*.perf_node_start_cycles =*/ 0,
  15491. /*.perf_node_start_time_us =*/ 0,
  15492. /*.n_threads =*/ n_threads,
  15493. /*.n_active =*/ n_threads,
  15494. /*.node_n =*/ -1,
  15495. /*.abort_callback =*/ NULL,
  15496. /*.abort_callback_data =*/ NULL,
  15497. };
  15498. struct ggml_compute_state * workers = alloca(sizeof(struct ggml_compute_state)*n_threads);
  15499. // create thread pool
  15500. if (n_threads > 1) {
  15501. for (int j = 1; j < n_threads; ++j) {
  15502. workers[j] = (struct ggml_compute_state) {
  15503. .thrd = 0,
  15504. .ith = j,
  15505. .shared = &state_shared,
  15506. };
  15507. const int rc = ggml_thread_create(&workers[j].thrd, NULL, ggml_graph_compute_thread, &workers[j]);
  15508. GGML_ASSERT(rc == 0);
  15509. UNUSED(rc);
  15510. }
  15511. }
  15512. workers[0].ith = 0;
  15513. workers[0].shared = &state_shared;
  15514. const int64_t perf_start_cycles = ggml_perf_cycles();
  15515. const int64_t perf_start_time_us = ggml_perf_time_us();
  15516. // this is a work thread too
  15517. int compute_status = (size_t) ggml_graph_compute_thread(&workers[0]);
  15518. // don't leave affinity set on the main thread
  15519. clear_numa_thread_affinity();
  15520. // join or kill thread pool
  15521. if (n_threads > 1) {
  15522. for (int j = 1; j < n_threads; j++) {
  15523. const int rc = ggml_thread_join(workers[j].thrd, NULL);
  15524. GGML_ASSERT(rc == 0);
  15525. }
  15526. }
  15527. // performance stats (graph)
  15528. {
  15529. int64_t perf_cycles_cur = ggml_perf_cycles() - perf_start_cycles;
  15530. int64_t perf_time_us_cur = ggml_perf_time_us() - perf_start_time_us;
  15531. cgraph->perf_runs++;
  15532. cgraph->perf_cycles += perf_cycles_cur;
  15533. cgraph->perf_time_us += perf_time_us_cur;
  15534. GGML_PRINT_DEBUG("%s: perf (%d) - cpu = %.3f / %.3f ms, wall = %.3f / %.3f ms\n",
  15535. __func__, cgraph->perf_runs,
  15536. (double) perf_cycles_cur / (double) ggml_cycles_per_ms(),
  15537. (double) cgraph->perf_cycles / (double) ggml_cycles_per_ms() / (double) cgraph->perf_runs,
  15538. (double) perf_time_us_cur / 1000.0,
  15539. (double) cgraph->perf_time_us / 1000.0 / cgraph->perf_runs);
  15540. }
  15541. return compute_status;
  15542. }
  15543. void ggml_graph_reset(struct ggml_cgraph * cgraph) {
  15544. for (int i = 0; i < cgraph->n_nodes; i++) {
  15545. struct ggml_tensor * grad = cgraph->grads[i];
  15546. if (grad) {
  15547. ggml_set_zero(grad);
  15548. }
  15549. }
  15550. }
  15551. void ggml_graph_compute_with_ctx(struct ggml_context * ctx, struct ggml_cgraph * cgraph, int n_threads) {
  15552. struct ggml_cplan cplan = ggml_graph_plan(cgraph, n_threads);
  15553. struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_WORK_BUFFER, cplan.work_size);
  15554. cplan.work_data = (uint8_t *)ctx->mem_buffer + obj->offs;
  15555. ggml_graph_compute(cgraph, &cplan);
  15556. }
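// The helper above is equivalent to the malloc-based sketch after ggml_graph_plan(),
// except that the work buffer is carved out of ctx as a GGML_OBJECT_WORK_BUFFER object,
// so its lifetime is tied to the context.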
  15557. struct ggml_tensor * ggml_graph_get_tensor(struct ggml_cgraph * cgraph, const char * name) {
  15558. for (int i = 0; i < cgraph->n_leafs; i++) {
  15559. struct ggml_tensor * leaf = cgraph->leafs[i];
  15560. if (strcmp(leaf->name, name) == 0) {
  15561. return leaf;
  15562. }
  15563. }
  15564. for (int i = 0; i < cgraph->n_nodes; i++) {
  15565. struct ggml_tensor * node = cgraph->nodes[i];
  15566. if (strcmp(node->name, name) == 0) {
  15567. return node;
  15568. }
  15569. }
  15570. return NULL;
  15571. }
  15572. static void ggml_graph_export_leaf(const struct ggml_tensor * tensor, FILE * fout) {
  15573. const int64_t * ne = tensor->ne;
  15574. const size_t * nb = tensor->nb;
  15575. fprintf(fout, "%-6s %-12s %8d %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %16zu %16zu %16zu %16zu %16p %32s\n",
  15576. ggml_type_name(tensor->type),
  15577. ggml_op_name (tensor->op),
  15578. tensor->n_dims,
  15579. ne[0], ne[1], ne[2], ne[3],
  15580. nb[0], nb[1], nb[2], nb[3],
  15581. tensor->data,
  15582. tensor->name);
  15583. }
  15584. static void ggml_graph_export_node(const struct ggml_tensor * tensor, const char * arg, FILE * fout) {
  15585. const int64_t * ne = tensor->ne;
  15586. const size_t * nb = tensor->nb;
  15587. fprintf(fout, "%-6s %-6s %-12s %8d %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %16zu %16zu %16zu %16zu %16p %32s\n",
  15588. arg,
  15589. ggml_type_name(tensor->type),
  15590. ggml_op_name (tensor->op),
  15591. tensor->n_dims,
  15592. ne[0], ne[1], ne[2], ne[3],
  15593. nb[0], nb[1], nb[2], nb[3],
  15594. tensor->data,
  15595. tensor->name);
  15596. }
  15597. void ggml_graph_export(const struct ggml_cgraph * cgraph, const char * fname) {
  15598. uint64_t size_eval = 0;
  15599. // compute size of intermediate results
  15600. // TODO: does not take into account scratch buffers !!!!
  15601. for (int i = 0; i < cgraph->n_nodes; ++i) {
  15602. size_eval += ggml_nbytes_pad(cgraph->nodes[i]);
  15603. }
  15604. // print
  15605. {
  15606. FILE * fout = stdout;
  15607. fprintf(fout, "\n");
  15608. fprintf(fout, "%-16s %8x\n", "magic", GGML_FILE_MAGIC);
  15609. fprintf(fout, "%-16s %8d\n", "version", GGML_FILE_VERSION);
  15610. fprintf(fout, "%-16s %8d\n", "leafs", cgraph->n_leafs);
  15611. fprintf(fout, "%-16s %8d\n", "nodes", cgraph->n_nodes);
  15612. fprintf(fout, "%-16s %" PRIu64 "\n", "eval", size_eval);
  15613. // header
  15614. fprintf(fout, "\n");
  15615. fprintf(fout, "%-6s %-12s %8s %8s %8s %8s %8s %16s %16s %16s %16s %16s %16s\n",
  15616. "TYPE", "OP", "NDIMS", "NE0", "NE1", "NE2", "NE3", "NB0", "NB1", "NB2", "NB3", "DATA", "NAME");
  15617. for (int i = 0; i < cgraph->n_leafs; ++i) {
  15618. ggml_graph_export_leaf(cgraph->leafs[i], fout);
  15619. GGML_ASSERT(cgraph->leafs[i]->op == GGML_OP_NONE);
  15620. GGML_ASSERT(cgraph->leafs[i]->src[0] == NULL);
  15621. GGML_ASSERT(cgraph->leafs[i]->src[1] == NULL);
  15622. }
  15623. // header
  15624. fprintf(fout, "\n");
  15625. fprintf(fout, "%-6s %-6s %-12s %8s %8s %8s %8s %8s %16s %16s %16s %16s %8s %16s %16s\n",
  15626. "ARG", "TYPE", "OP", "NDIMS", "NE0", "NE1", "NE2", "NE3", "NB0", "NB1", "NB2", "NB3", "NTASKS", "DATA", "NAME");
  15627. for (int i = 0; i < cgraph->n_nodes; ++i) {
  15628. ggml_graph_export_node(cgraph->nodes[i], "DST", fout);
  15629. for (int j = 0; j < GGML_MAX_SRC; ++j) {
  15630. if (cgraph->nodes[i]->src[j]) {
  15631. ggml_graph_export_node(cgraph->nodes[i]->src[j], "SRC", fout);
  15632. }
  15633. }
  15634. fprintf(fout, "\n");
  15635. }
  15636. fprintf(fout, "\n");
  15637. }
  15638. // write binary data
  15639. {
  15640. FILE * fout = fopen(fname, "wb");
  15641. if (!fout) {
  15642. fprintf(stderr, "%s: failed to open %s\n", __func__, fname);
  15643. return;
  15644. }
  15645. // header
  15646. {
  15647. const uint32_t magic = GGML_FILE_MAGIC;
  15648. const uint32_t version = GGML_FILE_VERSION;
  15649. const uint32_t n_leafs = cgraph->n_leafs;
  15650. const uint32_t nodes = cgraph->n_nodes;
  15651. fwrite(&magic, sizeof(uint32_t), 1, fout);
  15652. fwrite(&version, sizeof(uint32_t), 1, fout);
  15653. fwrite(&n_leafs, sizeof(uint32_t), 1, fout);
  15654. fwrite(&nodes, sizeof(uint32_t), 1, fout);
  15655. fwrite(&size_eval, sizeof(uint64_t), 1, fout);
  15656. }
  15657. // leafs
  15658. {
  15659. for (int i = 0; i < cgraph->n_leafs; ++i) {
  15660. const struct ggml_tensor * tensor = cgraph->leafs[i];
  15661. const uint32_t type = tensor->type;
  15662. const uint32_t op = tensor->op;
  15663. const uint32_t n_dims = tensor->n_dims;
  15664. fwrite(&type, sizeof(uint32_t), 1, fout);
  15665. fwrite(&op, sizeof(uint32_t), 1, fout);
  15666. fwrite(&n_dims, sizeof(uint32_t), 1, fout);
  15667. for (int j = 0; j < GGML_MAX_DIMS; ++j) {
  15668. const uint64_t ne = tensor->ne[j];
  15669. const uint64_t nb = tensor->nb[j];
  15670. fwrite(&ne, sizeof(uint64_t), 1, fout);
  15671. fwrite(&nb, sizeof(uint64_t), 1, fout);
  15672. }
  15673. fwrite(tensor->name, sizeof(char), GGML_MAX_NAME, fout);
  15674. fwrite(tensor->op_params, sizeof(char), GGML_MAX_OP_PARAMS, fout);
  15675. // dump the data
  15676. // TODO: pad this to 32 byte boundary
  15677. {
  15678. const size_t size = ggml_nbytes(tensor);
  15679. fwrite(tensor->data, sizeof(char), size, fout);
  15680. }
  15681. }
  15682. }
  15683. // nodes
  15684. {
  15685. for (int i = 0; i < cgraph->n_nodes; ++i) {
  15686. const struct ggml_tensor * tensor = cgraph->nodes[i];
  15687. const uint32_t type = tensor->type;
  15688. const uint32_t op = tensor->op;
  15689. const uint32_t n_dims = tensor->n_dims;
  15690. fwrite(&type, sizeof(uint32_t), 1, fout);
  15691. fwrite(&op, sizeof(uint32_t), 1, fout);
  15692. fwrite(&n_dims, sizeof(uint32_t), 1, fout);
  15693. for (int j = 0; j < GGML_MAX_DIMS; ++j) {
  15694. const uint64_t ne = tensor->ne[j];
  15695. const uint64_t nb = tensor->nb[j];
  15696. fwrite(&ne, sizeof(uint64_t), 1, fout);
  15697. fwrite(&nb, sizeof(uint64_t), 1, fout);
  15698. }
  15699. fwrite(tensor->name, sizeof(char), GGML_MAX_NAME, fout);
  15700. fwrite(tensor->op_params, sizeof(char), GGML_MAX_OP_PARAMS, fout);
  15701. // output the op arguments
  15702. {
  15703. struct ggml_tensor * args[GGML_MAX_SRC] = { NULL };
  15704. for (int j = 0; j < GGML_MAX_SRC; ++j) {
  15705. args[j] = tensor->src[j];
  15706. }
  15707. for (int j = 0; j < GGML_MAX_SRC; ++j) {
  15708. if (args[j]) {
  15709. int32_t idx = -1;
  15710. // check if leaf
  15711. {
  15712. for (int k = 0; k < cgraph->n_leafs; ++k) {
  15713. if (args[j] == cgraph->leafs[k]) {
  15714. idx = k;
  15715. break;
  15716. }
  15717. }
  15718. }
  15719. // check if node
  15720. if (idx == -1) {
  15721. for (int k = 0; k < cgraph->n_nodes; ++k) {
  15722. if (args[j] == cgraph->nodes[k]) {
  15723. idx = GGML_MAX_NODES + k;
  15724. break;
  15725. }
  15726. }
  15727. }
  15728. if (idx == -1) {
  15729. fprintf(stderr, "%s: failed to find tensor, arg = %d, node = %d\n", __func__, j, i);
  15730. return;
  15731. }
  15732. fwrite(&idx, sizeof(int32_t), 1, fout);
  15733. } else {
  15734. const int32_t nul = -1;
  15735. fwrite(&nul, sizeof(int32_t), 1, fout);
  15736. }
  15737. }
  15738. }
  15739. }
  15740. }
  15741. fclose(fout);
  15742. }
  15743. }
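// Rough sketch of the binary layout written above (all integers in host byte order, as
// produced by fwrite):
//
//   header : magic u32, version u32, n_leafs u32, n_nodes u32, size_eval u64
//   leafs  : per leaf - type u32, op u32, n_dims u32,
//            GGML_MAX_DIMS x { ne u64, nb u64 },
//            name[GGML_MAX_NAME], op_params[GGML_MAX_OP_PARAMS], raw tensor data
//   nodes  : per node - same fields as a leaf but no raw data; instead
//            GGML_MAX_SRC x i32 source indices (a leaf is referenced by its index,
//            a node by GGML_MAX_NODES + index, -1 means unused)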
  15744. struct ggml_cgraph ggml_graph_import(const char * fname, struct ggml_context ** ctx_data, struct ggml_context ** ctx_eval) {
  15745. assert(*ctx_data == NULL);
  15746. assert(*ctx_eval == NULL);
  15747. struct ggml_cgraph result = { 0 };
  15748. struct ggml_tensor * data = NULL;
  15749. // read file into data
  15750. {
  15751. FILE * fin = fopen(fname, "rb");
  15752. if (!fin) {
  15753. fprintf(stderr, "%s: failed to open %s\n", __func__, fname);
  15754. return result;
  15755. }
  15756. size_t fsize = 0;
  15757. fseek(fin, 0, SEEK_END);
  15758. fsize = ftell(fin);
  15759. fseek(fin, 0, SEEK_SET);
  15760. // create the data context
  15761. {
  15762. const size_t overhead = 1*ggml_tensor_overhead();
  15763. struct ggml_init_params params = {
  15764. .mem_size = fsize + overhead,
  15765. .mem_buffer = NULL,
  15766. .no_alloc = false,
  15767. };
  15768. *ctx_data = ggml_init(params);
  15769. if (!*ctx_data) {
  15770. fprintf(stderr, "%s: failed to create ggml context\n", __func__);
  15771. fclose(fin);
  15772. return result;
  15773. }
  15774. }
  15775. data = ggml_new_tensor_1d(*ctx_data, GGML_TYPE_I8, fsize);
  15776. {
  15777. const size_t ret = fread(data->data, sizeof(char), fsize, fin);
  15778. if (ret != fsize) {
  15779. fprintf(stderr, "%s: failed to read %s\n", __func__, fname);
  15780. fclose(fin);
  15781. return result;
  15782. }
  15783. }
  15784. fclose(fin);
  15785. }
  15786. // populate result
  15787. {
  15788. char * ptr = (char *) data->data;
  15789. const uint32_t magic = *(const uint32_t *) ptr; ptr += sizeof(magic);
  15790. if (magic != GGML_FILE_MAGIC) {
  15791. fprintf(stderr, "%s: invalid magic number, got %08x\n", __func__, magic);
  15792. return result;
  15793. }
  15794. const uint32_t version = *(const uint32_t *) ptr; ptr += sizeof(version);
  15795. if (version != GGML_FILE_VERSION) {
  15796. fprintf(stderr, "%s: invalid version number\n", __func__);
  15797. return result;
  15798. }
  15799. const uint32_t n_leafs = *(const uint32_t *) ptr; ptr += sizeof(n_leafs);
  15800. const uint32_t n_nodes = *(const uint32_t *) ptr; ptr += sizeof(n_nodes);
  15801. const uint64_t size_eval = *(const uint64_t *) ptr; ptr += sizeof(size_eval);
  15802. result.n_leafs = n_leafs;
  15803. result.n_nodes = n_nodes;
  15804. // create the data context
  15805. {
  15806. const size_t overhead = (n_leafs + n_nodes)*ggml_tensor_overhead();
  15807. struct ggml_init_params params = {
  15808. .mem_size = size_eval + overhead,
  15809. .mem_buffer = NULL,
  15810. .no_alloc = true,
  15811. };
  15812. *ctx_eval = ggml_init(params);
  15813. if (!*ctx_eval) {
  15814. fprintf(stderr, "%s: failed to create ggml context\n", __func__);
  15815. return result;
  15816. }
  15817. }
  15818. // leafs
  15819. {
  15820. uint32_t type;
  15821. uint32_t op;
  15822. uint32_t n_dims;
  15823. for (uint32_t i = 0; i < n_leafs; ++i) {
  15824. type = *(const uint32_t *) ptr; ptr += sizeof(type);
  15825. op = *(const uint32_t *) ptr; ptr += sizeof(op);
  15826. n_dims = *(const uint32_t *) ptr; ptr += sizeof(n_dims);
  15827. int64_t ne[GGML_MAX_DIMS];
  15828. size_t nb[GGML_MAX_DIMS];
  15829. for (int j = 0; j < GGML_MAX_DIMS; ++j) {
  15830. uint64_t ne_cur;
  15831. uint64_t nb_cur;
  15832. ne_cur = *(const uint64_t *) ptr; ptr += sizeof(ne_cur);
  15833. nb_cur = *(const uint64_t *) ptr; ptr += sizeof(nb_cur);
  15834. ne[j] = ne_cur;
  15835. nb[j] = nb_cur;
  15836. }
  15837. struct ggml_tensor * tensor = ggml_new_tensor(*ctx_eval, (enum ggml_type) type, n_dims, ne);
  15838. tensor->op = (enum ggml_op) op;
  15839. memcpy(tensor->name, ptr, GGML_MAX_NAME); ptr += GGML_MAX_NAME;
  15840. memcpy(tensor->op_params, ptr, GGML_MAX_OP_PARAMS); ptr += GGML_MAX_OP_PARAMS;
  15841. tensor->data = (void *) ptr;
  15842. for (int j = 0; j < GGML_MAX_DIMS; ++j) {
  15843. tensor->nb[j] = nb[j];
  15844. }
  15845. result.leafs[i] = tensor;
  15846. ptr += ggml_nbytes(tensor);
  15847. fprintf(stderr, "%s: loaded leaf %d: '%16s', %3d dims, %9zu bytes\n", __func__, i, tensor->name, n_dims, ggml_nbytes(tensor));
  15848. }
  15849. }
  15850. ggml_set_no_alloc(*ctx_eval, false);
  15851. // nodes
  15852. {
  15853. uint32_t type;
  15854. uint32_t op;
  15855. uint32_t n_dims;
  15856. for (uint32_t i = 0; i < n_nodes; ++i) {
  15857. type = *(const uint32_t *) ptr; ptr += sizeof(type);
  15858. op = *(const uint32_t *) ptr; ptr += sizeof(op);
  15859. n_dims = *(const uint32_t *) ptr; ptr += sizeof(n_dims);
  15860. enum ggml_op eop = (enum ggml_op) op;
  15861. int64_t ne[GGML_MAX_DIMS];
  15862. size_t nb[GGML_MAX_DIMS];
  15863. for (int j = 0; j < GGML_MAX_DIMS; ++j) {
  15864. uint64_t ne_cur;
  15865. uint64_t nb_cur;
  15866. ne_cur = *(const uint64_t *) ptr; ptr += sizeof(ne_cur);
  15867. nb_cur = *(const uint64_t *) ptr; ptr += sizeof(nb_cur);
  15868. ne[j] = ne_cur;
  15869. nb[j] = nb_cur;
  15870. }
  15871. const char * ptr_name = ptr; ptr += GGML_MAX_NAME;
  15872. const char * ptr_op_params = ptr; ptr += GGML_MAX_OP_PARAMS;
  15873. const int32_t * ptr_arg_idx = (const int32_t *) ptr; ptr += GGML_MAX_SRC*sizeof(int32_t);
  15874. struct ggml_tensor * args[GGML_MAX_SRC] = { NULL };
  15875. // parse args
  15876. for (int j = 0; j < GGML_MAX_SRC; ++j) {
  15877. const int32_t arg_idx = ptr_arg_idx[j];
  15878. if (arg_idx == -1) {
  15879. continue;
  15880. }
  15881. if (arg_idx < GGML_MAX_NODES) {
  15882. args[j] = result.leafs[arg_idx];
  15883. } else {
  15884. args[j] = result.nodes[arg_idx - GGML_MAX_NODES];
  15885. }
  15886. }
  15887. // create the tensor
  15888. // "view" operations are handled differently
  15889. // TODO: handle inplace ops - currently a copy is always made
  15890. struct ggml_tensor * tensor = NULL;
  15891. switch (eop) {
  15892. // TODO: implement other view ops
  15893. case GGML_OP_RESHAPE:
  15894. {
  15895. tensor = ggml_reshape_4d(*ctx_eval, args[0], ne[0], ne[1], ne[2], ne[3]);
  15896. } break;
  15897. case GGML_OP_VIEW:
  15898. {
  15899. tensor = ggml_view_4d(*ctx_eval, args[0], ne[0], ne[1], ne[2], ne[3], 0, 0, 0, 0);
  15900. size_t offs;
  15901. memcpy(&offs, ptr_op_params, sizeof(offs));
  15902. tensor->data = ((char *) tensor->data) + offs;
  15903. } break;
  15904. case GGML_OP_TRANSPOSE:
  15905. {
  15906. tensor = ggml_transpose(*ctx_eval, args[0]);
  15907. } break;
  15908. case GGML_OP_PERMUTE:
  15909. {
  15910. tensor = ggml_view_4d(*ctx_eval, args[0], ne[0], ne[1], ne[2], ne[3], 0, 0, 0, 0);
  15911. } break;
  15912. default:
  15913. {
  15914. tensor = ggml_new_tensor(*ctx_eval, (enum ggml_type) type, n_dims, ne);
  15915. tensor->op = eop;
  15916. } break;
  15917. }
  15918. memcpy(tensor->name, ptr_name, GGML_MAX_NAME);
  15919. memcpy(tensor->op_params, ptr_op_params, GGML_MAX_OP_PARAMS);
  15920. for (int j = 0; j < GGML_MAX_DIMS; ++j) {
  15921. tensor->nb[j] = nb[j];
  15922. }
  15923. for (int j = 0; j < GGML_MAX_SRC; ++j) {
  15924. tensor->src[j] = args[j];
  15925. }
  15926. result.nodes[i] = tensor;
  15927. fprintf(stderr, "%s: loaded node %d: '%16s', %3d dims, %9zu bytes\n", __func__, i, tensor->name, n_dims, ggml_nbytes(tensor));
  15928. }
  15929. }
  15930. }
  15931. return result;
  15932. }
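// Illustrative round trip (the file name is a placeholder; ctx_data / ctx_eval must be
// NULL on entry, as asserted above):
//
//     ggml_graph_export(&gf, "graph.ggml");
//
//     struct ggml_context * ctx_data = NULL;
//     struct ggml_context * ctx_eval = NULL;
//     struct ggml_cgraph    gf2      = ggml_graph_import("graph.ggml", &ctx_data, &ctx_eval);
//     // ... evaluate gf2, e.g. with ggml_graph_compute_with_ctx(), then
//     // ggml_free(ctx_data); ggml_free(ctx_eval);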
  15933. void ggml_graph_print(const struct ggml_cgraph * cgraph) {
  15934. int64_t perf_total_per_op_us[GGML_OP_COUNT] = {0};
  15935. GGML_PRINT("=== GRAPH ===\n");
  15936. GGML_PRINT("n_nodes = %d\n", cgraph->n_nodes);
  15937. for (int i = 0; i < cgraph->n_nodes; i++) {
  15938. struct ggml_tensor * node = cgraph->nodes[i];
  15939. perf_total_per_op_us[node->op] += MAX(1, node->perf_time_us);
  15940. GGML_PRINT(" - %3d: [ %5" PRId64 ", %5" PRId64 ", %5" PRId64 "] %16s %s (%3d) cpu = %7.3f / %7.3f ms, wall = %7.3f / %7.3f ms\n",
  15941. i,
  15942. node->ne[0], node->ne[1], node->ne[2],
  15943. ggml_op_name(node->op), node->is_param ? "x" : node->grad ? "g" : " ", node->perf_runs,
  15944. (double) node->perf_cycles / (double) ggml_cycles_per_ms(),
  15945. (double) node->perf_cycles / (double) ggml_cycles_per_ms() / (double) node->perf_runs,
  15946. (double) node->perf_time_us / 1000.0,
  15947. (double) node->perf_time_us / 1000.0 / node->perf_runs);
  15948. }
  15949. GGML_PRINT("n_leafs = %d\n", cgraph->n_leafs);
  15950. for (int i = 0; i < cgraph->n_leafs; i++) {
  15951. struct ggml_tensor * node = cgraph->leafs[i];
  15952. GGML_PRINT(" - %3d: [ %5" PRId64 ", %5" PRId64 "] %8s %16s\n",
  15953. i,
  15954. node->ne[0], node->ne[1],
  15955. ggml_op_name(node->op),
  15956. ggml_get_name(node));
  15957. }
  15958. for (int i = 0; i < GGML_OP_COUNT; i++) {
  15959. if (perf_total_per_op_us[i] == 0) {
  15960. continue;
  15961. }
  15962. GGML_PRINT("perf_total_per_op_us[%16s] = %7.3f ms\n", ggml_op_name(i), (double) perf_total_per_op_us[i] / 1000.0);
  15963. }
  15964. GGML_PRINT("========================================\n");
  15965. }
  15966. // check if node is part of the graph
  15967. static bool ggml_graph_find(const struct ggml_cgraph * cgraph, const struct ggml_tensor * node) {
  15968. if (cgraph == NULL) {
  15969. return true;
  15970. }
  15971. for (int i = 0; i < cgraph->n_nodes; i++) {
  15972. if (cgraph->nodes[i] == node) {
  15973. return true;
  15974. }
  15975. }
  15976. return false;
  15977. }
  15978. static struct ggml_tensor * ggml_graph_get_parent(const struct ggml_cgraph * cgraph, const struct ggml_tensor * node) {
  15979. for (int i = 0; i < cgraph->n_nodes; i++) {
  15980. struct ggml_tensor * parent = cgraph->nodes[i];
  15981. if (parent->grad == node) {
  15982. return parent;
  15983. }
  15984. }
  15985. return NULL;
  15986. }
  15987. static void ggml_graph_dump_dot_node_edge(FILE * fp, const struct ggml_cgraph * gb, struct ggml_tensor * node, struct ggml_tensor * parent, const char * label) {
  15988. struct ggml_tensor * gparent = ggml_graph_get_parent(gb, node);
  15989. struct ggml_tensor * gparent0 = ggml_graph_get_parent(gb, parent);
  15990. fprintf(fp, " \"%p\":%s -> \"%p\":%s [ arrowhead = %s; style = %s; label = \"%s\"; ]\n",
  15991. gparent0 ? (void *) gparent0 : (void *) parent,
  15992. gparent0 ? "g" : "x",
  15993. gparent ? (void *) gparent : (void *) node,
  15994. gparent ? "g" : "x",
  15995. gparent ? "empty" : "vee",
  15996. gparent ? "dashed" : "solid",
  15997. label);
  15998. }
  15999. static void ggml_graph_dump_dot_leaf_edge(FILE * fp, struct ggml_tensor * node, struct ggml_tensor * parent, const char * label) {
  16000. fprintf(fp, " \"%p\":%s -> \"%p\":%s [ label = \"%s\"; ]\n",
  16001. (void *) parent, "x",
  16002. (void *) node, "x",
  16003. label);
  16004. }
  16005. void ggml_graph_dump_dot(const struct ggml_cgraph * gb, const struct ggml_cgraph * gf, const char * filename) {
  16006. char color[16];
  16007. FILE * fp = fopen(filename, "w");
  16008. GGML_ASSERT(fp);
  16009. fprintf(fp, "digraph G {\n");
  16010. fprintf(fp, " newrank = true;\n");
  16011. fprintf(fp, " rankdir = LR;\n");
  16012. for (int i = 0; i < gb->n_nodes; i++) {
  16013. struct ggml_tensor * node = gb->nodes[i];
  16014. if (ggml_graph_get_parent(gb, node) != NULL) {
  16015. continue;
  16016. }
  16017. if (node->is_param) {
  16018. snprintf(color, sizeof(color), "yellow");
  16019. } else if (node->grad) {
  16020. if (ggml_graph_find(gf, node)) {
  16021. snprintf(color, sizeof(color), "green");
  16022. } else {
  16023. snprintf(color, sizeof(color), "lightblue");
  16024. }
  16025. } else {
  16026. snprintf(color, sizeof(color), "white");
  16027. }
  16028. fprintf(fp, " \"%p\" [ "
  16029. "style = filled; fillcolor = %s; shape = record; "
  16030. "label=\"",
  16031. (void *) node, color);
  16032. if (strlen(node->name) > 0) {
  16033. fprintf(fp, "%s (%s)|", node->name, ggml_type_name(node->type));
  16034. } else {
  16035. fprintf(fp, "(%s)|", ggml_type_name(node->type));
  16036. }
  16037. if (node->n_dims == 2) {
  16038. fprintf(fp, "%d [%" PRId64 ", %" PRId64 "] | <x>%s", i, node->ne[0], node->ne[1], ggml_op_symbol(node->op));
  16039. } else {
  16040. fprintf(fp, "%d [%" PRId64 ", %" PRId64 ", %" PRId64 "] | <x>%s", i, node->ne[0], node->ne[1], node->ne[2], ggml_op_symbol(node->op));
  16041. }
  16042. if (node->grad) {
  16043. fprintf(fp, " | <g>%s\"; ]\n", ggml_op_symbol(node->grad->op));
  16044. } else {
  16045. fprintf(fp, "\"; ]\n");
  16046. }
  16047. }
  16048. for (int i = 0; i < gb->n_leafs; i++) {
  16049. struct ggml_tensor * node = gb->leafs[i];
  16050. snprintf(color, sizeof(color), "pink");
  16051. fprintf(fp, " \"%p\" [ "
  16052. "style = filled; fillcolor = %s; shape = record; "
  16053. "label=\"<x>",
  16054. (void *) node, color);
  16055. if (strlen(node->name) > 0) {
  16056. fprintf(fp, "%s (%s)|", node->name, ggml_type_name(node->type));
  16057. } else {
  16058. fprintf(fp, "(%s)|", ggml_type_name(node->type));
  16059. }
  16060. fprintf(fp, "CONST %d [%" PRId64 ", %" PRId64 "]", i, node->ne[0], node->ne[1]);
  16061. if (ggml_nelements(node) < 5) {
  16062. fprintf(fp, " | (");
  16063. for (int j = 0; j < ggml_nelements(node); j++) {
  16064. if (node->type == GGML_TYPE_I8 || node->type == GGML_TYPE_I16 || node->type == GGML_TYPE_I32) {
  16065. fprintf(fp, "%d", ggml_get_i32_1d(node, j));
  16066. }
  16067. else if (node->type == GGML_TYPE_F32 || node->type == GGML_TYPE_F16) {
  16068. fprintf(fp, "%.1e", (double)ggml_get_f32_1d(node, j));
  16069. }
  16070. else {
  16071. fprintf(fp, "#");
  16072. }
  16073. if (j < ggml_nelements(node) - 1) {
  16074. fprintf(fp, ", ");
  16075. }
  16076. }
  16077. fprintf(fp, ")");
  16078. }
  16079. fprintf(fp, "\"; ]\n");
  16080. }
  16081. for (int i = 0; i < gb->n_nodes; i++) {
  16082. struct ggml_tensor * node = gb->nodes[i];
  16083. for (int j = 0; j < GGML_MAX_SRC; j++) {
  16084. if (node->src[j]) {
  16085. char label[16];
  16086. snprintf(label, sizeof(label), "src %d", j);
  16087. ggml_graph_dump_dot_node_edge(fp, gb, node, node->src[j], label);
  16088. }
  16089. }
  16090. }
  16091. for (int i = 0; i < gb->n_leafs; i++) {
  16092. struct ggml_tensor * node = gb->leafs[i];
  16093. for (int j = 0; j < GGML_MAX_SRC; j++) {
  16094. if (node->src[j]) {
  16095. char label[16];
  16096. snprintf(label, sizeof(label), "src %d", j);
  16097. ggml_graph_dump_dot_leaf_edge(fp, node, node->src[j], label);
  16098. }
  16099. }
  16100. }
  16101. fprintf(fp, "}\n");
  16102. fclose(fp);
  16103. GGML_PRINT("%s: dot -Tpng %s -o %s.png && open %s.png\n", __func__, filename, filename, filename);
  16104. }
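// Example (illustrative): the first argument is the graph to dump; the second (may be
// NULL) is the forward graph used only to pick node colors.
//
//     ggml_graph_dump_dot(&gf, NULL, "graph.dot");
//     // then: dot -Tpng graph.dot -o graph.png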
  16105. ////////////////////////////////////////////////////////////////////////////////
  16106. static void ggml_opt_set_params(int np, struct ggml_tensor * const ps[], const float * x) {
  16107. int i = 0;
  16108. for (int p = 0; p < np; ++p) {
16109. const int64_t ne = ggml_nelements(ps[p]);
  16110. // TODO: add function to set tensor from array
  16111. for (int64_t j = 0; j < ne; ++j) {
  16112. ggml_set_f32_1d(ps[p], j, x[i++]);
  16113. }
  16114. }
  16115. }
  16116. static void ggml_opt_get_params(int np, struct ggml_tensor * const ps[], float * x) {
  16117. int i = 0;
  16118. for (int p = 0; p < np; ++p) {
16119. const int64_t ne = ggml_nelements(ps[p]);
  16120. // TODO: add function to get all elements at once
  16121. for (int64_t j = 0; j < ne; ++j) {
  16122. x[i++] = ggml_get_f32_1d(ps[p], j);
  16123. }
  16124. }
  16125. }
  16126. static void ggml_opt_get_grad(int np, struct ggml_tensor * const ps[], float * g) {
  16127. int64_t i = 0;
  16128. for (int p = 0; p < np; ++p) {
16129. const int64_t ne = ggml_nelements(ps[p]);
  16130. // TODO: add function to get all elements at once
  16131. for (int64_t j = 0; j < ne; ++j) {
  16132. g[i++] = ggml_get_f32_1d(ps[p]->grad, j);
  16133. }
  16134. }
  16135. }
  16136. static void ggml_opt_acc_grad(int np, struct ggml_tensor * const ps[], float * g, float scale) {
  16137. int64_t i = 0;
  16138. for (int p = 0; p < np; ++p) {
16139. const int64_t ne = ggml_nelements(ps[p]);
  16140. // TODO: add function to get all elements at once
  16141. for (int64_t j = 0; j < ne; ++j) {
  16142. g[i++] += ggml_get_f32_1d(ps[p]->grad, j) * scale;
  16143. }
  16144. }
  16145. }
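// The four helpers above view all optimized parameter tensors as one flat float vector:
// ggml_opt_set_params/ggml_opt_get_params copy x <-> tensors element by element, and
// ggml_opt_get_grad/ggml_opt_acc_grad do the same for the gradients (the latter scaled,
// which is what the gradient-accumulation loops below rely on).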
  16146. //
  16147. // ADAM
  16148. //
  16149. // ref: https://arxiv.org/pdf/1412.6980.pdf
  16150. //
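// Update rule as implemented below (per parameter element, with t = opt->iter):
//
//   m_t = beta1*m_{t-1} + (1 - beta1)*g_t
//   v_t = beta2*v_{t-1} + (1 - beta2)*g_t^2
//   x_t = x_{t-1}*(1 - p_decay) - (alpha*sched/(1 - beta1^t)) * m_t / (sqrt(v_t/(1 - beta2^t)) + eps)
//
// where g_t is the (optionally norm-clipped) accumulated gradient and p_decay is the
// sched-scaled weight-decay factor, applied only to tensors with n_dims >= decay_min_ndim.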
  16151. static enum ggml_opt_result ggml_opt_adam(
  16152. struct ggml_context * ctx,
  16153. struct ggml_opt_context * opt,
  16154. struct ggml_opt_params params,
  16155. struct ggml_tensor * f,
  16156. struct ggml_cgraph * gf,
  16157. struct ggml_cgraph * gb,
  16158. ggml_opt_callback callback,
  16159. void * callback_data) {
  16160. GGML_ASSERT(ggml_is_scalar(f));
  16161. // these will store the parameters we want to optimize
  16162. struct ggml_tensor * ps[GGML_MAX_PARAMS];
  16163. int np = 0;
  16164. int64_t nx = 0;
  16165. for (int i = 0; i < gf->n_nodes; ++i) {
  16166. if (gf->nodes[i]->is_param) {
  16167. GGML_PRINT_DEBUG("found param %d: grad->op = %d\n", np, gf->nodes[i]->grad->op);
  16168. GGML_ASSERT(np < GGML_MAX_PARAMS);
  16169. ps[np++] = gf->nodes[i];
  16170. nx += ggml_nelements(gf->nodes[i]);
  16171. }
  16172. }
  16173. if ((opt->params.type != params.type) || (opt->nx != nx) || (opt->params.past != params.past)) {
  16174. int iter = opt->iter;
  16175. ggml_opt_init(opt->ctx, opt, params, nx);
  16176. opt->iter = iter;
  16177. }
  16178. // constants
  16179. float sched = params.adam.sched;
  16180. const float alpha = params.adam.alpha;
  16181. const float decay = params.adam.decay * alpha;
  16182. const float beta1 = params.adam.beta1;
  16183. const float beta2 = params.adam.beta2;
  16184. const float eps = params.adam.eps;
  16185. const float gclip = params.adam.gclip;
  16186. const int decay_min_ndim = params.adam.decay_min_ndim;
  16187. const int n_accum = MAX(1, params.n_gradient_accumulation);
  16188. const float accum_norm = 1.0f / (float) n_accum;
  16189. float * g = opt->adam.g->data; // gradients
  16190. float * m = opt->adam.m->data; // first moment
  16191. float * v = opt->adam.v->data; // second moment
  16192. float * pf = params.past > 0 ? opt->adam.pf->data : NULL; // past function values
  16193. struct ggml_cplan cplan = ggml_graph_plan(gb, params.n_threads);
  16194. struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_WORK_BUFFER, cplan.work_size);
  16195. cplan.work_data = (uint8_t *)ctx->mem_buffer + obj->offs;
  16196. bool cancel = false;
  16197. // compute the function value
  16198. float fx = 0;
  16199. ggml_set_zero(opt->adam.g);
  16200. for (int accum_step = 0; accum_step < n_accum; ++accum_step) {
  16201. if (callback) {
  16202. callback(callback_data, accum_step, &sched, &cancel);
  16203. if (cancel) {
  16204. return GGML_OPT_CANCEL;
  16205. }
  16206. }
  16207. // ggml_graph_reset (gf);
  16208. ggml_set_f32 (f->grad, 1.0f);
  16209. ggml_graph_compute(gb, &cplan);
  16210. ggml_opt_acc_grad(np, ps, g, accum_norm);
  16211. fx += ggml_get_f32_1d(f, 0);
  16212. }
  16213. fx *= accum_norm;
  16214. opt->adam.fx_prev = fx;
  16215. opt->adam.fx_best = opt->adam.fx_prev;
  16216. if (pf) {
  16217. pf[opt->iter % params.past] = opt->adam.fx_prev;
  16218. }
  16219. opt->loss_before = opt->adam.fx_prev;
  16220. opt->loss_after = opt->adam.fx_prev;
  16221. // initialize
  16222. if (opt->just_initialized) {
  16223. opt->adam.n_no_improvement = 0;
  16224. opt->just_initialized = false;
  16225. }
  16226. float * fx_best = &opt->adam.fx_best;
  16227. float * fx_prev = &opt->adam.fx_prev;
  16228. int * n_no_improvement = &opt->adam.n_no_improvement;
  16229. int iter0 = opt->iter;
  16230. // run the optimizer
  16231. for (int t = 0; t < params.adam.n_iter; ++t) {
  16232. opt->iter = iter0 + t + 1;
  16233. GGML_PRINT_DEBUG ("=== iter %d ===\n", t);
  16234. GGML_PRINT_DEBUG ("f = %10.6f\n", ggml_get_f32_1d(f, 0));
  16235. GGML_PRINT_DEBUG_5("df/dx0 = %10.6f\n", ggml_get_f32_1d(ps[0]->grad, 0));
  16236. GGML_PRINT_DEBUG_5("df/dx1 = %10.6f\n", ggml_get_f32_1d(ps[1]->grad, 0));
  16237. for (int i = 0; i < np; ++i) {
  16238. GGML_PRINT_DEBUG("param %d: %10.6f, g = %10.6f\n", i,
  16239. ggml_get_f32_1d(ps[i], 0), ggml_get_f32_1d(ps[i]->grad, 0));
  16240. }
  16241. const int64_t t_start_wall = ggml_time_us();
  16242. const int64_t t_start_cpu = ggml_cycles();
  16243. UNUSED(t_start_wall);
  16244. UNUSED(t_start_cpu);
  16245. {
  16246. float gnorm = 1.0f;
  16247. if (gclip > 0.0f) {
  16248. // gradient clipping
  16249. ggml_float sum = 0.0;
  16250. for (int64_t i = 0; i < nx; ++i) {
  16251. sum += (ggml_float)(g[i]*g[i]);
  16252. }
  16253. ggml_float norm = sqrt(sum);
  16254. if (norm > (ggml_float) gclip) {
  16255. gnorm = (float) ((ggml_float) gclip / norm);
  16256. }
  16257. }
  16258. const float beta1h = alpha*sched/(1.0f - powf(beta1, opt->iter));
  16259. const float beta2h = 1.0f/(1.0f - powf(beta2, opt->iter));
  16260. int64_t i = 0;
  16261. for (int p = 0; p < np; ++p) {
  16262. const int64_t ne = ggml_nelements(ps[p]);
  16263. const float p_decay = ((ps[p]->n_dims >= decay_min_ndim) ? decay : 0.0f) * sched;
  16264. for (int64_t j = 0; j < ne; ++j) {
  16265. float x = ggml_get_f32_1d(ps[p], j);
  16266. float g_ = g[i]*gnorm;
  16267. m[i] = m[i]*beta1 + g_*(1.0f - beta1);
  16268. v[i] = v[i]*beta2 + g_*g_*(1.0f - beta2);
  16269. float mh = m[i]*beta1h;
  16270. float vh = v[i]*beta2h;
  16271. vh = sqrtf(vh) + eps;
  16272. x = x*(1.0f - p_decay) - mh/vh;
  16273. ggml_set_f32_1d(ps[p], j, x);
  16274. ++i;
  16275. }
  16276. }
  16277. }
  16278. fx = 0;
  16279. ggml_set_zero(opt->adam.g);
  16280. for (int accum_step = 0; accum_step < n_accum; ++accum_step) {
  16281. if (callback) {
  16282. callback(callback_data, accum_step, &sched, &cancel);
  16283. if (cancel) {
16284. return GGML_OPT_CANCEL;
  16285. }
  16286. }
  16287. // ggml_graph_reset (gf);
  16288. ggml_set_f32 (f->grad, 1.0f);
  16289. ggml_graph_compute(gb, &cplan);
  16290. ggml_opt_acc_grad(np, ps, g, accum_norm);
  16291. fx += ggml_get_f32_1d(f, 0);
  16292. }
  16293. fx *= accum_norm;
  16294. opt->loss_after = fx;
  16295. // check convergence
  16296. if (fabsf(fx - fx_prev[0])/fx < params.adam.eps_f) {
  16297. GGML_PRINT_DEBUG("converged\n");
  16298. return GGML_OPT_OK;
  16299. }
  16300. // delta-based convergence test
  16301. if (pf != NULL) {
  16302. // need at least params.past iterations to start checking for convergence
  16303. if (params.past <= iter0 + t) {
  16304. const float rate = (pf[(iter0 + t)%params.past] - fx)/fx;
  16305. if (fabsf(rate) < params.delta) {
  16306. return GGML_OPT_OK;
  16307. }
  16308. }
  16309. pf[(iter0 + t)%params.past] = fx;
  16310. }
  16311. // check for improvement
  16312. if (params.max_no_improvement > 0) {
  16313. if (fx_best[0] > fx) {
  16314. fx_best[0] = fx;
  16315. n_no_improvement[0] = 0;
  16316. } else {
  16317. ++n_no_improvement[0];
  16318. if (n_no_improvement[0] >= params.max_no_improvement) {
  16319. return GGML_OPT_OK;
  16320. }
  16321. }
  16322. }
  16323. fx_prev[0] = fx;
  16324. {
  16325. const int64_t t_end_cpu = ggml_cycles();
  16326. GGML_PRINT_DEBUG("time iter: %5.3f s\n", ((float)(t_end_cpu - t_start_cpu))/CLOCKS_PER_SEC);
  16327. UNUSED(t_end_cpu);
  16328. const int64_t t_end_wall = ggml_time_us();
  16329. GGML_PRINT_DEBUG("wall time iter: %5.3f s\n", (t_end_wall - t_start_wall)/1e6);
  16330. UNUSED(t_end_wall);
  16331. }
  16332. }
  16333. return GGML_OPT_DID_NOT_CONVERGE;
  16334. }
  16335. //
  16336. // L-BFGS
  16337. //
  16338. // the L-BFGS implementation below is based on the following implementation:
  16339. //
  16340. // https://github.com/chokkan/liblbfgs
  16341. //
  16342. struct ggml_lbfgs_iteration_data {
  16343. float alpha;
  16344. float ys;
  16345. float * s;
  16346. float * y;
  16347. };
  16348. static enum ggml_opt_result linesearch_backtracking(
  16349. const struct ggml_opt_params * params,
  16350. int nx,
  16351. float * x,
  16352. float * fx,
  16353. float * g,
  16354. float * d,
  16355. float * step,
  16356. const float * xp,
  16357. struct ggml_tensor * f,
  16358. struct ggml_cgraph * gb,
  16359. struct ggml_cplan * cplan,
  16360. const int np,
  16361. struct ggml_tensor * ps[],
  16362. bool * cancel,
  16363. ggml_opt_callback callback,
  16364. void * callback_data) {
  16365. int count = 0;
  16366. float width = 0.0f;
  16367. float dg = 0.0f;
  16368. float finit = 0.0f;
  16369. float dginit = 0.0f;
  16370. float dgtest = 0.0f;
  16371. const float dec = 0.5f;
  16372. const float inc = 2.1f;
  16373. const int n_accum = MAX(1, params->n_gradient_accumulation);
  16374. const float accum_norm = 1.0f / (float) n_accum;
  16375. if (*step <= 0.f) {
  16376. return GGML_LINESEARCH_INVALID_PARAMETERS;
  16377. }
  16378. // compute the initial gradient in the search direction
  16379. ggml_vec_dot_f32(nx, &dginit, g, d);
  16380. // make sure that d points to a descent direction
  16381. if (0 < dginit) {
  16382. return GGML_LINESEARCH_FAIL;
  16383. }
  16384. // initialize local variables
  16385. finit = *fx;
  16386. dgtest = params->lbfgs.ftol*dginit;
  16387. while (true) {
  16388. ggml_vec_cpy_f32(nx, x, xp);
  16389. ggml_vec_mad_f32(nx, x, d, *step);
  16390. // evaluate the function and gradient values
  16391. {
  16392. ggml_opt_set_params(np, ps, x);
  16393. *fx = 0;
  16394. memset(g, 0, sizeof(float)*nx);
  16395. for (int accum_step = 0; accum_step < n_accum; ++accum_step) {
  16396. if (callback) {
16397. // L-BFGS does not support learning rate -> ignore learning schedule
  16398. float sched = 0;
  16399. callback(callback_data, accum_step, &sched, cancel);
  16400. if (*cancel) {
  16401. return GGML_OPT_CANCEL;
  16402. }
  16403. }
  16404. // ggml_graph_reset (gf);
  16405. ggml_set_f32 (f->grad, 1.0f);
  16406. ggml_graph_compute(gb, cplan);
  16407. ggml_opt_acc_grad(np, ps, g, accum_norm);
  16408. *fx += ggml_get_f32_1d(f, 0);
  16409. }
  16410. *fx *= accum_norm;
  16411. }
  16412. ++count;
  16413. if (*fx > finit + (*step)*dgtest) {
  16414. width = dec;
  16415. } else {
  16416. // Armijo condition is satisfied
  16417. if (params->lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_ARMIJO) {
  16418. return count;
  16419. }
  16420. ggml_vec_dot_f32(nx, &dg, g, d);
  16421. // check the Wolfe condition
  16422. if (dg < params->lbfgs.wolfe * dginit) {
  16423. width = inc;
  16424. } else {
  16425. if(params->lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_WOLFE) {
  16426. // regular Wolfe conditions
  16427. return count;
  16428. }
  16429. if(dg > -params->lbfgs.wolfe*dginit) {
  16430. width = dec;
  16431. } else {
  16432. // strong Wolfe condition (GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE)
  16433. return count;
  16434. }
  16435. }
  16436. }
  16437. if (*step < params->lbfgs.min_step) {
  16438. return GGML_LINESEARCH_MINIMUM_STEP;
  16439. }
  16440. if (*step > params->lbfgs.max_step) {
  16441. return GGML_LINESEARCH_MAXIMUM_STEP;
  16442. }
  16443. if (params->lbfgs.max_linesearch <= count) {
  16444. return GGML_LINESEARCH_MAXIMUM_ITERATIONS;
  16445. }
  16446. (*step) *= width;
  16447. }
  16448. GGML_UNREACHABLE();
  16449. }
  16450. static enum ggml_opt_result ggml_opt_lbfgs(
  16451. struct ggml_context * ctx,
  16452. struct ggml_opt_context * opt,
  16453. struct ggml_opt_params params,
  16454. struct ggml_tensor * f,
  16455. struct ggml_cgraph * gf,
  16456. struct ggml_cgraph * gb,
  16457. ggml_opt_callback callback,
  16458. void * callback_data) {
  16459. if (params.lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_WOLFE ||
  16460. params.lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE) {
  16461. if (params.lbfgs.wolfe <= params.lbfgs.ftol || 1.f <= params.lbfgs.wolfe) {
  16462. return GGML_OPT_INVALID_WOLFE;
  16463. }
  16464. }
  16465. const int m = params.lbfgs.m;
  16466. // these will store the parameters we want to optimize
  16467. struct ggml_tensor * ps[GGML_MAX_PARAMS];
  16468. int np = 0;
  16469. int nx = 0;
  16470. for (int i = 0; i < gf->n_nodes; ++i) {
  16471. if (gf->nodes[i]->is_param) {
  16472. GGML_PRINT_DEBUG("found param %d: grad->op = %d\n", np, gf->nodes[i]->grad->op);
  16473. GGML_ASSERT(np < GGML_MAX_PARAMS);
  16474. ps[np++] = gf->nodes[i];
  16475. nx += ggml_nelements(gf->nodes[i]);
  16476. }
  16477. }
  16478. if ((opt->params.type != params.type) || (opt->nx != nx) || (opt->params.past != params.past) || (opt->params.lbfgs.m != params.lbfgs.m)) {
  16479. int iter = opt->iter;
  16480. ggml_opt_init(ctx, opt, params, nx);
  16481. opt->iter = iter;
  16482. }
  16483. struct ggml_cplan cplan = ggml_graph_plan(gb, params.n_threads);
  16484. struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_WORK_BUFFER, cplan.work_size);
  16485. cplan.work_data = (uint8_t *)ctx->mem_buffer + obj->offs;
  16486. float * x = opt->lbfgs.x->data; // current parameters
  16487. float * xp = opt->lbfgs.xp->data; // previous parameters
  16488. float * g = opt->lbfgs.g->data; // current gradient
  16489. float * gp = opt->lbfgs.gp->data; // previous gradient
  16490. float * d = opt->lbfgs.d->data; // search direction
  16491. float * pf = params.past > 0 ? opt->lbfgs.pf->data : NULL; // past function values
  16492. const int n_accum = MAX(1, params.n_gradient_accumulation);
  16493. const float accum_norm = 1.0f / (float) n_accum;
  16494. float fx = 0.0f; // cost function value
  16495. float xnorm = 0.0f; // ||x||
  16496. float gnorm = 0.0f; // ||g||
  16497. // initialize x from the graph nodes
  16498. ggml_opt_get_params(np, ps, x);
  16499. // the L-BFGS memory
  16500. float * lm_alpha = opt->lbfgs.lmal->data;
  16501. float * lm_ys = opt->lbfgs.lmys->data;
  16502. float * lm_s = opt->lbfgs.lms->data;
  16503. float * lm_y = opt->lbfgs.lmy->data;
  16504. bool cancel = false;
  16505. // evaluate the function value and its gradient
  16506. {
  16507. ggml_opt_set_params(np, ps, x);
  16508. fx = 0;
  16509. memset(g, 0, sizeof(float)*nx);
  16510. for (int accum_step = 0; accum_step < n_accum; ++accum_step) {
  16511. if (callback) {
16512. // L-BFGS does not use a learning rate -> ignore the learning schedule
  16513. float sched = 0;
  16514. callback(callback_data, accum_step, &sched, &cancel);
  16515. if (cancel) {
  16516. return GGML_OPT_CANCEL;
  16517. }
  16518. }
  16519. // ggml_graph_reset (gf);
  16520. ggml_set_f32 (f->grad, 1.0f);
  16521. ggml_graph_compute(gb, &cplan);
  16522. ggml_opt_acc_grad(np, ps, g, accum_norm);
  16523. fx += ggml_get_f32_1d(f, 0);
  16524. }
  16525. fx *= accum_norm;
  16526. opt->loss_before = fx;
  16527. opt->loss_after = fx;
  16528. }
  16529. // search direction = -gradient
  16530. ggml_vec_neg_f32(nx, d, g);
  16531. // ||x||, ||g||
  16532. ggml_vec_norm_f32(nx, &xnorm, x);
  16533. ggml_vec_norm_f32(nx, &gnorm, g);
  16534. if (xnorm < 1.0f) {
  16535. xnorm = 1.0f;
  16536. }
  16537. // already optimized
  16538. if (gnorm/xnorm <= params.lbfgs.eps) {
  16539. return GGML_OPT_OK;
  16540. }
  16541. if (opt->just_initialized) {
  16542. if (pf) {
  16543. pf[0] = fx;
  16544. }
  16545. opt->lbfgs.fx_best = fx;
  16546. // initial step
  16547. ggml_vec_norm_inv_f32(nx, &opt->lbfgs.step, d);
  16548. opt->lbfgs.j = 0;
  16549. opt->lbfgs.k = 1;
  16550. opt->lbfgs.end = 0;
  16551. opt->lbfgs.n_no_improvement = 0;
  16552. opt->just_initialized = false;
  16553. }
  16554. float * fx_best = &opt->lbfgs.fx_best;
  16555. float * step = &opt->lbfgs.step;
  16556. int * j = &opt->lbfgs.j;
  16557. int * k = &opt->lbfgs.k;
  16558. int * end = &opt->lbfgs.end;
  16559. int * n_no_improvement = &opt->lbfgs.n_no_improvement;
  16560. int ls = 0;
  16561. int bound = 0;
  16562. float ys = 0.0f;
  16563. float yy = 0.0f;
  16564. float beta = 0.0f;
  16565. int it = 0;
  16566. while (true) {
  16567. // store the current position and gradient vectors
  16568. ggml_vec_cpy_f32(nx, xp, x);
  16569. ggml_vec_cpy_f32(nx, gp, g);
  16570. ls = linesearch_backtracking(&params, nx, x, &fx, g, d, step, xp, f, gb, &cplan, np, ps, &cancel, callback, callback_data);
  16571. if (cancel) {
  16572. return GGML_OPT_CANCEL;
  16573. }
  16574. if (ls < 0) {
  16575. // linesearch failed - go back to the previous point and return
  16576. ggml_vec_cpy_f32(nx, x, xp);
  16577. ggml_vec_cpy_f32(nx, g, gp);
  16578. return ls;
  16579. }
  16580. opt->loss_after = fx;
  16581. ggml_vec_norm_f32(nx, &xnorm, x);
  16582. ggml_vec_norm_f32(nx, &gnorm, g);
  16583. GGML_PRINT_DEBUG("f = %10.6f\n", ggml_get_f32_1d(f, 0));
  16584. if (xnorm < 1.0f) {
  16585. xnorm = 1.0f;
  16586. }
  16587. if (gnorm/xnorm <= params.lbfgs.eps) {
  16588. // converged
  16589. return GGML_OPT_OK;
  16590. }
  16591. // delta-based convergence test
  16592. if (pf != NULL) {
  16593. // need at least params.past iterations to start checking for convergence
  16594. if (params.past <= k[0]) {
  16595. const float rate = (pf[k[0]%params.past] - fx)/fx;
  16596. if (fabsf(rate) < params.delta) {
  16597. return GGML_OPT_OK;
  16598. }
  16599. }
  16600. pf[k[0]%params.past] = fx;
  16601. }
  16602. // check for improvement
  16603. if (params.max_no_improvement > 0) {
  16604. if (fx < fx_best[0]) {
  16605. fx_best[0] = fx;
  16606. n_no_improvement[0] = 0;
  16607. } else {
  16608. n_no_improvement[0]++;
  16609. if (n_no_improvement[0] >= params.max_no_improvement) {
  16610. return GGML_OPT_OK;
  16611. }
  16612. }
  16613. }
  16614. if (params.lbfgs.n_iter != 0 && params.lbfgs.n_iter < it + 1) {
  16615. // reached the maximum number of iterations
  16616. return GGML_OPT_DID_NOT_CONVERGE;
  16617. }
  16618. // update vectors s and y:
  16619. // s_{k+1} = x_{k+1} - x_{k} = \step * d_{k}.
  16620. // y_{k+1} = g_{k+1} - g_{k}.
  16621. //
  16622. ggml_vec_sub_f32(nx, &lm_s[end[0]*nx], x, xp);
  16623. ggml_vec_sub_f32(nx, &lm_y[end[0]*nx], g, gp);
  16624. // compute scalars ys and yy:
  16625. // ys = y^t \cdot s -> 1 / \rho.
  16626. // yy = y^t \cdot y.
  16627. //
  16628. ggml_vec_dot_f32(nx, &ys, &lm_y[end[0]*nx], &lm_s[end[0]*nx]);
  16629. ggml_vec_dot_f32(nx, &yy, &lm_y[end[0]*nx], &lm_y[end[0]*nx]);
  16630. lm_ys[end[0]] = ys;
  16631. // find new search direction
  16632. // ref: https://en.wikipedia.org/wiki/Limited-memory_BFGS
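// two-loop recursion: starting from q = -g, the first loop subtracts
// alpha_j * y_j for each stored correction pair, the direction is then scaled
// by ys/yy (an estimate of the inverse Hessian diagonal), and the second loop
// adds (alpha_j - beta_j) * s_j back in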
  16633. bound = (m <= k[0]) ? m : k[0];
  16634. k[0]++;
  16635. it++;
  16636. end[0] = (end[0] + 1)%m;
  16637. // initialize search direction with -g
  16638. ggml_vec_neg_f32(nx, d, g);
  16639. j[0] = end[0];
  16640. for (int i = 0; i < bound; ++i) {
  16641. j[0] = (j[0] + m - 1) % m;
  16642. // \alpha_{j} = \rho_{j} s^{t}_{j} \cdot q_{k+1}
  16643. ggml_vec_dot_f32(nx, &lm_alpha[j[0]], &lm_s[j[0]*nx], d);
  16644. lm_alpha[j[0]] /= lm_ys[j[0]];
  16645. // q_{i} = q_{i+1} - \alpha_{i} y_{i}
  16646. ggml_vec_mad_f32(nx, d, &lm_y[j[0]*nx], -lm_alpha[j[0]]);
  16647. }
  16648. ggml_vec_scale_f32(nx, d, ys/yy);
  16649. for (int i = 0; i < bound; ++i) {
  16650. // \beta_{j} = \rho_{j} y^t_{j} \cdot \gamma_{i}
  16651. ggml_vec_dot_f32(nx, &beta, &lm_y[j[0]*nx], d);
  16652. beta /= lm_ys[j[0]];
  16653. // \gamma_{i+1} = \gamma_{i} + (\alpha_{j} - \beta_{j}) s_{j}
  16654. ggml_vec_mad_f32(nx, d, &lm_s[j[0]*nx], lm_alpha[j[0]] - beta);
  16655. j[0] = (j[0] + 1)%m;
  16656. }
  16657. step[0] = 1.0;
  16658. }
  16659. GGML_UNREACHABLE();
  16660. }
  16661. struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type) {
  16662. struct ggml_opt_params result;
  16663. switch (type) {
  16664. case GGML_OPT_ADAM:
  16665. {
  16666. result = (struct ggml_opt_params) {
  16667. .type = GGML_OPT_ADAM,
  16668. .n_threads = 1,
  16669. .past = 0,
  16670. .delta = 1e-5f,
  16671. .max_no_improvement = 100,
  16672. .print_forward_graph = true,
  16673. .print_backward_graph = true,
  16674. .n_gradient_accumulation = 1,
  16675. .adam = {
  16676. .n_iter = 10000,
  16677. .sched = 1.000f,
  16678. .decay = 0.0f,
  16679. .decay_min_ndim = 2,
  16680. .alpha = 0.001f,
  16681. .beta1 = 0.9f,
  16682. .beta2 = 0.999f,
  16683. .eps = 1e-8f,
  16684. .eps_f = 1e-5f,
  16685. .eps_g = 1e-3f,
  16686. .gclip = 0.0f,
  16687. },
  16688. };
  16689. } break;
  16690. case GGML_OPT_LBFGS:
  16691. {
  16692. result = (struct ggml_opt_params) {
  16693. .type = GGML_OPT_LBFGS,
  16694. .n_threads = 1,
  16695. .past = 0,
  16696. .delta = 1e-5f,
  16697. .max_no_improvement = 0,
  16698. .print_forward_graph = true,
  16699. .print_backward_graph = true,
  16700. .n_gradient_accumulation = 1,
  16701. .lbfgs = {
  16702. .m = 6,
  16703. .n_iter = 100,
  16704. .max_linesearch = 20,
  16705. .eps = 1e-5f,
  16706. .ftol = 1e-4f,
  16707. .wolfe = 0.9f,
  16708. .min_step = 1e-20f,
  16709. .max_step = 1e+20f,
  16710. .linesearch = GGML_LINESEARCH_DEFAULT,
  16711. },
  16712. };
  16713. } break;
  16714. }
  16715. return result;
  16716. }
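// notes on the shared fields above: `past` > 0 enables the delta-based
// convergence test on the history of function values, and
// `max_no_improvement` > 0 stops the run after that many iterations without
// improving on the best value seen so far; 0 disables the respective test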
  16717. GGML_API void ggml_opt_init(
  16718. struct ggml_context * ctx,
  16719. struct ggml_opt_context * opt,
  16720. struct ggml_opt_params params,
  16721. int64_t nx) {
  16722. opt->ctx = ctx;
  16723. opt->params = params;
  16724. opt->iter = 0;
  16725. opt->nx = nx;
  16726. opt->just_initialized = true;
  16727. if (opt->ctx == NULL) {
  16728. struct ggml_init_params ctx_opt_params;
  16729. if (opt->params.type == GGML_OPT_ADAM) {
  16730. ctx_opt_params.mem_size = GGML_MEM_ALIGN*3 + ggml_tensor_overhead()*3 + ggml_type_size(GGML_TYPE_F32)*nx*3;
  16731. if (opt->params.past > 0) {
  16732. ctx_opt_params.mem_size += GGML_MEM_ALIGN + ggml_tensor_overhead() + ggml_type_size(GGML_TYPE_F32)*opt->params.past;
  16733. }
  16734. } else if (opt->params.type == GGML_OPT_LBFGS) {
  16735. ctx_opt_params.mem_size = GGML_MEM_ALIGN*9 + ggml_tensor_overhead()*9 + ggml_type_size(GGML_TYPE_F32)*(nx*5 + opt->params.lbfgs.m*2 + nx*opt->params.lbfgs.m*2);
  16736. if (opt->params.past > 0) {
  16737. ctx_opt_params.mem_size += GGML_MEM_ALIGN + ggml_tensor_overhead() + ggml_type_size(GGML_TYPE_F32)*opt->params.past;
  16738. }
  16739. }
  16740. ctx_opt_params.mem_buffer = NULL;
  16741. ctx_opt_params.no_alloc = false;
  16742. opt->ctx = ggml_init(ctx_opt_params);
  16743. }
  16744. switch (opt->params.type) {
  16745. case GGML_OPT_ADAM:
  16746. {
  16747. opt->adam.g = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
  16748. opt->adam.m = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
  16749. opt->adam.v = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
  16750. opt->adam.pf = params.past > 0
  16751. ? ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, params.past)
  16752. : NULL;
  16753. ggml_set_zero(opt->adam.m);
  16754. ggml_set_zero(opt->adam.v);
  16755. if (opt->adam.pf) {
  16756. ggml_set_zero(opt->adam.pf);
  16757. }
  16758. } break;
  16759. case GGML_OPT_LBFGS:
  16760. {
  16761. opt->lbfgs.x = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
  16762. opt->lbfgs.xp = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
  16763. opt->lbfgs.g = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
  16764. opt->lbfgs.gp = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
  16765. opt->lbfgs.d = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
  16766. opt->lbfgs.pf = params.past > 0
  16767. ? ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, params.past)
  16768. : NULL;
  16769. opt->lbfgs.lmal = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, params.lbfgs.m);
  16770. opt->lbfgs.lmys = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, params.lbfgs.m);
  16771. opt->lbfgs.lms = ggml_new_tensor_2d(opt->ctx, GGML_TYPE_F32, nx, params.lbfgs.m);
  16772. opt->lbfgs.lmy = ggml_new_tensor_2d(opt->ctx, GGML_TYPE_F32, nx, params.lbfgs.m);
  16773. ggml_set_zero(opt->lbfgs.x);
  16774. ggml_set_zero(opt->lbfgs.xp);
  16775. ggml_set_zero(opt->lbfgs.g);
  16776. ggml_set_zero(opt->lbfgs.gp);
  16777. ggml_set_zero(opt->lbfgs.d);
  16778. if (opt->lbfgs.pf) {
  16779. ggml_set_zero(opt->lbfgs.pf);
  16780. }
  16781. ggml_set_zero(opt->lbfgs.lmal);
  16782. ggml_set_zero(opt->lbfgs.lmys);
  16783. ggml_set_zero(opt->lbfgs.lms);
  16784. ggml_set_zero(opt->lbfgs.lmy);
  16785. } break;
  16786. }
  16787. }
  16788. enum ggml_opt_result ggml_opt(
  16789. struct ggml_context * ctx,
  16790. struct ggml_opt_params params,
  16791. struct ggml_tensor * f) {
  16792. bool free_ctx = false;
  16793. if (ctx == NULL) {
  16794. struct ggml_init_params params_ctx = {
  16795. .mem_size = 16*1024*1024,
  16796. .mem_buffer = NULL,
  16797. .no_alloc = false,
  16798. };
  16799. ctx = ggml_init(params_ctx);
  16800. if (ctx == NULL) {
  16801. return GGML_OPT_NO_CONTEXT;
  16802. }
  16803. free_ctx = true;
  16804. }
  16805. enum ggml_opt_result result = GGML_OPT_OK;
  16806. struct ggml_opt_context * opt = (struct ggml_opt_context *) alloca(sizeof(struct ggml_opt_context));
  16807. ggml_opt_init(ctx, opt, params, 0);
  16808. result = ggml_opt_resume(ctx, opt, f);
  16809. if (free_ctx) {
  16810. ggml_free(ctx);
  16811. }
  16812. return result;
  16813. }
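// example (sketch): minimize a scalar graph output f whose inputs were marked
// as parameters with ggml_set_param; `ctx0` and `f` are illustrative names
//
//   struct ggml_opt_params opt_params = ggml_opt_default_params(GGML_OPT_ADAM);
//   opt_params.adam.n_iter = 500;
//   enum ggml_opt_result res = ggml_opt(ctx0, opt_params, f);
//   // res is GGML_OPT_OK or GGML_OPT_DID_NOT_CONVERGE on normal termination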
  16814. enum ggml_opt_result ggml_opt_resume(
  16815. struct ggml_context * ctx,
  16816. struct ggml_opt_context * opt,
  16817. struct ggml_tensor * f) {
  16818. // build forward + backward compute graphs
  16819. struct ggml_tensor * gfbuf = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, sizeof(struct ggml_cgraph) / ggml_type_size(GGML_TYPE_I32)+ (sizeof(struct ggml_cgraph) % ggml_type_size(GGML_TYPE_I32) ? 1 : 0));
  16820. struct ggml_tensor * gbbuf = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, sizeof(struct ggml_cgraph) / ggml_type_size(GGML_TYPE_I32)+ (sizeof(struct ggml_cgraph) % ggml_type_size(GGML_TYPE_I32) ? 1 : 0));
  16821. struct ggml_cgraph * gf = (struct ggml_cgraph *) gfbuf->data;
  16822. struct ggml_cgraph * gb = (struct ggml_cgraph *) gbbuf->data;
  16823. *gf = ggml_build_forward (f);
  16824. *gb = ggml_build_backward(ctx, gf, true);
  16825. return ggml_opt_resume_g(ctx, opt, f, gf, gb, NULL, NULL);
  16826. }
  16827. enum ggml_opt_result ggml_opt_resume_g(
  16828. struct ggml_context * ctx,
  16829. struct ggml_opt_context * opt,
  16830. struct ggml_tensor * f,
  16831. struct ggml_cgraph * gf,
  16832. struct ggml_cgraph * gb,
  16833. ggml_opt_callback callback,
  16834. void * callback_data) {
  16835. // build forward + backward compute graphs
  16836. enum ggml_opt_result result = GGML_OPT_OK;
  16837. switch (opt->params.type) {
  16838. case GGML_OPT_ADAM:
  16839. {
  16840. result = ggml_opt_adam(ctx, opt, opt->params, f, gf, gb, callback, callback_data);
  16841. } break;
  16842. case GGML_OPT_LBFGS:
  16843. {
  16844. result = ggml_opt_lbfgs(ctx, opt, opt->params, f, gf, gb, callback, callback_data);
  16845. } break;
  16846. }
  16847. if (opt->params.print_forward_graph) {
  16848. ggml_graph_print (gf);
  16849. ggml_graph_dump_dot(gf, NULL, "opt-forward.dot");
  16850. }
  16851. if (opt->params.print_backward_graph) {
  16852. ggml_graph_print (gb);
  16853. ggml_graph_dump_dot(gb, gf, "opt-backward.dot");
  16854. }
  16855. return result;
  16856. }
  16857. ////////////////////////////////////////////////////////////////////////////////
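// reference quantization helpers: each routine quantizes n floats from src in
// row chunks of k elements (k must be a multiple of the block size), writes
// the blocks to dst, accumulates a 16-bin histogram of the quantized values
// in hist, and returns the number of bytes produced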
  16858. size_t ggml_quantize_q4_0(const float * src, void * dst, int n, int k, int64_t * hist) {
  16859. assert(k % QK4_0 == 0);
  16860. const int nb = k / QK4_0;
  16861. for (int b = 0; b < n; b += k) {
  16862. block_q4_0 * restrict y = (block_q4_0 *) dst + b/QK4_0;
  16863. quantize_row_q4_0_reference(src + b, y, k);
  16864. for (int i = 0; i < nb; i++) {
  16865. for (int j = 0; j < QK4_0; j += 2) {
  16866. const uint8_t vi0 = y[i].qs[j/2] & 0x0F;
  16867. const uint8_t vi1 = y[i].qs[j/2] >> 4;
  16868. hist[vi0]++;
  16869. hist[vi1]++;
  16870. }
  16871. }
  16872. }
  16873. return (n/QK4_0*sizeof(block_q4_0));
  16874. }
  16875. size_t ggml_quantize_q4_1(const float * src, void * dst, int n, int k, int64_t * hist) {
  16876. assert(k % QK4_1 == 0);
  16877. const int nb = k / QK4_1;
  16878. for (int b = 0; b < n; b += k) {
  16879. block_q4_1 * restrict y = (block_q4_1 *) dst + b/QK4_1;
  16880. quantize_row_q4_1_reference(src + b, y, k);
  16881. for (int i = 0; i < nb; i++) {
  16882. for (int j = 0; j < QK4_1; j += 2) {
  16883. const uint8_t vi0 = y[i].qs[j/2] & 0x0F;
  16884. const uint8_t vi1 = y[i].qs[j/2] >> 4;
  16885. hist[vi0]++;
  16886. hist[vi1]++;
  16887. }
  16888. }
  16889. }
  16890. return (n/QK4_1*sizeof(block_q4_1));
  16891. }
  16892. size_t ggml_quantize_q5_0(const float * src, void * dst, int n, int k, int64_t * hist) {
  16893. assert(k % QK5_0 == 0);
  16894. const int nb = k / QK5_0;
  16895. for (int b = 0; b < n; b += k) {
  16896. block_q5_0 * restrict y = (block_q5_0 *)dst + b/QK5_0;
  16897. quantize_row_q5_0_reference(src + b, y, k);
  16898. for (int i = 0; i < nb; i++) {
  16899. uint32_t qh;
  16900. memcpy(&qh, &y[i].qh, sizeof(qh));
  16901. for (int j = 0; j < QK5_0; j += 2) {
  16902. const uint8_t vh0 = ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4;
  16903. const uint8_t vh1 = ((qh & (1u << (j + 16))) >> (j + 12));
16904. // fold the 5-bit quants into 16 histogram bins
  16905. const uint8_t vi0 = ((y[i].qs[j/2] & 0x0F) | vh0) / 2;
  16906. const uint8_t vi1 = ((y[i].qs[j/2] >> 4) | vh1) / 2;
  16907. hist[vi0]++;
  16908. hist[vi1]++;
  16909. }
  16910. }
  16911. }
  16912. return (n/QK5_0*sizeof(block_q5_0));
  16913. }
  16914. size_t ggml_quantize_q5_1(const float * src, void * dst, int n, int k, int64_t * hist) {
  16915. assert(k % QK5_1 == 0);
  16916. const int nb = k / QK5_1;
  16917. for (int b = 0; b < n; b += k) {
  16918. block_q5_1 * restrict y = (block_q5_1 *)dst + b/QK5_1;
  16919. quantize_row_q5_1_reference(src + b, y, k);
  16920. for (int i = 0; i < nb; i++) {
  16921. uint32_t qh;
  16922. memcpy(&qh, &y[i].qh, sizeof(qh));
  16923. for (int j = 0; j < QK5_1; j += 2) {
  16924. const uint8_t vh0 = ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4;
  16925. const uint8_t vh1 = ((qh & (1u << (j + 16))) >> (j + 12));
16926. // fold the 5-bit quants into 16 histogram bins
  16927. const uint8_t vi0 = ((y[i].qs[j/2] & 0x0F) | vh0) / 2;
  16928. const uint8_t vi1 = ((y[i].qs[j/2] >> 4) | vh1) / 2;
  16929. hist[vi0]++;
  16930. hist[vi1]++;
  16931. }
  16932. }
  16933. }
  16934. return (n/QK5_1*sizeof(block_q5_1));
  16935. }
  16936. size_t ggml_quantize_q8_0(const float * src, void * dst, int n, int k, int64_t * hist) {
  16937. assert(k % QK8_0 == 0);
  16938. const int nb = k / QK8_0;
  16939. for (int b = 0; b < n; b += k) {
  16940. block_q8_0 * restrict y = (block_q8_0 *)dst + b/QK8_0;
  16941. quantize_row_q8_0_reference(src + b, y, k);
  16942. for (int i = 0; i < nb; i++) {
  16943. for (int j = 0; j < QK8_0; ++j) {
  16944. const int8_t vi = y[i].qs[j];
  16945. hist[vi/16 + 8]++;
  16946. }
  16947. }
  16948. }
  16949. return (n/QK8_0*sizeof(block_q8_0));
  16950. }
  16951. size_t ggml_quantize_chunk(enum ggml_type type, const float * src, void * dst, int start, int n, int64_t * hist) {
  16952. size_t result = 0;
  16953. switch (type) {
  16954. case GGML_TYPE_Q4_0:
  16955. {
  16956. GGML_ASSERT(start % QK4_0 == 0);
  16957. block_q4_0 * block = (block_q4_0*)dst + start / QK4_0;
  16958. result = ggml_quantize_q4_0(src + start, block, n, n, hist);
  16959. } break;
  16960. case GGML_TYPE_Q4_1:
  16961. {
  16962. GGML_ASSERT(start % QK4_1 == 0);
  16963. block_q4_1 * block = (block_q4_1*)dst + start / QK4_1;
  16964. result = ggml_quantize_q4_1(src + start, block, n, n, hist);
  16965. } break;
  16966. case GGML_TYPE_Q5_0:
  16967. {
  16968. GGML_ASSERT(start % QK5_0 == 0);
  16969. block_q5_0 * block = (block_q5_0*)dst + start / QK5_0;
  16970. result = ggml_quantize_q5_0(src + start, block, n, n, hist);
  16971. } break;
  16972. case GGML_TYPE_Q5_1:
  16973. {
  16974. GGML_ASSERT(start % QK5_1 == 0);
  16975. block_q5_1 * block = (block_q5_1*)dst + start / QK5_1;
  16976. result = ggml_quantize_q5_1(src + start, block, n, n, hist);
  16977. } break;
  16978. case GGML_TYPE_Q8_0:
  16979. {
  16980. GGML_ASSERT(start % QK8_0 == 0);
  16981. block_q8_0 * block = (block_q8_0*)dst + start / QK8_0;
  16982. result = ggml_quantize_q8_0(src + start, block, n, n, hist);
  16983. } break;
  16984. #ifdef GGML_USE_K_QUANTS
  16985. case GGML_TYPE_Q2_K:
  16986. {
  16987. GGML_ASSERT(start % QK_K == 0);
  16988. block_q2_K * block = (block_q2_K*)dst + start / QK_K;
  16989. result = ggml_quantize_q2_K(src + start, block, n, n, hist);
  16990. } break;
  16991. case GGML_TYPE_Q3_K:
  16992. {
  16993. GGML_ASSERT(start % QK_K == 0);
  16994. block_q3_K * block = (block_q3_K*)dst + start / QK_K;
  16995. result = ggml_quantize_q3_K(src + start, block, n, n, hist);
  16996. } break;
  16997. case GGML_TYPE_Q4_K:
  16998. {
  16999. GGML_ASSERT(start % QK_K == 0);
  17000. block_q4_K * block = (block_q4_K*)dst + start / QK_K;
  17001. result = ggml_quantize_q4_K(src + start, block, n, n, hist);
  17002. } break;
  17003. case GGML_TYPE_Q5_K:
  17004. {
  17005. GGML_ASSERT(start % QK_K == 0);
  17006. block_q5_K * block = (block_q5_K*)dst + start / QK_K;
  17007. result = ggml_quantize_q5_K(src + start, block, n, n, hist);
  17008. } break;
  17009. case GGML_TYPE_Q6_K:
  17010. {
  17011. GGML_ASSERT(start % QK_K == 0);
  17012. block_q6_K * block = (block_q6_K*)dst + start / QK_K;
  17013. result = ggml_quantize_q6_K(src + start, block, n, n, hist);
  17014. } break;
  17015. #endif
  17016. case GGML_TYPE_F16:
  17017. {
  17018. int elemsize = sizeof(ggml_fp16_t);
  17019. ggml_fp32_to_fp16_row(src + start, (ggml_fp16_t *)dst + start, n);
  17020. result = n * elemsize;
  17021. } break;
  17022. case GGML_TYPE_F32:
  17023. {
  17024. int elemsize = sizeof(float);
  17025. result = n * elemsize;
  17026. memcpy((uint8_t *)dst + start * elemsize, src + start, result);
  17027. } break;
  17028. default:
  17029. assert(false);
  17030. }
  17031. return result;
  17032. }
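// example (sketch): quantize a float matrix row by row to Q4_0; `src`, `dst`,
// `nrows` and `ncols` are illustrative, and ncols must be a multiple of QK4_0
//
//   int64_t hist[16] = {0};
//   size_t bytes = 0;
//   for (int i = 0; i < nrows; ++i) {
//       bytes += ggml_quantize_chunk(GGML_TYPE_Q4_0, src, dst, i*ncols, ncols, hist);
//   }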
  17033. ////////////////////////////////////////////////////////////////////////////////
  17034. struct gguf_str {
  17035. uint64_t n; // GGUFv2
  17036. char * data;
  17037. };
  17038. static const size_t GGUF_TYPE_SIZE[GGUF_TYPE_COUNT] = {
  17039. [GGUF_TYPE_UINT8] = sizeof(uint8_t),
  17040. [GGUF_TYPE_INT8] = sizeof(int8_t),
  17041. [GGUF_TYPE_UINT16] = sizeof(uint16_t),
  17042. [GGUF_TYPE_INT16] = sizeof(int16_t),
  17043. [GGUF_TYPE_UINT32] = sizeof(uint32_t),
  17044. [GGUF_TYPE_INT32] = sizeof(int32_t),
  17045. [GGUF_TYPE_FLOAT32] = sizeof(float),
  17046. [GGUF_TYPE_BOOL] = sizeof(bool),
  17047. [GGUF_TYPE_STRING] = sizeof(struct gguf_str),
  17048. [GGUF_TYPE_UINT64] = sizeof(uint64_t),
  17049. [GGUF_TYPE_INT64] = sizeof(int64_t),
  17050. [GGUF_TYPE_FLOAT64] = sizeof(double),
  17051. [GGUF_TYPE_ARRAY] = 0, // undefined
  17052. };
  17053. static_assert(GGUF_TYPE_COUNT == 13, "GGUF_TYPE_COUNT != 13");
  17054. static const char * GGUF_TYPE_NAME[GGUF_TYPE_COUNT] = {
  17055. [GGUF_TYPE_UINT8] = "u8",
  17056. [GGUF_TYPE_INT8] = "i8",
  17057. [GGUF_TYPE_UINT16] = "u16",
  17058. [GGUF_TYPE_INT16] = "i16",
  17059. [GGUF_TYPE_UINT32] = "u32",
  17060. [GGUF_TYPE_INT32] = "i32",
  17061. [GGUF_TYPE_FLOAT32] = "f32",
  17062. [GGUF_TYPE_BOOL] = "bool",
  17063. [GGUF_TYPE_STRING] = "str",
  17064. [GGUF_TYPE_ARRAY] = "arr",
  17065. [GGUF_TYPE_UINT64] = "u64",
  17066. [GGUF_TYPE_INT64] = "i64",
  17067. [GGUF_TYPE_FLOAT64] = "f64",
  17068. };
  17069. static_assert(GGUF_TYPE_COUNT == 13, "GGUF_TYPE_COUNT != 13");
  17070. union gguf_value {
  17071. uint8_t uint8;
  17072. int8_t int8;
  17073. uint16_t uint16;
  17074. int16_t int16;
  17075. uint32_t uint32;
  17076. int32_t int32;
  17077. float float32;
  17078. uint64_t uint64;
  17079. int64_t int64;
  17080. double float64;
  17081. bool bool_;
  17082. struct gguf_str str;
  17083. struct {
  17084. enum gguf_type type;
  17085. uint64_t n; // GGUFv2
  17086. void * data;
  17087. } arr;
  17088. };
  17089. struct gguf_kv {
  17090. struct gguf_str key;
  17091. enum gguf_type type;
  17092. union gguf_value value;
  17093. };
  17094. struct gguf_header {
  17095. uint32_t magic;
  17096. uint32_t version;
  17097. uint64_t n_tensors; // GGUFv2
  17098. uint64_t n_kv; // GGUFv2
  17099. };
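// on-disk layout of a GGUF file, as read below: the header, then n_kv
// key-value pairs, then n_tensors tensor infos, then padding up to
// `alignment`, and finally the tensor data blob that the info offsets
// point into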
  17100. struct gguf_tensor_info {
  17101. struct gguf_str name;
  17102. uint32_t n_dims;
  17103. uint64_t ne[GGML_MAX_DIMS];
  17104. enum ggml_type type;
  17105. uint64_t offset; // offset from start of `data`, must be a multiple of `ALIGNMENT`
  17106. // for writing API
  17107. const void * data;
  17108. size_t size;
  17109. };
  17110. struct gguf_context {
  17111. struct gguf_header header;
  17112. struct gguf_kv * kv;
  17113. struct gguf_tensor_info * infos;
  17114. size_t alignment;
  17115. size_t offset; // offset of `data` from beginning of file
  17116. size_t size; // size of `data` in bytes
  17117. //uint8_t * padding;
  17118. void * data;
  17119. };
  17120. static bool gguf_fread_el(FILE * file, void * dst, size_t size, size_t * offset) {
  17121. const size_t n = fread(dst, 1, size, file);
  17122. *offset += n;
  17123. return n == size;
  17124. }
  17125. // NOTE: temporary handling of GGUFv1 >> remove after Oct 2023
  17126. static bool gguf_fread_str_cur(FILE * file, struct gguf_str * p, size_t * offset) {
  17127. p->n = 0;
  17128. p->data = NULL;
  17129. bool ok = true;
  17130. ok = ok && gguf_fread_el(file, &p->n, sizeof(p->n), offset); p->data = calloc(p->n + 1, 1);
  17131. ok = ok && gguf_fread_el(file, p->data, p->n, offset);
  17132. return ok;
  17133. }
  17134. static bool gguf_fread_str_v1(FILE * file, struct gguf_str * p, size_t * offset) {
  17135. p->n = 0;
  17136. p->data = NULL;
  17137. bool ok = true;
  17138. uint32_t n = 0;
  17139. ok = ok && gguf_fread_el(file, &n, sizeof(n), offset); p->data = calloc(n + 1, 1); p->n = n;
  17140. ok = ok && gguf_fread_el(file, p->data, p->n, offset);
  17141. return ok;
  17142. }
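// GGUFv1 stored string lengths (and counts) as 32-bit values; the current
// format uses 64-bit values, so the reader is selected per file version below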
  17143. struct gguf_context * gguf_init_empty(void) {
  17144. struct gguf_context * ctx = GGML_ALIGNED_MALLOC(sizeof(struct gguf_context));
  17145. ctx->header.magic = GGUF_MAGIC;
  17146. ctx->header.version = GGUF_VERSION;
  17147. ctx->header.n_tensors = 0;
  17148. ctx->header.n_kv = 0;
  17149. ctx->kv = NULL;
  17150. ctx->infos = NULL;
  17151. ctx->alignment = GGUF_DEFAULT_ALIGNMENT;
  17152. ctx->offset = 0;
  17153. ctx->size = 0;
  17154. ctx->data = NULL;
  17155. return ctx;
  17156. }
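// example (sketch): build a GGUF context in memory; `model_tensor` stands for
// a ggml_tensor created elsewhere, and the key names are illustrative
//
//   struct gguf_context * gctx = gguf_init_empty();
//   gguf_set_val_str(gctx, "general.architecture", "llama");
//   gguf_set_val_u32(gctx, "general.alignment", 32);
//   gguf_add_tensor(gctx, model_tensor);
//   // ... serialize with the writer helpers further below, then:
//   gguf_free(gctx);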
  17157. struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_params params) {
  17158. FILE * file = fopen(fname, "rb");
  17159. if (!file) {
  17160. return NULL;
  17161. }
  17162. // offset from start of file
  17163. size_t offset = 0;
  17164. uint32_t magic = 0;
  17165. // check the magic before making allocations
  17166. {
  17167. gguf_fread_el(file, &magic, sizeof(magic), &offset);
  17168. if (magic != GGUF_MAGIC) {
  17169. fprintf(stderr, "%s: invalid magic number %08x\n", __func__, magic);
  17170. fclose(file);
  17171. return NULL;
  17172. }
  17173. }
  17174. bool ok = true;
  17175. struct gguf_context * ctx = GGML_ALIGNED_MALLOC(sizeof(struct gguf_context));
  17176. // read the header
  17177. {
  17178. ctx->header.magic = magic;
  17179. ctx->kv = NULL;
  17180. ctx->infos = NULL;
  17181. ctx->data = NULL;
  17182. ok = ok && gguf_fread_el(file, &ctx->header.version, sizeof(ctx->header.version), &offset);
  17183. if (ctx->header.version == 1) {
  17184. // NOTE: temporary handling of GGUFv1 >> remove after Oct 2023
  17185. uint32_t n_tensors = 0;
  17186. uint32_t n_kv = 0;
  17187. ok = ok && gguf_fread_el(file, &n_tensors, sizeof(n_tensors), &offset);
  17188. ok = ok && gguf_fread_el(file, &n_kv, sizeof(n_kv), &offset);
  17189. ctx->header.n_tensors = n_tensors;
  17190. ctx->header.n_kv = n_kv;
  17191. } else {
  17192. ok = ok && gguf_fread_el(file, &ctx->header.n_tensors, sizeof(ctx->header.n_tensors), &offset);
  17193. ok = ok && gguf_fread_el(file, &ctx->header.n_kv, sizeof(ctx->header.n_kv), &offset);
  17194. }
  17195. if (!ok) {
  17196. fprintf(stderr, "%s: failed to read header\n", __func__);
  17197. fclose(file);
  17198. gguf_free(ctx);
  17199. return NULL;
  17200. }
  17201. }
  17202. // NOTE: temporary handling of GGUFv1 >> remove after Oct 2023
  17203. bool (* gguf_fread_str)(FILE *, struct gguf_str *, size_t *) = gguf_fread_str_cur;
  17204. if (ctx->header.version == 1) {
  17205. gguf_fread_str = gguf_fread_str_v1;
  17206. }
  17207. // read the kv pairs
  17208. {
  17209. ctx->kv = malloc(ctx->header.n_kv * sizeof(struct gguf_kv));
  17210. for (uint32_t i = 0; i < ctx->header.n_kv; ++i) {
  17211. struct gguf_kv * kv = &ctx->kv[i];
  17212. //fprintf(stderr, "%s: reading kv %d\n", __func__, i);
  17213. ok = ok && gguf_fread_str(file, &kv->key, &offset);
  17214. ok = ok && gguf_fread_el (file, &kv->type, sizeof(kv->type), &offset);
  17215. //fprintf(stderr, "%s: reading kv with key %s\n", __func__, kv->key.data);
  17216. switch (kv->type) {
  17217. case GGUF_TYPE_UINT8: ok = ok && gguf_fread_el (file, &kv->value.uint8, sizeof(kv->value.uint8), &offset); break;
  17218. case GGUF_TYPE_INT8: ok = ok && gguf_fread_el (file, &kv->value.int8, sizeof(kv->value.int8), &offset); break;
  17219. case GGUF_TYPE_UINT16: ok = ok && gguf_fread_el (file, &kv->value.uint16, sizeof(kv->value.uint16), &offset); break;
  17220. case GGUF_TYPE_INT16: ok = ok && gguf_fread_el (file, &kv->value.int16, sizeof(kv->value.int16), &offset); break;
  17221. case GGUF_TYPE_UINT32: ok = ok && gguf_fread_el (file, &kv->value.uint32, sizeof(kv->value.uint32), &offset); break;
  17222. case GGUF_TYPE_INT32: ok = ok && gguf_fread_el (file, &kv->value.int32, sizeof(kv->value.int32), &offset); break;
  17223. case GGUF_TYPE_FLOAT32: ok = ok && gguf_fread_el (file, &kv->value.float32, sizeof(kv->value.float32), &offset); break;
  17224. case GGUF_TYPE_UINT64: ok = ok && gguf_fread_el (file, &kv->value.uint64, sizeof(kv->value.uint64), &offset); break;
  17225. case GGUF_TYPE_INT64: ok = ok && gguf_fread_el (file, &kv->value.int64, sizeof(kv->value.int64), &offset); break;
  17226. case GGUF_TYPE_FLOAT64: ok = ok && gguf_fread_el (file, &kv->value.float64, sizeof(kv->value.float64), &offset); break;
  17227. case GGUF_TYPE_BOOL: ok = ok && gguf_fread_el (file, &kv->value.bool_, sizeof(kv->value.bool_), &offset); break;
  17228. case GGUF_TYPE_STRING: ok = ok && gguf_fread_str(file, &kv->value.str, &offset); break;
  17229. case GGUF_TYPE_ARRAY:
  17230. {
  17231. ok = ok && gguf_fread_el(file, &kv->value.arr.type, sizeof(kv->value.arr.type), &offset);
  17232. if (ctx->header.version == 1) {
  17233. // NOTE: temporary handling of GGUFv1 >> remove after Oct 2023
  17234. uint32_t n = 0;
  17235. ok = ok && gguf_fread_el(file, &n, sizeof(n), &offset);
  17236. kv->value.arr.n = n;
  17237. } else {
  17238. ok = ok && gguf_fread_el(file, &kv->value.arr.n, sizeof(kv->value.arr.n), &offset);
  17239. }
  17240. switch (kv->value.arr.type) {
  17241. case GGUF_TYPE_UINT8:
  17242. case GGUF_TYPE_INT8:
  17243. case GGUF_TYPE_UINT16:
  17244. case GGUF_TYPE_INT16:
  17245. case GGUF_TYPE_UINT32:
  17246. case GGUF_TYPE_INT32:
  17247. case GGUF_TYPE_FLOAT32:
  17248. case GGUF_TYPE_UINT64:
  17249. case GGUF_TYPE_INT64:
  17250. case GGUF_TYPE_FLOAT64:
  17251. case GGUF_TYPE_BOOL:
  17252. {
  17253. kv->value.arr.data = malloc(kv->value.arr.n * GGUF_TYPE_SIZE[kv->value.arr.type]);
  17254. ok = ok && gguf_fread_el(file, kv->value.arr.data, kv->value.arr.n * GGUF_TYPE_SIZE[kv->value.arr.type], &offset);
  17255. } break;
  17256. case GGUF_TYPE_STRING:
  17257. {
  17258. kv->value.arr.data = malloc(kv->value.arr.n * sizeof(struct gguf_str));
  17259. for (uint32_t j = 0; j < kv->value.arr.n; ++j) {
  17260. ok = ok && gguf_fread_str(file, &((struct gguf_str *) kv->value.arr.data)[j], &offset);
  17261. }
  17262. } break;
  17263. case GGUF_TYPE_ARRAY:
  17264. case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type"); break;
  17265. }
  17266. } break;
  17267. case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type");
  17268. }
  17269. if (!ok) {
  17270. break;
  17271. }
  17272. }
  17273. if (!ok) {
  17274. fprintf(stderr, "%s: failed to read key-value pairs\n", __func__);
  17275. fclose(file);
  17276. gguf_free(ctx);
  17277. return NULL;
  17278. }
  17279. }
  17280. // read the tensor infos
  17281. {
  17282. ctx->infos = malloc(ctx->header.n_tensors * sizeof(struct gguf_tensor_info));
  17283. for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) {
  17284. struct gguf_tensor_info * info = &ctx->infos[i];
  17285. for (int j = 0; j < GGML_MAX_DIMS; ++j) {
  17286. info->ne[j] = 1;
  17287. }
  17288. ok = ok && gguf_fread_str(file, &info->name, &offset);
  17289. ok = ok && gguf_fread_el (file, &info->n_dims, sizeof(info->n_dims), &offset);
  17290. for (uint32_t j = 0; j < info->n_dims; ++j) {
  17291. if (ctx->header.version == 1) {
  17292. // NOTE: temporary handling of GGUFv1 >> remove after Oct 2023
  17293. uint32_t t = 0;
  17294. ok = ok && gguf_fread_el(file, &t, sizeof(t), &offset);
  17295. info->ne[j] = t;
  17296. } else {
  17297. ok = ok && gguf_fread_el(file, &info->ne[j], sizeof(info->ne[j]), &offset);
  17298. }
  17299. }
  17300. ok = ok && gguf_fread_el (file, &info->type, sizeof(info->type), &offset);
  17301. ok = ok && gguf_fread_el (file, &info->offset, sizeof(info->offset), &offset);
  17302. if (!ok) {
  17303. fprintf(stderr, "%s: failed to read tensor info\n", __func__);
  17304. fclose(file);
  17305. gguf_free(ctx);
  17306. return NULL;
  17307. }
  17308. }
  17309. }
  17310. ctx->alignment = GGUF_DEFAULT_ALIGNMENT;
  17311. int alignment_idx = gguf_find_key(ctx, "general.alignment");
  17312. if (alignment_idx != -1) {
  17313. ctx->alignment = gguf_get_val_u32(ctx, alignment_idx);
  17314. }
  17315. // we require the data section to be aligned, so take into account any padding
  17316. {
  17317. const size_t offset_pad = offset % ctx->alignment;
  17318. if (offset_pad != 0) {
  17319. offset += ctx->alignment - offset_pad;
  17320. fseek(file, offset, SEEK_SET);
  17321. }
  17322. }
  17323. // store the current file offset - this is where the data section starts
  17324. ctx->offset = offset;
  17325. // compute the total size of the data section, taking into account the alignment
  17326. {
  17327. ctx->size = 0;
  17328. for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) {
  17329. struct gguf_tensor_info * info = &ctx->infos[i];
  17330. const int64_t ne =
  17331. (int64_t) info->ne[0] *
  17332. (int64_t) info->ne[1] *
  17333. (int64_t) info->ne[2] *
  17334. (int64_t) info->ne[3];
  17335. if (ne % ggml_blck_size(info->type) != 0) {
  17336. fprintf(stderr, "%s: tensor '%s' number of elements (%" PRId64 ") is not a multiple of block size (%d)\n",
  17337. __func__, info->name.data, ne, ggml_blck_size(info->type));
  17338. fclose(file);
  17339. gguf_free(ctx);
  17340. return NULL;
  17341. }
  17342. const size_t size_cur = (ne*ggml_type_size(info->type))/ggml_blck_size(info->type);
  17343. ctx->size += GGML_PAD(size_cur, ctx->alignment);
  17344. }
  17345. }
  17346. // load the tensor data only if requested
  17347. if (params.ctx != NULL) {
  17348. // if the provided gguf_context is no_alloc, then we create "empty" tensors and do not read the binary blob
  17349. // otherwise, we load the binary blob into the created ggml_context as well, and point the "data" members of
  17350. // the ggml_tensor structs to the appropriate locations in the binary blob
  17351. // compute the exact size needed for the new ggml_context
  17352. const size_t mem_size =
  17353. params.no_alloc ?
  17354. (ctx->header.n_tensors )*ggml_tensor_overhead() :
  17355. (ctx->header.n_tensors + 1)*ggml_tensor_overhead() + ctx->size;
  17356. struct ggml_init_params pdata = {
  17357. .mem_size = mem_size,
  17358. .mem_buffer = NULL,
  17359. .no_alloc = params.no_alloc,
  17360. };
  17361. *params.ctx = ggml_init(pdata);
  17362. struct ggml_context * ctx_data = *params.ctx;
  17363. struct ggml_tensor * data = NULL;
  17364. if (!params.no_alloc) {
  17365. data = ggml_new_tensor_1d(ctx_data, GGML_TYPE_I8, ctx->size);
  17366. ok = ok && data != NULL;
  17367. // read the binary blob with the tensor data
  17368. ok = ok && gguf_fread_el(file, data->data, ctx->size, &offset);
  17369. if (!ok) {
  17370. fprintf(stderr, "%s: failed to read tensor data\n", __func__);
  17371. fclose(file);
  17372. ggml_free(ctx_data);
  17373. gguf_free(ctx);
  17374. return NULL;
  17375. }
  17376. ctx->data = data->data;
  17377. }
  17378. ggml_set_no_alloc(ctx_data, true);
  17379. // create the tensors
  17380. for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) {
  17381. const int64_t ne[GGML_MAX_DIMS] = {
  17382. ctx->infos[i].ne[0],
  17383. ctx->infos[i].ne[1],
  17384. ctx->infos[i].ne[2],
  17385. ctx->infos[i].ne[3],
  17386. };
  17387. struct ggml_tensor * cur = ggml_new_tensor(ctx_data, ctx->infos[i].type, ctx->infos[i].n_dims, ne);
  17388. ok = ok && cur != NULL;
  17389. ggml_set_name(cur, ctx->infos[i].name.data);
  17390. if (!ok) {
  17391. break;
  17392. }
  17393. // point the data member to the appropriate location in the binary blob using the tensor infos
  17394. if (!params.no_alloc) {
  17395. //cur->data = (char *) data->data + ctx->infos[i].offset - ctx->offset; // offset from start of file
  17396. cur->data = (char *) data->data + ctx->infos[i].offset; // offset from data
  17397. }
  17398. }
  17399. if (!ok) {
  17400. fprintf(stderr, "%s: failed to read the tensor data\n", __func__);
  17401. fclose(file);
  17402. ggml_free(ctx_data);
  17403. gguf_free(ctx);
  17404. return NULL;
  17405. }
  17406. ggml_set_no_alloc(ctx_data, params.no_alloc);
  17407. }
  17408. fclose(file);
  17409. return ctx;
  17410. }
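// example (sketch): load a GGUF file together with its tensor data and look
// the tensors up by name in the returned ggml context
//
//   struct ggml_context * ctx_data = NULL;
//   struct gguf_init_params iparams = {
//       /*.no_alloc =*/ false,
//       /*.ctx      =*/ &ctx_data,
//   };
//   struct gguf_context * gctx = gguf_init_from_file("model.gguf", iparams);
//   if (gctx) {
//       for (int i = 0; i < gguf_get_n_tensors(gctx); ++i) {
//           struct ggml_tensor * t = ggml_get_tensor(ctx_data, gguf_get_tensor_name(gctx, i));
//           (void) t; // use the tensor data here
//       }
//       gguf_free(gctx);
//       ggml_free(ctx_data);
//   }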
  17411. void gguf_free(struct gguf_context * ctx) {
  17412. if (ctx == NULL) {
  17413. return;
  17414. }
  17415. if (ctx->kv) {
  17416. // free string memory - not great..
  17417. for (uint32_t i = 0; i < ctx->header.n_kv; ++i) {
  17418. struct gguf_kv * kv = &ctx->kv[i];
  17419. if (kv->key.data) {
  17420. free(kv->key.data);
  17421. }
  17422. if (kv->type == GGUF_TYPE_STRING) {
  17423. if (kv->value.str.data) {
  17424. free(kv->value.str.data);
  17425. }
  17426. }
  17427. if (kv->type == GGUF_TYPE_ARRAY) {
  17428. if (kv->value.arr.data) {
  17429. if (kv->value.arr.type == GGUF_TYPE_STRING) {
  17430. for (uint32_t j = 0; j < kv->value.arr.n; ++j) {
  17431. struct gguf_str * str = &((struct gguf_str *) kv->value.arr.data)[j];
  17432. if (str->data) {
  17433. free(str->data);
  17434. }
  17435. }
  17436. }
  17437. free(kv->value.arr.data);
  17438. }
  17439. }
  17440. }
  17441. free(ctx->kv);
  17442. }
  17443. if (ctx->infos) {
  17444. for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) {
  17445. struct gguf_tensor_info * info = &ctx->infos[i];
  17446. if (info->name.data) {
  17447. free(info->name.data);
  17448. }
  17449. }
  17450. free(ctx->infos);
  17451. }
  17452. GGML_ALIGNED_FREE(ctx);
  17453. }
  17454. const char * gguf_type_name(enum gguf_type type) {
  17455. return GGUF_TYPE_NAME[type];
  17456. }
  17457. int gguf_get_version(const struct gguf_context * ctx) {
  17458. return ctx->header.version;
  17459. }
  17460. size_t gguf_get_alignment(const struct gguf_context * ctx) {
  17461. return ctx->alignment;
  17462. }
  17463. size_t gguf_get_data_offset(const struct gguf_context * ctx) {
  17464. return ctx->offset;
  17465. }
  17466. void * gguf_get_data(const struct gguf_context * ctx) {
  17467. return ctx->data;
  17468. }
  17469. int gguf_get_n_kv(const struct gguf_context * ctx) {
  17470. return ctx->header.n_kv;
  17471. }
  17472. int gguf_find_key(const struct gguf_context * ctx, const char * key) {
  17473. // return -1 if key not found
  17474. int keyfound = -1;
  17475. const int n_kv = gguf_get_n_kv(ctx);
  17476. for (int i = 0; i < n_kv; ++i) {
  17477. if (strcmp(key, gguf_get_key(ctx, i)) == 0) {
  17478. keyfound = i;
  17479. break;
  17480. }
  17481. }
  17482. return keyfound;
  17483. }
  17484. const char * gguf_get_key(const struct gguf_context * ctx, int key_id) {
  17485. return ctx->kv[key_id].key.data;
  17486. }
  17487. enum gguf_type gguf_get_kv_type(const struct gguf_context * ctx, int key_id) {
  17488. return ctx->kv[key_id].type;
  17489. }
  17490. enum gguf_type gguf_get_arr_type(const struct gguf_context * ctx, int key_id) {
  17491. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_ARRAY);
  17492. return ctx->kv[key_id].value.arr.type;
  17493. }
  17494. const void * gguf_get_arr_data(const struct gguf_context * ctx, int key_id) {
  17495. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_ARRAY);
  17496. return ctx->kv[key_id].value.arr.data;
  17497. }
  17498. const char * gguf_get_arr_str(const struct gguf_context * ctx, int key_id, int i) {
  17499. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_ARRAY);
  17500. struct gguf_kv * kv = &ctx->kv[key_id];
  17501. struct gguf_str * str = &((struct gguf_str *) kv->value.arr.data)[i];
  17502. return str->data;
  17503. }
  17504. int gguf_get_arr_n(const struct gguf_context * ctx, int key_id) {
  17505. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_ARRAY);
  17506. return ctx->kv[key_id].value.arr.n;
  17507. }
  17508. uint8_t gguf_get_val_u8(const struct gguf_context * ctx, int key_id) {
  17509. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_UINT8);
  17510. return ctx->kv[key_id].value.uint8;
  17511. }
  17512. int8_t gguf_get_val_i8(const struct gguf_context * ctx, int key_id) {
  17513. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_INT8);
  17514. return ctx->kv[key_id].value.int8;
  17515. }
  17516. uint16_t gguf_get_val_u16(const struct gguf_context * ctx, int key_id) {
  17517. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_UINT16);
  17518. return ctx->kv[key_id].value.uint16;
  17519. }
  17520. int16_t gguf_get_val_i16(const struct gguf_context * ctx, int key_id) {
  17521. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_INT16);
  17522. return ctx->kv[key_id].value.int16;
  17523. }
  17524. uint32_t gguf_get_val_u32(const struct gguf_context * ctx, int key_id) {
  17525. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_UINT32);
  17526. return ctx->kv[key_id].value.uint32;
  17527. }
  17528. int32_t gguf_get_val_i32(const struct gguf_context * ctx, int key_id) {
  17529. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_INT32);
  17530. return ctx->kv[key_id].value.int32;
  17531. }
  17532. float gguf_get_val_f32(const struct gguf_context * ctx, int key_id) {
  17533. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_FLOAT32);
  17534. return ctx->kv[key_id].value.float32;
  17535. }
  17536. uint64_t gguf_get_val_u64(const struct gguf_context * ctx, int key_id) {
  17537. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_UINT64);
  17538. return ctx->kv[key_id].value.uint64;
  17539. }
  17540. int64_t gguf_get_val_i64(const struct gguf_context * ctx, int key_id) {
  17541. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_INT64);
  17542. return ctx->kv[key_id].value.int64;
  17543. }
  17544. double gguf_get_val_f64(const struct gguf_context * ctx, int key_id) {
  17545. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_FLOAT64);
  17546. return ctx->kv[key_id].value.float64;
  17547. }
  17548. bool gguf_get_val_bool(const struct gguf_context * ctx, int key_id) {
  17549. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_BOOL);
  17550. return ctx->kv[key_id].value.bool_;
  17551. }
  17552. const char * gguf_get_val_str(const struct gguf_context * ctx, int key_id) {
  17553. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_STRING);
  17554. return ctx->kv[key_id].value.str.data;
  17555. }
  17556. int gguf_get_n_tensors(const struct gguf_context * ctx) {
  17557. return ctx->header.n_tensors;
  17558. }
  17559. int gguf_find_tensor(const struct gguf_context * ctx, const char * name) {
  17560. // return -1 if tensor not found
  17561. int tensorfound = -1;
  17562. const int n_tensors = gguf_get_n_tensors(ctx);
  17563. for (int i = 0; i < n_tensors; ++i) {
  17564. if (strcmp(name, gguf_get_tensor_name(ctx, i)) == 0) {
  17565. tensorfound = i;
  17566. break;
  17567. }
  17568. }
  17569. return tensorfound;
  17570. }
  17571. size_t gguf_get_tensor_offset(const struct gguf_context * ctx, int i) {
  17572. return ctx->infos[i].offset;
  17573. }
  17574. char * gguf_get_tensor_name(const struct gguf_context * ctx, int i) {
  17575. return ctx->infos[i].name.data;
  17576. }
  17577. // returns the index
  17578. static int gguf_get_or_add_key(struct gguf_context * ctx, const char * key) {
  17579. const int idx = gguf_find_key(ctx, key);
  17580. if (idx >= 0) {
  17581. return idx;
  17582. }
  17583. const int n_kv = gguf_get_n_kv(ctx);
  17584. ctx->kv = realloc(ctx->kv, (n_kv + 1) * sizeof(struct gguf_kv));
  17585. ctx->kv[n_kv].key.n = strlen(key);
  17586. ctx->kv[n_kv].key.data = strdup(key);
  17587. ctx->header.n_kv++;
  17588. return n_kv;
  17589. }
  17590. void gguf_set_val_u8(struct gguf_context * ctx, const char * key, uint8_t val) {
  17591. const int idx = gguf_get_or_add_key(ctx, key);
  17592. ctx->kv[idx].type = GGUF_TYPE_UINT8;
  17593. ctx->kv[idx].value.uint8 = val;
  17594. }
  17595. void gguf_set_val_i8(struct gguf_context * ctx, const char * key, int8_t val) {
  17596. const int idx = gguf_get_or_add_key(ctx, key);
  17597. ctx->kv[idx].type = GGUF_TYPE_INT8;
  17598. ctx->kv[idx].value.int8 = val;
  17599. }
  17600. void gguf_set_val_u16(struct gguf_context * ctx, const char * key, uint16_t val) {
  17601. const int idx = gguf_get_or_add_key(ctx, key);
  17602. ctx->kv[idx].type = GGUF_TYPE_UINT16;
  17603. ctx->kv[idx].value.uint16 = val;
  17604. }
  17605. void gguf_set_val_i16(struct gguf_context * ctx, const char * key, int16_t val) {
  17606. const int idx = gguf_get_or_add_key(ctx, key);
  17607. ctx->kv[idx].type = GGUF_TYPE_INT16;
  17608. ctx->kv[idx].value.int16 = val;
  17609. }
  17610. void gguf_set_val_u32(struct gguf_context * ctx, const char * key, uint32_t val) {
  17611. const int idx = gguf_get_or_add_key(ctx, key);
  17612. ctx->kv[idx].type = GGUF_TYPE_UINT32;
  17613. ctx->kv[idx].value.uint32 = val;
  17614. }
  17615. void gguf_set_val_i32(struct gguf_context * ctx, const char * key, int32_t val) {
  17616. const int idx = gguf_get_or_add_key(ctx, key);
  17617. ctx->kv[idx].type = GGUF_TYPE_INT32;
  17618. ctx->kv[idx].value.int32 = val;
  17619. }
  17620. void gguf_set_val_f32(struct gguf_context * ctx, const char * key, float val) {
  17621. const int idx = gguf_get_or_add_key(ctx, key);
  17622. ctx->kv[idx].type = GGUF_TYPE_FLOAT32;
  17623. ctx->kv[idx].value.float32 = val;
  17624. }
  17625. void gguf_set_val_u64(struct gguf_context * ctx, const char * key, uint64_t val) {
  17626. const int idx = gguf_get_or_add_key(ctx, key);
  17627. ctx->kv[idx].type = GGUF_TYPE_UINT64;
  17628. ctx->kv[idx].value.uint64 = val;
  17629. }
  17630. void gguf_set_val_i64(struct gguf_context * ctx, const char * key, int64_t val) {
  17631. const int idx = gguf_get_or_add_key(ctx, key);
  17632. ctx->kv[idx].type = GGUF_TYPE_INT64;
  17633. ctx->kv[idx].value.int64 = val;
  17634. }
  17635. void gguf_set_val_f64(struct gguf_context * ctx, const char * key, double val) {
  17636. const int idx = gguf_get_or_add_key(ctx, key);
  17637. ctx->kv[idx].type = GGUF_TYPE_FLOAT64;
  17638. ctx->kv[idx].value.float64 = val;
  17639. }
  17640. void gguf_set_val_bool(struct gguf_context * ctx, const char * key, bool val) {
  17641. const int idx = gguf_get_or_add_key(ctx, key);
  17642. ctx->kv[idx].type = GGUF_TYPE_BOOL;
  17643. ctx->kv[idx].value.bool_ = val;
  17644. }
  17645. void gguf_set_val_str(struct gguf_context * ctx, const char * key, const char * val) {
  17646. const int idx = gguf_get_or_add_key(ctx, key);
  17647. ctx->kv[idx].type = GGUF_TYPE_STRING;
  17648. ctx->kv[idx].value.str.n = strlen(val);
  17649. ctx->kv[idx].value.str.data = strdup(val);
  17650. }
  17651. void gguf_set_arr_data(struct gguf_context * ctx, const char * key, enum gguf_type type, const void * data, int n) {
  17652. const int idx = gguf_get_or_add_key(ctx, key);
  17653. ctx->kv[idx].type = GGUF_TYPE_ARRAY;
  17654. ctx->kv[idx].value.arr.type = type;
  17655. ctx->kv[idx].value.arr.n = n;
  17656. ctx->kv[idx].value.arr.data = malloc(n*GGUF_TYPE_SIZE[type]);
  17657. memcpy(ctx->kv[idx].value.arr.data, data, n*GGUF_TYPE_SIZE[type]);
  17658. }
  17659. void gguf_set_arr_str(struct gguf_context * ctx, const char * key, const char ** data, int n) {
  17660. const int idx = gguf_get_or_add_key(ctx, key);
  17661. ctx->kv[idx].type = GGUF_TYPE_ARRAY;
  17662. ctx->kv[idx].value.arr.type = GGUF_TYPE_STRING;
  17663. ctx->kv[idx].value.arr.n = n;
  17664. ctx->kv[idx].value.arr.data = malloc(n*sizeof(struct gguf_str));
  17665. for (int i = 0; i < n; i++) {
  17666. struct gguf_str * str = &((struct gguf_str *)ctx->kv[idx].value.arr.data)[i];
  17667. str->n = strlen(data[i]);
  17668. str->data = strdup(data[i]);
  17669. }
  17670. }
  17671. // set or add KV pairs from another context
  17672. void gguf_set_kv(struct gguf_context * ctx, struct gguf_context * src) {
  17673. for (uint32_t i = 0; i < src->header.n_kv; i++) {
  17674. switch (src->kv[i].type) {
  17675. case GGUF_TYPE_UINT8: gguf_set_val_u8 (ctx, src->kv[i].key.data, src->kv[i].value.uint8); break;
  17676. case GGUF_TYPE_INT8: gguf_set_val_i8 (ctx, src->kv[i].key.data, src->kv[i].value.int8); break;
  17677. case GGUF_TYPE_UINT16: gguf_set_val_u16 (ctx, src->kv[i].key.data, src->kv[i].value.uint16); break;
  17678. case GGUF_TYPE_INT16: gguf_set_val_i16 (ctx, src->kv[i].key.data, src->kv[i].value.int16); break;
  17679. case GGUF_TYPE_UINT32: gguf_set_val_u32 (ctx, src->kv[i].key.data, src->kv[i].value.uint32); break;
  17680. case GGUF_TYPE_INT32: gguf_set_val_i32 (ctx, src->kv[i].key.data, src->kv[i].value.int32); break;
  17681. case GGUF_TYPE_FLOAT32: gguf_set_val_f32 (ctx, src->kv[i].key.data, src->kv[i].value.float32); break;
  17682. case GGUF_TYPE_UINT64: gguf_set_val_u64 (ctx, src->kv[i].key.data, src->kv[i].value.uint64); break;
  17683. case GGUF_TYPE_INT64: gguf_set_val_i64 (ctx, src->kv[i].key.data, src->kv[i].value.int64); break;
  17684. case GGUF_TYPE_FLOAT64: gguf_set_val_f64 (ctx, src->kv[i].key.data, src->kv[i].value.float64); break;
  17685. case GGUF_TYPE_BOOL: gguf_set_val_bool(ctx, src->kv[i].key.data, src->kv[i].value.bool_); break;
  17686. case GGUF_TYPE_STRING: gguf_set_val_str (ctx, src->kv[i].key.data, src->kv[i].value.str.data); break;
  17687. case GGUF_TYPE_ARRAY:
  17688. {
  17689. if (src->kv[i].value.arr.type == GGUF_TYPE_STRING) {
  17690. const char ** data = malloc(src->kv[i].value.arr.n*sizeof(char *));
  17691. for (uint32_t j = 0; j < src->kv[i].value.arr.n; j++) {
  17692. data[j] = ((struct gguf_str *)src->kv[i].value.arr.data)[j].data;
  17693. }
  17694. gguf_set_arr_str(ctx, src->kv[i].key.data, data, src->kv[i].value.arr.n);
  17695. free(data);
  17696. } else if (src->kv[i].value.arr.type == GGUF_TYPE_ARRAY) {
  17697. GGML_ASSERT(false && "nested arrays not supported");
  17698. } else {
  17699. gguf_set_arr_data(ctx, src->kv[i].key.data, src->kv[i].value.arr.type, src->kv[i].value.arr.data, src->kv[i].value.arr.n);
  17700. }
  17701. } break;
  17702. case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type"); break;
  17703. }
  17704. }
  17705. }
  17706. void gguf_add_tensor(
  17707. struct gguf_context * ctx,
  17708. const struct ggml_tensor * tensor) {
  17709. const int idx = ctx->header.n_tensors;
  17710. ctx->infos = realloc(ctx->infos, (idx + 1)*sizeof(struct gguf_tensor_info));
  17711. ctx->infos[idx].name.n = strlen(tensor->name);
  17712. ctx->infos[idx].name.data = strdup(tensor->name);
  17713. for (int i = 0; i < GGML_MAX_DIMS; ++i) {
  17714. ctx->infos[idx].ne[i] = 1;
  17715. }
  17716. ctx->infos[idx].n_dims = tensor->n_dims;
  17717. for (int i = 0; i < tensor->n_dims; i++) {
  17718. ctx->infos[idx].ne[i] = tensor->ne[i];
  17719. }
  17720. ctx->infos[idx].type = tensor->type;
  17721. ctx->infos[idx].offset = 0;
  17722. ctx->infos[idx].data = tensor->data;
  17723. ctx->infos[idx].size = ggml_nbytes(tensor);
  17724. if (ctx->header.n_tensors > 0) {
  17725. ctx->infos[idx].offset = ctx->infos[idx - 1].offset + GGML_PAD(ctx->infos[idx - 1].size, ctx->alignment);
  17726. }
  17727. ctx->header.n_tensors++;
  17728. }
  17729. void gguf_set_tensor_type(struct gguf_context * ctx, const char * name, enum ggml_type type) {
  17730. const int idx = gguf_find_tensor(ctx, name);
  17731. if (idx < 0) {
  17732. GGML_ASSERT(false && "tensor not found");
  17733. }
  17734. ctx->infos[idx].type = type;
  17735. }
  17736. void gguf_set_tensor_data(struct gguf_context * ctx, const char * name, const void * data, size_t size) {
  17737. const int idx = gguf_find_tensor(ctx, name);
  17738. if (idx < 0) {
  17739. GGML_ASSERT(false && "tensor not found");
  17740. }
  17741. ctx->infos[idx].data = data;
  17742. ctx->infos[idx].size = size;
  17743. // update offsets
  17744. for (uint32_t i = idx + 1; i < ctx->header.n_tensors; ++i) {
  17745. ctx->infos[i].offset = ctx->infos[i - 1].offset + GGML_PAD(ctx->infos[i - 1].size, ctx->alignment);
  17746. }
  17747. }
  17748. //static void gguf_fwrite_str(FILE * file, const struct gguf_str * val) {
  17749. // fwrite(&val->n, sizeof(val->n), 1, file);
  17750. // fwrite(val->data, sizeof(char), val->n, file);
  17751. //}
  17752. //
  17753. //static void gguf_fwrite_el(FILE * file, const void * val, size_t size) {
  17754. // fwrite(val, sizeof(char), size, file);
  17755. //}
  17756. struct gguf_buf {
  17757. void * data;
  17758. size_t size;
  17759. size_t offset;
  17760. };
  17761. static struct gguf_buf gguf_buf_init(size_t size) {
  17762. struct gguf_buf buf = {
  17763. /*buf.data =*/ size == 0 ? NULL : malloc(size),
  17764. /*buf.size =*/ size,
  17765. /*buf.offset =*/ 0,
  17766. };
  17767. return buf;
  17768. }
  17769. static void gguf_buf_free(struct gguf_buf buf) {
  17770. if (buf.data) {
  17771. free(buf.data);
  17772. }
  17773. }
  17774. static void gguf_buf_grow(struct gguf_buf * buf, size_t size) {
  17775. if (buf->offset + size > buf->size) {
  17776. buf->size = 1.5*(buf->offset + size);
  17777. if (buf->data) {
  17778. buf->data = realloc(buf->data, buf->size);
  17779. }
  17780. }
  17781. }
static void gguf_bwrite_str(struct gguf_buf * buf, const struct gguf_str * val) {
    gguf_buf_grow(buf, sizeof(val->n) + val->n);

    if (buf->data) {
        memcpy((char *) buf->data + buf->offset, &val->n, sizeof(val->n));
    }
    buf->offset += sizeof(val->n);

    if (buf->data) {
        memcpy((char *) buf->data + buf->offset, val->data, val->n);
    }
    buf->offset += val->n;
}

static void gguf_bwrite_el(struct gguf_buf * buf, const void * val, size_t el_size) {
    gguf_buf_grow(buf, el_size);

    if (buf->data) {
        memcpy((char *) buf->data + buf->offset, val, el_size);
    }
    buf->offset += el_size;
}
static void gguf_write_to_buf(const struct gguf_context * ctx, struct gguf_buf * buf, bool only_meta) {
    // write header
    gguf_bwrite_el(buf, &ctx->header.magic,     sizeof(ctx->header.magic));
    gguf_bwrite_el(buf, &ctx->header.version,   sizeof(ctx->header.version));
    gguf_bwrite_el(buf, &ctx->header.n_tensors, sizeof(ctx->header.n_tensors));
    gguf_bwrite_el(buf, &ctx->header.n_kv,      sizeof(ctx->header.n_kv));

    // write key-value pairs
    for (uint32_t i = 0; i < ctx->header.n_kv; ++i) {
        struct gguf_kv * kv = &ctx->kv[i];

        gguf_bwrite_str(buf, &kv->key);
        gguf_bwrite_el (buf, &kv->type, sizeof(kv->type));

        switch (kv->type) {
            case GGUF_TYPE_UINT8:   gguf_bwrite_el (buf, &kv->value.uint8,   sizeof(kv->value.uint8)  ); break;
            case GGUF_TYPE_INT8:    gguf_bwrite_el (buf, &kv->value.int8,    sizeof(kv->value.int8)   ); break;
            case GGUF_TYPE_UINT16:  gguf_bwrite_el (buf, &kv->value.uint16,  sizeof(kv->value.uint16) ); break;
            case GGUF_TYPE_INT16:   gguf_bwrite_el (buf, &kv->value.int16,   sizeof(kv->value.int16)  ); break;
            case GGUF_TYPE_UINT32:  gguf_bwrite_el (buf, &kv->value.uint32,  sizeof(kv->value.uint32) ); break;
            case GGUF_TYPE_INT32:   gguf_bwrite_el (buf, &kv->value.int32,   sizeof(kv->value.int32)  ); break;
            case GGUF_TYPE_FLOAT32: gguf_bwrite_el (buf, &kv->value.float32, sizeof(kv->value.float32)); break;
            case GGUF_TYPE_UINT64:  gguf_bwrite_el (buf, &kv->value.uint64,  sizeof(kv->value.uint64) ); break;
            case GGUF_TYPE_INT64:   gguf_bwrite_el (buf, &kv->value.int64,   sizeof(kv->value.int64)  ); break;
            case GGUF_TYPE_FLOAT64: gguf_bwrite_el (buf, &kv->value.float64, sizeof(kv->value.float64)); break;
            case GGUF_TYPE_BOOL:    gguf_bwrite_el (buf, &kv->value.bool_,   sizeof(kv->value.bool_)  ); break;
            case GGUF_TYPE_STRING:  gguf_bwrite_str(buf, &kv->value.str                               ); break;
            case GGUF_TYPE_ARRAY:
                {
                    gguf_bwrite_el(buf, &kv->value.arr.type, sizeof(kv->value.arr.type));
                    gguf_bwrite_el(buf, &kv->value.arr.n,    sizeof(kv->value.arr.n)   );

                    switch (kv->value.arr.type) {
                        case GGUF_TYPE_UINT8:
                        case GGUF_TYPE_INT8:
                        case GGUF_TYPE_UINT16:
                        case GGUF_TYPE_INT16:
                        case GGUF_TYPE_UINT32:
                        case GGUF_TYPE_INT32:
                        case GGUF_TYPE_FLOAT32:
                        case GGUF_TYPE_UINT64:
                        case GGUF_TYPE_INT64:
                        case GGUF_TYPE_FLOAT64:
                        case GGUF_TYPE_BOOL:
                            {
                                gguf_bwrite_el(buf, kv->value.arr.data, kv->value.arr.n * GGUF_TYPE_SIZE[kv->value.arr.type]);
                            } break;
                        case GGUF_TYPE_STRING:
                            {
                                for (uint32_t j = 0; j < kv->value.arr.n; ++j) {
                                    gguf_bwrite_str(buf, &((struct gguf_str *) kv->value.arr.data)[j]);
                                }
                            } break;
                        case GGUF_TYPE_ARRAY:
                        case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type"); break;
                    }
                } break;
            case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type");
        }
    }

    // write tensor infos
    for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) {
        struct gguf_tensor_info * info = &ctx->infos[i];

        gguf_bwrite_str(buf, &info->name);
        gguf_bwrite_el (buf, &info->n_dims, sizeof(info->n_dims));
        for (uint32_t j = 0; j < info->n_dims; ++j) {
            gguf_bwrite_el(buf, &info->ne[j], sizeof(info->ne[j]));
        }
        gguf_bwrite_el(buf, &info->type,   sizeof(info->type));
        gguf_bwrite_el(buf, &info->offset, sizeof(info->offset));
    }

    // we require the data section to be aligned, so take into account any padding
    {
        const size_t offset     = buf->offset;
        const size_t offset_pad = GGML_PAD(offset, ctx->alignment);

        if (offset_pad != offset) {
            uint8_t pad = 0;
            for (size_t i = 0; i < offset_pad - offset; ++i) {
                gguf_bwrite_el(buf, &pad, sizeof(pad));
            }
        }
    }

    if (only_meta) {
        return;
    }

    size_t offset = 0;

    // write tensor data
    for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) {
        struct gguf_tensor_info * info = &ctx->infos[i];

        const size_t size     = info->size;
        const size_t size_pad = GGML_PAD(size, ctx->alignment);

        gguf_bwrite_el(buf, info->data, size);

        if (size_pad != size) {
            uint8_t pad = 0;
            for (size_t j = 0; j < size_pad - size; ++j) {
                gguf_bwrite_el(buf, &pad, sizeof(pad));
            }
        }

        GGML_ASSERT(offset == info->offset);

        offset += size_pad;
    }
}
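
// Worked example of the padding logic above (illustrative): with ctx->alignment == 32,
// a tensor of 100 bytes occupies GGML_PAD(100, 32) == 128 bytes in the data section,
// so 28 zero bytes are written after it and the next tensor starts on a 32-byte boundary.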
void gguf_write_to_file(const struct gguf_context * ctx, const char * fname, bool only_meta) {
    FILE * file = fopen(fname, "wb");
    if (!file) {
        GGML_ASSERT(false && "failed to open file for writing");
    }

    struct gguf_buf buf = gguf_buf_init(16*1024);

    gguf_write_to_buf(ctx, &buf, only_meta);

    fwrite(buf.data, 1, buf.offset, file);

    gguf_buf_free(buf);

    fclose(file);
}
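
// Usage sketch (hypothetical file names): write the full model, or only the
// header/KV/tensor-info section without the tensor data:
//
//     gguf_write_to_file(fctx, "model.gguf",      /*only_meta =*/ false);
//     gguf_write_to_file(fctx, "model.meta.gguf", /*only_meta =*/ true);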
size_t gguf_get_meta_size(const struct gguf_context * ctx) {
    // no allocs - only compute size
    struct gguf_buf buf = gguf_buf_init(0);

    gguf_write_to_buf(ctx, &buf, true);

    return buf.offset;
}

void gguf_get_meta_data(const struct gguf_context * ctx, void * data) {
    struct gguf_buf buf = gguf_buf_init(16*1024);

    gguf_write_to_buf(ctx, &buf, true);

    memcpy(data, buf.data, buf.offset);

    gguf_buf_free(buf);
}
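
// Usage sketch (hypothetical): the two calls above are meant to be paired -
// query the metadata size first, then copy it into a caller-owned buffer:
//
//     const size_t n_meta = gguf_get_meta_size(fctx);
//     void * meta = malloc(n_meta);
//     gguf_get_meta_data(fctx, meta);
//     // ... use `meta`, then free(meta);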
////////////////////////////////////////////////////////////////////////////////
int ggml_cpu_has_avx(void) {
#if defined(__AVX__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_avx2(void) {
#if defined(__AVX2__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_avx512(void) {
#if defined(__AVX512F__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_avx512_vbmi(void) {
#if defined(__AVX512VBMI__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_avx512_vnni(void) {
#if defined(__AVX512VNNI__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_fma(void) {
#if defined(__FMA__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_neon(void) {
#if defined(__ARM_NEON)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_arm_fma(void) {
#if defined(__ARM_FEATURE_FMA)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_metal(void) {
#if defined(GGML_USE_METAL)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_f16c(void) {
#if defined(__F16C__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_fp16_va(void) {
#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_wasm_simd(void) {
#if defined(__wasm_simd128__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_blas(void) {
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) || defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_cublas(void) {
#if defined(GGML_USE_CUBLAS)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_clblast(void) {
#if defined(GGML_USE_CLBLAST)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_gpublas(void) {
    return ggml_cpu_has_cublas() || ggml_cpu_has_clblast();
}

int ggml_cpu_has_sse3(void) {
#if defined(__SSE3__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_ssse3(void) {
#if defined(__SSSE3__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_vsx(void) {
#if defined(__POWER9_VECTOR__)
    return 1;
#else
    return 0;
#endif
}
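
// Illustrative sketch: these predicates report compile-time features, so a caller
// can log what the binary was built with, e.g.:
//
//     printf("AVX = %d | AVX2 = %d | NEON = %d | BLAS = %d\n",
//         ggml_cpu_has_avx(), ggml_cpu_has_avx2(), ggml_cpu_has_neon(), ggml_cpu_has_blas());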
////////////////////////////////////////////////////////////////////////////////