ggml.c 697 KB

430914310143111431214313143141431514316143171431814319143201432114322143231432414325143261432714328143291433014331143321433314334143351433614337143381433914340143411434214343143441434514346143471434814349143501435114352143531435414355143561435714358143591436014361143621436314364143651436614367143681436914370143711437214373143741437514376143771437814379143801438114382143831438414385143861438714388143891439014391143921439314394143951439614397143981439914400144011440214403144041440514406144071440814409144101441114412144131441414415144161441714418144191442014421144221442314424144251442614427144281442914430144311443214433144341443514436144371443814439144401444114442144431444414445144461444714448144491445014451144521445314454144551445614457144581445914460144611446214463144641446514466144671446814469144701447114472144731447414475144761447714478144791448014481144821448314484144851448614487144881448914490144911449214493144941449514496144971449814499145001450114502145031450414505145061450714508145091451014511145121451314514145151451614517145181451914520145211452214523145241452514526145271452814529145301453114532145331453414535145361453714538145391454014541145421454314544145451454614547145481454914550145511455214553145541455514556145571455814559145601456114562145631456414565145661456714568145691457014571145721457314574145751457614577145781457914580145811458214583145841458514586145871458814589145901459114592145931459414595145961459714598145991460014601146021460314604146051460614607146081460914610146111461214613146141461514616146171461814619146201462114622146231462414625146261462714628146291463014631146321463314634146351463614637146381463914640146411464214643146441464514646146471464814649146501465114652146531465414655146561465714658146591466014661146621466314664146651466614667146681466914670146711467214673146741467514676146771467814679146801468114682146831468414685146861468714688146891469014691146921469314694146951469614697146981469914700147011470214703147041470514706147071470814709147101471114712147131471414715147161471714718147191472014721147221472314724147251472614727147281472914730147311473214733147341473514736147371473814739147401474114742147431474414745147461474714748147491475014751147521475314754147551475614757147581475914760147611476214763147641476514766147671476814769147701477114772147731477414775147761477714778147791478014781147821478314784147851478614787147881478914790147911479214793147941479514796147971479814799148001480114802148031480414805148061480714808148091481014811148121481314814148151481614817148181481914820148211482214823148241482514826148271482814829148301483114832148331483414835148361483714838148391484014841148421484314844148451484614847148481484914850148511485214853148541485514856148571485814859148601486114862148631486414865148661486714868148691487014871148721487314874148751487614877148781487914880148811488214883148841488514886148871488814889148901489114892148931489414895148961489714898148991490014901149021490314904149051490614907149081490914910149111491214913149141491514916149171491814919149201492114922149231492414925149261492714928149291493014931149321493314934149351493614937149381493914940149411494214943149441494514946149471494814949149501495114952149531495414955149561495714958149591496014961149621496314964149651496614967149681496914970149711497214973149741497514976149771497814979149801498114982149831498414985149861498714988149891499014991149921499314994149951499614997149981499915000150011500215003150041500515006150071500815009150101501115012150131501415015150161501715018150191
502015021150221502315024150251502615027150281502915030150311503215033150341503515036150371503815039150401504115042150431504415045150461504715048150491505015051150521505315054150551505615057150581505915060150611506215063150641506515066150671506815069150701507115072150731507415075150761507715078150791508015081150821508315084150851508615087150881508915090150911509215093150941509515096150971509815099151001510115102151031510415105151061510715108151091511015111151121511315114151151511615117151181511915120151211512215123151241512515126151271512815129151301513115132151331513415135151361513715138151391514015141151421514315144151451514615147151481514915150151511515215153151541515515156151571515815159151601516115162151631516415165151661516715168151691517015171151721517315174151751517615177151781517915180151811518215183151841518515186151871518815189151901519115192151931519415195151961519715198151991520015201152021520315204152051520615207152081520915210152111521215213152141521515216152171521815219152201522115222152231522415225152261522715228152291523015231152321523315234152351523615237152381523915240152411524215243152441524515246152471524815249152501525115252152531525415255152561525715258152591526015261152621526315264152651526615267152681526915270152711527215273152741527515276152771527815279152801528115282152831528415285152861528715288152891529015291152921529315294152951529615297152981529915300153011530215303153041530515306153071530815309153101531115312153131531415315153161531715318153191532015321153221532315324153251532615327153281532915330153311533215333153341533515336153371533815339153401534115342153431534415345153461534715348153491535015351153521535315354153551535615357153581535915360153611536215363153641536515366153671536815369153701537115372153731537415375153761537715378153791538015381153821538315384153851538615387153881538915390153911539215393153941539515396153971539815399154001540115402154031540415405154061540715408154091541015411154121541315414154151541615417154181541915420154211542215423154241542515426154271542815429154301543115432154331543415435154361543715438154391544015441154421544315444154451544615447154481544915450154511545215453154541545515456154571545815459154601546115462154631546415465154661546715468154691547015471154721547315474154751547615477154781547915480154811548215483154841548515486154871548815489154901549115492154931549415495154961549715498154991550015501155021550315504155051550615507155081550915510155111551215513155141551515516155171551815519155201552115522155231552415525155261552715528155291553015531155321553315534155351553615537155381553915540155411554215543155441554515546155471554815549155501555115552155531555415555155561555715558155591556015561155621556315564155651556615567155681556915570155711557215573155741557515576155771557815579155801558115582155831558415585155861558715588155891559015591155921559315594155951559615597155981559915600156011560215603156041560515606156071560815609156101561115612156131561415615156161561715618156191562015621156221562315624156251562615627156281562915630156311563215633156341563515636156371563815639156401564115642156431564415645156461564715648156491565015651156521565315654156551565615657156581565915660156611566215663156641566515666156671566815669156701567115672156731567415675156761567715678156791568015681156821568315684156851568615687156881568915690156911569215693156941569515696156971569815699157001570115702157031570415705157061570715708157091571015711157121571315714157151571615717157181571915720157211572215723157241572515726157271572815729157301
573115732157331573415735157361573715738157391574015741157421574315744157451574615747157481574915750157511575215753157541575515756157571575815759157601576115762157631576415765157661576715768157691577015771157721577315774157751577615777157781577915780157811578215783157841578515786157871578815789157901579115792157931579415795157961579715798157991580015801158021580315804158051580615807158081580915810158111581215813158141581515816158171581815819158201582115822158231582415825158261582715828158291583015831158321583315834158351583615837158381583915840158411584215843158441584515846158471584815849158501585115852158531585415855158561585715858158591586015861158621586315864158651586615867158681586915870158711587215873158741587515876158771587815879158801588115882158831588415885158861588715888158891589015891158921589315894158951589615897158981589915900159011590215903159041590515906159071590815909159101591115912159131591415915159161591715918159191592015921159221592315924159251592615927159281592915930159311593215933159341593515936159371593815939159401594115942159431594415945159461594715948159491595015951159521595315954159551595615957159581595915960159611596215963159641596515966159671596815969159701597115972159731597415975159761597715978159791598015981159821598315984159851598615987159881598915990159911599215993159941599515996159971599815999160001600116002160031600416005160061600716008160091601016011160121601316014160151601616017160181601916020160211602216023160241602516026160271602816029160301603116032160331603416035160361603716038160391604016041160421604316044160451604616047160481604916050160511605216053160541605516056160571605816059160601606116062160631606416065160661606716068160691607016071160721607316074160751607616077160781607916080160811608216083160841608516086160871608816089160901609116092160931609416095160961609716098160991610016101161021610316104161051610616107161081610916110161111611216113161141611516116161171611816119161201612116122161231612416125161261612716128161291613016131161321613316134161351613616137161381613916140161411614216143161441614516146161471614816149161501615116152161531615416155161561615716158161591616016161161621616316164161651616616167161681616916170161711617216173161741617516176161771617816179161801618116182161831618416185161861618716188161891619016191161921619316194161951619616197161981619916200162011620216203162041620516206162071620816209162101621116212162131621416215162161621716218162191622016221162221622316224162251622616227162281622916230162311623216233162341623516236162371623816239162401624116242162431624416245162461624716248162491625016251162521625316254162551625616257162581625916260162611626216263162641626516266162671626816269162701627116272162731627416275162761627716278162791628016281162821628316284162851628616287162881628916290162911629216293162941629516296162971629816299163001630116302163031630416305163061630716308163091631016311163121631316314163151631616317163181631916320163211632216323163241632516326163271632816329163301633116332163331633416335163361633716338163391634016341163421634316344163451634616347163481634916350163511635216353163541635516356163571635816359163601636116362163631636416365163661636716368163691637016371163721637316374163751637616377163781637916380163811638216383163841638516386163871638816389163901639116392163931639416395163961639716398163991640016401164021640316404164051640616407164081640916410164111641216413164141641516416164171641816419164201642116422164231642416425164261642716428164291643016431164321643316434164351643616437164381643916440164411
644216443164441644516446164471644816449164501645116452164531645416455164561645716458164591646016461164621646316464164651646616467164681646916470164711647216473164741647516476164771647816479164801648116482164831648416485164861648716488164891649016491164921649316494164951649616497164981649916500165011650216503165041650516506165071650816509165101651116512165131651416515165161651716518165191652016521165221652316524165251652616527165281652916530165311653216533165341653516536165371653816539165401654116542165431654416545165461654716548165491655016551165521655316554165551655616557165581655916560165611656216563165641656516566165671656816569165701657116572165731657416575165761657716578165791658016581165821658316584165851658616587165881658916590165911659216593165941659516596165971659816599166001660116602166031660416605166061660716608166091661016611166121661316614166151661616617166181661916620166211662216623166241662516626166271662816629166301663116632166331663416635166361663716638166391664016641166421664316644166451664616647166481664916650166511665216653166541665516656166571665816659166601666116662166631666416665166661666716668166691667016671166721667316674166751667616677166781667916680166811668216683166841668516686166871668816689166901669116692166931669416695166961669716698166991670016701167021670316704167051670616707167081670916710167111671216713167141671516716167171671816719167201672116722167231672416725167261672716728167291673016731167321673316734167351673616737167381673916740167411674216743167441674516746167471674816749167501675116752167531675416755167561675716758167591676016761167621676316764167651676616767167681676916770167711677216773167741677516776167771677816779167801678116782167831678416785167861678716788167891679016791167921679316794167951679616797167981679916800168011680216803168041680516806168071680816809168101681116812168131681416815168161681716818168191682016821168221682316824168251682616827168281682916830168311683216833168341683516836168371683816839168401684116842168431684416845168461684716848168491685016851168521685316854168551685616857168581685916860168611686216863168641686516866168671686816869168701687116872168731687416875168761687716878168791688016881168821688316884168851688616887168881688916890168911689216893168941689516896168971689816899169001690116902169031690416905169061690716908169091691016911169121691316914169151691616917169181691916920169211692216923169241692516926169271692816929169301693116932169331693416935169361693716938169391694016941169421694316944169451694616947169481694916950169511695216953169541695516956169571695816959169601696116962169631696416965169661696716968169691697016971169721697316974169751697616977169781697916980169811698216983169841698516986169871698816989169901699116992169931699416995169961699716998169991700017001170021700317004170051700617007170081700917010170111701217013170141701517016170171701817019170201702117022170231702417025170261702717028170291703017031170321703317034170351703617037170381703917040170411704217043170441704517046170471704817049170501705117052170531705417055170561705717058170591706017061170621706317064170651706617067170681706917070170711707217073170741707517076170771707817079170801708117082170831708417085170861708717088170891709017091170921709317094170951709617097170981709917100171011710217103171041710517106171071710817109171101711117112171131711417115171161711717118171191712017121171221712317124171251712617127171281712917130171311713217133171341713517136171371713817139171401714117142171431714417145171461714717148171491715017151171521
715317154171551715617157171581715917160171611716217163171641716517166171671716817169171701717117172171731717417175171761717717178171791718017181171821718317184171851718617187171881718917190171911719217193171941719517196171971719817199172001720117202172031720417205172061720717208172091721017211172121721317214172151721617217172181721917220172211722217223172241722517226172271722817229172301723117232172331723417235172361723717238172391724017241172421724317244172451724617247172481724917250172511725217253172541725517256172571725817259172601726117262172631726417265172661726717268172691727017271172721727317274172751727617277172781727917280172811728217283172841728517286172871728817289172901729117292172931729417295172961729717298172991730017301173021730317304173051730617307173081730917310173111731217313173141731517316173171731817319173201732117322173231732417325173261732717328173291733017331173321733317334173351733617337173381733917340173411734217343173441734517346173471734817349173501735117352173531735417355173561735717358173591736017361173621736317364173651736617367173681736917370173711737217373173741737517376173771737817379173801738117382173831738417385173861738717388173891739017391173921739317394173951739617397173981739917400174011740217403174041740517406174071740817409174101741117412174131741417415174161741717418174191742017421174221742317424174251742617427174281742917430174311743217433174341743517436174371743817439174401744117442174431744417445174461744717448174491745017451174521745317454174551745617457174581745917460174611746217463174641746517466174671746817469174701747117472174731747417475174761747717478174791748017481174821748317484174851748617487174881748917490174911749217493174941749517496174971749817499175001750117502175031750417505175061750717508175091751017511175121751317514175151751617517175181751917520175211752217523175241752517526175271752817529175301753117532175331753417535175361753717538175391754017541175421754317544175451754617547175481754917550175511755217553175541755517556175571755817559175601756117562175631756417565175661756717568175691757017571175721757317574175751757617577175781757917580175811758217583175841758517586175871758817589175901759117592175931759417595175961759717598175991760017601176021760317604176051760617607176081760917610176111761217613176141761517616176171761817619176201762117622176231762417625176261762717628176291763017631176321763317634176351763617637176381763917640176411764217643176441764517646176471764817649176501765117652176531765417655176561765717658176591766017661176621766317664176651766617667176681766917670176711767217673176741767517676176771767817679176801768117682176831768417685176861768717688176891769017691176921769317694176951769617697176981769917700177011770217703177041770517706177071770817709177101771117712177131771417715177161771717718177191772017721177221772317724177251772617727177281772917730177311773217733177341773517736177371773817739177401774117742177431774417745177461774717748177491775017751177521775317754177551775617757177581775917760177611776217763177641776517766177671776817769177701777117772177731777417775177761777717778177791778017781177821778317784177851778617787177881778917790177911779217793177941779517796177971779817799178001780117802178031780417805178061780717808178091781017811178121781317814178151781617817178181781917820178211782217823178241782517826178271782817829178301783117832178331783417835178361783717838178391784017841178421784317844178451784617847178481784917850178511785217853178541785517856178571785817859178601786117862178631
786417865178661786717868178691787017871178721787317874178751787617877178781787917880178811788217883178841788517886178871788817889178901789117892178931789417895178961789717898178991790017901179021790317904179051790617907179081790917910179111791217913179141791517916179171791817919179201792117922179231792417925179261792717928179291793017931179321793317934179351793617937179381793917940179411794217943179441794517946179471794817949179501795117952179531795417955179561795717958179591796017961179621796317964179651796617967179681796917970179711797217973179741797517976179771797817979179801798117982179831798417985179861798717988179891799017991179921799317994179951799617997179981799918000180011800218003180041800518006180071800818009180101801118012180131801418015180161801718018180191802018021180221802318024180251802618027180281802918030180311803218033180341803518036180371803818039180401804118042180431804418045180461804718048180491805018051180521805318054180551805618057180581805918060180611806218063180641806518066180671806818069180701807118072180731807418075180761807718078180791808018081180821808318084180851808618087180881808918090180911809218093180941809518096180971809818099181001810118102181031810418105181061810718108181091811018111181121811318114181151811618117181181811918120181211812218123181241812518126181271812818129181301813118132181331813418135181361813718138181391814018141181421814318144181451814618147181481814918150181511815218153181541815518156181571815818159181601816118162181631816418165181661816718168181691817018171181721817318174181751817618177181781817918180181811818218183181841818518186181871818818189181901819118192181931819418195181961819718198181991820018201182021820318204182051820618207182081820918210182111821218213182141821518216182171821818219182201822118222182231822418225182261822718228182291823018231182321823318234182351823618237182381823918240182411824218243182441824518246182471824818249182501825118252182531825418255182561825718258182591826018261182621826318264182651826618267182681826918270182711827218273182741827518276182771827818279182801828118282182831828418285182861828718288182891829018291182921829318294182951829618297182981829918300183011830218303183041830518306183071830818309183101831118312183131831418315183161831718318183191832018321183221832318324183251832618327183281832918330183311833218333183341833518336183371833818339183401834118342183431834418345183461834718348183491835018351183521835318354183551835618357183581835918360183611836218363183641836518366183671836818369183701837118372183731837418375183761837718378183791838018381183821838318384183851838618387183881838918390183911839218393183941839518396183971839818399184001840118402184031840418405184061840718408184091841018411184121841318414184151841618417184181841918420184211842218423184241842518426184271842818429184301843118432184331843418435184361843718438184391844018441184421844318444184451844618447184481844918450184511845218453184541845518456184571845818459184601846118462184631846418465184661846718468184691847018471184721847318474184751847618477184781847918480184811848218483184841848518486184871848818489184901849118492184931849418495184961849718498184991850018501185021850318504185051850618507185081850918510185111851218513185141851518516185171851818519185201852118522185231852418525185261852718528185291853018531185321853318534185351853618537185381853918540185411854218543185441854518546185471854818549185501855118552185531855418555185561855718558185591856018561185621856318564185651856618567185681856918570185711857218573185741
857518576185771857818579185801858118582185831858418585185861858718588185891859018591185921859318594185951859618597185981859918600186011860218603186041860518606186071860818609186101861118612186131861418615186161861718618186191862018621186221862318624186251862618627186281862918630186311863218633186341863518636186371863818639186401864118642186431864418645186461864718648186491865018651186521865318654186551865618657186581865918660186611866218663186641866518666186671866818669186701867118672186731867418675186761867718678186791868018681186821868318684186851868618687186881868918690186911869218693186941869518696186971869818699187001870118702187031870418705187061870718708187091871018711187121871318714187151871618717187181871918720187211872218723187241872518726187271872818729187301873118732187331873418735187361873718738187391874018741187421874318744187451874618747187481874918750187511875218753187541875518756187571875818759187601876118762187631876418765187661876718768187691877018771187721877318774187751877618777187781877918780187811878218783187841878518786187871878818789187901879118792187931879418795187961879718798187991880018801188021880318804188051880618807188081880918810188111881218813188141881518816188171881818819188201882118822188231882418825188261882718828188291883018831188321883318834188351883618837188381883918840188411884218843188441884518846188471884818849188501885118852188531885418855188561885718858188591886018861188621886318864188651886618867188681886918870188711887218873188741887518876188771887818879188801888118882188831888418885188861888718888188891889018891188921889318894188951889618897188981889918900189011890218903189041890518906189071890818909189101891118912189131891418915189161891718918189191892018921189221892318924189251892618927189281892918930189311893218933189341893518936189371893818939189401894118942189431894418945189461894718948189491895018951189521895318954189551895618957189581895918960189611896218963189641896518966189671896818969189701897118972189731897418975189761897718978189791898018981189821898318984189851898618987189881898918990189911899218993189941899518996189971899818999190001900119002190031900419005190061900719008190091901019011190121901319014190151901619017190181901919020190211902219023190241902519026190271902819029190301903119032190331903419035190361903719038190391904019041190421904319044190451904619047190481904919050190511905219053190541905519056190571905819059190601906119062190631906419065190661906719068190691907019071190721907319074190751907619077190781907919080190811908219083190841908519086190871908819089190901909119092190931909419095190961909719098190991910019101191021910319104191051910619107191081910919110191111911219113191141911519116191171911819119191201912119122191231912419125191261912719128191291913019131191321913319134191351913619137191381913919140191411914219143191441914519146191471914819149191501915119152191531915419155191561915719158191591916019161191621916319164191651916619167191681916919170191711917219173191741917519176191771917819179191801918119182191831918419185191861918719188191891919019191191921919319194191951919619197191981919919200192011920219203192041920519206192071920819209192101921119212192131921419215192161921719218192191922019221192221922319224192251922619227192281922919230192311923219233192341923519236192371923819239192401924119242192431924419245192461924719248192491925019251192521925319254192551925619257192581925919260192611926219263192641926519266192671926819269192701927119272192731927419275192761927719278192791928019281192821928319284192851
928619287192881928919290192911929219293192941929519296192971929819299193001930119302193031930419305193061930719308193091931019311193121931319314193151931619317193181931919320193211932219323193241932519326193271932819329193301933119332193331933419335193361933719338193391934019341193421934319344193451934619347193481934919350193511935219353193541935519356193571935819359193601936119362193631936419365193661936719368193691937019371193721937319374193751937619377193781937919380193811938219383193841938519386193871938819389193901939119392193931939419395193961939719398193991940019401194021940319404194051940619407194081940919410194111941219413194141941519416194171941819419194201942119422194231942419425194261942719428194291943019431194321943319434194351943619437194381943919440194411944219443194441944519446194471944819449194501945119452194531945419455194561945719458194591946019461194621946319464194651946619467194681946919470194711947219473194741947519476194771947819479194801948119482194831948419485194861948719488194891949019491194921949319494194951949619497194981949919500195011950219503195041950519506195071950819509195101951119512195131951419515195161951719518195191952019521195221952319524195251952619527195281952919530195311953219533195341953519536195371953819539195401954119542195431954419545195461954719548195491955019551195521955319554195551955619557195581955919560195611956219563195641956519566195671956819569195701957119572195731957419575195761957719578195791958019581195821958319584195851958619587195881958919590195911959219593195941959519596195971959819599196001960119602196031960419605196061960719608196091961019611196121961319614196151961619617196181961919620196211962219623196241962519626196271962819629196301963119632196331963419635196361963719638196391964019641196421964319644196451964619647196481964919650196511965219653196541965519656196571965819659196601966119662196631966419665196661966719668196691967019671196721967319674196751967619677196781967919680196811968219683196841968519686196871968819689196901969119692196931969419695196961969719698196991970019701197021970319704197051970619707197081970919710197111971219713197141971519716197171971819719197201972119722197231972419725197261972719728197291973019731197321973319734197351973619737197381973919740197411974219743197441974519746197471974819749197501975119752197531975419755197561975719758197591976019761197621976319764197651976619767197681976919770197711977219773197741977519776197771977819779197801978119782197831978419785197861978719788197891979019791197921979319794197951979619797197981979919800198011980219803198041980519806198071980819809198101981119812198131981419815198161981719818198191982019821198221982319824198251982619827198281982919830198311983219833198341983519836198371983819839198401984119842198431984419845198461984719848198491985019851198521985319854198551985619857198581985919860198611986219863198641986519866198671986819869198701987119872198731987419875198761987719878198791988019881198821988319884198851988619887198881988919890198911989219893198941989519896198971989819899199001990119902199031990419905199061990719908199091991019911199121991319914199151991619917199181991919920199211992219923199241992519926199271992819929199301993119932199331993419935199361993719938199391994019941199421994319944199451994619947199481994919950199511995219953199541995519956199571995819959199601996119962199631996419965199661996719968199691997019971199721997319974199751997619977199781997919980199811998219983199841998519986199871998819989199901999119992199931999419995199961
999719998199992000020001200022000320004200052000620007200082000920010200112001220013200142001520016200172001820019200202002120022200232002420025200262002720028200292003020031200322003320034200352003620037200382003920040200412004220043200442004520046200472004820049200502005120052200532005420055200562005720058200592006020061200622006320064200652006620067200682006920070200712007220073200742007520076200772007820079200802008120082200832008420085200862008720088200892009020091200922009320094200952009620097200982009920100201012010220103201042010520106201072010820109201102011120112201132011420115201162011720118201192012020121201222012320124201252012620127201282012920130201312013220133201342013520136201372013820139201402014120142201432014420145201462014720148201492015020151201522015320154201552015620157201582015920160201612016220163201642016520166201672016820169201702017120172201732017420175201762017720178201792018020181201822018320184201852018620187201882018920190201912019220193201942019520196201972019820199202002020120202202032020420205202062020720208202092021020211202122021320214202152021620217202182021920220202212022220223202242022520226202272022820229202302023120232202332023420235202362023720238202392024020241202422024320244202452024620247202482024920250202512025220253202542025520256202572025820259202602026120262202632026420265202662026720268202692027020271202722027320274202752027620277202782027920280202812028220283202842028520286202872028820289202902029120292202932029420295202962029720298202992030020301203022030320304203052030620307203082030920310203112031220313203142031520316203172031820319203202032120322203232032420325203262032720328203292033020331203322033320334203352033620337203382033920340203412034220343203442034520346203472034820349203502035120352203532035420355203562035720358203592036020361203622036320364203652036620367203682036920370203712037220373203742037520376203772037820379203802038120382203832038420385203862038720388203892039020391203922039320394203952039620397203982039920400204012040220403204042040520406204072040820409204102041120412204132041420415204162041720418204192042020421204222042320424204252042620427204282042920430204312043220433204342043520436204372043820439204402044120442204432044420445204462044720448204492045020451204522045320454204552045620457204582045920460204612046220463204642046520466204672046820469204702047120472204732047420475204762047720478204792048020481204822048320484204852048620487204882048920490204912049220493204942049520496204972049820499205002050120502205032050420505205062050720508205092051020511205122051320514205152051620517205182051920520205212052220523205242052520526205272052820529205302053120532205332053420535205362053720538205392054020541205422054320544205452054620547205482054920550205512055220553205542055520556205572055820559205602056120562205632056420565205662056720568205692057020571205722057320574205752057620577205782057920580205812058220583205842058520586205872058820589205902059120592205932059420595205962059720598205992060020601206022060320604206052060620607206082060920610206112061220613206142061520616206172061820619206202062120622206232062420625206262062720628206292063020631206322063320634206352063620637206382063920640206412064220643206442064520646206472064820649206502065120652206532065420655206562065720658206592066020661206622066320664206652066620667206682066920670206712067220673206742067520676206772067820679206802068120682206832068420685206862068720688206892069020691206922069320694206952069620697206982069920700207012070220703207042070520706207072
070820709207102071120712207132071420715207162071720718207192072020721207222072320724207252072620727207282072920730207312073220733207342073520736207372073820739207402074120742207432074420745207462074720748207492075020751207522075320754207552075620757207582075920760207612076220763207642076520766207672076820769207702077120772207732077420775207762077720778207792078020781207822078320784207852078620787207882078920790207912079220793207942079520796207972079820799208002080120802208032080420805208062080720808208092081020811208122081320814208152081620817208182081920820208212082220823208242082520826208272082820829208302083120832208332083420835208362083720838208392084020841208422084320844208452084620847208482084920850208512085220853208542085520856208572085820859208602086120862208632086420865208662086720868208692087020871208722087320874208752087620877208782087920880208812088220883208842088520886208872088820889208902089120892208932089420895208962089720898208992090020901209022090320904209052090620907209082090920910209112091220913209142091520916209172091820919209202092120922209232092420925209262092720928209292093020931209322093320934209352093620937209382093920940209412094220943209442094520946209472094820949209502095120952209532095420955209562095720958209592096020961209622096320964209652096620967209682096920970209712097220973209742097520976209772097820979209802098120982209832098420985209862098720988209892099020991209922099320994209952099620997209982099921000210012100221003210042100521006210072100821009210102101121012210132101421015210162101721018210192102021021210222102321024210252102621027210282102921030210312103221033210342103521036210372103821039210402104121042210432104421045210462104721048210492105021051210522105321054210552105621057210582105921060210612106221063210642106521066210672106821069210702107121072210732107421075210762107721078210792108021081210822108321084210852108621087210882108921090210912109221093210942109521096210972109821099211002110121102211032110421105211062110721108211092111021111211122111321114211152111621117211182111921120211212112221123211242112521126211272112821129211302113121132211332113421135211362113721138211392114021141211422114321144211452114621147211482114921150211512115221153211542115521156211572115821159211602116121162211632116421165211662116721168211692117021171211722117321174211752117621177211782117921180211812118221183211842118521186211872118821189211902119121192211932119421195211962119721198211992120021201212022120321204212052120621207212082120921210212112121221213212142121521216212172121821219212202122121222212232122421225212262122721228212292123021231212322123321234212352123621237212382123921240212412124221243212442124521246212472124821249212502125121252212532125421255212562125721258212592126021261212622126321264212652126621267212682126921270212712127221273212742127521276212772127821279212802128121282212832128421285212862128721288212892129021291212922129321294212952129621297212982129921300213012130221303213042130521306213072130821309213102131121312213132131421315213162131721318213192132021321213222132321324213252132621327213282132921330213312133221333213342133521336213372133821339213402134121342213432134421345213462134721348213492135021351213522135321354213552135621357213582135921360213612136221363213642136521366213672136821369213702137121372213732137421375213762137721378213792138021381213822138321384213852138621387213882138921390213912139221393213942139521396213972139821399214002140121402214032140421405214062140721408214092141021411214122141321414214152141621417214182
141921420214212142221423214242142521426214272142821429214302143121432214332143421435214362143721438214392144021441214422144321444214452144621447214482144921450214512145221453214542145521456214572145821459214602146121462214632146421465214662146721468214692147021471214722147321474214752147621477214782147921480214812148221483214842148521486214872148821489214902149121492214932149421495214962149721498214992150021501215022150321504215052150621507215082150921510215112151221513215142151521516215172151821519215202152121522215232152421525215262152721528215292153021531215322153321534215352153621537215382153921540215412154221543215442154521546215472154821549215502155121552215532155421555215562155721558215592156021561215622156321564215652156621567215682156921570215712157221573215742157521576215772157821579215802158121582215832158421585215862158721588215892159021591215922159321594
#define _CRT_SECURE_NO_DEPRECATE // Disables ridiculous "unsafe" warnings on Windows

#include "ggml.h"

#ifdef GGML_USE_K_QUANTS
#include "k_quants.h"
#endif

#if defined(_MSC_VER) || defined(__MINGW32__)
#include <malloc.h> // using malloc.h with MSC/MINGW
#elif !defined(__FreeBSD__) && !defined(__NetBSD__) && !defined(__OpenBSD__)
#include <alloca.h>
#endif

#include <assert.h>
#include <errno.h>
#include <time.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>
#include <float.h>
#include <limits.h>
#include <stdarg.h>
#include <signal.h>

#ifdef GGML_USE_METAL
#include <unistd.h>
#endif

// static_assert is normally a macro from <assert.h>; if it is not defined,
// fall back to the C11 _Static_assert keyword.
// under C99, static_assert becomes a no-op
// ref: https://stackoverflow.com/a/53923785/4039976
#ifndef static_assert
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201100L)
#define static_assert(cond, msg) _Static_assert(cond, msg)
#else
#define static_assert(cond, msg) struct global_scope_noop_trick
#endif
#endif
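// Illustrative only: with the fallback above, compile-time checks like the
// following fail the build under C11 but compile away to nothing under C99
// (hypothetical check, not part of ggml):
#if 0
static_assert(sizeof(uint16_t) == 2, "uint16_t must be exactly 2 bytes");
#endif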
#if defined(_MSC_VER)
// disable "possible loss of data" to avoid hundreds of casts
// we should just be careful :)
#pragma warning(disable: 4244 4267)

// disable POSIX deprecation warnings
// these functions are never going away, anyway
#pragma warning(disable: 4996)
#endif

#if defined(_WIN32)

#include <windows.h>

typedef volatile LONG atomic_int;
typedef atomic_int atomic_bool;

static void atomic_store(atomic_int * ptr, LONG val) {
    InterlockedExchange(ptr, val);
}
static LONG atomic_load(atomic_int * ptr) {
    return InterlockedCompareExchange(ptr, 0, 0);
}
static LONG atomic_fetch_add(atomic_int * ptr, LONG inc) {
    return InterlockedExchangeAdd(ptr, inc);
}
static LONG atomic_fetch_sub(atomic_int * ptr, LONG dec) {
    return atomic_fetch_add(ptr, -(dec));
}

typedef HANDLE pthread_t;

typedef DWORD thread_ret_t;
static int pthread_create(pthread_t * out, void * unused, thread_ret_t (*func)(void *), void * arg) {
    (void) unused;
    HANDLE handle = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE) func, arg, 0, NULL);
    if (handle == NULL) {
        return EAGAIN;
    }

    *out = handle;
    return 0;
}

static int pthread_join(pthread_t thread, void * unused) {
    (void) unused;
    int ret = (int) WaitForSingleObject(thread, INFINITE);
    CloseHandle(thread);
    return ret;
}

static int sched_yield(void) {
    Sleep(0);
    return 0;
}
#else
#include <pthread.h>
#include <stdatomic.h>

typedef void * thread_ret_t;

#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#endif
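// Illustrative only: the shim above gives the rest of the file one thread API
// on both platforms. A minimal usage sketch (hypothetical worker function,
// not part of ggml):
#if 0
static thread_ret_t worker(void * arg) {
    (void) arg; // do per-thread work here
    return 0;
}

static void run_one_thread(void) {
    pthread_t t;
    if (pthread_create(&t, NULL, worker, NULL) == 0) {
        pthread_join(t, NULL);
    }
}
#endif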
#ifdef GGML_USE_CPU_HBM
#include <hbwmalloc.h>
#endif

// __FMA__ and __F16C__ are not defined in MSVC, however they are implied with AVX2/AVX512
#if defined(_MSC_VER) && (defined(__AVX2__) || defined(__AVX512F__))
#ifndef __FMA__
#define __FMA__
#endif
#ifndef __F16C__
#define __F16C__
#endif
#ifndef __SSE3__
#define __SSE3__
#endif
#endif

/*#define GGML_PERF*/
#define GGML_DEBUG 0
#define GGML_GELU_FP16
#define GGML_GELU_QUICK_FP16
#define GGML_SILU_FP16
// #define GGML_CROSS_ENTROPY_EXP_FP16
// #define GGML_FLASH_ATTN_EXP_FP16

#define GGML_SOFT_MAX_UNROLL 4
#define GGML_VEC_DOT_UNROLL  2
#define GGML_VEC_MAD_UNROLL  32

//
// logging
//

#if (GGML_DEBUG >= 1)
#define GGML_PRINT_DEBUG(...) printf(__VA_ARGS__)
#else
#define GGML_PRINT_DEBUG(...)
#endif

#if (GGML_DEBUG >= 5)
#define GGML_PRINT_DEBUG_5(...) printf(__VA_ARGS__)
#else
#define GGML_PRINT_DEBUG_5(...)
#endif

#if (GGML_DEBUG >= 10)
#define GGML_PRINT_DEBUG_10(...) printf(__VA_ARGS__)
#else
#define GGML_PRINT_DEBUG_10(...)
#endif

#define GGML_PRINT(...) printf(__VA_ARGS__)
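// Illustrative only: the debug macros compile away unless GGML_DEBUG is raised
// above the matching threshold at build time, e.g. (hypothetical message):
#if 0
GGML_PRINT_DEBUG("%s: processing node %d\n", __func__, node_idx); // no-op when GGML_DEBUG == 0
#endif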
//
// end of logging block
//

#ifdef GGML_USE_ACCELERATE
// uncomment to use vDSP for soft max computation
// note: not sure if it is actually faster
//#define GGML_SOFT_MAX_ACCELERATE
#endif
#if defined(_MSC_VER) || defined(__MINGW32__)
#define GGML_ALIGNED_MALLOC(size) _aligned_malloc(size, GGML_MEM_ALIGN)
#define GGML_ALIGNED_FREE(ptr)    _aligned_free(ptr)
#else
inline static void * ggml_aligned_malloc(size_t size) {
    if (size == 0) {
        GGML_PRINT("WARNING: Behavior may be unexpected when allocating 0 bytes for ggml_aligned_malloc!\n");
        return NULL;
    }
    void * aligned_memory = NULL;
#ifdef GGML_USE_CPU_HBM
    int result = hbw_posix_memalign(&aligned_memory, 16, size);
#elif GGML_USE_METAL
    int result = posix_memalign(&aligned_memory, sysconf(_SC_PAGESIZE), size);
#else
    int result = posix_memalign(&aligned_memory, GGML_MEM_ALIGN, size);
#endif
    if (result != 0) {
        // Handle allocation failure
        const char * error_desc = "unknown allocation error";
        switch (result) {
            case EINVAL:
                error_desc = "invalid alignment value";
                break;
            case ENOMEM:
                error_desc = "insufficient memory";
                break;
        }
        GGML_PRINT("%s: %s (attempted to allocate %6.2f MB)\n", __func__, error_desc, size/(1024.0*1024.0));
        return NULL;
    }
    return aligned_memory;
}
#define GGML_ALIGNED_MALLOC(size) ggml_aligned_malloc(size)
#ifdef GGML_USE_CPU_HBM
#define GGML_ALIGNED_FREE(ptr) if (NULL != ptr) hbw_free(ptr)
#else
#define GGML_ALIGNED_FREE(ptr) free(ptr)
#endif
#endif
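// Illustrative only: memory obtained via GGML_ALIGNED_MALLOC must be released
// with GGML_ALIGNED_FREE, since the backing allocator differs per platform
// (_aligned_malloc vs posix_memalign vs hbw_posix_memalign):
#if 0
void * buf = GGML_ALIGNED_MALLOC(1024);
if (buf != NULL) {
    // ... use buf ...
    GGML_ALIGNED_FREE(buf);
}
#endif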
#define UNUSED GGML_UNUSED

#define SWAP(x, y, T) do { T SWAP = x; x = y; y = SWAP; } while (0)

//
// tensor access macros
//

#define GGML_TENSOR_UNARY_OP_LOCALS \
    GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) \
    GGML_TENSOR_LOCALS(size_t,  nb0, src0, nb) \
    GGML_TENSOR_LOCALS(int64_t, ne,  dst,  ne) \
    GGML_TENSOR_LOCALS(size_t,  nb,  dst,  nb)

#define GGML_TENSOR_BINARY_OP_LOCALS \
    GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) \
    GGML_TENSOR_LOCALS(size_t,  nb0, src0, nb) \
    GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne) \
    GGML_TENSOR_LOCALS(size_t,  nb1, src1, nb) \
    GGML_TENSOR_LOCALS(int64_t, ne,  dst,  ne) \
    GGML_TENSOR_LOCALS(size_t,  nb,  dst,  nb)
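// Illustrative only: GGML_TENSOR_LOCALS (defined in ggml.h) stamps out one
// local per dimension, so GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne) expands
// roughly to the following (assumed expansion, see ggml.h for the details):
#if 0
const int64_t ne10 = src1->ne[0];
const int64_t ne11 = src1->ne[1];
const int64_t ne12 = src1->ne[2];
const int64_t ne13 = src1->ne[3];
#endif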
#if defined(GGML_USE_ACCELERATE)
#include <Accelerate/Accelerate.h>
#if defined(GGML_USE_CLBLAST) // allow usage of CLBlast alongside Accelerate functions
#include "ggml-opencl.h"
#endif
#elif defined(GGML_USE_OPENBLAS)
#if defined(GGML_BLAS_USE_MKL)
#include <mkl.h>
#else
#include <cblas.h>
#endif
#elif defined(GGML_USE_CUBLAS)
#include "ggml-cuda.h"
#elif defined(GGML_USE_CLBLAST)
#include "ggml-opencl.h"
#endif

#undef MIN
#undef MAX

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

// floating point type used to accumulate sums
typedef double ggml_float;

// 16-bit float
// on Arm, we use __fp16
// on x86, we use uint16_t
#if defined(__ARM_NEON) && !defined(_MSC_VER)

// if YCM cannot find <arm_neon.h>, make a symbolic link to it, for example:
//
//   $ ln -sfn /Library/Developer/CommandLineTools/usr/lib/clang/13.1.6/include/arm_neon.h ./src/
//
#include <arm_neon.h>

#define GGML_COMPUTE_FP16_TO_FP32(x) ((float) (x))
#define GGML_COMPUTE_FP32_TO_FP16(x) (x)

#define GGML_FP16_TO_FP32(x) ((float) (x))
#define GGML_FP32_TO_FP16(x) (x)

#else

#ifdef __wasm_simd128__
#include <wasm_simd128.h>
#else
#ifdef __POWER9_VECTOR__
#include <altivec.h>
#undef bool
#define bool _Bool
#else
#if defined(_MSC_VER) || defined(__MINGW32__)
#include <intrin.h>
#else
#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__) || defined(__SSE3__)
#if !defined(__riscv)
#include <immintrin.h>
#endif
#endif
#endif
#endif
#endif

#ifdef __riscv_v_intrinsic
#include <riscv_vector.h>
#endif

#ifdef __F16C__

#ifdef _MSC_VER
#define GGML_COMPUTE_FP16_TO_FP32(x) _mm_cvtss_f32(_mm_cvtph_ps(_mm_cvtsi32_si128(x)))
#define GGML_COMPUTE_FP32_TO_FP16(x) _mm_extract_epi16(_mm_cvtps_ph(_mm_set_ss(x), 0), 0)
#else
#define GGML_COMPUTE_FP16_TO_FP32(x) _cvtsh_ss(x)
#define GGML_COMPUTE_FP32_TO_FP16(x) _cvtss_sh(x, 0)
#endif

#elif defined(__POWER9_VECTOR__)

#define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
#define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x)
/* the inline asm below is about 12% faster than the lookup method */
#define GGML_FP16_TO_FP32(x) GGML_COMPUTE_FP16_TO_FP32(x)
#define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x)

static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) {
    register float f;
    register double d;
    __asm__(
        "mtfprd %0,%2\n"
        "xscvhpdp %0,%0\n"
        "frsp %1,%0\n" :
        /* temp */ "=d"(d),
        /* out */  "=f"(f):
        /* in */   "r"(h));
    return f;
}

static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) {
    register double d;
    register ggml_fp16_t r;
    __asm__( /* xscvdphp can work on double or single precision */
        "xscvdphp %0,%2\n"
        "mffprd %1,%0\n" :
        /* temp */ "=d"(d),
        /* out */  "=r"(r):
        /* in */   "f"(f));
    return r;
}

#else

// FP16 <-> FP32
// ref: https://github.com/Maratyszcza/FP16

static inline float fp32_from_bits(uint32_t w) {
    union {
        uint32_t as_bits;
        float    as_value;
    } fp32;
    fp32.as_bits = w;
    return fp32.as_value;
}

static inline uint32_t fp32_to_bits(float f) {
    union {
        float    as_value;
        uint32_t as_bits;
    } fp32;
    fp32.as_value = f;
    return fp32.as_bits;
}

static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) {
    const uint32_t w = (uint32_t) h << 16;
    const uint32_t sign = w & UINT32_C(0x80000000);
    const uint32_t two_w = w + w;

    const uint32_t exp_offset = UINT32_C(0xE0) << 23;
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)
    const float exp_scale = 0x1.0p-112f;
#else
    const float exp_scale = fp32_from_bits(UINT32_C(0x7800000));
#endif
    const float normalized_value = fp32_from_bits((two_w >> 4) + exp_offset) * exp_scale;

    const uint32_t magic_mask = UINT32_C(126) << 23;
    const float magic_bias = 0.5f;
    const float denormalized_value = fp32_from_bits((two_w >> 17) | magic_mask) - magic_bias;

    const uint32_t denormalized_cutoff = UINT32_C(1) << 27;
    const uint32_t result = sign |
        (two_w < denormalized_cutoff ? fp32_to_bits(denormalized_value) : fp32_to_bits(normalized_value));
    return fp32_from_bits(result);
}

static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) {
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)
    const float scale_to_inf  = 0x1.0p+112f;
    const float scale_to_zero = 0x1.0p-110f;
#else
    const float scale_to_inf  = fp32_from_bits(UINT32_C(0x77800000));
    const float scale_to_zero = fp32_from_bits(UINT32_C(0x08800000));
#endif
    float base = (fabsf(f) * scale_to_inf) * scale_to_zero;

    const uint32_t w = fp32_to_bits(f);
    const uint32_t shl1_w = w + w;
    const uint32_t sign = w & UINT32_C(0x80000000);
    uint32_t bias = shl1_w & UINT32_C(0xFF000000);
    if (bias < UINT32_C(0x71000000)) {
        bias = UINT32_C(0x71000000);
    }

    base = fp32_from_bits((bias >> 1) + UINT32_C(0x07800000)) + base;
    const uint32_t bits = fp32_to_bits(base);
    const uint32_t exp_bits = (bits >> 13) & UINT32_C(0x00007C00);
    const uint32_t mantissa_bits = bits & UINT32_C(0x00000FFF);
    const uint32_t nonsign = exp_bits + mantissa_bits;
    return (sign >> 16) | (shl1_w > UINT32_C(0xFF000000) ? UINT16_C(0x7E00) : nonsign);
}

#define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
#define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x)

#endif // __F16C__

#endif // __ARM_NEON

//
// global data
//

// precomputed gelu table for f16 (128 KB)
static ggml_fp16_t table_gelu_f16[1 << 16];

// precomputed quick gelu table for f16 (128 KB)
static ggml_fp16_t table_gelu_quick_f16[1 << 16];

// precomputed silu table for f16 (128 KB)
static ggml_fp16_t table_silu_f16[1 << 16];

// precomputed exp table for f16 (128 KB)
static ggml_fp16_t table_exp_f16[1 << 16];

// precomputed f32 table for f16 (256 KB)
static float table_f32_f16[1 << 16];

#if defined(__ARM_NEON) || defined(__wasm_simd128__)
#define B1(c,s,n)  0x ## n ## c ,  0x ## n ## s
#define B2(c,s,n) B1(c,s,n ## c), B1(c,s,n ## s)
#define B3(c,s,n) B2(c,s,n ## c), B2(c,s,n ## s)
#define B4(c,s,n) B3(c,s,n ## c), B3(c,s,n ## s)
#define B5(c,s,n) B4(c,s,n ## c), B4(c,s,n ## s)
#define B6(c,s,n) B5(c,s,n ## c), B5(c,s,n ## s)
#define B7(c,s,n) B6(c,s,n ## c), B6(c,s,n ## s)
#define B8(c,s  ) B7(c,s,     c), B7(c,s,     s)

// precomputed tables for expanding 8bits to 8 bytes:
static const uint64_t table_b2b_0[1 << 8] = { B8(00, 10) }; // ( b) << 4
static const uint64_t table_b2b_1[1 << 8] = { B8(10, 00) }; // (!b) << 4
#endif
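// Illustrative only: each entry expands the 8 bits of its index into 8 bytes,
// one byte per bit, with the LSB of the index mapping to the lowest byte.
// For example, for index 0x05 (bits 0 and 2 set), assuming the B8 expansion above:
//
//   table_b2b_0[0x05] == 0x0000000000100010  // set bits   -> 0x10, clear -> 0x00
//   table_b2b_1[0x05] == 0x1010101010001000  // clear bits -> 0x10, set   -> 0x00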
// On ARM NEON, it's quicker to directly convert x -> x instead of calling into ggml_lookup_fp16_to_fp32,
// so we define GGML_FP16_TO_FP32 and GGML_FP32_TO_FP16 elsewhere for NEON.
// This is also true for POWER9.
#if !defined(GGML_FP16_TO_FP32) || !defined(GGML_FP32_TO_FP16)

inline static float ggml_lookup_fp16_to_fp32(ggml_fp16_t f) {
    uint16_t s;
    memcpy(&s, &f, sizeof(uint16_t));
    return table_f32_f16[s];
}

#define GGML_FP16_TO_FP32(x) ggml_lookup_fp16_to_fp32(x)
#define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x)

#endif

// note: do not use these inside ggml.c
// these are meant to be used via the ggml.h API
float ggml_fp16_to_fp32(ggml_fp16_t x) {
    return (float) GGML_FP16_TO_FP32(x);
}

ggml_fp16_t ggml_fp32_to_fp16(float x) {
    return GGML_FP32_TO_FP16(x);
}
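// Illustrative only: a round trip through fp16 is exact for values that are
// representable in half precision:
#if 0
const ggml_fp16_t h = ggml_fp32_to_fp16(1.5f);
const float       f = ggml_fp16_to_fp32(h); // f == 1.5f, exactly representable in fp16
#endif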
  426. void ggml_fp16_to_fp32_row(const ggml_fp16_t * x, float * y, int n) {
  427. for (int i = 0; i < n; i++) {
  428. y[i] = GGML_FP16_TO_FP32(x[i]);
  429. }
  430. }
  431. void ggml_fp32_to_fp16_row(const float * x, ggml_fp16_t * y, int n) {
  432. int i = 0;
  433. #if defined(__F16C__)
  434. for (; i + 7 < n; i += 8) {
  435. __m256 x_vec = _mm256_loadu_ps(x + i);
  436. __m128i y_vec = _mm256_cvtps_ph(x_vec, _MM_FROUND_TO_NEAREST_INT);
  437. _mm_storeu_si128((__m128i *)(y + i), y_vec);
  438. }
  439. for(; i + 3 < n; i += 4) {
  440. __m128 x_vec = _mm_loadu_ps(x + i);
  441. __m128i y_vec = _mm_cvtps_ph(x_vec, _MM_FROUND_TO_NEAREST_INT);
  442. _mm_storel_epi64((__m128i *)(y + i), y_vec);
  443. }
  444. #endif
  445. for (; i < n; i++) {
  446. y[i] = GGML_FP32_TO_FP16(x[i]);
  447. }
  448. }

//
// timing
//

#if defined(_MSC_VER) || defined(__MINGW32__)
static int64_t timer_freq, timer_start;
void ggml_time_init(void) {
    LARGE_INTEGER t;
    QueryPerformanceFrequency(&t);
    timer_freq = t.QuadPart;

    // The multiplication by 1000 or 1000000 below can cause an overflow if timer_freq
    // and the uptime are high enough.
    // We subtract the program start time to reduce the likelihood of that happening.
    QueryPerformanceCounter(&t);
    timer_start = t.QuadPart;
}

int64_t ggml_time_ms(void) {
    LARGE_INTEGER t;
    QueryPerformanceCounter(&t);
    return ((t.QuadPart-timer_start) * 1000) / timer_freq;
}

int64_t ggml_time_us(void) {
    LARGE_INTEGER t;
    QueryPerformanceCounter(&t);
    return ((t.QuadPart-timer_start) * 1000000) / timer_freq;
}
#else
void ggml_time_init(void) {}

int64_t ggml_time_ms(void) {
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (int64_t)ts.tv_sec*1000 + (int64_t)ts.tv_nsec/1000000;
}

int64_t ggml_time_us(void) {
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (int64_t)ts.tv_sec*1000000 + (int64_t)ts.tv_nsec/1000;
}
#endif

int64_t ggml_cycles(void) {
    return clock();
}

int64_t ggml_cycles_per_ms(void) {
    return CLOCKS_PER_SEC/1000;
}

#ifdef GGML_PERF
#define ggml_perf_time_ms()       ggml_time_ms()
#define ggml_perf_time_us()       ggml_time_us()
#define ggml_perf_cycles()        ggml_cycles()
#define ggml_perf_cycles_per_ms() ggml_cycles_per_ms()
#else
#define ggml_perf_time_ms()       0
#define ggml_perf_time_us()       0
#define ggml_perf_cycles()        0
#define ggml_perf_cycles_per_ms() 0
#endif

//
// cache line
//

#if defined(__cpp_lib_hardware_interference_size)
#define CACHE_LINE_SIZE hardware_destructive_interference_size
#else
#if defined(__POWER9_VECTOR__)
#define CACHE_LINE_SIZE 128
#else
#define CACHE_LINE_SIZE 64
#endif
#endif

static const size_t CACHE_LINE_SIZE_F32 = CACHE_LINE_SIZE/sizeof(float);

//
// quantization
//

#define MM256_SET_M128I(a, b) _mm256_insertf128_si256(_mm256_castsi128_si256(b), (a), 1)

#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__)
// multiply int8_t, add results pairwise twice
static inline __m128i mul_sum_i8_pairs(const __m128i x, const __m128i y) {
    // Get absolute values of x vectors
    const __m128i ax = _mm_sign_epi8(x, x);
    // Sign the values of the y vectors
    const __m128i sy = _mm_sign_epi8(y, x);
    // Perform multiplication and create 16-bit values
    const __m128i dot = _mm_maddubs_epi16(ax, sy);
    const __m128i ones = _mm_set1_epi16(1);
    return _mm_madd_epi16(ones, dot);
}
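
// note (added): the absolute-value/sign shuffle above exists because
// _mm_maddubs_epi16 multiplies *unsigned* bytes by signed bytes. Moving the
// sign of x onto y leaves each product unchanged while satisfying that contract:
//
//   x*y == |x| * (sign(x)*y)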

#if __AVX__ || __AVX2__ || __AVX512F__
// horizontally add 8 floats
static inline float hsum_float_8(const __m256 x) {
    __m128 res = _mm256_extractf128_ps(x, 1);
    res = _mm_add_ps(res, _mm256_castps256_ps128(x));
    res = _mm_add_ps(res, _mm_movehl_ps(res, res));
    res = _mm_add_ss(res, _mm_movehdup_ps(res));
    return _mm_cvtss_f32(res);
}

// horizontally add 8 int32_t
static inline int hsum_i32_8(const __m256i a) {
    const __m128i sum128 = _mm_add_epi32(_mm256_castsi256_si128(a), _mm256_extractf128_si256(a, 1));
    const __m128i hi64 = _mm_unpackhi_epi64(sum128, sum128);
    const __m128i sum64 = _mm_add_epi32(hi64, sum128);
    const __m128i hi32 = _mm_shuffle_epi32(sum64, _MM_SHUFFLE(2, 3, 0, 1));
    return _mm_cvtsi128_si32(_mm_add_epi32(sum64, hi32));
}

// horizontally add 4 int32_t
static inline int hsum_i32_4(const __m128i a) {
    const __m128i hi64 = _mm_unpackhi_epi64(a, a);
    const __m128i sum64 = _mm_add_epi32(hi64, a);
    const __m128i hi32 = _mm_shuffle_epi32(sum64, _MM_SHUFFLE(2, 3, 0, 1));
    return _mm_cvtsi128_si32(_mm_add_epi32(sum64, hi32));
}

#if defined(__AVX2__) || defined(__AVX512F__)
// spread 32 bits to 32 bytes { 0x00, 0xFF }
static inline __m256i bytes_from_bits_32(const uint8_t * x) {
    uint32_t x32;
    memcpy(&x32, x, sizeof(uint32_t));
    const __m256i shuf_mask = _mm256_set_epi64x(
            0x0303030303030303, 0x0202020202020202,
            0x0101010101010101, 0x0000000000000000);
    __m256i bytes = _mm256_shuffle_epi8(_mm256_set1_epi32(x32), shuf_mask);
    const __m256i bit_mask = _mm256_set1_epi64x(0x7fbfdfeff7fbfdfe);
    bytes = _mm256_or_si256(bytes, bit_mask);
    return _mm256_cmpeq_epi8(bytes, _mm256_set1_epi64x(-1));
}
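
// note (added): after the byte-broadcast shuffle above, byte 8*g + k of the
// vector is a copy of source byte g. bit_mask sets every bit of that byte
// except bit k, so the OR followed by the compare against all-ones yields
// 0xFF exactly when bit 8*g + k of the 32-bit input was set, 0x00 otherwise.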

// Unpack 32 4-bit fields into 32 bytes
// The output vector contains 32 bytes, each one in the [0 .. 15] interval
static inline __m256i bytes_from_nibbles_32(const uint8_t * rsi)
{
    const __m128i tmp = _mm_loadu_si128((const __m128i *)rsi);
    const __m256i bytes = MM256_SET_M128I(_mm_srli_epi16(tmp, 4), tmp);
    const __m256i lowMask = _mm256_set1_epi8( 0xF );
    return _mm256_and_si256(lowMask, bytes);
}

// add int16_t pairwise and return as float vector
static inline __m256 sum_i16_pairs_float(const __m256i x) {
    const __m256i ones = _mm256_set1_epi16(1);
    const __m256i summed_pairs = _mm256_madd_epi16(ones, x);
    return _mm256_cvtepi32_ps(summed_pairs);
}

static inline __m256 mul_sum_us8_pairs_float(const __m256i ax, const __m256i sy) {
#if __AVXVNNI__
    const __m256i zero = _mm256_setzero_si256();
    const __m256i summed_pairs = _mm256_dpbusd_epi32(zero, ax, sy);
    return _mm256_cvtepi32_ps(summed_pairs);
#else
    // Perform multiplication and create 16-bit values
    const __m256i dot = _mm256_maddubs_epi16(ax, sy);
    return sum_i16_pairs_float(dot);
#endif
}

// multiply int8_t, add results pairwise twice and return as float vector
static inline __m256 mul_sum_i8_pairs_float(const __m256i x, const __m256i y) {
#if __AVXVNNIINT8__
    const __m256i zero = _mm256_setzero_si256();
    const __m256i summed_pairs = _mm256_dpbssd_epi32(zero, x, y);
    return _mm256_cvtepi32_ps(summed_pairs);
#else
    // Get absolute values of x vectors
    const __m256i ax = _mm256_sign_epi8(x, x);
    // Sign the values of the y vectors
    const __m256i sy = _mm256_sign_epi8(y, x);
    return mul_sum_us8_pairs_float(ax, sy);
#endif
}

static inline __m128i packNibbles( __m256i bytes )
{
    // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh
#if __AVX512F__
    const __m256i bytes_srli_4 = _mm256_srli_epi16(bytes, 4); // 0000_0000_abcd_0000
    bytes = _mm256_or_si256(bytes, bytes_srli_4);             // 0000_abcd_abcd_efgh
    return _mm256_cvtepi16_epi8(bytes);                       // abcd_efgh
#else
    const __m256i lowByte = _mm256_set1_epi16( 0xFF );
    __m256i high = _mm256_andnot_si256( lowByte, bytes );
    __m256i low = _mm256_and_si256( lowByte, bytes );
    high = _mm256_srli_epi16( high, 4 );
    bytes = _mm256_or_si256( low, high );

    // Compress uint16_t lanes into bytes
    __m128i r0 = _mm256_castsi256_si128( bytes );
    __m128i r1 = _mm256_extracti128_si256( bytes, 1 );
    return _mm_packus_epi16( r0, r1 );
#endif
}
#elif defined(__AVX__)
// spread 32 bits to 32 bytes { 0x00, 0xFF }
static inline __m256i bytes_from_bits_32(const uint8_t * x) {
    uint32_t x32;
    memcpy(&x32, x, sizeof(uint32_t));
    const __m128i shuf_maskl = _mm_set_epi64x(0x0101010101010101, 0x0000000000000000);
    const __m128i shuf_maskh = _mm_set_epi64x(0x0303030303030303, 0x0202020202020202);
    __m128i bytesl = _mm_shuffle_epi8(_mm_set1_epi32(x32), shuf_maskl);
    __m128i bytesh = _mm_shuffle_epi8(_mm_set1_epi32(x32), shuf_maskh);
    const __m128i bit_mask = _mm_set1_epi64x(0x7fbfdfeff7fbfdfe);
    bytesl = _mm_or_si128(bytesl, bit_mask);
    bytesh = _mm_or_si128(bytesh, bit_mask);
    bytesl = _mm_cmpeq_epi8(bytesl, _mm_set1_epi64x(-1));
    bytesh = _mm_cmpeq_epi8(bytesh, _mm_set1_epi64x(-1));
    return MM256_SET_M128I(bytesh, bytesl);
}

// Unpack 32 4-bit fields into 32 bytes
// The output vector contains 32 bytes, each one in the [0 .. 15] interval
static inline __m256i bytes_from_nibbles_32(const uint8_t * rsi)
{
    // Load 16 bytes from memory
    __m128i tmpl = _mm_loadu_si128((const __m128i *)rsi);
    __m128i tmph = _mm_srli_epi16(tmpl, 4);
    const __m128i lowMask = _mm_set1_epi8(0xF);
    tmpl = _mm_and_si128(lowMask, tmpl);
    tmph = _mm_and_si128(lowMask, tmph);
    return MM256_SET_M128I(tmph, tmpl);
}

// add int16_t pairwise and return as float vector
static inline __m256 sum_i16_pairs_float(const __m128i xh, const __m128i xl) {
    const __m128i ones = _mm_set1_epi16(1);
    const __m128i summed_pairsl = _mm_madd_epi16(ones, xl);
    const __m128i summed_pairsh = _mm_madd_epi16(ones, xh);
    const __m256i summed_pairs = MM256_SET_M128I(summed_pairsh, summed_pairsl);
    return _mm256_cvtepi32_ps(summed_pairs);
}

static inline __m256 mul_sum_us8_pairs_float(const __m256i ax, const __m256i sy) {
    const __m128i axl = _mm256_castsi256_si128(ax);
    const __m128i axh = _mm256_extractf128_si256(ax, 1);
    const __m128i syl = _mm256_castsi256_si128(sy);
    const __m128i syh = _mm256_extractf128_si256(sy, 1);
    // Perform multiplication and create 16-bit values
    const __m128i dotl = _mm_maddubs_epi16(axl, syl);
    const __m128i doth = _mm_maddubs_epi16(axh, syh);
    return sum_i16_pairs_float(doth, dotl);
}

// multiply int8_t, add results pairwise twice and return as float vector
static inline __m256 mul_sum_i8_pairs_float(const __m256i x, const __m256i y) {
    const __m128i xl = _mm256_castsi256_si128(x);
    const __m128i xh = _mm256_extractf128_si256(x, 1);
    const __m128i yl = _mm256_castsi256_si128(y);
    const __m128i yh = _mm256_extractf128_si256(y, 1);
    // Get absolute values of x vectors
    const __m128i axl = _mm_sign_epi8(xl, xl);
    const __m128i axh = _mm_sign_epi8(xh, xh);
    // Sign the values of the y vectors
    const __m128i syl = _mm_sign_epi8(yl, xl);
    const __m128i syh = _mm_sign_epi8(yh, xh);
    // Perform multiplication and create 16-bit values
    const __m128i dotl = _mm_maddubs_epi16(axl, syl);
    const __m128i doth = _mm_maddubs_epi16(axh, syh);
    return sum_i16_pairs_float(doth, dotl);
}

static inline __m128i packNibbles( __m128i bytes1, __m128i bytes2 )
{
    // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh
    const __m128i lowByte = _mm_set1_epi16( 0xFF );
    __m128i high = _mm_andnot_si128( lowByte, bytes1 );
    __m128i low = _mm_and_si128( lowByte, bytes1 );
    high = _mm_srli_epi16( high, 4 );
    bytes1 = _mm_or_si128( low, high );
    high = _mm_andnot_si128( lowByte, bytes2 );
    low = _mm_and_si128( lowByte, bytes2 );
    high = _mm_srli_epi16( high, 4 );
    bytes2 = _mm_or_si128( low, high );

    return _mm_packus_epi16( bytes1, bytes2);
}
#endif
#elif defined(__SSSE3__)
// horizontally add 4x4 floats
static inline float hsum_float_4x4(const __m128 a, const __m128 b, const __m128 c, const __m128 d) {
    __m128 res_0 = _mm_hadd_ps(a, b);
    __m128 res_1 = _mm_hadd_ps(c, d);
    __m128 res   = _mm_hadd_ps(res_0, res_1);
    res = _mm_hadd_ps(res, res);
    res = _mm_hadd_ps(res, res);
    return _mm_cvtss_f32(res);
}
#endif // __AVX__ || __AVX2__ || __AVX512F__
#endif // defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__)

#if defined(__ARM_NEON)
#if !defined(__aarch64__)

inline static int32_t vaddvq_s32(int32x4_t v) {
    return vgetq_lane_s32(v, 0) + vgetq_lane_s32(v, 1) + vgetq_lane_s32(v, 2) + vgetq_lane_s32(v, 3);
}

inline static float vaddvq_f32(float32x4_t v) {
    return vgetq_lane_f32(v, 0) + vgetq_lane_f32(v, 1) + vgetq_lane_f32(v, 2) + vgetq_lane_f32(v, 3);
}

inline static float vmaxvq_f32(float32x4_t v) {
    return
        MAX(MAX(vgetq_lane_f32(v, 0), vgetq_lane_f32(v, 1)),
            MAX(vgetq_lane_f32(v, 2), vgetq_lane_f32(v, 3)));
}

inline static int32x4_t vcvtnq_s32_f32(float32x4_t v) {
    int32x4_t res;

    res[0] = roundf(vgetq_lane_f32(v, 0));
    res[1] = roundf(vgetq_lane_f32(v, 1));
    res[2] = roundf(vgetq_lane_f32(v, 2));
    res[3] = roundf(vgetq_lane_f32(v, 3));

    return res;
}

#endif
#endif

#define QK4_0 32
typedef struct {
    ggml_fp16_t d;          // delta
    uint8_t qs[QK4_0 / 2];  // nibbles / quants
} block_q4_0;
static_assert(sizeof(block_q4_0) == sizeof(ggml_fp16_t) + QK4_0 / 2, "wrong q4_0 block size/padding");

#define QK4_1 32
typedef struct {
    ggml_fp16_t d;          // delta
    ggml_fp16_t m;          // min
    uint8_t qs[QK4_1 / 2];  // nibbles / quants
} block_q4_1;
static_assert(sizeof(block_q4_1) == 2 * sizeof(ggml_fp16_t) + QK4_1 / 2, "wrong q4_1 block size/padding");

#define QK5_0 32
typedef struct {
    ggml_fp16_t d;          // delta
    uint8_t qh[4];          // 5-th bit of quants
    uint8_t qs[QK5_0 / 2];  // nibbles / quants
} block_q5_0;
static_assert(sizeof(block_q5_0) == sizeof(ggml_fp16_t) + sizeof(uint32_t) + QK5_0 / 2, "wrong q5_0 block size/padding");

#define QK5_1 32
typedef struct {
    ggml_fp16_t d;          // delta
    ggml_fp16_t m;          // min
    uint8_t qh[4];          // 5-th bit of quants
    uint8_t qs[QK5_1 / 2];  // nibbles / quants
} block_q5_1;
static_assert(sizeof(block_q5_1) == 2 * sizeof(ggml_fp16_t) + sizeof(uint32_t) + QK5_1 / 2, "wrong q5_1 block size/padding");

#define QK8_0 32
typedef struct {
    ggml_fp16_t d;          // delta
    int8_t qs[QK8_0];       // quants
} block_q8_0;
static_assert(sizeof(block_q8_0) == sizeof(ggml_fp16_t) + QK8_0, "wrong q8_0 block size/padding");

#define QK8_1 32
typedef struct {
    float d;                // delta
    float s;                // d * sum(qs[i])
    int8_t qs[QK8_1];       // quants
} block_q8_1;
static_assert(sizeof(block_q8_1) == 2*sizeof(float) + QK8_1, "wrong q8_1 block size/padding");
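
// storage cost per 32-element block (added, derivable from the static_asserts above):
//   q4_0 = 18 bytes (4.5 bits/weight),  q4_1 = 20 bytes (5.0 bits/weight),
//   q5_0 = 22 bytes (5.5 bits/weight),  q5_1 = 24 bytes (6.0 bits/weight),
//   q8_0 = 34 bytes (8.5 bits/weight),  q8_1 = 40 bytes (10.0 bits/weight)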

// reference implementation for deterministic creation of model files
static void quantize_row_q4_0_reference(const float * restrict x, block_q4_0 * restrict y, int k) {
    static const int qk = QK4_0;

    assert(k % qk == 0);

    const int nb = k / qk;

    for (int i = 0; i < nb; i++) {
        float amax = 0.0f; // absolute max
        float max  = 0.0f;

        for (int j = 0; j < qk; j++) {
            const float v = x[i*qk + j];
            if (amax < fabsf(v)) {
                amax = fabsf(v);
                max  = v;
            }
        }

        const float d  = max / -8;
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = GGML_FP32_TO_FP16(d);

        for (int j = 0; j < qk/2; ++j) {
            const float x0 = x[i*qk + 0    + j]*id;
            const float x1 = x[i*qk + qk/2 + j]*id;

            const uint8_t xi0 = MIN(15, (int8_t)(x0 + 8.5f));
            const uint8_t xi1 = MIN(15, (int8_t)(x1 + 8.5f));

            y[i].qs[j]  = xi0;
            y[i].qs[j] |= xi1 << 4;
        }
    }
}

static void quantize_row_q4_0(const float * restrict x, void * restrict y, int k) {
    quantize_row_q4_0_reference(x, y, k);
}
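
// worked example (added): suppose the value of largest magnitude in a block is
// max = -2.0f. Then d = -2.0f / -8 = 0.25f and id = 4.0f. A source value
// v = 1.0f maps to (int8_t)(1.0f*4.0f + 8.5f) = 12, which dequantizes back to
// (12 - 8)*0.25f = 1.0f. The +8.5f shifts the signed range [-8, 7] into the
// stored nibble range [0, 15] with rounding.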

static void quantize_row_q4_1_reference(const float * restrict x, block_q4_1 * restrict y, int k) {
    const int qk = QK4_1;

    assert(k % qk == 0);

    const int nb = k / qk;

    for (int i = 0; i < nb; i++) {
        float min = FLT_MAX;
        float max = -FLT_MAX;

        for (int j = 0; j < qk; j++) {
            const float v = x[i*qk + j];

            if (v < min) min = v;
            if (v > max) max = v;
        }

        const float d  = (max - min) / ((1 << 4) - 1);
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = GGML_FP32_TO_FP16(d);
        y[i].m = GGML_FP32_TO_FP16(min);

        for (int j = 0; j < qk/2; ++j) {
            const float x0 = (x[i*qk + 0    + j] - min)*id;
            const float x1 = (x[i*qk + qk/2 + j] - min)*id;

            const uint8_t xi0 = MIN(15, (int8_t)(x0 + 0.5f));
            const uint8_t xi1 = MIN(15, (int8_t)(x1 + 0.5f));

            y[i].qs[j]  = xi0;
            y[i].qs[j] |= xi1 << 4;
        }
    }
}

static void quantize_row_q4_1(const float * restrict x, void * restrict y, int k) {
    quantize_row_q4_1_reference(x, y, k);
}

static void quantize_row_q5_0_reference(const float * restrict x, block_q5_0 * restrict y, int k) {
    static const int qk = QK5_0;

    assert(k % qk == 0);

    const int nb = k / qk;

    for (int i = 0; i < nb; i++) {
        float amax = 0.0f; // absolute max
        float max  = 0.0f;

        for (int j = 0; j < qk; j++) {
            const float v = x[i*qk + j];
            if (amax < fabsf(v)) {
                amax = fabsf(v);
                max  = v;
            }
        }

        const float d  = max / -16;
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = GGML_FP32_TO_FP16(d);

        uint32_t qh = 0;

        for (int j = 0; j < qk/2; ++j) {
            const float x0 = x[i*qk + 0    + j]*id;
            const float x1 = x[i*qk + qk/2 + j]*id;

            const uint8_t xi0 = MIN(31, (int8_t)(x0 + 16.5f));
            const uint8_t xi1 = MIN(31, (int8_t)(x1 + 16.5f));

            y[i].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4);

            // get the 5-th bit and store it in qh at the right position
            qh |= ((xi0 & 0x10) >> 4) << (j + 0);
            qh |= ((xi1 & 0x10) >> 4) << (j + qk/2);
        }

        memcpy(&y[i].qh, &qh, sizeof(qh));
    }
}

static void quantize_row_q5_0(const float * restrict x, void * restrict y, int k) {
    quantize_row_q5_0_reference(x, y, k);
}
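
// note (added): for q5_0/q5_1 the 32 fifth bits of a block are packed into the
// 32-bit qh, with quant j of the first half at bit j and quant j of the second
// half at bit j + 16. For example, if only xi0 of j == 3 has its 5th bit set
// (xi0 >= 16), then qh == 1u << 3.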

static void quantize_row_q5_1_reference(const float * restrict x, block_q5_1 * restrict y, int k) {
    const int qk = QK5_1;

    assert(k % qk == 0);

    const int nb = k / qk;

    for (int i = 0; i < nb; i++) {
        float min = FLT_MAX;
        float max = -FLT_MAX;

        for (int j = 0; j < qk; j++) {
            const float v = x[i*qk + j];

            if (v < min) min = v;
            if (v > max) max = v;
        }

        const float d  = (max - min) / ((1 << 5) - 1);
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = GGML_FP32_TO_FP16(d);
        y[i].m = GGML_FP32_TO_FP16(min);

        uint32_t qh = 0;

        for (int j = 0; j < qk/2; ++j) {
            const float x0 = (x[i*qk + 0    + j] - min)*id;
            const float x1 = (x[i*qk + qk/2 + j] - min)*id;

            const uint8_t xi0 = (uint8_t)(x0 + 0.5f);
            const uint8_t xi1 = (uint8_t)(x1 + 0.5f);

            y[i].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4);

            // get the 5-th bit and store it in qh at the right position
            qh |= ((xi0 & 0x10) >> 4) << (j + 0);
            qh |= ((xi1 & 0x10) >> 4) << (j + qk/2);
        }

        memcpy(&y[i].qh, &qh, sizeof(y[i].qh));
    }
}

static void quantize_row_q5_1(const float * restrict x, void * restrict y, int k) {
    quantize_row_q5_1_reference(x, y, k);
}

// reference implementation for deterministic creation of model files
static void quantize_row_q8_0_reference(const float * restrict x, block_q8_0 * restrict y, int k) {
    assert(k % QK8_0 == 0);
    const int nb = k / QK8_0;

    for (int i = 0; i < nb; i++) {
        float amax = 0.0f; // absolute max

        for (int j = 0; j < QK8_0; j++) {
            const float v = x[i*QK8_0 + j];
            amax = MAX(amax, fabsf(v));
        }

        const float d = amax / ((1 << 7) - 1);
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = GGML_FP32_TO_FP16(d);

        for (int j = 0; j < QK8_0; ++j) {
            const float x0 = x[i*QK8_0 + j]*id;

            y[i].qs[j] = roundf(x0);
        }
    }
}

static void quantize_row_q8_0(const float * restrict x, void * restrict vy, int k) {
    assert(QK8_0 == 32);
    assert(k % QK8_0 == 0);
    const int nb = k / QK8_0;

    block_q8_0 * restrict y = vy;

#if defined(__ARM_NEON)
    for (int i = 0; i < nb; i++) {
        float32x4_t srcv [8];
        float32x4_t asrcv[8];
        float32x4_t amaxv[8];

        for (int j = 0; j < 8; j++) srcv[j]  = vld1q_f32(x + i*32 + 4*j);
        for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[j]);

        for (int j = 0; j < 4; j++) amaxv[2*j] = vmaxq_f32(asrcv[2*j], asrcv[2*j+1]);
        for (int j = 0; j < 2; j++) amaxv[4*j] = vmaxq_f32(amaxv[4*j], amaxv[4*j+2]);
        for (int j = 0; j < 1; j++) amaxv[8*j] = vmaxq_f32(amaxv[8*j], amaxv[8*j+4]);

        const float amax = vmaxvq_f32(amaxv[0]);

        const float d = amax / ((1 << 7) - 1);
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = GGML_FP32_TO_FP16(d);

        for (int j = 0; j < 8; j++) {
            const float32x4_t v  = vmulq_n_f32(srcv[j], id);
            const int32x4_t   vi = vcvtnq_s32_f32(v);

            y[i].qs[4*j + 0] = vgetq_lane_s32(vi, 0);
            y[i].qs[4*j + 1] = vgetq_lane_s32(vi, 1);
            y[i].qs[4*j + 2] = vgetq_lane_s32(vi, 2);
            y[i].qs[4*j + 3] = vgetq_lane_s32(vi, 3);
        }
    }
#elif defined(__wasm_simd128__)
    for (int i = 0; i < nb; i++) {
        v128_t srcv [8];
        v128_t asrcv[8];
        v128_t amaxv[8];

        for (int j = 0; j < 8; j++) srcv[j]  = wasm_v128_load(x + i*32 + 4*j);
        for (int j = 0; j < 8; j++) asrcv[j] = wasm_f32x4_abs(srcv[j]);

        for (int j = 0; j < 4; j++) amaxv[2*j] = wasm_f32x4_max(asrcv[2*j], asrcv[2*j+1]);
        for (int j = 0; j < 2; j++) amaxv[4*j] = wasm_f32x4_max(amaxv[4*j], amaxv[4*j+2]);
        for (int j = 0; j < 1; j++) amaxv[8*j] = wasm_f32x4_max(amaxv[8*j], amaxv[8*j+4]);

        const float amax = MAX(MAX(wasm_f32x4_extract_lane(amaxv[0], 0),
                                   wasm_f32x4_extract_lane(amaxv[0], 1)),
                               MAX(wasm_f32x4_extract_lane(amaxv[0], 2),
                                   wasm_f32x4_extract_lane(amaxv[0], 3)));

        const float d = amax / ((1 << 7) - 1);
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = GGML_FP32_TO_FP16(d);

        for (int j = 0; j < 8; j++) {
            const v128_t v  = wasm_f32x4_mul(srcv[j], wasm_f32x4_splat(id));
            const v128_t vi = wasm_i32x4_trunc_sat_f32x4(v);

            y[i].qs[4*j + 0] = wasm_i32x4_extract_lane(vi, 0);
            y[i].qs[4*j + 1] = wasm_i32x4_extract_lane(vi, 1);
            y[i].qs[4*j + 2] = wasm_i32x4_extract_lane(vi, 2);
            y[i].qs[4*j + 3] = wasm_i32x4_extract_lane(vi, 3);
        }
    }
#elif defined(__AVX2__) || defined(__AVX__)
    for (int i = 0; i < nb; i++) {
        // Load elements into 4 AVX vectors
        __m256 v0 = _mm256_loadu_ps( x );
        __m256 v1 = _mm256_loadu_ps( x + 8 );
        __m256 v2 = _mm256_loadu_ps( x + 16 );
        __m256 v3 = _mm256_loadu_ps( x + 24 );
        x += 32;

        // Compute max(abs(e)) for the block
        const __m256 signBit = _mm256_set1_ps( -0.0f );
        __m256 maxAbs = _mm256_andnot_ps( signBit, v0 );
        maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) );
        maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) );
        maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) );

        __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) );
        max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) );
        max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) );
        const float maxScalar = _mm_cvtss_f32( max4 );

        // Quantize these floats
        const float d = maxScalar / 127.f;
        y[i].d = GGML_FP32_TO_FP16(d);
        const float id = ( maxScalar != 0.0f ) ? 127.f / maxScalar : 0.0f;
        const __m256 mul = _mm256_set1_ps( id );

        // Apply the multiplier
        v0 = _mm256_mul_ps( v0, mul );
        v1 = _mm256_mul_ps( v1, mul );
        v2 = _mm256_mul_ps( v2, mul );
        v3 = _mm256_mul_ps( v3, mul );

        // Round to nearest integer
        v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST );
        v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST );
        v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST );
        v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST );

        // Convert floats to integers
        __m256i i0 = _mm256_cvtps_epi32( v0 );
        __m256i i1 = _mm256_cvtps_epi32( v1 );
        __m256i i2 = _mm256_cvtps_epi32( v2 );
        __m256i i3 = _mm256_cvtps_epi32( v3 );

#if defined(__AVX2__)
        // Convert int32 to int16
        i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3,  8, 9, 10, 11,  4, 5, 6, 7, 12, 13, 14, 15
        i2 = _mm256_packs_epi32( i2, i3 ); // 16, 17, 18, 19,  24, 25, 26, 27,  20, 21, 22, 23, 28, 29, 30, 31
        // Convert int16 to int8
        i0 = _mm256_packs_epi16( i0, i2 ); // 0, 1, 2, 3,  8, 9, 10, 11,  16, 17, 18, 19,  24, 25, 26, 27,  4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31

        // We got our precious signed bytes, but the order is now wrong
        // These AVX2 pack instructions process 16-byte pieces independently
        // The following instruction fixes the order
        const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 );
        i0 = _mm256_permutevar8x32_epi32( i0, perm );

        _mm256_storeu_si256((__m256i *)y[i].qs, i0);
#else
        // Since AVX lacks some of the necessary integer intrinsics,
        // we split the registers in half and use their SSE analogs
        __m128i ni0 = _mm256_castsi256_si128( i0 );
        __m128i ni1 = _mm256_extractf128_si256( i0, 1);
        __m128i ni2 = _mm256_castsi256_si128( i1 );
        __m128i ni3 = _mm256_extractf128_si256( i1, 1);
        __m128i ni4 = _mm256_castsi256_si128( i2 );
        __m128i ni5 = _mm256_extractf128_si256( i2, 1);
        __m128i ni6 = _mm256_castsi256_si128( i3 );
        __m128i ni7 = _mm256_extractf128_si256( i3, 1);

        // Convert int32 to int16
        ni0 = _mm_packs_epi32( ni0, ni1 );
        ni2 = _mm_packs_epi32( ni2, ni3 );
        ni4 = _mm_packs_epi32( ni4, ni5 );
        ni6 = _mm_packs_epi32( ni6, ni7 );
        // Convert int16 to int8
        ni0 = _mm_packs_epi16( ni0, ni2 );
        ni4 = _mm_packs_epi16( ni4, ni6 );

        _mm_storeu_si128((__m128i *)(y[i].qs +  0), ni0);
        _mm_storeu_si128((__m128i *)(y[i].qs + 16), ni4);
#endif
    }
#else
    // scalar
    quantize_row_q8_0_reference(x, y, k);
#endif
}

// reference implementation for deterministic creation of model files
static void quantize_row_q8_1_reference(const float * restrict x, block_q8_1 * restrict y, int k) {
    assert(QK8_1 == 32);
    assert(k % QK8_1 == 0);
    const int nb = k / QK8_1;

    for (int i = 0; i < nb; i++) {
        float amax = 0.0f; // absolute max

        for (int j = 0; j < QK8_1; j++) {
            const float v = x[i*QK8_1 + j];
            amax = MAX(amax, fabsf(v));
        }

        const float d = amax / ((1 << 7) - 1);
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = d;

        int sum = 0;

        for (int j = 0; j < QK8_1/2; ++j) {
            const float v0 = x[i*QK8_1           + j]*id;
            const float v1 = x[i*QK8_1 + QK8_1/2 + j]*id;

            y[i].qs[          j] = roundf(v0);
            y[i].qs[QK8_1/2 + j] = roundf(v1);

            sum += y[i].qs[          j];
            sum += y[i].qs[QK8_1/2 + j];
        }

        y[i].s = sum*d;
    }
}
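
// note (added): storing s = d * sum(qs[i]) lets the dot products against the
// offset formats (q4_1/q5_1) fold in the per-block minimum m with a single
// multiply-add (m * s) instead of re-summing the int8 quants on every call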

static void quantize_row_q8_1(const float * restrict x, void * restrict vy, int k) {
    assert(k % QK8_1 == 0);
    const int nb = k / QK8_1;

    block_q8_1 * restrict y = vy;

#if defined(__ARM_NEON)
    for (int i = 0; i < nb; i++) {
        float32x4_t srcv [8];
        float32x4_t asrcv[8];
        float32x4_t amaxv[8];

        for (int j = 0; j < 8; j++) srcv[j]  = vld1q_f32(x + i*32 + 4*j);
        for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[j]);

        for (int j = 0; j < 4; j++) amaxv[2*j] = vmaxq_f32(asrcv[2*j], asrcv[2*j+1]);
        for (int j = 0; j < 2; j++) amaxv[4*j] = vmaxq_f32(amaxv[4*j], amaxv[4*j+2]);
        for (int j = 0; j < 1; j++) amaxv[8*j] = vmaxq_f32(amaxv[8*j], amaxv[8*j+4]);

        const float amax = vmaxvq_f32(amaxv[0]);

        const float d = amax / ((1 << 7) - 1);
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = d;

        int32x4_t accv = vdupq_n_s32(0);

        for (int j = 0; j < 8; j++) {
            const float32x4_t v  = vmulq_n_f32(srcv[j], id);
            const int32x4_t   vi = vcvtnq_s32_f32(v);

            y[i].qs[4*j + 0] = vgetq_lane_s32(vi, 0);
            y[i].qs[4*j + 1] = vgetq_lane_s32(vi, 1);
            y[i].qs[4*j + 2] = vgetq_lane_s32(vi, 2);
            y[i].qs[4*j + 3] = vgetq_lane_s32(vi, 3);

            accv = vaddq_s32(accv, vi);
        }

        y[i].s = d * vaddvq_s32(accv);
    }
#elif defined(__wasm_simd128__)
    for (int i = 0; i < nb; i++) {
        v128_t srcv [8];
        v128_t asrcv[8];
        v128_t amaxv[8];

        for (int j = 0; j < 8; j++) srcv[j]  = wasm_v128_load(x + i*32 + 4*j);
        for (int j = 0; j < 8; j++) asrcv[j] = wasm_f32x4_abs(srcv[j]);

        for (int j = 0; j < 4; j++) amaxv[2*j] = wasm_f32x4_max(asrcv[2*j], asrcv[2*j+1]);
        for (int j = 0; j < 2; j++) amaxv[4*j] = wasm_f32x4_max(amaxv[4*j], amaxv[4*j+2]);
        for (int j = 0; j < 1; j++) amaxv[8*j] = wasm_f32x4_max(amaxv[8*j], amaxv[8*j+4]);

        const float amax = MAX(MAX(wasm_f32x4_extract_lane(amaxv[0], 0),
                                   wasm_f32x4_extract_lane(amaxv[0], 1)),
                               MAX(wasm_f32x4_extract_lane(amaxv[0], 2),
                                   wasm_f32x4_extract_lane(amaxv[0], 3)));

        const float d = amax / ((1 << 7) - 1);
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = d;

        v128_t accv = wasm_i32x4_splat(0);

        for (int j = 0; j < 8; j++) {
            const v128_t v  = wasm_f32x4_mul(srcv[j], wasm_f32x4_splat(id));
            const v128_t vi = wasm_i32x4_trunc_sat_f32x4(v);

            y[i].qs[4*j + 0] = wasm_i32x4_extract_lane(vi, 0);
            y[i].qs[4*j + 1] = wasm_i32x4_extract_lane(vi, 1);
            y[i].qs[4*j + 2] = wasm_i32x4_extract_lane(vi, 2);
            y[i].qs[4*j + 3] = wasm_i32x4_extract_lane(vi, 3);

            accv = wasm_i32x4_add(accv, vi);
        }

        y[i].s = d * (wasm_i32x4_extract_lane(accv, 0) +
                      wasm_i32x4_extract_lane(accv, 1) +
                      wasm_i32x4_extract_lane(accv, 2) +
                      wasm_i32x4_extract_lane(accv, 3));
    }
#elif defined(__AVX2__) || defined(__AVX__)
    for (int i = 0; i < nb; i++) {
        // Load elements into 4 AVX vectors
        __m256 v0 = _mm256_loadu_ps( x );
        __m256 v1 = _mm256_loadu_ps( x + 8 );
        __m256 v2 = _mm256_loadu_ps( x + 16 );
        __m256 v3 = _mm256_loadu_ps( x + 24 );
        x += 32;

        // Compute max(abs(e)) for the block
        const __m256 signBit = _mm256_set1_ps( -0.0f );
        __m256 maxAbs = _mm256_andnot_ps( signBit, v0 );
        maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) );
        maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) );
        maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) );

        __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) );
        max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) );
        max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) );
        const float maxScalar = _mm_cvtss_f32( max4 );

        // Quantize these floats
        const float d = maxScalar / 127.f;
        y[i].d = d;
        const float id = ( maxScalar != 0.0f ) ? 127.f / maxScalar : 0.0f;
        const __m256 mul = _mm256_set1_ps( id );

        // Apply the multiplier
        v0 = _mm256_mul_ps( v0, mul );
        v1 = _mm256_mul_ps( v1, mul );
        v2 = _mm256_mul_ps( v2, mul );
        v3 = _mm256_mul_ps( v3, mul );

        // Round to nearest integer
        v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST );
        v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST );
        v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST );
        v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST );

        // Convert floats to integers
        __m256i i0 = _mm256_cvtps_epi32( v0 );
        __m256i i1 = _mm256_cvtps_epi32( v1 );
        __m256i i2 = _mm256_cvtps_epi32( v2 );
        __m256i i3 = _mm256_cvtps_epi32( v3 );

#if defined(__AVX2__)
        // Compute the sum of the quants and set y[i].s
        y[i].s = d * hsum_i32_8(_mm256_add_epi32(_mm256_add_epi32(i0, i1), _mm256_add_epi32(i2, i3)));

        // Convert int32 to int16
        i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3,  8, 9, 10, 11,  4, 5, 6, 7, 12, 13, 14, 15
        i2 = _mm256_packs_epi32( i2, i3 ); // 16, 17, 18, 19,  24, 25, 26, 27,  20, 21, 22, 23, 28, 29, 30, 31
        // Convert int16 to int8
        i0 = _mm256_packs_epi16( i0, i2 ); // 0, 1, 2, 3,  8, 9, 10, 11,  16, 17, 18, 19,  24, 25, 26, 27,  4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31

        // We got our precious signed bytes, but the order is now wrong
        // These AVX2 pack instructions process 16-byte pieces independently
        // The following instruction fixes the order
        const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 );
        i0 = _mm256_permutevar8x32_epi32( i0, perm );

        _mm256_storeu_si256((__m256i *)y[i].qs, i0);
#else
        // Since AVX lacks some of the necessary integer intrinsics,
        // we split the registers in half and use their SSE analogs
        __m128i ni0 = _mm256_castsi256_si128( i0 );
        __m128i ni1 = _mm256_extractf128_si256( i0, 1);
        __m128i ni2 = _mm256_castsi256_si128( i1 );
        __m128i ni3 = _mm256_extractf128_si256( i1, 1);
        __m128i ni4 = _mm256_castsi256_si128( i2 );
        __m128i ni5 = _mm256_extractf128_si256( i2, 1);
        __m128i ni6 = _mm256_castsi256_si128( i3 );
        __m128i ni7 = _mm256_extractf128_si256( i3, 1);

        // Compute the sum of the quants and set y[i].s
        const __m128i s0 = _mm_add_epi32(_mm_add_epi32(ni0, ni1), _mm_add_epi32(ni2, ni3));
        const __m128i s1 = _mm_add_epi32(_mm_add_epi32(ni4, ni5), _mm_add_epi32(ni6, ni7));
        y[i].s = d * hsum_i32_4(_mm_add_epi32(s0, s1));

        // Convert int32 to int16
        ni0 = _mm_packs_epi32( ni0, ni1 );
        ni2 = _mm_packs_epi32( ni2, ni3 );
        ni4 = _mm_packs_epi32( ni4, ni5 );
        ni6 = _mm_packs_epi32( ni6, ni7 );
        // Convert int16 to int8
        ni0 = _mm_packs_epi16( ni0, ni2 );
        ni4 = _mm_packs_epi16( ni4, ni6 );

        _mm_storeu_si128((__m128i *)(y[i].qs +  0), ni0);
        _mm_storeu_si128((__m128i *)(y[i].qs + 16), ni4);
#endif
    }
#else
    // scalar
    quantize_row_q8_1_reference(x, y, k);
#endif
}

static void dequantize_row_q4_0(const block_q4_0 * restrict x, float * restrict y, int k) {
    static const int qk = QK4_0;

    assert(k % qk == 0);

    const int nb = k / qk;

    for (int i = 0; i < nb; i++) {
        const float d = GGML_FP16_TO_FP32(x[i].d);

        for (int j = 0; j < qk/2; ++j) {
            const int x0 = (x[i].qs[j] & 0x0F) - 8;
            const int x1 = (x[i].qs[j] >>   4) - 8;

            y[i*qk + j + 0   ] = x0*d;
            y[i*qk + j + qk/2] = x1*d;
        }
    }
}

static void dequantize_row_q4_1(const block_q4_1 * restrict x, float * restrict y, int k) {
    static const int qk = QK4_1;

    assert(k % qk == 0);

    const int nb = k / qk;

    for (int i = 0; i < nb; i++) {
        const float d = GGML_FP16_TO_FP32(x[i].d);
        const float m = GGML_FP16_TO_FP32(x[i].m);

        for (int j = 0; j < qk/2; ++j) {
            const int x0 = (x[i].qs[j] & 0x0F);
            const int x1 = (x[i].qs[j] >>   4);

            y[i*qk + j + 0   ] = x0*d + m;
            y[i*qk + j + qk/2] = x1*d + m;
        }
    }
}

static void dequantize_row_q5_0(const block_q5_0 * restrict x, float * restrict y, int k) {
    static const int qk = QK5_0;

    assert(k % qk == 0);

    const int nb = k / qk;

    for (int i = 0; i < nb; i++) {
        const float d = GGML_FP16_TO_FP32(x[i].d);

        uint32_t qh;
        memcpy(&qh, x[i].qh, sizeof(qh));

        for (int j = 0; j < qk/2; ++j) {
            const uint8_t xh_0 = ((qh >> (j +  0)) << 4) & 0x10;
            const uint8_t xh_1 = ((qh >> (j + 12))     ) & 0x10;

            const int32_t x0 = ((x[i].qs[j] & 0x0F) | xh_0) - 16;
            const int32_t x1 = ((x[i].qs[j] >>   4) | xh_1) - 16;

            y[i*qk + j + 0   ] = x0*d;
            y[i*qk + j + qk/2] = x1*d;
        }
    }
}

static void dequantize_row_q5_1(const block_q5_1 * restrict x, float * restrict y, int k) {
    static const int qk = QK5_1;

    assert(k % qk == 0);

    const int nb = k / qk;

    for (int i = 0; i < nb; i++) {
        const float d = GGML_FP16_TO_FP32(x[i].d);
        const float m = GGML_FP16_TO_FP32(x[i].m);

        uint32_t qh;
        memcpy(&qh, x[i].qh, sizeof(qh));

        for (int j = 0; j < qk/2; ++j) {
            const uint8_t xh_0 = ((qh >> (j +  0)) << 4) & 0x10;
            const uint8_t xh_1 = ((qh >> (j + 12))     ) & 0x10;

            const int x0 = (x[i].qs[j] & 0x0F) | xh_0;
            const int x1 = (x[i].qs[j] >>   4) | xh_1;

            y[i*qk + j + 0   ] = x0*d + m;
            y[i*qk + j + qk/2] = x1*d + m;
        }
    }
}

static void dequantize_row_q8_0(const void * restrict vx, float * restrict y, int k) {
    static const int qk = QK8_0;

    assert(k % qk == 0);

    const int nb = k / qk;

    const block_q8_0 * restrict x = vx;

    for (int i = 0; i < nb; i++) {
        const float d = GGML_FP16_TO_FP32(x[i].d);

        for (int j = 0; j < qk; ++j) {
            y[i*qk + j] = x[i].qs[j]*d;
        }
    }
}
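
// illustrative sketch (added, not part of ggml): a q8_0 round trip over one
// block. Each reconstructed value differs from the source by at most d/2 with
// d = amax/127 (plus the fp16 rounding of d itself):
//
//   float src[QK8_0], dst[QK8_0];
//   block_q8_0 blk;
//   for (int j = 0; j < QK8_0; j++) src[j] = 0.01f*(j - 16); // arbitrary test data
//   quantize_row_q8_0_reference(src, &blk, QK8_0);
//   dequantize_row_q8_0(&blk, dst, QK8_0);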

static void ggml_vec_dot_f32(const int n, float * restrict s, const float * restrict x, const float * restrict y);
static void ggml_vec_dot_f16(const int n, float * restrict s, ggml_fp16_t * restrict x, ggml_fp16_t * restrict y);
static void ggml_vec_dot_q4_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy);
static void ggml_vec_dot_q4_1_q8_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy);
static void ggml_vec_dot_q5_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy);
static void ggml_vec_dot_q5_1_q8_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy);
static void ggml_vec_dot_q8_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy);

static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = {
    [GGML_TYPE_I8] = {
        .type_name    = "i8",
        .blck_size    = 1,
        .type_size    = sizeof(int8_t),
        .is_quantized = false,
    },
    [GGML_TYPE_I16] = {
        .type_name    = "i16",
        .blck_size    = 1,
        .type_size    = sizeof(int16_t),
        .is_quantized = false,
    },
    [GGML_TYPE_I32] = {
        .type_name    = "i32",
        .blck_size    = 1,
        .type_size    = sizeof(int32_t),
        .is_quantized = false,
    },
    [GGML_TYPE_F32] = {
        .type_name    = "f32",
        .blck_size    = 1,
        .type_size    = sizeof(float),
        .is_quantized = false,
        .vec_dot      = (ggml_vec_dot_t) ggml_vec_dot_f32,
        .vec_dot_type = GGML_TYPE_F32,
    },
    [GGML_TYPE_F16] = {
        .type_name            = "f16",
        .blck_size            = 1,
        .type_size            = sizeof(ggml_fp16_t),
        .is_quantized         = false,
        .to_float             = (ggml_to_float_t) ggml_fp16_to_fp32_row,
        .from_float           = (ggml_from_float_t) ggml_fp32_to_fp16_row,
        .from_float_reference = (ggml_from_float_t) ggml_fp32_to_fp16_row,
        .vec_dot              = (ggml_vec_dot_t) ggml_vec_dot_f16,
        .vec_dot_type         = GGML_TYPE_F16,
    },
    [GGML_TYPE_Q4_0] = {
        .type_name            = "q4_0",
        .blck_size            = QK4_0,
        .type_size            = sizeof(block_q4_0),
        .is_quantized         = true,
        .to_float             = (ggml_to_float_t) dequantize_row_q4_0,
        .from_float           = quantize_row_q4_0,
        .from_float_reference = (ggml_from_float_t) quantize_row_q4_0_reference,
        .vec_dot              = ggml_vec_dot_q4_0_q8_0,
        .vec_dot_type         = GGML_TYPE_Q8_0,
    },
    [GGML_TYPE_Q4_1] = {
        .type_name            = "q4_1",
        .blck_size            = QK4_1,
        .type_size            = sizeof(block_q4_1),
        .is_quantized         = true,
        .to_float             = (ggml_to_float_t) dequantize_row_q4_1,
        .from_float           = quantize_row_q4_1,
        .from_float_reference = (ggml_from_float_t) quantize_row_q4_1_reference,
        .vec_dot              = ggml_vec_dot_q4_1_q8_1,
        .vec_dot_type         = GGML_TYPE_Q8_1,
    },
    [GGML_TYPE_Q5_0] = {
        .type_name            = "q5_0",
        .blck_size            = QK5_0,
        .type_size            = sizeof(block_q5_0),
        .is_quantized         = true,
        .to_float             = (ggml_to_float_t) dequantize_row_q5_0,
        .from_float           = quantize_row_q5_0,
        .from_float_reference = (ggml_from_float_t) quantize_row_q5_0_reference,
        .vec_dot              = ggml_vec_dot_q5_0_q8_0,
        .vec_dot_type         = GGML_TYPE_Q8_0,
    },
    [GGML_TYPE_Q5_1] = {
        .type_name            = "q5_1",
        .blck_size            = QK5_1,
        .type_size            = sizeof(block_q5_1),
        .is_quantized         = true,
        .to_float             = (ggml_to_float_t) dequantize_row_q5_1,
        .from_float           = quantize_row_q5_1,
        .from_float_reference = (ggml_from_float_t) quantize_row_q5_1_reference,
        .vec_dot              = ggml_vec_dot_q5_1_q8_1,
        .vec_dot_type         = GGML_TYPE_Q8_1,
    },
    [GGML_TYPE_Q8_0] = {
        .type_name            = "q8_0",
        .blck_size            = QK8_0,
        .type_size            = sizeof(block_q8_0),
        .is_quantized         = true,
        .to_float             = dequantize_row_q8_0,
        .from_float           = quantize_row_q8_0,
        .from_float_reference = (ggml_from_float_t) quantize_row_q8_0_reference,
        .vec_dot              = ggml_vec_dot_q8_0_q8_0,
        .vec_dot_type         = GGML_TYPE_Q8_0,
    },
    [GGML_TYPE_Q8_1] = {
        .type_name            = "q8_1",
        .blck_size            = QK8_1,
        .type_size            = sizeof(block_q8_1),
        .is_quantized         = true,
        .from_float           = quantize_row_q8_1,
        .from_float_reference = (ggml_from_float_t) quantize_row_q8_1_reference,
        .vec_dot_type         = GGML_TYPE_Q8_1,
    },
#ifdef GGML_USE_K_QUANTS
    [GGML_TYPE_Q2_K] = {
        .type_name            = "q2_K",
        .blck_size            = QK_K,
        .type_size            = sizeof(block_q2_K),
        .is_quantized         = true,
        .to_float             = (ggml_to_float_t) dequantize_row_q2_K,
        .from_float           = quantize_row_q2_K,
        .from_float_reference = (ggml_from_float_t) quantize_row_q2_K_reference,
        .vec_dot              = ggml_vec_dot_q2_K_q8_K,
        .vec_dot_type         = GGML_TYPE_Q8_K,
    },
    [GGML_TYPE_Q3_K] = {
        .type_name            = "q3_K",
        .blck_size            = QK_K,
        .type_size            = sizeof(block_q3_K),
        .is_quantized         = true,
        .to_float             = (ggml_to_float_t) dequantize_row_q3_K,
        .from_float           = quantize_row_q3_K,
        .from_float_reference = (ggml_from_float_t) quantize_row_q3_K_reference,
        .vec_dot              = ggml_vec_dot_q3_K_q8_K,
        .vec_dot_type         = GGML_TYPE_Q8_K,
    },
    [GGML_TYPE_Q4_K] = {
        .type_name            = "q4_K",
        .blck_size            = QK_K,
        .type_size            = sizeof(block_q4_K),
        .is_quantized         = true,
        .to_float             = (ggml_to_float_t) dequantize_row_q4_K,
        .from_float           = quantize_row_q4_K,
        .from_float_reference = (ggml_from_float_t) quantize_row_q4_K_reference,
        .vec_dot              = ggml_vec_dot_q4_K_q8_K,
        .vec_dot_type         = GGML_TYPE_Q8_K,
    },
    [GGML_TYPE_Q5_K] = {
        .type_name            = "q5_K",
        .blck_size            = QK_K,
        .type_size            = sizeof(block_q5_K),
        .is_quantized         = true,
        .to_float             = (ggml_to_float_t) dequantize_row_q5_K,
        .from_float           = quantize_row_q5_K,
        .from_float_reference = (ggml_from_float_t) quantize_row_q5_K_reference,
        .vec_dot              = ggml_vec_dot_q5_K_q8_K,
        .vec_dot_type         = GGML_TYPE_Q8_K,
    },
    [GGML_TYPE_Q6_K] = {
        .type_name            = "q6_K",
        .blck_size            = QK_K,
        .type_size            = sizeof(block_q6_K),
        .is_quantized         = true,
        .to_float             = (ggml_to_float_t) dequantize_row_q6_K,
        .from_float           = quantize_row_q6_K,
        .from_float_reference = (ggml_from_float_t) quantize_row_q6_K_reference,
        .vec_dot              = ggml_vec_dot_q6_K_q8_K,
        .vec_dot_type         = GGML_TYPE_Q8_K,
    },
    [GGML_TYPE_Q8_K] = {
        .type_name    = "q8_K",
        .blck_size    = QK_K,
        .type_size    = sizeof(block_q8_K),
        .is_quantized = true,
        .from_float   = quantize_row_q8_K,
    }
#endif
};

// For internal test use
ggml_type_traits_t ggml_internal_get_type_traits(enum ggml_type type) {
    GGML_ASSERT(type < GGML_TYPE_COUNT);
    return type_traits[type];
}
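
// illustrative sketch (added, not part of ggml): how a caller might use the
// traits table, e.g. to compute the byte size of one row of ne elements
// (ne must be a multiple of blck_size for quantized types):
//
//   ggml_type_traits_t tt = ggml_internal_get_type_traits(GGML_TYPE_Q4_0);
//   size_t row_size = (ne / tt.blck_size) * tt.type_size;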

//
// simd mappings
//
// we define a common set of C macros which map to specific intrinsics based on the current architecture
// we then implement the fundamental computation operations below using only these macros
// adding support for new architectures requires defining the corresponding SIMD macros
// (an illustrative sketch of how the macros compose into a kernel follows the WASM mappings below)
//
// GGML_F32_STEP / GGML_F16_STEP
//   number of elements to process in a single step
//
// GGML_F32_EPR / GGML_F16_EPR
//   number of elements to fit in a single register
//

#if defined(__ARM_NEON) && defined(__ARM_FEATURE_FMA)

#define GGML_SIMD

// F32 NEON

#define GGML_F32_STEP 16
#define GGML_F32_EPR  4

#define GGML_F32x4               float32x4_t
#define GGML_F32x4_ZERO          vdupq_n_f32(0.0f)
#define GGML_F32x4_SET1(x)       vdupq_n_f32(x)
#define GGML_F32x4_LOAD          vld1q_f32
#define GGML_F32x4_STORE         vst1q_f32
#define GGML_F32x4_FMA(a, b, c)  vfmaq_f32(a, b, c)
#define GGML_F32x4_ADD           vaddq_f32
#define GGML_F32x4_MUL           vmulq_f32
#define GGML_F32x4_REDUCE_ONE(x) vaddvq_f32(x)
#define GGML_F32x4_REDUCE(res, x)            \
{                                            \
    int offset = GGML_F32_ARR >> 1;          \
    for (int i = 0; i < offset; ++i) {       \
        x[i] = vaddq_f32(x[i], x[offset+i]); \
    }                                        \
    offset >>= 1;                            \
    for (int i = 0; i < offset; ++i) {       \
        x[i] = vaddq_f32(x[i], x[offset+i]); \
    }                                        \
    offset >>= 1;                            \
    for (int i = 0; i < offset; ++i) {       \
        x[i] = vaddq_f32(x[i], x[offset+i]); \
    }                                        \
    res = GGML_F32x4_REDUCE_ONE(x[0]);       \
}

#define GGML_F32_VEC        GGML_F32x4
#define GGML_F32_VEC_ZERO   GGML_F32x4_ZERO
#define GGML_F32_VEC_SET1   GGML_F32x4_SET1
#define GGML_F32_VEC_LOAD   GGML_F32x4_LOAD
#define GGML_F32_VEC_STORE  GGML_F32x4_STORE
#define GGML_F32_VEC_FMA    GGML_F32x4_FMA
#define GGML_F32_VEC_ADD    GGML_F32x4_ADD
#define GGML_F32_VEC_MUL    GGML_F32x4_MUL
#define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE

// F16 NEON

#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
#define GGML_F16_STEP 32
#define GGML_F16_EPR  8

#define GGML_F16x8              float16x8_t
#define GGML_F16x8_ZERO         vdupq_n_f16(0.0f)
#define GGML_F16x8_SET1(x)      vdupq_n_f16(x)
#define GGML_F16x8_LOAD         vld1q_f16
#define GGML_F16x8_STORE        vst1q_f16
#define GGML_F16x8_FMA(a, b, c) vfmaq_f16(a, b, c)
#define GGML_F16x8_ADD          vaddq_f16
#define GGML_F16x8_MUL          vmulq_f16
#define GGML_F16x8_REDUCE(res, x)                             \
do {                                                          \
    int offset = GGML_F16_ARR >> 1;                           \
    for (int i = 0; i < offset; ++i) {                        \
        x[i] = vaddq_f16(x[i], x[offset+i]);                  \
    }                                                         \
    offset >>= 1;                                             \
    for (int i = 0; i < offset; ++i) {                        \
        x[i] = vaddq_f16(x[i], x[offset+i]);                  \
    }                                                         \
    offset >>= 1;                                             \
    for (int i = 0; i < offset; ++i) {                        \
        x[i] = vaddq_f16(x[i], x[offset+i]);                  \
    }                                                         \
    const float32x4_t t0 = vcvt_f32_f16(vget_low_f16 (x[0])); \
    const float32x4_t t1 = vcvt_f32_f16(vget_high_f16(x[0])); \
    res = (ggml_float) vaddvq_f32(vaddq_f32(t0, t1));         \
} while (0)

#define GGML_F16_VEC                GGML_F16x8
#define GGML_F16_VEC_ZERO           GGML_F16x8_ZERO
#define GGML_F16_VEC_SET1           GGML_F16x8_SET1
#define GGML_F16_VEC_LOAD(p, i)     GGML_F16x8_LOAD(p)
#define GGML_F16_VEC_STORE(p, r, i) GGML_F16x8_STORE(p, r[i])
#define GGML_F16_VEC_FMA            GGML_F16x8_FMA
#define GGML_F16_VEC_ADD            GGML_F16x8_ADD
#define GGML_F16_VEC_MUL            GGML_F16x8_MUL
#define GGML_F16_VEC_REDUCE         GGML_F16x8_REDUCE
#else
// if FP16 vector arithmetic is not supported, we use FP32 instead
// and take advantage of the vcvt_ functions to convert to/from FP16

#define GGML_F16_STEP 16
#define GGML_F16_EPR  4

#define GGML_F32Cx4              float32x4_t
#define GGML_F32Cx4_ZERO         vdupq_n_f32(0.0f)
#define GGML_F32Cx4_SET1(x)      vdupq_n_f32(x)
#define GGML_F32Cx4_LOAD(x)      vcvt_f32_f16(vld1_f16(x))
#define GGML_F32Cx4_STORE(x, y)  vst1_f16(x, vcvt_f16_f32(y))
#define GGML_F32Cx4_FMA(a, b, c) vfmaq_f32(a, b, c)
#define GGML_F32Cx4_ADD          vaddq_f32
#define GGML_F32Cx4_MUL          vmulq_f32
#define GGML_F32Cx4_REDUCE       GGML_F32x4_REDUCE

#define GGML_F16_VEC                GGML_F32Cx4
#define GGML_F16_VEC_ZERO           GGML_F32Cx4_ZERO
#define GGML_F16_VEC_SET1           GGML_F32Cx4_SET1
#define GGML_F16_VEC_LOAD(p, i)     GGML_F32Cx4_LOAD(p)
#define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx4_STORE(p, r[i])
#define GGML_F16_VEC_FMA            GGML_F32Cx4_FMA
#define GGML_F16_VEC_ADD            GGML_F32Cx4_ADD
#define GGML_F16_VEC_MUL            GGML_F32Cx4_MUL
#define GGML_F16_VEC_REDUCE         GGML_F32Cx4_REDUCE
#endif

#elif defined(__AVX__)

#define GGML_SIMD

// F32 AVX

#define GGML_F32_STEP 32
#define GGML_F32_EPR  8

#define GGML_F32x8         __m256
#define GGML_F32x8_ZERO    _mm256_setzero_ps()
#define GGML_F32x8_SET1(x) _mm256_set1_ps(x)
#define GGML_F32x8_LOAD    _mm256_loadu_ps
#define GGML_F32x8_STORE   _mm256_storeu_ps
#if defined(__FMA__)
#define GGML_F32x8_FMA(a, b, c) _mm256_fmadd_ps(b, c, a)
#else
#define GGML_F32x8_FMA(a, b, c) _mm256_add_ps(_mm256_mul_ps(b, c), a)
#endif
#define GGML_F32x8_ADD _mm256_add_ps
#define GGML_F32x8_MUL _mm256_mul_ps
#define GGML_F32x8_REDUCE(res, x)                                 \
do {                                                              \
    int offset = GGML_F32_ARR >> 1;                               \
    for (int i = 0; i < offset; ++i) {                            \
        x[i] = _mm256_add_ps(x[i], x[offset+i]);                  \
    }                                                             \
    offset >>= 1;                                                 \
    for (int i = 0; i < offset; ++i) {                            \
        x[i] = _mm256_add_ps(x[i], x[offset+i]);                  \
    }                                                             \
    offset >>= 1;                                                 \
    for (int i = 0; i < offset; ++i) {                            \
        x[i] = _mm256_add_ps(x[i], x[offset+i]);                  \
    }                                                             \
    const __m128 t0 = _mm_add_ps(_mm256_castps256_ps128(x[0]),    \
                                 _mm256_extractf128_ps(x[0], 1)); \
    const __m128 t1 = _mm_hadd_ps(t0, t0);                        \
    res = _mm_cvtss_f32(_mm_hadd_ps(t1, t1));                     \
} while (0)
// TODO: is this optimal ?

#define GGML_F32_VEC        GGML_F32x8
#define GGML_F32_VEC_ZERO   GGML_F32x8_ZERO
#define GGML_F32_VEC_SET1   GGML_F32x8_SET1
#define GGML_F32_VEC_LOAD   GGML_F32x8_LOAD
#define GGML_F32_VEC_STORE  GGML_F32x8_STORE
#define GGML_F32_VEC_FMA    GGML_F32x8_FMA
#define GGML_F32_VEC_ADD    GGML_F32x8_ADD
#define GGML_F32_VEC_MUL    GGML_F32x8_MUL
#define GGML_F32_VEC_REDUCE GGML_F32x8_REDUCE

// F16 AVX

#define GGML_F16_STEP 32
#define GGML_F16_EPR  8

// F16 arithmetic is not supported by AVX, so we use F32 instead

#define GGML_F32Cx8         __m256
#define GGML_F32Cx8_ZERO    _mm256_setzero_ps()
#define GGML_F32Cx8_SET1(x) _mm256_set1_ps(x)

#if defined(__F16C__)
// the  _mm256_cvt intrinsics require F16C
#define GGML_F32Cx8_LOAD(x)     _mm256_cvtph_ps(_mm_loadu_si128((__m128i *)(x)))
#define GGML_F32Cx8_STORE(x, y) _mm_storeu_si128((__m128i *)(x), _mm256_cvtps_ph(y, 0))
#else
static inline __m256 __avx_f32cx8_load(ggml_fp16_t *x) {
    float tmp[8];

    for (int i = 0; i < 8; i++) {
        tmp[i] = GGML_FP16_TO_FP32(x[i]);
    }

    return _mm256_loadu_ps(tmp);
}
static inline void __avx_f32cx8_store(ggml_fp16_t *x, __m256 y) {
    float arr[8];

    _mm256_storeu_ps(arr, y);

    for (int i = 0; i < 8; i++)
        x[i] = GGML_FP32_TO_FP16(arr[i]);
}
#define GGML_F32Cx8_LOAD(x)     __avx_f32cx8_load(x)
#define GGML_F32Cx8_STORE(x, y) __avx_f32cx8_store(x, y)
#endif

#define GGML_F32Cx8_FMA    GGML_F32x8_FMA
#define GGML_F32Cx8_ADD    _mm256_add_ps
#define GGML_F32Cx8_MUL    _mm256_mul_ps
#define GGML_F32Cx8_REDUCE GGML_F32x8_REDUCE

#define GGML_F16_VEC                GGML_F32Cx8
#define GGML_F16_VEC_ZERO           GGML_F32Cx8_ZERO
#define GGML_F16_VEC_SET1           GGML_F32Cx8_SET1
#define GGML_F16_VEC_LOAD(p, i)     GGML_F32Cx8_LOAD(p)
#define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx8_STORE(p, r[i])
#define GGML_F16_VEC_FMA            GGML_F32Cx8_FMA
#define GGML_F16_VEC_ADD            GGML_F32Cx8_ADD
#define GGML_F16_VEC_MUL            GGML_F32Cx8_MUL
#define GGML_F16_VEC_REDUCE         GGML_F32Cx8_REDUCE

#elif defined(__POWER9_VECTOR__)

#define GGML_SIMD

// F32 POWER9

#define GGML_F32_STEP 32
#define GGML_F32_EPR  4

#define GGML_F32x4              vector float
#define GGML_F32x4_ZERO         0.0f
#define GGML_F32x4_SET1         vec_splats
#define GGML_F32x4_LOAD(p)      vec_xl(0, p)
#define GGML_F32x4_STORE(p, r)  vec_xst(r, 0, p)
#define GGML_F32x4_FMA(a, b, c) vec_madd(b, c, a)
#define GGML_F32x4_ADD          vec_add
#define GGML_F32x4_MUL          vec_mul
#define GGML_F32x4_REDUCE(res, x)          \
{                                          \
    int offset = GGML_F32_ARR >> 1;        \
    for (int i = 0; i < offset; ++i) {     \
        x[i] = vec_add(x[i], x[offset+i]); \
    }                                      \
    offset >>= 1;                          \
    for (int i = 0; i < offset; ++i) {     \
        x[i] = vec_add(x[i], x[offset+i]); \
    }                                      \
    offset >>= 1;                          \
    for (int i = 0; i < offset; ++i) {     \
        x[i] = vec_add(x[i], x[offset+i]); \
    }                                      \
    res = vec_extract(x[0], 0) +           \
          vec_extract(x[0], 1) +           \
          vec_extract(x[0], 2) +           \
          vec_extract(x[0], 3);            \
}

#define GGML_F32_VEC        GGML_F32x4
#define GGML_F32_VEC_ZERO   GGML_F32x4_ZERO
#define GGML_F32_VEC_SET1   GGML_F32x4_SET1
#define GGML_F32_VEC_LOAD   GGML_F32x4_LOAD
#define GGML_F32_VEC_STORE  GGML_F32x4_STORE
#define GGML_F32_VEC_FMA    GGML_F32x4_FMA
#define GGML_F32_VEC_ADD    GGML_F32x4_ADD
#define GGML_F32_VEC_MUL    GGML_F32x4_MUL
#define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE

// F16 POWER9

#define GGML_F16_STEP       GGML_F32_STEP
#define GGML_F16_EPR        GGML_F32_EPR
#define GGML_F16_VEC        GGML_F32x4
#define GGML_F16_VEC_ZERO   GGML_F32x4_ZERO
#define GGML_F16_VEC_SET1   GGML_F32x4_SET1
#define GGML_F16_VEC_FMA    GGML_F32x4_FMA
#define GGML_F16_VEC_REDUCE GGML_F32x4_REDUCE
// Use vec_xl, not vec_ld, in case the load address is not aligned.
#define GGML_F16_VEC_LOAD(p, i) (i & 0x1) ?                     \
  vec_extract_fp32_from_shorth(vec_xl(0, p - GGML_F16_EPR)) :   \
  vec_extract_fp32_from_shortl(vec_xl(0, p))
#define GGML_ENDIAN_BYTE(i) ((unsigned char *)&(uint16_t){1})[i]
#define GGML_F16_VEC_STORE(p, r, i)                             \
  if (i & 0x1)                                                  \
    vec_xst(vec_pack_to_short_fp32(r[i - GGML_ENDIAN_BYTE(1)],  \
                                   r[i - GGML_ENDIAN_BYTE(0)]), \
            0, p - GGML_F16_EPR)
#elif defined(__wasm_simd128__)

#define GGML_SIMD

// F32 WASM

#define GGML_F32_STEP 16
#define GGML_F32_EPR  4

#define GGML_F32x4              v128_t
#define GGML_F32x4_ZERO         wasm_f32x4_splat(0.0f)
#define GGML_F32x4_SET1(x)      wasm_f32x4_splat(x)
#define GGML_F32x4_LOAD         wasm_v128_load
#define GGML_F32x4_STORE        wasm_v128_store
#define GGML_F32x4_FMA(a, b, c) wasm_f32x4_add(wasm_f32x4_mul(b, c), a)
#define GGML_F32x4_ADD          wasm_f32x4_add
#define GGML_F32x4_MUL          wasm_f32x4_mul
#define GGML_F32x4_REDUCE(res, x)                  \
{                                                  \
    int offset = GGML_F32_ARR >> 1;                \
    for (int i = 0; i < offset; ++i) {             \
        x[i] = wasm_f32x4_add(x[i], x[offset+i]);  \
    }                                              \
    offset >>= 1;                                  \
    for (int i = 0; i < offset; ++i) {             \
        x[i] = wasm_f32x4_add(x[i], x[offset+i]);  \
    }                                              \
    offset >>= 1;                                  \
    for (int i = 0; i < offset; ++i) {             \
        x[i] = wasm_f32x4_add(x[i], x[offset+i]);  \
    }                                              \
    res = wasm_f32x4_extract_lane(x[0], 0) +       \
          wasm_f32x4_extract_lane(x[0], 1) +       \
          wasm_f32x4_extract_lane(x[0], 2) +       \
          wasm_f32x4_extract_lane(x[0], 3);        \
}

#define GGML_F32_VEC        GGML_F32x4
#define GGML_F32_VEC_ZERO   GGML_F32x4_ZERO
#define GGML_F32_VEC_SET1   GGML_F32x4_SET1
#define GGML_F32_VEC_LOAD   GGML_F32x4_LOAD
#define GGML_F32_VEC_STORE  GGML_F32x4_STORE
#define GGML_F32_VEC_FMA    GGML_F32x4_FMA
#define GGML_F32_VEC_ADD    GGML_F32x4_ADD
#define GGML_F32_VEC_MUL    GGML_F32x4_MUL
#define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE

// F16 WASM

#define GGML_F16_STEP 16
#define GGML_F16_EPR  4

inline static v128_t __wasm_f16x4_load(const ggml_fp16_t * p) {
    float tmp[4];

    tmp[0] = GGML_FP16_TO_FP32(p[0]);
    tmp[1] = GGML_FP16_TO_FP32(p[1]);
    tmp[2] = GGML_FP16_TO_FP32(p[2]);
    tmp[3] = GGML_FP16_TO_FP32(p[3]);

    return wasm_v128_load(tmp);
}

inline static void __wasm_f16x4_store(ggml_fp16_t * p, v128_t x) {
    float tmp[4];

    wasm_v128_store(tmp, x);

    p[0] = GGML_FP32_TO_FP16(tmp[0]);
    p[1] = GGML_FP32_TO_FP16(tmp[1]);
    p[2] = GGML_FP32_TO_FP16(tmp[2]);
    p[3] = GGML_FP32_TO_FP16(tmp[3]);
}
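// Note: wasm_simd128 has no F16 lanes, so the helpers above emulate a 4-wide F16
// load/store by converting through a scalar float[4] buffer; all "F16" arithmetic
// below therefore actually runs on F32 lanes.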
#define GGML_F16x4             v128_t
#define GGML_F16x4_ZERO        wasm_f32x4_splat(0.0f)
#define GGML_F16x4_SET1(x)     wasm_f32x4_splat(x)
#define GGML_F16x4_LOAD(x)     __wasm_f16x4_load(x)
#define GGML_F16x4_STORE(x, y) __wasm_f16x4_store(x, y)
#define GGML_F16x4_FMA         GGML_F32x4_FMA
#define GGML_F16x4_ADD         wasm_f32x4_add
#define GGML_F16x4_MUL         wasm_f32x4_mul
#define GGML_F16x4_REDUCE(res, x)                  \
{                                                  \
    int offset = GGML_F16_ARR >> 1;                \
    for (int i = 0; i < offset; ++i) {             \
        x[i] = wasm_f32x4_add(x[i], x[offset+i]);  \
    }                                              \
    offset >>= 1;                                  \
    for (int i = 0; i < offset; ++i) {             \
        x[i] = wasm_f32x4_add(x[i], x[offset+i]);  \
    }                                              \
    offset >>= 1;                                  \
    for (int i = 0; i < offset; ++i) {             \
        x[i] = wasm_f32x4_add(x[i], x[offset+i]);  \
    }                                              \
    res = wasm_f32x4_extract_lane(x[0], 0) +       \
          wasm_f32x4_extract_lane(x[0], 1) +       \
          wasm_f32x4_extract_lane(x[0], 2) +       \
          wasm_f32x4_extract_lane(x[0], 3);        \
}

#define GGML_F16_VEC                GGML_F16x4
#define GGML_F16_VEC_ZERO           GGML_F16x4_ZERO
#define GGML_F16_VEC_SET1           GGML_F16x4_SET1
#define GGML_F16_VEC_LOAD(p, i)     GGML_F16x4_LOAD(p)
#define GGML_F16_VEC_STORE(p, r, i) GGML_F16x4_STORE(p, r[i])
#define GGML_F16_VEC_FMA            GGML_F16x4_FMA
#define GGML_F16_VEC_ADD            GGML_F16x4_ADD
#define GGML_F16_VEC_MUL            GGML_F16x4_MUL
#define GGML_F16_VEC_REDUCE         GGML_F16x4_REDUCE
#elif defined(__SSE3__)

#define GGML_SIMD

// F32 SSE

#define GGML_F32_STEP 32
#define GGML_F32_EPR  4

#define GGML_F32x4         __m128
#define GGML_F32x4_ZERO    _mm_setzero_ps()
#define GGML_F32x4_SET1(x) _mm_set1_ps(x)
#define GGML_F32x4_LOAD    _mm_loadu_ps
#define GGML_F32x4_STORE   _mm_storeu_ps
#if defined(__FMA__)
    // TODO: Does this work?
    #define GGML_F32x4_FMA(a, b, c) _mm_fmadd_ps(b, c, a)
#else
    #define GGML_F32x4_FMA(a, b, c) _mm_add_ps(_mm_mul_ps(b, c), a)
#endif
#define GGML_F32x4_ADD _mm_add_ps
#define GGML_F32x4_MUL _mm_mul_ps
#define GGML_F32x4_REDUCE(res, x)                  \
{                                                  \
    int offset = GGML_F32_ARR >> 1;                \
    for (int i = 0; i < offset; ++i) {             \
        x[i] = _mm_add_ps(x[i], x[offset+i]);      \
    }                                              \
    offset >>= 1;                                  \
    for (int i = 0; i < offset; ++i) {             \
        x[i] = _mm_add_ps(x[i], x[offset+i]);      \
    }                                              \
    offset >>= 1;                                  \
    for (int i = 0; i < offset; ++i) {             \
        x[i] = _mm_add_ps(x[i], x[offset+i]);      \
    }                                              \
    const __m128 t0 = _mm_hadd_ps(x[0], x[0]);     \
    res = _mm_cvtss_f32(_mm_hadd_ps(t0, t0));      \
}
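// Note: the final horizontal sum uses two SSE3 _mm_hadd_ps steps:
// (x0+x1, x2+x3, x0+x1, x2+x3), then lane 0 of the second hadd holds the total.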
// TODO: is this optimal ?

#define GGML_F32_VEC        GGML_F32x4
#define GGML_F32_VEC_ZERO   GGML_F32x4_ZERO
#define GGML_F32_VEC_SET1   GGML_F32x4_SET1
#define GGML_F32_VEC_LOAD   GGML_F32x4_LOAD
#define GGML_F32_VEC_STORE  GGML_F32x4_STORE
#define GGML_F32_VEC_FMA    GGML_F32x4_FMA
#define GGML_F32_VEC_ADD    GGML_F32x4_ADD
#define GGML_F32_VEC_MUL    GGML_F32x4_MUL
#define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE

// F16 SSE

#define GGML_F16_STEP 32
#define GGML_F16_EPR  4

static inline __m128 __sse_f16x4_load(ggml_fp16_t *x) {
    float tmp[4];

    tmp[0] = GGML_FP16_TO_FP32(x[0]);
    tmp[1] = GGML_FP16_TO_FP32(x[1]);
    tmp[2] = GGML_FP16_TO_FP32(x[2]);
    tmp[3] = GGML_FP16_TO_FP32(x[3]);

    return _mm_loadu_ps(tmp);
}

static inline void __sse_f16x4_store(ggml_fp16_t *x, __m128 y) {
    float arr[4];

    _mm_storeu_ps(arr, y);

    x[0] = GGML_FP32_TO_FP16(arr[0]);
    x[1] = GGML_FP32_TO_FP16(arr[1]);
    x[2] = GGML_FP32_TO_FP16(arr[2]);
    x[3] = GGML_FP32_TO_FP16(arr[3]);
}

#define GGML_F32Cx4             __m128
#define GGML_F32Cx4_ZERO        _mm_setzero_ps()
#define GGML_F32Cx4_SET1(x)     _mm_set1_ps(x)
#define GGML_F32Cx4_LOAD(x)     __sse_f16x4_load(x)
#define GGML_F32Cx4_STORE(x, y) __sse_f16x4_store(x, y)
#define GGML_F32Cx4_FMA         GGML_F32x4_FMA
#define GGML_F32Cx4_ADD         _mm_add_ps
#define GGML_F32Cx4_MUL         _mm_mul_ps
#define GGML_F32Cx4_REDUCE      GGML_F32x4_REDUCE

#define GGML_F16_VEC                GGML_F32Cx4
#define GGML_F16_VEC_ZERO           GGML_F32Cx4_ZERO
#define GGML_F16_VEC_SET1           GGML_F32Cx4_SET1
#define GGML_F16_VEC_LOAD(p, i)     GGML_F32Cx4_LOAD(p)
#define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx4_STORE(p, r[i])
#define GGML_F16_VEC_FMA            GGML_F32Cx4_FMA
#define GGML_F16_VEC_ADD            GGML_F32Cx4_ADD
#define GGML_F16_VEC_MUL            GGML_F32Cx4_MUL
#define GGML_F16_VEC_REDUCE         GGML_F32Cx4_REDUCE
#endif

// GGML_F32_ARR / GGML_F16_ARR
//   number of registers to use per step
#ifdef GGML_SIMD
#define GGML_F32_ARR (GGML_F32_STEP/GGML_F32_EPR)
#define GGML_F16_ARR (GGML_F16_STEP/GGML_F16_EPR)
#endif
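// Worked example: with the SSE3 settings above (GGML_F32_STEP = 32, GGML_F32_EPR = 4)
// this gives GGML_F32_ARR = 8, i.e. the inner loops below keep 8 independent
// partial-sum registers in flight before reducing them.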
//
// fundamental operations
//

inline static void ggml_vec_set_i8(const int n, int8_t * x, const int8_t v) { for (int i = 0; i < n; ++i) x[i] = v; }

inline static void ggml_vec_set_i16(const int n, int16_t * x, const int16_t v) { for (int i = 0; i < n; ++i) x[i] = v; }

inline static void ggml_vec_set_i32(const int n, int32_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; }

inline static void ggml_vec_set_f16(const int n, ggml_fp16_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; }

inline static void ggml_vec_add_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i] + y[i]; }
inline static void ggml_vec_add1_f32(const int n, float * z, const float * x, const float   v) { for (int i = 0; i < n; ++i) z[i] = x[i] + v; }
inline static void ggml_vec_acc_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] += x[i]; }
inline static void ggml_vec_acc1_f32(const int n, float * y, const float   v) { for (int i = 0; i < n; ++i) y[i] += v; }
inline static void ggml_vec_sub_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i] - y[i]; }
inline static void ggml_vec_set_f32 (const int n, float * x, const float v) { for (int i = 0; i < n; ++i) x[i] = v; }
inline static void ggml_vec_cpy_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i]; }
inline static void ggml_vec_neg_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = -x[i]; }
inline static void ggml_vec_mul_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]*y[i]; }
inline static void ggml_vec_div_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]/y[i]; }
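// The SIMD dot products below share one pattern: np = n & ~(GGML_F32_STEP - 1)
// rounds n down to a whole number of steps (valid because the step is a power of
// two), the main loop feeds GGML_F32_ARR accumulators, GGML_F32_VEC_REDUCE
// collapses them, and a scalar tail handles the remaining n - np elements.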
static void ggml_vec_dot_f32(const int n, float * restrict s, const float * restrict x, const float * restrict y) {
#ifdef GGML_SIMD
    float sumf = 0.0f;
    const int np = (n & ~(GGML_F32_STEP - 1));

    GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO };

    GGML_F32_VEC ax[GGML_F32_ARR];
    GGML_F32_VEC ay[GGML_F32_ARR];

    for (int i = 0; i < np; i += GGML_F32_STEP) {
        for (int j = 0; j < GGML_F32_ARR; j++) {
            ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR);
            ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);

            sum[j] = GGML_F32_VEC_FMA(sum[j], ax[j], ay[j]);
        }
    }

    // reduce sum0..sum3 to sum0
    GGML_F32_VEC_REDUCE(sumf, sum);

    // leftovers
    for (int i = np; i < n; ++i) {
        sumf += x[i]*y[i];
    }
#else
    // scalar
    ggml_float sumf = 0.0;
    for (int i = 0; i < n; ++i) {
        sumf += (ggml_float)(x[i]*y[i]);
    }
#endif

    *s = sumf;
}
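// Note: depending on the backend selected above, GGML_F16_VEC may really be an
// F32 vector converted on load/store (the F32Cx* types); the extra index
// argument to GGML_F16_VEC_LOAD is unused except by the paired POWER9 variant.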
static void ggml_vec_dot_f16(const int n, float * restrict s, ggml_fp16_t * restrict x, ggml_fp16_t * restrict y) {
    ggml_float sumf = 0.0;

#if defined(GGML_SIMD)
    const int np = (n & ~(GGML_F16_STEP - 1));

    GGML_F16_VEC sum[GGML_F16_ARR] = { GGML_F16_VEC_ZERO };

    GGML_F16_VEC ax[GGML_F16_ARR];
    GGML_F16_VEC ay[GGML_F16_ARR];

    for (int i = 0; i < np; i += GGML_F16_STEP) {
        for (int j = 0; j < GGML_F16_ARR; j++) {
            ax[j] = GGML_F16_VEC_LOAD(x + i + j*GGML_F16_EPR, j);
            ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j);

            sum[j] = GGML_F16_VEC_FMA(sum[j], ax[j], ay[j]);
        }
    }

    // reduce sum0..sum3 to sum0
    GGML_F16_VEC_REDUCE(sumf, sum);

    // leftovers
    for (int i = np; i < n; ++i) {
        sumf += (ggml_float)(GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i]));
    }
#else
    for (int i = 0; i < n; ++i) {
        sumf += (ggml_float)(GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i]));
    }
#endif

    *s = sumf;
}
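// q4_0 * q8_0 dot product. Each q4_0 block packs qk = 32 weights as nibbles in 16
// bytes plus one F16 scale d: byte j holds weight j in its low nibble and weight
// j + qk/2 in its high nibble, and a weight decodes as d * (nibble - 8). All paths
// below compute the integer dot product per block and scale it by d_x * d_y; the
// scalar fallback at the end of the function is the reference implementation.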
static void ggml_vec_dot_q4_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
    const int qk = QK8_0;
    const int nb = n / qk;

    assert(n % qk == 0);

    const block_q4_0 * restrict x = vx;
    const block_q8_0 * restrict y = vy;

#if defined(__ARM_NEON)
    float32x4_t sumv0 = vdupq_n_f32(0.0f);
    float32x4_t sumv1 = vdupq_n_f32(0.0f);

    GGML_ASSERT(nb % 2 == 0); // TODO: handle odd nb

    for (int i = 0; i < nb; i += 2) {
        const block_q4_0 * restrict x0 = &x[i + 0];
        const block_q4_0 * restrict x1 = &x[i + 1];
        const block_q8_0 * restrict y0 = &y[i + 0];
        const block_q8_0 * restrict y1 = &y[i + 1];

        const uint8x16_t m4b = vdupq_n_u8(0x0F);
        const int8x16_t  s8b = vdupq_n_s8(0x8);

        const uint8x16_t v0_0 = vld1q_u8(x0->qs);
        const uint8x16_t v0_1 = vld1q_u8(x1->qs);

        // 4-bit -> 8-bit
        const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
        const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
        const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
        const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));

        // sub 8
        const int8x16_t v0_0ls = vsubq_s8(v0_0l, s8b);
        const int8x16_t v0_0hs = vsubq_s8(v0_0h, s8b);
        const int8x16_t v0_1ls = vsubq_s8(v0_1l, s8b);
        const int8x16_t v0_1hs = vsubq_s8(v0_1h, s8b);

        // load y
        const int8x16_t v1_0l = vld1q_s8(y0->qs);
        const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
        const int8x16_t v1_1l = vld1q_s8(y1->qs);
        const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);

#if defined(__ARM_FEATURE_DOTPROD)
        // dot product into int32x4_t
        const int32x4_t p_0 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_0ls, v1_0l), v0_0hs, v1_0h);
        const int32x4_t p_1 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_1ls, v1_1l), v0_1hs, v1_1h);

        sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
        sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
#else
        const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0ls), vget_low_s8 (v1_0l));
        const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0ls), vget_high_s8(v1_0l));
        const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hs), vget_low_s8 (v1_0h));
        const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hs), vget_high_s8(v1_0h));

        const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1ls), vget_low_s8 (v1_1l));
        const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1ls), vget_high_s8(v1_1l));
        const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1hs), vget_low_s8 (v1_1h));
        const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1hs), vget_high_s8(v1_1h));

        const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h));
        const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h));
        const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h));
        const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h));

        sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
        sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
#endif
    }

    *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
#elif defined(__AVX2__)
    // Initialize accumulator with zeros
    __m256 acc = _mm256_setzero_ps();

    // Main loop
    for (int i = 0; i < nb; ++i) {
        /* Compute combined scale for the block */
        const __m256 d = _mm256_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) );

        __m256i bx = bytes_from_nibbles_32(x[i].qs);

        // Now we have a vector with bytes in [ 0 .. 15 ] interval. Offset them into [ -8 .. +7 ] interval.
        const __m256i off = _mm256_set1_epi8( 8 );
        bx = _mm256_sub_epi8( bx, off );

        __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);

        const __m256 q = mul_sum_i8_pairs_float(bx, by);

        /* Multiply q with scale and accumulate */
        acc = _mm256_fmadd_ps( d, q, acc );
    }

    *s = hsum_float_8(acc);
#elif defined(__AVX__)
    // Initialize accumulator with zeros
    __m256 acc = _mm256_setzero_ps();

    // Main loop
    for (int i = 0; i < nb; ++i) {
        // Compute combined scale for the block
        const __m256 d = _mm256_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) );

        const __m128i lowMask = _mm_set1_epi8(0xF);
        const __m128i off = _mm_set1_epi8(8);

        const __m128i tmp = _mm_loadu_si128((const __m128i *)x[i].qs);

        __m128i bx = _mm_and_si128(lowMask, tmp);
        __m128i by = _mm_loadu_si128((const __m128i *)y[i].qs);
        bx = _mm_sub_epi8(bx, off);
        const __m128i i32_0 = mul_sum_i8_pairs(bx, by);

        bx = _mm_and_si128(lowMask, _mm_srli_epi64(tmp, 4));
        by = _mm_loadu_si128((const __m128i *)(y[i].qs + 16));
        bx = _mm_sub_epi8(bx, off);
        const __m128i i32_1 = mul_sum_i8_pairs(bx, by);

        // Convert int32_t to float
        __m256 p = _mm256_cvtepi32_ps(MM256_SET_M128I(i32_0, i32_1));

        // Apply the scale, and accumulate
        acc = _mm256_add_ps(_mm256_mul_ps( d, p ), acc);
    }

    *s = hsum_float_8(acc);
#elif defined(__SSSE3__)
    // set constants
    const __m128i lowMask = _mm_set1_epi8(0xF);
    const __m128i off = _mm_set1_epi8(8);

    // Initialize accumulator with zeros
    __m128 acc_0 = _mm_setzero_ps();
    __m128 acc_1 = _mm_setzero_ps();
    __m128 acc_2 = _mm_setzero_ps();
    __m128 acc_3 = _mm_setzero_ps();

    // First round without accumulation
    {
        _mm_prefetch(&x[0] + sizeof(block_q4_0), _MM_HINT_T0);
        _mm_prefetch(&y[0] + sizeof(block_q8_0), _MM_HINT_T0);

        // Compute combined scale for the block 0 and 1
        const __m128 d_0_1 = _mm_set1_ps( GGML_FP16_TO_FP32(x[0].d) * GGML_FP16_TO_FP32(y[0].d) );

        const __m128i tmp_0_1 = _mm_loadu_si128((const __m128i *)x[0].qs);

        __m128i bx_0 = _mm_and_si128(lowMask, tmp_0_1);
        __m128i by_0 = _mm_loadu_si128((const __m128i *)y[0].qs);
        bx_0 = _mm_sub_epi8(bx_0, off);
        const __m128i i32_0 = mul_sum_i8_pairs(bx_0, by_0);

        __m128i bx_1 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_0_1, 4));
        __m128i by_1 = _mm_loadu_si128((const __m128i *)(y[0].qs + 16));
        bx_1 = _mm_sub_epi8(bx_1, off);
        const __m128i i32_1 = mul_sum_i8_pairs(bx_1, by_1);

        _mm_prefetch(&x[1] + sizeof(block_q4_0), _MM_HINT_T0);
        _mm_prefetch(&y[1] + sizeof(block_q8_0), _MM_HINT_T0);

        // Compute combined scale for the block 2 and 3
        const __m128 d_2_3 = _mm_set1_ps( GGML_FP16_TO_FP32(x[1].d) * GGML_FP16_TO_FP32(y[1].d) );

        const __m128i tmp_2_3 = _mm_loadu_si128((const __m128i *)x[1].qs);

        __m128i bx_2 = _mm_and_si128(lowMask, tmp_2_3);
        __m128i by_2 = _mm_loadu_si128((const __m128i *)y[1].qs);
        bx_2 = _mm_sub_epi8(bx_2, off);
        const __m128i i32_2 = mul_sum_i8_pairs(bx_2, by_2);

        __m128i bx_3 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_2_3, 4));
        __m128i by_3 = _mm_loadu_si128((const __m128i *)(y[1].qs + 16));
        bx_3 = _mm_sub_epi8(bx_3, off);
        const __m128i i32_3 = mul_sum_i8_pairs(bx_3, by_3);

        // Convert int32_t to float
        __m128 p0 = _mm_cvtepi32_ps(i32_0);
        __m128 p1 = _mm_cvtepi32_ps(i32_1);
        __m128 p2 = _mm_cvtepi32_ps(i32_2);
        __m128 p3 = _mm_cvtepi32_ps(i32_3);

        // Apply the scale
        acc_0 = _mm_mul_ps( d_0_1, p0 );
        acc_1 = _mm_mul_ps( d_0_1, p1 );
        acc_2 = _mm_mul_ps( d_2_3, p2 );
        acc_3 = _mm_mul_ps( d_2_3, p3 );
    }

    // Main loop
    GGML_ASSERT(nb % 2 == 0); // TODO: handle odd nb

    for (int i = 2; i < nb; i += 2) {
        _mm_prefetch(&x[i] + sizeof(block_q4_0), _MM_HINT_T0);
        _mm_prefetch(&y[i] + sizeof(block_q8_0), _MM_HINT_T0);

        // Compute combined scale for the block 0 and 1
        const __m128 d_0_1 = _mm_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) );

        const __m128i tmp_0_1 = _mm_loadu_si128((const __m128i *)x[i].qs);

        __m128i bx_0 = _mm_and_si128(lowMask, tmp_0_1);
        __m128i by_0 = _mm_loadu_si128((const __m128i *)y[i].qs);
        bx_0 = _mm_sub_epi8(bx_0, off);
        const __m128i i32_0 = mul_sum_i8_pairs(bx_0, by_0);

        __m128i bx_1 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_0_1, 4));
        __m128i by_1 = _mm_loadu_si128((const __m128i *)(y[i].qs + 16));
        bx_1 = _mm_sub_epi8(bx_1, off);
        const __m128i i32_1 = mul_sum_i8_pairs(bx_1, by_1);

        _mm_prefetch(&x[i] + 2 * sizeof(block_q4_0), _MM_HINT_T0);
        _mm_prefetch(&y[i] + 2 * sizeof(block_q8_0), _MM_HINT_T0);

        // Compute combined scale for the block 2 and 3
        const __m128 d_2_3 = _mm_set1_ps( GGML_FP16_TO_FP32(x[i + 1].d) * GGML_FP16_TO_FP32(y[i + 1].d) );

        const __m128i tmp_2_3 = _mm_loadu_si128((const __m128i *)x[i + 1].qs);

        __m128i bx_2 = _mm_and_si128(lowMask, tmp_2_3);
        __m128i by_2 = _mm_loadu_si128((const __m128i *)y[i + 1].qs);
        bx_2 = _mm_sub_epi8(bx_2, off);
        const __m128i i32_2 = mul_sum_i8_pairs(bx_2, by_2);

        __m128i bx_3 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_2_3, 4));
        __m128i by_3 = _mm_loadu_si128((const __m128i *)(y[i + 1].qs + 16));
        bx_3 = _mm_sub_epi8(bx_3, off);
        const __m128i i32_3 = mul_sum_i8_pairs(bx_3, by_3);

        // Convert int32_t to float
        __m128 p0 = _mm_cvtepi32_ps(i32_0);
        __m128 p1 = _mm_cvtepi32_ps(i32_1);
        __m128 p2 = _mm_cvtepi32_ps(i32_2);
        __m128 p3 = _mm_cvtepi32_ps(i32_3);

        // Apply the scale
        __m128 p0_d = _mm_mul_ps( d_0_1, p0 );
        __m128 p1_d = _mm_mul_ps( d_0_1, p1 );
        __m128 p2_d = _mm_mul_ps( d_2_3, p2 );
        __m128 p3_d = _mm_mul_ps( d_2_3, p3 );

        // Accumulate
        acc_0 = _mm_add_ps(p0_d, acc_0);
        acc_1 = _mm_add_ps(p1_d, acc_1);
        acc_2 = _mm_add_ps(p2_d, acc_2);
        acc_3 = _mm_add_ps(p3_d, acc_3);
    }

    *s = hsum_float_4x4(acc_0, acc_1, acc_2, acc_3);
#elif defined(__riscv_v_intrinsic)
    float sumf = 0.0;

    size_t vl = __riscv_vsetvl_e8m1(qk/2);

    for (int i = 0; i < nb; i++) {
        vuint8m1_t tx = __riscv_vle8_v_u8m1(x[i].qs, vl);

        vint8m1_t y0 = __riscv_vle8_v_i8m1(y[i].qs, vl);
        vint8m1_t y1 = __riscv_vle8_v_i8m1(y[i].qs+16, vl);

        vuint8m1_t x_a = __riscv_vand_vx_u8m1(tx, 0x0F, vl);
        vuint8m1_t x_l = __riscv_vsrl_vx_u8m1(tx, 0x04, vl);

        vint8m1_t x_ai = __riscv_vreinterpret_v_u8m1_i8m1(x_a);
        vint8m1_t x_li = __riscv_vreinterpret_v_u8m1_i8m1(x_l);

        vint8m1_t v0 = __riscv_vsub_vx_i8m1(x_ai, 8, vl);
        vint8m1_t v1 = __riscv_vsub_vx_i8m1(x_li, 8, vl);

        vint16m2_t vec_mul1 = __riscv_vwmul_vv_i16m2(v0, y0, vl);
        vint16m2_t vec_mul2 = __riscv_vwmul_vv_i16m2(v1, y1, vl);

        vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl);

        vint32m1_t vs1 = __riscv_vwredsum_vs_i16m2_i32m1(vec_mul1, vec_zero, vl);
        vint32m1_t vs2 = __riscv_vwredsum_vs_i16m2_i32m1(vec_mul2, vec_zero, vl);

        int sumi = __riscv_vmv_x_s_i32m1_i32(vs1);
        sumi += __riscv_vmv_x_s_i32m1_i32(vs2);

        sumf += sumi*GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d);
    }

    *s = sumf;
#else
    // scalar
    float sumf = 0.0;

    for (int i = 0; i < nb; i++) {
        int sumi = 0;

        for (int j = 0; j < qk/2; ++j) {
            const int v0 = (x[i].qs[j] & 0x0F) - 8;
            const int v1 = (x[i].qs[j] >>   4) - 8;

            sumi += (v0 * y[i].qs[j]) + (v1 * y[i].qs[j + qk/2]);
        }

        sumf += sumi*GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d);
    }

    *s = sumf;
#endif
}
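// q4_1 * q8_1 dot product. q4_1 adds a per-block minimum m, so a weight decodes
// as d * nibble + m. Because sum(y) over the block is precomputed in y->s, the m
// term reduces to m * y->s per block; the kernels accumulate it separately in summs.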
static void ggml_vec_dot_q4_1_q8_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
    const int qk = QK8_1;
    const int nb = n / qk;

    assert(n % qk == 0);

    const block_q4_1 * restrict x = vx;
    const block_q8_1 * restrict y = vy;

    // TODO: add WASM SIMD
#if defined(__ARM_NEON)
    float32x4_t sumv0 = vdupq_n_f32(0.0f);
    float32x4_t sumv1 = vdupq_n_f32(0.0f);

    float summs = 0;

    GGML_ASSERT(nb % 2 == 0); // TODO: handle odd nb

    for (int i = 0; i < nb; i += 2) {
        const block_q4_1 * restrict x0 = &x[i + 0];
        const block_q4_1 * restrict x1 = &x[i + 1];
        const block_q8_1 * restrict y0 = &y[i + 0];
        const block_q8_1 * restrict y1 = &y[i + 1];

        summs += GGML_FP16_TO_FP32(x0->m) * y0->s + GGML_FP16_TO_FP32(x1->m) * y1->s;

        const uint8x16_t m4b = vdupq_n_u8(0x0F);

        const uint8x16_t v0_0 = vld1q_u8(x0->qs);
        const uint8x16_t v0_1 = vld1q_u8(x1->qs);

        // 4-bit -> 8-bit
        const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
        const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
        const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
        const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));

        // load y
        const int8x16_t v1_0l = vld1q_s8(y0->qs);
        const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
        const int8x16_t v1_1l = vld1q_s8(y1->qs);
        const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);

#if defined(__ARM_FEATURE_DOTPROD)
        // dot product into int32x4_t
        const int32x4_t p_0 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_0l, v1_0l), v0_0h, v1_0h);
        const int32x4_t p_1 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_1l, v1_1l), v0_1h, v1_1h);

        sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), GGML_FP16_TO_FP32(x0->d)*y0->d);
        sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_FP16_TO_FP32(x1->d)*y1->d);
#else
        const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0l), vget_low_s8 (v1_0l));
        const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0l), vget_high_s8(v1_0l));
        const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0h), vget_low_s8 (v1_0h));
        const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0h), vget_high_s8(v1_0h));

        const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1l), vget_low_s8 (v1_1l));
        const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1l), vget_high_s8(v1_1l));
        const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1h), vget_low_s8 (v1_1h));
        const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1h), vget_high_s8(v1_1h));

        const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h));
        const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h));
        const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h));
        const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h));

        sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), GGML_FP16_TO_FP32(x0->d)*y0->d);
        sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), GGML_FP16_TO_FP32(x1->d)*y1->d);
#endif
    }

    *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs;
#elif defined(__AVX2__) || defined(__AVX__)
    // Initialize accumulator with zeros
    __m256 acc = _mm256_setzero_ps();

    float summs = 0;

    // Main loop
    for (int i = 0; i < nb; ++i) {
        const float d0 = GGML_FP16_TO_FP32(x[i].d);
        const float d1 = y[i].d;

        summs += GGML_FP16_TO_FP32(x[i].m) * y[i].s;

        const __m256 d0v = _mm256_set1_ps( d0 );
        const __m256 d1v = _mm256_set1_ps( d1 );

        // Compute combined scales
        const __m256 d0d1 = _mm256_mul_ps( d0v, d1v );

        // Load 16 bytes, and unpack 4 bit fields into bytes, making 32 bytes
        const __m256i bx = bytes_from_nibbles_32(x[i].qs);
        const __m256i by = _mm256_loadu_si256( (const __m256i *)y[i].qs );

        const __m256 xy = mul_sum_us8_pairs_float(bx, by);

        // Accumulate d0*d1*x*y
#if defined(__AVX2__)
        acc = _mm256_fmadd_ps( d0d1, xy, acc );
#else
        acc = _mm256_add_ps( _mm256_mul_ps( d0d1, xy ), acc );
#endif
    }

    *s = hsum_float_8(acc) + summs;
#elif defined(__riscv_v_intrinsic)
    float sumf = 0.0;

    size_t vl = __riscv_vsetvl_e8m1(qk/2);

    for (int i = 0; i < nb; i++) {
        vuint8m1_t tx = __riscv_vle8_v_u8m1(x[i].qs, vl);

        vint8m1_t y0 = __riscv_vle8_v_i8m1(y[i].qs, vl);
        vint8m1_t y1 = __riscv_vle8_v_i8m1(y[i].qs+16, vl);

        vuint8m1_t x_a = __riscv_vand_vx_u8m1(tx, 0x0F, vl);
        vuint8m1_t x_l = __riscv_vsrl_vx_u8m1(tx, 0x04, vl);

        vint8m1_t v0 = __riscv_vreinterpret_v_u8m1_i8m1(x_a);
        vint8m1_t v1 = __riscv_vreinterpret_v_u8m1_i8m1(x_l);

        vint16m2_t vec_mul1 = __riscv_vwmul_vv_i16m2(v0, y0, vl);
        vint16m2_t vec_mul2 = __riscv_vwmul_vv_i16m2(v1, y1, vl);

        vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl);

        vint32m1_t vs1 = __riscv_vwredsum_vs_i16m2_i32m1(vec_mul1, vec_zero, vl);
        vint32m1_t vs2 = __riscv_vwredsum_vs_i16m2_i32m1(vec_mul2, vec_zero, vl);

        int sumi = __riscv_vmv_x_s_i32m1_i32(vs1);
        sumi += __riscv_vmv_x_s_i32m1_i32(vs2);

        sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s;
    }

    *s = sumf;
#else
    // scalar
    float sumf = 0.0;

    for (int i = 0; i < nb; i++) {
        int sumi = 0;

        for (int j = 0; j < qk/2; ++j) {
            const int v0 = (x[i].qs[j] & 0x0F);
            const int v1 = (x[i].qs[j] >>   4);

            sumi += (v0 * y[i].qs[j]) + (v1 * y[i].qs[j + qk/2]);
        }

        sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s;
    }

    *s = sumf;
#endif
}
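// q5_0 * q8_0 dot product. The 5th (high) bit of each of the 32 weights lives in
// x->qh; a weight decodes as d * ((nibble | (bit << 4)) - 16). The SIMD paths expand
// qh bytewise through table_b2b_1, which yields (!bit) << 4 per lane, so a single
// vector subtract computes nibble - 16*(1 - bit) = nibble + 16*bit - 16.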
static void ggml_vec_dot_q5_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
    const int qk = QK8_0;
    const int nb = n / qk;

    assert(n % qk == 0);
    assert(qk == QK5_0);

    const block_q5_0 * restrict x = vx;
    const block_q8_0 * restrict y = vy;

#if defined(__ARM_NEON)
    float32x4_t sumv0 = vdupq_n_f32(0.0f);
    float32x4_t sumv1 = vdupq_n_f32(0.0f);

    uint32_t qh0;
    uint32_t qh1;

    uint64_t tmp0[4];
    uint64_t tmp1[4];

    GGML_ASSERT(nb % 2 == 0); // TODO: handle odd nb

    for (int i = 0; i < nb; i += 2) {
        const block_q5_0 * restrict x0 = &x[i];
        const block_q5_0 * restrict x1 = &x[i + 1];
        const block_q8_0 * restrict y0 = &y[i];
        const block_q8_0 * restrict y1 = &y[i + 1];

        const uint8x16_t m4b = vdupq_n_u8(0x0F);

        // extract the 5th bit via lookup table ((!b) << 4)
        memcpy(&qh0, x0->qh, sizeof(qh0));
        memcpy(&qh1, x1->qh, sizeof(qh1));

        tmp0[0] = table_b2b_1[(qh0 >>  0) & 0xFF];
        tmp0[1] = table_b2b_1[(qh0 >>  8) & 0xFF];
        tmp0[2] = table_b2b_1[(qh0 >> 16) & 0xFF];
        tmp0[3] = table_b2b_1[(qh0 >> 24)       ];

        tmp1[0] = table_b2b_1[(qh1 >>  0) & 0xFF];
        tmp1[1] = table_b2b_1[(qh1 >>  8) & 0xFF];
        tmp1[2] = table_b2b_1[(qh1 >> 16) & 0xFF];
        tmp1[3] = table_b2b_1[(qh1 >> 24)       ];

        const int8x16_t qhl0 = vld1q_s8((const int8_t *)(tmp0 + 0));
        const int8x16_t qhh0 = vld1q_s8((const int8_t *)(tmp0 + 2));
        const int8x16_t qhl1 = vld1q_s8((const int8_t *)(tmp1 + 0));
        const int8x16_t qhh1 = vld1q_s8((const int8_t *)(tmp1 + 2));

        const uint8x16_t v0_0 = vld1q_u8(x0->qs);
        const uint8x16_t v0_1 = vld1q_u8(x1->qs);

        // 4-bit -> 8-bit
        int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
        int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
        int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
        int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));

        // add high bit and sub 16 (equivalent to sub 0x10 when bit is zero)
        const int8x16_t v0_0lf = vsubq_s8(v0_0l, qhl0);
        const int8x16_t v0_0hf = vsubq_s8(v0_0h, qhh0);
        const int8x16_t v0_1lf = vsubq_s8(v0_1l, qhl1);
        const int8x16_t v0_1hf = vsubq_s8(v0_1h, qhh1);

        // load y
        const int8x16_t v1_0l = vld1q_s8(y0->qs);
        const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
        const int8x16_t v1_1l = vld1q_s8(y1->qs);
        const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);

#if defined(__ARM_FEATURE_DOTPROD)
        sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(
                        vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l),
                        vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
        sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(
                        vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l),
                        vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
#else
        const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0lf), vget_low_s8 (v1_0l));
        const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0lf), vget_high_s8(v1_0l));
        const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hf), vget_low_s8 (v1_0h));
        const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hf), vget_high_s8(v1_0h));

        const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1lf), vget_low_s8 (v1_1l));
        const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1lf), vget_high_s8(v1_1l));
        const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1hf), vget_low_s8 (v1_1h));
        const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1hf), vget_high_s8(v1_1h));

        const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h));
        const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h));
        const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h));
        const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h));

        sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
        sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
#endif
    }

    *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
#elif defined(__wasm_simd128__)
    v128_t sumv = wasm_f32x4_splat(0.0f);

    uint32_t qh;
    uint64_t tmp[4];

    // TODO: check if unrolling this is better
    for (int i = 0; i < nb; ++i) {
        const block_q5_0 * restrict x0 = &x[i];
        const block_q8_0 * restrict y0 = &y[i];

        const v128_t m4b = wasm_i8x16_splat(0x0F);

        // extract the 5th bit
        memcpy(&qh, x0->qh, sizeof(qh));

        tmp[0] = table_b2b_1[(qh >>  0) & 0xFF];
        tmp[1] = table_b2b_1[(qh >>  8) & 0xFF];
        tmp[2] = table_b2b_1[(qh >> 16) & 0xFF];
        tmp[3] = table_b2b_1[(qh >> 24)       ];

        const v128_t qhl = wasm_v128_load(tmp + 0);
        const v128_t qhh = wasm_v128_load(tmp + 2);

        const v128_t v0 = wasm_v128_load(x0->qs);

        // 4-bit -> 8-bit
        const v128_t v0l = wasm_v128_and (v0, m4b);
        const v128_t v0h = wasm_u8x16_shr(v0, 4);

        // add high bit and sub 16 (equivalent to sub 0x10 when bit is zero)
        const v128_t v0lf = wasm_i8x16_sub(v0l, qhl);
        const v128_t v0hf = wasm_i8x16_sub(v0h, qhh);

        // load y
        const v128_t v1l = wasm_v128_load(y0->qs);
        const v128_t v1h = wasm_v128_load(y0->qs + 16);

        // int8x16 -> int16x8
        const v128_t v0lfl = wasm_i16x8_extend_low_i8x16 (v0lf);
        const v128_t v0lfh = wasm_i16x8_extend_high_i8x16(v0lf);
        const v128_t v0hfl = wasm_i16x8_extend_low_i8x16 (v0hf);
        const v128_t v0hfh = wasm_i16x8_extend_high_i8x16(v0hf);

        const v128_t v1ll = wasm_i16x8_extend_low_i8x16 (v1l);
        const v128_t v1lh = wasm_i16x8_extend_high_i8x16(v1l);
        const v128_t v1hl = wasm_i16x8_extend_low_i8x16 (v1h);
        const v128_t v1hh = wasm_i16x8_extend_high_i8x16(v1h);

        // dot product
        sumv = wasm_f32x4_add(sumv, wasm_f32x4_mul(wasm_f32x4_convert_i32x4(
                        wasm_i32x4_add(
                            wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0lfl, v1ll),
                                           wasm_i32x4_dot_i16x8(v0lfh, v1lh)),
                            wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0hfl, v1hl),
                                           wasm_i32x4_dot_i16x8(v0hfh, v1hh)))),
                    wasm_f32x4_splat(GGML_FP16_TO_FP32(x0->d) * GGML_FP16_TO_FP32(y0->d))));
    }

    *s = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) +
         wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3);
#elif defined(__AVX2__)
    // Initialize accumulator with zeros
    __m256 acc = _mm256_setzero_ps();

    // Main loop
    for (int i = 0; i < nb; i++) {
        /* Compute combined scale for the block */
        const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d));

        __m256i bx = bytes_from_nibbles_32(x[i].qs);
        __m256i bxhi = bytes_from_bits_32(x[i].qh);
        bxhi = _mm256_andnot_si256(bxhi, _mm256_set1_epi8((char)0xF0));
        bx = _mm256_or_si256(bx, bxhi);

        __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);

        const __m256 q = mul_sum_i8_pairs_float(bx, by);

        /* Multiply q with scale and accumulate */
        acc = _mm256_fmadd_ps(d, q, acc);
    }

    *s = hsum_float_8(acc);
#elif defined(__AVX__)
    // Initialize accumulator with zeros
    __m256 acc = _mm256_setzero_ps();
    __m128i mask = _mm_set1_epi8((char)0xF0);

    // Main loop
    for (int i = 0; i < nb; i++) {
        /* Compute combined scale for the block */
        const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d));

        __m256i bx = bytes_from_nibbles_32(x[i].qs);
        const __m256i bxhi = bytes_from_bits_32(x[i].qh);
        __m128i bxhil = _mm256_castsi256_si128(bxhi);
        __m128i bxhih = _mm256_extractf128_si256(bxhi, 1);
        bxhil = _mm_andnot_si128(bxhil, mask);
        bxhih = _mm_andnot_si128(bxhih, mask);
        __m128i bxl = _mm256_castsi256_si128(bx);
        __m128i bxh = _mm256_extractf128_si256(bx, 1);
        bxl = _mm_or_si128(bxl, bxhil);
        bxh = _mm_or_si128(bxh, bxhih);
        bx = MM256_SET_M128I(bxh, bxl);

        const __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);

        const __m256 q = mul_sum_i8_pairs_float(bx, by);

        /* Multiply q with scale and accumulate */
        acc = _mm256_add_ps(_mm256_mul_ps(d, q), acc);
    }

    *s = hsum_float_8(acc);
#elif defined(__riscv_v_intrinsic)
    float sumf = 0.0;

    uint32_t qh;

    // These temp values are for masking and shift operations
    uint32_t temp_1[16] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
    uint32_t temp_2[16] = {0x1,   0x2,   0x4,   0x8,   0x10,   0x20,   0x40,   0x80,
                           0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000};

    size_t vl = __riscv_vsetvl_e8m1(qk/2);

    for (int i = 0; i < nb; i++) {
        memcpy(&qh, x[i].qh, sizeof(uint32_t));

        // temporary registers
        vuint32m4_t vt_1 = __riscv_vle32_v_u32m4(temp_2, vl);
        vuint32m4_t vt_2 = __riscv_vle32_v_u32m4(temp_1, vl);
        vuint32m4_t vt_3 = __riscv_vsll_vx_u32m4(vt_1, 16, vl);
        vuint32m4_t vt_4 = __riscv_vadd_vx_u32m4(vt_2, 12, vl);

        // ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4;
        vuint32m4_t xha_0 = __riscv_vand_vx_u32m4(vt_1, qh, vl);
        vuint32m4_t xhr_0 = __riscv_vsrl_vv_u32m4(xha_0, vt_2, vl);
        vuint32m4_t xhl_0 = __riscv_vsll_vx_u32m4(xhr_0, 4, vl);

        // ((qh & (1u << (j + 16))) >> (j + 12));
        vuint32m4_t xha_1 = __riscv_vand_vx_u32m4(vt_3, qh, vl);
        vuint32m4_t xhl_1 = __riscv_vsrl_vv_u32m4(xha_1, vt_4, vl);

        // narrowing
        vuint16m2_t xhc_0 = __riscv_vncvt_x_x_w_u16m2(xhl_0, vl);
        vuint8m1_t  xh_0  = __riscv_vncvt_x_x_w_u8m1(xhc_0, vl);

        vuint16m2_t xhc_1 = __riscv_vncvt_x_x_w_u16m2(xhl_1, vl);
        vuint8m1_t  xh_1  = __riscv_vncvt_x_x_w_u8m1(xhc_1, vl);

        // load
        vuint8m1_t tx = __riscv_vle8_v_u8m1(x[i].qs, vl);

        vint8m1_t y0 = __riscv_vle8_v_i8m1(y[i].qs, vl);
        vint8m1_t y1 = __riscv_vle8_v_i8m1(y[i].qs+16, vl);

        vuint8m1_t x_at = __riscv_vand_vx_u8m1(tx, 0x0F, vl);
        vuint8m1_t x_lt = __riscv_vsrl_vx_u8m1(tx, 0x04, vl);

        vuint8m1_t x_a = __riscv_vor_vv_u8m1(x_at, xh_0, vl);
        vuint8m1_t x_l = __riscv_vor_vv_u8m1(x_lt, xh_1, vl);

        vint8m1_t x_ai = __riscv_vreinterpret_v_u8m1_i8m1(x_a);
        vint8m1_t x_li = __riscv_vreinterpret_v_u8m1_i8m1(x_l);

        vint8m1_t v0 = __riscv_vsub_vx_i8m1(x_ai, 16, vl);
        vint8m1_t v1 = __riscv_vsub_vx_i8m1(x_li, 16, vl);

        vint16m2_t vec_mul1 = __riscv_vwmul_vv_i16m2(v0, y0, vl);
        vint16m2_t vec_mul2 = __riscv_vwmul_vv_i16m2(v1, y1, vl);

        vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl);

        vint32m1_t vs1 = __riscv_vwredsum_vs_i16m2_i32m1(vec_mul1, vec_zero, vl);
        vint32m1_t vs2 = __riscv_vwredsum_vs_i16m2_i32m1(vec_mul2, vec_zero, vl);

        int sumi = __riscv_vmv_x_s_i32m1_i32(vs1);
        sumi += __riscv_vmv_x_s_i32m1_i32(vs2);

        sumf += (GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d)) * sumi;
    }

    *s = sumf;
#else
    // scalar
    float sumf = 0.0;

    for (int i = 0; i < nb; i++) {
        uint32_t qh;
        memcpy(&qh, x[i].qh, sizeof(qh));

        int sumi = 0;

        for (int j = 0; j < qk/2; ++j) {
            const uint8_t xh_0 = ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4;
            const uint8_t xh_1 = ((qh & (1u << (j + 16))) >> (j + 12));

            const int32_t x0 = ((x[i].qs[j] & 0x0F) | xh_0) - 16;
            const int32_t x1 = ((x[i].qs[j] >>   4) | xh_1) - 16;

            sumi += (x0 * y[i].qs[j]) + (x1 * y[i].qs[j + qk/2]);
        }

        sumf += (GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d)) * sumi;
    }

    *s = sumf;
#endif
}
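// q5_1 * q8_1 dot product. Same 5-bit layout as q5_0, but with a per-block minimum
// m instead of the -16 recentering: table_b2b_0 yields (bit << 4) per lane, which is
// OR-ed into the nibbles, and the m * y->s terms accumulate in summs as in q4_1.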
static void ggml_vec_dot_q5_1_q8_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
    const int qk = QK8_1;
    const int nb = n / qk;

    assert(n % qk == 0);
    assert(qk == QK5_1);

    const block_q5_1 * restrict x = vx;
    const block_q8_1 * restrict y = vy;

#if defined(__ARM_NEON)
    float32x4_t sumv0 = vdupq_n_f32(0.0f);
    float32x4_t sumv1 = vdupq_n_f32(0.0f);

    float summs0 = 0.0f;
    float summs1 = 0.0f;

    uint32_t qh0;
    uint32_t qh1;

    uint64_t tmp0[4];
    uint64_t tmp1[4];

    GGML_ASSERT(nb % 2 == 0); // TODO: handle odd nb

    for (int i = 0; i < nb; i += 2) {
        const block_q5_1 * restrict x0 = &x[i];
        const block_q5_1 * restrict x1 = &x[i + 1];
        const block_q8_1 * restrict y0 = &y[i];
        const block_q8_1 * restrict y1 = &y[i + 1];

        const uint8x16_t m4b = vdupq_n_u8(0x0F);

        summs0 += GGML_FP16_TO_FP32(x0->m) * y0->s;
        summs1 += GGML_FP16_TO_FP32(x1->m) * y1->s;

        // extract the 5th bit via lookup table ((b) << 4)
        memcpy(&qh0, x0->qh, sizeof(qh0));
        memcpy(&qh1, x1->qh, sizeof(qh1));

        tmp0[0] = table_b2b_0[(qh0 >>  0) & 0xFF];
        tmp0[1] = table_b2b_0[(qh0 >>  8) & 0xFF];
        tmp0[2] = table_b2b_0[(qh0 >> 16) & 0xFF];
        tmp0[3] = table_b2b_0[(qh0 >> 24)       ];

        tmp1[0] = table_b2b_0[(qh1 >>  0) & 0xFF];
        tmp1[1] = table_b2b_0[(qh1 >>  8) & 0xFF];
        tmp1[2] = table_b2b_0[(qh1 >> 16) & 0xFF];
        tmp1[3] = table_b2b_0[(qh1 >> 24)       ];

        const int8x16_t qhl0 = vld1q_s8((const int8_t *)(tmp0 + 0));
        const int8x16_t qhh0 = vld1q_s8((const int8_t *)(tmp0 + 2));
        const int8x16_t qhl1 = vld1q_s8((const int8_t *)(tmp1 + 0));
        const int8x16_t qhh1 = vld1q_s8((const int8_t *)(tmp1 + 2));

        const uint8x16_t v0_0 = vld1q_u8(x0->qs);
        const uint8x16_t v0_1 = vld1q_u8(x1->qs);

        // 4-bit -> 8-bit
        const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
        const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
        const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
        const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));

        // add high bit
        const int8x16_t v0_0lf = vorrq_s8(v0_0l, qhl0);
        const int8x16_t v0_0hf = vorrq_s8(v0_0h, qhh0);
        const int8x16_t v0_1lf = vorrq_s8(v0_1l, qhl1);
        const int8x16_t v0_1hf = vorrq_s8(v0_1h, qhh1);

        // load y
        const int8x16_t v1_0l = vld1q_s8(y0->qs);
        const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
        const int8x16_t v1_1l = vld1q_s8(y1->qs);
        const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);

#if defined(__ARM_FEATURE_DOTPROD)
        sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(
                        vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l),
                        vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), GGML_FP16_TO_FP32(x0->d)*y0->d);
        sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(
                        vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l),
                        vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_FP16_TO_FP32(x1->d)*y1->d);
#else
        const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0lf), vget_low_s8 (v1_0l));
        const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0lf), vget_high_s8(v1_0l));
        const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hf), vget_low_s8 (v1_0h));
        const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hf), vget_high_s8(v1_0h));

        const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1lf), vget_low_s8 (v1_1l));
        const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1lf), vget_high_s8(v1_1l));
        const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1hf), vget_low_s8 (v1_1h));
        const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1hf), vget_high_s8(v1_1h));

        const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h));
        const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h));
        const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h));
        const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h));

        sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), GGML_FP16_TO_FP32(x0->d)*y0->d);
        sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), GGML_FP16_TO_FP32(x1->d)*y1->d);
#endif
    }

    *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs0 + summs1;
#elif defined(__wasm_simd128__)
    v128_t sumv = wasm_f32x4_splat(0.0f);

    float summs = 0.0f;

    uint32_t qh;
    uint64_t tmp[4];

    // TODO: check if unrolling this is better
    for (int i = 0; i < nb; ++i) {
        const block_q5_1 * restrict x0 = &x[i];
        const block_q8_1 * restrict y0 = &y[i];

        summs += GGML_FP16_TO_FP32(x0->m) * y0->s;

        const v128_t m4b = wasm_i8x16_splat(0x0F);

        // extract the 5th bit
        memcpy(&qh, x0->qh, sizeof(qh));

        tmp[0] = table_b2b_0[(qh >>  0) & 0xFF];
        tmp[1] = table_b2b_0[(qh >>  8) & 0xFF];
        tmp[2] = table_b2b_0[(qh >> 16) & 0xFF];
        tmp[3] = table_b2b_0[(qh >> 24)       ];

        const v128_t qhl = wasm_v128_load(tmp + 0);
        const v128_t qhh = wasm_v128_load(tmp + 2);

        const v128_t v0 = wasm_v128_load(x0->qs);

        // 4-bit -> 8-bit
        const v128_t v0l = wasm_v128_and (v0, m4b);
        const v128_t v0h = wasm_u8x16_shr(v0, 4);

        // add high bit
        const v128_t v0lf = wasm_v128_or(v0l, qhl);
        const v128_t v0hf = wasm_v128_or(v0h, qhh);

        // load y
        const v128_t v1l = wasm_v128_load(y0->qs);
        const v128_t v1h = wasm_v128_load(y0->qs + 16);

        // int8x16 -> int16x8
        const v128_t v0lfl = wasm_i16x8_extend_low_i8x16 (v0lf);
        const v128_t v0lfh = wasm_i16x8_extend_high_i8x16(v0lf);
        const v128_t v0hfl = wasm_i16x8_extend_low_i8x16 (v0hf);
        const v128_t v0hfh = wasm_i16x8_extend_high_i8x16(v0hf);

        const v128_t v1ll = wasm_i16x8_extend_low_i8x16 (v1l);
        const v128_t v1lh = wasm_i16x8_extend_high_i8x16(v1l);
        const v128_t v1hl = wasm_i16x8_extend_low_i8x16 (v1h);
        const v128_t v1hh = wasm_i16x8_extend_high_i8x16(v1h);

        // dot product
        sumv = wasm_f32x4_add(sumv,
                wasm_f32x4_mul(wasm_f32x4_convert_i32x4(wasm_i32x4_add(
                        wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0lfl, v1ll),
                                       wasm_i32x4_dot_i16x8(v0lfh, v1lh)),
                        wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0hfl, v1hl),
                                       wasm_i32x4_dot_i16x8(v0hfh, v1hh)))),
                    wasm_f32x4_splat(GGML_FP16_TO_FP32(x0->d) * y0->d)));
    }

    *s = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) +
         wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3) + summs;
#elif defined(__AVX2__)
    // Initialize accumulator with zeros
    __m256 acc = _mm256_setzero_ps();

    float summs = 0.0f;

    // Main loop
    for (int i = 0; i < nb; i++) {
        const __m256 dx = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d));

        summs += GGML_FP16_TO_FP32(x[i].m) * y[i].s;

        __m256i bx = bytes_from_nibbles_32(x[i].qs);
        __m256i bxhi = bytes_from_bits_32(x[i].qh);
        bxhi = _mm256_and_si256(bxhi, _mm256_set1_epi8(0x10));
        bx = _mm256_or_si256(bx, bxhi);

        const __m256 dy = _mm256_set1_ps(y[i].d);
        const __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);

        const __m256 q = mul_sum_us8_pairs_float(bx, by);

        acc = _mm256_fmadd_ps(q, _mm256_mul_ps(dx, dy), acc);
    }

    *s = hsum_float_8(acc) + summs;
#elif defined(__AVX__)
    // Initialize accumulator with zeros
    __m256 acc = _mm256_setzero_ps();
    __m128i mask = _mm_set1_epi8(0x10);

    float summs = 0.0f;

    // Main loop
    for (int i = 0; i < nb; i++) {
        const __m256 dx = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d));

        summs += GGML_FP16_TO_FP32(x[i].m) * y[i].s;

        __m256i bx = bytes_from_nibbles_32(x[i].qs);
        const __m256i bxhi = bytes_from_bits_32(x[i].qh);
        __m128i bxhil = _mm256_castsi256_si128(bxhi);
        __m128i bxhih = _mm256_extractf128_si256(bxhi, 1);
        bxhil = _mm_and_si128(bxhil, mask);
        bxhih = _mm_and_si128(bxhih, mask);
        __m128i bxl = _mm256_castsi256_si128(bx);
        __m128i bxh = _mm256_extractf128_si256(bx, 1);
        bxl = _mm_or_si128(bxl, bxhil);
        bxh = _mm_or_si128(bxh, bxhih);
        bx = MM256_SET_M128I(bxh, bxl);

        const __m256 dy = _mm256_set1_ps(y[i].d);
        const __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);

        const __m256 q = mul_sum_us8_pairs_float(bx, by);

        acc = _mm256_add_ps(_mm256_mul_ps(q, _mm256_mul_ps(dx, dy)), acc);
    }

    *s = hsum_float_8(acc) + summs;
#elif defined(__riscv_v_intrinsic)
    float sumf = 0.0;

    uint32_t qh;

    // These temp values are for shift operations
    uint32_t temp_1[16] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};

    size_t vl = __riscv_vsetvl_e8m1(qk/2);

    for (int i = 0; i < nb; i++) {
        memcpy(&qh, x[i].qh, sizeof(uint32_t));

        // temporary registers
        vuint32m4_t vt_1 = __riscv_vle32_v_u32m4(temp_1, vl);
        vuint32m4_t vt_2 = __riscv_vadd_vx_u32m4(vt_1, 12, vl);

        // load qh
        vuint32m4_t vqh = __riscv_vmv_v_x_u32m4(qh, vl);

        // ((qh >> (j +  0)) << 4) & 0x10;
        vuint32m4_t xhr_0 = __riscv_vsrl_vv_u32m4(vqh, vt_1, vl);
        vuint32m4_t xhl_0 = __riscv_vsll_vx_u32m4(xhr_0, 4, vl);
        vuint32m4_t xha_0 = __riscv_vand_vx_u32m4(xhl_0, 0x10, vl);

        // ((qh >> (j + 12))     ) & 0x10;
        vuint32m4_t xhr_1 = __riscv_vsrl_vv_u32m4(vqh, vt_2, vl);
        vuint32m4_t xha_1 = __riscv_vand_vx_u32m4(xhr_1, 0x10, vl);

        // narrowing
        vuint16m2_t xhc_0 = __riscv_vncvt_x_x_w_u16m2(xha_0, vl);
        vuint8m1_t  xh_0  = __riscv_vncvt_x_x_w_u8m1(xhc_0, vl);

        vuint16m2_t xhc_1 = __riscv_vncvt_x_x_w_u16m2(xha_1, vl);
        vuint8m1_t  xh_1  = __riscv_vncvt_x_x_w_u8m1(xhc_1, vl);

        // load
        vuint8m1_t tx = __riscv_vle8_v_u8m1(x[i].qs, vl);

        vint8m1_t y0 = __riscv_vle8_v_i8m1(y[i].qs, vl);
        vint8m1_t y1 = __riscv_vle8_v_i8m1(y[i].qs+16, vl);

        vuint8m1_t x_at = __riscv_vand_vx_u8m1(tx, 0x0F, vl);
        vuint8m1_t x_lt = __riscv_vsrl_vx_u8m1(tx, 0x04, vl);

        vuint8m1_t x_a = __riscv_vor_vv_u8m1(x_at, xh_0, vl);
        vuint8m1_t x_l = __riscv_vor_vv_u8m1(x_lt, xh_1, vl);

        vint8m1_t v0 = __riscv_vreinterpret_v_u8m1_i8m1(x_a);
        vint8m1_t v1 = __riscv_vreinterpret_v_u8m1_i8m1(x_l);

        vint16m2_t vec_mul1 = __riscv_vwmul_vv_i16m2(v0, y0, vl);
        vint16m2_t vec_mul2 = __riscv_vwmul_vv_i16m2(v1, y1, vl);

        vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl);

        vint32m1_t vs1 = __riscv_vwredsum_vs_i16m2_i32m1(vec_mul1, vec_zero, vl);
        vint32m1_t vs2 = __riscv_vwredsum_vs_i16m2_i32m1(vec_mul2, vec_zero, vl);

        int sumi = __riscv_vmv_x_s_i32m1_i32(vs1);
        sumi += __riscv_vmv_x_s_i32m1_i32(vs2);

        sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s;
    }

    *s = sumf;
#else
    // scalar
    float sumf = 0.0;

    for (int i = 0; i < nb; i++) {
        uint32_t qh;
        memcpy(&qh, x[i].qh, sizeof(qh));

        int sumi = 0;

        for (int j = 0; j < qk/2; ++j) {
            const uint8_t xh_0 = ((qh >> (j +  0)) << 4) & 0x10;
            const uint8_t xh_1 = ((qh >> (j + 12))     ) & 0x10;

            const int32_t x0 = (x[i].qs[j] & 0xF) | xh_0;
            const int32_t x1 = (x[i].qs[j] >>  4) | xh_1;

            sumi += (x0 * y[i].qs[j]) + (x1 * y[i].qs[j + qk/2]);
        }

        sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s;
    }

    *s = sumf;
#endif
}
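// q8_0 * q8_0 dot product: the simplest case; both operands are already int8, so
// each block is a plain 32-element integer dot product scaled by d_x * d_y.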
static void ggml_vec_dot_q8_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
    const int qk = QK8_0;
    const int nb = n / qk;

    assert(n % qk == 0);

    const block_q8_0 * restrict x = vx;
    const block_q8_0 * restrict y = vy;

#if defined(__ARM_NEON)
    float32x4_t sumv0 = vdupq_n_f32(0.0f);
    float32x4_t sumv1 = vdupq_n_f32(0.0f);

    GGML_ASSERT(nb % 2 == 0); // TODO: handle odd nb

    for (int i = 0; i < nb; i += 2) {
        const block_q8_0 * restrict x0 = &x[i + 0];
        const block_q8_0 * restrict x1 = &x[i + 1];
        const block_q8_0 * restrict y0 = &y[i + 0];
        const block_q8_0 * restrict y1 = &y[i + 1];

        const int8x16_t x0_0 = vld1q_s8(x0->qs);
        const int8x16_t x0_1 = vld1q_s8(x0->qs + 16);
        const int8x16_t x1_0 = vld1q_s8(x1->qs);
        const int8x16_t x1_1 = vld1q_s8(x1->qs + 16);

        // load y
        const int8x16_t y0_0 = vld1q_s8(y0->qs);
        const int8x16_t y0_1 = vld1q_s8(y0->qs + 16);
        const int8x16_t y1_0 = vld1q_s8(y1->qs);
        const int8x16_t y1_1 = vld1q_s8(y1->qs + 16);

#if defined(__ARM_FEATURE_DOTPROD)
        sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(
                        vdotq_s32(vdupq_n_s32(0), x0_0, y0_0),
                        vdotq_s32(vdupq_n_s32(0), x0_1, y0_1))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
        sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(
                        vdotq_s32(vdupq_n_s32(0), x1_0, y1_0),
                        vdotq_s32(vdupq_n_s32(0), x1_1, y1_1))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
#else
        const int16x8_t p0_0 = vmull_s8(vget_low_s8 (x0_0), vget_low_s8 (y0_0));
        const int16x8_t p0_1 = vmull_s8(vget_high_s8(x0_0), vget_high_s8(y0_0));
        const int16x8_t p0_2 = vmull_s8(vget_low_s8 (x0_1), vget_low_s8 (y0_1));
        const int16x8_t p0_3 = vmull_s8(vget_high_s8(x0_1), vget_high_s8(y0_1));

        const int16x8_t p1_0 = vmull_s8(vget_low_s8 (x1_0), vget_low_s8 (y1_0));
        const int16x8_t p1_1 = vmull_s8(vget_high_s8(x1_0), vget_high_s8(y1_0));
        const int16x8_t p1_2 = vmull_s8(vget_low_s8 (x1_1), vget_low_s8 (y1_1));
        const int16x8_t p1_3 = vmull_s8(vget_high_s8(x1_1), vget_high_s8(y1_1));

        const int32x4_t p0 = vaddq_s32(vpaddlq_s16(p0_0), vpaddlq_s16(p0_1));
        const int32x4_t p1 = vaddq_s32(vpaddlq_s16(p0_2), vpaddlq_s16(p0_3));
        const int32x4_t p2 = vaddq_s32(vpaddlq_s16(p1_0), vpaddlq_s16(p1_1));
        const int32x4_t p3 = vaddq_s32(vpaddlq_s16(p1_2), vpaddlq_s16(p1_3));

        sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(p0, p1)), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
        sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(p2, p3)), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
#endif
    }

    *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
#elif defined(__AVX2__) || defined(__AVX__)
    // Initialize accumulator with zeros
    __m256 acc = _mm256_setzero_ps();

    // Main loop
    for (int i = 0; i < nb; ++i) {
        // Compute combined scale for the block
        const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d));
        __m256i bx = _mm256_loadu_si256((const __m256i *)x[i].qs);
        __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);

        const __m256 q = mul_sum_i8_pairs_float(bx, by);

        // Multiply q with scale and accumulate
#if defined(__AVX2__)
        acc = _mm256_fmadd_ps( d, q, acc );
#else
        acc = _mm256_add_ps( _mm256_mul_ps( d, q ), acc );
#endif
    }

    *s = hsum_float_8(acc);
#elif defined(__riscv_v_intrinsic)
    float sumf = 0.0;
    size_t vl = __riscv_vsetvl_e8m1(qk);

    for (int i = 0; i < nb; i++) {
        // load elements
        vint8m1_t bx = __riscv_vle8_v_i8m1(x[i].qs, vl);
        vint8m1_t by = __riscv_vle8_v_i8m1(y[i].qs, vl);

        vint16m2_t vw_mul = __riscv_vwmul_vv_i16m2(bx, by, vl);

        vint32m1_t v_zero = __riscv_vmv_v_x_i32m1(0, vl);
        vint32m1_t v_sum  = __riscv_vwredsum_vs_i16m2_i32m1(vw_mul, v_zero, vl);

        int sumi = __riscv_vmv_x_s_i32m1_i32(v_sum);

        sumf += sumi*(GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d));
    }

    *s = sumf;
#else
    // scalar
    float sumf = 0.0;

    for (int i = 0; i < nb; i++) {
        int sumi = 0;

        for (int j = 0; j < qk; j++) {
            sumi += x[i].qs[j]*y[i].qs[j];
        }

        sumf += sumi*(GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d));
    }

    *s = sumf;
#endif
}
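
// Design note (inferred from the code above, not stated elsewhere): the NEON path
// consumes two blocks per iteration and keeps two independent accumulators
// (sumv0/sumv1). Splitting the accumulation chain this way avoids serializing every
// fused multiply-add on a single register dependency, which should help hide FMA
// latency; the partial sums are combined only once, after the loop.
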
// compute GGML_VEC_DOT_UNROLL dot products at once
// xs - x row stride in bytes
inline static void ggml_vec_dot_f16_unroll(const int n, const int xs, float * restrict s, void * restrict xv, ggml_fp16_t * restrict y) {
    ggml_float sumf[GGML_VEC_DOT_UNROLL] = { 0.0 };

    ggml_fp16_t * restrict x[GGML_VEC_DOT_UNROLL];

    for (int i = 0; i < GGML_VEC_DOT_UNROLL; ++i) {
        x[i] = (ggml_fp16_t *) ((char *) xv + i*xs);
    }

#if defined(GGML_SIMD)
    const int np = (n & ~(GGML_F16_STEP - 1));

    GGML_F16_VEC sum[GGML_VEC_DOT_UNROLL][GGML_F16_ARR] = { { GGML_F16_VEC_ZERO } };

    GGML_F16_VEC ax[GGML_F16_ARR];
    GGML_F16_VEC ay[GGML_F16_ARR];

    for (int i = 0; i < np; i += GGML_F16_STEP) {
        for (int j = 0; j < GGML_F16_ARR; j++) {
            ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j);

            for (int k = 0; k < GGML_VEC_DOT_UNROLL; ++k) {
                ax[j] = GGML_F16_VEC_LOAD(x[k] + i + j*GGML_F16_EPR, j);

                sum[k][j] = GGML_F16_VEC_FMA(sum[k][j], ax[j], ay[j]);
            }
        }
    }

    // reduce the GGML_F16_ARR partial vectors of each sum[k] into the scalar sumf[k]
    for (int k = 0; k < GGML_VEC_DOT_UNROLL; ++k) {
        GGML_F16_VEC_REDUCE(sumf[k], sum[k]);
    }

    // leftovers
    for (int i = np; i < n; ++i) {
        for (int j = 0; j < GGML_VEC_DOT_UNROLL; ++j) {
            sumf[j] += (ggml_float)(GGML_FP16_TO_FP32(x[j][i])*GGML_FP16_TO_FP32(y[i]));
        }
    }
#else
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < GGML_VEC_DOT_UNROLL; ++j) {
            sumf[j] += (ggml_float)(GGML_FP16_TO_FP32(x[j][i])*GGML_FP16_TO_FP32(y[i]));
        }
    }
#endif

    for (int i = 0; i < GGML_VEC_DOT_UNROLL; ++i) {
        s[i] = sumf[i];
    }
}
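
// Usage sketch (illustrative only, not part of the API): dot a single f16 vector
// against GGML_VEC_DOT_UNROLL consecutive rows of a row-major f16 matrix `m` with
// `cols` elements per row. The names `m`, `q` and `out` here are hypothetical.
//
//     float out[GGML_VEC_DOT_UNROLL];
//     ggml_vec_dot_f16_unroll(cols, cols*sizeof(ggml_fp16_t), out, m, q);
//
// i.e. `xs` is the distance between consecutive rows in bytes, so row k is read
// starting at (char *) m + k*xs.
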
inline static void ggml_vec_mad_f32(const int n, float * restrict y, const float * restrict x, const float v) {
#if defined(GGML_SIMD)
    const int np = (n & ~(GGML_F32_STEP - 1));

    GGML_F32_VEC vx = GGML_F32_VEC_SET1(v);

    GGML_F32_VEC ax[GGML_F32_ARR];
    GGML_F32_VEC ay[GGML_F32_ARR];

    for (int i = 0; i < np; i += GGML_F32_STEP) {
        for (int j = 0; j < GGML_F32_ARR; j++) {
            ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR);
            ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
            ay[j] = GGML_F32_VEC_FMA(ay[j], ax[j], vx);

            GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]);
        }
    }

    // leftovers
    for (int i = np; i < n; ++i) {
        y[i] += x[i]*v;
    }
#else
    // scalar
    for (int i = 0; i < n; ++i) {
        y[i] += x[i]*v;
    }
#endif
}
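
// ggml_vec_mad_f32 is an "axpy" kernel: y[i] += x[i]*v for i in [0, n), the same
// operation BLAS calls saxpy. E.g. with y = {1, 1}, x = {2, 3}, v = 0.5 it leaves
// y = {2, 2.5}.
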
// xs and vs are byte strides of x and v
inline static void ggml_vec_mad_f32_unroll(const int n, const int xs, const int vs, float * restrict y, const float * restrict xv, const float * restrict vv) {
    const float * restrict x[GGML_VEC_MAD_UNROLL];
    const float * restrict v[GGML_VEC_MAD_UNROLL];

    for (int i = 0; i < GGML_VEC_MAD_UNROLL; ++i) {
        x[i] = (const float *) ((const char *) xv + i*xs);
        v[i] = (const float *) ((const char *) vv + i*vs);
    }

#if defined(GGML_SIMD)
    const int np = (n & ~(GGML_F32_STEP - 1));

    GGML_F32_VEC vx[GGML_VEC_MAD_UNROLL];

    for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) {
        vx[k] = GGML_F32_VEC_SET1(v[k][0]);
    }

    GGML_F32_VEC ax[GGML_VEC_MAD_UNROLL][GGML_F32_ARR];
    GGML_F32_VEC ay[GGML_F32_ARR];

    for (int i = 0; i < np; i += GGML_F32_STEP) {
        for (int j = 0; j < GGML_F32_ARR; j++) {
            ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);

            for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) {
                ax[k][j] = GGML_F32_VEC_LOAD(x[k] + i + j*GGML_F32_EPR);
                ay[j]    = GGML_F32_VEC_FMA(ay[j], ax[k][j], vx[k]);
            }

            GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]);
        }
    }

    // leftovers
    for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) {
        for (int i = np; i < n; ++i) {
            y[i] += x[k][i]*v[k][0];
        }
    }
#else
    // scalar
    for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) {
        for (int i = 0; i < n; ++i) {
            y[i] += x[k][i]*v[k][0];
        }
    }
#endif
}

//inline static void ggml_vec_scale_f32(const int n, float * y, const float v) { for (int i = 0; i < n; ++i) y[i] *= v; }
inline static void ggml_vec_scale_f32(const int n, float * y, const float v) {
#if defined(GGML_USE_ACCELERATE)
    vDSP_vsmul(y, 1, &v, y, 1, n);
#elif defined(GGML_SIMD)
    const int np = (n & ~(GGML_F32_STEP - 1));

    GGML_F32_VEC vx = GGML_F32_VEC_SET1(v);

    GGML_F32_VEC ay[GGML_F32_ARR];

    for (int i = 0; i < np; i += GGML_F32_STEP) {
        for (int j = 0; j < GGML_F32_ARR; j++) {
            ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
            ay[j] = GGML_F32_VEC_MUL(ay[j], vx);

            GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]);
        }
    }

    // leftovers
    for (int i = np; i < n; ++i) {
        y[i] *= v;
    }
#else
    // scalar
    for (int i = 0; i < n; ++i) {
        y[i] *= v;
    }
#endif
}

inline static void ggml_vec_norm_f32 (const int n, float * s, const float * x) { ggml_vec_dot_f32(n, s, x, x); *s = sqrtf(*s);   }
inline static void ggml_vec_sqr_f32  (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i]*x[i];   }
inline static void ggml_vec_sqrt_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = sqrtf(x[i]); }
inline static void ggml_vec_log_f32  (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = logf(x[i]);  }
inline static void ggml_vec_abs_f32  (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = fabsf(x[i]); }
inline static void ggml_vec_sgn_f32  (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 1.f : ((x[i] < 0.f) ? -1.f : 0.f); }
inline static void ggml_vec_step_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 1.f : 0.f; }
inline static void ggml_vec_tanh_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = tanhf(x[i]); }
inline static void ggml_vec_elu_f32  (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? x[i] : expf(x[i])-1; }
inline static void ggml_vec_relu_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? x[i] : 0.f; }

static const float GELU_COEF_A     = 0.044715f;
static const float GELU_QUICK_COEF = -1.702f;
static const float SQRT_2_OVER_PI  = 0.79788456080286535587989211986876f;

inline static float ggml_gelu_f32(float x) {
    return 0.5f*x*(1.0f + tanhf(SQRT_2_OVER_PI*x*(1.0f + GELU_COEF_A*x*x)));
}
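
// This is the standard tanh approximation of GELU:
//
//     gelu(x) ~= 0.5 * x * (1 + tanh( sqrt(2/pi) * (x + 0.044715 * x^3) ))
//
// with SQRT_2_OVER_PI = sqrt(2/pi). The code folds the cubic term as
// x*(1 + 0.044715*x^2), which is the same expression.
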
inline static void ggml_vec_gelu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
    const uint16_t * i16 = (const uint16_t *) x;
    for (int i = 0; i < n; ++i) {
        y[i] = table_gelu_f16[i16[i]];
    }
}

#ifdef GGML_GELU_FP16
inline static void ggml_vec_gelu_f32(const int n, float * y, const float * x) {
    uint16_t t;
    for (int i = 0; i < n; ++i) {
        ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
        memcpy(&t, &fp16, sizeof(uint16_t));
        y[i] = GGML_FP16_TO_FP32(table_gelu_f16[t]);
    }
}
#else
inline static void ggml_vec_gelu_f32(const int n, float * y, const float * x) {
    for (int i = 0; i < n; ++i) {
        y[i] = ggml_gelu_f32(x[i]);
    }
}
#endif

inline static float ggml_gelu_quick_f32(float x) {
    return x*(1.0f/(1.0f+expf(GELU_QUICK_COEF*x)));
}

//inline static void ggml_vec_gelu_quick_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
//    const uint16_t * i16 = (const uint16_t *) x;
//    for (int i = 0; i < n; ++i) {
//        y[i] = table_gelu_quick_f16[i16[i]];
//    }
//}

#ifdef GGML_GELU_QUICK_FP16
inline static void ggml_vec_gelu_quick_f32(const int n, float * y, const float * x) {
    uint16_t t;
    for (int i = 0; i < n; ++i) {
        ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
        memcpy(&t, &fp16, sizeof(uint16_t));
        y[i] = GGML_FP16_TO_FP32(table_gelu_quick_f16[t]);
    }
}
#else
inline static void ggml_vec_gelu_quick_f32(const int n, float * y, const float * x) {
    for (int i = 0; i < n; ++i) {
        y[i] = ggml_gelu_quick_f32(x[i]);
    }
}
#endif
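
// The *_FP16 variants above trade accuracy for speed: since ggml_fp16_t has only
// 2^16 possible bit patterns, gelu/silu can be precomputed for every pattern into a
// 64K-entry table (filled once in ggml_init further below), turning each
// transcendental evaluation into a single table lookup.
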
// Sigmoid Linear Unit (SiLU) function
inline static float ggml_silu_f32(float x) {
    return x/(1.0f + expf(-x));
}

//inline static void ggml_vec_silu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
//    const uint16_t * i16 = (const uint16_t *) x;
//    for (int i = 0; i < n; ++i) {
//        y[i] = table_silu_f16[i16[i]];
//    }
//}

#ifdef GGML_SILU_FP16
inline static void ggml_vec_silu_f32(const int n, float * y, const float * x) {
    uint16_t t;
    for (int i = 0; i < n; ++i) {
        ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
        memcpy(&t, &fp16, sizeof(uint16_t));
        y[i] = GGML_FP16_TO_FP32(table_silu_f16[t]);
    }
}
#else
inline static void ggml_vec_silu_f32(const int n, float * y, const float * x) {
    for (int i = 0; i < n; ++i) {
        y[i] = ggml_silu_f32(x[i]);
    }
}
#endif

inline static float ggml_silu_backward_f32(float x, float dy) {
    const float s = 1.0f/(1.0f + expf(-x));
    return dy*s*(1.0f + x*(1.0f - s));
}

#ifdef GGML_SILU_FP16
inline static void ggml_vec_silu_backward_f32(const int n, float * dx, const float * x, const float * dy) {
    for (int i = 0; i < n; ++i) {
        // the forward pass computed silu on the f16-rounded value of x[i], not on
        // x[i] itself, so take the derivative at that f16 value:
        ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
        float usedx = GGML_FP16_TO_FP32(fp16);
        dx[i] = ggml_silu_backward_f32(usedx, dy[i]);
    }
}
#else
inline static void ggml_vec_silu_backward_f32(const int n, float * dx, const float * x, const float * dy) {
    for (int i = 0; i < n; ++i) {
        dx[i] = ggml_silu_backward_f32(x[i], dy[i]);
    }
}
#endif

inline static void ggml_vec_sum_f32(const int n, float * s, const float * x) {
#ifndef GGML_USE_ACCELERATE
    ggml_float sum = 0.0;
    for (int i = 0; i < n; ++i) {
        sum += (ggml_float)x[i];
    }
    *s = sum;
#else
    vDSP_sve(x, 1, s, n);
#endif
}

inline static void ggml_vec_sum_f32_ggf(const int n, ggml_float * s, const float * x) {
    ggml_float sum = 0.0;
    for (int i = 0; i < n; ++i) {
        sum += (ggml_float)x[i];
    }
    *s = sum;
}

inline static void ggml_vec_sum_f16_ggf(const int n, float * s, const ggml_fp16_t * x) {
    float sum = 0.0f;
    for (int i = 0; i < n; ++i) {
        sum += GGML_FP16_TO_FP32(x[i]);
    }
    *s = sum;
}

inline static void ggml_vec_max_f32(const int n, float * s, const float * x) {
#ifndef GGML_USE_ACCELERATE
    float max = -INFINITY;
    for (int i = 0; i < n; ++i) {
        max = MAX(max, x[i]);
    }
    *s = max;
#else
    vDSP_maxv(x, 1, s, n);
#endif
}

inline static void ggml_vec_norm_inv_f32(const int n, float * s, const float * x) {
    ggml_vec_norm_f32(n, s, x);
    *s = 1.f/(*s);
}

inline static void ggml_vec_argmax_f32(const int n, int * s, const float * x) {
    float max = -INFINITY;
    int idx = 0;
    for (int i = 0; i < n; ++i) {
        max = MAX(max, x[i]);
        if (max == x[i]) { idx = i; }
    }
    *s = idx;
}
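
// Note on ggml_vec_argmax_f32: because `idx` is updated whenever x[i] equals the
// running maximum, ties resolve to the *last* occurrence of the maximum value,
// not the first.
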
//
// data types
//

static const char * GGML_OP_NAME[GGML_OP_COUNT] = {
    "NONE",

    "DUP",
    "ADD",
    "ADD1",
    "ACC",
    "SUB",
    "MUL",
    "DIV",
    "SQR",
    "SQRT",
    "LOG",
    "SUM",
    "SUM_ROWS",
    "MEAN",
    "ARGMAX",
    "REPEAT",
    "REPEAT_BACK",
    "CONCAT",
    "SILU_BACK",
    "NORM",
    "RMS_NORM",
    "RMS_NORM_BACK",
    "GROUP_NORM",

    "MUL_MAT",
    "OUT_PROD",

    "SCALE",
    "SET",
    "CPY",
    "CONT",
    "RESHAPE",
    "VIEW",
    "PERMUTE",
    "TRANSPOSE",
    "GET_ROWS",
    "GET_ROWS_BACK",
    "DIAG",
    "DIAG_MASK_INF",
    "DIAG_MASK_ZERO",
    "SOFT_MAX",
    "SOFT_MAX_BACK",
    "ROPE",
    "ROPE_BACK",
    "ALIBI",
    "CLAMP",
    "CONV_1D",
    "CONV_2D",
    "CONV_TRANSPOSE_2D",
    "POOL_1D",
    "POOL_2D",
    "UPSCALE",

    "FLASH_ATTN",
    "FLASH_FF",
    "FLASH_ATTN_BACK",
    "WIN_PART",
    "WIN_UNPART",
    "GET_REL_POS",
    "ADD_REL_POS",

    "UNARY",

    "MAP_UNARY",
    "MAP_BINARY",

    "MAP_CUSTOM1_F32",
    "MAP_CUSTOM2_F32",
    "MAP_CUSTOM3_F32",

    "MAP_CUSTOM1",
    "MAP_CUSTOM2",
    "MAP_CUSTOM3",

    "CROSS_ENTROPY_LOSS",
    "CROSS_ENTROPY_LOSS_BACK",
};

static_assert(GGML_OP_COUNT == 68, "GGML_OP_COUNT != 68");

static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = {
    "none",

    "x",
    "x+y",
    "x+y",
    "view(x,nb,offset)+=y->x",
    "x-y",
    "x*y",
    "x/y",
    "x^2",
    "√x",
    "log(x)",
    "Σx",
    "Σx_k",
    "Σx/n",
    "argmax(x)",
    "repeat(x)",
    "repeat_back(x)",
    "concat(x, y)",
    "silu_back(x)",
    "norm(x)",
    "rms_norm(x)",
    "rms_norm_back(x)",
    "group_norm(x)",

    "X*Y",
    "X*Y",

    "x*v",
    "y-\\>view(x)",
    "x-\\>y",
    "cont(x)",
    "reshape(x)",
    "view(x)",
    "permute(x)",
    "transpose(x)",
    "get_rows(x)",
    "get_rows_back(x)",
    "diag(x)",
    "diag_mask_inf(x)",
    "diag_mask_zero(x)",
    "soft_max(x)",
    "soft_max_back(x)",
    "rope(x)",
    "rope_back(x)",
    "alibi(x)",
    "clamp(x)",
    "conv_1d(x)",
    "conv_2d(x)",
    "conv_transpose_2d(x)",
    "pool_1d(x)",
    "pool_2d(x)",
    "upscale(x)",

    "flash_attn(x)",
    "flash_ff(x)",
    "flash_attn_back(x)",
    "win_part(x)",
    "win_unpart(x)",
    "get_rel_pos(x)",
    "add_rel_pos(x)",

    "unary(x)",

    "f(x)",
    "f(x,y)",

    "custom_f32(x)",
    "custom_f32(x,y)",
    "custom_f32(x,y,z)",

    "custom(x)",
    "custom(x,y)",
    "custom(x,y,z)",

    "cross_entropy_loss(x,y)",
    "cross_entropy_loss_back(x,y)",
};

static_assert(GGML_OP_COUNT == 68, "GGML_OP_COUNT != 68");

static_assert(GGML_OP_POOL_COUNT == 2, "GGML_OP_POOL_COUNT != 2");

static_assert(sizeof(struct ggml_object)%GGML_MEM_ALIGN == 0, "ggml_object size must be a multiple of GGML_MEM_ALIGN");
static_assert(sizeof(struct ggml_tensor)%GGML_MEM_ALIGN == 0, "ggml_tensor size must be a multiple of GGML_MEM_ALIGN");

// WARN:
// Misconfiguration can lead to problems that are hard to reason about:
// * At best it crashes or produces nonsense.
// * At worst the output is subtly wrong and hard to perceive.
//
// An op has to enable INIT or FINALIZE when any of its branches needs that pass.
// Take care about compile options (e.g., GGML_USE_xxx).
static bool GGML_OP_HAS_INIT    [GGML_OP_COUNT] = { 0 };
static bool GGML_OP_HAS_FINALIZE[GGML_OP_COUNT] = { 0 };

static void ggml_setup_op_has_task_pass(void) {
    {   // INIT
        bool * p = GGML_OP_HAS_INIT;

        p[GGML_OP_ACC                ] = true;
        p[GGML_OP_MUL_MAT            ] = true;
        p[GGML_OP_OUT_PROD           ] = true;
        p[GGML_OP_SET                ] = true;
        p[GGML_OP_GET_ROWS_BACK      ] = true;
        p[GGML_OP_DIAG_MASK_INF      ] = true;
        p[GGML_OP_DIAG_MASK_ZERO     ] = true;
        p[GGML_OP_CONV_1D            ] = true;
        p[GGML_OP_CONV_2D            ] = true;
        p[GGML_OP_CONV_TRANSPOSE_2D  ] = true;
        p[GGML_OP_FLASH_ATTN_BACK    ] = true;
        p[GGML_OP_CROSS_ENTROPY_LOSS ] = true;
        p[GGML_OP_ADD_REL_POS        ] = true;
    }

    {   // FINALIZE
        bool * p = GGML_OP_HAS_FINALIZE;

        p[GGML_OP_CROSS_ENTROPY_LOSS ] = true;
    }
}

//
// ggml context
//

struct ggml_context {
    size_t mem_size;
    void * mem_buffer;
    bool   mem_buffer_owned;
    bool   no_alloc;
    bool   no_alloc_save; // this is used to save the no_alloc state when using scratch buffers

    int    n_objects;

    struct ggml_object * objects_begin;
    struct ggml_object * objects_end;

    struct ggml_scratch scratch;
    struct ggml_scratch scratch_save;
};

struct ggml_context_container {
    bool used;

    struct ggml_context context;
};

//
// NUMA support
//

#define GGML_NUMA_MAX_NODES 8
#define GGML_NUMA_MAX_CPUS 512

struct ggml_numa_node {
    uint32_t cpus[GGML_NUMA_MAX_CPUS]; // hardware threads on this node
    uint32_t n_cpus;
};

struct ggml_numa_nodes {
    struct ggml_numa_node nodes[GGML_NUMA_MAX_NODES];
    uint32_t n_nodes;
    uint32_t total_cpus; // hardware threads on system
};

//
// ggml state
//

struct ggml_state {
    struct ggml_context_container contexts[GGML_MAX_CONTEXTS];
    struct ggml_numa_nodes numa;
};

// global state
static struct ggml_state g_state;
static atomic_int g_state_barrier = 0;

// barrier via spin lock
inline static void ggml_critical_section_start(void) {
    int processing = atomic_fetch_add(&g_state_barrier, 1);

    while (processing > 0) {
        // wait for other threads to finish
        atomic_fetch_sub(&g_state_barrier, 1);
        sched_yield(); // TODO: reconsider this
        processing = atomic_fetch_add(&g_state_barrier, 1);
    }
}

// TODO: make this somehow automatically executed
//       some sort of "sentry" mechanism
inline static void ggml_critical_section_end(void) {
    atomic_fetch_sub(&g_state_barrier, 1);
}
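
// How the spin lock works: atomic_fetch_add returns the value *before* the
// increment, so the thread that observes 0 is the only one inside the critical
// section (the counter is now 1). Any other thread sees a value > 0, immediately
// undoes its increment, yields the CPU, and retries until it is the one that
// observes 0.
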
void ggml_numa_init(void) {
    if (g_state.numa.n_nodes > 0) {
        fprintf(stderr, "ggml_numa_init: NUMA already initialized\n");
        return;
    }

#ifdef __linux__
    struct stat st;
    char path[256];
    int rv;

    // enumerate nodes
    while (g_state.numa.n_nodes < GGML_NUMA_MAX_NODES) {
        rv = snprintf(path, sizeof(path), "/sys/devices/system/node/node%u", g_state.numa.n_nodes);
        GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path));
        if (stat(path, &st) != 0) { break; }
        ++g_state.numa.n_nodes;
    }

    // enumerate CPUs
    while (g_state.numa.total_cpus < GGML_NUMA_MAX_CPUS) {
        rv = snprintf(path, sizeof(path), "/sys/devices/system/cpu/cpu%u", g_state.numa.total_cpus);
        GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path));
        if (stat(path, &st) != 0) { break; }
        ++g_state.numa.total_cpus;
    }

    GGML_PRINT_DEBUG("found %u numa nodes, %u CPUs\n", g_state.numa.n_nodes, g_state.numa.total_cpus);

    if (g_state.numa.n_nodes < 1 || g_state.numa.total_cpus < 1) {
        g_state.numa.n_nodes = 0;
        return;
    }

    for (uint32_t n = 0; n < g_state.numa.n_nodes; ++n) {
        struct ggml_numa_node * node = &g_state.numa.nodes[n];
        GGML_PRINT_DEBUG("CPUs on node %u:", n);
        node->n_cpus = 0;
        for (uint32_t c = 0; c < g_state.numa.total_cpus; ++c) {
            rv = snprintf(path, sizeof(path), "/sys/devices/system/node/node%u/cpu%u", n, c);
            GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path));
            if (stat(path, &st) == 0) {
                node->cpus[node->n_cpus++] = c;
                GGML_PRINT_DEBUG(" %u", c);
            }
        }
        GGML_PRINT_DEBUG("\n");
    }

    if (ggml_is_numa()) {
        FILE *fptr = fopen("/proc/sys/kernel/numa_balancing", "r");
        if (fptr != NULL) {
            char buf[42];
            if (fgets(buf, sizeof(buf), fptr) && strncmp(buf, "0\n", sizeof(buf)) != 0) {
                GGML_PRINT("WARNING: /proc/sys/kernel/numa_balancing is enabled, this has been observed to impair performance\n");
            }
            fclose(fptr);
        }
    }
#else
    // TODO
#endif
}

bool ggml_is_numa(void) {
    return g_state.numa.n_nodes > 1;
}

////////////////////////////////////////////////////////////////////////////////

void ggml_print_object(const struct ggml_object * obj) {
    GGML_PRINT(" - ggml_object: type = %d, offset = %zu, size = %zu, next = %p\n",
            obj->type, obj->offs, obj->size, (const void *) obj->next);
}

void ggml_print_objects(const struct ggml_context * ctx) {
    struct ggml_object * obj = ctx->objects_begin;

    GGML_PRINT("%s: objects in context %p:\n", __func__, (const void *) ctx);

    while (obj != NULL) {
        ggml_print_object(obj);
        obj = obj->next;
    }

    GGML_PRINT("%s: --- end ---\n", __func__);
}

int64_t ggml_nelements(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return tensor->ne[0]*tensor->ne[1]*tensor->ne[2]*tensor->ne[3];
}

int64_t ggml_nrows(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return tensor->ne[1]*tensor->ne[2]*tensor->ne[3];
}

size_t ggml_nbytes(const struct ggml_tensor * tensor) {
    size_t nbytes;
    size_t blck_size = ggml_blck_size(tensor->type);
    if (blck_size == 1) {
        nbytes = ggml_type_size(tensor->type);
        for (int i = 0; i < GGML_MAX_DIMS; ++i) {
            nbytes += (tensor->ne[i] - 1)*tensor->nb[i];
        }
    }
    else {
        nbytes = tensor->ne[0]*tensor->nb[0]/blck_size;
        for (int i = 1; i < GGML_MAX_DIMS; ++i) {
            nbytes += (tensor->ne[i] - 1)*tensor->nb[i];
        }
    }

    return nbytes;
}

size_t ggml_nbytes_pad(const struct ggml_tensor * tensor) {
    return GGML_PAD(ggml_nbytes(tensor), GGML_MEM_ALIGN);
}

size_t ggml_nbytes_split(const struct ggml_tensor * tensor, int nrows_split) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return (nrows_split*tensor->ne[0]*ggml_type_size(tensor->type))/ggml_blck_size(tensor->type);
}

int ggml_blck_size(enum ggml_type type) {
    return type_traits[type].blck_size;
}

size_t ggml_type_size(enum ggml_type type) {
    return type_traits[type].type_size;
}

float ggml_type_sizef(enum ggml_type type) {
    return ((float)(type_traits[type].type_size))/type_traits[type].blck_size;
}

const char * ggml_type_name(enum ggml_type type) {
    return type_traits[type].type_name;
}

bool ggml_is_quantized(enum ggml_type type) {
    return type_traits[type].is_quantized;
}

const char * ggml_op_name(enum ggml_op op) {
    return GGML_OP_NAME[op];
}

const char * ggml_op_symbol(enum ggml_op op) {
    return GGML_OP_SYMBOL[op];
}

size_t ggml_element_size(const struct ggml_tensor * tensor) {
    return ggml_type_size(tensor->type);
}
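
// Reminder on the tensor layout used by the shape predicates below: ne[i] is the
// number of elements in dimension i (ne[0] is the innermost/fastest dimension) and
// nb[i] is the stride of dimension i in bytes. For a contiguous 4x3 F32 matrix
// (ne = {4, 3, 1, 1}) the strides are nb = {4, 16, 48, 48}: moving one element
// along dim 0 advances 4 bytes, moving one row advances 16 bytes, and so on.
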
static inline bool ggml_is_scalar(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return tensor->ne[0] == 1 && tensor->ne[1] == 1 && tensor->ne[2] == 1 && tensor->ne[3] == 1;
}

static inline bool ggml_is_vector(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return tensor->ne[1] == 1 && tensor->ne[2] == 1 && tensor->ne[3] == 1;
}

static inline bool ggml_is_matrix(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return tensor->ne[2] == 1 && tensor->ne[3] == 1;
}

static inline bool ggml_can_mul_mat(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return (t0->ne[0] == t1->ne[0])   &&
           (t1->ne[2]%t0->ne[2] == 0) && // verify t0 is broadcastable
           (t1->ne[3]%t0->ne[3] == 0);
}

static inline bool ggml_can_out_prod(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return (t0->ne[1] == t1->ne[1])   &&
           (t1->ne[2]%t0->ne[2] == 0) && // verify t0 is broadcastable
           (t1->ne[3]%t0->ne[3] == 0);
}

enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype) {
    enum ggml_type wtype = GGML_TYPE_COUNT;

    switch (ftype) {
        case GGML_FTYPE_ALL_F32:              wtype = GGML_TYPE_F32;   break;
        case GGML_FTYPE_MOSTLY_F16:           wtype = GGML_TYPE_F16;   break;
        case GGML_FTYPE_MOSTLY_Q4_0:          wtype = GGML_TYPE_Q4_0;  break;
        case GGML_FTYPE_MOSTLY_Q4_1:          wtype = GGML_TYPE_Q4_1;  break;
        case GGML_FTYPE_MOSTLY_Q5_0:          wtype = GGML_TYPE_Q5_0;  break;
        case GGML_FTYPE_MOSTLY_Q5_1:          wtype = GGML_TYPE_Q5_1;  break;
        case GGML_FTYPE_MOSTLY_Q8_0:          wtype = GGML_TYPE_Q8_0;  break;
        case GGML_FTYPE_MOSTLY_Q2_K:          wtype = GGML_TYPE_Q2_K;  break;
        case GGML_FTYPE_MOSTLY_Q3_K:          wtype = GGML_TYPE_Q3_K;  break;
        case GGML_FTYPE_MOSTLY_Q4_K:          wtype = GGML_TYPE_Q4_K;  break;
        case GGML_FTYPE_MOSTLY_Q5_K:          wtype = GGML_TYPE_Q5_K;  break;
        case GGML_FTYPE_MOSTLY_Q6_K:          wtype = GGML_TYPE_Q6_K;  break;
        case GGML_FTYPE_UNKNOWN:              wtype = GGML_TYPE_COUNT; break;
        case GGML_FTYPE_MOSTLY_Q4_1_SOME_F16: wtype = GGML_TYPE_COUNT; break;
    }

    GGML_ASSERT(wtype != GGML_TYPE_COUNT);

    return wtype;
}

size_t ggml_tensor_overhead(void) {
    return GGML_OBJECT_SIZE + GGML_TENSOR_SIZE;
}

bool ggml_is_transposed(const struct ggml_tensor * tensor) {
    return tensor->nb[0] > tensor->nb[1];
}

bool ggml_is_contiguous(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return
        tensor->nb[0] == ggml_type_size(tensor->type) &&
        tensor->nb[1] == (tensor->nb[0]*tensor->ne[0])/ggml_blck_size(tensor->type) &&
        tensor->nb[2] == tensor->nb[1]*tensor->ne[1] &&
        tensor->nb[3] == tensor->nb[2]*tensor->ne[2];
}

static inline bool ggml_is_contiguous_except_dim_1(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return
        tensor->nb[0] == ggml_type_size(tensor->type) &&
        tensor->nb[2] == tensor->nb[1]*tensor->ne[1] &&
        tensor->nb[3] == tensor->nb[2]*tensor->ne[2];
}

bool ggml_is_permuted(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return tensor->nb[0] > tensor->nb[1] || tensor->nb[1] > tensor->nb[2] || tensor->nb[2] > tensor->nb[3];
}

static inline bool ggml_is_padded_1d(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return
        tensor->nb[0] == ggml_type_size(tensor->type) &&
        tensor->nb[2] == tensor->nb[1]*tensor->ne[1] &&
        tensor->nb[3] == tensor->nb[2]*tensor->ne[2];
}

bool ggml_are_same_shape(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return
        (t0->ne[0] == t1->ne[0]) &&
        (t0->ne[1] == t1->ne[1]) &&
        (t0->ne[2] == t1->ne[2]) &&
        (t0->ne[3] == t1->ne[3]);
}

// check if t1 can be represented as a repetition of t0
static inline bool ggml_can_repeat(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return
        (t1->ne[0]%t0->ne[0] == 0) &&
        (t1->ne[1]%t0->ne[1] == 0) &&
        (t1->ne[2]%t0->ne[2] == 0) &&
        (t1->ne[3]%t0->ne[3] == 0);
}

static inline bool ggml_can_repeat_rows(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return (t0->ne[0] == t1->ne[0]) && ggml_can_repeat(t0, t1);
}

static inline int ggml_up32(int n) {
    return (n + 31) & ~31;
}

//static inline int ggml_up64(int n) {
//    return (n + 63) & ~63;
//}

static inline int ggml_up(int n, int m) {
    // assert m is a power of 2
    GGML_ASSERT((m & (m - 1)) == 0);
    return (n + m - 1) & ~(m - 1);
}
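
// Rounding sketch: for a power-of-two m, (n + m - 1) & ~(m - 1) clears the low bits
// of n + m - 1, which rounds n up to the next multiple of m. E.g. ggml_up(10, 8) =
// (10 + 7) & ~7 = 16, and ggml_up32(33) = (33 + 31) & ~31 = 64.
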
// assert that pointer is aligned to GGML_MEM_ALIGN
#define ggml_assert_aligned(ptr) \
    GGML_ASSERT(((uintptr_t) (ptr))%GGML_MEM_ALIGN == 0)

////////////////////////////////////////////////////////////////////////////////

struct ggml_context * ggml_init(struct ggml_init_params params) {
    // make this function thread safe
    ggml_critical_section_start();

    static bool is_first_call = true;

    if (is_first_call) {
        // initialize time system (required on Windows)
        ggml_time_init();

        // initialize GELU, Quick GELU, SILU and EXP F32 tables
        {
            const uint64_t t_start = ggml_time_us(); UNUSED(t_start);

            ggml_fp16_t ii;
            for (int i = 0; i < (1 << 16); ++i) {
                uint16_t ui = i;
                memcpy(&ii, &ui, sizeof(ii));
                const float f = table_f32_f16[i] = GGML_COMPUTE_FP16_TO_FP32(ii);
                table_gelu_f16[i]       = GGML_FP32_TO_FP16(ggml_gelu_f32(f));
                table_gelu_quick_f16[i] = GGML_FP32_TO_FP16(ggml_gelu_quick_f32(f));
                table_silu_f16[i]       = GGML_FP32_TO_FP16(ggml_silu_f32(f));
                table_exp_f16[i]        = GGML_FP32_TO_FP16(expf(f));
            }

            const uint64_t t_end = ggml_time_us(); UNUSED(t_end);

            GGML_PRINT_DEBUG("%s: GELU, Quick GELU, SILU and EXP tables initialized in %f ms\n", __func__, (t_end - t_start)/1000.0f);
        }

        // initialize g_state
        {
            const uint64_t t_start = ggml_time_us(); UNUSED(t_start);

            g_state = (struct ggml_state) {
                /*.contexts =*/ { { 0 } },
                /*.numa =*/ {
                    .n_nodes = 0,
                    .total_cpus = 0,
                },
            };

            for (int i = 0; i < GGML_MAX_CONTEXTS; ++i) {
                g_state.contexts[i].used = false;
            }

            const uint64_t t_end = ggml_time_us(); UNUSED(t_end);

            GGML_PRINT_DEBUG("%s: g_state initialized in %f ms\n", __func__, (t_end - t_start)/1000.0f);
        }

#if defined(GGML_USE_CUBLAS)
        ggml_init_cublas();
#elif defined(GGML_USE_CLBLAST)
        ggml_cl_init();
#endif

        ggml_setup_op_has_task_pass();

        is_first_call = false;
    }

    // find non-used context in g_state
    struct ggml_context * ctx = NULL;

    for (int i = 0; i < GGML_MAX_CONTEXTS; i++) {
        if (!g_state.contexts[i].used) {
            g_state.contexts[i].used = true;
            ctx = &g_state.contexts[i].context;

            GGML_PRINT_DEBUG("%s: found unused context %d\n", __func__, i);
            break;
        }
    }

    if (ctx == NULL) {
        GGML_PRINT_DEBUG("%s: no unused context found\n", __func__);

        ggml_critical_section_end();

        return NULL;
    }

    // allow calling ggml_init with mem_size == 0
    if (params.mem_size == 0) {
        params.mem_size = GGML_MEM_ALIGN;
    }

    const size_t mem_size = params.mem_buffer ? params.mem_size : GGML_PAD(params.mem_size, GGML_MEM_ALIGN);

    *ctx = (struct ggml_context) {
        /*.mem_size         =*/ mem_size,
        /*.mem_buffer       =*/ params.mem_buffer ? params.mem_buffer : GGML_ALIGNED_MALLOC(mem_size),
        /*.mem_buffer_owned =*/ params.mem_buffer ? false : true,
        /*.no_alloc         =*/ params.no_alloc,
        /*.no_alloc_save    =*/ params.no_alloc,
        /*.n_objects        =*/ 0,
        /*.objects_begin    =*/ NULL,
        /*.objects_end      =*/ NULL,
        /*.scratch          =*/ { 0, 0, NULL, },
        /*.scratch_save     =*/ { 0, 0, NULL, },
    };

    GGML_ASSERT(ctx->mem_buffer != NULL);

    ggml_assert_aligned(ctx->mem_buffer);

    GGML_PRINT_DEBUG("%s: context initialized\n", __func__);

    ggml_critical_section_end();

    return ctx;
}
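
// Usage sketch (illustrative only): a minimal init/teardown cycle. All tensors
// created from the context live inside its single memory pool, so freeing the
// context releases everything at once.
//
//     struct ggml_init_params params = {
//         /*.mem_size   =*/ 16*1024*1024, // 16 MB pool
//         /*.mem_buffer =*/ NULL,         // let ggml allocate it
//         /*.no_alloc   =*/ false,
//     };
//     struct ggml_context * ctx = ggml_init(params);
//     // ... build and compute with tensors ...
//     ggml_free(ctx);
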
void ggml_free(struct ggml_context * ctx) {
    // make this function thread safe
    ggml_critical_section_start();

    bool found = false;

    for (int i = 0; i < GGML_MAX_CONTEXTS; i++) {
        if (&g_state.contexts[i].context == ctx) {
            g_state.contexts[i].used = false;

            GGML_PRINT_DEBUG("%s: context %d has been freed. memory used = %zu\n",
                    __func__, i, ggml_used_mem(ctx));

            if (ctx->mem_buffer_owned) {
                GGML_ALIGNED_FREE(ctx->mem_buffer);
            }

            found = true;
            break;
        }
    }

    if (!found) {
        GGML_PRINT_DEBUG("%s: context not found\n", __func__);
    }

    ggml_critical_section_end();
}

size_t ggml_used_mem(const struct ggml_context * ctx) {
    return ctx->objects_end == NULL ? 0 : ctx->objects_end->offs + ctx->objects_end->size;
}

size_t ggml_set_scratch(struct ggml_context * ctx, struct ggml_scratch scratch) {
    const size_t result = ctx->scratch.data ? ctx->scratch.offs : 0;

    ctx->scratch = scratch;

    return result;
}

bool ggml_get_no_alloc(struct ggml_context * ctx) {
    return ctx->no_alloc;
}

void ggml_set_no_alloc(struct ggml_context * ctx, bool no_alloc) {
    ctx->no_alloc = no_alloc;
}

void * ggml_get_mem_buffer(const struct ggml_context * ctx) {
    return ctx->mem_buffer;
}

size_t ggml_get_mem_size(const struct ggml_context * ctx) {
    return ctx->mem_size;
}

size_t ggml_get_max_tensor_size(const struct ggml_context * ctx) {
    size_t max_size = 0;

    struct ggml_object * obj = ctx->objects_begin;

    while (obj != NULL) {
        if (obj->type == GGML_OBJECT_TENSOR) {
            struct ggml_tensor * tensor = (struct ggml_tensor *) ((char *) ctx->mem_buffer + obj->offs);

            const size_t size = ggml_nbytes(tensor);

            if (max_size < size) {
                max_size = size;
            }
        }

        obj = obj->next;
    }

    return max_size;
}

// IMPORTANT:
// when creating "opt" tensors, always save and load the scratch buffer
// this is an error prone process, but it is necessary to support inplace
// operators when using scratch buffers
// TODO: implement a better way
static void ggml_scratch_save(struct ggml_context * ctx) {
    // this is needed to allow opt tensors to store their data
    // TODO: again, need to find a better way
    ctx->no_alloc_save = ctx->no_alloc;
    ctx->no_alloc      = false;

    ctx->scratch_save = ctx->scratch;
    ctx->scratch.data = NULL;
}

static void ggml_scratch_load(struct ggml_context * ctx) {
    ctx->no_alloc = ctx->no_alloc_save;

    ctx->scratch = ctx->scratch_save;
}

////////////////////////////////////////////////////////////////////////////////

static struct ggml_object * ggml_new_object(struct ggml_context * ctx, enum ggml_object_type type, size_t size) {
    // always insert objects at the end of the context's memory pool
    struct ggml_object * obj_cur = ctx->objects_end;

    const size_t cur_offs = obj_cur == NULL ? 0 : obj_cur->offs;
    const size_t cur_size = obj_cur == NULL ? 0 : obj_cur->size;
    const size_t cur_end  = cur_offs + cur_size;

    // align to GGML_MEM_ALIGN
    size_t size_needed = GGML_PAD(size, GGML_MEM_ALIGN);

    char * const mem_buffer = ctx->mem_buffer;
    struct ggml_object * const obj_new = (struct ggml_object *)(mem_buffer + cur_end);

    if (cur_end + size_needed + GGML_OBJECT_SIZE > ctx->mem_size) {
        GGML_PRINT("%s: not enough space in the context's memory pool (needed %zu, available %zu)\n",
                __func__, cur_end + size_needed, ctx->mem_size);
        assert(false);
        return NULL;
    }

    *obj_new = (struct ggml_object) {
        .offs = cur_end + GGML_OBJECT_SIZE,
        .size = size_needed,
        .next = NULL,
        .type = type,
    };

    ggml_assert_aligned(mem_buffer + obj_new->offs);

    if (obj_cur != NULL) {
        obj_cur->next = obj_new;
    } else {
        // this is the first object in this context
        ctx->objects_begin = obj_new;
    }

    ctx->objects_end = obj_new;

    //printf("%s: inserted new object at %zu, size = %zu\n", __func__, cur_end, obj_new->size);

    return obj_new;
}
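
// Pool layout after a few allocations (sketch): objects are appended back to back,
// each a fixed-size ggml_object header followed immediately by its padded payload.
// obj->offs is the payload offset from mem_buffer, so the header of the next object
// starts at obj->offs + obj->size:
//
//     mem_buffer
//     |--[hdr][payload.....]--[hdr][payload..]--[hdr][payload....]--> free space
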
static struct ggml_tensor * ggml_new_tensor_impl(
        struct ggml_context * ctx,
        enum   ggml_type      type,
        int                   n_dims,
        const int64_t       * ne,
        struct ggml_tensor  * view_src,
        size_t                view_offs) {

    assert(n_dims >= 1 && n_dims <= GGML_MAX_DIMS);

    // find the base tensor and absolute offset
    if (view_src != NULL && view_src->view_src != NULL) {
        view_offs += view_src->view_offs;
        view_src   = view_src->view_src;
    }

    size_t data_size = ggml_type_size(type)*(ne[0]/ggml_blck_size(type));
    for (int i = 1; i < n_dims; i++) {
        data_size *= ne[i];
    }

    GGML_ASSERT(view_src == NULL || data_size + view_offs <= ggml_nbytes(view_src));

    void * data = view_src != NULL ? view_src->data : NULL;
    if (data != NULL) {
        data = (char *) data + view_offs;
    }

    size_t obj_alloc_size = 0;

    if (view_src == NULL && !ctx->no_alloc) {
        if (ctx->scratch.data != NULL) {
            // allocate tensor data in the scratch buffer
            if (ctx->scratch.offs + data_size > ctx->scratch.size) {
                GGML_PRINT("%s: not enough space in the scratch memory pool (needed %zu, available %zu)\n",
                        __func__, ctx->scratch.offs + data_size, ctx->scratch.size);
                assert(false);
                return NULL;
            }

            data = (char * const) ctx->scratch.data + ctx->scratch.offs;

            ctx->scratch.offs += data_size;
        } else {
            // allocate tensor data in the context's memory pool
            obj_alloc_size = data_size;
        }
    }

    struct ggml_object * const obj_new = ggml_new_object(ctx, GGML_OBJECT_TENSOR, GGML_TENSOR_SIZE + obj_alloc_size);

    // TODO: for recoverable errors, we would need to free the data allocated from the scratch buffer here

    struct ggml_tensor * const result = (struct ggml_tensor *)((char *)ctx->mem_buffer + obj_new->offs);

    *result = (struct ggml_tensor) {
        /*.type         =*/ type,
        /*.backend      =*/ GGML_BACKEND_CPU,
        /*.n_dims       =*/ n_dims,
        /*.ne           =*/ { 1, 1, 1, 1 },
        /*.nb           =*/ { 0, 0, 0, 0 },
        /*.op           =*/ GGML_OP_NONE,
        /*.op_params    =*/ { 0 },
        /*.is_param     =*/ false,
        /*.grad         =*/ NULL,
        /*.src          =*/ { NULL },
        /*.perf_runs    =*/ 0,
        /*.perf_cycles  =*/ 0,
        /*.perf_time_us =*/ 0,
        /*.view_src     =*/ view_src,
        /*.view_offs    =*/ view_offs,
        /*.data         =*/ obj_alloc_size > 0 ? (void *)(result + 1) : data,
        /*.name         =*/ { 0 },
        /*.extra        =*/ NULL,
        /*.padding      =*/ { 0 },
    };

    // TODO: this should not be needed as long as we don't rely on aligned SIMD loads
    //ggml_assert_aligned(result->data);

    for (int i = 0; i < n_dims; i++) {
        result->ne[i] = ne[i];
    }

    result->nb[0] = ggml_type_size(type);
    result->nb[1] = result->nb[0]*(result->ne[0]/ggml_blck_size(type));
    for (int i = 2; i < GGML_MAX_DIMS; i++) {
        result->nb[i] = result->nb[i - 1]*result->ne[i - 1];
    }

    ctx->n_objects++;

    return result;
}

struct ggml_tensor * ggml_new_tensor(
        struct ggml_context * ctx,
        enum   ggml_type      type,
        int                   n_dims,
        const int64_t       * ne) {
    return ggml_new_tensor_impl(ctx, type, n_dims, ne, NULL, 0);
}

struct ggml_tensor * ggml_new_tensor_1d(
        struct ggml_context * ctx,
        enum   ggml_type      type,
        int64_t               ne0) {
    return ggml_new_tensor(ctx, type, 1, &ne0);
}

struct ggml_tensor * ggml_new_tensor_2d(
        struct ggml_context * ctx,
        enum   ggml_type      type,
        int64_t               ne0,
        int64_t               ne1) {
    const int64_t ne[2] = { ne0, ne1 };
    return ggml_new_tensor(ctx, type, 2, ne);
}

struct ggml_tensor * ggml_new_tensor_3d(
        struct ggml_context * ctx,
        enum   ggml_type      type,
        int64_t               ne0,
        int64_t               ne1,
        int64_t               ne2) {
    const int64_t ne[3] = { ne0, ne1, ne2 };
    return ggml_new_tensor(ctx, type, 3, ne);
}

struct ggml_tensor * ggml_new_tensor_4d(
        struct ggml_context * ctx,
        enum   ggml_type      type,
        int64_t               ne0,
        int64_t               ne1,
        int64_t               ne2,
        int64_t               ne3) {
    const int64_t ne[4] = { ne0, ne1, ne2, ne3 };
    return ggml_new_tensor(ctx, type, 4, ne);
}
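
// Usage sketch (illustrative only): ne0 is the innermost dimension, so a "4x3
// matrix" in ggml terms has 4 columns and 3 rows:
//
//     struct ggml_tensor * a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 4, 3);
//     // ggml_nelements(a) == 12, ggml_nrows(a) == 3
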
struct ggml_tensor * ggml_new_i32(struct ggml_context * ctx, int32_t value) {
    ggml_scratch_save(ctx);

    struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 1);

    ggml_scratch_load(ctx);

    ggml_set_i32(result, value);

    return result;
}

struct ggml_tensor * ggml_new_f32(struct ggml_context * ctx, float value) {
    ggml_scratch_save(ctx);

    struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);

    ggml_scratch_load(ctx);

    ggml_set_f32(result, value);

    return result;
}

struct ggml_tensor * ggml_dup_tensor(struct ggml_context * ctx, const struct ggml_tensor * src) {
    return ggml_new_tensor(ctx, src->type, src->n_dims, src->ne);
}

static void ggml_set_op_params(struct ggml_tensor * tensor, const void * params, size_t params_size) {
    GGML_ASSERT(tensor != NULL); // silence -Warray-bounds warnings
    assert(params_size <= GGML_MAX_OP_PARAMS);
    memcpy(tensor->op_params, params, params_size);
}

static int32_t ggml_get_op_params_i32(const struct ggml_tensor * tensor, uint32_t i) {
    assert(i < GGML_MAX_OP_PARAMS / sizeof(int32_t));
    return ((const int32_t *)(tensor->op_params))[i];
}

static void ggml_set_op_params_i32(struct ggml_tensor * tensor, uint32_t i, int32_t value) {
    assert(i < GGML_MAX_OP_PARAMS / sizeof(int32_t));
    ((int32_t *)(tensor->op_params))[i] = value;
}

struct ggml_tensor * ggml_set_zero(struct ggml_tensor * tensor) {
    memset(tensor->data, 0, ggml_nbytes(tensor));
    return tensor;
}

struct ggml_tensor * ggml_set_i32 (struct ggml_tensor * tensor, int32_t value) {
    const int n  = ggml_nrows(tensor);
    const int nc = tensor->ne[0];
    const size_t n1 = tensor->nb[1];

    char * const data = tensor->data;

    switch (tensor->type) {
        case GGML_TYPE_I8:
            {
                assert(tensor->nb[0] == sizeof(int8_t));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_i8(nc, (int8_t *)(data + i*n1), value);
                }
            } break;
        case GGML_TYPE_I16:
            {
                assert(tensor->nb[0] == sizeof(int16_t));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_i16(nc, (int16_t *)(data + i*n1), value);
                }
            } break;
        case GGML_TYPE_I32:
            {
                assert(tensor->nb[0] == sizeof(int32_t));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_i32(nc, (int32_t *)(data + i*n1), value);
                }
            } break;
        case GGML_TYPE_F16:
            {
                assert(tensor->nb[0] == sizeof(ggml_fp16_t));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), GGML_FP32_TO_FP16(value));
                }
            } break;
        case GGML_TYPE_F32:
            {
                assert(tensor->nb[0] == sizeof(float));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_f32(nc, (float *)(data + i*n1), value);
                }
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }

    return tensor;
}

struct ggml_tensor * ggml_set_f32(struct ggml_tensor * tensor, float value) {
    const int n  = ggml_nrows(tensor);
    const int nc = tensor->ne[0];
    const size_t n1 = tensor->nb[1];

    char * const data = tensor->data;

    switch (tensor->type) {
        case GGML_TYPE_I8:
            {
                assert(tensor->nb[0] == sizeof(int8_t));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_i8(nc, (int8_t *)(data + i*n1), value);
                }
            } break;
        case GGML_TYPE_I16:
            {
                assert(tensor->nb[0] == sizeof(int16_t));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_i16(nc, (int16_t *)(data + i*n1), value);
                }
            } break;
        case GGML_TYPE_I32:
            {
                assert(tensor->nb[0] == sizeof(int32_t));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_i32(nc, (int32_t *)(data + i*n1), value);
                }
            } break;
        case GGML_TYPE_F16:
            {
                assert(tensor->nb[0] == sizeof(ggml_fp16_t));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), GGML_FP32_TO_FP16(value));
                }
            } break;
        case GGML_TYPE_F32:
            {
                assert(tensor->nb[0] == sizeof(float));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_f32(nc, (float *)(data + i*n1), value);
                }
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }

    return tensor;
}

void ggml_unravel_index(const struct ggml_tensor * tensor, int64_t i, int64_t * i0, int64_t * i1, int64_t * i2, int64_t * i3) {
    const int64_t ne2 = tensor->ne[2];
    const int64_t ne1 = tensor->ne[1];
    const int64_t ne0 = tensor->ne[0];

    const int64_t i3_ = (i/(ne2*ne1*ne0));
    const int64_t i2_ = (i - i3_*ne2*ne1*ne0)/(ne1*ne0);
    const int64_t i1_ = (i - i3_*ne2*ne1*ne0 - i2_*ne1*ne0)/ne0;
    const int64_t i0_ = (i - i3_*ne2*ne1*ne0 - i2_*ne1*ne0 - i1_*ne0);

    if (i0) {
        * i0 = i0_;
    }
    if (i1) {
        * i1 = i1_;
    }
    if (i2) {
        * i2 = i2_;
    }
    if (i3) {
        * i3 = i3_;
    }
}
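
// The inverse mapping, for reference: a flat index decomposed above satisfies
//
//     i = i0 + ne0*(i1 + ne1*(i2 + ne2*i3))
//
// E.g. for ne = {4, 3, 1, 1}, i = 7 unravels to (i0, i1) = (3, 1).
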
int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i) {
    if (!ggml_is_contiguous(tensor)) {
        int64_t id[4] = { 0, 0, 0, 0 };
        ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]);
        return ggml_get_i32_nd(tensor, id[0], id[1], id[2], id[3]);
    }
    switch (tensor->type) {
        case GGML_TYPE_I8:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
                return ((int8_t *)(tensor->data))[i];
            }
        case GGML_TYPE_I16:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
                return ((int16_t *)(tensor->data))[i];
            }
        case GGML_TYPE_I32:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
                return ((int32_t *)(tensor->data))[i];
            }
        case GGML_TYPE_F16:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
                return GGML_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]);
            }
        case GGML_TYPE_F32:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(float));
                return ((float *)(tensor->data))[i];
            }
        default:
            {
                GGML_ASSERT(false);
            }
    }

    return 0.0f;
}

void ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value) {
    if (!ggml_is_contiguous(tensor)) {
        int64_t id[4] = { 0, 0, 0, 0 };
        ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]);
        ggml_set_i32_nd(tensor, id[0], id[1], id[2], id[3], value);
        return;
    }
    switch (tensor->type) {
        case GGML_TYPE_I8:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
                ((int8_t *)(tensor->data))[i] = value;
            } break;
        case GGML_TYPE_I16:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
                ((int16_t *)(tensor->data))[i] = value;
            } break;
        case GGML_TYPE_I32:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
                ((int32_t *)(tensor->data))[i] = value;
            } break;
        case GGML_TYPE_F16:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
                ((ggml_fp16_t *)(tensor->data))[i] = GGML_FP32_TO_FP16(value);
            } break;
        case GGML_TYPE_F32:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(float));
                ((float *)(tensor->data))[i] = value;
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

int32_t ggml_get_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3) {
    void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3];
    switch (tensor->type) {
        case GGML_TYPE_I8:
            return ((int8_t *) data)[0];
        case GGML_TYPE_I16:
            return ((int16_t *) data)[0];
        case GGML_TYPE_I32:
            return ((int32_t *) data)[0];
        case GGML_TYPE_F16:
            return GGML_FP16_TO_FP32(((ggml_fp16_t *) data)[0]);
        case GGML_TYPE_F32:
            return ((float *) data)[0];
        default:
            GGML_ASSERT(false);
    }

    return 0.0f;
}

void ggml_set_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, int32_t value) {
    void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3];
    switch (tensor->type) {
        case GGML_TYPE_I8:
            {
                ((int8_t *)(data))[0] = value;
            } break;
        case GGML_TYPE_I16:
            {
                ((int16_t *)(data))[0] = value;
            } break;
        case GGML_TYPE_I32:
            {
                ((int32_t *)(data))[0] = value;
            } break;
        case GGML_TYPE_F16:
            {
                ((ggml_fp16_t *)(data))[0] = GGML_FP32_TO_FP16(value);
            } break;
        case GGML_TYPE_F32:
            {
                ((float *)(data))[0] = value;
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

  4281. float ggml_get_f32_1d(const struct ggml_tensor * tensor, int i) {
  4282. if (!ggml_is_contiguous(tensor)) {
  4283. int64_t id[4] = { 0, 0, 0, 0 };
  4284. ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]);
  4285. return ggml_get_f32_nd(tensor, id[0], id[1], id[2], id[3]);
  4286. }
  4287. switch (tensor->type) {
  4288. case GGML_TYPE_I8:
  4289. {
  4290. GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
  4291. return ((int8_t *)(tensor->data))[i];
  4292. }
  4293. case GGML_TYPE_I16:
  4294. {
  4295. GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
  4296. return ((int16_t *)(tensor->data))[i];
  4297. }
  4298. case GGML_TYPE_I32:
  4299. {
  4300. GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
  4301. return ((int32_t *)(tensor->data))[i];
  4302. }
  4303. case GGML_TYPE_F16:
  4304. {
  4305. GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
  4306. return GGML_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]);
  4307. }
  4308. case GGML_TYPE_F32:
  4309. {
  4310. GGML_ASSERT(tensor->nb[0] == sizeof(float));
  4311. return ((float *)(tensor->data))[i];
  4312. }
  4313. default:
  4314. {
  4315. GGML_ASSERT(false);
  4316. }
  4317. }
  4318. return 0.0f;
  4319. }
  4320. void ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value) {
  4321. if (!ggml_is_contiguous(tensor)) {
  4322. int64_t id[4] = { 0, 0, 0, 0 };
  4323. ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]);
  4324. ggml_set_f32_nd(tensor, id[0], id[1], id[2], id[3], value);
  4325. return;
  4326. }
  4327. switch (tensor->type) {
  4328. case GGML_TYPE_I8:
  4329. {
  4330. GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
  4331. ((int8_t *)(tensor->data))[i] = value;
  4332. } break;
  4333. case GGML_TYPE_I16:
  4334. {
  4335. GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
  4336. ((int16_t *)(tensor->data))[i] = value;
  4337. } break;
  4338. case GGML_TYPE_I32:
  4339. {
  4340. GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
  4341. ((int32_t *)(tensor->data))[i] = value;
  4342. } break;
  4343. case GGML_TYPE_F16:
  4344. {
  4345. GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
  4346. ((ggml_fp16_t *)(tensor->data))[i] = GGML_FP32_TO_FP16(value);
  4347. } break;
  4348. case GGML_TYPE_F32:
  4349. {
  4350. GGML_ASSERT(tensor->nb[0] == sizeof(float));
  4351. ((float *)(tensor->data))[i] = value;
  4352. } break;
  4353. default:
  4354. {
  4355. GGML_ASSERT(false);
  4356. } break;
  4357. }
  4358. }

float ggml_get_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3) {
    void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3];
    switch (tensor->type) {
        case GGML_TYPE_I8:
            return ((int8_t *) data)[0];
        case GGML_TYPE_I16:
            return ((int16_t *) data)[0];
        case GGML_TYPE_I32:
            return ((int32_t *) data)[0];
        case GGML_TYPE_F16:
            return GGML_FP16_TO_FP32(((ggml_fp16_t *) data)[0]);
        case GGML_TYPE_F32:
            return ((float *) data)[0];
        default:
            GGML_ASSERT(false);
    }
    return 0.0f;
}

void ggml_set_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, float value) {
    void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3];
    switch (tensor->type) {
        case GGML_TYPE_I8:
            {
                ((int8_t *)(data))[0] = value;
            } break;
        case GGML_TYPE_I16:
            {
                ((int16_t *)(data))[0] = value;
            } break;
        case GGML_TYPE_I32:
            {
                ((int32_t *)(data))[0] = value;
            } break;
        case GGML_TYPE_F16:
            {
                ((ggml_fp16_t *)(data))[0] = GGML_FP32_TO_FP16(value);
            } break;
        case GGML_TYPE_F32:
            {
                ((float *)(data))[0] = value;
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
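
// Usage sketch for the scalar accessors above (names such as `ctx0` and the
// shapes are hypothetical; assumes a context with enough memory):
//
//     struct ggml_tensor * t = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, 4, 3);
//     ggml_set_f32_nd(t, 1, 2, 0, 0, 7.0f);      // element at i0 = 1, i1 = 2
//     float v  = ggml_get_f32_nd(t, 1, 2, 0, 0); // v == 7.0f
//     ggml_set_f32_1d(t, 5, 1.5f);               // flat index into the data
//     float v1 = ggml_get_f32_1d(t, 5);          // v1 == 1.5f
//
// For non-contiguous tensors the 1d accessors unravel the flat index first,
// so they stay correct (if slower) on views and permuted tensors.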

void * ggml_get_data(const struct ggml_tensor * tensor) {
    return tensor->data;
}

float * ggml_get_data_f32(const struct ggml_tensor * tensor) {
    assert(tensor->type == GGML_TYPE_F32);
    return (float *)(tensor->data);
}

enum ggml_unary_op ggml_get_unary_op(const struct ggml_tensor * tensor) {
    GGML_ASSERT(tensor->op == GGML_OP_UNARY);
    return (enum ggml_unary_op) ggml_get_op_params_i32(tensor, 0);
}

const char * ggml_get_name(const struct ggml_tensor * tensor) {
    return tensor->name;
}

struct ggml_tensor * ggml_set_name(struct ggml_tensor * tensor, const char * name) {
    strncpy(tensor->name, name, sizeof(tensor->name));
    tensor->name[sizeof(tensor->name) - 1] = '\0';
    return tensor;
}

struct ggml_tensor * ggml_format_name(struct ggml_tensor * tensor, const char * fmt, ...) {
    va_list args;
    va_start(args, fmt);
    vsnprintf(tensor->name, sizeof(tensor->name), fmt, args);
    va_end(args);
    return tensor;
}

struct ggml_tensor * ggml_view_tensor(
        struct ggml_context * ctx,
        struct ggml_tensor * src) {
    struct ggml_tensor * result = ggml_new_tensor_impl(ctx, src->type, src->n_dims, src->ne, src, 0);
    ggml_format_name(result, "%s (view)", src->name);

    for (int i = 0; i < GGML_MAX_DIMS; i++) {
        result->nb[i] = src->nb[i];
    }

    return result;
}

struct ggml_tensor * ggml_get_tensor(struct ggml_context * ctx, const char * name) {
    struct ggml_object * obj = ctx->objects_begin;

    char * const mem_buffer = ctx->mem_buffer;

    while (obj != NULL) {
        if (obj->type == GGML_OBJECT_TENSOR) {
            struct ggml_tensor * cur = (struct ggml_tensor *)(mem_buffer + obj->offs);
            if (strcmp(cur->name, name) == 0) {
                return cur;
            }
        }

        obj = obj->next;
    }

    return NULL;
}
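
// Usage sketch: naming a tensor and retrieving it later by name (the context
// `ctx0` and the name are hypothetical):
//
//     struct ggml_tensor * w = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, 768, 768);
//     ggml_set_name(w, "attn.w");
//     ...
//     struct ggml_tensor * found = ggml_get_tensor(ctx0, "attn.w"); // == w
//
// ggml_get_tensor() walks the context's object list linearly, so it is meant
// for model loading and debugging rather than hot paths.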

////////////////////////////////////////////////////////////////////////////////

// ggml_dup

static struct ggml_tensor * ggml_dup_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        bool inplace) {
    bool is_node = false;

    if (!inplace && (a->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op = GGML_OP_DUP;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_dup(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_dup_impl(ctx, a, false);
}

struct ggml_tensor * ggml_dup_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_dup_impl(ctx, a, true);
}

// ggml_add

static struct ggml_tensor * ggml_add_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        bool inplace) {
    // TODO: support less-strict constraint
    //       GGML_ASSERT(ggml_can_repeat(b, a));
    GGML_ASSERT(ggml_can_repeat_rows(b, a));

    bool is_node = false;

    if (!inplace && (a->grad || b->grad)) {
        // TODO: support backward pass for broadcasting
        GGML_ASSERT(ggml_are_same_shape(a, b));
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op = GGML_OP_ADD;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

struct ggml_tensor * ggml_add(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_add_impl(ctx, a, b, false);
}

struct ggml_tensor * ggml_add_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_add_impl(ctx, a, b, true);
}
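
// Usage sketch: thanks to the ggml_can_repeat_rows() constraint above, a bias
// row can be added to every row of a matrix without an explicit ggml_repeat()
// (hypothetical names):
//
//     struct ggml_tensor * x = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, n_tokens);
//     struct ggml_tensor * b = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, n_embd);
//     struct ggml_tensor * y = ggml_add(ctx0, x, b); // b is broadcast across rows
//
// As the assert above notes, the backward pass still requires equal shapes, so
// broadcasting adds are inference-only for now.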

// ggml_add_cast

static struct ggml_tensor * ggml_add_cast_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        enum ggml_type type) {
    // TODO: support less-strict constraint
    //       GGML_ASSERT(ggml_can_repeat(b, a));
    GGML_ASSERT(ggml_can_repeat_rows(b, a));
    GGML_ASSERT(ggml_is_quantized(a->type)); // currently only supported for quantized input

    bool is_node = false;

    if (a->grad || b->grad) {
        // TODO: support backward pass for broadcasting
        GGML_ASSERT(ggml_are_same_shape(a, b));
        is_node = true;
    }

    struct ggml_tensor * result = ggml_new_tensor(ctx, type, a->n_dims, a->ne);

    result->op = GGML_OP_ADD;
    result->grad = is_node ? ggml_new_tensor(ctx, GGML_TYPE_F32, a->n_dims, a->ne) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

struct ggml_tensor * ggml_add_cast(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        enum ggml_type type) {
    return ggml_add_cast_impl(ctx, a, b, type);
}

// ggml_add1

static struct ggml_tensor * ggml_add1_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        bool inplace) {
    GGML_ASSERT(ggml_is_scalar(b));
    GGML_ASSERT(ggml_is_padded_1d(a));

    bool is_node = false;

    if (a->grad || b->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op = GGML_OP_ADD1;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

struct ggml_tensor * ggml_add1(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_add1_impl(ctx, a, b, false);
}

struct ggml_tensor * ggml_add1_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_add1_impl(ctx, a, b, true);
}

// ggml_acc

static struct ggml_tensor * ggml_acc_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        size_t nb1,
        size_t nb2,
        size_t nb3,
        size_t offset,
        bool inplace) {
    GGML_ASSERT(ggml_nelements(b) <= ggml_nelements(a));
    GGML_ASSERT(ggml_is_contiguous(a));
    GGML_ASSERT(a->type == GGML_TYPE_F32);
    GGML_ASSERT(b->type == GGML_TYPE_F32);

    bool is_node = false;

    if (!inplace && (a->grad || b->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    int32_t params[] = { nb1, nb2, nb3, offset, inplace ? 1 : 0 };
    ggml_set_op_params(result, params, sizeof(params));

    result->op = GGML_OP_ACC;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

struct ggml_tensor * ggml_acc(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        size_t nb1,
        size_t nb2,
        size_t nb3,
        size_t offset) {
    return ggml_acc_impl(ctx, a, b, nb1, nb2, nb3, offset, false);
}

struct ggml_tensor * ggml_acc_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        size_t nb1,
        size_t nb2,
        size_t nb3,
        size_t offset) {
    return ggml_acc_impl(ctx, a, b, nb1, nb2, nb3, offset, true);
}
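
// Usage sketch: accumulate a small tensor into a region of a larger one, using
// the destination's own strides and a byte offset (hypothetical names; the
// offset is expressed in bytes):
//
//     // adds `patch` on top of `big`, starting at row r0:
//     struct ggml_tensor * out = ggml_acc(ctx0, big, patch,
//             big->nb[1], big->nb[2], big->nb[3], r0*big->nb[1]);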

// ggml_sub

static struct ggml_tensor * ggml_sub_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        bool inplace) {
    GGML_ASSERT(ggml_are_same_shape(a, b));

    bool is_node = false;

    if (!inplace && (a->grad || b->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op = GGML_OP_SUB;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

struct ggml_tensor * ggml_sub(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_sub_impl(ctx, a, b, false);
}

struct ggml_tensor * ggml_sub_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_sub_impl(ctx, a, b, true);
}

// ggml_mul

static struct ggml_tensor * ggml_mul_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        bool inplace) {
    // TODO: support less-strict constraint
    //       GGML_ASSERT(ggml_can_repeat(b, a));
    GGML_ASSERT(ggml_can_repeat_rows(b, a));

    bool is_node = false;

    if (!inplace && (a->grad || b->grad)) {
        // TODO: support backward pass for broadcasting
        GGML_ASSERT(ggml_are_same_shape(a, b));
        is_node = true;
    }

    if (inplace) {
        GGML_ASSERT(!is_node);
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op = GGML_OP_MUL;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

struct ggml_tensor * ggml_mul(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_mul_impl(ctx, a, b, false);
}

struct ggml_tensor * ggml_mul_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_mul_impl(ctx, a, b, true);
}

// ggml_div

static struct ggml_tensor * ggml_div_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        bool inplace) {
    GGML_ASSERT(ggml_are_same_shape(a, b));

    bool is_node = false;

    if (!inplace && (a->grad || b->grad)) {
        is_node = true;
    }

    if (inplace) {
        GGML_ASSERT(!is_node);
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op = GGML_OP_DIV;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

struct ggml_tensor * ggml_div(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_div_impl(ctx, a, b, false);
}

struct ggml_tensor * ggml_div_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_div_impl(ctx, a, b, true);
}

// ggml_sqr

static struct ggml_tensor * ggml_sqr_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        bool inplace) {
    bool is_node = false;

    if (!inplace && (a->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op = GGML_OP_SQR;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_sqr(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_sqr_impl(ctx, a, false);
}

struct ggml_tensor * ggml_sqr_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_sqr_impl(ctx, a, true);
}

// ggml_sqrt

static struct ggml_tensor * ggml_sqrt_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        bool inplace) {
    bool is_node = false;

    if (!inplace && (a->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op = GGML_OP_SQRT;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_sqrt(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_sqrt_impl(ctx, a, false);
}

struct ggml_tensor * ggml_sqrt_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_sqrt_impl(ctx, a, true);
}

// ggml_log

static struct ggml_tensor * ggml_log_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        bool inplace) {
    bool is_node = false;

    if (!inplace && (a->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op = GGML_OP_LOG;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_log(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_log_impl(ctx, a, false);
}

struct ggml_tensor * ggml_log_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_log_impl(ctx, a, true);
}

// ggml_sum

struct ggml_tensor * ggml_sum(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = ggml_new_tensor_1d(ctx, a->type, 1);

    result->op = GGML_OP_SUM;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_sum_rows

struct ggml_tensor * ggml_sum_rows(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    int64_t ne[4] = {1,1,1,1};
    for (int i=1; i<a->n_dims; ++i) {
        ne[i] = a->ne[i];
    }

    struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, a->n_dims, ne);

    result->op = GGML_OP_SUM_ROWS;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_mean

struct ggml_tensor * ggml_mean(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement
        is_node = true;
    }

    int64_t ne[GGML_MAX_DIMS] = { 1, a->ne[1], a->ne[2], a->ne[3] };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, a->n_dims, ne);

    result->op = GGML_OP_MEAN;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_argmax

struct ggml_tensor * ggml_argmax(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    GGML_ASSERT(ggml_is_matrix(a));

    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false);
        is_node = true;
    }

    int64_t ne[GGML_MAX_DIMS] = { a->ne[1], 1, 1, 1 };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_I32, a->n_dims, ne);

    result->op = GGML_OP_ARGMAX;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_repeat

struct ggml_tensor * ggml_repeat(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    GGML_ASSERT(ggml_can_repeat(a, b));

    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, b->n_dims, b->ne);

    result->op = GGML_OP_REPEAT;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}
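
// Usage sketch: tile a vector to the shape of another tensor; b only supplies
// the target shape (hypothetical names):
//
//     struct ggml_tensor * col  = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 4);    // [4]
//     struct ggml_tensor * dst  = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, 4, 3); // [4,3]
//     struct ggml_tensor * tile = ggml_repeat(ctx0, col, dst);                   // col repeated 3x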

// ggml_repeat_back

struct ggml_tensor * ggml_repeat_back(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    GGML_ASSERT(ggml_can_repeat(b, a));

    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    if (ggml_are_same_shape(a, b) && !is_node) {
        return a;
    }

    struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, b->n_dims, b->ne);

    result->op = GGML_OP_REPEAT_BACK;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_concat

struct ggml_tensor * ggml_concat(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    GGML_ASSERT(a->ne[0] == b->ne[0] && a->ne[1] == b->ne[1] && a->ne[3] == b->ne[3]);

    bool is_node = false;

    if (a->grad || b->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = ggml_new_tensor_4d(ctx, a->type, a->ne[0], a->ne[1], a->ne[2] + b->ne[2], a->ne[3]);

    result->op = GGML_OP_CONCAT;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

// ggml_abs

struct ggml_tensor * ggml_abs(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary(ctx, a, GGML_UNARY_OP_ABS);
}

struct ggml_tensor * ggml_abs_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_ABS);
}

// ggml_sgn

struct ggml_tensor * ggml_sgn(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary(ctx, a, GGML_UNARY_OP_SGN);
}

struct ggml_tensor * ggml_sgn_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_SGN);
}

// ggml_neg

struct ggml_tensor * ggml_neg(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary(ctx, a, GGML_UNARY_OP_NEG);
}

struct ggml_tensor * ggml_neg_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_NEG);
}

// ggml_step

struct ggml_tensor * ggml_step(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary(ctx, a, GGML_UNARY_OP_STEP);
}

struct ggml_tensor * ggml_step_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_STEP);
}

// ggml_tanh

struct ggml_tensor * ggml_tanh(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary(ctx, a, GGML_UNARY_OP_TANH);
}

struct ggml_tensor * ggml_tanh_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_TANH);
}

// ggml_elu

struct ggml_tensor * ggml_elu(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary(ctx, a, GGML_UNARY_OP_ELU);
}

struct ggml_tensor * ggml_elu_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_ELU);
}

// ggml_relu

struct ggml_tensor * ggml_relu(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary(ctx, a, GGML_UNARY_OP_RELU);
}

struct ggml_tensor * ggml_relu_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_RELU);
}

// ggml_gelu

struct ggml_tensor * ggml_gelu(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary(ctx, a, GGML_UNARY_OP_GELU);
}

struct ggml_tensor * ggml_gelu_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_GELU);
}

// ggml_gelu_quick

struct ggml_tensor * ggml_gelu_quick(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary(ctx, a, GGML_UNARY_OP_GELU_QUICK);
}

struct ggml_tensor * ggml_gelu_quick_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_GELU_QUICK);
}

// ggml_silu

struct ggml_tensor * ggml_silu(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary(ctx, a, GGML_UNARY_OP_SILU);
}

struct ggml_tensor * ggml_silu_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_SILU);
}

// ggml_silu_back

struct ggml_tensor * ggml_silu_back(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    bool is_node = false;

    if (a->grad || b->grad) {
        // TODO: implement backward
        is_node = true;
    }

    struct ggml_tensor * result = ggml_dup_tensor(ctx, a);

    result->op = GGML_OP_SILU_BACK;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

// ggml_norm

static struct ggml_tensor * ggml_norm_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        float eps,
        bool inplace) {
    bool is_node = false;

    if (!inplace && (a->grad)) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    ggml_set_op_params(result, &eps, sizeof(eps));

    result->op = GGML_OP_NORM;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_norm(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        float eps) {
    return ggml_norm_impl(ctx, a, eps, false);
}

struct ggml_tensor * ggml_norm_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        float eps) {
    return ggml_norm_impl(ctx, a, eps, true);
}

// ggml_rms_norm

static struct ggml_tensor * ggml_rms_norm_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        float eps,
        bool inplace) {
    bool is_node = false;

    if (!inplace && (a->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    ggml_set_op_params(result, &eps, sizeof(eps));

    result->op = GGML_OP_RMS_NORM;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_rms_norm(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        float eps) {
    return ggml_rms_norm_impl(ctx, a, eps, false);
}

struct ggml_tensor * ggml_rms_norm_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        float eps) {
    return ggml_rms_norm_impl(ctx, a, eps, true);
}

// ggml_rms_norm_back

struct ggml_tensor * ggml_rms_norm_back(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        float eps) {
    bool is_node = false;

    if (a->grad) {
        // TODO: implement backward
        is_node = true;
    }

    struct ggml_tensor * result = ggml_dup_tensor(ctx, a);

    ggml_set_op_params(result, &eps, sizeof(eps));

    result->op = GGML_OP_RMS_NORM_BACK;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

// ggml_group_norm

static struct ggml_tensor * ggml_group_norm_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_groups,
        bool inplace) {
    bool is_node = false;

    if (!inplace && (a->grad)) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op = GGML_OP_GROUP_NORM;
    result->op_params[0] = n_groups;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = NULL; // TODO: maybe store epsilon here?

    return result;
}

struct ggml_tensor * ggml_group_norm(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_groups) {
    return ggml_group_norm_impl(ctx, a, n_groups, false);
}

struct ggml_tensor * ggml_group_norm_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_groups) {
    return ggml_group_norm_impl(ctx, a, n_groups, true);
}

// ggml_mul_mat

struct ggml_tensor * ggml_mul_mat(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    GGML_ASSERT(ggml_can_mul_mat(a, b));
    GGML_ASSERT(!ggml_is_transposed(a));

    bool is_node = false;

    if (a->grad || b->grad) {
        is_node = true;
    }

    const int64_t ne[4] = { a->ne[1], b->ne[1], b->ne[2], b->ne[3] };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, MAX(a->n_dims, b->n_dims), ne);

    result->op = GGML_OP_MUL_MAT;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}
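
// Shape convention sketch for ggml_mul_mat(): ne[0] is the innermost
// (contiguous) dimension, so both operands share ne[0] = K and the result has
// shape [a->ne[1], b->ne[1]]. For a linear layer y = W*x (hypothetical names):
//
//     // W: ne = [n_in, n_out],  x: ne = [n_in, n_tokens]
//     struct ggml_tensor * y = ggml_mul_mat(ctx0, W, x);
//     // y: ne = [n_out, n_tokens], always F32 in this version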

// ggml_out_prod

struct ggml_tensor * ggml_out_prod(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    GGML_ASSERT(ggml_can_out_prod(a, b));
    GGML_ASSERT(!ggml_is_transposed(a));

    bool is_node = false;

    if (a->grad || b->grad) {
        is_node = true;
    }

    // a is broadcastable to b for ne[2] and ne[3] -> use b->ne[2] and b->ne[3]
    const int64_t ne[4] = { a->ne[0], b->ne[0], b->ne[2], b->ne[3] };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, MAX(a->n_dims, b->n_dims), ne);

    result->op = GGML_OP_OUT_PROD;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

// ggml_scale

static struct ggml_tensor * ggml_scale_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        bool inplace) {
    GGML_ASSERT(ggml_is_scalar(b));
    GGML_ASSERT(ggml_is_padded_1d(a));

    bool is_node = false;

    if (a->grad || b->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op = GGML_OP_SCALE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

struct ggml_tensor * ggml_scale(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_scale_impl(ctx, a, b, false);
}

struct ggml_tensor * ggml_scale_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_scale_impl(ctx, a, b, true);
}
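
// Usage sketch: in this version the scale factor is itself a 1-element tensor,
// e.g. scaling attention scores by 1/sqrt(d_head) (hypothetical names):
//
//     struct ggml_tensor * kq_scaled =
//             ggml_scale_inplace(ctx0, kq, ggml_new_f32(ctx0, 1.0f/sqrtf((float)d_head)));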

// ggml_set

static struct ggml_tensor * ggml_set_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        size_t nb1,
        size_t nb2,
        size_t nb3,
        size_t offset,
        bool inplace) {
    GGML_ASSERT(ggml_nelements(a) >= ggml_nelements(b));

    bool is_node = false;

    if (a->grad || b->grad) {
        is_node = true;
    }

    // make a view of the destination
    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    int32_t params[] = { nb1, nb2, nb3, offset, inplace ? 1 : 0 };
    ggml_set_op_params(result, params, sizeof(params));

    result->op = GGML_OP_SET;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

struct ggml_tensor * ggml_set(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        size_t nb1,
        size_t nb2,
        size_t nb3,
        size_t offset) {
    return ggml_set_impl(ctx, a, b, nb1, nb2, nb3, offset, false);
}

struct ggml_tensor * ggml_set_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        size_t nb1,
        size_t nb2,
        size_t nb3,
        size_t offset) {
    return ggml_set_impl(ctx, a, b, nb1, nb2, nb3, offset, true);
}

struct ggml_tensor * ggml_set_1d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        size_t offset) {
    return ggml_set_impl(ctx, a, b, a->nb[1], a->nb[2], a->nb[3], offset, false);
}

struct ggml_tensor * ggml_set_1d_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        size_t offset) {
    return ggml_set_impl(ctx, a, b, a->nb[1], a->nb[2], a->nb[3], offset, true);
}

struct ggml_tensor * ggml_set_2d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        size_t nb1,
        size_t offset) {
    return ggml_set_impl(ctx, a, b, nb1, a->nb[2], a->nb[3], offset, false);
}

struct ggml_tensor * ggml_set_2d_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        size_t nb1,
        size_t offset) {
    return ggml_set_impl(ctx, a, b, nb1, a->nb[2], a->nb[3], offset, true);
}

// ggml_cpy

static struct ggml_tensor * ggml_cpy_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        bool inplace) {
    GGML_ASSERT(ggml_nelements(a) == ggml_nelements(b));

    bool is_node = false;

    if (!inplace && (a->grad || b->grad)) {
        is_node = true;
    }

    // make a view of the destination
    struct ggml_tensor * result = ggml_view_tensor(ctx, b);
    if (strlen(b->name) > 0) {
        ggml_format_name(result, "%s (copy of %s)", b->name, a->name);
    } else {
        ggml_format_name(result, "%s (copy)", a->name);
    }

    result->op = GGML_OP_CPY;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

struct ggml_tensor * ggml_cpy(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_cpy_impl(ctx, a, b, false);
}

struct ggml_tensor * ggml_cpy_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_cpy_impl(ctx, a, b, true);
}
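
// Usage sketch: ggml_cpy() copies a's data into b's type and layout, so it
// doubles as a type cast; e.g. storing F32 activations into an F16 cache slot
// (hypothetical names; element counts must match):
//
//     struct ggml_tensor * k_f16 = ggml_new_tensor_1d(ctx0, GGML_TYPE_F16, n_embd);
//     struct ggml_tensor * k_cur = ggml_cpy(ctx0, k_f32, k_f16); // F32 -> F16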

// ggml_cont

static struct ggml_tensor * ggml_cont_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        bool inplace) {
    bool is_node = false;

    if (!inplace && a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
    ggml_format_name(result, "%s (cont)", a->name);

    result->op = GGML_OP_CONT;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_cont(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_cont_impl(ctx, a, false);
}

struct ggml_tensor * ggml_cont_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_cont_impl(ctx, a, true);
}

// make contiguous, with new shape
GGML_API struct ggml_tensor * ggml_cont_1d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0) {
    return ggml_cont_4d(ctx, a, ne0, 1, 1, 1);
}

GGML_API struct ggml_tensor * ggml_cont_2d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        int64_t ne1) {
    return ggml_cont_4d(ctx, a, ne0, ne1, 1, 1);
}

GGML_API struct ggml_tensor * ggml_cont_3d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        int64_t ne1,
        int64_t ne2) {
    return ggml_cont_4d(ctx, a, ne0, ne1, ne2, 1);
}

struct ggml_tensor * ggml_cont_4d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        int64_t ne1,
        int64_t ne2,
        int64_t ne3) {
    GGML_ASSERT(ggml_nelements(a) == (ne0*ne1*ne2*ne3));

    bool is_node = false;

    struct ggml_tensor * result = ggml_new_tensor_4d(ctx, a->type, ne0, ne1, ne2, ne3);
    ggml_format_name(result, "%s (cont)", a->name);

    result->op = GGML_OP_CONT;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_reshape

struct ggml_tensor * ggml_reshape(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    GGML_ASSERT(ggml_is_contiguous(a));
    // as only the shape of b is relevant, and not its memory layout, b is allowed to be non-contiguous.
    GGML_ASSERT(ggml_nelements(a) == ggml_nelements(b));

    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    if (b->grad) {
        // gradient propagation is not supported
        //GGML_ASSERT(false);
    }

    struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, b->n_dims, b->ne, a, 0);
    ggml_format_name(result, "%s (reshaped)", a->name);

    result->op = GGML_OP_RESHAPE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_reshape_1d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0) {
    GGML_ASSERT(ggml_is_contiguous(a));
    GGML_ASSERT(ggml_nelements(a) == ne0);

    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    const int64_t ne[1] = { ne0 };
    struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 1, ne, a, 0);
    ggml_format_name(result, "%s (reshaped)", a->name);

    result->op = GGML_OP_RESHAPE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_reshape_2d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        int64_t ne1) {
    GGML_ASSERT(ggml_is_contiguous(a));
    GGML_ASSERT(ggml_nelements(a) == ne0*ne1);

    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    const int64_t ne[2] = { ne0, ne1 };
    struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 2, ne, a, 0);
    ggml_format_name(result, "%s (reshaped)", a->name);

    result->op = GGML_OP_RESHAPE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_reshape_3d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        int64_t ne1,
        int64_t ne2) {
    GGML_ASSERT(ggml_is_contiguous(a));
    GGML_ASSERT(ggml_nelements(a) == ne0*ne1*ne2);

    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    const int64_t ne[3] = { ne0, ne1, ne2 };
    struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 3, ne, a, 0);
    ggml_format_name(result, "%s (reshaped)", a->name);

    result->op = GGML_OP_RESHAPE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_reshape_4d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        int64_t ne1,
        int64_t ne2,
        int64_t ne3) {
    GGML_ASSERT(ggml_is_contiguous(a));
    GGML_ASSERT(ggml_nelements(a) == ne0*ne1*ne2*ne3);

    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    const int64_t ne[4] = { ne0, ne1, ne2, ne3 };
    struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 4, ne, a, 0);
    ggml_format_name(result, "%s (reshaped)", a->name);

    result->op = GGML_OP_RESHAPE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}
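
// Usage sketch: reinterpreting a contiguous tensor's shape without copying
// (hypothetical names; element counts must match exactly):
//
//     struct ggml_tensor * t  = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 16);
//     struct ggml_tensor * t2 = ggml_reshape_2d(ctx0, t, 8, 2); // [8,2] view of the same data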

static struct ggml_tensor * ggml_view_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_dims,
        const int64_t * ne,
        size_t offset) {
    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, n_dims, ne, a, offset);
    ggml_format_name(result, "%s (view)", a->name);

    ggml_set_op_params(result, &offset, sizeof(offset));

    result->op = GGML_OP_VIEW;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_view_1d

struct ggml_tensor * ggml_view_1d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        size_t offset) {
    struct ggml_tensor * result = ggml_view_impl(ctx, a, 1, &ne0, offset);

    return result;
}

// ggml_view_2d

struct ggml_tensor * ggml_view_2d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        int64_t ne1,
        size_t nb1,
        size_t offset) {
    const int64_t ne[2] = { ne0, ne1 };

    struct ggml_tensor * result = ggml_view_impl(ctx, a, 2, ne, offset);

    result->nb[1] = nb1;
    result->nb[2] = result->nb[1]*ne1;
    result->nb[3] = result->nb[2];

    return result;
}

// ggml_view_3d

struct ggml_tensor * ggml_view_3d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        int64_t ne1,
        int64_t ne2,
        size_t nb1,
        size_t nb2,
        size_t offset) {
    const int64_t ne[3] = { ne0, ne1, ne2 };

    struct ggml_tensor * result = ggml_view_impl(ctx, a, 3, ne, offset);

    result->nb[1] = nb1;
    result->nb[2] = nb2;
    result->nb[3] = result->nb[2]*ne2;

    return result;
}

// ggml_view_4d

struct ggml_tensor * ggml_view_4d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        int64_t ne1,
        int64_t ne2,
        int64_t ne3,
        size_t nb1,
        size_t nb2,
        size_t nb3,
        size_t offset) {
    const int64_t ne[4] = { ne0, ne1, ne2, ne3 };

    struct ggml_tensor * result = ggml_view_impl(ctx, a, 4, ne, offset);

    result->nb[1] = nb1;
    result->nb[2] = nb2;
    result->nb[3] = nb3;

    return result;
}
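
// Usage sketch: views share the parent's buffer; only ne/nb/offset change.
// Taking the first n rows of a matrix (hypothetical names; offset in bytes):
//
//     struct ggml_tensor * head = ggml_view_2d(ctx0, m,
//             m->ne[0], n,  // keep the row width, take n rows
//             m->nb[1],     // reuse the parent's row stride
//             0);           // start at row 0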

// ggml_permute

struct ggml_tensor * ggml_permute(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int axis0,
        int axis1,
        int axis2,
        int axis3) {
    GGML_ASSERT(axis0 >= 0 && axis0 < GGML_MAX_DIMS);
    GGML_ASSERT(axis1 >= 0 && axis1 < GGML_MAX_DIMS);
    GGML_ASSERT(axis2 >= 0 && axis2 < GGML_MAX_DIMS);
    GGML_ASSERT(axis3 >= 0 && axis3 < GGML_MAX_DIMS);

    GGML_ASSERT(axis0 != axis1);
    GGML_ASSERT(axis0 != axis2);
    GGML_ASSERT(axis0 != axis3);
    GGML_ASSERT(axis1 != axis2);
    GGML_ASSERT(axis1 != axis3);
    GGML_ASSERT(axis2 != axis3);

    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = ggml_view_tensor(ctx, a);
    ggml_format_name(result, "%s (permuted)", a->name);

    int64_t ne[GGML_MAX_DIMS];
    size_t  nb[GGML_MAX_DIMS];

    ne[axis0] = a->ne[0];
    ne[axis1] = a->ne[1];
    ne[axis2] = a->ne[2];
    ne[axis3] = a->ne[3];

    nb[axis0] = a->nb[0];
    nb[axis1] = a->nb[1];
    nb[axis2] = a->nb[2];
    nb[axis3] = a->nb[3];

    result->ne[0] = ne[0];
    result->ne[1] = ne[1];
    result->ne[2] = ne[2];
    result->ne[3] = ne[3];

    result->nb[0] = nb[0];
    result->nb[1] = nb[1];
    result->nb[2] = nb[2];
    result->nb[3] = nb[3];

    result->op = GGML_OP_PERMUTE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    int32_t params[] = { axis0, axis1, axis2, axis3 };
    ggml_set_op_params(result, params, sizeof(params));

    return result;
}
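
// Usage sketch: axisN gives the destination position of source dimension N.
// The common attention reordering from [head_dim, n_head, n_tokens] to
// [head_dim, n_tokens, n_head] swaps dims 1 and 2 (hypothetical names):
//
//     struct ggml_tensor * q = ggml_permute(ctx0, qcur, 0, 2, 1, 3);
//
// Only ne/nb bookkeeping changes; follow up with ggml_cont() if a contiguous
// layout is required.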

// ggml_transpose

struct ggml_tensor * ggml_transpose(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = ggml_view_tensor(ctx, a);
    ggml_format_name(result, "%s (transposed)", a->name);

    result->ne[0] = a->ne[1];
    result->ne[1] = a->ne[0];

    result->nb[0] = a->nb[1];
    result->nb[1] = a->nb[0];

    result->op = GGML_OP_TRANSPOSE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_get_rows

struct ggml_tensor * ggml_get_rows(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    GGML_ASSERT(ggml_is_matrix(a) && ggml_is_vector(b) && b->type == GGML_TYPE_I32);

    bool is_node = false;

    if (a->grad || b->grad) {
        is_node = true;
    }

    // TODO: implement non F32 return
    //struct ggml_tensor * result = ggml_new_tensor_2d(ctx, a->type, a->ne[0], b->ne[0]);
    struct ggml_tensor * result = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, a->ne[0], b->ne[0]);

    result->op = GGML_OP_GET_ROWS;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}
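
// Usage sketch: embedding lookup. b holds I32 row indices into a, and the
// gathered rows come back as F32 (hypothetical names):
//
//     // tok_embd: ne = [n_embd, n_vocab], inp_tokens: I32, ne = [n_tokens]
//     struct ggml_tensor * cur = ggml_get_rows(ctx0, tok_embd, inp_tokens);
//     // cur: ne = [n_embd, n_tokens]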

// ggml_get_rows_back

struct ggml_tensor * ggml_get_rows_back(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        struct ggml_tensor * c) {
    GGML_ASSERT(ggml_is_matrix(a) && ggml_is_vector(b) && b->type == GGML_TYPE_I32);
    GGML_ASSERT(ggml_is_matrix(c) && (a->ne[0] == c->ne[0]));

    bool is_node = false;

    if (a->grad || b->grad) {
        is_node = true;
    }

    // TODO: implement non F32 return
    //struct ggml_tensor * result = ggml_new_tensor_2d(ctx, a->type, a->ne[0], b->ne[0]);
    struct ggml_tensor * result = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, c->ne[0], c->ne[1]);

    result->op = GGML_OP_GET_ROWS_BACK;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

// ggml_diag

struct ggml_tensor * ggml_diag(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    GGML_ASSERT(a->ne[1] == 1);

    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    const int64_t ne[4] = { a->ne[0], a->ne[0], a->ne[2], a->ne[3] };
    struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, MAX(a->n_dims, 2), ne);

    result->op = GGML_OP_DIAG;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_diag_mask_inf

static struct ggml_tensor * ggml_diag_mask_inf_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_past,
        bool inplace) {
    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    int32_t params[] = { n_past };
    ggml_set_op_params(result, params, sizeof(params));

    result->op = GGML_OP_DIAG_MASK_INF;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_diag_mask_inf(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_past) {
    return ggml_diag_mask_inf_impl(ctx, a, n_past, false);
}

struct ggml_tensor * ggml_diag_mask_inf_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_past) {
    return ggml_diag_mask_inf_impl(ctx, a, n_past, true);
}

// ggml_diag_mask_zero

static struct ggml_tensor * ggml_diag_mask_zero_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_past,
        bool inplace) {
    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    int32_t params[] = { n_past };
    ggml_set_op_params(result, params, sizeof(params));

    result->op = GGML_OP_DIAG_MASK_ZERO;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_diag_mask_zero(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_past) {
    return ggml_diag_mask_zero_impl(ctx, a, n_past, false);
}

struct ggml_tensor * ggml_diag_mask_zero_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_past) {
    return ggml_diag_mask_zero_impl(ctx, a, n_past, true);
}

// ggml_soft_max

static struct ggml_tensor * ggml_soft_max_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        bool inplace) {
    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op = GGML_OP_SOFT_MAX;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_soft_max(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_soft_max_impl(ctx, a, false);
}

struct ggml_tensor * ggml_soft_max_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_soft_max_impl(ctx, a, true);
}
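
// Usage sketch: the classic causal-attention combination of the two ops above,
// masking future positions to -inf and then normalizing (hypothetical names):
//
//     kq = ggml_diag_mask_inf_inplace(ctx0, kq, n_past);
//     kq = ggml_soft_max_inplace(ctx0, kq);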

// ggml_soft_max_back

static struct ggml_tensor * ggml_soft_max_back_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        bool inplace) {
    bool is_node = false;

    if (a->grad || b->grad) {
        is_node = true; // TODO: implement backward pass
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op = GGML_OP_SOFT_MAX_BACK;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

struct ggml_tensor * ggml_soft_max_back(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_soft_max_back_impl(ctx, a, b, false);
}

struct ggml_tensor * ggml_soft_max_back_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_soft_max_back_impl(ctx, a, b, true);
}

// ggml_rope

static struct ggml_tensor * ggml_rope_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        int n_dims,
        int mode,
        int n_ctx,
        float freq_base,
        float freq_scale,
        float xpos_base,
        bool xpos_down,
        bool inplace) {
    GGML_ASSERT(ggml_is_vector(b));
    GGML_ASSERT(b->type == GGML_TYPE_I32);
    GGML_ASSERT(a->ne[2] == b->ne[0]);

    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    int32_t params[8] = { /*n_past*/ 0, n_dims, mode, n_ctx };
    memcpy(params + 4, &freq_base,  sizeof(float));
    memcpy(params + 5, &freq_scale, sizeof(float));
    memcpy(params + 6, &xpos_base,  sizeof(float));
    memcpy(params + 7, &xpos_down,  sizeof(bool));
    ggml_set_op_params(result, params, sizeof(params));

    result->op = GGML_OP_ROPE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

struct ggml_tensor * ggml_rope(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        int n_dims,
        int mode,
        int n_ctx) {
    return ggml_rope_impl(ctx, a, b, n_dims, mode, n_ctx, 10000.0f, 1.0f, 0.0f, false, false);
}

struct ggml_tensor * ggml_rope_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        int n_dims,
        int mode,
        int n_ctx) {
    return ggml_rope_impl(ctx, a, b, n_dims, mode, n_ctx, 10000.0f, 1.0f, 0.0f, false, true);
}

struct ggml_tensor * ggml_rope_custom(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        int n_dims,
        int mode,
        int n_ctx,
        float freq_base,
        float freq_scale) {
    return ggml_rope_impl(ctx, a, b, n_dims, mode, n_ctx, freq_base, freq_scale, 0.0f, false, false);
}

struct ggml_tensor * ggml_rope_custom_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        int n_dims,
        int mode,
        int n_ctx,
        float freq_base,
        float freq_scale) {
    return ggml_rope_impl(ctx, a, b, n_dims, mode, n_ctx, freq_base, freq_scale, 0.0f, false, true);
}

struct ggml_tensor * ggml_rope_xpos_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        int n_dims,
        float base,
        bool down) {
    return ggml_rope_impl(ctx, a, b, n_dims, 0, 0, 10000.0f, 1.0f, base, down, true);
}
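
// Usage sketch: in this version b is an I32 vector of positions, one entry per
// slot along a's third dimension (hypothetical names; the positions would
// typically be filled with n_past + 0, 1, ..., n_tokens - 1):
//
//     struct ggml_tensor * pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
//     cur = ggml_rope_inplace(ctx0, cur, pos, n_rot, /*mode*/ 0, /*n_ctx*/ 0);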

// ggml_rope_back

struct ggml_tensor * ggml_rope_back(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        int n_dims,
        int mode,
        int n_ctx,
        float freq_base,
        float freq_scale,
        float xpos_base,
        bool xpos_down) {
    GGML_ASSERT(ggml_is_vector(b));
    GGML_ASSERT(b->type == GGML_TYPE_I32);
    GGML_ASSERT(a->ne[2] == b->ne[0]);

    GGML_ASSERT((mode & 4) == 0 && "ggml_rope_back() for ChatGLM not implemented yet");

    bool is_node = false;

    if (a->grad) {
        is_node = false; // TODO: implement backward
    }

    struct ggml_tensor * result = ggml_dup_tensor(ctx, a);

    int32_t params[8] = { /*n_past*/ 0, n_dims, mode, n_ctx };
    memcpy(params + 4, &freq_base,  sizeof(float));
    memcpy(params + 5, &freq_scale, sizeof(float));
    memcpy(params + 6, &xpos_base,  sizeof(float));
    memcpy(params + 7, &xpos_down,  sizeof(bool));
    ggml_set_op_params(result, params, sizeof(params));

    result->op = GGML_OP_ROPE_BACK;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

// ggml_alibi

struct ggml_tensor * ggml_alibi(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_past,
        int n_head,
        float bias_max) {
    GGML_ASSERT(n_past >= 0);

    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    // TODO: when implementing the backward pass, fix this:
    //struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
    struct ggml_tensor * result = ggml_view_tensor(ctx, a);

    int32_t op_params[3] = { n_past, n_head };
    memcpy(op_params + 2, &bias_max, sizeof(float));
    ggml_set_op_params(result, op_params, sizeof(op_params));

    result->op = GGML_OP_ALIBI;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_clamp

struct ggml_tensor * ggml_clamp(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        float                 min,
        float                 max) {
    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    // TODO: when implementing backward, fix this:
    struct ggml_tensor * result = ggml_view_tensor(ctx, a);

    float params[] = { min, max };
    ggml_set_op_params(result, params, sizeof(params));

    result->op   = GGML_OP_CLAMP;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}
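
// usage sketch (illustrative): since ggml_clamp stores {min, max} as op params
// and returns a view, it is effectively an in-place op on the graph. clamping
// attention scores to [-30, 30] here is an assumed example, not upstream code.
static struct ggml_tensor * example_clamp_scores(
        struct ggml_context * ctx,
        struct ggml_tensor  * kq) {
    return ggml_clamp(ctx, kq, -30.0f, 30.0f);
}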

// ggml_conv_1d

static int64_t ggml_calc_conv_output_size(int64_t ins, int64_t ks, int s, int p, int d) {
    return (ins + 2 * p - d * (ks - 1) - 1) / s + 1;
}

GGML_API struct ggml_tensor * ggml_conv_1d(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        int                   s0,
        int                   p0,
        int                   d0) {
    GGML_ASSERT(ggml_is_matrix(b));
    GGML_ASSERT(a->ne[1] == b->ne[1]);

    bool is_node = false;

    if (a->grad || b->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    const int64_t ne[4] = {
        ggml_calc_conv_output_size(b->ne[0], a->ne[0], s0, p0, d0),
        a->ne[2], 1, 1,
    };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 2, ne);

    int32_t params[] = { s0, p0, d0 };
    ggml_set_op_params(result, params, sizeof(params));

    result->op   = GGML_OP_CONV_1D;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}
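
// worked example for ggml_calc_conv_output_size (illustrative): ins = 10,
// ks = 3, s = 1, p = 1, d = 1 gives (10 + 2*1 - 1*(3 - 1) - 1)/1 + 1 = 10,
// i.e. "same" output length; with p = 0 it would shrink to 8.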

// ggml_conv_1d_ph

struct ggml_tensor * ggml_conv_1d_ph(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        int                   s,
        int                   d) {
    return ggml_conv_1d(ctx, a, b, s, a->ne[0] / 2, d);
}

// ggml_conv_2d

struct ggml_tensor * ggml_conv_2d(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        int                   s0,
        int                   s1,
        int                   p0,
        int                   p1,
        int                   d0,
        int                   d1) {
    GGML_ASSERT(a->ne[2] == b->ne[2]);

    bool is_node = false;

    if (a->grad || b->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    const int64_t ne[4] = {
        ggml_calc_conv_output_size(b->ne[0], a->ne[0], s0, p0, d0),
        ggml_calc_conv_output_size(b->ne[1], a->ne[1], s1, p1, d1),
        a->ne[3], b->ne[3],
    };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);

    int32_t params[] = { s0, s1, p0, p1, d0, d1 };
    ggml_set_op_params(result, params, sizeof(params));

    result->op   = GGML_OP_CONV_2D;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}
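
// shape sketch (illustrative): with kernels a = [KW, KH, C, OC] and input
// b = [W, H, C, N] the result is [OW, OH, OC, N]; e.g. a 3x3 kernel over a
// 224x224 input with s0 = s1 = 1, p0 = p1 = 1, d0 = d1 = 1 keeps OW = OH = 224.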

// ggml_conv_2d_sk_p0

struct ggml_tensor * ggml_conv_2d_sk_p0(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b) {
    return ggml_conv_2d(ctx, a, b, a->ne[0], a->ne[1], 0, 0, 1, 1);
}

// ggml_conv_2d_s1_ph

struct ggml_tensor * ggml_conv_2d_s1_ph(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b) {
    return ggml_conv_2d(ctx, a, b, 1, 1, a->ne[0] / 2, a->ne[1] / 2, 1, 1);
}

// ggml_conv_transpose_2d_p0

static int64_t ggml_calc_conv_transpose_output_size(int64_t ins, int64_t ks, int s, int p) {
    return (ins - 1) * s - 2 * p + ks;
}

struct ggml_tensor * ggml_conv_transpose_2d_p0(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        int                   stride) {
    GGML_ASSERT(a->ne[3] == b->ne[2]);

    bool is_node = false;

    if (a->grad || b->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    const int64_t ne[4] = {
        ggml_calc_conv_transpose_output_size(b->ne[0], a->ne[0], stride, 0 /*p0*/),
        ggml_calc_conv_transpose_output_size(b->ne[1], a->ne[1], stride, 0 /*p1*/),
        a->ne[2], b->ne[3],
    };

    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);

    ggml_set_op_params_i32(result, 0, stride);

    result->op   = GGML_OP_CONV_TRANSPOSE_2D;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

// ggml_pool_*

static int64_t ggml_calc_pool_output_size(int64_t ins, int ks, int s, int p) {
    return (ins + 2 * p - ks) / s + 1;
}

// ggml_pool_1d

struct ggml_tensor * ggml_pool_1d(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        enum ggml_op_pool     op,
        int                   k0,
        int                   s0,
        int                   p0) {
    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    const int64_t ne[3] = {
        ggml_calc_pool_output_size(a->ne[0], k0, s0, p0),
        a->ne[1],
    };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 2, ne);

    int32_t params[] = { op, k0, s0, p0 };
    ggml_set_op_params(result, params, sizeof(params));

    result->op   = GGML_OP_POOL_1D;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_pool_2d

struct ggml_tensor * ggml_pool_2d(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        enum ggml_op_pool     op,
        int                   k0,
        int                   k1,
        int                   s0,
        int                   s1,
        int                   p0,
        int                   p1) {
    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    const int64_t ne[3] = {
        ggml_calc_pool_output_size(a->ne[0], k0, s0, p0),
        ggml_calc_pool_output_size(a->ne[1], k1, s1, p1),
        a->ne[2],
    };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 3, ne);

    int32_t params[] = { op, k0, k1, s0, s1, p0, p1 };
    ggml_set_op_params(result, params, sizeof(params));

    result->op   = GGML_OP_POOL_2D;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}
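
// worked example for ggml_calc_pool_output_size (illustrative): 2x2 pooling
// with k = s = 2 and p = 0 over 224 inputs gives (224 + 0 - 2)/2 + 1 = 112,
// the usual halving of each pooled axis.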

// ggml_upscale

static struct ggml_tensor * ggml_upscale_impl(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        int                   scale_factor) {
    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    struct ggml_tensor * result = ggml_new_tensor_4d(ctx, a->type,
            a->ne[0] * scale_factor,
            a->ne[1] * scale_factor,
            a->ne[2], a->ne[3]);

    result->op = GGML_OP_UPSCALE;
    result->op_params[0] = scale_factor;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = NULL;

    return result;
}

struct ggml_tensor * ggml_upscale(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        int                   scale_factor) {
    return ggml_upscale_impl(ctx, a, scale_factor);
}

// ggml_flash_attn

struct ggml_tensor * ggml_flash_attn(
        struct ggml_context * ctx,
        struct ggml_tensor  * q,
        struct ggml_tensor  * k,
        struct ggml_tensor  * v,
        bool                  masked) {
    GGML_ASSERT(ggml_can_mul_mat(k, q));
    // TODO: check if vT can be multiplied by (k*qT)

    bool is_node = false;

    if (q->grad || k->grad || v->grad) {
        is_node = true;
    }

    //struct ggml_tensor * result = ggml_dup_tensor(ctx, q);
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, q->n_dims, q->ne);

    int32_t t = masked ? 1 : 0;
    ggml_set_op_params(result, &t, sizeof(t));

    result->op   = GGML_OP_FLASH_ATTN;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = q;
    result->src[1] = k;
    result->src[2] = v;

    return result;
}
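
// usage sketch (illustrative, not part of upstream ggml.c): fused attention
// over q/k/v laid out as in the shape comments of ggml_flash_attn_back below;
// masked = true requests causal masking. note that v is expected transposed
// relative to k.
static struct ggml_tensor * example_flash_attn(
        struct ggml_context * ctx,
        struct ggml_tensor  * q,   // [D, N, n_head, 1]
        struct ggml_tensor  * k,   // [D, M, n_head, 1]
        struct ggml_tensor  * v) { // [M, D, n_head, 1]
    return ggml_flash_attn(ctx, q, k, v, /*masked =*/ true);
}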

// ggml_flash_ff

struct ggml_tensor * ggml_flash_ff(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b0,
        struct ggml_tensor  * b1,
        struct ggml_tensor  * c0,
        struct ggml_tensor  * c1) {
    GGML_ASSERT(ggml_can_mul_mat(b0, a));
    // TODO: more checks

    bool is_node = false;

    if (a->grad || b0->grad || b1->grad || c0->grad || c1->grad) {
        is_node = true;
    }

    //struct ggml_tensor * result = ggml_dup_tensor(ctx, a);
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, a->n_dims, a->ne);

    result->op   = GGML_OP_FLASH_FF;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b0;
    result->src[2] = b1;
    result->src[3] = c0;
    result->src[4] = c1;

    return result;
}

// ggml_flash_attn_back

struct ggml_tensor * ggml_flash_attn_back(
        struct ggml_context * ctx,
        struct ggml_tensor  * q,
        struct ggml_tensor  * k,
        struct ggml_tensor  * v,
        struct ggml_tensor  * d,
        bool                  masked) {
    GGML_ASSERT(ggml_can_mul_mat(k, q));
    // TODO: check if vT can be multiplied by (k*qT)

    // d shape [D,N,ne2,ne3]
    // q shape [D,N,ne2,ne3]
    // k shape [D,M,kvne2,ne3]
    // v shape [M,D,kvne2,ne3]

    const int64_t     D = q->ne[0];
    const int64_t     N = q->ne[1];
    const int64_t     M = k->ne[1];
    const int64_t   ne2 = q->ne[2];
    const int64_t   ne3 = q->ne[3];
    const int64_t kvne2 = k->ne[2];

    GGML_ASSERT(k->ne[0] == D);
    GGML_ASSERT(v->ne[0] == M);
    GGML_ASSERT(v->ne[1] == D);
    GGML_ASSERT(d->ne[0] == D);
    GGML_ASSERT(d->ne[1] == N);
    GGML_ASSERT(k->ne[2] == kvne2);
    GGML_ASSERT(k->ne[3] == ne3);
    GGML_ASSERT(v->ne[2] == kvne2);
    GGML_ASSERT(v->ne[3] == ne3);
    GGML_ASSERT(d->ne[2] == ne2);
    GGML_ASSERT(d->ne[3] == ne3);

    GGML_ASSERT(ne2 % kvne2 == 0);

    bool is_node = false;

    if (q->grad || k->grad || v->grad) {
        // when using this operation (in the backward pass) these grads are set.
        // we don't want to create (big) grad of our result, so is_node is false.
        is_node = false;
    }

    // store gradients of q, k and v as contiguous tensors concatenated in result.
    // note: v and gradv are actually transposed, i.e. v->ne[0] != D.
    const int64_t elem_q = ggml_nelements(q);
    const int64_t elem_k = ggml_nelements(k);
    const int64_t elem_v = ggml_nelements(v);

    enum ggml_type result_type = GGML_TYPE_F32;
    GGML_ASSERT(ggml_blck_size(result_type) == 1);
    const size_t tsize = ggml_type_size(result_type);

    const size_t offs_q = 0;
    const size_t offs_k = offs_q + GGML_PAD(elem_q * tsize, GGML_MEM_ALIGN);
    const size_t offs_v = offs_k + GGML_PAD(elem_k * tsize, GGML_MEM_ALIGN);
    const size_t end    = offs_v + GGML_PAD(elem_v * tsize, GGML_MEM_ALIGN);

    const size_t nelements = (end + tsize - 1)/tsize;

    struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nelements);

    int32_t masked_i = masked ? 1 : 0;
    ggml_set_op_params(result, &masked_i, sizeof(masked_i));

    result->op   = GGML_OP_FLASH_ATTN_BACK;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = q;
    result->src[1] = k;
    result->src[2] = v;
    result->src[3] = d;

    return result;
}
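
// layout sketch for the packed result above (illustrative): with elem_q = 1000
// F32 values and GGML_MEM_ALIGN = 16, offs_q = 0 and offs_k = GGML_PAD(4000, 16)
// = 4000 bytes, so grad(k) starts right after grad(q) on an aligned boundary.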

// ggml_win_part

struct ggml_tensor * ggml_win_part(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        int                   w) {
    GGML_ASSERT(a->ne[3] == 1);
    GGML_ASSERT(a->type == GGML_TYPE_F32);

    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    // padding
    const int px = (w - a->ne[1]%w)%w;
    const int py = (w - a->ne[2]%w)%w;

    const int npx = (px + a->ne[1])/w;
    const int npy = (py + a->ne[2])/w;
    const int np  = npx*npy;

    const int64_t ne[4] = { a->ne[0], w, w, np, };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);

    int32_t params[] = { npx, npy, w };
    ggml_set_op_params(result, params, sizeof(params));

    result->op   = GGML_OP_WIN_PART;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_win_unpart

struct ggml_tensor * ggml_win_unpart(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        int                   w0,
        int                   h0,
        int                   w) {
    GGML_ASSERT(a->type == GGML_TYPE_F32);

    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    const int64_t ne[4] = { a->ne[0], w0, h0, 1, };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 3, ne);

    int32_t params[] = { w };
    ggml_set_op_params(result, params, sizeof(params));

    result->op   = GGML_OP_WIN_UNPART;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

// ggml_get_rel_pos

struct ggml_tensor * ggml_get_rel_pos(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        int                   qh,
        int                   kh) {
    GGML_ASSERT(qh == kh);
    GGML_ASSERT(2*MAX(qh, kh) - 1 == a->ne[1]);

    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    const int64_t ne[4] = { a->ne[0], kh, qh, 1, };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F16, 3, ne);

    result->op   = GGML_OP_GET_REL_POS;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = NULL;

    return result;
}

// ggml_add_rel_pos

static struct ggml_tensor * ggml_add_rel_pos_impl(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * pw,
        struct ggml_tensor  * ph,
        bool                  inplace) {
    GGML_ASSERT(ggml_are_same_shape(pw, ph));
    GGML_ASSERT(ggml_is_contiguous(a));
    GGML_ASSERT(ggml_is_contiguous(pw));
    GGML_ASSERT(ggml_is_contiguous(ph));
    GGML_ASSERT(ph->type == GGML_TYPE_F32);
    GGML_ASSERT(pw->type == GGML_TYPE_F32);
    GGML_ASSERT(pw->ne[3] == a->ne[2]);
    GGML_ASSERT(pw->ne[0]*pw->ne[0] == a->ne[0]);
    GGML_ASSERT(pw->ne[1]*pw->ne[2] == a->ne[1]);

    bool is_node = false;

    if (!inplace && (a->grad || pw->grad || ph->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
    ggml_set_op_params_i32(result, 0, inplace ? 1 : 0);

    result->op   = GGML_OP_ADD_REL_POS;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = pw;
    result->src[2] = ph;

    return result;
}

struct ggml_tensor * ggml_add_rel_pos(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * pw,
        struct ggml_tensor  * ph) {
    return ggml_add_rel_pos_impl(ctx, a, pw, ph, false);
}

struct ggml_tensor * ggml_add_rel_pos_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * pw,
        struct ggml_tensor  * ph) {
    return ggml_add_rel_pos_impl(ctx, a, pw, ph, true);
}

// ggml_unary

static struct ggml_tensor * ggml_unary_impl(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        enum ggml_unary_op    op,
        bool                  inplace) {
    bool is_node = false;

    if (!inplace && (a->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    ggml_set_op_params_i32(result, 0, (int32_t) op);

    result->op   = GGML_OP_UNARY;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_unary(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        enum ggml_unary_op    op) {
    return ggml_unary_impl(ctx, a, op, false);
}

struct ggml_tensor * ggml_unary_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        enum ggml_unary_op    op) {
    return ggml_unary_impl(ctx, a, op, true);
}

// ggml_map_unary

static struct ggml_tensor * ggml_map_unary_impl_f32(
        struct ggml_context        * ctx,
        struct ggml_tensor         * a,
        const  ggml_unary_op_f32_t   fun,
        bool                         inplace) {
    bool is_node = false;

    if (!inplace && a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    ggml_set_op_params(result, (const void *) &fun, sizeof(fun));

    result->op   = GGML_OP_MAP_UNARY;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_map_unary_f32(
        struct ggml_context        * ctx,
        struct ggml_tensor         * a,
        const  ggml_unary_op_f32_t   fun) {
    return ggml_map_unary_impl_f32(ctx, a, fun, false);
}

struct ggml_tensor * ggml_map_unary_inplace_f32(
        struct ggml_context        * ctx,
        struct ggml_tensor         * a,
        const  ggml_unary_op_f32_t   fun) {
    return ggml_map_unary_impl_f32(ctx, a, fun, true);
}

// ggml_map_binary

static struct ggml_tensor * ggml_map_binary_impl_f32(
        struct ggml_context         * ctx,
        struct ggml_tensor          * a,
        struct ggml_tensor          * b,
        const  ggml_binary_op_f32_t   fun,
        bool                          inplace) {
    GGML_ASSERT(ggml_are_same_shape(a, b));

    bool is_node = false;

    if (!inplace && (a->grad || b->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    ggml_set_op_params(result, (const void *) &fun, sizeof(fun));

    result->op   = GGML_OP_MAP_BINARY;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

struct ggml_tensor * ggml_map_binary_f32(
        struct ggml_context         * ctx,
        struct ggml_tensor          * a,
        struct ggml_tensor          * b,
        const  ggml_binary_op_f32_t   fun) {
    return ggml_map_binary_impl_f32(ctx, a, b, fun, false);
}

struct ggml_tensor * ggml_map_binary_inplace_f32(
        struct ggml_context         * ctx,
        struct ggml_tensor          * a,
        struct ggml_tensor          * b,
        const  ggml_binary_op_f32_t   fun) {
    return ggml_map_binary_impl_f32(ctx, a, b, fun, true);
}

// ggml_map_custom1_f32

static struct ggml_tensor * ggml_map_custom1_impl_f32(
        struct ggml_context          * ctx,
        struct ggml_tensor           * a,
        const  ggml_custom1_op_f32_t   fun,
        bool                           inplace) {
    bool is_node = false;

    if (!inplace && a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    ggml_set_op_params(result, (const void *) &fun, sizeof(fun));

    result->op   = GGML_OP_MAP_CUSTOM1_F32;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_map_custom1_f32(
        struct ggml_context          * ctx,
        struct ggml_tensor           * a,
        const  ggml_custom1_op_f32_t   fun) {
    return ggml_map_custom1_impl_f32(ctx, a, fun, false);
}

struct ggml_tensor * ggml_map_custom1_inplace_f32(
        struct ggml_context          * ctx,
        struct ggml_tensor           * a,
        const  ggml_custom1_op_f32_t   fun) {
    return ggml_map_custom1_impl_f32(ctx, a, fun, true);
}

// ggml_map_custom2_f32

static struct ggml_tensor * ggml_map_custom2_impl_f32(
        struct ggml_context          * ctx,
        struct ggml_tensor           * a,
        struct ggml_tensor           * b,
        const  ggml_custom2_op_f32_t   fun,
        bool                           inplace) {
    bool is_node = false;

    if (!inplace && (a->grad || b->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    ggml_set_op_params(result, (const void *) &fun, sizeof(fun));

    result->op   = GGML_OP_MAP_CUSTOM2_F32;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

struct ggml_tensor * ggml_map_custom2_f32(
        struct ggml_context          * ctx,
        struct ggml_tensor           * a,
        struct ggml_tensor           * b,
        const  ggml_custom2_op_f32_t   fun) {
    return ggml_map_custom2_impl_f32(ctx, a, b, fun, false);
}

struct ggml_tensor * ggml_map_custom2_inplace_f32(
        struct ggml_context          * ctx,
        struct ggml_tensor           * a,
        struct ggml_tensor           * b,
        const  ggml_custom2_op_f32_t   fun) {
    return ggml_map_custom2_impl_f32(ctx, a, b, fun, true);
}

// ggml_map_custom3_f32

static struct ggml_tensor * ggml_map_custom3_impl_f32(
        struct ggml_context          * ctx,
        struct ggml_tensor           * a,
        struct ggml_tensor           * b,
        struct ggml_tensor           * c,
        const  ggml_custom3_op_f32_t   fun,
        bool                           inplace) {
    bool is_node = false;

    if (!inplace && (a->grad || b->grad || c->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    ggml_set_op_params(result, (const void *) &fun, sizeof(fun));

    result->op   = GGML_OP_MAP_CUSTOM3_F32;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;
    result->src[2] = c;

    return result;
}

struct ggml_tensor * ggml_map_custom3_f32(
        struct ggml_context          * ctx,
        struct ggml_tensor           * a,
        struct ggml_tensor           * b,
        struct ggml_tensor           * c,
        const  ggml_custom3_op_f32_t   fun) {
    return ggml_map_custom3_impl_f32(ctx, a, b, c, fun, false);
}

struct ggml_tensor * ggml_map_custom3_inplace_f32(
        struct ggml_context          * ctx,
        struct ggml_tensor           * a,
        struct ggml_tensor           * b,
        struct ggml_tensor           * c,
        const  ggml_custom3_op_f32_t   fun) {
    return ggml_map_custom3_impl_f32(ctx, a, b, c, fun, true);
}

// ggml_map_custom1

struct ggml_map_custom1_op_params {
    ggml_custom1_op_t fun;
    int n_tasks;
    void * userdata;
};

static struct ggml_tensor * ggml_map_custom1_impl(
        struct ggml_context      * ctx,
        struct ggml_tensor       * a,
        const  ggml_custom1_op_t   fun,
        int                        n_tasks,
        void                     * userdata,
        bool                       inplace) {
    GGML_ASSERT(n_tasks == GGML_N_TASKS_MAX || n_tasks > 0);

    bool is_node = false;

    if (!inplace && a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    struct ggml_map_custom1_op_params params = {
        /*.fun      =*/ fun,
        /*.n_tasks  =*/ n_tasks,
        /*.userdata =*/ userdata
    };
    ggml_set_op_params(result, (const void *) &params, sizeof(params));

    result->op   = GGML_OP_MAP_CUSTOM1;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;

    return result;
}

struct ggml_tensor * ggml_map_custom1(
        struct ggml_context      * ctx,
        struct ggml_tensor       * a,
        const  ggml_custom1_op_t   fun,
        int                        n_tasks,
        void                     * userdata) {
    return ggml_map_custom1_impl(ctx, a, fun, n_tasks, userdata, false);
}

struct ggml_tensor * ggml_map_custom1_inplace(
        struct ggml_context      * ctx,
        struct ggml_tensor       * a,
        const  ggml_custom1_op_t   fun,
        int                        n_tasks,
        void                     * userdata) {
    return ggml_map_custom1_impl(ctx, a, fun, n_tasks, userdata, true);
}
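
// usage sketch (illustrative, not part of upstream ggml.c): a custom squaring
// op for ggml_map_custom1. the callback receives its thread index ith out of
// nth threads and must partition the work itself; this splits by elements the
// same way ggml_compute_forward_dup_same_cont below splits its copy.
static void example_custom_sqr(struct ggml_tensor * dst, const struct ggml_tensor * a,
                               int ith, int nth, void * userdata) {
    GGML_ASSERT(userdata == NULL);
    GGML_ASSERT(ggml_are_same_shape(dst, a));
    GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(a));
    GGML_ASSERT(a->type == GGML_TYPE_F32);

    const int64_t ne  = ggml_nelements(dst);
    const int64_t dr  = (ne + nth - 1)/nth; // elements per thread
    const int64_t ie0 = dr*ith;             // this thread's range
    const int64_t ie1 = MIN(ie0 + dr, ne);

    const float * src = (const float *) a->data;
          float * out = (float *) dst->data;

    for (int64_t i = ie0; i < ie1; ++i) {
        out[i] = src[i]*src[i];
    }
}
// graph construction would then be:
//   struct ggml_tensor * y = ggml_map_custom1(ctx, x, example_custom_sqr, GGML_N_TASKS_MAX, NULL);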

// ggml_map_custom2

struct ggml_map_custom2_op_params {
    ggml_custom2_op_t fun;
    int n_tasks;
    void * userdata;
};

static struct ggml_tensor * ggml_map_custom2_impl(
        struct ggml_context      * ctx,
        struct ggml_tensor       * a,
        struct ggml_tensor       * b,
        const  ggml_custom2_op_t   fun,
        int                        n_tasks,
        void                     * userdata,
        bool                       inplace) {
    GGML_ASSERT(n_tasks == GGML_N_TASKS_MAX || n_tasks > 0);

    bool is_node = false;

    if (!inplace && (a->grad || b->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    struct ggml_map_custom2_op_params params = {
        /*.fun      =*/ fun,
        /*.n_tasks  =*/ n_tasks,
        /*.userdata =*/ userdata
    };
    ggml_set_op_params(result, (const void *) &params, sizeof(params));

    result->op   = GGML_OP_MAP_CUSTOM2;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

struct ggml_tensor * ggml_map_custom2(
        struct ggml_context      * ctx,
        struct ggml_tensor       * a,
        struct ggml_tensor       * b,
        const  ggml_custom2_op_t   fun,
        int                        n_tasks,
        void                     * userdata) {
    return ggml_map_custom2_impl(ctx, a, b, fun, n_tasks, userdata, false);
}

struct ggml_tensor * ggml_map_custom2_inplace(
        struct ggml_context      * ctx,
        struct ggml_tensor       * a,
        struct ggml_tensor       * b,
        const  ggml_custom2_op_t   fun,
        int                        n_tasks,
        void                     * userdata) {
    return ggml_map_custom2_impl(ctx, a, b, fun, n_tasks, userdata, true);
}

// ggml_map_custom3

struct ggml_map_custom3_op_params {
    ggml_custom3_op_t fun;
    int n_tasks;
    void * userdata;
};

static struct ggml_tensor * ggml_map_custom3_impl(
        struct ggml_context      * ctx,
        struct ggml_tensor       * a,
        struct ggml_tensor       * b,
        struct ggml_tensor       * c,
        const  ggml_custom3_op_t   fun,
        int                        n_tasks,
        void                     * userdata,
        bool                       inplace) {
    GGML_ASSERT(n_tasks == GGML_N_TASKS_MAX || n_tasks > 0);

    bool is_node = false;

    if (!inplace && (a->grad || b->grad || c->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    struct ggml_map_custom3_op_params params = {
        /*.fun      =*/ fun,
        /*.n_tasks  =*/ n_tasks,
        /*.userdata =*/ userdata
    };
    ggml_set_op_params(result, (const void *) &params, sizeof(params));

    result->op   = GGML_OP_MAP_CUSTOM3;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;
    result->src[2] = c;

    return result;
}

struct ggml_tensor * ggml_map_custom3(
        struct ggml_context      * ctx,
        struct ggml_tensor       * a,
        struct ggml_tensor       * b,
        struct ggml_tensor       * c,
        const  ggml_custom3_op_t   fun,
        int                        n_tasks,
        void                     * userdata) {
    return ggml_map_custom3_impl(ctx, a, b, c, fun, n_tasks, userdata, false);
}

struct ggml_tensor * ggml_map_custom3_inplace(
        struct ggml_context      * ctx,
        struct ggml_tensor       * a,
        struct ggml_tensor       * b,
        struct ggml_tensor       * c,
        const  ggml_custom3_op_t   fun,
        int                        n_tasks,
        void                     * userdata) {
    return ggml_map_custom3_impl(ctx, a, b, c, fun, n_tasks, userdata, true);
}

// ggml_cross_entropy_loss

struct ggml_tensor * ggml_cross_entropy_loss(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b) {
    GGML_ASSERT(ggml_are_same_shape(a, b));

    bool is_node = false;

    if (a->grad || b->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = ggml_new_tensor_1d(ctx, a->type, 1);

    result->op   = GGML_OP_CROSS_ENTROPY_LOSS;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}
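
// usage sketch (illustrative): the loss is a single scalar, so a training loop
// reads it back with ggml_get_f32_1d(loss, 0) after the graph is computed;
// "logits" and "targets" (same shape) are assumed names for the example.
//   struct ggml_tensor * loss = ggml_cross_entropy_loss(ctx, logits, targets);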

// ggml_cross_entropy_loss_back

struct ggml_tensor * ggml_cross_entropy_loss_back(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        struct ggml_tensor  * c) {
    GGML_ASSERT(ggml_are_same_shape(a, b));
    GGML_ASSERT(ggml_is_scalar(c));

    struct ggml_tensor * result = ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_CROSS_ENTROPY_LOSS_BACK;
    result->grad = NULL;
    result->src[0] = a;
    result->src[1] = b;
    result->src[2] = c;

    return result;
}

////////////////////////////////////////////////////////////////////////////////

void ggml_set_param(
        struct ggml_context * ctx,
        struct ggml_tensor  * tensor) {
    tensor->is_param = true;

    GGML_ASSERT(tensor->grad == NULL);
    tensor->grad = ggml_dup_tensor(ctx, tensor);
}

// ggml_compute_forward_dup

static void ggml_compute_forward_dup_same_cont(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));
    GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));
    GGML_ASSERT(src0->type == dst->type);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const size_t nb00 = src0->nb[0];
    const size_t nb0  = dst->nb[0];

    const int ith = params->ith; // thread index
    const int nth = params->nth; // number of threads

    // parallelize by elements
    const int ne  = ggml_nelements(dst);
    const int dr  = (ne + nth - 1) / nth;
    const int ie0 = dr * ith;
    const int ie1 = MIN(ie0 + dr, ne);

    if (ie0 < ie1) {
        memcpy(
            ((char *)  dst->data + ie0*nb0),
            ((char *) src0->data + ie0*nb00),
            (ie1 - ie0) * ggml_type_size(src0->type));
    }
}
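
// worked example for the element split above (illustrative): ne = 10 elements
// on nth = 4 threads gives dr = 3, so the threads copy the ranges [0,3), [3,6),
// [6,9) and [9,10) -- the MIN() clamp trims the last thread's range.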

static void ggml_compute_forward_dup_f16(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    GGML_TENSOR_UNARY_OP_LOCALS

    const int ith = params->ith; // thread index
    const int nth = params->nth; // number of threads

    if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst) && src0->type == dst->type) {
        ggml_compute_forward_dup_same_cont(params, src0, dst);
        return;
    }

    // parallelize by rows
    const int nr = ne01;
    // number of rows per thread
    const int dr = (nr + nth - 1) / nth;
    // row range for this thread
    const int ir0 = dr * ith;
    const int ir1 = MIN(ir0 + dr, nr);

    if (src0->type == dst->type &&
        ne00 == ne0 &&
        nb00 == ggml_type_size(src0->type) && nb0 == ggml_type_size(dst->type)) {
        // copy by rows
        const size_t rs = ne00*nb00;
        for (int64_t i03 = 0; i03 < ne03; i03++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
                for (int64_t i01 = ir0; i01 < ir1; i01++) {
                    memcpy(
                        ((char *)  dst->data + i01*nb1  + i02*nb2  + i03*nb3),
                        ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03),
                        rs);
                }
            }
        }
        return;
    }

    // TODO: add more special-case implementations for tensor shapes/strides that can benefit from memcpy

    if (ggml_is_contiguous(dst)) {
        if (nb00 == sizeof(ggml_fp16_t)) {
            if (dst->type == GGML_TYPE_F16) {
                size_t id = 0;
                const size_t rs = ne00 * nb00;
                char * dst_ptr = (char *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += rs * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            const char * src0_ptr = (char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03;
                            memcpy(dst_ptr + id, src0_ptr, rs);
                            id += rs;
                        }
                        id += rs * (ne01 - ir1);
                    }
                }
            } else if (dst->type == GGML_TYPE_F32) {
                size_t id = 0;
                float * dst_ptr = (float *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += ne00 * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
                            for (int i00 = 0; i00 < ne00; i00++) {
                                dst_ptr[id] = GGML_FP16_TO_FP32(src0_ptr[i00]);
                                id++;
                            }
                        }
                        id += ne00 * (ne01 - ir1);
                    }
                }
            } else if (type_traits[dst->type].from_float) {
                ggml_from_float_t const quantize_row_q = type_traits[dst->type].from_float;
                float * src0_f32 = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith;

                size_t id = 0;
                size_t rs = nb0 * (ne00 / ggml_blck_size(dst->type));
                char * dst_ptr = (char *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += rs * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);

                            for (int i00 = 0; i00 < ne00; i00++) {
                                src0_f32[i00] = GGML_FP16_TO_FP32(src0_ptr[i00]);
                            }

                            quantize_row_q(src0_f32, dst_ptr + id, ne00);
                            id += rs;
                        }
                        id += rs * (ne01 - ir1);
                    }
                }
            } else {
                GGML_ASSERT(false); // TODO: implement
            }
        } else {
            //printf("%s: this is not optimal - fix me\n", __func__);

            if (dst->type == GGML_TYPE_F32) {
                size_t id = 0;
                float * dst_ptr = (float *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += ne00 * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            for (int i00 = 0; i00 < ne00; i00++) {
                                const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);

                                dst_ptr[id] = GGML_FP16_TO_FP32(*src0_ptr);
                                id++;
                            }
                        }
                        id += ne00 * (ne01 - ir1);
                    }
                }
            } else if (dst->type == GGML_TYPE_F16) {
                size_t id = 0;
                ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += ne00 * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            for (int i00 = 0; i00 < ne00; i00++) {
                                const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);

                                dst_ptr[id] = *src0_ptr;
                                id++;
                            }
                        }
                        id += ne00 * (ne01 - ir1);
                    }
                }
            } else {
                GGML_ASSERT(false); // TODO: implement
            }
        }
        return;
    }

    // dst counters
    int64_t i10 = 0;
    int64_t i11 = 0;
    int64_t i12 = 0;
    int64_t i13 = 0;

    if (dst->type == GGML_TYPE_F16) {
        for (int64_t i03 = 0; i03 < ne03; i03++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
                i10 += ne00 * ir0;
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
                for (int64_t i01 = ir0; i01 < ir1; i01++) {
                    for (int64_t i00 = 0; i00 < ne00; i00++) {
                        const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
                              char * dst_ptr  = ((char *)  dst->data + i10*nb0  + i11*nb1  + i12*nb2  + i13*nb3);

                        memcpy(dst_ptr, src0_ptr, sizeof(ggml_fp16_t));

                        // note: the dst counters must wrap at the dst extents (ne0..ne3),
                        // not at the src extents (ne00..ne03) -- the shapes may differ
                        if (++i10 == ne0) {
                            i10 = 0;
                            if (++i11 == ne1) {
                                i11 = 0;
                                if (++i12 == ne2) {
                                    i12 = 0;
                                    if (++i13 == ne3) {
                                        i13 = 0;
                                    }
                                }
                            }
                        }
                    }
                }
                i10 += ne00 * (ne01 - ir1);
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
            }
        }
    } else if (dst->type == GGML_TYPE_F32) {
        for (int64_t i03 = 0; i03 < ne03; i03++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
                i10 += ne00 * ir0;
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
                for (int64_t i01 = ir0; i01 < ir1; i01++) {
                    for (int64_t i00 = 0; i00 < ne00; i00++) {
                        const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
                              char * dst_ptr  = ((char *)  dst->data + i10*nb0  + i11*nb1  + i12*nb2  + i13*nb3);

                        *(float *) dst_ptr = GGML_FP16_TO_FP32(*(const ggml_fp16_t *) src0_ptr);

                        if (++i10 == ne0) {
                            i10 = 0;
                            if (++i11 == ne1) {
                                i11 = 0;
                                if (++i12 == ne2) {
                                    i12 = 0;
                                    if (++i13 == ne3) {
                                        i13 = 0;
                                    }
                                }
                            }
                        }
                    }
                }
                i10 += ne00 * (ne01 - ir1);
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
            }
        }
    } else {
        GGML_ASSERT(false); // TODO: implement
    }
}

static void ggml_compute_forward_dup_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    GGML_TENSOR_UNARY_OP_LOCALS

    const int ith = params->ith; // thread index
    const int nth = params->nth; // number of threads

    if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst) && src0->type == dst->type) {
        ggml_compute_forward_dup_same_cont(params, src0, dst);
        return;
    }

    // parallelize by rows
    const int nr = ne01;
    // number of rows per thread
    const int dr = (nr + nth - 1) / nth;
    // row range for this thread
    const int ir0 = dr * ith;
    const int ir1 = MIN(ir0 + dr, nr);

    if (src0->type == dst->type &&
        ne00 == ne0 &&
        nb00 == ggml_type_size(src0->type) && nb0 == ggml_type_size(dst->type)) {
        // copy by rows
        const size_t rs = ne00*nb00;
        for (int64_t i03 = 0; i03 < ne03; i03++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
                for (int64_t i01 = ir0; i01 < ir1; i01++) {
                    memcpy(
                        ((char *)  dst->data + i01*nb1  + i02*nb2  + i03*nb3),
                        ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03),
                        rs);
                }
            }
        }
        return;
    }

    if (ggml_is_contiguous(dst)) {
        // TODO: simplify
        if (nb00 == sizeof(float)) {
            if (dst->type == GGML_TYPE_F32) {
                size_t id = 0;
                const size_t rs = ne00 * nb00;
                char * dst_ptr = (char *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += rs * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            const char * src0_ptr = (char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03;
                            memcpy(dst_ptr + id, src0_ptr, rs);
                            id += rs;
                        }
                        id += rs * (ne01 - ir1);
                    }
                }
            } else if (type_traits[dst->type].from_float) {
                ggml_from_float_t const quantize_row_q = type_traits[dst->type].from_float;

                size_t id = 0;
                size_t rs = nb0 * (ne00 / ggml_blck_size(dst->type));
                char * dst_ptr = (char *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += rs * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            const float * src0_ptr = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
                            quantize_row_q(src0_ptr, dst_ptr + id, ne00);
                            id += rs;
                        }
                        id += rs * (ne01 - ir1);
                    }
                }
            } else {
                GGML_ASSERT(false); // TODO: implement
            }
        } else {
            //printf("%s: this is not optimal - fix me\n", __func__);

            if (dst->type == GGML_TYPE_F32) {
                size_t id = 0;
                float * dst_ptr = (float *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += ne00 * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            for (int i00 = 0; i00 < ne00; i00++) {
                                const float * src0_ptr = (float *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);

                                dst_ptr[id] = *src0_ptr;
                                id++;
                            }
                        }
                        id += ne00 * (ne01 - ir1);
                    }
                }
            } else if (dst->type == GGML_TYPE_F16) {
                size_t id = 0;
                ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += ne00 * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            for (int i00 = 0; i00 < ne00; i00++) {
                                const float * src0_ptr = (float *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);

                                dst_ptr[id] = GGML_FP32_TO_FP16(*src0_ptr);
                                id++;
                            }
                        }
                        id += ne00 * (ne01 - ir1);
                    }
                }
            } else {
                GGML_ASSERT(false); // TODO: implement
            }
        }

        return;
    }

    // dst counters
    int64_t i10 = 0;
    int64_t i11 = 0;
    int64_t i12 = 0;
    int64_t i13 = 0;

    if (dst->type == GGML_TYPE_F32) {
        for (int64_t i03 = 0; i03 < ne03; i03++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
                i10 += ne00 * ir0;
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
                for (int64_t i01 = ir0; i01 < ir1; i01++) {
                    for (int64_t i00 = 0; i00 < ne00; i00++) {
                        const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
                              char * dst_ptr  = ((char *)  dst->data + i10*nb0  + i11*nb1  + i12*nb2  + i13*nb3);

                        memcpy(dst_ptr, src0_ptr, sizeof(float));

                        if (++i10 == ne0) {
                            i10 = 0;
                            if (++i11 == ne1) {
                                i11 = 0;
                                if (++i12 == ne2) {
                                    i12 = 0;
                                    if (++i13 == ne3) {
                                        i13 = 0;
                                    }
                                }
                            }
                        }
                    }
                }
                i10 += ne00 * (ne01 - ir1);
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
            }
        }
    } else if (dst->type == GGML_TYPE_F16) {
        for (int64_t i03 = 0; i03 < ne03; i03++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
                i10 += ne00 * ir0;
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
                for (int64_t i01 = ir0; i01 < ir1; i01++) {
                    for (int64_t i00 = 0; i00 < ne00; i00++) {
                        const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
                              char * dst_ptr  = ((char *)  dst->data + i10*nb0  + i11*nb1  + i12*nb2  + i13*nb3);

                        *(ggml_fp16_t *) dst_ptr = GGML_FP32_TO_FP16(*(const float *) src0_ptr);

                        if (++i10 == ne0) {
                            i10 = 0;
                            if (++i11 == ne1) {
                                i11 = 0;
                                if (++i12 == ne2) {
                                    i12 = 0;
                                    if (++i13 == ne3) {
                                        i13 = 0;
                                    }
                                }
                            }
                        }
                    }
                }
                i10 += ne00 * (ne01 - ir1);
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
            }
        }
    } else {
        GGML_ASSERT(false); // TODO: implement
    }
}

static void ggml_compute_forward_dup(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst) && src0->type == dst->type) {
        ggml_compute_forward_dup_same_cont(params, src0, dst);
        return;
    }
    switch (src0->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_dup_f16(params, src0, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_dup_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_add

static void ggml_compute_forward_add_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_can_repeat_rows(src1, src0) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(src0);

    GGML_TENSOR_BINARY_OP_LOCALS

    GGML_ASSERT( nb0 == sizeof(float));
    GGML_ASSERT(nb00 == sizeof(float));

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    if (nb10 == sizeof(float)) {
        for (int ir = ir0; ir < ir1; ++ir) {
            // src1 is broadcastable across src0 and dst in i1, i2, i3
            const int64_t i03 = ir/(ne02*ne01);
            const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
            const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);

            const int64_t i13 = i03 % ne13;
            const int64_t i12 = i02 % ne12;
            const int64_t i11 = i01 % ne11;

            float * dst_ptr  = (float *) ((char *)  dst->data + i03*nb3  + i02*nb2  + i01*nb1 );
            float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);
            float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11);

#ifdef GGML_USE_ACCELERATE
            vDSP_vadd(src0_ptr, 1, src1_ptr, 1, dst_ptr, 1, ne00);
#else
            ggml_vec_add_f32(ne00, dst_ptr, src0_ptr, src1_ptr);
#endif
        }
    } else {
        // src1 is not contiguous
        for (int ir = ir0; ir < ir1; ++ir) {
            // src1 is broadcastable across src0 and dst in i1, i2, i3
            const int64_t i03 = ir/(ne02*ne01);
            const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
            const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);

            const int64_t i13 = i03 % ne13;
            const int64_t i12 = i02 % ne12;
            const int64_t i11 = i01 % ne11;

            float * dst_ptr  = (float *) ((char *)  dst->data + i03*nb3  + i02*nb2  + i01*nb1 );
            float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);

            for (int i0 = 0; i0 < ne0; i0++) {
                float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + i0*nb10);

                dst_ptr[i0] = src0_ptr[i0] + *src1_ptr;
            }
        }
    }
}
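
// broadcast sketch for the modulo indexing above (illustrative): with
// src0 = [4096, 32, 1, 1] and src1 = [4096, 1, 1, 1], ne11 = 1 forces
// i11 = i01 % ne11 = 0 for every row, so the single src1 row is added to
// all 32 rows of src0.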

static void ggml_compute_forward_add_f16_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(src0);

    GGML_TENSOR_BINARY_OP_LOCALS

    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT(dst->type  == GGML_TYPE_F16);

    GGML_ASSERT( nb0 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    if (nb10 == sizeof(float)) {
        for (int ir = ir0; ir < ir1; ++ir) {
            // src0, src1 and dst are same shape => same indices
            const int i3 = ir/(ne2*ne1);
            const int i2 = (ir - i3*ne2*ne1)/ne1;
            const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

            ggml_fp16_t * dst_ptr  = (ggml_fp16_t *) ((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1);
            ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
            float       * src1_ptr = (float *)       ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11);

            for (int i = 0; i < ne0; i++) {
                dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + src1_ptr[i]);
            }
        }
    }
    else {
        // src1 is not contiguous
        GGML_ASSERT(false);
    }
}

static void ggml_compute_forward_add_f16_f16(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(src0);

    GGML_TENSOR_BINARY_OP_LOCALS

    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F16);
    GGML_ASSERT(dst->type  == GGML_TYPE_F16);

    GGML_ASSERT( nb0 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    if (nb10 == sizeof(ggml_fp16_t)) {
        for (int ir = ir0; ir < ir1; ++ir) {
            // src0, src1 and dst are same shape => same indices
            const int i3 = ir/(ne2*ne1);
            const int i2 = (ir - i3*ne2*ne1)/ne1;
            const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

            ggml_fp16_t * dst_ptr  = (ggml_fp16_t *) ((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1);
            ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
            ggml_fp16_t * src1_ptr = (ggml_fp16_t *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11);

            for (int i = 0; i < ne0; i++) {
                dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + GGML_FP16_TO_FP32(src1_ptr[i]));
            }
        }
    }
    else {
        // src1 is not contiguous
        GGML_ASSERT(false);
    }
}

static void ggml_compute_forward_add_q_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int nr = ggml_nrows(src0);

    GGML_TENSOR_BINARY_OP_LOCALS

    const int ith = params->ith;
    const int nth = params->nth;

    const enum ggml_type type  = src0->type;
    const enum ggml_type dtype = dst->type;
    ggml_to_float_t   const dequantize_row_q = type_traits[type].to_float;
    ggml_from_float_t const quantize_row_q   = type_traits[dtype].from_float;

    // we don't support permuted src0 or src1
    GGML_ASSERT(nb00 == ggml_type_size(type));
    GGML_ASSERT(nb10 == sizeof(float));

    // dst cannot be transposed or permuted
    GGML_ASSERT(nb0 <= nb1);
    GGML_ASSERT(nb1 <= nb2);
    GGML_ASSERT(nb2 <= nb3);

    GGML_ASSERT(ggml_is_quantized(src0->type));
    GGML_ASSERT(src1->type == GGML_TYPE_F32);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    float * wdata = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith;

    for (int ir = ir0; ir < ir1; ++ir) {
        // src0 indices
        const int i03 = ir/(ne02*ne01);
        const int i02 = (ir - i03*ne02*ne01)/ne01;
        const int i01 = (ir - i03*ne02*ne01 - i02*ne01);

        // src1 and dst are same shape as src0 => same indices
        const int i13 = i03;
        const int i12 = i02;
        const int i11 = i01;

        const int i3 = i03;
        const int i2 = i02;
        const int i1 = i01;

        void  * src0_row = (void *) ((char *) src0->data + (i01*nb01 + i02*nb02 + i03*nb03));
        float * src1_row = (float *)((char *) src1->data + (i11*nb11 + i12*nb12 + i13*nb13));
        void  * dst_row  = (void *) ((char *)  dst->data + ( i1*nb1  +  i2*nb2  +  i3*nb3));

        assert(ne00 % 32 == 0);

        // dequantize row from src0 to temp buffer
        dequantize_row_q(src0_row, wdata, ne00);
        // add src1
        ggml_vec_acc_f32(ne00, wdata, src1_row);
        // quantize row to dst
        if (quantize_row_q != NULL) {
            quantize_row_q(wdata, dst_row, ne00);
        } else {
            memcpy(dst_row, wdata, ne0*nb0);
        }
    }
}

static void ggml_compute_forward_add(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_add_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F16:
            {
                if (src1->type == GGML_TYPE_F16) {
                    ggml_compute_forward_add_f16_f16(params, src0, src1, dst);
                }
                else if (src1->type == GGML_TYPE_F32) {
                    ggml_compute_forward_add_f16_f32(params, src0, src1, dst);
                }
                else {
                    GGML_ASSERT(false);
                }
            } break;
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
            {
                ggml_compute_forward_add_q_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
  7540. // ggml_compute_forward_add1
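// add1 adds the single scalar value stored in src1 (asserted via ggml_is_scalar) to
// every element of src0; the variants below differ only in the element types of
// src0, src1 and dst.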
  7541. static void ggml_compute_forward_add1_f32(
  7542. const struct ggml_compute_params * params,
  7543. const struct ggml_tensor * src0,
  7544. const struct ggml_tensor * src1,
  7545. struct ggml_tensor * dst) {
  7546. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  7547. GGML_ASSERT(ggml_is_scalar(src1));
  7548. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7549. return;
  7550. }
  7551. const int ith = params->ith;
  7552. const int nth = params->nth;
  7553. const int nr = ggml_nrows(src0);
  7554. GGML_TENSOR_UNARY_OP_LOCALS
  7555. GGML_ASSERT( nb0 == sizeof(float));
  7556. GGML_ASSERT(nb00 == sizeof(float));
  7557. // rows per thread
  7558. const int dr = (nr + nth - 1)/nth;
  7559. // row range for this thread
  7560. const int ir0 = dr*ith;
  7561. const int ir1 = MIN(ir0 + dr, nr);
  7562. for (int ir = ir0; ir < ir1; ++ir) {
  7563. // src0 and dst are same shape => same indices
  7564. const int i3 = ir/(ne2*ne1);
  7565. const int i2 = (ir - i3*ne2*ne1)/ne1;
  7566. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  7567. #ifdef GGML_USE_ACCELERATE
  7568. UNUSED(ggml_vec_add1_f32);
  7569. vDSP_vadd(
  7570. (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01), 1,
  7571. (float *) ((char *) src1->data), 0,
  7572. (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ), 1,
  7573. ne0);
  7574. #else
  7575. ggml_vec_add1_f32(ne0,
  7576. (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ),
  7577. (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01),
  7578. *(float *) src1->data);
  7579. #endif
  7580. }
  7581. }
  7582. static void ggml_compute_forward_add1_f16_f32(
  7583. const struct ggml_compute_params * params,
  7584. const struct ggml_tensor * src0,
  7585. const struct ggml_tensor * src1,
  7586. struct ggml_tensor * dst) {
  7587. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  7588. GGML_ASSERT(ggml_is_scalar(src1));
  7589. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7590. return;
  7591. }
  7592. // scalar to add
  7593. const float v = *(float *) src1->data;
  7594. const int ith = params->ith;
  7595. const int nth = params->nth;
  7596. const int nr = ggml_nrows(src0);
  7597. GGML_TENSOR_UNARY_OP_LOCALS
  7598. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  7599. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  7600. GGML_ASSERT(dst->type == GGML_TYPE_F16);
  7601. GGML_ASSERT( nb0 == sizeof(ggml_fp16_t));
  7602. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  7603. // rows per thread
  7604. const int dr = (nr + nth - 1)/nth;
  7605. // row range for this thread
  7606. const int ir0 = dr*ith;
  7607. const int ir1 = MIN(ir0 + dr, nr);
  7608. for (int ir = ir0; ir < ir1; ++ir) {
  7609. // src0 and dst are same shape => same indices
  7610. const int i3 = ir/(ne2*ne1);
  7611. const int i2 = (ir - i3*ne2*ne1)/ne1;
  7612. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  7613. ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 );
  7614. ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
  7615. for (int i = 0; i < ne0; i++) {
  7616. dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + v);
  7617. }
  7618. }
  7619. }
  7620. static void ggml_compute_forward_add1_f16_f16(
  7621. const struct ggml_compute_params * params,
  7622. const struct ggml_tensor * src0,
  7623. const struct ggml_tensor * src1,
  7624. struct ggml_tensor * dst) {
  7625. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  7626. GGML_ASSERT(ggml_is_scalar(src1));
  7627. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7628. return;
  7629. }
  7630. // scalar to add
  7631. const float v = GGML_FP16_TO_FP32(*(ggml_fp16_t *) src1->data);
  7632. const int ith = params->ith;
  7633. const int nth = params->nth;
  7634. const int nr = ggml_nrows(src0);
  7635. GGML_TENSOR_UNARY_OP_LOCALS
  7636. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  7637. GGML_ASSERT(src1->type == GGML_TYPE_F16);
  7638. GGML_ASSERT(dst->type == GGML_TYPE_F16);
  7639. GGML_ASSERT( nb0 == sizeof(ggml_fp16_t));
  7640. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  7641. // rows per thread
  7642. const int dr = (nr + nth - 1)/nth;
  7643. // row range for this thread
  7644. const int ir0 = dr*ith;
  7645. const int ir1 = MIN(ir0 + dr, nr);
  7646. for (int ir = ir0; ir < ir1; ++ir) {
  7647. // src0 and dst are same shape => same indices
  7648. const int i3 = ir/(ne2*ne1);
  7649. const int i2 = (ir - i3*ne2*ne1)/ne1;
  7650. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  7651. ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 );
  7652. ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
  7653. for (int i = 0; i < ne0; i++) {
  7654. dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + v);
  7655. }
  7656. }
  7657. }
  7658. static void ggml_compute_forward_add1_q_f32(
  7659. const struct ggml_compute_params * params,
  7660. const struct ggml_tensor * src0,
  7661. const struct ggml_tensor * src1,
  7662. struct ggml_tensor * dst) {
  7663. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  7664. GGML_ASSERT(ggml_is_scalar(src1));
  7665. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7666. return;
  7667. }
  7668. // scalar to add
  7669. const float v = *(float *) src1->data;
  7670. const int ith = params->ith;
  7671. const int nth = params->nth;
  7672. const int nr = ggml_nrows(src0);
  7673. GGML_TENSOR_UNARY_OP_LOCALS
  7674. const enum ggml_type type = src0->type;
  7675. ggml_to_float_t const dequantize_row_q = type_traits[type].to_float;
  7676. ggml_from_float_t const quantize_row_q = type_traits[type].from_float;
  7677. // we don't support permuted src0
  7678. GGML_ASSERT(nb00 == ggml_type_size(type));
  7679. // dst cannot be transposed or permuted
  7680. GGML_ASSERT(nb0 <= nb1);
  7681. GGML_ASSERT(nb1 <= nb2);
  7682. GGML_ASSERT(nb2 <= nb3);
  7683. GGML_ASSERT(ggml_is_quantized(src0->type));
  7684. GGML_ASSERT(dst->type == src0->type);
  7685. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  7686. // rows per thread
  7687. const int dr = (nr + nth - 1)/nth;
  7688. // row range for this thread
  7689. const int ir0 = dr*ith;
  7690. const int ir1 = MIN(ir0 + dr, nr);
  7691. float * wdata = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32) * ith;
  7692. for (int ir = ir0; ir < ir1; ++ir) {
  7693. // src0 and dst are same shape => same indices
  7694. const int i3 = ir/(ne2*ne1);
  7695. const int i2 = (ir - i3*ne2*ne1)/ne1;
  7696. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  7697. void * src0_row = (void *) ((char *) src0->data + (i1*nb01 + i2*nb02 + i3*nb03));
7698. void * dst_row = (void *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3));
  7699. assert(ne0 % 32 == 0);
7700. // dequantize row from src0 to temp buffer
  7701. dequantize_row_q(src0_row, wdata, ne0);
  7702. // add src1
  7703. ggml_vec_acc1_f32(ne0, wdata, v);
  7704. // quantize row to dst
  7705. quantize_row_q(wdata, dst_row, ne0);
  7706. }
  7707. }
  7708. static void ggml_compute_forward_add1(
  7709. const struct ggml_compute_params * params,
  7710. const struct ggml_tensor * src0,
  7711. const struct ggml_tensor * src1,
  7712. struct ggml_tensor * dst) {
  7713. switch (src0->type) {
  7714. case GGML_TYPE_F32:
  7715. {
  7716. ggml_compute_forward_add1_f32(params, src0, src1, dst);
  7717. } break;
  7718. case GGML_TYPE_F16:
  7719. {
  7720. if (src1->type == GGML_TYPE_F16) {
  7721. ggml_compute_forward_add1_f16_f16(params, src0, src1, dst);
  7722. }
  7723. else if (src1->type == GGML_TYPE_F32) {
  7724. ggml_compute_forward_add1_f16_f32(params, src0, src1, dst);
  7725. }
  7726. else {
  7727. GGML_ASSERT(false);
  7728. }
  7729. } break;
  7730. case GGML_TYPE_Q4_0:
  7731. case GGML_TYPE_Q4_1:
  7732. case GGML_TYPE_Q5_0:
  7733. case GGML_TYPE_Q5_1:
  7734. case GGML_TYPE_Q8_0:
  7735. case GGML_TYPE_Q8_1:
  7736. case GGML_TYPE_Q2_K:
  7737. case GGML_TYPE_Q3_K:
  7738. case GGML_TYPE_Q4_K:
  7739. case GGML_TYPE_Q5_K:
  7740. case GGML_TYPE_Q6_K:
  7741. {
  7742. ggml_compute_forward_add1_q_f32(params, src0, src1, dst);
  7743. } break;
  7744. default:
  7745. {
  7746. GGML_ASSERT(false);
  7747. } break;
  7748. }
  7749. }
  7750. // ggml_compute_forward_acc
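// acc accumulates src1 into a strided view of dst: op_params carries the view strides
// nb1..nb3, a byte offset, and an inplace flag. When not inplace, dst is first seeded
// with a copy of src0 during the INIT phase, then src1 is added into the viewed region.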
  7751. static void ggml_compute_forward_acc_f32(
  7752. const struct ggml_compute_params * params,
  7753. const struct ggml_tensor * src0,
  7754. const struct ggml_tensor * src1,
  7755. struct ggml_tensor * dst) {
  7756. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  7757. GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));
  7758. // view src0 and dst with these strides and data offset inbytes during acc
7759. // nb0 is implicitly element_size because src0 and dst are contiguous
  7760. size_t nb1 = ((int32_t *) dst->op_params)[0];
  7761. size_t nb2 = ((int32_t *) dst->op_params)[1];
  7762. size_t nb3 = ((int32_t *) dst->op_params)[2];
  7763. size_t offset = ((int32_t *) dst->op_params)[3];
  7764. bool inplace = (bool) ((int32_t *) dst->op_params)[4];
  7765. if (!inplace && (params->type == GGML_TASK_INIT)) {
  7766. // memcpy needs to be synchronized across threads to avoid race conditions.
  7767. // => do it in INIT phase
  7768. memcpy(
  7769. ((char *) dst->data),
  7770. ((char *) src0->data),
  7771. ggml_nbytes(dst));
  7772. }
  7773. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7774. return;
  7775. }
  7776. const int ith = params->ith;
  7777. const int nth = params->nth;
  7778. const int nr = ggml_nrows(src1);
  7779. const int nc = src1->ne[0];
  7780. GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne)
  7781. GGML_TENSOR_LOCALS(size_t, nb1, src1, nb)
  7782. // src0 and dst as viewed during acc
  7783. const size_t nb0 = ggml_element_size(src0);
  7784. const size_t nb00 = nb0;
  7785. const size_t nb01 = nb1;
  7786. const size_t nb02 = nb2;
  7787. const size_t nb03 = nb3;
  7788. GGML_ASSERT(offset + (ne10 == 0 ? 0 : ne10-1)*nb0 + (ne11 == 0 ? 0 : ne11-1)*nb1 + (ne12 == 0 ? 0 : ne12-1)*nb2 + (ne13 == 0 ? 0 : ne13-1)*nb3 < ggml_nbytes(dst));
  7789. GGML_ASSERT(offset + (ne10 == 0 ? 0 : ne10-1)*nb00 + (ne11 == 0 ? 0 : ne11-1)*nb01 + (ne12 == 0 ? 0 : ne12-1)*nb02 + (ne13 == 0 ? 0 : ne13-1)*nb03 < ggml_nbytes(src0));
  7790. GGML_ASSERT(nb10 == sizeof(float));
  7791. // rows per thread
  7792. const int dr = (nr + nth - 1)/nth;
  7793. // row range for this thread
  7794. const int ir0 = dr*ith;
  7795. const int ir1 = MIN(ir0 + dr, nr);
  7796. for (int ir = ir0; ir < ir1; ++ir) {
  7797. // src0 and dst are viewed with shape of src1 and offset
  7798. // => same indices
  7799. const int i3 = ir/(ne12*ne11);
  7800. const int i2 = (ir - i3*ne12*ne11)/ne11;
  7801. const int i1 = (ir - i3*ne12*ne11 - i2*ne11);
  7802. #ifdef GGML_USE_ACCELERATE
  7803. vDSP_vadd(
  7804. (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + offset), 1,
  7805. (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11), 1,
  7806. (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + offset), 1, nc);
  7807. #else
  7808. ggml_vec_add_f32(nc,
  7809. (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + offset),
  7810. (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + offset),
  7811. (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11));
  7812. #endif
  7813. }
  7814. }
  7815. static void ggml_compute_forward_acc(
  7816. const struct ggml_compute_params * params,
  7817. const struct ggml_tensor * src0,
  7818. const struct ggml_tensor * src1,
  7819. struct ggml_tensor * dst) {
  7820. switch (src0->type) {
  7821. case GGML_TYPE_F32:
  7822. {
  7823. ggml_compute_forward_acc_f32(params, src0, src1, dst);
  7824. } break;
  7825. case GGML_TYPE_F16:
  7826. case GGML_TYPE_Q4_0:
  7827. case GGML_TYPE_Q4_1:
  7828. case GGML_TYPE_Q5_0:
  7829. case GGML_TYPE_Q5_1:
  7830. case GGML_TYPE_Q8_0:
  7831. case GGML_TYPE_Q8_1:
  7832. case GGML_TYPE_Q2_K:
  7833. case GGML_TYPE_Q3_K:
  7834. case GGML_TYPE_Q4_K:
  7835. case GGML_TYPE_Q5_K:
  7836. case GGML_TYPE_Q6_K:
  7837. default:
  7838. {
  7839. GGML_ASSERT(false);
  7840. } break;
  7841. }
  7842. }
  7843. // ggml_compute_forward_sub
  7844. static void ggml_compute_forward_sub_f32(
  7845. const struct ggml_compute_params * params,
  7846. const struct ggml_tensor * src0,
  7847. const struct ggml_tensor * src1,
  7848. struct ggml_tensor * dst) {
  7849. assert(params->ith == 0);
  7850. assert(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
  7851. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7852. return;
  7853. }
  7854. const int nr = ggml_nrows(src0);
  7855. GGML_TENSOR_BINARY_OP_LOCALS
  7856. GGML_ASSERT( nb0 == sizeof(float));
  7857. GGML_ASSERT(nb00 == sizeof(float));
  7858. if (nb10 == sizeof(float)) {
  7859. for (int ir = 0; ir < nr; ++ir) {
  7860. // src0, src1 and dst are same shape => same indices
  7861. const int i3 = ir/(ne2*ne1);
  7862. const int i2 = (ir - i3*ne2*ne1)/ne1;
  7863. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  7864. #ifdef GGML_USE_ACCELERATE
  7865. vDSP_vsub(
  7866. (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11), 1,
  7867. (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01), 1,
  7868. (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ), 1,
  7869. ne0);
  7870. #else
  7871. ggml_vec_sub_f32(ne0,
  7872. (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ),
  7873. (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01),
  7874. (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11));
  7875. #endif
  7876. // }
  7877. // }
  7878. }
  7879. } else {
  7880. // src1 is not contiguous
  7881. for (int ir = 0; ir < nr; ++ir) {
  7882. // src0, src1 and dst are same shape => same indices
  7883. const int i3 = ir/(ne2*ne1);
  7884. const int i2 = (ir - i3*ne2*ne1)/ne1;
  7885. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  7886. float * dst_ptr = (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 );
  7887. float * src0_ptr = (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
  7888. for (int i0 = 0; i0 < ne0; i0++) {
  7889. float * src1_ptr = (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11 + i0*nb10);
  7890. dst_ptr[i0] = src0_ptr[i0] - *src1_ptr;
  7891. }
  7892. }
  7893. }
  7894. }
  7895. static void ggml_compute_forward_sub(
  7896. const struct ggml_compute_params * params,
  7897. const struct ggml_tensor * src0,
  7898. const struct ggml_tensor * src1,
  7899. struct ggml_tensor * dst) {
  7900. switch (src0->type) {
  7901. case GGML_TYPE_F32:
  7902. {
  7903. ggml_compute_forward_sub_f32(params, src0, src1, dst);
  7904. } break;
  7905. default:
  7906. {
  7907. GGML_ASSERT(false);
  7908. } break;
  7909. }
  7910. }
  7911. // ggml_compute_forward_mul
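// element-wise multiply; src1 only has to be row-broadcastable over src0
// (ggml_can_repeat_rows), which is why the src1 indices are taken modulo ne11/ne12/ne13.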
  7912. static void ggml_compute_forward_mul_f32(
  7913. const struct ggml_compute_params * params,
  7914. const struct ggml_tensor * src0,
  7915. const struct ggml_tensor * src1,
  7916. struct ggml_tensor * dst) {
  7917. GGML_ASSERT(ggml_can_repeat_rows(src1, src0) && ggml_are_same_shape(src0, dst));
  7918. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7919. return;
  7920. }
  7921. const int ith = params->ith;
  7922. const int nth = params->nth;
  7923. #ifdef GGML_USE_CLBLAST
  7924. if (src1->backend == GGML_BACKEND_GPU) {
  7925. if (ith == 0) {
  7926. ggml_cl_mul(src0, src1, dst);
  7927. }
  7928. return;
  7929. }
  7930. #endif
  7931. const int64_t nr = ggml_nrows(src0);
  7932. GGML_TENSOR_BINARY_OP_LOCALS
  7933. GGML_ASSERT( nb0 == sizeof(float));
  7934. GGML_ASSERT(nb00 == sizeof(float));
  7935. GGML_ASSERT(ne00 == ne10);
  7936. if (nb10 == sizeof(float)) {
  7937. for (int64_t ir = ith; ir < nr; ir += nth) {
  7938. // src0 and dst are same shape => same indices
  7939. const int64_t i03 = ir/(ne02*ne01);
  7940. const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
  7941. const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);
  7942. const int64_t i13 = i03 % ne13;
  7943. const int64_t i12 = i02 % ne12;
  7944. const int64_t i11 = i01 % ne11;
  7945. float * dst_ptr = (float *) ((char *) dst->data + i03*nb3 + i02*nb2 + i01*nb1 );
  7946. float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);
  7947. float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11);
  7948. #ifdef GGML_USE_ACCELERATE
  7949. UNUSED(ggml_vec_mul_f32);
  7950. vDSP_vmul( src0_ptr, 1, src1_ptr, 1, dst_ptr, 1, ne00);
  7951. #else
  7952. ggml_vec_mul_f32(ne00, dst_ptr, src0_ptr, src1_ptr);
  7953. #endif
  7954. // }
  7955. // }
  7956. }
  7957. } else {
  7958. // src1 is not contiguous
  7959. for (int64_t ir = ith; ir < nr; ir += nth) {
  7960. // src0 and dst are same shape => same indices
  7961. // src1 is broadcastable across src0 and dst in i1, i2, i3
  7962. const int64_t i03 = ir/(ne02*ne01);
  7963. const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
  7964. const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);
  7965. const int64_t i13 = i03 % ne13;
  7966. const int64_t i12 = i02 % ne12;
  7967. const int64_t i11 = i01 % ne11;
  7968. float * dst_ptr = (float *) ((char *) dst->data + i03*nb3 + i02*nb2 + i01*nb1 );
  7969. float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);
  7970. for (int64_t i0 = 0; i0 < ne00; i0++) {
  7971. float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + i0*nb10);
  7972. dst_ptr[i0] = src0_ptr[i0] * (*src1_ptr);
  7973. }
  7974. }
  7975. }
  7976. }
  7977. static void ggml_compute_forward_mul(
  7978. const struct ggml_compute_params * params,
  7979. const struct ggml_tensor * src0,
  7980. const struct ggml_tensor * src1,
  7981. struct ggml_tensor * dst) {
  7982. GGML_ASSERT(src1->type == GGML_TYPE_F32 && "only f32 src1 supported for now");
  7983. switch (src0->type) {
  7984. case GGML_TYPE_F32:
  7985. {
  7986. ggml_compute_forward_mul_f32(params, src0, src1, dst);
  7987. } break;
  7988. default:
  7989. {
  7990. GGML_ASSERT(false);
  7991. } break;
  7992. }
  7993. }
  7994. // ggml_compute_forward_div
  7995. static void ggml_compute_forward_div_f32(
  7996. const struct ggml_compute_params * params,
  7997. const struct ggml_tensor * src0,
  7998. const struct ggml_tensor * src1,
  7999. struct ggml_tensor * dst) {
  8000. assert(params->ith == 0);
  8001. assert(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
  8002. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8003. return;
  8004. }
  8005. const int nr = ggml_nrows(src0);
  8006. GGML_TENSOR_BINARY_OP_LOCALS
  8007. GGML_ASSERT( nb0 == sizeof(float));
  8008. GGML_ASSERT(nb00 == sizeof(float));
  8009. if (nb10 == sizeof(float)) {
  8010. for (int ir = 0; ir < nr; ++ir) {
  8011. // src0, src1 and dst are same shape => same indices
  8012. const int i3 = ir/(ne2*ne1);
  8013. const int i2 = (ir - i3*ne2*ne1)/ne1;
  8014. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  8015. #ifdef GGML_USE_ACCELERATE
  8016. UNUSED(ggml_vec_div_f32);
  8017. vDSP_vdiv(
  8018. (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11), 1,
  8019. (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01), 1,
  8020. (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ), 1,
  8021. ne0);
  8022. #else
  8023. ggml_vec_div_f32(ne0,
  8024. (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ),
  8025. (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01),
  8026. (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11));
  8027. #endif
  8028. // }
  8029. // }
  8030. }
  8031. } else {
  8032. // src1 is not contiguous
  8033. for (int ir = 0; ir < nr; ++ir) {
  8034. // src0, src1 and dst are same shape => same indices
  8035. const int i3 = ir/(ne2*ne1);
  8036. const int i2 = (ir - i3*ne2*ne1)/ne1;
  8037. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  8038. float * dst_ptr = (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 );
  8039. float * src0_ptr = (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
  8040. for (int i0 = 0; i0 < ne0; i0++) {
  8041. float * src1_ptr = (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11 + i0*nb10);
  8042. dst_ptr[i0] = src0_ptr[i0] / (*src1_ptr);
  8043. }
  8044. }
  8045. }
  8046. }
  8047. static void ggml_compute_forward_div(
  8048. const struct ggml_compute_params * params,
  8049. const struct ggml_tensor * src0,
  8050. const struct ggml_tensor * src1,
  8051. struct ggml_tensor * dst) {
  8052. switch (src0->type) {
  8053. case GGML_TYPE_F32:
  8054. {
  8055. ggml_compute_forward_div_f32(params, src0, src1, dst);
  8056. } break;
  8057. default:
  8058. {
  8059. GGML_ASSERT(false);
  8060. } break;
  8061. }
  8062. }
  8063. // ggml_compute_forward_sqr
  8064. static void ggml_compute_forward_sqr_f32(
  8065. const struct ggml_compute_params * params,
  8066. const struct ggml_tensor * src0,
  8067. struct ggml_tensor * dst) {
  8068. assert(params->ith == 0);
  8069. assert(ggml_are_same_shape(src0, dst));
  8070. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8071. return;
  8072. }
  8073. const int n = ggml_nrows(src0);
  8074. const int nc = src0->ne[0];
  8075. assert( dst->nb[0] == sizeof(float));
  8076. assert(src0->nb[0] == sizeof(float));
  8077. for (int i = 0; i < n; i++) {
  8078. ggml_vec_sqr_f32(nc,
  8079. (float *) ((char *) dst->data + i*( dst->nb[1])),
  8080. (float *) ((char *) src0->data + i*(src0->nb[1])));
  8081. }
  8082. }
  8083. static void ggml_compute_forward_sqr(
  8084. const struct ggml_compute_params * params,
  8085. const struct ggml_tensor * src0,
  8086. struct ggml_tensor * dst) {
  8087. switch (src0->type) {
  8088. case GGML_TYPE_F32:
  8089. {
  8090. ggml_compute_forward_sqr_f32(params, src0, dst);
  8091. } break;
  8092. default:
  8093. {
  8094. GGML_ASSERT(false);
  8095. } break;
  8096. }
  8097. }
  8098. // ggml_compute_forward_sqrt
  8099. static void ggml_compute_forward_sqrt_f32(
  8100. const struct ggml_compute_params * params,
  8101. const struct ggml_tensor * src0,
  8102. struct ggml_tensor * dst) {
  8103. assert(params->ith == 0);
  8104. assert(ggml_are_same_shape(src0, dst));
  8105. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8106. return;
  8107. }
  8108. const int n = ggml_nrows(src0);
  8109. const int nc = src0->ne[0];
  8110. assert( dst->nb[0] == sizeof(float));
  8111. assert(src0->nb[0] == sizeof(float));
  8112. for (int i = 0; i < n; i++) {
  8113. ggml_vec_sqrt_f32(nc,
  8114. (float *) ((char *) dst->data + i*( dst->nb[1])),
  8115. (float *) ((char *) src0->data + i*(src0->nb[1])));
  8116. }
  8117. }
  8118. static void ggml_compute_forward_sqrt(
  8119. const struct ggml_compute_params * params,
  8120. const struct ggml_tensor * src0,
  8121. struct ggml_tensor * dst) {
  8122. switch (src0->type) {
  8123. case GGML_TYPE_F32:
  8124. {
  8125. ggml_compute_forward_sqrt_f32(params, src0, dst);
  8126. } break;
  8127. default:
  8128. {
  8129. GGML_ASSERT(false);
  8130. } break;
  8131. }
  8132. }
  8133. // ggml_compute_forward_log
  8134. static void ggml_compute_forward_log_f32(
  8135. const struct ggml_compute_params * params,
  8136. const struct ggml_tensor * src0,
  8137. struct ggml_tensor * dst) {
  8138. GGML_ASSERT(params->ith == 0);
  8139. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  8140. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8141. return;
  8142. }
  8143. const int n = ggml_nrows(src0);
  8144. const int nc = src0->ne[0];
  8145. GGML_ASSERT( dst->nb[0] == sizeof(float));
  8146. GGML_ASSERT(src0->nb[0] == sizeof(float));
  8147. for (int i = 0; i < n; i++) {
  8148. ggml_vec_log_f32(nc,
  8149. (float *) ((char *) dst->data + i*( dst->nb[1])),
  8150. (float *) ((char *) src0->data + i*(src0->nb[1])));
  8151. }
  8152. }
  8153. static void ggml_compute_forward_log(
  8154. const struct ggml_compute_params * params,
  8155. const struct ggml_tensor * src0,
  8156. struct ggml_tensor * dst) {
  8157. switch (src0->type) {
  8158. case GGML_TYPE_F32:
  8159. {
  8160. ggml_compute_forward_log_f32(params, src0, dst);
  8161. } break;
  8162. default:
  8163. {
  8164. GGML_ASSERT(false);
  8165. } break;
  8166. }
  8167. }
  8168. // ggml_compute_forward_sum
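// sum reduces the whole tensor to a single scalar; in the F32 path the row sums are
// accumulated in ggml_float (double precision) to limit rounding error.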
  8169. static void ggml_compute_forward_sum_f32(
  8170. const struct ggml_compute_params * params,
  8171. const struct ggml_tensor * src0,
  8172. struct ggml_tensor * dst) {
  8173. assert(params->ith == 0);
  8174. assert(ggml_is_scalar(dst));
  8175. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8176. return;
  8177. }
  8178. assert(ggml_is_scalar(dst));
  8179. assert(src0->nb[0] == sizeof(float));
  8180. GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne)
  8181. GGML_TENSOR_LOCALS(size_t, nb0, src0, nb)
  8182. ggml_float sum = 0;
  8183. ggml_float row_sum = 0;
  8184. for (int64_t i03 = 0; i03 < ne03; i03++) {
  8185. for (int64_t i02 = 0; i02 < ne02; i02++) {
  8186. for (int64_t i01 = 0; i01 < ne01; i01++) {
  8187. ggml_vec_sum_f32_ggf(ne00,
  8188. &row_sum,
  8189. (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03));
  8190. sum += row_sum;
  8191. }
  8192. }
  8193. }
  8194. ((float *) dst->data)[0] = sum;
  8195. }
  8196. static void ggml_compute_forward_sum_f16(
  8197. const struct ggml_compute_params * params,
  8198. const struct ggml_tensor * src0,
  8199. struct ggml_tensor * dst) {
  8200. assert(params->ith == 0);
  8201. assert(ggml_is_scalar(dst));
  8202. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8203. return;
  8204. }
  8205. assert(src0->nb[0] == sizeof(ggml_fp16_t));
  8206. GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne)
  8207. GGML_TENSOR_LOCALS(size_t, nb0, src0, nb)
  8208. float sum = 0;
  8209. float row_sum = 0;
  8210. for (int64_t i03 = 0; i03 < ne03; i03++) {
  8211. for (int64_t i02 = 0; i02 < ne02; i02++) {
  8212. for (int64_t i01 = 0; i01 < ne01; i01++) {
  8213. ggml_vec_sum_f16_ggf(ne00,
  8214. &row_sum,
  8215. (ggml_fp16_t *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03));
  8216. sum += row_sum;
  8217. }
  8218. }
  8219. }
  8220. ((ggml_fp16_t *) dst->data)[0] = GGML_FP32_TO_FP16(sum);
  8221. }
  8222. static void ggml_compute_forward_sum(
  8223. const struct ggml_compute_params * params,
  8224. const struct ggml_tensor * src0,
  8225. struct ggml_tensor * dst) {
  8226. switch (src0->type) {
  8227. case GGML_TYPE_F32:
  8228. {
  8229. ggml_compute_forward_sum_f32(params, src0, dst);
  8230. } break;
  8231. case GGML_TYPE_F16:
  8232. {
  8233. ggml_compute_forward_sum_f16(params, src0, dst);
  8234. } break;
  8235. default:
  8236. {
  8237. GGML_ASSERT(false);
  8238. } break;
  8239. }
  8240. }
  8241. // ggml_compute_forward_sum_rows
  8242. static void ggml_compute_forward_sum_rows_f32(
  8243. const struct ggml_compute_params * params,
  8244. const struct ggml_tensor * src0,
  8245. struct ggml_tensor * dst) {
  8246. GGML_ASSERT(params->ith == 0);
  8247. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8248. return;
  8249. }
  8250. GGML_ASSERT(src0->nb[0] == sizeof(float));
  8251. GGML_ASSERT(dst->nb[0] == sizeof(float));
  8252. GGML_TENSOR_UNARY_OP_LOCALS
  8253. GGML_ASSERT(ne0 == 1);
  8254. GGML_ASSERT(ne1 == ne01);
  8255. GGML_ASSERT(ne2 == ne02);
  8256. GGML_ASSERT(ne3 == ne03);
  8257. for (int64_t i3 = 0; i3 < ne03; i3++) {
  8258. for (int64_t i2 = 0; i2 < ne02; i2++) {
  8259. for (int64_t i1 = 0; i1 < ne01; i1++) {
  8260. float * src_row = (float *) ((char *) src0->data + i1*nb01 + i2*nb02 + i3*nb03);
  8261. float * dst_row = (float *) ((char *) dst->data + i1*nb1 + i2*nb2 + i3*nb3);
  8262. float row_sum = 0;
  8263. ggml_vec_sum_f32(ne00, &row_sum, src_row);
  8264. dst_row[0] = row_sum;
  8265. }
  8266. }
  8267. }
  8268. }
  8269. static void ggml_compute_forward_sum_rows(
  8270. const struct ggml_compute_params * params,
  8271. const struct ggml_tensor * src0,
  8272. struct ggml_tensor * dst) {
  8273. switch (src0->type) {
  8274. case GGML_TYPE_F32:
  8275. {
  8276. ggml_compute_forward_sum_rows_f32(params, src0, dst);
  8277. } break;
  8278. default:
  8279. {
  8280. GGML_ASSERT(false);
  8281. } break;
  8282. }
  8283. }
  8284. // ggml_compute_forward_mean
  8285. static void ggml_compute_forward_mean_f32(
  8286. const struct ggml_compute_params * params,
  8287. const struct ggml_tensor * src0,
  8288. struct ggml_tensor * dst) {
  8289. assert(params->ith == 0);
  8290. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8291. return;
  8292. }
  8293. assert(src0->nb[0] == sizeof(float));
  8294. GGML_TENSOR_UNARY_OP_LOCALS
  8295. assert(ne0 == 1);
  8296. assert(ne1 == ne01);
  8297. assert(ne2 == ne02);
  8298. assert(ne3 == ne03);
  8299. UNUSED(ne0);
  8300. UNUSED(ne1);
  8301. UNUSED(ne2);
  8302. UNUSED(ne3);
  8303. for (int64_t i03 = 0; i03 < ne03; i03++) {
  8304. for (int64_t i02 = 0; i02 < ne02; i02++) {
  8305. for (int64_t i01 = 0; i01 < ne01; i01++) {
  8306. ggml_vec_sum_f32(ne00,
  8307. (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3),
  8308. (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03));
  8309. *(float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3) /= (float) ne00;
  8310. }
  8311. }
  8312. }
  8313. }
  8314. static void ggml_compute_forward_mean(
  8315. const struct ggml_compute_params * params,
  8316. const struct ggml_tensor * src0,
  8317. struct ggml_tensor * dst) {
  8318. switch (src0->type) {
  8319. case GGML_TYPE_F32:
  8320. {
  8321. ggml_compute_forward_mean_f32(params, src0, dst);
  8322. } break;
  8323. default:
  8324. {
  8325. GGML_ASSERT(false);
  8326. } break;
  8327. }
  8328. }
  8329. // ggml_compute_forward_argmax
  8330. static void ggml_compute_forward_argmax_f32(
  8331. const struct ggml_compute_params * params,
  8332. const struct ggml_tensor * src0,
  8333. struct ggml_tensor * dst) {
  8334. assert(params->ith == 0);
  8335. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8336. return;
  8337. }
  8338. assert(src0->nb[0] == sizeof(float));
  8339. assert(dst->nb[0] == sizeof(float));
  8340. const int64_t ne00 = src0->ne[0];
  8341. const int64_t ne01 = src0->ne[1];
  8342. const size_t nb01 = src0->nb[1];
  8343. const size_t nb0 = dst->nb[0];
  8344. for (int64_t i1 = 0; i1 < ne01; i1++) {
  8345. float * src = (float *) ((char *) src0->data + i1*nb01);
  8346. int32_t * dst_ = (int32_t *) ((char *) dst->data + i1*nb0);
  8347. int v = 0;
  8348. ggml_vec_argmax_f32(ne00, &v, src);
  8349. dst_[0] = v;
  8350. }
  8351. }
  8352. static void ggml_compute_forward_argmax(
  8353. const struct ggml_compute_params * params,
  8354. const struct ggml_tensor * src0,
  8355. struct ggml_tensor * dst) {
  8356. switch (src0->type) {
  8357. case GGML_TYPE_F32:
  8358. {
  8359. ggml_compute_forward_argmax_f32(params, src0, dst);
  8360. } break;
  8361. default:
  8362. {
  8363. GGML_ASSERT(false);
  8364. } break;
  8365. }
  8366. }
  8367. // ggml_compute_forward_repeat
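// repeat tiles src0 into the larger dst: nr0..nr3 are the per-dimension repeat counts
// (integral by ggml_can_repeat) and the nested loops copy one src0 row per innermost
// iteration.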
  8368. static void ggml_compute_forward_repeat_f32(
  8369. const struct ggml_compute_params * params,
  8370. const struct ggml_tensor * src0,
  8371. struct ggml_tensor * dst) {
  8372. GGML_ASSERT(params->ith == 0);
  8373. GGML_ASSERT(ggml_can_repeat(src0, dst));
  8374. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8375. return;
  8376. }
  8377. GGML_TENSOR_UNARY_OP_LOCALS
  8378. // guaranteed to be an integer due to the check in ggml_can_repeat
  8379. const int nr0 = (int)(ne0/ne00);
  8380. const int nr1 = (int)(ne1/ne01);
  8381. const int nr2 = (int)(ne2/ne02);
  8382. const int nr3 = (int)(ne3/ne03);
  8383. // TODO: support for transposed / permuted tensors
  8384. GGML_ASSERT(nb0 == sizeof(float));
  8385. GGML_ASSERT(nb00 == sizeof(float));
  8386. // TODO: maybe this is not optimal?
  8387. for (int i3 = 0; i3 < nr3; i3++) {
  8388. for (int k3 = 0; k3 < ne03; k3++) {
  8389. for (int i2 = 0; i2 < nr2; i2++) {
  8390. for (int k2 = 0; k2 < ne02; k2++) {
  8391. for (int i1 = 0; i1 < nr1; i1++) {
  8392. for (int k1 = 0; k1 < ne01; k1++) {
  8393. for (int i0 = 0; i0 < nr0; i0++) {
  8394. ggml_vec_cpy_f32(ne00,
  8395. (float *) ((char *) dst->data + (i3*ne03 + k3)*nb3 + (i2*ne02 + k2)*nb2 + (i1*ne01 + k1)*nb1 + (i0*ne00)*nb0),
  8396. (float *) ((char *) src0->data + ( k3)*nb03 + ( k2)*nb02 + ( k1)*nb01));
  8397. }
  8398. }
  8399. }
  8400. }
  8401. }
  8402. }
  8403. }
  8404. }
  8405. static void ggml_compute_forward_repeat_f16(
  8406. const struct ggml_compute_params * params,
  8407. const struct ggml_tensor * src0,
  8408. struct ggml_tensor * dst) {
  8409. GGML_ASSERT(params->ith == 0);
  8410. GGML_ASSERT(ggml_can_repeat(src0, dst));
  8411. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8412. return;
  8413. }
  8414. GGML_TENSOR_UNARY_OP_LOCALS;
  8415. // guaranteed to be an integer due to the check in ggml_can_repeat
  8416. const int nr0 = (int)(ne0/ne00);
  8417. const int nr1 = (int)(ne1/ne01);
  8418. const int nr2 = (int)(ne2/ne02);
  8419. const int nr3 = (int)(ne3/ne03);
  8420. // TODO: support for transposed / permuted tensors
  8421. GGML_ASSERT(nb0 == sizeof(ggml_fp16_t));
  8422. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  8423. // TODO: maybe this is not optimal?
  8424. for (int i3 = 0; i3 < nr3; i3++) {
  8425. for (int k3 = 0; k3 < ne03; k3++) {
  8426. for (int i2 = 0; i2 < nr2; i2++) {
  8427. for (int k2 = 0; k2 < ne02; k2++) {
  8428. for (int i1 = 0; i1 < nr1; i1++) {
  8429. for (int k1 = 0; k1 < ne01; k1++) {
  8430. for (int i0 = 0; i0 < nr0; i0++) {
  8431. ggml_fp16_t * y = (ggml_fp16_t *) ((char *) dst->data + (i3*ne03 + k3)*nb3 + (i2*ne02 + k2)*nb2 + (i1*ne01 + k1)*nb1 + (i0*ne00)*nb0);
  8432. ggml_fp16_t * x = (ggml_fp16_t *) ((char *) src0->data + ( k3)*nb03 + ( k2)*nb02 + ( k1)*nb01);
  8433. // ggml_vec_cpy_f16(ne00, y, x)
  8434. for (int i = 0; i < ne00; ++i) {
  8435. y[i] = x[i];
  8436. }
  8437. }
  8438. }
  8439. }
  8440. }
  8441. }
  8442. }
  8443. }
  8444. }
  8445. static void ggml_compute_forward_repeat(
  8446. const struct ggml_compute_params * params,
  8447. const struct ggml_tensor * src0,
  8448. struct ggml_tensor * dst) {
  8449. switch (src0->type) {
  8450. case GGML_TYPE_F16:
  8451. {
  8452. ggml_compute_forward_repeat_f16(params, src0, dst);
  8453. } break;
  8454. case GGML_TYPE_F32:
  8455. {
  8456. ggml_compute_forward_repeat_f32(params, src0, dst);
  8457. } break;
  8458. default:
  8459. {
  8460. GGML_ASSERT(false);
  8461. } break;
  8462. }
  8463. }
  8464. // ggml_compute_forward_repeat_back
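// repeat_back reverses repeat: dst is zeroed and every repeated tile of src0 is
// accumulated back into it, which is what the backward pass of a broadcast needs.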
  8465. static void ggml_compute_forward_repeat_back_f32(
  8466. const struct ggml_compute_params * params,
  8467. const struct ggml_tensor * src0,
  8468. struct ggml_tensor * dst) {
  8469. GGML_ASSERT(params->ith == 0);
  8470. GGML_ASSERT(ggml_can_repeat(dst, src0));
  8471. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8472. return;
  8473. }
  8474. GGML_TENSOR_UNARY_OP_LOCALS
  8475. // guaranteed to be an integer due to the check in ggml_can_repeat
  8476. const int nr0 = (int)(ne00/ne0);
  8477. const int nr1 = (int)(ne01/ne1);
  8478. const int nr2 = (int)(ne02/ne2);
  8479. const int nr3 = (int)(ne03/ne3);
  8480. // TODO: support for transposed / permuted tensors
  8481. GGML_ASSERT(nb0 == sizeof(float));
  8482. GGML_ASSERT(nb00 == sizeof(float));
  8483. if (ggml_is_contiguous(dst)) {
  8484. ggml_vec_set_f32(ne0*ne1*ne2*ne3, dst->data, 0);
  8485. } else {
  8486. for (int k3 = 0; k3 < ne3; k3++) {
  8487. for (int k2 = 0; k2 < ne2; k2++) {
  8488. for (int k1 = 0; k1 < ne1; k1++) {
  8489. ggml_vec_set_f32(ne0,
  8490. (float *) ((char *) dst->data + k1*nb1 + k2*nb2 + k3*nb3),
  8491. 0);
  8492. }
  8493. }
  8494. }
  8495. }
  8496. // TODO: maybe this is not optimal?
  8497. for (int i3 = 0; i3 < nr3; i3++) {
  8498. for (int k3 = 0; k3 < ne3; k3++) {
  8499. for (int i2 = 0; i2 < nr2; i2++) {
  8500. for (int k2 = 0; k2 < ne2; k2++) {
  8501. for (int i1 = 0; i1 < nr1; i1++) {
  8502. for (int k1 = 0; k1 < ne1; k1++) {
  8503. for (int i0 = 0; i0 < nr0; i0++) {
  8504. ggml_vec_acc_f32(ne0,
  8505. (float *) ((char *) dst->data + ( k3)*nb3 + ( k2)*nb2 + ( k1)*nb1),
  8506. (float *) ((char *) src0->data + (i3*ne3 + k3)*nb03 + (i2*ne2 + k2)*nb02 + (i1*ne1 + k1)*nb01 + (i0*ne0)*nb00));
  8507. }
  8508. }
  8509. }
  8510. }
  8511. }
  8512. }
  8513. }
  8514. }
  8515. static void ggml_compute_forward_repeat_back(
  8516. const struct ggml_compute_params * params,
  8517. const struct ggml_tensor * src0,
  8518. struct ggml_tensor * dst) {
  8519. switch (src0->type) {
  8520. case GGML_TYPE_F32:
  8521. {
  8522. ggml_compute_forward_repeat_back_f32(params, src0, dst);
  8523. } break;
  8524. default:
  8525. {
  8526. GGML_ASSERT(false);
  8527. } break;
  8528. }
  8529. }
  8530. // ggml_compute_forward_concat
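// concat joins src0 and src1 along dimension 2: planes with i2 < ne02 are copied from
// src0, the remaining planes from src1 with the i2 index shifted by ne02.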
  8531. static void ggml_compute_forward_concat_f32(
  8532. const struct ggml_compute_params * params,
  8533. const struct ggml_tensor * src0,
  8534. const struct ggml_tensor * src1,
  8535. struct ggml_tensor * dst) {
  8536. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8537. return;
  8538. }
  8539. GGML_ASSERT(src0->nb[0] == sizeof(float));
  8540. const int ith = params->ith;
  8541. GGML_TENSOR_BINARY_OP_LOCALS
  8542. // TODO: support for transposed / permuted tensors
  8543. GGML_ASSERT(nb0 == sizeof(float));
  8544. GGML_ASSERT(nb00 == sizeof(float));
  8545. GGML_ASSERT(nb10 == sizeof(float));
  8546. for (int i3 = 0; i3 < ne3; i3++) {
  8547. for (int i2 = ith; i2 < ne2; i2++) {
  8548. if (i2 < ne02) { // src0
  8549. for (int i1 = 0; i1 < ne1; i1++) {
  8550. for (int i0 = 0; i0 < ne0; i0++) {
  8551. const float * x = (float *)((char *) src0->data + i0 * nb00 + i1 * nb01 + i2 * nb02 + i3 * nb03);
  8552. float * y = (float *)((char *)dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3);
  8553. *y = *x;
  8554. }
  8555. }
  8556. } // src1
  8557. else {
  8558. for (int i1 = 0; i1 < ne1; i1++) {
  8559. for (int i0 = 0; i0 < ne0; i0++) {
  8560. const float * x = (float *)((char *) src1->data + i0 * nb10 + i1 * nb11 + (i2 - ne02) * nb12 + i3 * nb13);
  8561. float * y = (float *)((char *)dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3);
  8562. *y = *x;
  8563. }
  8564. }
  8565. }
  8566. }
  8567. }
  8568. }
  8569. static void ggml_compute_forward_concat(
  8570. const struct ggml_compute_params* params,
  8571. const struct ggml_tensor* src0,
  8572. const struct ggml_tensor* src1,
  8573. struct ggml_tensor* dst) {
  8574. switch (src0->type) {
  8575. case GGML_TYPE_F32:
  8576. {
  8577. ggml_compute_forward_concat_f32(params, src0, src1, dst);
  8578. } break;
  8579. default:
  8580. {
  8581. GGML_ASSERT(false);
  8582. } break;
  8583. }
  8584. }
  8585. // ggml_compute_forward_abs
  8586. static void ggml_compute_forward_abs_f32(
  8587. const struct ggml_compute_params * params,
  8588. const struct ggml_tensor * src0,
  8589. struct ggml_tensor * dst) {
  8590. assert(params->ith == 0);
  8591. assert(ggml_are_same_shape(src0, dst));
  8592. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8593. return;
  8594. }
  8595. const int n = ggml_nrows(src0);
  8596. const int nc = src0->ne[0];
  8597. assert(dst->nb[0] == sizeof(float));
  8598. assert(src0->nb[0] == sizeof(float));
  8599. for (int i = 0; i < n; i++) {
  8600. ggml_vec_abs_f32(nc,
  8601. (float *) ((char *) dst->data + i*( dst->nb[1])),
  8602. (float *) ((char *) src0->data + i*(src0->nb[1])));
  8603. }
  8604. }
  8605. static void ggml_compute_forward_abs(
  8606. const struct ggml_compute_params * params,
  8607. const struct ggml_tensor * src0,
  8608. struct ggml_tensor * dst) {
  8609. switch (src0->type) {
  8610. case GGML_TYPE_F32:
  8611. {
  8612. ggml_compute_forward_abs_f32(params, src0, dst);
  8613. } break;
  8614. default:
  8615. {
  8616. GGML_ASSERT(false);
  8617. } break;
  8618. }
  8619. }
  8620. // ggml_compute_forward_sgn
  8621. static void ggml_compute_forward_sgn_f32(
  8622. const struct ggml_compute_params * params,
  8623. const struct ggml_tensor * src0,
  8624. struct ggml_tensor * dst) {
  8625. assert(params->ith == 0);
  8626. assert(ggml_are_same_shape(src0, dst));
  8627. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8628. return;
  8629. }
  8630. const int n = ggml_nrows(src0);
  8631. const int nc = src0->ne[0];
  8632. assert(dst->nb[0] == sizeof(float));
  8633. assert(src0->nb[0] == sizeof(float));
  8634. for (int i = 0; i < n; i++) {
  8635. ggml_vec_sgn_f32(nc,
  8636. (float *) ((char *) dst->data + i*( dst->nb[1])),
  8637. (float *) ((char *) src0->data + i*(src0->nb[1])));
  8638. }
  8639. }
  8640. static void ggml_compute_forward_sgn(
  8641. const struct ggml_compute_params * params,
  8642. const struct ggml_tensor * src0,
  8643. struct ggml_tensor * dst) {
  8644. switch (src0->type) {
  8645. case GGML_TYPE_F32:
  8646. {
  8647. ggml_compute_forward_sgn_f32(params, src0, dst);
  8648. } break;
  8649. default:
  8650. {
  8651. GGML_ASSERT(false);
  8652. } break;
  8653. }
  8654. }
  8655. // ggml_compute_forward_neg
  8656. static void ggml_compute_forward_neg_f32(
  8657. const struct ggml_compute_params * params,
  8658. const struct ggml_tensor * src0,
  8659. struct ggml_tensor * dst) {
  8660. assert(params->ith == 0);
  8661. assert(ggml_are_same_shape(src0, dst));
  8662. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8663. return;
  8664. }
  8665. const int n = ggml_nrows(src0);
  8666. const int nc = src0->ne[0];
  8667. assert(dst->nb[0] == sizeof(float));
  8668. assert(src0->nb[0] == sizeof(float));
  8669. for (int i = 0; i < n; i++) {
  8670. ggml_vec_neg_f32(nc,
  8671. (float *) ((char *) dst->data + i*( dst->nb[1])),
  8672. (float *) ((char *) src0->data + i*(src0->nb[1])));
  8673. }
  8674. }
  8675. static void ggml_compute_forward_neg(
  8676. const struct ggml_compute_params * params,
  8677. const struct ggml_tensor * src0,
  8678. struct ggml_tensor * dst) {
  8679. switch (src0->type) {
  8680. case GGML_TYPE_F32:
  8681. {
  8682. ggml_compute_forward_neg_f32(params, src0, dst);
  8683. } break;
  8684. default:
  8685. {
  8686. GGML_ASSERT(false);
  8687. } break;
  8688. }
  8689. }
  8690. // ggml_compute_forward_step
  8691. static void ggml_compute_forward_step_f32(
  8692. const struct ggml_compute_params * params,
  8693. const struct ggml_tensor * src0,
  8694. struct ggml_tensor * dst) {
  8695. assert(params->ith == 0);
  8696. assert(ggml_are_same_shape(src0, dst));
  8697. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8698. return;
  8699. }
  8700. const int n = ggml_nrows(src0);
  8701. const int nc = src0->ne[0];
  8702. assert(dst->nb[0] == sizeof(float));
  8703. assert(src0->nb[0] == sizeof(float));
  8704. for (int i = 0; i < n; i++) {
  8705. ggml_vec_step_f32(nc,
  8706. (float *) ((char *) dst->data + i*( dst->nb[1])),
  8707. (float *) ((char *) src0->data + i*(src0->nb[1])));
  8708. }
  8709. }
  8710. static void ggml_compute_forward_step(
  8711. const struct ggml_compute_params * params,
  8712. const struct ggml_tensor * src0,
  8713. struct ggml_tensor * dst) {
  8714. switch (src0->type) {
  8715. case GGML_TYPE_F32:
  8716. {
  8717. ggml_compute_forward_step_f32(params, src0, dst);
  8718. } break;
  8719. default:
  8720. {
  8721. GGML_ASSERT(false);
  8722. } break;
  8723. }
  8724. }
  8725. // ggml_compute_forward_tanh
  8726. static void ggml_compute_forward_tanh_f32(
  8727. const struct ggml_compute_params * params,
  8728. const struct ggml_tensor * src0,
  8729. struct ggml_tensor * dst) {
  8730. assert(params->ith == 0);
  8731. assert(ggml_are_same_shape(src0, dst));
  8732. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8733. return;
  8734. }
  8735. const int n = ggml_nrows(src0);
  8736. const int nc = src0->ne[0];
  8737. assert(dst->nb[0] == sizeof(float));
  8738. assert(src0->nb[0] == sizeof(float));
  8739. for (int i = 0; i < n; i++) {
  8740. ggml_vec_tanh_f32(nc,
  8741. (float *) ((char *) dst->data + i*( dst->nb[1])),
  8742. (float *) ((char *) src0->data + i*(src0->nb[1])));
  8743. }
  8744. }
  8745. static void ggml_compute_forward_tanh(
  8746. const struct ggml_compute_params * params,
  8747. const struct ggml_tensor * src0,
  8748. struct ggml_tensor * dst) {
  8749. switch (src0->type) {
  8750. case GGML_TYPE_F32:
  8751. {
  8752. ggml_compute_forward_tanh_f32(params, src0, dst);
  8753. } break;
  8754. default:
  8755. {
  8756. GGML_ASSERT(false);
  8757. } break;
  8758. }
  8759. }
  8760. // ggml_compute_forward_elu
  8761. static void ggml_compute_forward_elu_f32(
  8762. const struct ggml_compute_params * params,
  8763. const struct ggml_tensor * src0,
  8764. struct ggml_tensor * dst) {
  8765. assert(params->ith == 0);
  8766. assert(ggml_are_same_shape(src0, dst));
  8767. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8768. return;
  8769. }
  8770. const int n = ggml_nrows(src0);
  8771. const int nc = src0->ne[0];
  8772. assert(dst->nb[0] == sizeof(float));
  8773. assert(src0->nb[0] == sizeof(float));
  8774. for (int i = 0; i < n; i++) {
  8775. ggml_vec_elu_f32(nc,
  8776. (float *) ((char *) dst->data + i*( dst->nb[1])),
  8777. (float *) ((char *) src0->data + i*(src0->nb[1])));
  8778. }
  8779. }
  8780. static void ggml_compute_forward_elu(
  8781. const struct ggml_compute_params * params,
  8782. const struct ggml_tensor * src0,
  8783. struct ggml_tensor * dst) {
  8784. switch (src0->type) {
  8785. case GGML_TYPE_F32:
  8786. {
  8787. ggml_compute_forward_elu_f32(params, src0, dst);
  8788. } break;
  8789. default:
  8790. {
  8791. GGML_ASSERT(false);
  8792. } break;
  8793. }
  8794. }
  8795. // ggml_compute_forward_relu
  8796. static void ggml_compute_forward_relu_f32(
  8797. const struct ggml_compute_params * params,
  8798. const struct ggml_tensor * src0,
  8799. struct ggml_tensor * dst) {
  8800. assert(params->ith == 0);
  8801. assert(ggml_are_same_shape(src0, dst));
  8802. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8803. return;
  8804. }
  8805. const int n = ggml_nrows(src0);
  8806. const int nc = src0->ne[0];
  8807. assert(dst->nb[0] == sizeof(float));
  8808. assert(src0->nb[0] == sizeof(float));
  8809. for (int i = 0; i < n; i++) {
  8810. ggml_vec_relu_f32(nc,
  8811. (float *) ((char *) dst->data + i*( dst->nb[1])),
  8812. (float *) ((char *) src0->data + i*(src0->nb[1])));
  8813. }
  8814. }
  8815. static void ggml_compute_forward_relu(
  8816. const struct ggml_compute_params * params,
  8817. const struct ggml_tensor * src0,
  8818. struct ggml_tensor * dst) {
  8819. switch (src0->type) {
  8820. case GGML_TYPE_F32:
  8821. {
  8822. ggml_compute_forward_relu_f32(params, src0, dst);
  8823. } break;
  8824. default:
  8825. {
  8826. GGML_ASSERT(false);
  8827. } break;
  8828. }
  8829. }
  8830. // ggml_compute_forward_gelu
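// gelu, gelu_quick and silu below share the same structure: rows are split across
// threads, a vectorized per-row helper applies the activation, and the NDEBUG block
// re-checks the output for NaN/Inf in debug builds.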
  8831. static void ggml_compute_forward_gelu_f32(
  8832. const struct ggml_compute_params * params,
  8833. const struct ggml_tensor * src0,
  8834. struct ggml_tensor * dst) {
  8835. GGML_ASSERT(ggml_is_contiguous_except_dim_1(src0));
  8836. GGML_ASSERT(ggml_is_contiguous_except_dim_1(dst));
  8837. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  8838. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8839. return;
  8840. }
  8841. const int ith = params->ith;
  8842. const int nth = params->nth;
  8843. const int nc = src0->ne[0];
  8844. const int nr = ggml_nrows(src0);
  8845. // rows per thread
  8846. const int dr = (nr + nth - 1)/nth;
  8847. // row range for this thread
  8848. const int ir0 = dr*ith;
  8849. const int ir1 = MIN(ir0 + dr, nr);
  8850. for (int i1 = ir0; i1 < ir1; i1++) {
  8851. ggml_vec_gelu_f32(nc,
  8852. (float *) ((char *) dst->data + i1*( dst->nb[1])),
  8853. (float *) ((char *) src0->data + i1*(src0->nb[1])));
  8854. #ifndef NDEBUG
  8855. for (int k = 0; k < nc; k++) {
  8856. const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k];
  8857. UNUSED(x);
  8858. assert(!isnan(x));
  8859. assert(!isinf(x));
  8860. }
  8861. #endif
  8862. }
  8863. }
  8864. static void ggml_compute_forward_gelu(
  8865. const struct ggml_compute_params * params,
  8866. const struct ggml_tensor * src0,
  8867. struct ggml_tensor * dst) {
  8868. switch (src0->type) {
  8869. case GGML_TYPE_F32:
  8870. {
  8871. ggml_compute_forward_gelu_f32(params, src0, dst);
  8872. } break;
  8873. default:
  8874. {
  8875. GGML_ASSERT(false);
  8876. } break;
  8877. }
  8878. }
  8879. // ggml_compute_forward_gelu_quick
  8880. static void ggml_compute_forward_gelu_quick_f32(
  8881. const struct ggml_compute_params * params,
  8882. const struct ggml_tensor * src0,
  8883. struct ggml_tensor * dst) {
  8884. GGML_ASSERT(ggml_is_contiguous_except_dim_1(src0));
  8885. GGML_ASSERT(ggml_is_contiguous_except_dim_1(dst));
  8886. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  8887. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8888. return;
  8889. }
  8890. const int ith = params->ith;
  8891. const int nth = params->nth;
  8892. const int nc = src0->ne[0];
  8893. const int nr = ggml_nrows(src0);
  8894. // rows per thread
  8895. const int dr = (nr + nth - 1)/nth;
  8896. // row range for this thread
  8897. const int ir0 = dr*ith;
  8898. const int ir1 = MIN(ir0 + dr, nr);
  8899. for (int i1 = ir0; i1 < ir1; i1++) {
  8900. ggml_vec_gelu_quick_f32(nc,
  8901. (float *) ((char *) dst->data + i1*( dst->nb[1])),
  8902. (float *) ((char *) src0->data + i1*(src0->nb[1])));
  8903. #ifndef NDEBUG
  8904. for (int k = 0; k < nc; k++) {
  8905. const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k];
  8906. UNUSED(x);
  8907. assert(!isnan(x));
  8908. assert(!isinf(x));
  8909. }
  8910. #endif
  8911. }
  8912. }
  8913. static void ggml_compute_forward_gelu_quick(
  8914. const struct ggml_compute_params * params,
  8915. const struct ggml_tensor * src0,
  8916. struct ggml_tensor * dst) {
  8917. switch (src0->type) {
  8918. case GGML_TYPE_F32:
  8919. {
  8920. ggml_compute_forward_gelu_quick_f32(params, src0, dst);
  8921. } break;
  8922. default:
  8923. {
  8924. GGML_ASSERT(false);
  8925. } break;
  8926. }
  8927. }
  8928. // ggml_compute_forward_silu
  8929. static void ggml_compute_forward_silu_f32(
  8930. const struct ggml_compute_params * params,
  8931. const struct ggml_tensor * src0,
  8932. struct ggml_tensor * dst) {
  8933. GGML_ASSERT(ggml_is_contiguous_except_dim_1(src0));
  8934. GGML_ASSERT(ggml_is_contiguous_except_dim_1(dst));
  8935. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  8936. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8937. return;
  8938. }
  8939. const int ith = params->ith;
  8940. const int nth = params->nth;
  8941. const int nc = src0->ne[0];
  8942. const int nr = ggml_nrows(src0);
  8943. // rows per thread
  8944. const int dr = (nr + nth - 1)/nth;
  8945. // row range for this thread
  8946. const int ir0 = dr*ith;
  8947. const int ir1 = MIN(ir0 + dr, nr);
  8948. for (int i1 = ir0; i1 < ir1; i1++) {
  8949. ggml_vec_silu_f32(nc,
  8950. (float *) ((char *) dst->data + i1*( dst->nb[1])),
  8951. (float *) ((char *) src0->data + i1*(src0->nb[1])));
  8952. #ifndef NDEBUG
  8953. for (int k = 0; k < nc; k++) {
  8954. const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k];
  8955. UNUSED(x);
  8956. assert(!isnan(x));
  8957. assert(!isinf(x));
  8958. }
  8959. #endif
  8960. }
  8961. }
  8962. static void ggml_compute_forward_silu(
  8963. const struct ggml_compute_params * params,
  8964. const struct ggml_tensor * src0,
  8965. struct ggml_tensor * dst) {
  8966. switch (src0->type) {
  8967. case GGML_TYPE_F32:
  8968. {
  8969. ggml_compute_forward_silu_f32(params, src0, dst);
  8970. } break;
  8971. default:
  8972. {
  8973. GGML_ASSERT(false);
  8974. } break;
  8975. }
  8976. }
  8977. // ggml_compute_forward_silu_back
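// silu_back computes the input gradient of silu: dst receives grad scaled element-wise
// by the derivative of silu evaluated at src0 (via ggml_vec_silu_backward_f32).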
  8978. static void ggml_compute_forward_silu_back_f32(
  8979. const struct ggml_compute_params * params,
  8980. const struct ggml_tensor * src0,
  8981. const struct ggml_tensor * grad,
  8982. struct ggml_tensor * dst) {
  8983. GGML_ASSERT(ggml_is_contiguous_except_dim_1(grad));
  8984. GGML_ASSERT(ggml_is_contiguous_except_dim_1(src0));
  8985. GGML_ASSERT(ggml_is_contiguous_except_dim_1(dst));
  8986. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  8987. GGML_ASSERT(ggml_are_same_shape(src0, grad));
  8988. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8989. return;
  8990. }
  8991. const int ith = params->ith;
  8992. const int nth = params->nth;
  8993. const int nc = src0->ne[0];
  8994. const int nr = ggml_nrows(src0);
  8995. // rows per thread
  8996. const int dr = (nr + nth - 1)/nth;
  8997. // row range for this thread
  8998. const int ir0 = dr*ith;
  8999. const int ir1 = MIN(ir0 + dr, nr);
  9000. for (int i1 = ir0; i1 < ir1; i1++) {
  9001. ggml_vec_silu_backward_f32(nc,
  9002. (float *) ((char *) dst->data + i1*( dst->nb[1])),
  9003. (float *) ((char *) src0->data + i1*(src0->nb[1])),
  9004. (float *) ((char *) grad->data + i1*(grad->nb[1])));
  9005. #ifndef NDEBUG
  9006. for (int k = 0; k < nc; k++) {
  9007. const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k];
  9008. UNUSED(x);
  9009. assert(!isnan(x));
  9010. assert(!isinf(x));
  9011. }
  9012. #endif
  9013. }
  9014. }
  9015. static void ggml_compute_forward_silu_back(
  9016. const struct ggml_compute_params * params,
  9017. const struct ggml_tensor * src0,
  9018. const struct ggml_tensor * grad,
  9019. struct ggml_tensor * dst) {
  9020. switch (src0->type) {
  9021. case GGML_TYPE_F32:
  9022. {
  9023. ggml_compute_forward_silu_back_f32(params, src0, grad, dst);
  9024. } break;
  9025. default:
  9026. {
  9027. GGML_ASSERT(false);
  9028. } break;
  9029. }
  9030. }
  9031. // ggml_compute_forward_norm
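// norm is a layer normalization over the innermost dimension: each row is centered by
// its mean and scaled by 1/sqrt(variance + eps), with eps read from dst->op_params.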
  9032. static void ggml_compute_forward_norm_f32(
  9033. const struct ggml_compute_params * params,
  9034. const struct ggml_tensor * src0,
  9035. struct ggml_tensor * dst) {
  9036. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  9037. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9038. return;
  9039. }
  9040. GGML_ASSERT(src0->nb[0] == sizeof(float));
  9041. const int ith = params->ith;
  9042. const int nth = params->nth;
  9043. GGML_TENSOR_UNARY_OP_LOCALS
  9044. float eps;
  9045. memcpy(&eps, dst->op_params, sizeof(float));
  9046. // TODO: optimize
  9047. for (int64_t i03 = 0; i03 < ne03; i03++) {
  9048. for (int64_t i02 = 0; i02 < ne02; i02++) {
  9049. for (int64_t i01 = ith; i01 < ne01; i01 += nth) {
  9050. const float * x = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
  9051. ggml_float sum = 0.0;
  9052. for (int64_t i00 = 0; i00 < ne00; i00++) {
  9053. sum += (ggml_float)x[i00];
  9054. }
  9055. float mean = sum/ne00;
  9056. float * y = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3);
  9057. ggml_float sum2 = 0.0;
  9058. for (int64_t i00 = 0; i00 < ne00; i00++) {
  9059. float v = x[i00] - mean;
  9060. y[i00] = v;
  9061. sum2 += (ggml_float)(v*v);
  9062. }
  9063. float variance = sum2/ne00;
  9064. const float scale = 1.0f/sqrtf(variance + eps);
  9065. ggml_vec_scale_f32(ne00, y, scale);
  9066. }
  9067. }
  9068. }
  9069. }
static void ggml_compute_forward_norm(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_norm_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_rms_norm

static void ggml_compute_forward_rms_norm_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    GGML_ASSERT(src0->nb[0] == sizeof(float));

    const int ith = params->ith;
    const int nth = params->nth;

    GGML_TENSOR_UNARY_OP_LOCALS

    float eps;
    memcpy(&eps, dst->op_params, sizeof(float));

    // TODO: optimize
    for (int64_t i03 = 0; i03 < ne03; i03++) {
        for (int64_t i02 = 0; i02 < ne02; i02++) {
            for (int64_t i01 = ith; i01 < ne01; i01 += nth) {
                const float * x = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);

                ggml_float sum = 0.0;
                for (int64_t i00 = 0; i00 < ne00; i00++) {
                    sum += (ggml_float)(x[i00] * x[i00]);
                }

                const float mean = sum/ne00;

                float * y = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3);

                memcpy(y, x, ne00 * sizeof(float));
                // for (int i00 = 0; i00 < ne00; i00++) {
                //     y[i00] = x[i00];
                // }

                const float scale = 1.0f/sqrtf(mean + eps);

                ggml_vec_scale_f32(ne00, y, scale);
            }
        }
    }
}
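
// in formula form (a restatement of the loop above): for each row x of length N,
//
//   ms   = (1/N) * sum_i x[i]^2
//   y[i] = x[i] / sqrt(ms + eps)
//
// unlike ggml_compute_forward_norm_f32 above, the mean is not subtracted:
// only the root-mean-square of the row is normalized away.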
static void ggml_compute_forward_rms_norm(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_rms_norm_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

static void ggml_compute_forward_rms_norm_back_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, dst) && ggml_are_same_shape(src0, src1));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    GGML_ASSERT(src0->nb[0] == sizeof(float));

    const int ith = params->ith;
    const int nth = params->nth;

    GGML_TENSOR_BINARY_OP_LOCALS

    float eps;
    memcpy(&eps, dst->op_params, sizeof(float));

    // TODO: optimize
    for (int64_t i03 = 0; i03 < ne03; i03++) {
        for (int64_t i02 = 0; i02 < ne02; i02++) {
            for (int64_t i01 = ith; i01 < ne01; i01 += nth) {
                // src1 is same shape as src0 => same indices
                const int64_t i11 = i01;
                const int64_t i12 = i02;
                const int64_t i13 = i03;

                const float * x  = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
                const float * dz = (float *) ((char *) src1->data + i11*nb11 + i12*nb12 + i13*nb13);

                ggml_float sum_xx  = 0.0;
                ggml_float sum_xdz = 0.0;

                for (int64_t i00 = 0; i00 < ne00; i00++) {
                    sum_xx  += (ggml_float)(x[i00] * x[i00]);
                    sum_xdz += (ggml_float)(x[i00] * dz[i00]);
                }

                //const float mean     = (float)(sum_xx)/ne00;
                const float mean_eps = (float)(sum_xx)/ne00 + eps;
                const float sum_eps  = (float)(sum_xx) + eps*ne00;
                //const float mean_xdz = (float)(sum_xdz)/ne00;
                // we could cache rms from forward pass to improve performance.
                // to do this implement ggml_rms and compose ggml_rms_norm using ggml_rms.
                //const float rms  = sqrtf(mean_eps);
                const float rrms = 1.0f / sqrtf(mean_eps);
                //const float scale = -rrms/(ne00 * mean_eps); // -1/(n*rms**3)

                {
                    // z = rms_norm(x)
                    //
                    // rms_norm(src0) =
                    //     scale(
                    //         src0,
                    //         div(
                    //             1,
                    //             sqrt(
                    //                 add(
                    //                     scale(
                    //                         sum(
                    //                             sqr(
                    //                                 src0)),
                    //                         (1.0/N)),
                    //                     eps))));

                    // postorder:
                    // ## op    args         grad
                    // 00 param src0         grad[#00]
                    // 01 const 1
                    // 02 sqr   (#00)        grad[#02]
                    // 03 sum   (#02)        grad[#03]
                    // 04 const 1/N
                    // 05 scale (#03, #04)   grad[#05]
                    // 06 const eps
                    // 07 add   (#05, #06)   grad[#07]
                    // 08 sqrt  (#07)        grad[#08]
                    // 09 div   (#01,#08)    grad[#09]
                    // 10 scale (#00,#09)    grad[#10]

                    // backward pass, given grad[#10]
                    // #10: scale
                    // grad[#00] += scale(grad[#10],#09)
                    // grad[#09] += sum(mul(grad[#10],#00))
                    // #09: div
                    // grad[#08] += neg(mul(grad[#09], div(#09,#08)))
                    // #08: sqrt
                    // grad[#07] += mul(grad[#08], div(0.5, #08))
                    // #07: add
                    // grad[#05] += grad[#07]
                    // #05: scale
                    // grad[#03] += scale(grad[#05],#04)
                    // #03: sum
                    // grad[#02] += repeat(grad[#03], #02)
                    // #02: sqr
                    // grad[#00] += scale(mul(#00, grad[#02]), 2.0)

                    // substitute and simplify:
                    // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, grad[#02]), 2.0)
                    // grad[#02] = repeat(grad[#03], #02)
                    // grad[#02] = repeat(scale(grad[#05],#04), #02)
                    // grad[#02] = repeat(scale(grad[#07],#04), #02)
                    // grad[#02] = repeat(scale(mul(grad[#08], div(0.5, #08)),#04), #02)
                    // grad[#02] = repeat(scale(mul(neg(mul(grad[#09], div(#09,#08))), div(0.5, #08)),#04), #02)
                    // grad[#02] = repeat(scale(mul(neg(mul(sum(mul(grad[#10],#00)), div(#09,#08))), div(0.5, #08)),#04), #02)
                    // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(#09,#08) * div(0.5, #08) * (1/N)), #02)
                    // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(div(#01,#08),#08) * div(0.5, #08) * (1/N)), #02)
                    // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(1,#08*#08) * div(0.5, #08) * (1/N)), #02)
                    // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N)), #02)
                    // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, grad[#02]), 2.0)
                    // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, repeat(-(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N)), #02)), 2.0)
                    // grad[#00] = scale(grad(#10), #09) + scale(scale(#00, -(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N))), 2.0)
                    // grad[#00] = scale(grad(#10), #09) + scale(#00, -(sum(mul(grad[#10],#00)) * div(1,#07) * div(1,#08) * (1/N)))
                    // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(1,#07*#08) * (-1/N))
                    // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(1,mean_eps*rms) * (-1/N))
                    // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*mean_eps))
                    // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*(sum_xx/N+eps)))
                    // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*sum_xx+rms*N*eps))
                    // grad[#00] = scale(dz, rrms) + scale(x, sum(mul(dz,x)) * div(-1,rms*N*mean_eps))
                    // grad[#00] = scale(dz, rrms) + scale(x, sum_xdz * div(-1,rms*N*mean_eps))

                    // a = b*c + d*e
                    // a = b*c*f/f + d*e*f/f
                    // a = (b*c*f + d*e*f)*(1/f)
                    // a = (b*c*(1/c) + d*e*(1/c))*(1/(1/c))
                    // a = (b + d*e/c)*c
                    // b = dz, c = rrms, d = x, e = sum_xdz * div(-1,rms*N*mean_eps)
                    // a = (dz + x*sum_xdz * div(-1,rms*N*mean_eps)/rrms)*rrms
                    // a = (dz + x*sum_xdz * div(-1,rms*N*mean_eps)*rms)*rrms
                    // a = (dz + x*sum_xdz * div(-rms,rms*N*mean_eps))*rrms
                    // a = (dz + x*sum_xdz * div(-1,N*mean_eps))*rrms
                    // a = (dz + x*div(-sum_xdz,N*mean_eps))*rrms
                    // a = (dz + x*div(-mean_xdz,mean_eps))*rrms
                    // grad[#00] = scale(dz + scale(x, div(-mean_xdz,mean_eps)),rrms)
                    // grad[#00] = scale(dz + scale(x, -mean_xdz/mean_eps),rrms)
                    // dx = scale(dz + scale(x, -mean_xdz/mean_eps),rrms)
                }
                // dx = scale(dz + scale(x, -mean_xdz/mean_eps),rrms)
                // post-order:
                // dx := x
                // dx := scale(dx,-mean_xdz/mean_eps)
                // dx := add(dx, dz)
                // dx := scale(dx, rrms)
                float * dx = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3);

                ggml_vec_cpy_f32  (ne00, dx, x);
                // ggml_vec_scale_f32(ne00, dx, -mean_xdz/mean_eps);
                ggml_vec_scale_f32(ne00, dx, (float)(-sum_xdz)/sum_eps);
                ggml_vec_acc_f32  (ne00, dx, dz);
                ggml_vec_scale_f32(ne00, dx, rrms);
            }
        }
    }
}
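
// compact form of the result derived above (a restatement, same math):
//
//   rrms = 1/sqrt(sum(x^2)/N + eps)
//   dx   = (dz - x * sum(x*dz)/(sum(x^2) + N*eps)) * rrms
//
// which is exactly what the four vector ops at the end of the loop compute,
// using sum_eps = sum_xx + eps*N so that no per-element division is needed.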
static void ggml_compute_forward_rms_norm_back(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_rms_norm_back_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_group_norm

static void ggml_compute_forward_group_norm_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    GGML_ASSERT(src0->nb[0] == sizeof(float));

    const int ith = params->ith;
    const int nth = params->nth;

    GGML_TENSOR_UNARY_OP_LOCALS

    const float eps = 1e-6f; // TODO: make this a parameter

    // TODO: optimize
    int n_channels = src0->ne[2];
    int n_groups = dst->op_params[0];
    int n_channels_per_group = (n_channels + n_groups - 1) / n_groups;
    for (int i = ith; i < n_groups; i += nth) {
        int start = i * n_channels_per_group;
        int end = start + n_channels_per_group;
        if (end > n_channels) {
            end = n_channels;
        }
        int step = end - start;

        for (int64_t i03 = 0; i03 < ne03; i03++) {
            ggml_float sum = 0.0;
            for (int64_t i02 = start; i02 < end; i02++) {
                for (int64_t i01 = 0; i01 < ne01; i01++) {
                    const float * x = (float *)((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03);

                    for (int64_t i00 = 0; i00 < ne00; i00++) {
                        sum += (ggml_float)x[i00];
                    }
                }
            }
            float mean = sum / (ne00 * ne01 * step);
            ggml_float sum2 = 0.0;

            for (int64_t i02 = start; i02 < end; i02++) {
                for (int64_t i01 = 0; i01 < ne01; i01++) {
                    const float * x = (float *)((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03);

                    float * y = (float *)((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3);

                    for (int64_t i00 = 0; i00 < ne00; i00++) {
                        float v = x[i00] - mean;
                        y[i00] = v;
                        sum2 += (ggml_float)(v * v);
                    }
                }
            }
            float variance = sum2 / (ne00 * ne01 * step);
            const float scale = 1.0f / sqrtf(variance + eps);

            for (int64_t i02 = start; i02 < end; i02++) {
                for (int64_t i01 = 0; i01 < ne01; i01++) {
                    float * y = (float *)((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3);
                    ggml_vec_scale_f32(ne00, y, scale);
                }
            }
        }
    }
}
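
// worked example (illustrative numbers, not from the source): with
// n_channels = 10 and n_groups = 4, n_channels_per_group = ceil(10/4) = 3,
// so the groups cover channels [0,3), [3,6), [6,9) and [9,10); the last
// group is clamped to n_channels and normalizes over step = 1 channel.
// each group normalizes ne00*ne01*step elements of one i03 slice.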
static void ggml_compute_forward_group_norm(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_group_norm_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_mul_mat

#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
// helper function to determine if it is better to use BLAS or not
// for large matrices, BLAS is faster
static bool ggml_compute_forward_mul_mat_use_blas(
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
              struct ggml_tensor * dst) {
    //const int64_t ne00 = src0->ne[0];
    //const int64_t ne01 = src0->ne[1];

    const int64_t ne10 = src1->ne[0];

    const int64_t ne0 = dst->ne[0];
    const int64_t ne1 = dst->ne[1];

    // TODO: find the optimal values for these
    if (ggml_is_contiguous(src0) &&
        ggml_is_contiguous(src1) &&
        (ne0 >= 32 && ne1 >= 32 && ne10 >= 32)) {

        /*printf("BLAS: %d %d %d %d %d\n", ne0, ne1, ne10, ne00, ne01);*/
        return true;
    }

    return false;
}
#endif
static void ggml_compute_forward_mul_mat(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
              struct ggml_tensor * dst) {
    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    GGML_TENSOR_BINARY_OP_LOCALS

    const int ith = params->ith;
    const int nth = params->nth;

    const enum ggml_type type = src0->type;

    const bool src1_cont = ggml_is_contiguous(src1);

    ggml_vec_dot_t    const vec_dot               = type_traits[type].vec_dot;
    enum ggml_type    const vec_dot_type          = type_traits[type].vec_dot_type;
    ggml_from_float_t const from_float_to_vec_dot = type_traits[vec_dot_type].from_float;

    GGML_ASSERT(ne0 == ne01);
    GGML_ASSERT(ne1 == ne11);
    GGML_ASSERT(ne2 == ne12);
    GGML_ASSERT(ne3 == ne13);

    // we don't support permuted src0 or src1
    GGML_ASSERT(nb00 == ggml_type_size(type));
    GGML_ASSERT(nb10 == sizeof(float));

    // dst cannot be transposed or permuted
    GGML_ASSERT(nb0 == sizeof(float));
    GGML_ASSERT(nb0 <= nb1);
    GGML_ASSERT(nb1 <= nb2);
    GGML_ASSERT(nb2 <= nb3);

    // broadcast factors
    const int64_t r2 = ne12/ne02;
    const int64_t r3 = ne13/ne03;

    // nb01 >= nb00 - src0 is not transposed
    //   compute by src0 rows

#if defined(GGML_USE_CLBLAST)
    if (ggml_cl_can_mul_mat(src0, src1, dst)) {
        // TODO: handle case when src0 is broadcast-able into src1 across 2nd,3rd dimension
        //       ref: https://github.com/ggerganov/ggml/pull/224
        GGML_ASSERT(ne02 == ne12);
        GGML_ASSERT(ne03 == ne13);

        if (params->ith == 0 && params->type == GGML_TASK_COMPUTE) {
            ggml_cl_mul_mat(src0, src1, dst, params->wdata, params->wsize);
        }
        return;
    }
#endif

#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
    if (ggml_compute_forward_mul_mat_use_blas(src0, src1, dst)) {
        if (params->ith != 0) {
            return;
        }

        if (params->type == GGML_TASK_INIT) {
            return;
        }

        if (params->type == GGML_TASK_FINALIZE) {
            return;
        }

        for (int64_t i13 = 0; i13 < ne13; i13++) {
            for (int64_t i12 = 0; i12 < ne12; i12++) {
                // broadcast src0 into src1 across 2nd,3rd dimension
                const int64_t i03 = i13/r3;
                const int64_t i02 = i12/r2;

                const void  * x = (char *)            src0->data + i02*nb02 + i03*nb03;
                const float * y = (float *) ((char *) src1->data + i12*nb12 + i13*nb13);
                      float * d = (float *) ((char *)  dst->data + i12*nb2  + i13*nb3);

                if (type != GGML_TYPE_F32) {
                    float * const wdata = params->wdata;
                    ggml_to_float_t const to_float = type_traits[type].to_float;

                    size_t id = 0;
                    for (int64_t i01 = 0; i01 < ne01; ++i01) {
                        to_float((const char *) x + i01*nb01, wdata + id, ne00);
                        id += ne00;
                    }

                    assert(id*sizeof(float) <= params->wsize);
                    x = wdata;
                }

                cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans,
                        ne11, ne01, ne10,
                        1.0f,    y, ne10,
                                 x, ne00,
                        0.0f,    d, ne01);
            }
        }

        //printf("CBLAS = %f ms, %d x %d x %d x %d\n", (ggml_perf_time_us() - t0)/1000.0, ne0, ne1, ne2, ne3);

        return;
    }
#endif

    if (params->type == GGML_TASK_INIT) {
        if (src1->type != vec_dot_type) {
            char * wdata = params->wdata;
            const size_t row_size = ne10*ggml_type_size(vec_dot_type)/ggml_blck_size(vec_dot_type);

            for (int64_t i13 = 0; i13 < ne13; ++i13) {
                for (int64_t i12 = 0; i12 < ne12; ++i12) {
                    for (int64_t i11 = 0; i11 < ne11; ++i11) {
                        from_float_to_vec_dot((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11), (void *) wdata, ne10);
                        wdata += row_size;
                    }
                }
            }
        }

        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const void * wdata    = (src1->type == vec_dot_type) ? src1->data : params->wdata;
    const size_t row_size = ne10*ggml_type_size(vec_dot_type)/ggml_blck_size(vec_dot_type);

    const int64_t nr0 = ne01;           // src0 rows
    const int64_t nr1 = ne11*ne12*ne13; // src1 rows

    //printf("nr0 = %lld, nr1 = %lld\n", nr0, nr1);

    // distribute the thread work across the inner or outer loop based on which one is larger

    const int64_t nth0 = nr0 > nr1 ? nth : 1; // parallelize by src0 rows
    const int64_t nth1 = nr0 > nr1 ? 1 : nth; // parallelize by src1 rows

    const int64_t ith0 = ith % nth0;
    const int64_t ith1 = ith / nth0;

    const int64_t dr0 = (nr0 + nth0 - 1)/nth0;
    const int64_t dr1 = (nr1 + nth1 - 1)/nth1;

    const int64_t ir010 = dr0*ith0;
    const int64_t ir011 = MIN(ir010 + dr0, nr0);

    const int64_t ir110 = dr1*ith1;
    const int64_t ir111 = MIN(ir110 + dr1, nr1);

    //printf("ir010 = %6lld, ir011 = %6lld, ir110 = %6lld, ir111 = %6lld\n", ir010, ir011, ir110, ir111);

    // threads with no work simply yield (not sure if it helps)
    if (ir010 >= ir011 || ir110 >= ir111) {
        sched_yield();
        return;
    }
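
    // worked example of the partitioning (illustrative numbers, not from the
    // source): with nth = 4, nr0 = 6 and nr1 = 100, nr1 is larger so
    // nth0 = 1, nth1 = 4; dr1 = ceil(100/4) = 25, and thread ith = 2 gets
    // ith0 = 0, ith1 = 2, i.e. all 6 src0 rows crossed with src1 rows [50,75).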
    assert(ne12 % ne02 == 0);
    assert(ne13 % ne03 == 0);

    // block-tiling attempt
    const int64_t blck_0 = 16;
    const int64_t blck_1 = 16;

    // attempt to reduce false-sharing (does not seem to make a difference)
    float tmp[16];

    for (int64_t iir1 = ir110; iir1 < ir111; iir1 += blck_1) {
        for (int64_t iir0 = ir010; iir0 < ir011; iir0 += blck_0) {
            for (int64_t ir1 = iir1; ir1 < iir1 + blck_1 && ir1 < ir111; ++ir1) {
                const int64_t i13 = (ir1/(ne12*ne11));
                const int64_t i12 = (ir1 - i13*ne12*ne11)/ne11;
                const int64_t i11 = (ir1 - i13*ne12*ne11 - i12*ne11);

                // broadcast src0 into src1
                const int64_t i03 = i13/r3;
                const int64_t i02 = i12/r2;

                const int64_t i1 = i11;
                const int64_t i2 = i12;
                const int64_t i3 = i13;

                const char * src0_row = (const char *) src0->data + (0 + i02*nb02 + i03*nb03);

                // desc: when src1 is not a contiguous memory block we have to calculate the offset using the strides
                //       if it is, then we have either copied the data to params->wdata and made it contiguous or we are using
                //       the original src1 data pointer, so we should index using the indices directly
                // TODO: this is a bit of a hack, we should probably have a better way to handle this
                const char * src1_col = (const char *) wdata +
                    (src1_cont || src1->type != vec_dot_type
                     ? (i11      + i12*ne11 + i13*ne12*ne11)*row_size
                     : (i11*nb11 + i12*nb12 + i13*nb13));

                float * dst_col = (float *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3));

                //for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir011; ++ir0) {
                //    vec_dot(ne00, &dst_col[ir0], src0_row + ir0*nb01, src1_col);
                //}

                for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir011; ++ir0) {
                    vec_dot(ne00, &tmp[ir0 - iir0], src0_row + ir0*nb01, src1_col);
                }
                memcpy(&dst_col[iir0], tmp, (MIN(iir0 + blck_0, ir011) - iir0)*sizeof(float));
            }
        }
    }
}
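
// usage sketch (illustrative only, not part of this file; assumes the public
// ggml.h API of this vintage: ggml_init, ggml_new_tensor_2d, ggml_mul_mat,
// ggml_build_forward, ggml_graph_compute_with_ctx, ggml_free):
//
//   struct ggml_init_params ip = { /*.mem_size   =*/ 16*1024*1024,
//                                  /*.mem_buffer =*/ NULL,
//                                  /*.no_alloc   =*/ false };
//   struct ggml_context * ctx = ggml_init(ip);
//
//   // a: ne = {4, 2}, b: ne = {4, 3} => ggml_mul_mat(ctx, a, b) contracts
//   // over the first dimension (ne00 == ne10 == 4) and yields ne = {2, 3},
//   // matching the GGML_ASSERT(ne0 == ne01) / (ne1 == ne11) checks above.
//   struct ggml_tensor * a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 4, 2);
//   struct ggml_tensor * b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 4, 3);
//   struct ggml_tensor * c = ggml_mul_mat(ctx, a, b);
//
//   struct ggml_cgraph gf = ggml_build_forward(c);
//   ggml_graph_compute_with_ctx(ctx, &gf, /*n_threads =*/ 4);
//   ggml_free(ctx);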
// ggml_compute_forward_out_prod

static void ggml_compute_forward_out_prod_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
              struct ggml_tensor * dst) {
    // int64_t t0 = ggml_perf_time_us();
    // UNUSED(t0);

    GGML_TENSOR_BINARY_OP_LOCALS

    const int ith = params->ith;
    const int nth = params->nth;

    GGML_ASSERT(ne02 == ne12);
    GGML_ASSERT(ne03 == ne13);
    GGML_ASSERT(ne2  == ne12);
    GGML_ASSERT(ne3  == ne13);

    // we don't support permuted src0 or src1
    GGML_ASSERT(nb00 == sizeof(float));

    // dst cannot be transposed or permuted
    GGML_ASSERT(nb0 == sizeof(float));
    // GGML_ASSERT(nb0 <= nb1);
    // GGML_ASSERT(nb1 <= nb2);
    // GGML_ASSERT(nb2 <= nb3);

    GGML_ASSERT(ne0 == ne00);
    GGML_ASSERT(ne1 == ne10);
    GGML_ASSERT(ne2 == ne02);
    GGML_ASSERT(ne3 == ne03);

    // nb01 >= nb00 - src0 is not transposed
    //   compute by src0 rows

    // TODO: #if defined(GGML_USE_CUBLAS) ggml_cuda_out_prod
    // TODO: #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) || defined(GGML_USE_CLBLAST)

    if (params->type == GGML_TASK_INIT) {
        ggml_vec_set_f32(ne0*ne1*ne2*ne3, dst->data, 0);
        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // dst[:,:,:,:] = 0
    // for i2,i3:
    //   for i1:
    //     for i01:
    //       for i0:
    //         dst[i0,i1,i2,i3] += src0[i0,i01,i2,i3] * src1[i1,i01,i2,i3]

    // parallelize by last three dimensions

    // total rows in dst
    const int64_t nr = ne1*ne2*ne3;

    // rows per thread
    const int64_t dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int64_t ir0 = dr*ith;
    const int64_t ir1 = MIN(ir0 + dr, nr);

    // block-tiling attempt
    const int64_t blck_0 = MAX(GGML_VEC_MAD_UNROLL, 32);
    const int64_t blck_1 = 16;

    for (int64_t bir = ir0; bir < ir1; bir += blck_1) {
        const int64_t bir1 = MIN(bir + blck_1, ir1);
        for (int64_t bi01 = 0; bi01 < ne01; bi01 += blck_0) {
            const int64_t bne01 = MIN(bi01 + blck_0, ne01);
            for (int64_t ir = bir; ir < bir1; ++ir) {
                // dst indices
                const int64_t i3 = ir/(ne2*ne1);
                const int64_t i2 = (ir - i3*ne2*ne1)/ne1;
                const int64_t i1 = (ir - i3*ne2*ne1 - i2*ne1);

                const int64_t i02 = i2;
                const int64_t i03 = i3;

                //const int64_t i10 = i1;
                const int64_t i12 = i2;
                const int64_t i13 = i3;

#if GGML_VEC_MAD_UNROLL > 2
                const int64_t bne01_unroll = bne01 - (bne01 % GGML_VEC_MAD_UNROLL);
                for (int64_t i01 = bi01; i01 < bne01_unroll; i01 += GGML_VEC_MAD_UNROLL) {
                    const int64_t i11 = i01;

                    float * s0 = (float *) ((char *) src0->data + (          i01*nb01 + i02*nb02 + i03*nb03));
                    float * s1 = (float *) ((char *) src1->data + (i1*nb10 + i11*nb11 + i12*nb12 + i13*nb13));
                    float * d  = (float *) ((char *)  dst->data + (          i1*nb1   + i2*nb2   + i3*nb3));

                    ggml_vec_mad_f32_unroll(ne0, nb01, nb11, d, s0, s1);
                }
                for (int64_t i01 = bne01_unroll; i01 < bne01; ++i01) {
                    const int64_t i11 = i01;

                    float * s0 = (float *) ((char *) src0->data + (          i01*nb01 + i02*nb02 + i03*nb03));
                    float * s1 = (float *) ((char *) src1->data + (i1*nb10 + i11*nb11 + i12*nb12 + i13*nb13));
                    float * d  = (float *) ((char *)  dst->data + (          i1*nb1   + i2*nb2   + i3*nb3));

                    ggml_vec_mad_f32(ne0, d, s0, *s1);
                }
#else
                for (int64_t i01 = bi01; i01 < bne01; ++i01) {
                    const int64_t i11 = i01;

                    float * s0 = (float *) ((char *) src0->data + (          i01*nb01 + i02*nb02 + i03*nb03));
                    float * s1 = (float *) ((char *) src1->data + (i1*nb10 + i11*nb11 + i12*nb12 + i13*nb13));
                    float * d  = (float *) ((char *)  dst->data + (          i1*nb1   + i2*nb2   + i3*nb3));

                    ggml_vec_mad_f32(ne0, d, s0, *s1);
                }
#endif
            }
        }
    }

    //int64_t t1 = ggml_perf_time_us();
    //static int64_t acc = 0;
    //acc += t1 - t0;
    //if (t1 - t0 > 10) {
    //    printf("\n");
    //    printf("ne00 = %5d, ne01 = %5d, ne02 = %5d, ne03 = %5d\n", ne00, ne01, ne02, ne03);
    //    printf("nb00 = %5d, nb01 = %5d, nb02 = %5d, nb03 = %5d\n", nb00, nb01, nb02, nb03);
    //    printf("ne10 = %5d, ne11 = %5d, ne12 = %5d, ne13 = %5d\n", ne10, ne11, ne12, ne13);
    //    printf("nb10 = %5d, nb11 = %5d, nb12 = %5d, nb13 = %5d\n", nb10, nb11, nb12, nb13);
    //    printf("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX task %d/%d: %d us, acc = %d\n", ith, nth, (int) (t1 - t0), (int) acc);
    //}
}
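
// in matrix terms (a restatement of the loop comment above): within each
// (i2,i3) slice, row i1 of dst (length ne0) accumulates
//
//   dst[:,i1] += sum over i01 of src0[:,i01] * src1[i1,i01]
//
// i.e. an outer-product accumulation over the shared index i01 - the pattern
// needed e.g. for the backward pass of mul_mat.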
static void ggml_compute_forward_out_prod_q_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
              struct ggml_tensor * dst) {
    // int64_t t0 = ggml_perf_time_us();
    // UNUSED(t0);

    GGML_TENSOR_BINARY_OP_LOCALS;

    const int ith = params->ith;
    const int nth = params->nth;

    const enum ggml_type type = src0->type;
    ggml_to_float_t const dequantize_row_q = type_traits[type].to_float;

    GGML_ASSERT(ne02 == ne12);
    GGML_ASSERT(ne03 == ne13);
    GGML_ASSERT(ne2  == ne12);
    GGML_ASSERT(ne3  == ne13);

    // we don't support permuted src0 dim0
    GGML_ASSERT(nb00 == ggml_type_size(type));

    // dst dim0 cannot be transposed or permuted
    GGML_ASSERT(nb0 == sizeof(float));
    // GGML_ASSERT(nb0 <= nb1);
    // GGML_ASSERT(nb1 <= nb2);
    // GGML_ASSERT(nb2 <= nb3);

    GGML_ASSERT(ne0 == ne00);
    GGML_ASSERT(ne1 == ne10);
    GGML_ASSERT(ne2 == ne02);
    GGML_ASSERT(ne3 == ne03);

    // nb01 >= nb00 - src0 is not transposed
    //   compute by src0 rows

    // TODO: #if defined(GGML_USE_CUBLAS) ggml_cuda_out_prod
    // TODO: #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) || defined(GGML_USE_CLBLAST)

    if (params->type == GGML_TASK_INIT) {
        ggml_vec_set_f32(ne0*ne1*ne2*ne3, dst->data, 0);
        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // parallelize by last three dimensions

    // total rows in dst
    const int64_t nr = ne1*ne2*ne3;

    // rows per thread
    const int64_t dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int64_t ir0 = dr*ith;
    const int64_t ir1 = MIN(ir0 + dr, nr);

    // dst[:,:,:,:] = 0
    // for i2,i3:
    //   for i1:
    //     for i01:
    //       for i0:
    //         dst[i0,i1,i2,i3] += src0[i0,i01,i2,i3] * src1[i1,i01,i2,i3]

    float * wdata = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32) * ith;

    for (int64_t ir = ir0; ir < ir1; ++ir) {
        // dst indices
        const int64_t i3 = ir/(ne2*ne1);
        const int64_t i2 = (ir - i3*ne2*ne1)/ne1;
        const int64_t i1 = (ir - i3*ne2*ne1 - i2*ne1);

        const int64_t i02 = i2;
        const int64_t i03 = i3;

        //const int64_t i10 = i1;
        const int64_t i12 = i2;
        const int64_t i13 = i3;

        for (int64_t i01 = 0; i01 < ne01; ++i01) {
            const int64_t i11 = i01;

            float * s0 = (float *) ((char *) src0->data + (          i01*nb01 + i02*nb02 + i03*nb03));
            float * s1 = (float *) ((char *) src1->data + (i1*nb10 + i11*nb11 + i12*nb12 + i13*nb13));
            float * d  = (float *) ((char *)  dst->data + (          i1*nb1   + i2*nb2   + i3*nb3));

            dequantize_row_q(s0, wdata, ne0);
            ggml_vec_mad_f32(ne0, d, wdata, *s1);
        }
    }

    //int64_t t1 = ggml_perf_time_us();
    //static int64_t acc = 0;
    //acc += t1 - t0;
    //if (t1 - t0 > 10) {
    //    printf("\n");
    //    printf("ne00 = %5d, ne01 = %5d, ne02 = %5d, ne03 = %5d\n", ne00, ne01, ne02, ne03);
    //    printf("nb00 = %5d, nb01 = %5d, nb02 = %5d, nb03 = %5d\n", nb00, nb01, nb02, nb03);
    //    printf("ne10 = %5d, ne11 = %5d, ne12 = %5d, ne13 = %5d\n", ne10, ne11, ne12, ne13);
    //    printf("nb10 = %5d, nb11 = %5d, nb12 = %5d, nb13 = %5d\n", nb10, nb11, nb12, nb13);
    //    printf("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX task %d/%d: %d us, acc = %d\n", ith, nth, (int) (t1 - t0), (int) acc);
    //}
}
static void ggml_compute_forward_out_prod(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
            {
                ggml_compute_forward_out_prod_q_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F16:
            {
                GGML_ASSERT(false); // todo
                // ggml_compute_forward_out_prod_f16_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_out_prod_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_scale

static void ggml_compute_forward_scale_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_is_contiguous(src0));
    GGML_ASSERT(ggml_is_contiguous(dst));
    GGML_ASSERT(ggml_are_same_shape(src0, dst));
    GGML_ASSERT(ggml_is_scalar(src1));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // scale factor
    const float v = *(float *) src1->data;

    const int ith = params->ith;
    const int nth = params->nth;

    const int nc = src0->ne[0];
    const int nr = ggml_nrows(src0);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    const size_t nb01 = src0->nb[1];
    const size_t nb1  =  dst->nb[1];

    for (int i1 = ir0; i1 < ir1; i1++) {
        if (dst->data != src0->data) {
            // src0 is same shape as dst => same indices
            memcpy((char *)dst->data + i1*nb1, (char *)src0->data + i1*nb01, nc * sizeof(float));
        }
        ggml_vec_scale_f32(nc, (float *) ((char *) dst->data + i1*nb1), v);
    }
}
static void ggml_compute_forward_scale(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_scale_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_set

static void ggml_compute_forward_set_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, dst));
    GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));

    // view src0 and dst with these strides and data offset in bytes during set
    // nb0 is implicitly element_size because src0 and dst are contiguous
    size_t nb1     = ((int32_t *) dst->op_params)[0];
    size_t nb2     = ((int32_t *) dst->op_params)[1];
    size_t nb3     = ((int32_t *) dst->op_params)[2];
    size_t offset  = ((int32_t *) dst->op_params)[3];
    bool   inplace = (bool) ((int32_t *) dst->op_params)[4];

    if (!inplace && (params->type == GGML_TASK_INIT)) {
        // memcpy needs to be synchronized across threads to avoid race conditions.
        // => do it in INIT phase
        memcpy(
            ((char *)  dst->data),
            ((char *) src0->data),
            ggml_nbytes(dst));
    }

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(src1);
    const int nc = src1->ne[0];

    GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne)
    GGML_TENSOR_LOCALS(size_t,  nb1, src1, nb)

    // src0 and dst as viewed during set
    const size_t nb0 = ggml_element_size(src0);

    const int im0 = (ne10 == 0 ? 0 : ne10-1);
    const int im1 = (ne11 == 0 ? 0 : ne11-1);
    const int im2 = (ne12 == 0 ? 0 : ne12-1);
    const int im3 = (ne13 == 0 ? 0 : ne13-1);

    GGML_ASSERT(offset + im0*nb0 + im1*nb1 + im2*nb2 + im3*nb3 <= ggml_nbytes(dst));

    GGML_ASSERT(nb10 == sizeof(float));

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int ir = ir0; ir < ir1; ++ir) {
        // src0 and dst are viewed with shape of src1 and offset
        // => same indices
        const int i3 = ir/(ne12*ne11);
        const int i2 = (ir - i3*ne12*ne11)/ne11;
        const int i1 = (ir - i3*ne12*ne11 - i2*ne11);

        ggml_vec_cpy_f32(nc,
                (float *) ((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + offset),
                (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11));
    }
}
static void ggml_compute_forward_set(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_set_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F16:
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q8_1:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_cpy

static void ggml_compute_forward_cpy(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    ggml_compute_forward_dup(params, src0, dst);
}

// ggml_compute_forward_cont

static void ggml_compute_forward_cont(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    ggml_compute_forward_dup(params, src0, dst);
}

// ggml_compute_forward_reshape

static void ggml_compute_forward_reshape(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    // NOP
    UNUSED(params);
    UNUSED(src0);
    UNUSED(dst);
}

// ggml_compute_forward_view

static void ggml_compute_forward_view(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0) {
    // NOP
    UNUSED(params);
    UNUSED(src0);
}

// ggml_compute_forward_permute

static void ggml_compute_forward_permute(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0) {
    // NOP
    UNUSED(params);
    UNUSED(src0);
}

// ggml_compute_forward_transpose

static void ggml_compute_forward_transpose(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0) {
    // NOP
    UNUSED(params);
    UNUSED(src0);
}
// ggml_compute_forward_get_rows

static void ggml_compute_forward_get_rows_q(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
              struct ggml_tensor * dst) {
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int nc = src0->ne[0];
    const int nr = ggml_nelements(src1);
    const enum ggml_type type = src0->type;
    ggml_to_float_t const dequantize_row_q = type_traits[type].to_float;

    assert( dst->ne[0] == nc);
    assert( dst->ne[1] == nr);
    assert(src0->nb[0] == ggml_type_size(type));

    for (int i = 0; i < nr; ++i) {
        const int r = ((int32_t *) src1->data)[i];

        dequantize_row_q(
                (const void *) ((char *) src0->data + r*src0->nb[1]),
                     (float *) ((char *)  dst->data + i*dst->nb[1]), nc);
    }
}

static void ggml_compute_forward_get_rows_f16(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
              struct ggml_tensor * dst) {
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int nc = src0->ne[0];
    const int nr = ggml_nelements(src1);

    assert( dst->ne[0] == nc);
    assert( dst->ne[1] == nr);
    assert(src0->nb[0] == sizeof(ggml_fp16_t));

    for (int i = 0; i < nr; ++i) {
        const int r = ((int32_t *) src1->data)[i];

        for (int j = 0; j < nc; ++j) {
            ggml_fp16_t v = ((ggml_fp16_t *) ((char *) src0->data + r*src0->nb[1]))[j];
            ((float *) ((char *) dst->data + i*dst->nb[1]))[j] = GGML_FP16_TO_FP32(v);
        }
    }
}

static void ggml_compute_forward_get_rows_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
              struct ggml_tensor * dst) {
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int nc = src0->ne[0];
    const int nr = ggml_nelements(src1);

    assert( dst->ne[0] == nc);
    assert( dst->ne[1] == nr);
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < nr; ++i) {
        const int r = ((int32_t *) src1->data)[i];

        ggml_vec_cpy_f32(nc,
                (float *) ((char *)  dst->data + i*dst->nb[1]),
                (float *) ((char *) src0->data + r*src0->nb[1]));
    }
}
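
// example (illustrative, not from the source): with src0 an F32 matrix of
// ne = {4, 8} (rows of length 4) and src1 = [5, 2, 2] (I32), dst has
// ne = {4, 3} and receives rows 5, 2 and 2 of src0, in that order - the
// embedding-lookup pattern.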
static void ggml_compute_forward_get_rows(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q8_1:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
            {
                ggml_compute_forward_get_rows_q(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_get_rows_f16(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_get_rows_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }

    //static bool first = true;
    //printf("ne0 = %d, ne1 = %d, ne2 = %d\n", dst->ne[0], dst->ne[1], dst->ne[2]);
    //if (first) {
    //    first = false;
    //} else {
    //    for (int k = 0; k < dst->ne[1]; ++k) {
    //        for (int j = 0; j < dst->ne[0]/16; ++j) {
    //            for (int i = 0; i < 16; ++i) {
    //                printf("%8.4f ", ((float *) dst->data)[k*dst->ne[0] + j*16 + i]);
    //            }
    //            printf("\n");
    //        }
    //        printf("\n");
    //    }
    //    printf("\n");
    //    exit(0);
    //}
}
// ggml_compute_forward_get_rows_back

static void ggml_compute_forward_get_rows_back_f32_f16(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
              struct ggml_tensor * dst) {
    GGML_ASSERT(params->ith == 0);
    GGML_ASSERT(ggml_is_contiguous(dst));

    // ggml_compute_forward_dup_same_cont(params, opt0, dst);

    if (params->type == GGML_TASK_INIT) {
        memset(dst->data, 0, ggml_nbytes(dst));
    }

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int nc = src0->ne[0];
    const int nr = ggml_nelements(src1);

    GGML_ASSERT( dst->ne[0] == nc);
    GGML_ASSERT(src0->nb[0] == sizeof(ggml_fp16_t));

    for (int i = 0; i < nr; ++i) {
        const int r = ((int32_t *) src1->data)[i];

        for (int j = 0; j < nc; ++j) {
            ggml_fp16_t v = ((ggml_fp16_t *) ((char *) src0->data + i*src0->nb[1]))[j];
            ((float *) ((char *) dst->data + r*dst->nb[1]))[j] += GGML_FP16_TO_FP32(v);
        }
    }
}

static void ggml_compute_forward_get_rows_back_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
              struct ggml_tensor * dst) {
    GGML_ASSERT(params->ith == 0);
    GGML_ASSERT(ggml_is_contiguous(dst));

    // ggml_compute_forward_dup_same_cont(params, opt0, dst);

    if (params->type == GGML_TASK_INIT) {
        memset(dst->data, 0, ggml_nbytes(dst));
    }

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int nc = src0->ne[0];
    const int nr = ggml_nelements(src1);

    GGML_ASSERT( dst->ne[0] == nc);
    GGML_ASSERT(src0->nb[0] == sizeof(float));

    for (int i = 0; i < nr; ++i) {
        const int r = ((int32_t *) src1->data)[i];

        ggml_vec_add_f32(nc,
                (float *) ((char *)  dst->data + r*dst->nb[1]),
                (float *) ((char *)  dst->data + r*dst->nb[1]),
                (float *) ((char *) src0->data + i*src0->nb[1]));
    }
}

static void ggml_compute_forward_get_rows_back(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_get_rows_back_f32_f16(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_get_rows_back_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }

    //static bool first = true;
    //printf("ne0 = %d, ne1 = %d, ne2 = %d\n", dst->ne[0], dst->ne[1], dst->ne[2]);
    //if (first) {
    //    first = false;
    //} else {
    //    for (int k = 0; k < dst->ne[1]; ++k) {
    //        for (int j = 0; j < dst->ne[0]/16; ++j) {
    //            for (int i = 0; i < 16; ++i) {
    //                printf("%8.4f ", ((float *) dst->data)[k*dst->ne[0] + j*16 + i]);
    //            }
    //            printf("\n");
    //        }
    //        printf("\n");
    //    }
    //    printf("\n");
    //    exit(0);
    //}
}
// ggml_compute_forward_diag

static void ggml_compute_forward_diag_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // TODO: handle transposed/permuted matrices

    GGML_TENSOR_UNARY_OP_LOCALS

    GGML_ASSERT(ne00 == ne0);
    GGML_ASSERT(ne00 == ne1);
    GGML_ASSERT(ne01 == 1);
    GGML_ASSERT(ne02 == ne2);
    GGML_ASSERT(ne03 == ne3);

    GGML_ASSERT(nb00 == sizeof(float));
    GGML_ASSERT(nb0  == sizeof(float));

    for (int i3 = 0; i3 < ne3; i3++) {
        for (int i2 = 0; i2 < ne2; i2++) {
            for (int i1 = 0; i1 < ne1; i1++) {
                float * d = (float *)((char *)  dst->data + i3*nb3  + i2*nb2 + i1*nb1);
                float * s = (float *)((char *) src0->data + i3*nb03 + i2*nb02);
                for (int i0 = 0; i0 < i1; i0++) {
                    d[i0] = 0;
                }
                d[i1] = s[i1];
                for (int i0 = i1+1; i0 < ne0; i0++) {
                    d[i0] = 0;
                }
            }
        }
    }
}

static void ggml_compute_forward_diag(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_diag_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_diag_mask_inf

static void ggml_compute_forward_diag_mask_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst,
        const float value) {
    const int ith = params->ith;
    const int nth = params->nth;

    const int  n_past  = ((int32_t *) dst->op_params)[0];
    const bool inplace = src0->data == dst->data;

    GGML_ASSERT(n_past >= 0);

    if (!inplace && (params->type == GGML_TASK_INIT)) {
        // memcpy needs to be synchronized across threads to avoid race conditions.
        // => do it in INIT phase
        GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));
        GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));
        memcpy(
            ((char *)  dst->data),
            ((char *) src0->data),
            ggml_nbytes(dst));
    }

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // TODO: handle transposed/permuted matrices

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];
    const int nr = src0->ne[1];
    const int nz = n/nr;

    GGML_ASSERT( dst->nb[0] == sizeof(float));
    GGML_ASSERT(src0->nb[0] == sizeof(float));

    for (int k = 0; k < nz; k++) {
        for (int j = ith; j < nr; j += nth) {
            for (int i = n_past; i < nc; i++) {
                if (i > n_past + j) {
                    *(float *)((char *) dst->data + k*dst->nb[2] + j*dst->nb[1] + i*dst->nb[0]) = value;
                }
            }
        }
    }
}
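
// worked example (illustrative, not from the source): for a square attention
// matrix with n_past = 0, row j keeps columns 0..j and every entry with
// i > j is set to `value` (-INFINITY for diag_mask_inf, 0 for
// diag_mask_zero) - the usual causal mask, shifted right by n_past columns
// when there is a prefix of cached tokens.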
static void ggml_compute_forward_diag_mask_inf(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_diag_mask_f32(params, src0, dst, -INFINITY);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

static void ggml_compute_forward_diag_mask_zero(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_diag_mask_f32(params, src0, dst, 0);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_soft_max

static void ggml_compute_forward_soft_max_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_is_contiguous(src0));
    GGML_ASSERT(ggml_is_contiguous(dst));
    GGML_ASSERT(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // TODO: handle transposed/permuted matrices

    const int ith = params->ith;
    const int nth = params->nth;

    const int nc = src0->ne[0];
    const int nr = ggml_nrows(src0);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        float * sp = (float *)((char *) src0->data + i1*src0->nb[1]);
        float * dp = (float *)((char *)  dst->data + i1*dst->nb[1]);

#ifndef NDEBUG
        for (int i = 0; i < nc; ++i) {
            //printf("p[%d] = %f\n", i, p[i]);
            assert(!isnan(sp[i]));
        }
#endif

        float max = -INFINITY;
        ggml_vec_max_f32(nc, &max, sp);

        ggml_float sum = 0.0;

        uint16_t scvt;
        for (int i = 0; i < nc; i++) {
            if (sp[i] == -INFINITY) {
                dp[i] = 0.0f;
            } else {
                // const float val = (sp[i] == -INFINITY) ? 0.0 : exp(sp[i] - max);
                ggml_fp16_t s = GGML_FP32_TO_FP16(sp[i] - max);
                memcpy(&scvt, &s, sizeof(scvt));
                const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt]);
                sum += (ggml_float)val;
                dp[i] = val;
            }
        }

        assert(sum > 0.0);

        sum = 1.0/sum;
        ggml_vec_scale_f32(nc, dp, sum);

#ifndef NDEBUG
        for (int i = 0; i < nc; ++i) {
            assert(!isnan(dp[i]));
            assert(!isinf(dp[i]));
        }
#endif
    }
}
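
// in formula form (a restatement of the loop above): each row is
//
//   y[i] = exp(x[i] - max(x)) / sum_j exp(x[j] - max(x))
//
// subtracting the row max keeps exp() in range (max-shifted softmax);
// the exp itself goes through the F16 lookup table table_exp_f16, and
// -INFINITY inputs (masked positions) map directly to 0.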
static void ggml_compute_forward_soft_max(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_soft_max_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_soft_max_back

static void ggml_compute_forward_soft_max_back_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_is_contiguous(src0));
    GGML_ASSERT(ggml_is_contiguous(src1));
    GGML_ASSERT(ggml_is_contiguous(dst));
    GGML_ASSERT(ggml_are_same_shape(src0, dst));
    GGML_ASSERT(ggml_are_same_shape(src1, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // TODO: handle transposed/permuted matrices

    const int ith = params->ith;
    const int nth = params->nth;

    const int nc = src0->ne[0];
    const int nr = ggml_nrows(src0);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        float * dy = (float *)((char *) src0->data + i1*src0->nb[1]);
        float * y  = (float *)((char *) src1->data + i1*src1->nb[1]);
        float * dx = (float *)((char *)  dst->data + i1*dst->nb[1]);

#ifndef NDEBUG
        for (int i = 0; i < nc; ++i) {
            //printf("p[%d] = %f\n", i, p[i]);
            assert(!isnan(dy[i]));
            assert(!isnan(y[i]));
        }
#endif
        // Jii = yi - yi*yi
        // Jij = -yi*yj
        // J = diag(y)-y.T*y
        // dx = J * dy
        // dxk = sum_i(Jki * dyi)
        // dxk = sum_i(-yk*yi * dyi) - (-yk*yk)*dyk + (yk - yk*yk)*dyk
        // dxk = sum_i(-yk*yi * dyi) + yk*yk*dyk + yk*dyk - yk*yk*dyk
        // dxk = sum_i(-yk*yi * dyi) + yk*dyk
        // dxk = -yk * sum_i(yi * dyi) + yk*dyk
        // dxk = -yk * dot(y, dy) + yk*dyk
        // dxk = yk * (- dot(y, dy) + dyk)
        // dxk = yk * (dyk - dot(y, dy))
        //
        // post-order:
        // dot_y_dy := dot(y, dy)
        // dx := dy
        // dx := dx - dot_y_dy
        // dx := dx * y

        // linear runtime, no additional memory
        float dot_y_dy = 0;
        ggml_vec_dot_f32 (nc, &dot_y_dy, y, dy);
        ggml_vec_cpy_f32 (nc, dx, dy);
        ggml_vec_acc1_f32(nc, dx, -dot_y_dy);
        ggml_vec_mul_f32 (nc, dx, dx, y);

#ifndef NDEBUG
        for (int i = 0; i < nc; ++i) {
            assert(!isnan(dx[i]));
            assert(!isinf(dx[i]));
        }
#endif
    }
}
static void ggml_compute_forward_soft_max_back(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_soft_max_back_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_alibi

static void ggml_compute_forward_alibi_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n_past = ((int32_t *) dst->op_params)[0];
    const int n_head = ((int32_t *) dst->op_params)[1];
    float max_bias;
    memcpy(&max_bias, (int32_t *) dst->op_params + 2, sizeof(float));

    assert(n_past >= 0);

    const int ne0 = src0->ne[0]; // all_seq_len = n_past + ne1
    const int ne1 = src0->ne[1]; // seq_len_without_past
    const int ne2 = src0->ne[2]; // n_head -> this is k
    //const int ne3 = src0->ne[3]; // 1 -> bsz

    const int n       = ggml_nrows(src0);
    const int ne2_ne3 = n/ne1; // ne2*ne3

    const int nb0 = src0->nb[0];
    const int nb1 = src0->nb[1];
    const int nb2 = src0->nb[2];
    //const int nb3 = src0->nb[3];

    GGML_ASSERT(nb0 == sizeof(float));
    GGML_ASSERT(ne1 + n_past == ne0);
    GGML_ASSERT(n_head == ne2);

    // add alibi to src0 (KQ_scaled)
    const int n_heads_log2_floor = 1 << (int) floor(log2(n_head));

    const float m0 = powf(2.0f, -(max_bias) / n_heads_log2_floor);
    const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_heads_log2_floor);

    for (int i = 0; i < ne0; i++) {
        for (int j = 0; j < ne1; j++) {
            for (int k = 0; k < ne2_ne3; k++) {
                float * const src = (float *)((char *) src0->data + i*nb0 + j*nb1 + k*nb2);
                float *      pdst = (float *)((char *)  dst->data + i*nb0 + j*nb1 + k*nb2);

                // TODO: k*nb2 or k*nb3

                float m_k;

                if (k < n_heads_log2_floor) {
                    m_k = powf(m0, k + 1);
                } else {
                    m_k = powf(m1, 2 * (k - n_heads_log2_floor) + 1);
                }

                pdst[0] = i * m_k + src[0];
            }
        }
    }
}
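
// slope example (illustrative numbers, not from the source): with n_head = 8
// and max_bias = 8.0, n_heads_log2_floor = 8 and m0 = 2^(-8/8) = 0.5, so the
// per-head slopes m_k = m0^(k+1) are 1/2, 1/4, ..., 1/256 - the geometric
// sequence from the ALiBi paper; the bias added at column i is i * m_k.
// m1 covers the extra heads when n_head is not a power of two.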
  10496. static void ggml_compute_forward_alibi_f16(
  10497. const struct ggml_compute_params * params,
  10498. const struct ggml_tensor * src0,
  10499. struct ggml_tensor * dst) {
  10500. assert(params->ith == 0);
  10501. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  10502. return;
  10503. }
  10504. //const int n_past = ((int32_t *) dst->op_params)[0];
  10505. const int n_head = ((int32_t *) dst->op_params)[1];
  10506. float max_bias;
  10507. memcpy(&max_bias, (int32_t *) dst->op_params + 2, sizeof(float));
  10508. const int ne0 = src0->ne[0]; // all_seq_len = n_past + ne1
  10509. const int ne1 = src0->ne[1]; // seq_len_without_past
  10510. const int ne2 = src0->ne[2]; // n_head -> this is k
  10511. //const int ne3 = src0->ne[3]; // 1 -> bsz
  10512. const int n = ggml_nrows(src0);
  10513. const int ne2_ne3 = n/ne1; // ne2*ne3
  10514. const int nb0 = src0->nb[0];
  10515. const int nb1 = src0->nb[1];
  10516. const int nb2 = src0->nb[2];
  10517. //const int nb3 = src0->nb[3];
  10518. GGML_ASSERT(nb0 == sizeof(ggml_fp16_t));
  10519. //GGML_ASSERT(ne1 + n_past == ne0); (void) n_past;
  10520. GGML_ASSERT(n_head == ne2);
  10521. // add alibi to src0 (KQ_scaled)
  10522. const int n_heads_log2_floor = 1 << (int) floor(log2(n_head));
  10523. const float m0 = powf(2.0f, -(max_bias) / n_heads_log2_floor);
  10524. const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_heads_log2_floor);
  10525. for (int i = 0; i < ne0; i++) {
  10526. for (int j = 0; j < ne1; j++) {
  10527. for (int k = 0; k < ne2_ne3; k++) {
  10528. ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i*nb0 + j*nb1 + k*nb2);
  10529. float * pdst = (float *)((char *) dst->data + i*nb0 + j*nb1 + k*nb2);
  10530. // TODO: k*nb2 or k*nb3
  10531. float m_k;
  10532. if (k < n_heads_log2_floor) {
  10533. m_k = powf(m0, k + 1);
  10534. } else {
  10535. m_k = powf(m1, 2 * (k - n_heads_log2_floor) + 1);
  10536. }
  10537. // we return F32
  10538. pdst[0] = i * m_k + GGML_FP16_TO_FP32(src[0]);
  10539. }
  10540. }
  10541. }
  10542. }
  10543. static void ggml_compute_forward_alibi(
  10544. const struct ggml_compute_params * params,
  10545. const struct ggml_tensor * src0,
  10546. struct ggml_tensor * dst) {
  10547. switch (src0->type) {
  10548. case GGML_TYPE_F16:
  10549. {
  10550. ggml_compute_forward_alibi_f16(params, src0, dst);
  10551. } break;
  10552. case GGML_TYPE_F32:
  10553. {
  10554. ggml_compute_forward_alibi_f32(params, src0, dst);
  10555. } break;
  10556. case GGML_TYPE_Q4_0:
  10557. case GGML_TYPE_Q4_1:
  10558. case GGML_TYPE_Q5_0:
  10559. case GGML_TYPE_Q5_1:
  10560. case GGML_TYPE_Q8_0:
  10561. case GGML_TYPE_Q8_1:
  10562. case GGML_TYPE_Q2_K:
  10563. case GGML_TYPE_Q3_K:
  10564. case GGML_TYPE_Q4_K:
  10565. case GGML_TYPE_Q5_K:
  10566. case GGML_TYPE_Q6_K:
  10567. case GGML_TYPE_Q8_K:
  10568. case GGML_TYPE_I8:
  10569. case GGML_TYPE_I16:
  10570. case GGML_TYPE_I32:
  10571. case GGML_TYPE_COUNT:
  10572. {
  10573. GGML_ASSERT(false);
  10574. } break;
  10575. }
  10576. }
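
// [illustrative sketch - hypothetical helper, not part of ggml]
// the per-head ALiBi slope used above: the first 2^floor(log2(n_head)) heads
// get slopes m0^(k+1); any remaining heads interpolate with m1:
static float alibi_slope_ref(const int k, const int n_head, const float max_bias) {
    const int n_heads_log2_floor = 1 << (int) floor(log2(n_head));

    const float m0 = powf(2.0f, -(max_bias)        / n_heads_log2_floor);
    const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_heads_log2_floor);

    return k < n_heads_log2_floor ? powf(m0, k + 1) : powf(m1, 2*(k - n_heads_log2_floor) + 1);
}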

// ggml_compute_forward_clamp

static void ggml_compute_forward_clamp_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    float min;
    float max;
    memcpy(&min, (float *) dst->op_params + 0, sizeof(float));
    memcpy(&max, (float *) dst->op_params + 1, sizeof(float));

    const int ith = params->ith;
    const int nth = params->nth;

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    const size_t nb00 = src0->nb[0];
    const size_t nb01 = src0->nb[1];

    const size_t nb0 = dst->nb[0];
    const size_t nb1 = dst->nb[1];

    GGML_ASSERT( nb0 == sizeof(float));
    GGML_ASSERT(nb00 == sizeof(float));

    for (int j = ith; j < n; j += nth) {
        float * dst_ptr  = (float *) ((char *)  dst->data + j*nb1);
        float * src0_ptr = (float *) ((char *) src0->data + j*nb01);

        for (int i = 0; i < nc; i++) {
            dst_ptr[i] = MAX(MIN(src0_ptr[i], max), min);
        }
    }
}

static void ggml_compute_forward_clamp(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_clamp_f32(params, src0, dst);
            } break;
        case GGML_TYPE_F16:
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q8_1:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
        case GGML_TYPE_Q8_K:
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }
}
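
// [illustrative sketch - hypothetical helper, not part of ggml]
// per-element semantics of the clamp kernel above (min/max come from dst->op_params):
static inline float clamp_ref(const float x, const float min, const float max) {
    return MAX(MIN(x, max), min);
}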

// ggml_compute_forward_rope

static void ggml_compute_forward_rope_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    float freq_base;
    float freq_scale;

    // these two only relevant for xPos RoPE:
    float xpos_base;
    bool  xpos_down;

    //const int n_past = ((int32_t *) dst->op_params)[0];
    const int n_dims = ((int32_t *) dst->op_params)[1];
    const int mode   = ((int32_t *) dst->op_params)[2];
    const int n_ctx  = ((int32_t *) dst->op_params)[3];
    memcpy(&freq_base,  (int32_t *) dst->op_params + 4, sizeof(float));
    memcpy(&freq_scale, (int32_t *) dst->op_params + 5, sizeof(float));
    memcpy(&xpos_base,  (int32_t *) dst->op_params + 6, sizeof(float));
    memcpy(&xpos_down,  (int32_t *) dst->op_params + 7, sizeof(bool));

    GGML_TENSOR_UNARY_OP_LOCALS

    //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
    //printf("n_past = %d, ne2 = %d\n", n_past, ne2);

    GGML_ASSERT(nb00 == sizeof(float));

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(dst);

    GGML_ASSERT(n_dims <= ne0);
    GGML_ASSERT(n_dims % 2 == 0);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    // row index used to determine which thread to use
    int ir = 0;

    const float theta_scale = powf(freq_base, -2.0f/n_dims);

    const bool is_neox = mode & 2;
    const bool is_glm  = mode & 4;

    const int32_t * pos = (const int32_t *) src1->data;

    for (int64_t i3 = 0; i3 < ne3; i3++) {
        for (int64_t i2 = 0; i2 < ne2; i2++) {
            const int64_t p = pos[i2];
            for (int64_t i1 = 0; i1 < ne1; i1++) {
                if (ir++ < ir0) continue;
                if (ir   > ir1) break;

                float theta = freq_scale * (float)p;

                if (is_glm) {
                    theta = MIN(p, n_ctx - 2);
                    float block_theta = MAX(p - (n_ctx - 2), 0);
                    for (int64_t i0 = 0; i0 < ne0 / 4; i0++) {
                        const float cos_theta = cosf(theta);
                        const float sin_theta = sinf(theta);
                        const float cos_block_theta = cosf(block_theta);
                        const float sin_block_theta = sinf(block_theta);

                        theta       *= theta_scale;
                        block_theta *= theta_scale;

                        const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
                              float * dst_data  = (float *)((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + i0*nb0);

                        const float x0 = src[0];
                        const float x1 = src[n_dims/2];
                        const float x2 = src[n_dims];
                        const float x3 = src[n_dims/2*3];

                        dst_data[0]          = x0*cos_theta - x1*sin_theta;
                        dst_data[n_dims/2]   = x0*sin_theta + x1*cos_theta;
                        dst_data[n_dims]     = x2*cos_block_theta - x3*sin_block_theta;
                        dst_data[n_dims/2*3] = x2*sin_block_theta + x3*cos_block_theta;
                    }
                } else if (!is_neox) {
                    for (int64_t i0 = 0; i0 < ne0; i0 += 2) {
                        const float cos_theta = cosf(theta);
                        const float sin_theta = sinf(theta);

                        // zeta scaling for xPos only:
                        float zeta = xpos_base != 0.0f ? powf((i0 + 0.4f * ne0) / (1.4f * ne0), p / xpos_base) : 1.0f;
                        if (xpos_down) zeta = 1.0f / zeta;

                        theta *= theta_scale;

                        const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
                              float * dst_data  = (float *)((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + i0*nb0);

                        const float x0 = src[0];
                        const float x1 = src[1];

                        dst_data[0] = x0*cos_theta*zeta - x1*sin_theta*zeta;
                        dst_data[1] = x0*sin_theta*zeta + x1*cos_theta*zeta;
                    }
                } else {
                    // TODO: this might be wrong for ne0 != n_dims - need double check
                    // ref: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt_neox/modeling_gpt_neox.py#LL251C1-L294C28
                    for (int64_t ib = 0; ib < ne0/n_dims; ++ib) {
                        for (int64_t ic = 0; ic < n_dims; ic += 2) {
                            const float cos_theta = cosf(theta);
                            const float sin_theta = sinf(theta);

                            theta *= theta_scale;

                            const int64_t i0 = ib*n_dims + ic/2;

                            const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
                                  float * dst_data  = (float *)((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + i0*nb0);

                            const float x0 = src[0];
                            const float x1 = src[n_dims/2];

                            dst_data[0]        = x0*cos_theta - x1*sin_theta;
                            dst_data[n_dims/2] = x0*sin_theta + x1*cos_theta;
                        }
                    }
                }
            }
        }
    }
}

static void ggml_compute_forward_rope_f16(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    float freq_base;
    float freq_scale;

    //const int n_past = ((int32_t *) dst->op_params)[0];
    const int n_dims = ((int32_t *) dst->op_params)[1];
    const int mode   = ((int32_t *) dst->op_params)[2];
    const int n_ctx  = ((int32_t *) dst->op_params)[3];
    memcpy(&freq_base,  (int32_t *) dst->op_params + 4, sizeof(float));
    memcpy(&freq_scale, (int32_t *) dst->op_params + 5, sizeof(float));

    GGML_TENSOR_UNARY_OP_LOCALS

    //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
    //printf("n_past = %d, ne2 = %d\n", n_past, ne2);

    GGML_ASSERT(nb0 == sizeof(ggml_fp16_t));

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(dst);

    GGML_ASSERT(n_dims <= ne0);
    GGML_ASSERT(n_dims % 2 == 0);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    // row index used to determine which thread to use
    int ir = 0;

    const float theta_scale = powf(freq_base, -2.0f/n_dims);

    const bool is_neox = mode & 2;
    const bool is_glm  = mode & 4;

    const int32_t * pos = (const int32_t *) src1->data;

    for (int64_t i3 = 0; i3 < ne3; i3++) {
        for (int64_t i2 = 0; i2 < ne2; i2++) {
            const int64_t p = pos[i2];
            for (int64_t i1 = 0; i1 < ne1; i1++) {
                if (ir++ < ir0) continue;
                if (ir   > ir1) break;

                float theta = freq_scale * (float)p;

                if (is_glm) {
                    theta = MIN(p, n_ctx - 2);
                    float block_theta = MAX(p - (n_ctx - 2), 0);
                    for (int64_t i0 = 0; i0 < ne0 / 4; i0++) {
                        const float cos_theta = cosf(theta);
                        const float sin_theta = sinf(theta);
                        const float cos_block_theta = cosf(block_theta);
                        const float sin_block_theta = sinf(block_theta);

                        theta       *= theta_scale;
                        block_theta *= theta_scale;

                        const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
                              ggml_fp16_t * dst_data  = (ggml_fp16_t *)((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + i0*nb0);

                        const float x0 = GGML_FP16_TO_FP32(src[0]);
                        const float x1 = GGML_FP16_TO_FP32(src[n_dims/2]);
                        const float x2 = GGML_FP16_TO_FP32(src[n_dims]);
                        const float x3 = GGML_FP16_TO_FP32(src[n_dims/2*3]);

                        dst_data[0]          = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta);
                        dst_data[n_dims/2]   = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta);
                        dst_data[n_dims]     = GGML_FP32_TO_FP16(x2*cos_block_theta - x3*sin_block_theta);
                        dst_data[n_dims/2*3] = GGML_FP32_TO_FP16(x2*sin_block_theta + x3*cos_block_theta);
                    }
                } else if (!is_neox) {
                    for (int64_t i0 = 0; i0 < ne0; i0 += 2) {
                        const float cos_theta = cosf(theta);
                        const float sin_theta = sinf(theta);

                        theta *= theta_scale;

                        const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
                              ggml_fp16_t * dst_data  = (ggml_fp16_t *)((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + i0*nb0);

                        const float x0 = GGML_FP16_TO_FP32(src[0]);
                        const float x1 = GGML_FP16_TO_FP32(src[1]);

                        dst_data[0] = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta);
                        dst_data[1] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta);
                    }
                } else {
                    // TODO: this might be wrong for ne0 != n_dims - need double check
                    // ref: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt_neox/modeling_gpt_neox.py#LL251C1-L294C28
                    for (int64_t ib = 0; ib < ne0/n_dims; ++ib) {
                        for (int64_t ic = 0; ic < n_dims; ic += 2) {
                            const float cos_theta = cosf(theta);
                            const float sin_theta = sinf(theta);

                            theta *= theta_scale;

                            const int64_t i0 = ib*n_dims + ic/2;

                            const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
                                  ggml_fp16_t * dst_data  = (ggml_fp16_t *)((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + i0*nb0);

                            const float x0 = GGML_FP16_TO_FP32(src[0]);
                            const float x1 = GGML_FP16_TO_FP32(src[n_dims/2]);

                            dst_data[0]        = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta);
                            dst_data[n_dims/2] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta);
                        }
                    }
                }
            }
        }
    }
}

static void ggml_compute_forward_rope(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_rope_f16(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_rope_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
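
// [illustrative sketch - hypothetical helper, not part of ggml]
// core RoPE rotation as applied by the !is_neox path above, without the
// xPos/GLM/NeoX variants: each pair (x[2k], x[2k+1]) is rotated by
// theta_k = p * freq_base^(-2k/n_dims), i.e. multiplied by e^(i*theta_k):
static void rope_ref(const int n_dims, const int p, const float freq_base, float * x) {
    const float theta_scale = powf(freq_base, -2.0f/n_dims);

    float theta = (float) p;
    for (int i0 = 0; i0 < n_dims; i0 += 2) {
        const float cos_theta = cosf(theta);
        const float sin_theta = sinf(theta);

        const float x0 = x[i0 + 0];
        const float x1 = x[i0 + 1];

        x[i0 + 0] = x0*cos_theta - x1*sin_theta;
        x[i0 + 1] = x0*sin_theta + x1*cos_theta;

        theta *= theta_scale;
    }
}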

// ggml_compute_forward_rope_back

static void ggml_compute_forward_rope_back_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // y = rope(x, src1)
    // dx = rope_back(dy, src1)
    // src0 is dy, src1 contains options

    float freq_base;
    float freq_scale;

    // these two only relevant for xPos RoPE:
    float xpos_base;
    bool  xpos_down;

    //const int n_past = ((int32_t *) dst->op_params)[0];
    const int n_dims = ((int32_t *) dst->op_params)[1];
    const int mode   = ((int32_t *) dst->op_params)[2];
    const int n_ctx  = ((int32_t *) dst->op_params)[3]; UNUSED(n_ctx);
    memcpy(&freq_base,  (int32_t *) dst->op_params + 4, sizeof(float));
    memcpy(&freq_scale, (int32_t *) dst->op_params + 5, sizeof(float));
    memcpy(&xpos_base,  (int32_t *) dst->op_params + 6, sizeof(float));
    memcpy(&xpos_down,  (int32_t *) dst->op_params + 7, sizeof(bool));

    GGML_TENSOR_UNARY_OP_LOCALS

    //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
    //printf("n_past = %d, ne2 = %d\n", n_past, ne2);

    assert(nb0 == sizeof(float));

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(dst);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    // row index used to determine which thread to use
    int ir = 0;

    const float theta_scale = powf(freq_base, -2.0f/n_dims);

    const bool is_neox = mode & 2;

    const int32_t * pos = (const int32_t *) src1->data;

    for (int64_t i3 = 0; i3 < ne3; i3++) {
        for (int64_t i2 = 0; i2 < ne2; i2++) {
            const int64_t p = pos[i2];
            for (int64_t i1 = 0; i1 < ne1; i1++) {
                if (ir++ < ir0) continue;
                if (ir   > ir1) break;

                float theta = freq_scale * (float)p;

                if (!is_neox) {
                    for (int64_t i0 = 0; i0 < ne0; i0 += 2) {
                        const float cos_theta = cosf(theta);
                        const float sin_theta = sinf(theta);

                        // zeta scaling for xPos only:
                        float zeta = xpos_base != 0.0f ? powf((i0 + 0.4f * ne0) / (1.4f * ne0), p / xpos_base) : 1.0f;
                        if (xpos_down) zeta = 1.0f / zeta;

                        theta *= theta_scale;

                        const float * const dy = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
                              float *       dx = (float *)((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + i0*nb0);

                        const float dy0 = dy[0];
                        const float dy1 = dy[1];

                        dx[0] =   dy0*cos_theta*zeta + dy1*sin_theta*zeta;
                        dx[1] = - dy0*sin_theta*zeta + dy1*cos_theta*zeta;
                    }
                } else {
                    for (int64_t ib = 0; ib < ne0/n_dims; ++ib) {
                        for (int64_t ic = 0; ic < n_dims; ic += 2) {
                            const float cos_theta = cosf(theta);
                            const float sin_theta = sinf(theta);

                            theta *= theta_scale;

                            const int64_t i0 = ib*n_dims + ic/2;

                            const float * const dy = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
                                  float *       dx = (float *)((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + i0*nb0);

                            const float dy0 = dy[0];
                            const float dy1 = dy[n_dims/2];

                            dx[0]        =   dy0*cos_theta + dy1*sin_theta;
                            dx[n_dims/2] = - dy0*sin_theta + dy1*cos_theta;
                        }
                    }
                }
            }
        }
    }
}

static void ggml_compute_forward_rope_back_f16(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // y = rope(x, src1)
    // dx = rope_back(dy, src1)
    // src0 is dy, src1 contains options

    //const int n_past = ((int32_t *) dst->op_params)[0];
    const int n_dims = ((int32_t *) dst->op_params)[1];
    const int mode   = ((int32_t *) dst->op_params)[2];

    GGML_TENSOR_UNARY_OP_LOCALS

    //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
    //printf("n_past = %d, ne2 = %d\n", n_past, ne2);

    assert(nb0 == sizeof(ggml_fp16_t));

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(dst);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    // row index used to determine which thread to use
    int ir = 0;

    const float theta_scale = powf(10000.0, -2.0f/n_dims);

    const bool is_neox = mode & 2;

    const int32_t * pos = (const int32_t *) src1->data;

    for (int64_t i3 = 0; i3 < ne3; i3++) {
        for (int64_t i2 = 0; i2 < ne2; i2++) {
            const int64_t p = pos[i2];
            for (int64_t i1 = 0; i1 < ne1; i1++) {
                if (ir++ < ir0) continue;
                if (ir   > ir1) break;

                float theta = (float)p;

                if (!is_neox) {
                    for (int64_t i0 = 0; i0 < ne0; i0 += 2) {
                        const float cos_theta = cosf(theta);
                        const float sin_theta = sinf(theta);

                        theta *= theta_scale;

                        const ggml_fp16_t * const dy = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
                              ggml_fp16_t *       dx = (ggml_fp16_t *)((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + i0*nb0);

                        const float dy0 = GGML_FP16_TO_FP32(dy[0]);
                        const float dy1 = GGML_FP16_TO_FP32(dy[1]);

                        dx[0] = GGML_FP32_TO_FP16( dy0*cos_theta + dy1*sin_theta);
                        dx[1] = GGML_FP32_TO_FP16(-dy0*sin_theta + dy1*cos_theta);
                    }
                } else {
                    for (int64_t ib = 0; ib < ne0/n_dims; ++ib) {
                        for (int64_t ic = 0; ic < n_dims; ic += 2) {
                            const float cos_theta = cosf(theta);
                            const float sin_theta = sinf(theta);

                            theta *= theta_scale;

                            const int64_t i0 = ib*n_dims + ic/2;

                            const ggml_fp16_t * const dy = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
                                  ggml_fp16_t *       dx = (ggml_fp16_t *)((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + i0*nb0);

                            const float dy0 = GGML_FP16_TO_FP32(dy[0]);
                            const float dy1 = GGML_FP16_TO_FP32(dy[n_dims/2]);

                            dx[0]        = GGML_FP32_TO_FP16( dy0*cos_theta + dy1*sin_theta);
                            dx[n_dims/2] = GGML_FP32_TO_FP16(-dy0*sin_theta + dy1*cos_theta);
                        }
                    }
                }
            }
        }
    }
}

static void ggml_compute_forward_rope_back(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_rope_back_f16(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_rope_back_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
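
// [illustrative sketch - hypothetical helper, not part of ggml]
// rope_back is the inverse rotation: rotating by theta is orthogonal, so the
// gradient is rotated by -theta - hence the flipped signs on sin_theta in the
// kernels above relative to the forward rope:
static void rope_back_ref(const int n_dims, const int p, const float freq_base, float * dy) {
    const float theta_scale = powf(freq_base, -2.0f/n_dims);

    float theta = (float) p;
    for (int i0 = 0; i0 < n_dims; i0 += 2) {
        const float cos_theta = cosf(theta);
        const float sin_theta = sinf(theta);

        const float dy0 = dy[i0 + 0];
        const float dy1 = dy[i0 + 1];

        dy[i0 + 0] =  dy0*cos_theta + dy1*sin_theta;
        dy[i0 + 1] = -dy0*sin_theta + dy1*cos_theta;

        theta *= theta_scale;
    }
}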

// ggml_compute_forward_conv_1d

static void ggml_compute_forward_conv_1d_s1_ph_f16_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    GGML_TENSOR_BINARY_OP_LOCALS

    const int ith = params->ith;
    const int nth = params->nth;

    const int nk = ne00;
    const int nh = nk/2;

    const int ew0 = ggml_up32(ne01);

    GGML_ASSERT(ne00 % 2 == 1); // TODO: support even kernel sizes
    GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nb10 == sizeof(float));

    if (params->type == GGML_TASK_INIT) {
        // TODO: fix this memset (wsize is overestimated)
        memset(params->wdata, 0, params->wsize);

        // prepare kernel data (src0)
        {
            ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;

            for (int64_t i02 = 0; i02 < ne02; i02++) {
                for (int64_t i01 = 0; i01 < ne01; i01++) {
                    const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i02*nb02 + i01*nb01);
                    ggml_fp16_t * dst_data = wdata + i02*ew0*ne00;
                    for (int64_t i00 = 0; i00 < ne00; i00++) {
                        dst_data[i00*ew0 + i01] = src[i00];
                    }
                }
            }
        }

        // prepare source data (src1)
        {
            ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + ne02*ew0*ne00;

            for (int64_t i11 = 0; i11 < ne11; i11++) {
                const float * const src = (float *)((char *) src1->data + i11*nb11);
                ggml_fp16_t * dst_data = wdata;
                for (int64_t i10 = 0; i10 < ne10; i10++) {
                    dst_data[(i10 + nh)*ew0 + i11] = GGML_FP32_TO_FP16(src[i10]);
                }
            }
        }

        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // total rows in dst
    const int nr = ne02;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        float * dst_data = (float *)((char *) dst->data + i1*nb1);
        for (int64_t i0 = 0; i0 < ne10; ++i0) {
            dst_data[i0] = 0;
            for (int k = -nh; k <= nh; k++) {
                float v = 0.0f;
                ggml_vec_dot_f16(ew0, &v,
                        (ggml_fp16_t *) params->wdata +   i1*ew0*ne00 +      (nh + k)*ew0,
                        (ggml_fp16_t *) params->wdata + ne02*ew0*ne00 + (i0 + nh + k)*ew0);

                dst_data[i0] += v;
            }
        }
    }
}

static void ggml_compute_forward_conv_1d_s1_ph_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    GGML_TENSOR_BINARY_OP_LOCALS

    const int ith = params->ith;
    const int nth = params->nth;

    const int nk = ne00;
    const int nh = nk/2;

    const int ew0 = ggml_up32(ne01);

    GGML_ASSERT(ne00 % 2 == 1); // TODO: support even kernel sizes
    GGML_ASSERT(nb00 == sizeof(float));
    GGML_ASSERT(nb10 == sizeof(float));

    if (params->type == GGML_TASK_INIT) {
        // TODO: fix this memset (wsize is overestimated)
        memset(params->wdata, 0, params->wsize);

        // prepare kernel data (src0)
        {
            float * const wdata = (float *) params->wdata + 0;

            for (int64_t i02 = 0; i02 < ne02; i02++) {
                for (int64_t i01 = 0; i01 < ne01; i01++) {
                    const float * const src = (float *)((char *) src0->data + i02*nb02 + i01*nb01);
                    float * dst_data = wdata + i02*ew0*ne00;
                    for (int64_t i00 = 0; i00 < ne00; i00++) {
                        dst_data[i00*ew0 + i01] = src[i00];
                    }
                }
            }
        }

        // prepare source data (src1)
        {
            float * const wdata = (float *) params->wdata + ne02*ew0*ne00;

            for (int64_t i11 = 0; i11 < ne11; i11++) {
                const float * const src = (float *)((char *) src1->data + i11*nb11);
                float * dst_data = wdata;
                for (int64_t i10 = 0; i10 < ne10; i10++) {
                    dst_data[(i10 + nh)*ew0 + i11] = src[i10];
                }
            }
        }

        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // total rows in dst
    const int nr = ne02;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        float * dst_data = (float *)((char *) dst->data + i1*nb1);
        for (int64_t i0 = 0; i0 < ne10; ++i0) {
            dst_data[i0] = 0;
            for (int k = -nh; k <= nh; k++) {
                float v = 0.0f;
                ggml_vec_dot_f32(ew0, &v,
                        (float *) params->wdata +   i1*ew0*ne00 +      (nh + k)*ew0,
                        (float *) params->wdata + ne02*ew0*ne00 + (i0 + nh + k)*ew0);

                dst_data[i0] += v;
            }
        }
    }
}

static void ggml_compute_forward_conv_1d_s1_ph(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_conv_1d_s1_ph_f16_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_conv_1d_s1_ph_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
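
// [illustrative sketch - hypothetical helper, not part of ggml]
// what the s1_ph kernels compute per output row, minus the channel packing:
// a stride-1, half-padded ("same") 1d convolution with an odd kernel of size nk:
static void conv_1d_same_ref(const int n, const int nk, const float * w, const float * x, float * y) {
    const int nh = nk/2;
    for (int i = 0; i < n; ++i) {
        float v = 0.0f;
        for (int k = -nh; k <= nh; ++k) {
            if (i + k >= 0 && i + k < n) {
                v += w[nh + k]*x[i + k];
            }
        }
        y[i] = v;
    }
}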

static void ggml_compute_forward_conv_1d_s2_ph_f16_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    GGML_TENSOR_BINARY_OP_LOCALS

    const int ith = params->ith;
    const int nth = params->nth;

    const int nk = ne00;
    const int nh = nk/2;

    const int ew0 = ggml_up32(ne01);

    GGML_ASSERT(ne00 % 2 == 1); // TODO: support even kernel sizes
    GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nb10 == sizeof(float));

    if (params->type == GGML_TASK_INIT) {
        // TODO: fix this memset (wsize is overestimated)
        memset(params->wdata, 0, params->wsize);

        // prepare kernel data (src0)
        {
            ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;

            for (int64_t i02 = 0; i02 < ne02; i02++) {
                for (int64_t i01 = 0; i01 < ne01; i01++) {
                    const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i02*nb02 + i01*nb01);
                    ggml_fp16_t * dst_data = wdata + i02*ew0*ne00;
                    for (int64_t i00 = 0; i00 < ne00; i00++) {
                        dst_data[i00*ew0 + i01] = src[i00];
                    }
                }
            }
        }

        // prepare source data (src1)
        {
            ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + ne02*ew0*ne00;

            for (int64_t i11 = 0; i11 < ne11; i11++) {
                const float * const src = (float *)((char *) src1->data + i11*nb11);
                ggml_fp16_t * dst_data = wdata;
                for (int64_t i10 = 0; i10 < ne10; i10++) {
                    dst_data[(i10 + nh)*ew0 + i11] = GGML_FP32_TO_FP16(src[i10]);
                }
            }
        }

        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // total rows in dst
    const int nr = ne02;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        float * dst_data = (float *)((char *) dst->data + i1*nb1);
        for (int64_t i0 = 0; i0 < ne10; i0 += 2) {
            dst_data[i0/2] = 0;
            for (int k = -nh; k <= nh; k++) {
                float v = 0.0f;
                ggml_vec_dot_f16(ew0, &v,
                        (ggml_fp16_t *) params->wdata +   i1*ew0*ne00 +      (nh + k)*ew0,
                        (ggml_fp16_t *) params->wdata + ne02*ew0*ne00 + (i0 + nh + k)*ew0);

                dst_data[i0/2] += v;
            }
        }
    }
}

static void ggml_compute_forward_conv_1d_s2_ph_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    GGML_TENSOR_BINARY_OP_LOCALS

    const int ith = params->ith;
    const int nth = params->nth;

    const int nk = ne00;
    const int nh = nk/2;

    const int ew0 = ggml_up32(ne01);

    GGML_ASSERT(ne00 % 2 == 1); // TODO: support even kernel sizes
    GGML_ASSERT(nb00 == sizeof(float));
    GGML_ASSERT(nb10 == sizeof(float));

    if (params->type == GGML_TASK_INIT) {
        // TODO: fix this memset (wsize is overestimated)
        memset(params->wdata, 0, params->wsize);

        // prepare kernel data (src0)
        {
            float * const wdata = (float *) params->wdata + 0;

            for (int64_t i02 = 0; i02 < ne02; i02++) {
                for (int64_t i01 = 0; i01 < ne01; i01++) {
                    const float * const src = (float *)((char *) src0->data + i02*nb02 + i01*nb01);
                    float * dst_data = wdata + i02*ew0*ne00;
                    for (int64_t i00 = 0; i00 < ne00; i00++) {
                        dst_data[i00*ew0 + i01] = src[i00];
                    }
                }
            }
        }

        // prepare source data (src1)
        {
            float * const wdata = (float *) params->wdata + ne02*ew0*ne00;

            for (int64_t i11 = 0; i11 < ne11; i11++) {
                const float * const src = (float *)((char *) src1->data + i11*nb11);
                float * dst_data = wdata;
                for (int64_t i10 = 0; i10 < ne10; i10++) {
                    dst_data[(i10 + nh)*ew0 + i11] = src[i10];
                }
            }
        }

        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // total rows in dst
    const int nr = ne02;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        float * dst_data = (float *)((char *) dst->data + i1*nb1);
        for (int64_t i0 = 0; i0 < ne10; i0 += 2) {
            dst_data[i0/2] = 0;
            for (int k = -nh; k <= nh; k++) {
                float v = 0.0f;
                ggml_vec_dot_f32(ew0, &v,
                        (float *) params->wdata +   i1*ew0*ne00 +      (nh + k)*ew0,
                        (float *) params->wdata + ne02*ew0*ne00 + (i0 + nh + k)*ew0);

                dst_data[i0/2] += v;
            }
        }
    }
}

static void ggml_compute_forward_conv_1d_s2_ph(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_conv_1d_s2_ph_f16_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_conv_1d_s2_ph_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_conv_1d

static void ggml_compute_forward_conv_1d(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    const int32_t s0 = ((const int32_t*)(dst->op_params))[0];
    const int32_t p0 = ((const int32_t*)(dst->op_params))[1];
    const int32_t d0 = ((const int32_t*)(dst->op_params))[2];

    GGML_ASSERT(d0 == 1);             // dilation not supported
    GGML_ASSERT(p0 == src0->ne[0]/2); // only half padding supported

    if (s0 == 1) {
        ggml_compute_forward_conv_1d_s1_ph(params, src0, src1, dst);
    } else if (s0 == 2) {
        ggml_compute_forward_conv_1d_s2_ph(params, src0, src1, dst);
    } else {
        GGML_ASSERT(false); // only stride 1 and 2 supported
    }
}
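
// [illustrative sketch - hypothetical helper, not part of ggml]
// general 1d convolution output length; with half padding (p0 = nk/2, d0 = 1)
// this gives L_in outputs for s0 == 1 and about L_in/2 for s0 == 2, matching
// the dst_data[i0] / dst_data[i0/2] indexing of the two kernels above:
static int conv_1d_out_len_ref(const int L_in, const int nk, const int s0, const int p0, const int d0) {
    return (L_in + 2*p0 - d0*(nk - 1) - 1)/s0 + 1;
}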

// ggml_compute_forward_conv_2d

static void ggml_compute_forward_conv_2d_f16_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    GGML_TENSOR_BINARY_OP_LOCALS

    const int ith = params->ith;
    const int nth = params->nth;

    const int nk0 = ne00;
    const int nk1 = ne01;

    // size of the convolution row - the kernel size unrolled across all channels
    const int ew0 = nk0*nk1*ne02;

    const int32_t s0 = ((const int32_t*)(dst->op_params))[0];
    const int32_t s1 = ((const int32_t*)(dst->op_params))[1];
    const int32_t p0 = ((const int32_t*)(dst->op_params))[2];
    const int32_t p1 = ((const int32_t*)(dst->op_params))[3];
    const int32_t d0 = ((const int32_t*)(dst->op_params))[4];
    const int32_t d1 = ((const int32_t*)(dst->op_params))[5];

    GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nb10 == sizeof(float));

    if (params->type == GGML_TASK_INIT) {
        memset(params->wdata, 0, params->wsize);

        // prepare source data (src1)
        {
            ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;

            for (int i12 = 0; i12 < ne12; i12++) {
                const float * const src = (float *)((char *) src1->data + i12*nb12);
                ggml_fp16_t * dst_data = wdata;

                for (int i1 = 0; i1 < ne1; i1++) {
                    for (int i0 = 0; i0 < ne0; i0++) {
                        for (int ik1 = 0; ik1 < nk1; ik1++) {
                            for (int ik0 = 0; ik0 < nk0; ik0++) {
                                const int idx0 = i0*s0 + ik0*d0 - p0;
                                const int idx1 = i1*s1 + ik1*d1 - p1;

                                if (!(idx1 < 0 || idx1 >= ne11 || idx0 < 0 || idx0 >= ne10)) {
                                    dst_data[(i1*ne0 + i0)*ew0 + i12*(nk0*nk1) + ik1*nk0 + ik0] =
                                        GGML_FP32_TO_FP16(src[idx1*ne10 + idx0]);
                                }
                            }
                        }
                    }
                }
            }
        }

        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // total patches in dst
    const int np = ne2;

    // patches per thread
    const int dp = (np + nth - 1)/nth;

    // patch range for this thread
    const int ip0 = dp*ith;
    const int ip1 = MIN(ip0 + dp, np);

    ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;

    for (int i3 = 0; i3 < ne3; i3++) {
        for (int i2 = ip0; i2 < ip1; i2++) {
            float * dst_data = (float *)((char *) dst->data + i3*nb3 + i2*nb2);

            for (int i1 = 0; i1 < ne1; ++i1) {
                for (int i0 = 0; i0 < ne0; ++i0) {
                    ggml_vec_dot_f16(ew0, dst_data + i1*ne0 + i0,
                            (ggml_fp16_t *) ((char *) src0->data + i2*nb03),
                            (ggml_fp16_t *)                wdata + i3*nb3 + (i1*ne0 + i0)*ew0);
                }
            }
        }
    }
}

static void ggml_compute_forward_conv_2d(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_conv_2d_f16_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F32:
            {
                //ggml_compute_forward_conv_2d_f32(params, src0, src1, dst);
                GGML_ASSERT(false);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
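
// [illustrative sketch - hypothetical helper, not part of ggml]
// the INIT phase above is an im2col transform: each output location receives a
// row of kernel-window samples so that the main phase reduces to one dot
// product per output value; single-channel version with the same index math:
static void im2col_ref(
        const int ne0,  const int ne1,  // output size
        const int ne10, const int ne11, // input size
        const int nk0,  const int nk1,  // kernel size
        const int s0, const int s1, const int p0, const int p1, const int d0, const int d1,
        const float * src, float * col) {
    for (int i1 = 0; i1 < ne1; i1++) {
        for (int i0 = 0; i0 < ne0; i0++) {
            for (int ik1 = 0; ik1 < nk1; ik1++) {
                for (int ik0 = 0; ik0 < nk0; ik0++) {
                    const int idx0 = i0*s0 + ik0*d0 - p0;
                    const int idx1 = i1*s1 + ik1*d1 - p1;

                    // out-of-bounds taps read as zero (the kernels above memset wdata)
                    col[((i1*ne0 + i0)*nk1 + ik1)*nk0 + ik0] =
                        (idx0 >= 0 && idx0 < ne10 && idx1 >= 0 && idx1 < ne11)
                            ? src[idx1*ne10 + idx0] : 0.0f;
                }
            }
        }
    }
}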

// ggml_compute_forward_conv_transpose_2d

static void ggml_compute_forward_conv_transpose_2d(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    GGML_TENSOR_BINARY_OP_LOCALS

    const int ith = params->ith;
    const int nth = params->nth;

    const int nk = ne00*ne01*ne02*ne03;

    GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nb10 == sizeof(float));

    if (params->type == GGML_TASK_INIT) {
        memset(params->wdata, 0, params->wsize);

        // permute kernel data (src0) from (Kw x Kh x Cout x Cin) to (Cin x Kw x Kh x Cout)
        {
            ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;

            for (int64_t i03 = 0; i03 < ne03; i03++) {
                for (int64_t i02 = 0; i02 < ne02; i02++) {
                    const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i03*nb03 + i02*nb02);
                    ggml_fp16_t * dst_data = wdata + i02*ne01*ne00*ne03;
                    for (int64_t i01 = 0; i01 < ne01; i01++) {
                        for (int64_t i00 = 0; i00 < ne00; i00++) {
                            dst_data[i01*ne00*ne03 + i00*ne03 + i03] = src[i01 * ne00 + i00];
                        }
                    }
                }
            }
        }

        // permute source data (src1) from (Sw x Sh x Cin) to (Cin x Sw x Sh)
        {
            ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + nk;
            for (int i12 = 0; i12 < ne12; i12++) {
                for (int i11 = 0; i11 < ne11; i11++) {
                    const float * const src = (float *)((char *) src1->data + i12*nb12 + i11*nb11);
                    ggml_fp16_t * dst_data = wdata + i11*ne10*ne12;
                    for (int i10 = 0; i10 < ne10; i10++) {
                        dst_data[i10*ne12 + i12] = GGML_FP32_TO_FP16(src[i10]);
                    }
                }
            }
        }

        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int32_t stride = ggml_get_op_params_i32(dst, 0);

    // total patches in dst
    const int np = ne2;

    // patches per thread
    const int dp = (np + nth - 1)/nth;

    // patch range for this thread
    const int ip0 = dp*ith;
    const int ip1 = MIN(ip0 + dp, np);

    ggml_fp16_t * const wdata     = (ggml_fp16_t *) params->wdata + 0;
    ggml_fp16_t * const wdata_src = wdata + nk;

    for (int i2 = ip0; i2 < ip1; i2++) { // Cout
        float * dst_data = (float *)((char *) dst->data + i2*nb2);
        ggml_fp16_t * wdata_kernel = wdata + i2*ne01*ne00*ne03;
        for (int i11 = 0; i11 < ne11; i11++) {
            for (int i10 = 0; i10 < ne10; i10++) {
                const int i1n = i11*ne10*ne12 + i10*ne12;
                for (int i01 = 0; i01 < ne01; i01++) {
                    for (int i00 = 0; i00 < ne00; i00++) {
                        float v = 0;
                        ggml_vec_dot_f16(ne03, &v,
                                wdata_src + i1n,
                                wdata_kernel + i01*ne00*ne03 + i00*ne03);
                        dst_data[(i11*stride + i01)*ne0 + i10*stride + i00] += v;
                    }
                }
            }
        }
    }
}
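
// [illustrative sketch - hypothetical helper, not part of ggml]
// transposed convolution scatters instead of gathering: every input sample
// (i10, i11) accumulates a kernel-weighted patch into the output window at
// (i10*stride, i11*stride), as in the dst_data[...] += v above; single-channel
// version with output width (w - 1)*stride + kw (y must be zero-initialized):
static void conv_transpose_2d_ref(
        const int w, const int h, const int kw, const int kh, const int stride,
        const float * x, const float * kernel, float * y) {
    const int yw = (w - 1)*stride + kw;
    for (int i1 = 0; i1 < h; i1++) {
        for (int i0 = 0; i0 < w; i0++) {
            for (int k1 = 0; k1 < kh; k1++) {
                for (int k0 = 0; k0 < kw; k0++) {
                    y[(i1*stride + k1)*yw + i0*stride + k0] += x[i1*w + i0]*kernel[k1*kw + k0];
                }
            }
        }
    }
}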

// ggml_compute_forward_pool_1d_sk_p0

static void ggml_compute_forward_pool_1d_sk_p0(
        const struct ggml_compute_params * params,
        const enum ggml_op_pool op,
        const struct ggml_tensor * src,
        const int k,
        struct ggml_tensor * dst) {
    assert(src->type == GGML_TYPE_F32);
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const char * cdata = (const char *)src->data;
    const char * const data_end = cdata + ggml_nbytes(src);
    float * drow = (float *)dst->data;

    const int64_t rs = dst->ne[0];

    while (cdata < data_end) {
        const float * const srow = (const float *)cdata;

        int j = 0;

        for (int64_t i = 0; i < rs; ++i) {
            switch (op) {
                case GGML_OP_POOL_AVG:   drow[i] = 0;        break;
                case GGML_OP_POOL_MAX:   drow[i] = -FLT_MAX; break;
                case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
            }
            for (int ki = 0; ki < k; ++ki) {
                switch (op) {
                    case GGML_OP_POOL_AVG:   drow[i] += srow[j]; break;
                    case GGML_OP_POOL_MAX:   if (srow[j] > drow[i]) drow[i] = srow[j]; break;
                    case GGML_OP_POOL_COUNT: GGML_ASSERT(false);  break;
                }
                ++j;
            }
            switch (op) {
                case GGML_OP_POOL_AVG:   drow[i] /= k;       break;
                case GGML_OP_POOL_MAX:                       break;
                case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
            }
        }

        cdata += src->nb[1];
        drow  += rs;
    }
}

// ggml_compute_forward_pool_1d

static void ggml_compute_forward_pool_1d(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    const int32_t * opts = (const int32_t *)dst->op_params;
    enum ggml_op_pool op = opts[0];
    const int k0 = opts[1];
    const int s0 = opts[2];
    const int p0 = opts[3];
    GGML_ASSERT(p0 == 0);  // padding not supported
    GGML_ASSERT(k0 == s0); // only s = k supported

    ggml_compute_forward_pool_1d_sk_p0(params, op, src0, k0, dst);
}

// ggml_compute_forward_pool_2d_sk_p0

static void ggml_compute_forward_pool_2d_sk_p0(
        const struct ggml_compute_params * params,
        const enum ggml_op_pool op,
        const struct ggml_tensor * src,
        const int k0,
        const int k1,
        struct ggml_tensor * dst) {
    assert(src->type == GGML_TYPE_F32);
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const char * cdata = (const char*)src->data;
    const char * const data_end = cdata + ggml_nbytes(src);

    const int64_t px = dst->ne[0];
    const int64_t py = dst->ne[1];
    const int64_t pa = px * py;

    float * dplane = (float *)dst->data;

    const int ka = k0 * k1;

    while (cdata < data_end) {
        for (int oy = 0; oy < py; ++oy) {
            float * const drow = dplane + oy * px;
            for (int ox = 0; ox < px; ++ox) {
                float * const out = drow + ox;
                switch (op) {
                    case GGML_OP_POOL_AVG:   *out = 0;           break;
                    case GGML_OP_POOL_MAX:   *out = -FLT_MAX;    break;
                    case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
                }

                const int ix = ox * k0;
                const int iy = oy * k1;

                for (int ky = 0; ky < k1; ++ky) {
                    const float * const srow = (const float *)(cdata + src->nb[1] * (iy + ky));
                    for (int kx = 0; kx < k0; ++kx) {
                        int j = ix + kx;
                        switch (op) {
                            case GGML_OP_POOL_AVG:   *out += srow[j]; break;
                            case GGML_OP_POOL_MAX:   if (srow[j] > *out) *out = srow[j]; break;
                            case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
                        }
                    }
                }
                switch (op) {
                    case GGML_OP_POOL_AVG:   *out /= ka;         break;
                    case GGML_OP_POOL_MAX:                       break;
                    case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
                }
            }
        }

        cdata  += src->nb[2];
        dplane += pa;
    }
}

// ggml_compute_forward_pool_2d

static void ggml_compute_forward_pool_2d(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    const int32_t * opts = (const int32_t *)dst->op_params;
    enum ggml_op_pool op = opts[0];
    const int k0 = opts[1];
    const int k1 = opts[2];
    const int s0 = opts[3];
    const int s1 = opts[4];
    const int p0 = opts[5];
    const int p1 = opts[6];
    GGML_ASSERT(p0 == 0);
    GGML_ASSERT(p1 == 0);  // padding not supported
    GGML_ASSERT(k0 == s0);
    GGML_ASSERT(k1 == s1); // only s = k supported

    ggml_compute_forward_pool_2d_sk_p0(params, op, src0, k0, k1, dst);
}
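
// [illustrative sketch - hypothetical helper, not part of ggml]
// both pooling kernels follow the same init/accumulate/finalize pattern per
// output element; 1d version with k == stride and no padding, as asserted above:
static void pool_1d_ref(const enum ggml_op_pool op, const int k, const int n_out, const float * x, float * y) {
    for (int i = 0; i < n_out; ++i) {
        float v = (op == GGML_OP_POOL_MAX) ? -FLT_MAX : 0.0f;
        for (int j = 0; j < k; ++j) {
            const float s = x[i*k + j];
            switch (op) {
                case GGML_OP_POOL_AVG:   v += s;             break;
                case GGML_OP_POOL_MAX:   if (s > v) v = s;   break;
                case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
            }
        }
        y[i] = (op == GGML_OP_POOL_AVG) ? v/k : v;
    }
}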

// ggml_compute_forward_upscale

static void ggml_compute_forward_upscale_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    GGML_ASSERT(src0->nb[0] == sizeof(float));

    const int ith = params->ith;

    GGML_TENSOR_UNARY_OP_LOCALS

    const int scale_factor = dst->op_params[0];

    // TODO: optimize

    for (int i03 = 0; i03 < ne03; i03++) {
        for (int i02 = ith; i02 < ne02; i02++) {
            for (int m = 0; m < dst->ne[1]; m++) {
                int i01 = m / scale_factor;
                for (int n = 0; n < dst->ne[0]; n++) {
                    int i00 = n / scale_factor;

                    const float * x = (float *)((char *) src0->data + i00 * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03);

                    float * y = (float *)((char *) dst->data + n * dst->nb[0] + m * dst->nb[1] + i02 * dst->nb[2] + i03 * dst->nb[3]);

                    *y = *x;
                }
            }
        }
    }
}

static void ggml_compute_forward_upscale(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_upscale_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
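
// [illustrative sketch - hypothetical helper, not part of ggml]
// the upscale above is nearest-neighbor: destination pixel (n, m) reads source
// pixel (n/scale, m/scale); contiguous single-plane version:
static void upscale_nn_ref(const int w, const int h, const int scale, const float * src, float * dst) {
    for (int m = 0; m < h*scale; m++) {
        for (int n = 0; n < w*scale; n++) {
            dst[m*(w*scale) + n] = src[(m/scale)*w + (n/scale)];
        }
    }
}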

// ggml_compute_forward_flash_attn

static void ggml_compute_forward_flash_attn_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * q,
        const struct ggml_tensor * k,
        const struct ggml_tensor * v,
        const bool masked,
        struct ggml_tensor * dst) {
    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    GGML_TENSOR_LOCALS(int64_t, neq, q,   ne)
    GGML_TENSOR_LOCALS(size_t,  nbq, q,   nb)
    GGML_TENSOR_LOCALS(int64_t, nek, k,   ne)
    GGML_TENSOR_LOCALS(size_t,  nbk, k,   nb)
    GGML_TENSOR_LOCALS(int64_t, nev, v,   ne)
    GGML_TENSOR_LOCALS(size_t,  nbv, v,   nb)
    GGML_TENSOR_LOCALS(int64_t, ne,  dst, ne)
    GGML_TENSOR_LOCALS(size_t,  nb,  dst, nb)

    const int ith = params->ith;
    const int nth = params->nth;

    const int64_t D = neq0;
    const int64_t N = neq1;
    const int64_t P = nek1 - N;
    const int64_t M = P + N;

    const int Mup = ggml_up(M, GGML_SOFT_MAX_UNROLL);

    GGML_ASSERT(ne0 == D);
    GGML_ASSERT(ne1 == N);
    GGML_ASSERT(P >= 0);

    GGML_ASSERT(nbq0 == sizeof(float));
    GGML_ASSERT(nbk0 == sizeof(float));
    GGML_ASSERT(nbv0 == sizeof(float));

    GGML_ASSERT(neq0 == D);
    GGML_ASSERT(nek0 == D);
    GGML_ASSERT(nev1 == D);

    GGML_ASSERT(neq1 == N);
    GGML_ASSERT(nek1 == N + P);
    GGML_ASSERT(nev1 == D);

    // dst cannot be transposed or permuted
    GGML_ASSERT(nb0 == sizeof(float));
    GGML_ASSERT(nb0 <= nb1);
    GGML_ASSERT(nb1 <= nb2);
    GGML_ASSERT(nb2 <= nb3);

    if (params->type == GGML_TASK_INIT) {
        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // parallelize by q rows using ggml_vec_dot_f32

    // total rows in q
    const int nr = neq1*neq2*neq3;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    const float scale = 1.0f/sqrtf(D);

    //printf("P=%d N=%d D=%d ir0=%d ir1=%d scale = %f\n", P, N, D, ir0, ir1, scale);

    for (int ir = ir0; ir < ir1; ++ir) {
        // q indices
        const int iq3 = ir/(neq2*neq1);
        const int iq2 = (ir - iq3*neq2*neq1)/neq1;
        const int iq1 = (ir - iq3*neq2*neq1 - iq2*neq1);

        float * S = (float *) params->wdata + ith*(Mup + CACHE_LINE_SIZE_F32);

        for (int i = M; i < Mup; ++i) {
            S[i] = -INFINITY;
        }

        const int64_t masked_begin = masked ? (P + iq1 + 1) : M;
        for (int64_t ic = 0; ic < masked_begin; ++ic) {
            // k indices
            const int ik3 = iq3;
            const int ik2 = iq2 % nek2;
            const int ik1 = ic;

            // S indices
            const int i1 = ik1;

            ggml_vec_dot_f32(neq0,
                    S + i1,
                    (float *) ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)),
                    (float *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)));
        }

        // scale
        ggml_vec_scale_f32(masked_begin, S, scale);

        for (int64_t i = masked_begin; i < M; i++) {
            S[i] = -INFINITY;
        }

        // softmax
        // exclude known -INF S[..] values from max and loop
        // don't forget to set their S values to zero
        {
            float max = -INFINITY;
            ggml_vec_max_f32(masked_begin, &max, S);

            ggml_float sum = 0.0;
            {
#ifdef GGML_SOFT_MAX_ACCELERATE
                max = -max;
                vDSP_vsadd(S, 1, &max, S, 1, Mup);
                vvexpf(S, S, &Mup);
                ggml_vec_sum_f32(Mup, &sum, S);
#else
                uint16_t   scvt[GGML_SOFT_MAX_UNROLL]; UNUSED(scvt);
                ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 };

                for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) {
                    if (i >= masked_begin) {
                        break;
                    }
                    float * SS = S + i;

                    for (int j = 0; j < GGML_SOFT_MAX_UNROLL; ++j) {
                        if (i + j >= masked_begin) {
                            break;
                        } else if (SS[j] == -INFINITY) {
                            SS[j] = 0.0f;
                        } else {
#ifndef GGML_FLASH_ATTN_EXP_FP16
                            const float val = expf(SS[j] - max);
#else
                            ggml_fp16_t s = GGML_FP32_TO_FP16(SS[j] - max);
                            memcpy(&scvt[j], &s, sizeof(uint16_t));
                            const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt[j]]);
#endif
                            sump[j] += (ggml_float)val;
                            SS[j] = val;
                        }
                    }
                }

                for (int i = 0; i < GGML_SOFT_MAX_UNROLL; i++) {
                    sum += sump[i];
                }
#endif
            }

            assert(sum > 0.0);

            sum = 1.0/sum;
            ggml_vec_scale_f32(masked_begin, S, sum);

#ifndef NDEBUG
            for (int i = 0; i < masked_begin; ++i) {
                assert(!isnan(S[i]));
                assert(!isinf(S[i]));
            }
#endif
        }

        for (int64_t ic = 0; ic < nev1; ++ic) {
            // dst indices
            const int i1 = iq1;
            const int i2 = iq2;
            const int i3 = iq3;

            // v indices
            const int iv2 = iq2 % nev2;
            const int iv3 = iq3;

            ggml_vec_dot_f32(masked_begin,
                    (float *) ((char *) dst->data + (ic*nb0 + i1*nb1  + i2*nb2   + i3*nb3)),
                    (float *) ((char *) v->data   + (        ic*nbv1 + iv2*nbv2 + iv3*nbv3)),
                    S);
        }
    }
}
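
// [illustrative sketch - hypothetical helper, not part of ggml]
// reference semantics of the kernel above for a single query row:
// out = softmax(K q / sqrt(D)) . V, with V stored as D rows of length M
// (nev1 == D above); causal masking is omitted for brevity:
static void flash_attn_row_ref(
        const int D, const int M,
        const float * q,   // D
        const float * K,   // M x D
        const float * V,   // D x M
        float * S,         // scratch, M
        float * out) {     // D
    const float scale = 1.0f/sqrtf((float) D);

    // scores + running max for a numerically stable softmax
    float max = -INFINITY;
    for (int i = 0; i < M; ++i) {
        float s = 0.0f;
        for (int d = 0; d < D; ++d) {
            s += K[i*D + d]*q[d];
        }
        S[i] = s*scale;
        max = S[i] > max ? S[i] : max;
    }

    float sum = 0.0f;
    for (int i = 0; i < M; ++i) {
        S[i] = expf(S[i] - max);
        sum += S[i];
    }
    for (int i = 0; i < M; ++i) {
        S[i] /= sum;
    }

    // weighted sum over V
    for (int d = 0; d < D; ++d) {
        float v = 0.0f;
        for (int i = 0; i < M; ++i) {
            v += V[d*M + i]*S[i];
        }
        out[d] = v;
    }
}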
static void ggml_compute_forward_flash_attn_f16(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * q,
        const struct ggml_tensor * k,
        const struct ggml_tensor * v,
        const bool masked,
        struct ggml_tensor * dst) {
    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    GGML_TENSOR_LOCALS(int64_t, neq, q,   ne)
    GGML_TENSOR_LOCALS(size_t,  nbq, q,   nb)
    GGML_TENSOR_LOCALS(int64_t, nek, k,   ne)
    GGML_TENSOR_LOCALS(size_t,  nbk, k,   nb)
    GGML_TENSOR_LOCALS(int64_t, nev, v,   ne)
    GGML_TENSOR_LOCALS(size_t,  nbv, v,   nb)
    GGML_TENSOR_LOCALS(int64_t, ne,  dst, ne)
    GGML_TENSOR_LOCALS(size_t,  nb,  dst, nb)

    const int ith = params->ith;
    const int nth = params->nth;

    const int64_t D = neq0;
    const int64_t N = neq1;
    const int64_t P = nek1 - N;
    const int64_t M = P + N;

    const int Mup = ggml_up(M, GGML_SOFT_MAX_UNROLL);
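
    // M is rounded up to a multiple of GGML_SOFT_MAX_UNROLL so the softmax below can run
    // in fixed-size unrolled strides; the padding slots S[M..Mup) are primed with
    // -INFINITY and therefore contribute exp(-INF) == 0 to the sum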
    GGML_ASSERT(ne0 == D);
    GGML_ASSERT(ne1 == N);
    GGML_ASSERT(P >= 0);

    GGML_ASSERT(nbq0 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nbk0 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nbv0 == sizeof(ggml_fp16_t));

    GGML_ASSERT(neq0 == D);
    GGML_ASSERT(nek0 == D);
    GGML_ASSERT(nev1 == D);

    GGML_ASSERT(neq1 == N);
    GGML_ASSERT(nek1 == N + P);
    GGML_ASSERT(nev1 == D);

    // dst cannot be transposed or permuted
    GGML_ASSERT(nb0 == sizeof(float));
    GGML_ASSERT(nb0 <= nb1);
    GGML_ASSERT(nb1 <= nb2);
    GGML_ASSERT(nb2 <= nb3);

    if (params->type == GGML_TASK_INIT) {
        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // parallelize by q rows using ggml_vec_dot_f16
    // total rows in q
    const int nr = neq1*neq2*neq3;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    const float scale = 1.0f/sqrtf(D);
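
    // standard scaled dot-product attention: dividing the logits by sqrt(D) keeps their
    // variance roughly independent of the head dimension, which stops the softmax from
    // saturating for large D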
    //printf("P=%d N=%d D=%d ir0=%d ir1=%d scale = %f\n", P, N, D, ir0, ir1, scale);

    for (int ir = ir0; ir < ir1; ++ir) {
        // q indices
        const int iq3 = ir/(neq2*neq1);
        const int iq2 = (ir - iq3*neq2*neq1)/neq1;
        const int iq1 = (ir - iq3*neq2*neq1 - iq2*neq1);

        float * S = (float *) params->wdata + ith*(2*Mup + CACHE_LINE_SIZE_F32);

        for (int i = M; i < Mup; ++i) {
            S[i] = -INFINITY;
        }

        if (GGML_VEC_DOT_UNROLL > 2 || nek1 % GGML_VEC_DOT_UNROLL != 0) {
            for (int64_t ic = 0; ic < nek1; ++ic) {
                // k indices
                const int ik3 = iq3;
                const int ik2 = iq2 % nek2;
                const int ik1 = ic;

                // S indices
                const int i1 = ik1;

                ggml_vec_dot_f16(neq0,
                        S + i1,
                        (ggml_fp16_t *) ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)),
                        (ggml_fp16_t *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)));
            }
        } else {
            for (int64_t ic = 0; ic < nek1; ic += GGML_VEC_DOT_UNROLL) {
                // k indices
                const int ik3 = iq3;
                const int ik2 = iq2 % nek2;
                const int ik1 = ic;

                // S indices
                const int i1 = ik1;

                ggml_vec_dot_f16_unroll(neq0, nbk1,
                        S + i1,
                        ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)),
                        (ggml_fp16_t *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)));
            }
        }

        // scale
        ggml_vec_scale_f32(nek1, S, scale);

        if (masked) {
            for (int64_t i = P; i < M; i++) {
                if (i > P + iq1) {
                    S[i] = -INFINITY;
                }
            }
        }
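
        // causal mask: key position P + iq1 corresponds to the current query row, so any
        // key strictly past it is set to -INFINITY and receives zero weight after softmax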
        // softmax
        // todo: exclude known -INF S[..] values from max and loop, assuming their results to be zero.
        // don't forget to set their S values to zero
        {
            float max = -INFINITY;
            ggml_vec_max_f32(M, &max, S);

            ggml_float sum = 0.0;
            {
#ifdef GGML_SOFT_MAX_ACCELERATE
                max = -max;
                vDSP_vsadd(S, 1, &max, S, 1, Mup);
                vvexpf(S, S, &Mup);
                ggml_vec_sum_f32(Mup, &sum, S);
#else
                uint16_t   scvt[GGML_SOFT_MAX_UNROLL];
                ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 };

                for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) {
                    float * SS = S + i;

                    for (int j = 0; j < GGML_SOFT_MAX_UNROLL; ++j) {
                        if (SS[j] == -INFINITY) {
                            SS[j] = 0.0f;
                        } else {
                            ggml_fp16_t s = GGML_FP32_TO_FP16(SS[j] - max);
                            memcpy(&scvt[j], &s, sizeof(uint16_t));
                            const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt[j]]);
                            sump[j] += (ggml_float)val;
                            SS[j] = val;
                        }
                    }
                }

                for (int i = 0; i < GGML_SOFT_MAX_UNROLL; i++) {
                    sum += sump[i];
                }
#endif
            }

            assert(sum > 0.0);

            sum = 1.0/sum;
            ggml_vec_scale_f32(M, S, sum);

#ifndef NDEBUG
            for (int i = 0; i < M; ++i) {
                assert(!isnan(S[i]));
                assert(!isinf(S[i]));
            }
#endif
        }

        ggml_fp16_t * S16 = (ggml_fp16_t *) ((float *) params->wdata + ith*(2*Mup + CACHE_LINE_SIZE_F32) + Mup);

        for (int64_t i = 0; i < M; i++) {
            S16[i] = GGML_FP32_TO_FP16(S[i]);
        }
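
        // the normalized scores are converted once to F16 so the accumulation against V
        // below can reuse the F16 dot kernels; S16 lives in the second Mup-sized half of
        // this thread's scratch row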
        // todo: exclude known zero S[..] values from dot (reducing nev0 and increasing begin of v and S16).
        if (GGML_VEC_DOT_UNROLL == 1 || (nev1 % GGML_VEC_DOT_UNROLL != 0)) {
            for (int64_t ic = 0; ic < nev1; ++ic) {
                // dst indices
                const int i1 = iq1;
                const int i2 = iq2;
                const int i3 = iq3;

                // v indices
                const int iv2 = iq2 % nev2;
                const int iv3 = iq3;

                ggml_vec_dot_f16(nev0,
                        (float *)       ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)),
                        (ggml_fp16_t *) ((char *) v->data   + (        ic*nbv1 + iv2*nbv2 + iv3*nbv3)),
                        S16);
            }
        } else {
            for (int64_t ic = 0; ic < nev1; ic += GGML_VEC_DOT_UNROLL) {
                // dst indices
                const int i1 = iq1;
                const int i2 = iq2;
                const int i3 = iq3;

                // v indices
                const int iv2 = iq2 % nev2;
                const int iv3 = iq3;

                ggml_vec_dot_f16_unroll(nev0, nbv1,
                        (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)),
                        ((char *) v->data + (ic*nbv1 + iv2*nbv2 + iv3*nbv3)),
                        S16);
            }
        }
    }
}

static void ggml_compute_forward_flash_attn(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * q,
        const struct ggml_tensor * k,
        const struct ggml_tensor * v,
        const bool masked,
        struct ggml_tensor * dst) {
    switch (q->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_flash_attn_f16(params, q, k, v, masked, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_flash_attn_f32(params, q, k, v, masked, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
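
// a minimal usage sketch (editor's illustration, assuming the public ggml_flash_attn()
// builder declared in ggml.h with signature (ctx, q, k, v, masked)):
//
//     // q: [D, N, H, B], k: [D, N + P, H, B], v: [N + P, D, H, B]
//     struct ggml_tensor * kqv = ggml_flash_attn(ctx, q, k, v, /*masked =*/ true);
//
// building the op only records it in the graph; the kernels above run when the graph is
// computed, dispatched on q->type by ggml_compute_forward_flash_attn()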
// ggml_compute_forward_flash_ff
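
// computes a fused feed-forward block; in the math of the implementation below:
//
//     dst = proj_w @ gelu(fc_w @ a + fc_b) + proj_b
//
// with fc_w/proj_w in F16 and the biases in F32 (see the parameter comments)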
static void ggml_compute_forward_flash_ff_f16(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * a,  // F16
        const struct ggml_tensor * b0, // F16 fc_w
        const struct ggml_tensor * b1, // F32 fc_b
        const struct ggml_tensor * c0, // F16 proj_w
        const struct ggml_tensor * c1, // F32 proj_b
        struct ggml_tensor * dst) {
    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    GGML_TENSOR_LOCALS(int64_t, nea,  a,   ne)
    GGML_TENSOR_LOCALS(size_t,  nba,  a,   nb)
    GGML_TENSOR_LOCALS(int64_t, neb0, b0,  ne)
    GGML_TENSOR_LOCALS(size_t,  nbb0, b0,  nb)
    GGML_TENSOR_LOCALS(int64_t, neb1, b1,  ne)
    GGML_TENSOR_LOCALS(size_t,  nbb1, b1,  nb)
    GGML_TENSOR_LOCALS(int64_t, nec0, c0,  ne)
    GGML_TENSOR_LOCALS(size_t,  nbc0, c0,  nb)
    GGML_TENSOR_LOCALS(int64_t, nec1, c1,  ne)
    GGML_TENSOR_LOCALS(size_t,  nbc1, c1,  nb)
    GGML_TENSOR_LOCALS(int64_t, ne,   dst, ne)
    GGML_TENSOR_LOCALS(size_t,  nb,   dst, nb)

    const int ith = params->ith;
    const int nth = params->nth;

    const int64_t D = nea0;
    //const int64_t N = nea1;
    const int64_t M = neb01;

    GGML_ASSERT(ne0 == nea0);
    GGML_ASSERT(ne1 == nea1);
    GGML_ASSERT(ne2 == nea2);

    GGML_ASSERT(nba0  == sizeof(ggml_fp16_t));
    GGML_ASSERT(nbb00 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nbb10 == sizeof(float));
    GGML_ASSERT(nbc00 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nbc10 == sizeof(float));

    GGML_ASSERT(neb00 == D);
    GGML_ASSERT(neb01 == M);
    GGML_ASSERT(neb10 == M);
    GGML_ASSERT(neb11 == 1);

    GGML_ASSERT(nec00 == M);
    GGML_ASSERT(nec01 == D);
    GGML_ASSERT(nec10 == D);
    GGML_ASSERT(nec11 == 1);

    // dst cannot be transposed or permuted
    GGML_ASSERT(nb0 == sizeof(float));
    GGML_ASSERT(nb0 <= nb1);
    GGML_ASSERT(nb1 <= nb2);
    GGML_ASSERT(nb2 <= nb3);

    if (params->type == GGML_TASK_INIT) {
        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // parallelize by a rows using ggml_vec_dot_f16
    // total rows in a
    const int nr = nea1*nea2*nea3;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int ir = ir0; ir < ir1; ++ir) {
        // a indices
        const int ia3 = ir/(nea2*nea1);
        const int ia2 = (ir - ia3*nea2*nea1)/nea1;
        const int ia1 = (ir - ia3*nea2*nea1 - ia2*nea1);

        float * S = (float *) params->wdata + ith*(2*M + CACHE_LINE_SIZE_F32);

        for (int64_t ic = 0; ic < neb01; ++ic) {
            // b0 indices
            const int ib03 = ia3;
            const int ib02 = ia2;
            const int ib01 = ic;

            // S indices
            const int i1 = ib01;

            ggml_vec_dot_f16(nea0,
                    S + i1,
                    (ggml_fp16_t *) ((char *) b0->data + (ib01*nbb01 + ib02*nbb02 + ib03*nbb03)),
                    (ggml_fp16_t *) ((char *)  a->data + ( ia1*nba1  +  ia2*nba2  +  ia3*nba3)));
        }

        ggml_vec_add_f32(neb01, S, S, (float *) b1->data);
        //ggml_vec_gelu_f32(neb01, S, S);

        ggml_fp16_t * S16 = (ggml_fp16_t *) ((float *) params->wdata + ith*(2*M + CACHE_LINE_SIZE_F32) + M);

        for (int64_t i = 0; i < M; i++) {
            S16[i] = GGML_FP32_TO_FP16(S[i]);
        }

        ggml_vec_gelu_f16(neb01, S16, S16);

        {
            // dst indices
            const int i1 = ia1;
            const int i2 = ia2;
            const int i3 = ia3;

            for (int64_t ic = 0; ic < nec01; ++ic) {
                ggml_vec_dot_f16(neb01,
                        (float *)       ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)),
                        (ggml_fp16_t *) ((char *) c0->data  + (        ic*nbc01 + i2*nbc02 + i3*nbc03)),
                        S16);
            }

            ggml_vec_add_f32(nec01,
                    (float *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3)),
                    (float *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3)),
                    (float *) c1->data);
        }
    }
}
static void ggml_compute_forward_flash_ff(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * a,
        const struct ggml_tensor * b0,
        const struct ggml_tensor * b1,
        const struct ggml_tensor * c0,
        const struct ggml_tensor * c1,
        struct ggml_tensor * dst) {
    switch (b0->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_flash_ff_f16(params, a, b0, b1, c0, c1, dst);
            } break;
        case GGML_TYPE_F32:
            {
                GGML_ASSERT(false); // TODO
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_flash_attn_back

static void ggml_compute_forward_flash_attn_back_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * q,
        const struct ggml_tensor * k,
        const struct ggml_tensor * v,
        const struct ggml_tensor * d,
        const bool masked,
        struct ggml_tensor * dst) {
    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    GGML_TENSOR_LOCALS(int64_t, neq, q,   ne)
    GGML_TENSOR_LOCALS(size_t,  nbq, q,   nb)
    GGML_TENSOR_LOCALS(int64_t, nek, k,   ne)
    GGML_TENSOR_LOCALS(size_t,  nbk, k,   nb)
    GGML_TENSOR_LOCALS(int64_t, nev, v,   ne)
    GGML_TENSOR_LOCALS(size_t,  nbv, v,   nb)
    GGML_TENSOR_LOCALS(int64_t, ned, d,   ne)
    GGML_TENSOR_LOCALS(size_t,  nbd, d,   nb)
    GGML_TENSOR_LOCALS(int64_t, ne,  dst, ne)
    GGML_TENSOR_LOCALS(size_t,  nb,  dst, nb)

    const int ith = params->ith;
    const int nth = params->nth;

    const int64_t D = neq0;
    const int64_t N = neq1;
    const int64_t P = nek1 - N;
    const int64_t M = P + N;

    const int Mup  = ggml_up(M, GGML_SOFT_MAX_UNROLL);
    const int mxDM = MAX(D, Mup);

    // GGML_ASSERT(ne0 == D);
    // GGML_ASSERT(ne1 == N);
    GGML_ASSERT(P >= 0);

    GGML_ASSERT(nbq0 == sizeof(float));
    GGML_ASSERT(nbk0 == sizeof(float));
    GGML_ASSERT(nbv0 == sizeof(float));

    GGML_ASSERT(neq0 == D);
    GGML_ASSERT(nek0 == D);
    GGML_ASSERT(nev1 == D);
    GGML_ASSERT(ned0 == D);

    GGML_ASSERT(neq1 == N);
    GGML_ASSERT(nek1 == N + P);
    GGML_ASSERT(nev1 == D);
    GGML_ASSERT(ned1 == N);

    // dst cannot be transposed or permuted
    GGML_ASSERT(nb0 == sizeof(float));
    GGML_ASSERT(nb0 <= nb1);
    GGML_ASSERT(nb1 <= nb2);
    GGML_ASSERT(nb2 <= nb3);

    if (params->type == GGML_TASK_INIT) {
        if (ith == 0) {
            memset(dst->data, 0, nb0*ne0*ne1*ne2*ne3);
        }
        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int64_t elem_q = ggml_nelements(q);
    const int64_t elem_k = ggml_nelements(k);

    enum ggml_type result_type = dst->type;
    GGML_ASSERT(ggml_blck_size(result_type) == 1);
    const size_t tsize = ggml_type_size(result_type);

    const size_t offs_q = 0;
    const size_t offs_k = offs_q + GGML_PAD(elem_q * tsize, GGML_MEM_ALIGN);
    const size_t offs_v = offs_k + GGML_PAD(elem_k * tsize, GGML_MEM_ALIGN);

    void * grad_q = (char *) dst->data;
    void * grad_k = (char *) dst->data + offs_k;
    void * grad_v = (char *) dst->data + offs_v;

    const size_t nbgq1 = nb0*neq0;
    const size_t nbgq2 = nb0*neq0*neq1;
    const size_t nbgq3 = nb0*neq0*neq1*neq2;

    const size_t nbgk1 = nb0*nek0;
    const size_t nbgk2 = nb0*nek0*nek1;
    const size_t nbgk3 = nb0*nek0*nek1*neq2;

    const size_t nbgv1 = nb0*nev0;
    const size_t nbgv2 = nb0*nev0*nev1;
    const size_t nbgv3 = nb0*nev0*nev1*neq2;
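
    // note: the single dst tensor packs all three gradients back to back -
    // [grad_q | grad_k | grad_v] with GGML_MEM_ALIGN padding between them - so the
    // gradient strides above are derived from the logical q/k/v shapes instead of dst->nb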
    // parallelize by k rows using ggml_vec_dot_f32

    // total rows in k
    const int nr = nek2*nek3;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    const float scale = 1.0f/sqrtf(D);

    //printf("P=%d N=%d D=%d ir0=%d ir1=%d scale = %f\n", P, N, D, ir0, ir1, scale);

    // how often k2 (and v2) is repeated in q2
    int nrep = neq2/nek2;

    for (int ir = ir0; ir < ir1; ++ir) {
        // k indices
        const int ik3 = ir/(nek2);
        const int ik2 = ir - ik3*nek2;

        const int iq3 = ik3;
        const int id3 = ik3;
        const int iv3 = ik3;
        const int iv2 = ik2;

        for (int irep = 0; irep < nrep; ++irep) {
            const int iq2 = ik2 + irep*nek2;
            const int id2 = iq2;

            // (ik2 + irep*nek2) % nek2 == ik2
            for (int iq1 = 0; iq1 < neq1; ++iq1) {
                const int id1 = iq1;

                // not sure about CACHE_LINE_SIZE_F32..
                // - maybe it must not be multiplied by 2 and excluded from .. in SM 1*(..) offset?
                float * S  = (float *) params->wdata + ith*2*(mxDM + CACHE_LINE_SIZE_F32) + 0*(mxDM+CACHE_LINE_SIZE_F32);
                float * SM = (float *) params->wdata + ith*2*(mxDM + CACHE_LINE_SIZE_F32) + 1*(mxDM+CACHE_LINE_SIZE_F32);

                for (int i = M; i < Mup; ++i) {
                    S[i] = -INFINITY;
                }

                const int64_t masked_begin = masked ? (P + iq1 + 1) : M;
                for (int64_t ic = 0; ic < masked_begin; ++ic) {
                    // k indices
                    const int ik1 = ic;

                    // S indices
                    const int i1 = ik1;

                    ggml_vec_dot_f32(neq0,
                            S + i1,
                            (float *) ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)),
                            (float *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)));
                }

                // scale
                ggml_vec_scale_f32(masked_begin, S, scale);

                for (int64_t i = masked_begin; i < M; i++) {
                    S[i] = -INFINITY;
                }

                // softmax
                // exclude known -INF S[..] values from max and loop
                // don't forget to set their SM values to zero
                {
                    float max = -INFINITY;
                    ggml_vec_max_f32(masked_begin, &max, S);

                    ggml_float sum = 0.0;
                    {
#ifdef GGML_SOFT_MAX_ACCELERATE
                        max = -max;
                        // read from S and write into SM, the exponentiated copy used below
                        vDSP_vsadd(S, 1, &max, SM, 1, Mup);
                        vvexpf(SM, SM, &Mup);
                        ggml_vec_sum_f32(Mup, &sum, SM);
#else
                        uint16_t   scvt[GGML_SOFT_MAX_UNROLL]; UNUSED(scvt);
                        ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 };

                        for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) {
                            if (i >= masked_begin) {
                                break;
                            }
                            float * SR = S  + i;
                            float * SW = SM + i;

                            for (int j = 0; j < GGML_SOFT_MAX_UNROLL; ++j) {
                                if (i + j >= masked_begin) {
                                    break;
                                } else if (SR[j] == -INFINITY) {
                                    SW[j] = 0.0f;
                                } else {
#ifndef GGML_FLASH_ATTN_EXP_FP16
                                    const float val = expf(SR[j] - max);
#else
                                    ggml_fp16_t s = GGML_FP32_TO_FP16(SR[j] - max);
                                    memcpy(&scvt[j], &s, sizeof(uint16_t));
                                    const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt[j]]);
#endif
                                    sump[j] += (ggml_float)val;
                                    SW[j] = val;
                                }
                            }
                        }

                        for (int i = 0; i < GGML_SOFT_MAX_UNROLL; i++) {
                            sum += sump[i];
                        }
#endif
                    }

                    assert(sum > 0.0);

                    sum = 1.0/sum;
                    ggml_vec_scale_f32(masked_begin, SM, sum);
                }
                // step-by-step explanation
                {
                    // forward-process                    shape      grads from backward process
                    // parallel_for ik2,ik3:
                    //  for irep:
                    //   iq2 = ik2 + irep*nek2
                    //   k[:D,:M,:,:]                     [D,M,:,:]  grad[k][:D,:M,ik2,ik3]  += grad[kcur]
                    //   q[:D,:N,:,:]                     [D,N,:,:]  grad[q][:D,iq1,iq2,iq3] += grad[qcur]
                    //   v[:M,:D,:,:]                     [M,D,:,:]  grad[v][:M,:D,iv2,iv3]  += grad[vcur]
                    //   for iq1:
                    //    kcur   = k[:D,:M,ik2,ik3]       [D,M,1,1]  grad[kcur] = grad[S1].T @ qcur
                    //    qcur   = q[:D,iq1,iq2,iq3]      [D,1,1,1]  grad[qcur] = grad[S1]   @ kcur
                    //    vcur   = v[:M,:D,iv2,iv3]       [M,D,1,1]  grad[vcur] = grad[S5].T @ S4
                    //    S0     = -Inf                   [D,1,1,1]
                    //   ~S1[i]  = dot(kcur[:D,i], qcur)
                    //    S1     = qcur @ kcur.T          [M,1,1,1]  grad[S1]   = grad[S2] * scale
                    //    S2     = S1 * scale             [M,1,1,1]  grad[S2]   = diag_mask_zero(grad[S3], P)
                    //    S3     = diag_mask_inf(S2, P)   [M,1,1,1]  grad[S3]   = S4 * (grad[S4] - dot(S4, grad[S4]))
                    //    S4     = softmax(S3)            [M,1,1,1]  grad[S4]   = grad[S5] @ vcur
                    //   ~S5[i]  = dot(vcur[:,i], S4)
                    //    S5     = S4 @ vcur.T            [D,1,1,1]  grad[S5]   = d[:D,id1,id2,id3]
                    //   ~dst[i,iq1,iq2,iq3]  = S5[i]               ^
                    //    dst[:D,iq1,iq2,iq3] = S5                  | grad[dst[:D,iq1,iq2,iq3]] = d[:D,id1,id2,id3]
                    // dst                                backward-/ grad[dst]                 = d
                    //
                    // output gradients with their dependencies:
                    //
                    // grad[kcur] = grad[S1].T @ qcur
                    // grad[S1]   = diag_mask_zero(grad[S3], P) * scale
                    // grad[S3]   = S4 * (grad[S4] - dot(S4, grad[S4]))
                    // grad[S4]   = grad[S5] @ vcur
                    // grad[S4]   = d[:D,id1,id2,id3] @ vcur
                    // grad[qcur] = grad[S1] @ kcur
                    // grad[vcur] = grad[S5].T @ S4
                    // grad[vcur] = d[:D,id1,id2,id3].T @ S4
                    //
                    // in post-order:
                    //
                    // S1         = qcur @ kcur.T
                    // S2         = S1 * scale
                    // S3         = diag_mask_inf(S2, P)
                    // S4         = softmax(S3)
                    // grad[S4]   = d[:D,id1,id2,id3] @ vcur
                    // grad[S3]   = S4 * (grad[S4] - dot(S4, grad[S4]))
                    // grad[S1]   = diag_mask_zero(grad[S3], P) * scale
                    // grad[qcur] = grad[S1] @ kcur
                    // grad[kcur] = grad[S1].T @ qcur
                    // grad[vcur] = d[:D,id1,id2,id3].T @ S4
                    //
                    // using less variables (SM=S4):
                    //
                    // S             = diag_mask_inf(qcur @ kcur.T * scale, P)
                    // SM            = softmax(S)
                    // S             = d[:D,id1,id2,id3] @ vcur
                    // dot_SM_gradSM = dot(SM, S)
                    // S             = SM * (S - dot(SM, S))
                    // S             = diag_mask_zero(S, P) * scale
                    //
                    // grad[q][:D,iq1,iq2,iq3] += S   @ kcur
                    // grad[k][:D,:M,ik2,ik3]  += S.T @ qcur
                    // grad[v][:M,:D,iv2,iv3]  += d[:D,id1,id2,id3].T @ SM
                }
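
                // the grad[S3] line is the softmax Jacobian-vector product: for y = softmax(x)
                // and upstream gradient g, dL/dx_i = y_i * (g_i - sum_j y_j*g_j), which is
                // exactly what "SM * (S - dot(SM, S))" computes below with SM = y and S = g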
                // S = gradSM = d[:D,id1,id2,id3] @ vcur[:,:,iv2,iv3]
                // S = d[:D,id1,id2,id3] @ vcur[:,:,iv2,iv3]
                // for ic:
                //   S[:M] += vcur[:M,ic,iv2,iv3] * d[ic,id1,id2,id3]
                // exclude known future zero S[..] values from operation
                ggml_vec_set_f32(masked_begin, S, 0);
                for (int64_t ic = 0; ic < D; ++ic) {
                    ggml_vec_mad_f32(masked_begin,
                            S,
                             (float *) ((char *) v->data + (          ic*nbv1 + iv2*nbv2 + iv3*nbv3)),
                            *(float *) ((char *) d->data + (ic*nbd0 + id1*nbd1 + id2*nbd2 + id3*nbd3)));
                }

                // S = SM * (S - dot(SM, S))
                float dot_SM_gradSM = 0;
                ggml_vec_dot_f32 (masked_begin, &dot_SM_gradSM, SM, S);
                ggml_vec_acc1_f32(M, S, -dot_SM_gradSM);
                ggml_vec_mul_f32 (masked_begin, S, S, SM);

                // S = diag_mask_zero(S, P) * scale
                // already done by above ggml_vec_set_f32
                // exclude known zero S[..] values from operation
                ggml_vec_scale_f32(masked_begin, S, scale);

                // S    shape [M,1]
                // SM   shape [M,1]
                // kcur shape [D,M]
                // qcur shape [D,1]
                // vcur shape [M,D]

                // grad[q][:D,iq1,iq2,iq3] += S @ kcur
                // grad[q][:D,iq1,iq2,iq3] += shape[M,1] @ shape[D,M]
                // for ic:
                //  grad[q][:D,iq1,iq2,iq3] += S[ic] * kcur[:D,ic,ik2,ik3]
                // exclude known zero S[..] values from loop
                for (int64_t ic = 0; ic < masked_begin; ++ic) {
                    ggml_vec_mad_f32(D,
                            (float *) ((char *) grad_q  + (iq1*nbgq1 + iq2*nbgq2 + iq3*nbgq3)),
                            (float *) ((char *) k->data + (ic*nbk1   + ik2*nbk2  + ik3*nbk3)),
                            S[ic]);
                }

                // grad[k][:D,:M,ik2,ik3] += S.T @ qcur
                // for ic:
                //  grad[k][:D,ic,ik2,ik3] += S.T[0,ic] * qcur[:D,0]
                //  grad[k][:D,ic,ik2,ik3] += S[ic]     * qcur[:D,0]
                // exclude known zero S[..] values from loop
                for (int64_t ic = 0; ic < masked_begin; ++ic) {
                    ggml_vec_mad_f32(D,
                            (float *) ((char *) grad_k  + (ic*nbgk1 + ik2*nbgk2 + ik3*nbgk3)),
                            (float *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2  + iq3*nbq3)),
                            S[ic]);
                }

                // grad[v][:M,:D,iv2,iv3] += d[:D,id1,id2,id3].T @ SM
                // for ic:
                //  grad[v][:M,ic,iv2,iv3] += d[:D,id1,id2,id3].T[0,ic] * SM[:M]
                //  grad[v][:M,ic,iv2,iv3] += d[ic,id1,id2,id3]         * SM[:M]
                // exclude known zero SM[..] values from mad
                for (int64_t ic = 0; ic < D; ++ic) {
                    ggml_vec_mad_f32(masked_begin,
                             (float *) ((char *) grad_v  + (          ic*nbgv1 + iv2*nbgv2 + iv3*nbgv3)),
                            SM,
                            *(float *) ((char *) d->data + (ic*nbd0 + id1*nbd1 + id2*nbd2 + id3*nbd3)));
                }
            }
        }
    }
}
static void ggml_compute_forward_flash_attn_back(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * q,
        const struct ggml_tensor * k,
        const struct ggml_tensor * v,
        const struct ggml_tensor * d,
        const bool masked,
        struct ggml_tensor * dst) {
    switch (q->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_flash_attn_back_f32(params, q, k, v, d, masked, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_win_part
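
// partitions a feature map into non-overlapping w x w windows, zero-padding the
// right/bottom edges so every window is complete; each window becomes one entry along
// the outermost dst dimension (used by windowed-attention models such as SAM's image
// encoder, referenced further below)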
static void ggml_compute_forward_win_part_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne)
    GGML_TENSOR_LOCALS(int64_t, ne,  dst,  ne)

    const int32_t nep0 = ((const int32_t *)(dst->op_params))[0];
    const int32_t nep1 = ((const int32_t *)(dst->op_params))[1];
    const int32_t w    = ((const int32_t *)(dst->op_params))[2];

    assert(ne00 == ne0);
    assert(ne3  == nep0*nep1);

    // TODO: optimize / multi-thread
    for (int py = 0; py < nep1; ++py) {
        for (int px = 0; px < nep0; ++px) {
            const int64_t i3 = py*nep0 + px;
            for (int64_t i2 = 0; i2 < ne2; ++i2) {
                for (int64_t i1 = 0; i1 < ne1; ++i1) {
                    for (int64_t i0 = 0; i0 < ne0; ++i0) {
                        const int64_t i02 = py*w + i2;
                        const int64_t i01 = px*w + i1;
                        const int64_t i00 = i0;

                        const int64_t i = i3*ne2*ne1*ne0 + i2*ne1*ne0 + i1*ne0 + i0;
                        const int64_t j = i02*ne01*ne00 + i01*ne00 + i00;

                        if (py*w + i2 >= ne02 || px*w + i1 >= ne01) {
                            ((float *) dst->data)[i] = 0.0f;
                        } else {
                            ((float *) dst->data)[i] = ((float *) src0->data)[j];
                        }
                    }
                }
            }
        }
    }
}

static void ggml_compute_forward_win_part(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_win_part_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_win_unpart
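
// the inverse of win_part: stitches the w x w windows back into a single feature map,
// dropping the zero padding that win_part introduced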
static void ggml_compute_forward_win_unpart_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne)
    GGML_TENSOR_LOCALS(int64_t, ne,  dst,  ne)

    const int32_t w = ((const int32_t *)(dst->op_params))[0];

    // padding
    const int px = (w - ne1%w)%w;
    //const int py = (w - ne2%w)%w;

    const int npx = (px + ne1)/w;
    //const int npy = (py + ne2)/w;

    assert(ne0 == ne00);

    // TODO: optimize / multi-thread
    for (int64_t i2 = 0; i2 < ne2; ++i2) {
        for (int64_t i1 = 0; i1 < ne1; ++i1) {
            for (int64_t i0 = 0; i0 < ne0; ++i0) {
                const int ip2 = i2/w;
                const int ip1 = i1/w;

                const int64_t i02 = i2%w;
                const int64_t i01 = i1%w;
                const int64_t i00 = i0;

                const int64_t i = (ip2*npx + ip1)*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00 + i00;
                const int64_t j = i2*ne1*ne0 + i1*ne0 + i0;

                ((float *) dst->data)[j] = ((float *) src0->data)[i];
            }
        }
    }
}

static void ggml_compute_forward_win_unpart(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_win_unpart_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_unary

static void ggml_compute_forward_unary(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    const enum ggml_unary_op op = ggml_get_unary_op(dst);

    switch (op) {
        case GGML_UNARY_OP_ABS:
            {
                ggml_compute_forward_abs(params, src0, dst);
            } break;
        case GGML_UNARY_OP_SGN:
            {
                ggml_compute_forward_sgn(params, src0, dst);
            } break;
        case GGML_UNARY_OP_NEG:
            {
                ggml_compute_forward_neg(params, src0, dst);
            } break;
        case GGML_UNARY_OP_STEP:
            {
                ggml_compute_forward_step(params, src0, dst);
            } break;
        case GGML_UNARY_OP_TANH:
            {
                ggml_compute_forward_tanh(params, src0, dst);
            } break;
        case GGML_UNARY_OP_ELU:
            {
                ggml_compute_forward_elu(params, src0, dst);
            } break;
        case GGML_UNARY_OP_RELU:
            {
                ggml_compute_forward_relu(params, src0, dst);
            } break;
        case GGML_UNARY_OP_GELU:
            {
                ggml_compute_forward_gelu(params, src0, dst);
            } break;
        case GGML_UNARY_OP_GELU_QUICK:
            {
                ggml_compute_forward_gelu_quick(params, src0, dst);
            } break;
        case GGML_UNARY_OP_SILU:
            {
                ggml_compute_forward_silu(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_get_rel_pos

static void ggml_compute_forward_get_rel_pos_f16(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // ref: https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/modeling/image_encoder.py#L292-L322

    GGML_TENSOR_UNARY_OP_LOCALS

    const int64_t w = ne1;

    ggml_fp16_t * src0_data = (ggml_fp16_t *) src0->data;
    ggml_fp16_t * dst_data  = (ggml_fp16_t *) dst->data;

    for (int64_t i2 = 0; i2 < ne2; ++i2) {
        for (int64_t i1 = 0; i1 < ne1; ++i1) {
            const int64_t pos = (w - i1 - 1) + i2;
            for (int64_t i0 = 0; i0 < ne0; ++i0) {
                dst_data[i2*ne1*ne0 + i1*ne0 + i0] = src0_data[pos*ne00 + i0];
            }
        }
    }
}

static void ggml_compute_forward_get_rel_pos(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_get_rel_pos_f16(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_add_rel_pos

static void ggml_compute_forward_add_rel_pos_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        const struct ggml_tensor * src2,
        struct ggml_tensor * dst) {

    const bool inplace = (bool) ((int32_t *) dst->op_params)[0];
    if (!inplace && params->type == GGML_TASK_INIT) {
        memcpy((char *) dst->data, (char *) src0->data, ggml_nbytes(dst));
        return;
    }
    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    // ref: https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/modeling/image_encoder.py#L357-L359

    float * src1_data = (float *) src1->data;
    float * src2_data = (float *) src2->data;
    float * dst_data  = (float *) dst->data;

    const int64_t ne10 = src1->ne[0];
    const int64_t ne11 = src1->ne[1];
    const int64_t ne12 = src1->ne[2];
    const int64_t ne13 = src1->ne[3];

    const int ith = params->ith;
    const int nth = params->nth;

    // total patches in dst
    const int np = ne13;

    // patches per thread
    const int dp = (np + nth - 1)/nth;

    // patch range for this thread
    const int ip0 = dp*ith;
    const int ip1 = MIN(ip0 + dp, np);

    for (int64_t i13 = ip0; i13 < ip1; ++i13) {
        for (int64_t i12 = 0; i12 < ne12; ++i12) {
            for (int64_t i11 = 0; i11 < ne11; ++i11) {
                const int64_t jp1 = i13*ne12*ne11*ne10 + i12*ne11*ne10 + i11*ne10;
                for (int64_t i10 = 0; i10 < ne10; ++i10) {
                    const int64_t jp0 = jp1 + i10;
                    const float src1_e = src1_data[jp0];
                    const float src2_e = src2_data[jp0];

                    const int64_t jdh = jp0 * ne10;
                    const int64_t jdw = jdh - (ne10 - 1) * i10;

                    for (int64_t j = 0; j < ne10; ++j) {
                        dst_data[jdh + j     ] += src2_e;
                        dst_data[jdw + j*ne10] += src1_e;
                    }
                }
            }
        }
    }
}

static void ggml_compute_forward_add_rel_pos(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        const struct ggml_tensor * src2,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_add_rel_pos_f32(params, src0, src1, src2, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_map_unary

static void ggml_compute_forward_map_unary_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst,
        const ggml_unary_op_f32_t fun) {
    GGML_ASSERT(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert( dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        fun(nc,
                (float *) ((char *) dst->data  + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

static void ggml_compute_forward_map_unary(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst,
        const ggml_unary_op_f32_t fun) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_map_unary_f32(params, src0, dst, fun);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
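
// a minimal usage sketch (editor's illustration, assuming the public ggml_map_unary_f32()
// builder from ggml.h, which stores `fun` in op_params for the dispatch above):
//
//     static void my_clip(const int n, float * dst, const float * src) {
//         for (int i = 0; i < n; ++i) dst[i] = src[i] < 0.0f ? 0.0f : src[i];
//     }
//     ...
//     struct ggml_tensor * y = ggml_map_unary_f32(ctx, x, my_clip);
//
// the callback is applied row by row, so it only needs to handle one contiguous row of
// nc floats at a time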
// ggml_compute_forward_map_binary

static void ggml_compute_forward_map_binary_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst,
        const ggml_binary_op_f32_t fun) {
    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert( dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));
    assert(src1->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        fun(nc,
                (float *) ((char *) dst->data  + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])),
                (float *) ((char *) src1->data + i*(src1->nb[1])));
    }
}

static void ggml_compute_forward_map_binary(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst,
        const ggml_binary_op_f32_t fun) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_map_binary_f32(params, src0, src1, dst, fun);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_map_custom1

static void ggml_compute_forward_map_custom1_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * a,
        struct ggml_tensor * dst,
        const ggml_custom1_op_f32_t fun) {
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    fun(dst, a);
}

// ggml_compute_forward_map_custom2

static void ggml_compute_forward_map_custom2_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * a,
        const struct ggml_tensor * b,
        struct ggml_tensor * dst,
        const ggml_custom2_op_f32_t fun) {
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    fun(dst, a, b);
}

// ggml_compute_forward_map_custom3

static void ggml_compute_forward_map_custom3_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * a,
        const struct ggml_tensor * b,
        const struct ggml_tensor * c,
        struct ggml_tensor * dst,
        const ggml_custom3_op_f32_t fun) {
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    fun(dst, a, b, c);
}

// ggml_compute_forward_map_custom1

static void ggml_compute_forward_map_custom1(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * a,
        struct ggml_tensor * dst) {
    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    struct ggml_map_custom1_op_params * p = (struct ggml_map_custom1_op_params *) dst->op_params;

    p->fun(dst, a, params->ith, params->nth, p->userdata);
}

// ggml_compute_forward_map_custom2

static void ggml_compute_forward_map_custom2(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * a,
        const struct ggml_tensor * b,
        struct ggml_tensor * dst) {
    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    struct ggml_map_custom2_op_params * p = (struct ggml_map_custom2_op_params *) dst->op_params;

    p->fun(dst, a, b, params->ith, params->nth, p->userdata);
}

// ggml_compute_forward_map_custom3

static void ggml_compute_forward_map_custom3(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * a,
        const struct ggml_tensor * b,
        const struct ggml_tensor * c,
        struct ggml_tensor * dst) {
    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    struct ggml_map_custom3_op_params * p = (struct ggml_map_custom3_op_params *) dst->op_params;

    p->fun(dst, a, b, c, params->ith, params->nth, p->userdata);
}
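
// a minimal usage sketch (editor's illustration, assuming the public ggml_map_custom1()
// builder from ggml.h with signature (ctx, a, fun, n_tasks, userdata)); unlike the *_f32
// variants above, the callback receives ith/nth so it can split the work across threads:
//
//     static void my_op(struct ggml_tensor * dst, const struct ggml_tensor * a,
//                       int ith, int nth, void * userdata) {
//         // process a strided subset of dst rows here, based on ith and nth
//     }
//     ...
//     struct ggml_tensor * y = ggml_map_custom1(ctx, x, my_op, GGML_N_TASKS_MAX, NULL);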
// ggml_compute_forward_cross_entropy_loss
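
// computes the scalar loss
//
//     loss = -1/nr * sum_i dot(src1[i], log(softmax(src0[i])))
//
// over all nr rows, with the softmax output rescaled into [eps..1] to keep log() finite;
// per-thread partial sums land in wdata and are reduced in the FINALIZE pass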
static void ggml_compute_forward_cross_entropy_loss_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_is_contiguous(src0));
    GGML_ASSERT(ggml_is_contiguous(src1));
    GGML_ASSERT(ggml_is_scalar(dst));
    GGML_ASSERT(ggml_are_same_shape(src0, src1));

    const int ith = params->ith;
    const int nth = params->nth;

    float * sums = (float *) params->wdata;

    // TODO: handle transposed/permuted matrices
    const int nc = src0->ne[0];
    const int nr = ggml_nrows(src0);

    GGML_ASSERT(params->wsize >= sizeof(float) * (nth + nth * nc));

    if (params->type == GGML_TASK_INIT) {
        if (ith == 0) {
            memset(sums, 0, sizeof(float) * (nth + nth * nc));
        }
        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        if (ith == 0) {
            float * dp = (float *) dst->data;
            ggml_vec_sum_f32(nth, dp, sums);
            dp[0] *= -1.0f / (float) nr;
        }
        return;
    }

    const double eps = 1e-9;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        float * s0 = (float *)((char *) src0->data + i1*src0->nb[1]);
        float * s1 = (float *)((char *) src1->data + i1*src1->nb[1]);
        float * st = ((float *) params->wdata) + nth + ith*nc;

#ifndef NDEBUG
        for (int i = 0; i < nc; ++i) {
            //printf("p[%d] = %f\n", i, p[i]);
            assert(!isnan(s0[i]));
            assert(!isnan(s1[i]));
        }
#endif
        // soft_max
        ggml_float sum = 0.0;
        {
            float max = -INFINITY;
            ggml_vec_max_f32(nc, &max, s0);

            uint16_t scvt; UNUSED(scvt);
            for (int i = 0; i < nc; i++) {
                if (s0[i] == -INFINITY) {
                    st[i] = 0.0f;
                } else {
#ifndef GGML_CROSS_ENTROPY_EXP_FP16
                    const float s = s0[i] - max;
                    const float val = expf(s);
#else
                    ggml_fp16_t s = GGML_FP32_TO_FP16(s0[i] - max);
                    memcpy(&scvt, &s, sizeof(scvt));
                    const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt]);
#endif
                    sum += (ggml_float)val;
                    st[i] = val;
                }
            }

            assert(sum > 0.0);
            // sum = 1.0/sum;
        }
        // avoid log(0) by rescaling from [0..1] to [eps..1]
        sum = (1.0 - eps) / sum;
        ggml_vec_scale_f32(nc, st, sum);
        ggml_vec_add1_f32(nc, st, st, eps);
        ggml_vec_log_f32(nc, st, st);
        ggml_vec_mul_f32(nc, st, st, s1);

        float st_sum = 0;
        ggml_vec_sum_f32(nc, &st_sum, st);
        sums[ith] += st_sum;

#ifndef NDEBUG
        for (int i = 0; i < nc; ++i) {
            assert(!isnan(st[i]));
            assert(!isinf(st[i]));
        }
#endif
    }
}

static void ggml_compute_forward_cross_entropy_loss(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_cross_entropy_loss_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_cross_entropy_loss_back

static void ggml_compute_forward_cross_entropy_loss_back_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        const struct ggml_tensor * opt0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_is_contiguous(dst));
    GGML_ASSERT(ggml_is_contiguous(src0));
    GGML_ASSERT(ggml_is_contiguous(src1));
    GGML_ASSERT(ggml_is_contiguous(opt0));
    GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));

    const int64_t ith = params->ith;
    const int64_t nth = params->nth;

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const double eps = 1e-9;

    // TODO: handle transposed/permuted matrices
    const int64_t nc = src0->ne[0];
    const int64_t nr = ggml_nrows(src0);

    // rows per thread
    const int64_t dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int64_t ir0 = dr*ith;
    const int64_t ir1 = MIN(ir0 + dr, nr);

    float * d = (float *) opt0->data;

    for (int64_t i1 = ir0; i1 < ir1; i1++) {
        float * ds0 = (float *)((char *) dst->data  + i1*dst->nb[1]);
        float * s0  = (float *)((char *) src0->data + i1*src0->nb[1]);
        float * s1  = (float *)((char *) src1->data + i1*src1->nb[1]);

#ifndef NDEBUG
        for (int i = 0; i < nc; ++i) {
            //printf("p[%d] = %f\n", i, p[i]);
            assert(!isnan(s0[i]));
            assert(!isnan(s1[i]));
        }
#endif
        // soft_max
        ggml_float sum = 0.0;
        {
            float max = -INFINITY;
            ggml_vec_max_f32(nc, &max, s0);

            uint16_t scvt; UNUSED(scvt);
            for (int i = 0; i < nc; i++) {
                if (s0[i] == -INFINITY) {
                    ds0[i] = 0.0f;
                } else {
#ifndef GGML_CROSS_ENTROPY_EXP_FP16
                    const float s = s0[i] - max;
                    const float val = expf(s);
#else
                    ggml_fp16_t s = GGML_FP32_TO_FP16(s0[i] - max);
                    memcpy(&scvt, &s, sizeof(scvt));
                    const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt]);
#endif
                    sum += (ggml_float)val;
                    ds0[i] = val;
                }
            }

            assert(sum > 0.0);
            sum = (1.0 - eps)/sum;
        }

        // grad(src0) = (softmax(src0) - src1) * grad(cross_entropy_loss(src0, src1)) / nr
        ggml_vec_scale_f32(nc, ds0, sum);
        ggml_vec_add1_f32(nc, ds0, ds0, eps);
        ggml_vec_sub_f32(nc, ds0, ds0, s1);
        ggml_vec_scale_f32(nc, ds0, d[0] / (float) nr);

#ifndef NDEBUG
        for (int i = 0; i < nc; ++i) {
            assert(!isnan(ds0[i]));
            assert(!isinf(ds0[i]));
        }
#endif
    }
}

static void ggml_compute_forward_cross_entropy_loss_back(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        const struct ggml_tensor * opt0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_cross_entropy_loss_back_f32(params, src0, src1, opt0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
/////////////////////////////////

static void ggml_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor) {
    GGML_ASSERT(params);

#ifdef GGML_USE_CUBLAS
    bool skip_cpu = ggml_cuda_compute_forward(params, tensor);
    if (skip_cpu) {
        return;
    }
    GGML_ASSERT(tensor->src[0] == NULL || tensor->src[0]->backend == GGML_BACKEND_CPU);
    GGML_ASSERT(tensor->src[1] == NULL || tensor->src[1]->backend == GGML_BACKEND_CPU);
#endif // GGML_USE_CUBLAS

    switch (tensor->op) {
        case GGML_OP_DUP:
            {
                ggml_compute_forward_dup(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_ADD:
            {
                ggml_compute_forward_add(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_ADD1:
            {
                ggml_compute_forward_add1(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_ACC:
            {
                ggml_compute_forward_acc(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_SUB:
            {
                ggml_compute_forward_sub(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_MUL:
            {
                ggml_compute_forward_mul(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_DIV:
            {
                ggml_compute_forward_div(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_SQR:
            {
                ggml_compute_forward_sqr(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_SQRT:
            {
                ggml_compute_forward_sqrt(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_LOG:
            {
                ggml_compute_forward_log(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_SUM:
            {
                ggml_compute_forward_sum(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_SUM_ROWS:
            {
                ggml_compute_forward_sum_rows(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_MEAN:
            {
                ggml_compute_forward_mean(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_ARGMAX:
            {
                ggml_compute_forward_argmax(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_REPEAT:
            {
                ggml_compute_forward_repeat(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_REPEAT_BACK:
            {
                ggml_compute_forward_repeat_back(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_CONCAT:
            {
                ggml_compute_forward_concat(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_SILU_BACK:
            {
                ggml_compute_forward_silu_back(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_NORM:
            {
                ggml_compute_forward_norm(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_RMS_NORM:
            {
                ggml_compute_forward_rms_norm(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_RMS_NORM_BACK:
            {
                ggml_compute_forward_rms_norm_back(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_GROUP_NORM:
            {
                ggml_compute_forward_group_norm(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_MUL_MAT:
            {
                ggml_compute_forward_mul_mat(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_OUT_PROD:
            {
                ggml_compute_forward_out_prod(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_SCALE:
            {
                ggml_compute_forward_scale(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_SET:
            {
                ggml_compute_forward_set(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_CPY:
            {
                ggml_compute_forward_cpy(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_CONT:
            {
                ggml_compute_forward_cont(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_RESHAPE:
            {
                ggml_compute_forward_reshape(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_VIEW:
            {
                ggml_compute_forward_view(params, tensor->src[0]);
            } break;
        case GGML_OP_PERMUTE:
            {
                ggml_compute_forward_permute(params, tensor->src[0]);
            } break;
        case GGML_OP_TRANSPOSE:
            {
                ggml_compute_forward_transpose(params, tensor->src[0]);
            } break;
        case GGML_OP_GET_ROWS:
            {
                ggml_compute_forward_get_rows(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_GET_ROWS_BACK:
            {
                ggml_compute_forward_get_rows_back(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_DIAG:
            {
                ggml_compute_forward_diag(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_DIAG_MASK_INF:
            {
                ggml_compute_forward_diag_mask_inf(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_DIAG_MASK_ZERO:
            {
                ggml_compute_forward_diag_mask_zero(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_SOFT_MAX:
            {
                ggml_compute_forward_soft_max(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_SOFT_MAX_BACK:
            {
                ggml_compute_forward_soft_max_back(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_ROPE:
            {
                ggml_compute_forward_rope(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_ROPE_BACK:
            {
                ggml_compute_forward_rope_back(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_ALIBI:
            {
                ggml_compute_forward_alibi(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_CLAMP:
            {
                ggml_compute_forward_clamp(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_CONV_1D:
            {
                ggml_compute_forward_conv_1d(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_CONV_2D:
            {
                ggml_compute_forward_conv_2d(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_CONV_TRANSPOSE_2D:
            {
                ggml_compute_forward_conv_transpose_2d(params, tensor->src[0], tensor->src[1], tensor);
            } break;
        case GGML_OP_POOL_1D:
            {
                ggml_compute_forward_pool_1d(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_POOL_2D:
            {
                ggml_compute_forward_pool_2d(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_UPSCALE:
            {
                ggml_compute_forward_upscale(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_FLASH_ATTN:
            {
                const int32_t t = ggml_get_op_params_i32(tensor, 0);
                GGML_ASSERT(t == 0 || t == 1);
                const bool masked = t != 0;
                ggml_compute_forward_flash_attn(params, tensor->src[0], tensor->src[1], tensor->src[2], masked, tensor);
            } break;
        case GGML_OP_FLASH_FF:
            {
                ggml_compute_forward_flash_ff(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor->src[3], tensor->src[4], tensor);
            } break;
        case GGML_OP_FLASH_ATTN_BACK:
            {
                int32_t t = ggml_get_op_params_i32(tensor, 0);
                GGML_ASSERT(t == 0 || t == 1);
                bool masked = t != 0;
                ggml_compute_forward_flash_attn_back(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor->src[3], masked, tensor);
            } break;
        case GGML_OP_WIN_PART:
            {
                ggml_compute_forward_win_part(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_WIN_UNPART:
            {
                ggml_compute_forward_win_unpart(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_UNARY:
            {
                ggml_compute_forward_unary(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_GET_REL_POS:
            {
                ggml_compute_forward_get_rel_pos(params, tensor->src[0], tensor);
            } break;
        case GGML_OP_ADD_REL_POS:
            {
                ggml_compute_forward_add_rel_pos(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor);
            } break;
        case GGML_OP_MAP_UNARY:
            {
                ggml_unary_op_f32_t fun;
                memcpy(&fun, tensor->op_params, sizeof(fun));
                ggml_compute_forward_map_unary(params, tensor->src[0], tensor, fun);
            }
            break;
        case GGML_OP_MAP_BINARY:
            {
                ggml_binary_op_f32_t fun;
                memcpy(&fun, tensor->op_params, sizeof(fun));
                ggml_compute_forward_map_binary(params, tensor->src[0], tensor->src[1], tensor, fun);
            }
            break;
        case GGML_OP_MAP_CUSTOM1_F32:
            {
                ggml_custom1_op_f32_t fun;
                memcpy(&fun, tensor->op_params, sizeof(fun));
                ggml_compute_forward_map_custom1_f32(params, tensor->src[0], tensor, fun);
            }
            break;
        case GGML_OP_MAP_CUSTOM2_F32:
            {
                ggml_custom2_op_f32_t fun;
                memcpy(&fun, tensor->op_params, sizeof(fun));
                ggml_compute_forward_map_custom2_f32(params, tensor->src[0], tensor->src[1], tensor, fun);
            }
            break;
        case GGML_OP_MAP_CUSTOM3_F32:
            {
                ggml_custom3_op_f32_t fun;
                memcpy(&fun, tensor->op_params, sizeof(fun));
                ggml_compute_forward_map_custom3_f32(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor, fun);
            }
            break;
        case GGML_OP_MAP_CUSTOM1:
            {
                ggml_compute_forward_map_custom1(params, tensor->src[0], tensor);
            }
            break;
        case GGML_OP_MAP_CUSTOM2:
            {
                ggml_compute_forward_map_custom2(params, tensor->src[0], tensor->src[1], tensor);
            }
            break;
        case GGML_OP_MAP_CUSTOM3:
            {
                ggml_compute_forward_map_custom3(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor);
            }
            break;
        case GGML_OP_CROSS_ENTROPY_LOSS:
            {
                ggml_compute_forward_cross_entropy_loss(params, tensor->src[0], tensor->src[1], tensor);
            }
            break;
        case GGML_OP_CROSS_ENTROPY_LOSS_BACK:
            {
                ggml_compute_forward_cross_entropy_loss_back(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor);
            }
            break;
        case GGML_OP_NONE:
            {
                // nop
            } break;
        case GGML_OP_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }
}
  13471. ////////////////////////////////////////////////////////////////////////////////
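
//
// graph node hash table
//
// open addressing with linear probing over tensor pointers. the table is used
// below to record which tensors have already been visited while building a
// graph. for example, when two pointers hash to the same slot, the second one
// is stored in the next free slot and is found again by probing forward from
// its home slot.
//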
static_assert(GGML_GRAPH_HASHTABLE_SIZE > GGML_MAX_NODES * 2, "GGML_GRAPH_HASHTABLE_SIZE is too small");

static size_t hash(void * p) {
    return (size_t)p % GGML_GRAPH_HASHTABLE_SIZE;
}

static size_t hash_find(void * hash_table[], void * p) {
    size_t h = hash(p);

    // linear probing
    size_t i = h;
    while (hash_table[i] != NULL && hash_table[i] != p) {
        i = (i + 1) % GGML_GRAPH_HASHTABLE_SIZE;
        if (i == h) {
            // visited all hash table entries -> not found
            return GGML_GRAPH_HASHTABLE_SIZE;
        }
    }
    return i;
}

static bool hash_insert(void * hash_table[], void * p) {
    size_t i = hash_find(hash_table, p);

    GGML_ASSERT(i < GGML_GRAPH_HASHTABLE_SIZE); // assert that not full

    if (hash_table[i] == p) {
        return true; // already present
    }

    // insert
    GGML_ASSERT(hash_table[i] == NULL);
    hash_table[i] = p;
    return false;
}

static bool hash_contains(void * hash_table[], void * p) {
    size_t i = hash_find(hash_table, p);
    return (i < GGML_GRAPH_HASHTABLE_SIZE) && (hash_table[i] == p);
}
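
// note: hash_insert returns whether the pointer was already present, so a
// typical call site (see ggml_visit_parents below) reads like:
//
//     if (hash_insert(table, node)) {
//         return; // already visited
//     }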

struct hash_map {
    void * keys[GGML_GRAPH_HASHTABLE_SIZE];
    void * vals[GGML_GRAPH_HASHTABLE_SIZE];
};

static struct hash_map * new_hash_map(void) {
    struct hash_map * result = malloc(sizeof(struct hash_map));
    for (int i = 0; i < GGML_GRAPH_HASHTABLE_SIZE; ++i) {
        result->keys[i] = NULL;
        result->vals[i] = NULL;
    }
    return result;
}

static void free_hash_map(struct hash_map * map) {
    free(map);
}
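
// the key/value variant reuses hash_find on the keys array; keys are original
// graph nodes and vals are their replacements (see gradient checkpointing
// below). note that new_hash_map assumes malloc succeeds; the table is a
// fixed-size allocation of 2 * GGML_GRAPH_HASHTABLE_SIZE pointers.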

// gradient checkpointing

static struct ggml_tensor * ggml_recompute_graph_node(
        struct ggml_context * ctx,
        struct ggml_cgraph  * graph,
        struct hash_map     * replacements,
        struct ggml_tensor  * node) {

    if (node == NULL) {
        return NULL;
    }

    if (node->is_param) {
        return node;
    }

    if (!hash_contains(graph->visited_hash_table, node)) {
        return node;
    }

    int count_children = 0;
    for (int k = 0; k < GGML_MAX_SRC; ++k) {
        if (node->src[k]) {
            ++count_children;
        }
    }

    if (count_children == 0) {
        return node;
    }

    size_t i = hash_find(replacements->keys, node);
    GGML_ASSERT(i < GGML_GRAPH_HASHTABLE_SIZE); // assert that not full
    if (replacements->keys[i] == node) {
        return (struct ggml_tensor *) replacements->vals[i];
    }

    struct ggml_tensor * clone = ggml_new_tensor(ctx, node->type, node->n_dims, node->ne);

    // insert clone into replacements
    GGML_ASSERT(replacements->keys[i] == NULL); // assert that we don't overwrite
    replacements->keys[i] = node;
    replacements->vals[i] = clone;

    clone->op       = node->op;
    clone->grad     = node->grad;
    clone->is_param = node->is_param;
    clone->extra    = node->extra;
    for (int k = 0; k < GGML_MAX_DIMS; ++k) {
        clone->nb[k] = node->nb[k];
    }
    for (int k = 0; k < GGML_MAX_SRC; ++k) {
        clone->src[k] = ggml_recompute_graph_node(ctx, graph, replacements, node->src[k]);
    }
    if (node->view_src != NULL) {
        clone->data = (node->view_src->data == NULL)
                        ? NULL // view_src not yet allocated
                        : (char *) node->view_src->data // view_src already allocated
                                 + node->view_offs;
        clone->view_src  = node->view_src;
        clone->view_offs = node->view_offs;
    }

    GGML_ASSERT(sizeof(node->op_params) == sizeof(int32_t) * (GGML_MAX_OP_PARAMS / sizeof(int32_t)));
    GGML_ASSERT(sizeof(node->name)      == GGML_MAX_NAME);
    memcpy(clone->op_params, node->op_params, sizeof(node->op_params));
    ggml_format_name(clone, "%s (clone)", ggml_get_name(node));

    return clone;
}
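
// in words: a node is cloned recursively so that its value can be recomputed
// in the backward graph instead of being kept alive from the forward pass.
// the recursion stops at parameters, leaf tensors, tensors outside the graph,
// and tensors that already have a replacement (the checkpoints seed the map).
// memoization through `replacements` guarantees each node is cloned at most
// once.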

void ggml_build_backward_gradient_checkpointing(
        struct ggml_context   * ctx,
        struct ggml_cgraph    * gf,
        struct ggml_cgraph    * gb,
        struct ggml_cgraph    * gb_tmp,
        struct ggml_tensor  * * checkpoints,
        int                     n_checkpoints) {
    *gb_tmp = *gf;
    ggml_build_backward_expand(ctx, gf, gb_tmp, true);

    if (n_checkpoints <= 0) {
        *gb = *gb_tmp;
        return;
    }

    struct hash_map * replacements = new_hash_map();

    // insert checkpoints in replacements
    for (int i = 0; i < n_checkpoints; ++i) {
        size_t k = hash_find(replacements->keys, checkpoints[i]);
        GGML_ASSERT(k < GGML_GRAPH_HASHTABLE_SIZE); // assert that not full
        GGML_ASSERT(replacements->keys[k] == NULL); // assert that we don't overwrite
        replacements->keys[k] = checkpoints[i];
        replacements->vals[k] = checkpoints[i];
    }

    *gb = *gf;
    // rewrite gb_tmp->nodes[gf->n_nodes:gb_tmp->n_nodes],
    // replacing references to gb_tmp->nodes[0:gf->n_nodes] ( == gf->nodes[0:gf->n_nodes]),
    // by recomputing them from checkpoints
    for (int i = gf->n_nodes; i < gb_tmp->n_nodes; ++i) {
        struct ggml_tensor * node = gb_tmp->nodes[i];
        for (int k = 0; k < GGML_MAX_SRC; ++k) {
            // insert new tensors recomputing src, reusing already made replacements,
            // remember replacements: remember new tensors with mapping from corresponding gf nodes
            // recurse for input tensors,
            // unless (i.e. terminating when) input tensors are replacements (like checkpoints)
            node->src[k] = ggml_recompute_graph_node(ctx, gf, replacements, node->src[k]);
        }
        // insert rewritten backward node with replacements made into resulting backward graph gb
        ggml_build_forward_expand(gb, node);
    }

    free_hash_map(replacements);
}
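
// illustrative call (hypothetical names, not from this file): a training loop
// that checkpoints every transformer layer output might do
//
//     struct ggml_tensor * checkpoints[n_layer]; // filled while building gf
//     ggml_build_backward_gradient_checkpointing(ctx, gf, gb, gb_tmp, checkpoints, n_layer);
//
// trading extra recomputation in gb for not having to keep every forward
// activation alive.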

// helpers to accumulate into a gradient tensor, handling the case where the
// accumulator `a` still holds its initial zero value (tracked via zero_table):
// in that case the addition can be replaced by the new term itself

static struct ggml_tensor * ggml_add_or_set(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, void * zero_table[]) {
    if (hash_contains(zero_table, a)) {
        return b;
    } else {
        return ggml_add_impl(ctx, a, b, false);
    }
}

static struct ggml_tensor * ggml_acc_or_set(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, size_t nb1, size_t nb2, size_t nb3, size_t offset, void * zero_table[]) {
    if (hash_contains(zero_table, a)) {
        struct ggml_tensor * a_zero = ggml_scale(ctx, a, ggml_new_f32(ctx, 0));
        return ggml_acc_impl(ctx, a_zero, b, nb1, nb2, nb3, offset, false);
    } else {
        return ggml_acc_impl(ctx, a, b, nb1, nb2, nb3, offset, false);
    }
}

static struct ggml_tensor * ggml_add1_or_set(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, void * zero_table[]) {
    if (hash_contains(zero_table, a)) {
        return ggml_repeat(ctx, b, a);
    } else {
        return ggml_add1_impl(ctx, a, b, false);
    }
}

static struct ggml_tensor * ggml_sub_or_set(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, void * zero_table[]) {
    if (hash_contains(zero_table, a)) {
        return ggml_neg(ctx, b);
    } else {
        return ggml_sub_impl(ctx, a, b, false);
    }
}
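
// for example, if grad(x) is still in zero_table, then
// ggml_add_or_set(ctx, grad(x), dx, zero_table) simply returns dx, since
// 0 + dx == dx, and no add node is emitted. ggml_acc_or_set cannot take the
// same shortcut because b only covers a view of a, so it zeroes a explicitly
// (scale by 0) before accumulating into the view.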

static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor * tensor, void * zero_table[]) {
    struct ggml_tensor * src0 = tensor->src[0];
    struct ggml_tensor * src1 = tensor->src[1];

    switch (tensor->op) {
        case GGML_OP_DUP:
            {
                if (src0->grad) {
                    src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
                }
            } break;
        case GGML_OP_ADD:
            {
                if (src0->grad) {
                    src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
                }
                if (src1->grad) {
                    src1->grad = ggml_add_or_set(ctx, src1->grad, tensor->grad, zero_table);
                }
            } break;
        case GGML_OP_ADD1:
            {
                if (src0->grad) {
                    src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
                }
                if (src1->grad) {
                    src1->grad = ggml_add_or_set(ctx,
                        src1->grad,
                        ggml_mean(ctx, tensor->grad), // TODO: should probably be sum instead of mean
                        zero_table);
                }
            } break;
        case GGML_OP_ACC:
            {
                if (src0->grad) {
                    src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
                }
                if (src1->grad) {
                    const size_t nb1    = ((int32_t *) tensor->op_params)[0];
                    const size_t nb2    = ((int32_t *) tensor->op_params)[1];
                    const size_t nb3    = ((int32_t *) tensor->op_params)[2];
                    const size_t offset = ((int32_t *) tensor->op_params)[3];

                    struct ggml_tensor * tensor_grad_view = ggml_view_4d(ctx,
                        tensor->grad,
                        src1->grad->ne[0],
                        src1->grad->ne[1],
                        src1->grad->ne[2],
                        src1->grad->ne[3],
                        nb1, nb2, nb3, offset);

                    src1->grad =
                        ggml_add_or_set(ctx,
                            src1->grad,
                            ggml_reshape(ctx,
                                ggml_cont(ctx, tensor_grad_view),
                                src1->grad),
                            zero_table);
                }
            } break;
        case GGML_OP_SUB:
            {
                if (src0->grad) {
                    src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
                }
                if (src1->grad) {
                    src1->grad = ggml_sub_or_set(ctx, src1->grad, tensor->grad, zero_table);
                }
            } break;
        case GGML_OP_MUL:
            {
                if (src0->grad) {
                    src0->grad =
                        ggml_add_or_set(ctx,
                            src0->grad,
                            ggml_mul(ctx, src1, tensor->grad),
                            zero_table);
                }
                if (src1->grad) {
                    src1->grad =
                        ggml_add_or_set(ctx,
                            src1->grad,
                            ggml_mul(ctx, src0, tensor->grad),
                            zero_table);
                }
            } break;
        case GGML_OP_DIV:
            {
                if (src0->grad) {
                    src0->grad =
                        ggml_add_or_set(ctx,
                            src0->grad,
                            ggml_div(ctx, tensor->grad, src1),
                            zero_table);
                }
                if (src1->grad) {
                    src1->grad =
                        ggml_sub_or_set(ctx,
                            src1->grad,
                            ggml_mul(ctx,
                                tensor->grad,
                                ggml_div(ctx, tensor, src1)),
                            zero_table);
                }
            } break;
        case GGML_OP_SQR:
            {
                if (src0->grad) {
                    src0->grad =
                        ggml_add_or_set(ctx,
                            src0->grad,
                            ggml_scale(ctx,
                                ggml_mul(ctx, src0, tensor->grad),
                                ggml_new_f32(ctx, 2.0f)),
                            zero_table);
                }
            } break;
        case GGML_OP_SQRT:
            {
                if (src0->grad) {
                    src0->grad =
                        ggml_add_or_set(ctx,
                            src0->grad,
                            ggml_scale(ctx,
                                ggml_div(ctx,
                                    tensor->grad,
                                    tensor),
                                ggml_new_f32(ctx, 0.5f)),
                            zero_table);
                }
            } break;
        case GGML_OP_LOG:
            {
                if (src0->grad) {
                    src0->grad =
                        ggml_add_or_set(ctx,
                            src0->grad,
                            ggml_div(ctx,
                                tensor->grad,
                                src0),
                            zero_table);
                }
            } break;
        case GGML_OP_SUM:
            {
                if (src0->grad) {
                    src0->grad =
                        ggml_add1_or_set(ctx,
                            src0->grad,
                            tensor->grad,
                            zero_table);
                }
            } break;
        case GGML_OP_SUM_ROWS:
            {
                if (src0->grad) {
                    src0->grad =
                        ggml_add_or_set(ctx,
                            src0->grad,
                            ggml_repeat(ctx,
                                tensor->grad,
                                src0->grad),
                            zero_table);
                }
            } break;
        case GGML_OP_MEAN:
        case GGML_OP_ARGMAX:
            {
                GGML_ASSERT(false); // TODO: implement
            } break;
        case GGML_OP_REPEAT:
            {
                // necessary for llama
                if (src0->grad) {
                    src0->grad = ggml_add_or_set(ctx,
                        src0->grad,
                        ggml_repeat_back(ctx, tensor->grad, src0->grad),
                        zero_table);
                }
            } break;
        case GGML_OP_REPEAT_BACK:
            {
                if (src0->grad) {
                    // TODO: test this
                    src0->grad = ggml_add_or_set(ctx,
                        src0->grad,
                        ggml_repeat(ctx, tensor->grad, src0->grad),
                        zero_table);
                }
            } break;
        case GGML_OP_CONCAT:
            {
                GGML_ASSERT(false); // TODO: implement
            } break;
        case GGML_OP_SILU_BACK:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_NORM:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_RMS_NORM:
            {
                // necessary for llama
                if (src0->grad) {
                    float eps;
                    memcpy(&eps, tensor->op_params, sizeof(float));

                    src0->grad = ggml_add_or_set(ctx,
                        src0->grad,
                        ggml_rms_norm_back(ctx, src0, tensor->grad, eps),
                        zero_table);
                }
            } break;
        case GGML_OP_RMS_NORM_BACK:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_GROUP_NORM:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_MUL_MAT:
            {
                // https://cs231n.github.io/optimization-2/#staged
                // # forward pass
                // s0 = np.random.randn(5, 10)
                // s1 = np.random.randn(10, 3)
                // t = s0.dot(s1)

                // # now suppose we had the gradient on t from above in the circuit
                // dt  = np.random.randn(*t.shape) # same shape as t
                // ds0 = dt.dot(s1.T)              # .T gives the transpose of the matrix
                // ds1 = s0.T.dot(dt)

                // tensor.shape [m,p,qq,rr]
                // src0.shape   [n,m,q1,r1]
                // src1.shape   [n,p,qq,rr]

                // necessary for llama
                if (src0->grad) {
                    struct ggml_tensor * s1_tg =
                        ggml_out_prod(ctx, // [n,m,qq,rr]
                            src1,          // [n,p,qq,rr]
                            tensor->grad); // [m,p,qq,rr]
                    const int64_t qq = s1_tg->ne[2];
                    const int64_t rr = s1_tg->ne[3];
                    const int64_t q1 = src0->ne[2];
                    const int64_t r1 = src0->ne[3];
                    const bool ne2_broadcasted = qq > q1;
                    const bool ne3_broadcasted = rr > r1;
                    if (ne2_broadcasted || ne3_broadcasted) {
                        // sum broadcast repetitions of s1_tg into shape of src0
                        s1_tg = ggml_repeat_back(ctx, s1_tg, src0);
                    }
                    src0->grad =
                        ggml_add_or_set(ctx,
                            src0->grad, // [n,m,q1,r1]
                            s1_tg,      // [n,m,q1,r1]
                            zero_table);
                }
                if (src1->grad) {
                    src1->grad =
                        ggml_add_or_set(ctx,
                            src1->grad,                            // [n,p,qq,rr]
                            // ggml_mul_mat(ctx,                   // [n,p,qq,rr]
                            //     ggml_cont(ctx,                  // [m,n,q1,r1]
                            //         ggml_transpose(ctx, src0)), // [m,n,q1,r1]
                            //     tensor->grad),                  // [m,p,qq,rr]

                            // // when src0 is bigger than tensor->grad (this is mostly the case in llama),
                            // // avoid transpose of src0, rather transpose smaller tensor->grad
                            // // and then use ggml_out_prod
                            ggml_out_prod(ctx,      // [n,p,qq,rr]
                                src0,               // [n,m,q1,r1]
                                ggml_transpose(ctx, // [p,m,qq,rr]
                                    tensor->grad)), // [m,p,qq,rr]
                            zero_table);
                }
            } break;
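        // recap of the mul_mat backward above, in ggml's [ne0,ne1,ne2,ne3]
        // shape notation: ds0 = out_prod(s1, dt) has shape [n,m,qq,rr] and is
        // reduced to src0's [n,m,q1,r1] via ggml_repeat_back when ne2/ne3 were
        // broadcasted; ds1 = out_prod(s0, dt^T) already matches src1's
        // [n,p,qq,rr].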
        case GGML_OP_OUT_PROD:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_SCALE:
            {
                // necessary for llama
                if (src0->grad) {
                    src0->grad =
                        ggml_add_or_set(ctx,
                            src0->grad,
                            ggml_scale_impl(ctx, tensor->grad, src1, false),
                            zero_table);
                }
                if (src1->grad) {
                    src1->grad =
                        ggml_add_or_set(ctx,
                            src1->grad,
                            ggml_sum(ctx, ggml_mul_impl(ctx, tensor->grad, src0, false)),
                            zero_table);
                }
            } break;
        case GGML_OP_SET:
            {
                const size_t nb1    = ((int32_t *) tensor->op_params)[0];
                const size_t nb2    = ((int32_t *) tensor->op_params)[1];
                const size_t nb3    = ((int32_t *) tensor->op_params)[2];
                const size_t offset = ((int32_t *) tensor->op_params)[3];

                struct ggml_tensor * tensor_grad_view = NULL;

                if (src0->grad || src1->grad) {
                    GGML_ASSERT(src0->type == tensor->type);
                    GGML_ASSERT(tensor->grad->type == tensor->type);
                    GGML_ASSERT(tensor->grad->type == src1->grad->type);

                    tensor_grad_view = ggml_view_4d(ctx,
                        tensor->grad,
                        src1->grad->ne[0],
                        src1->grad->ne[1],
                        src1->grad->ne[2],
                        src1->grad->ne[3],
                        nb1, nb2, nb3, offset);
                }

                if (src0->grad) {
                    src0->grad = ggml_add_or_set(ctx,
                        src0->grad,
                        ggml_acc_impl(ctx,
                            tensor->grad,
                            ggml_neg(ctx, tensor_grad_view),
                            nb1, nb2, nb3, offset, false),
                        zero_table);
                }

                if (src1->grad) {
                    src1->grad =
                        ggml_add_or_set(ctx,
                            src1->grad,
                            ggml_reshape(ctx,
                                ggml_cont(ctx, tensor_grad_view),
                                src1->grad),
                            zero_table);
                }
            } break;
        case GGML_OP_CPY:
            {
                // necessary for llama
                // cpy overwrites value of src1 by src0 and returns view(src1)
                // the overwriting is mathematically equivalent to:
                // tensor = src0 * 1 + src1 * 0
                if (src0->grad) {
                    // dsrc0 = dtensor * 1
                    src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
                }
                if (src1->grad) {
                    // dsrc1 = dtensor * 0 -> noop
                }
            } break;
        case GGML_OP_CONT:
            {
                // same as cpy
                if (src0->grad) {
                    GGML_ASSERT(ggml_is_contiguous(src0->grad));
                    GGML_ASSERT(ggml_is_contiguous(tensor->grad));
                    src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
                }
            } break;
        case GGML_OP_RESHAPE:
            {
                // necessary for llama
                if (src0->grad) {
                    src0->grad =
                        ggml_add_or_set(ctx, src0->grad,
                            ggml_reshape(ctx,
                                ggml_is_contiguous(tensor->grad)
                                    ? tensor->grad
                                    : ggml_cont(ctx, tensor->grad),
                                src0->grad),
                            zero_table);
                }
            } break;
        case GGML_OP_VIEW:
            {
                // necessary for llama
                if (src0->grad) {
                    size_t offset;

                    memcpy(&offset, tensor->op_params, sizeof(offset));

                    size_t nb1 = tensor->nb[1];
                    size_t nb2 = tensor->nb[2];
                    size_t nb3 = tensor->nb[3];

                    if (src0->type != src0->grad->type) {
                        // gradient is typically F32, but src0 could be other type
                        size_t ng = ggml_element_size(src0->grad);
                        size_t n0 = ggml_element_size(src0);
                        GGML_ASSERT(offset % n0 == 0);
                        GGML_ASSERT(nb1 % n0 == 0);
                        GGML_ASSERT(nb2 % n0 == 0);
                        GGML_ASSERT(nb3 % n0 == 0);
                        offset = (offset / n0) * ng;
                        nb1 = (nb1 / n0) * ng;
                        nb2 = (nb2 / n0) * ng;
                        nb3 = (nb3 / n0) * ng;
                    }

                    src0->grad = ggml_acc_or_set(ctx, src0->grad, tensor->grad, nb1, nb2, nb3, offset, zero_table);
                }
            } break;
        case GGML_OP_PERMUTE:
            {
                // necessary for llama
                if (src0->grad) {
                    int32_t * axes = (int32_t *) tensor->op_params;
                    int axis0 = axes[0] & 0x3;
                    int axis1 = axes[1] & 0x3;
                    int axis2 = axes[2] & 0x3;
                    int axis3 = axes[3] & 0x3;
                    // build the inverse permutation: axes_backward[axes[k]] = k
                    int axes_backward[4] = {0,0,0,0};
                    axes_backward[axis0] = 0;
                    axes_backward[axis1] = 1;
                    axes_backward[axis2] = 2;
                    axes_backward[axis3] = 3;
                    src0->grad =
                        ggml_add_or_set(ctx, src0->grad,
                            ggml_permute(ctx,
                                tensor->grad,
                                axes_backward[0],
                                axes_backward[1],
                                axes_backward[2],
                                axes_backward[3]),
                            zero_table);
                }
            } break;
        case GGML_OP_TRANSPOSE:
            {
                // necessary for llama
                if (src0->grad) {
                    src0->grad =
                        ggml_add_or_set(ctx, src0->grad,
                            ggml_transpose(ctx, tensor->grad),
                            zero_table);
                }
            } break;
        case GGML_OP_GET_ROWS:
            {
                // necessary for llama (only for tokenizer)
                if (src0->grad) {
                    src0->grad =
                        ggml_add_or_set(ctx, src0->grad,
                            // last ggml_get_rows_back argument src0->grad is only
                            // necessary to setup correct output shape
                            ggml_get_rows_back(ctx, tensor->grad, src1, src0->grad),
                            zero_table);
                }
                if (src1->grad) {
                    // noop
                }
            } break;
        case GGML_OP_GET_ROWS_BACK:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_DIAG:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_DIAG_MASK_INF:
            {
                // necessary for llama
                if (src0->grad) {
                    const int n_past = ((int32_t *) tensor->op_params)[0];
                    src0->grad =
                        ggml_add_or_set(ctx, src0->grad,
                            ggml_diag_mask_zero_impl(ctx, tensor->grad, n_past, false),
                            zero_table);
                }
            } break;
        case GGML_OP_DIAG_MASK_ZERO:
            {
                // necessary for llama
                if (src0->grad) {
                    const int n_past = ((int32_t *) tensor->op_params)[0];
                    src0->grad =
                        ggml_add_or_set(ctx, src0->grad,
                            ggml_diag_mask_zero_impl(ctx, tensor->grad, n_past, false),
                            zero_table);
                }
            } break;
        case GGML_OP_SOFT_MAX:
            {
                // necessary for llama
                if (src0->grad) {
                    src0->grad =
                        ggml_add_or_set(ctx, src0->grad,
                            ggml_soft_max_back(ctx, tensor->grad, tensor),
                            zero_table);
                }
            } break;
        case GGML_OP_SOFT_MAX_BACK:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_ROPE:
            {
                // necessary for llama
                if (src0->grad) {
                    //const int n_past = ((int32_t *) tensor->op_params)[0];
                    const int n_dims = ((int32_t *) tensor->op_params)[1];
                    const int mode   = ((int32_t *) tensor->op_params)[2];
                    const int n_ctx  = ((int32_t *) tensor->op_params)[3];
                    float freq_base;
                    float freq_scale;
                    float xpos_base;
                    bool  xpos_down;
                    memcpy(&freq_base,  (int32_t *) tensor->op_params + 4, sizeof(float));
                    memcpy(&freq_scale, (int32_t *) tensor->op_params + 5, sizeof(float));
                    memcpy(&xpos_base,  (int32_t *) tensor->op_params + 6, sizeof(float));
                    memcpy(&xpos_down,  (int32_t *) tensor->op_params + 7, sizeof(bool));

                    src0->grad = ggml_add_or_set(ctx,
                        src0->grad,
                        ggml_rope_back(ctx,
                            tensor->grad,
                            src1,
                            n_dims,
                            mode,
                            n_ctx,
                            freq_base,
                            freq_scale,
                            xpos_base,
                            xpos_down),
                        zero_table);
                }
            } break;
        case GGML_OP_ROPE_BACK:
            {
                if (src0->grad) {
                    //const int n_past = ((int32_t *) tensor->op_params)[0];
                    const int n_dims = ((int32_t *) tensor->op_params)[1];
                    const int mode   = ((int32_t *) tensor->op_params)[2];
                    const int n_ctx  = ((int32_t *) tensor->op_params)[3];
                    float freq_base;
                    float freq_scale;
                    float xpos_base;
                    bool  xpos_down;
                    memcpy(&freq_base,  (int32_t *) tensor->op_params + 4, sizeof(float));
                    memcpy(&freq_scale, (int32_t *) tensor->op_params + 5, sizeof(float));
                    memcpy(&xpos_base,  (int32_t *) tensor->op_params + 6, sizeof(float));
                    memcpy(&xpos_down,  (int32_t *) tensor->op_params + 7, sizeof(bool));

                    src0->grad = ggml_add_or_set(ctx,
                        src0->grad,
                        ggml_rope_impl(ctx,
                            tensor->grad,
                            src1,
                            n_dims,
                            mode,
                            n_ctx,
                            freq_base,
                            freq_scale,
                            xpos_base,
                            xpos_down,
                            false),
                        zero_table);
                }
            } break;
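        // note the symmetry of the two cases above: the backward pass of
        // GGML_OP_ROPE applies ggml_rope_back, while the backward pass of
        // GGML_OP_ROPE_BACK applies the forward rope with the same parameters
        // read back from op_params; the rotations invert each other, so each
        // op serves as the other's adjoint.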
        case GGML_OP_ALIBI:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_CLAMP:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_CONV_1D:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_CONV_2D:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_CONV_TRANSPOSE_2D:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_POOL_1D:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_POOL_2D:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_UPSCALE:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_FLASH_ATTN:
            {
                struct ggml_tensor * flash_grad = NULL;
                if (src0->grad || src1->grad || tensor->src[2]->grad) {
                    int32_t t = ggml_get_op_params_i32(tensor, 0);
                    GGML_ASSERT(t == 0 || t == 1);
                    bool masked = t != 0;
                    flash_grad =
                        ggml_flash_attn_back(ctx,
                            src0,
                            src1,
                            tensor->src[2],
                            tensor->grad,
                            masked);
                }

                struct ggml_tensor * src2 = tensor->src[2];
                const int64_t elem_q = ggml_nelements(src0);
                const int64_t elem_k = ggml_nelements(src1);
                const int64_t elem_v = ggml_nelements(src2);

                // note: assumes at least one of q/k/v has a gradient,
                //       i.e. flash_grad was computed above and is non-NULL
                enum ggml_type result_type = flash_grad->type;
                GGML_ASSERT(ggml_blck_size(result_type) == 1);
                const size_t tsize = ggml_type_size(result_type);

                const size_t offs_q = 0;
                const size_t offs_k = offs_q + GGML_PAD(elem_q * tsize, GGML_MEM_ALIGN);
                const size_t offs_v = offs_k + GGML_PAD(elem_k * tsize, GGML_MEM_ALIGN);

                if (src0->grad) {
                    struct ggml_tensor * view_q = ggml_view_1d(ctx, flash_grad, elem_q, offs_q);
                    struct ggml_tensor * grad_q = ggml_reshape(ctx, view_q, src0);
                    src0->grad = ggml_add_or_set(ctx,
                        src0->grad,
                        grad_q,
                        zero_table);
                }
                if (src1->grad) {
                    struct ggml_tensor * view_k = ggml_view_1d(ctx, flash_grad, elem_k, offs_k);
                    struct ggml_tensor * grad_k = ggml_reshape(ctx, view_k, src1);
                    src1->grad = ggml_add_or_set(ctx,
                        src1->grad,
                        grad_k,
                        zero_table);
                }
                if (src2->grad) {
                    struct ggml_tensor * view_v = ggml_view_1d(ctx, flash_grad, elem_v, offs_v);
                    struct ggml_tensor * grad_v = ggml_reshape(ctx, view_v, src2);
                    src2->grad = ggml_add_or_set(ctx,
                        src2->grad,
                        grad_v,
                        zero_table);
                }
            } break;
        case GGML_OP_FLASH_FF:
            {
                GGML_ASSERT(false); // not supported
            } break;
        case GGML_OP_FLASH_ATTN_BACK:
            {
                GGML_ASSERT(false); // not supported
            } break;
        case GGML_OP_WIN_PART:
        case GGML_OP_WIN_UNPART:
        case GGML_OP_UNARY:
            {
                switch (ggml_get_unary_op(tensor)) {
                    case GGML_UNARY_OP_ABS:
                        {
                            if (src0->grad) {
                                src0->grad =
                                    ggml_add_or_set(ctx,
                                        src0->grad,
                                        ggml_mul(ctx,
                                            ggml_sgn(ctx, src0),
                                            tensor->grad),
                                        zero_table);
                            }
                        } break;
                    case GGML_UNARY_OP_SGN:
                        {
                            if (src0->grad) {
                                // noop
                            }
                        } break;
                    case GGML_UNARY_OP_NEG:
                        {
                            if (src0->grad) {
                                src0->grad = ggml_sub_or_set(ctx, src0->grad, tensor->grad, zero_table);
                            }
                        } break;
                    case GGML_UNARY_OP_STEP:
                        {
                            if (src0->grad) {
                                // noop
                            }
                        } break;
                    case GGML_UNARY_OP_TANH:
                        {
                            GGML_ASSERT(false); // TODO: not implemented
                        } break;
                    case GGML_UNARY_OP_ELU:
                        {
                            GGML_ASSERT(false); // TODO: not implemented
                        } break;
                    case GGML_UNARY_OP_RELU:
                        {
                            if (src0->grad) {
                                src0->grad = ggml_add_or_set(ctx,
                                    src0->grad,
                                    ggml_mul(ctx,
                                        ggml_step(ctx, src0),
                                        tensor->grad),
                                    zero_table);
                            }
                        } break;
                    case GGML_UNARY_OP_GELU:
                        {
                            GGML_ASSERT(false); // TODO: not implemented
                        } break;
                    case GGML_UNARY_OP_GELU_QUICK:
                        {
                            GGML_ASSERT(false); // TODO: not implemented
                        } break;
                    case GGML_UNARY_OP_SILU:
                        {
                            // necessary for llama
                            if (src0->grad) {
                                src0->grad = ggml_add_or_set(ctx,
                                    src0->grad,
                                    ggml_silu_back(ctx, src0, tensor->grad),
                                    zero_table);
                            }
                        } break;
                    default:
                        GGML_ASSERT(false);
                }
            } break;
        case GGML_OP_GET_REL_POS:
        case GGML_OP_ADD_REL_POS:
        case GGML_OP_MAP_UNARY:
        case GGML_OP_MAP_BINARY:
        case GGML_OP_MAP_CUSTOM1_F32:
        case GGML_OP_MAP_CUSTOM2_F32:
        case GGML_OP_MAP_CUSTOM3_F32:
        case GGML_OP_MAP_CUSTOM1:
        case GGML_OP_MAP_CUSTOM2:
        case GGML_OP_MAP_CUSTOM3:
            {
                GGML_ASSERT(false); // not supported
            } break;
        case GGML_OP_CROSS_ENTROPY_LOSS:
            {
                if (src0->grad) {
                    src0->grad = ggml_add_or_set(ctx,
                        src0->grad,
                        ggml_cross_entropy_loss_back(ctx,
                            src0,
                            src1,
                            tensor->grad),
                        zero_table);
                }
            } break;
        case GGML_OP_CROSS_ENTROPY_LOSS_BACK:
            {
                GGML_ASSERT(false); // not supported
            } break;
        case GGML_OP_NONE:
            {
                // nop
            } break;
        case GGML_OP_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }

    for (int i = 0; i < GGML_MAX_SRC; ++i) {
        if (tensor->src[i] && tensor->src[i]->grad) {
            GGML_ASSERT(ggml_are_same_shape(tensor->src[i], tensor->src[i]->grad));
        }
    }
}
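
// every implemented case above accumulates into src->grad through the
// *_or_set helpers, so once the backward graph is built each gradient tensor
// has the same shape as the tensor it belongs to -- which is exactly what the
// final loop asserts.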

static void ggml_visit_parents(struct ggml_cgraph * cgraph, struct ggml_tensor * node) {
    if (node->grad == NULL) {
        // this usually happens when we generate intermediate nodes from constants in the backward pass
        // it can also happen during forward pass, if the user performs computations with constants
        if (node->op != GGML_OP_NONE) {
            //GGML_PRINT_DEBUG("%s: warning: node %p has no grad, but op %d\n", __func__, (void *) node, node->op);
        }
    }

    // check if already visited
    if (hash_insert(cgraph->visited_hash_table, node)) {
        return;
    }

    for (int i = 0; i < GGML_MAX_SRC; ++i) {
        const int k =
            (cgraph->order == GGML_CGRAPH_EVAL_ORDER_LEFT_TO_RIGHT) ? i :
            (cgraph->order == GGML_CGRAPH_EVAL_ORDER_RIGHT_TO_LEFT) ? (GGML_MAX_SRC-1-i) :
            /* unknown order, just fall back to using i */ i;
        if (node->src[k]) {
            ggml_visit_parents(cgraph, node->src[k]);
        }
    }

    if (node->op == GGML_OP_NONE && node->grad == NULL) {
        // reached a leaf node, not part of the gradient graph (e.g. a constant)
        GGML_ASSERT(cgraph->n_leafs < GGML_MAX_NODES);

        if (strlen(node->name) == 0) {
            ggml_format_name(node, "leaf_%d", cgraph->n_leafs);
        }

        cgraph->leafs[cgraph->n_leafs] = node;
        cgraph->n_leafs++;
    } else {
        GGML_ASSERT(cgraph->n_nodes < GGML_MAX_NODES);

        if (strlen(node->name) == 0) {
            ggml_format_name(node, "node_%d", cgraph->n_nodes);
        }

        cgraph->nodes[cgraph->n_nodes] = node;
        cgraph->grads[cgraph->n_nodes] = node->grad;
        cgraph->n_nodes++;
    }
}
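
// ggml_visit_parents is a depth-first, post-order traversal: all sources of a
// node are appended before the node itself, so cgraph->nodes ends up in a
// valid topological evaluation order. constants and other op-less, grad-less
// tensors are collected separately as leafs.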

static void ggml_build_forward_impl(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor, bool expand) {
    if (!expand) {
        cgraph->n_nodes = 0;
        cgraph->n_leafs = 0;
    }

    const int n0 = cgraph->n_nodes;
    UNUSED(n0);

    ggml_visit_parents(cgraph, tensor);

    const int n_new = cgraph->n_nodes - n0;
    GGML_PRINT_DEBUG("%s: visited %d new nodes\n", __func__, n_new);

    if (n_new > 0) {
        // the last added node should always be starting point
        GGML_ASSERT(cgraph->nodes[cgraph->n_nodes - 1] == tensor);
    }
}

void ggml_build_forward_expand(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor) {
    ggml_build_forward_impl(cgraph, tensor, true);
}

struct ggml_cgraph ggml_build_forward(struct ggml_tensor * tensor) {
    struct ggml_cgraph result = {
        /*.n_nodes      =*/ 0,
        /*.n_leafs      =*/ 0,
        /*.nodes        =*/ { NULL },
        /*.grads        =*/ { NULL },
        /*.leafs        =*/ { NULL },
        /*.hash_table   =*/ { NULL },
        /*.order        =*/ GGML_CGRAPH_EVAL_ORDER_LEFT_TO_RIGHT,
        /*.perf_runs    =*/ 0,
        /*.perf_cycles  =*/ 0,
        /*.perf_time_us =*/ 0,
    };

    ggml_build_forward_impl(&result, tensor, false);

    return result;
}
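
// illustrative use (hypothetical tensor names, not from this file):
//
//     struct ggml_tensor * f  = ggml_add(ctx, ggml_mul(ctx, a, b), c);
//     struct ggml_cgraph   gf = ggml_build_forward(f);
//     // gf.nodes now holds the ops (and a/b/c as leafs or nodes) in
//     // topological order, ready for ggml_graph_plan / ggml_graph_compute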

void ggml_build_backward_expand(struct ggml_context * ctx, struct ggml_cgraph * gf, struct ggml_cgraph * gb, bool keep) {
    GGML_ASSERT(gf->n_nodes > 0);

    // if we are keeping the gradient graph, we have to detach the gradient nodes from the original graph
    if (keep) {
        for (int i = 0; i < gf->n_nodes; i++) {
            struct ggml_tensor * node = gf->nodes[i];

            if (node->grad) {
                node->grad = ggml_dup_tensor(ctx, node);
                gf->grads[i] = node->grad;
            }
        }
    }

    // remember original gradients which start with zero values
    void ** zero_table = malloc(sizeof(void *) * GGML_GRAPH_HASHTABLE_SIZE);
    memset(zero_table, 0, sizeof(void *) * GGML_GRAPH_HASHTABLE_SIZE);
    for (int i = 0; i < gf->n_nodes; i++) {
        if (gf->grads[i]) {
            hash_insert(zero_table, gf->grads[i]);
        }
    }

    for (int i = gf->n_nodes - 1; i >= 0; i--) {
        struct ggml_tensor * node = gf->nodes[i];

        // inplace operations to add gradients are not created by ggml_compute_backward
        // use allocator to automatically make inplace operations
        if (node->grad) {
            ggml_compute_backward(ctx, node, zero_table);
        }
    }

    for (int i = 0; i < gf->n_nodes; i++) {
        struct ggml_tensor * node = gf->nodes[i];

        if (node->is_param) {
            GGML_PRINT_DEBUG("%s: found root node %p\n", __func__, (void *) node);
            ggml_build_forward_expand(gb, node->grad);
        }
    }

    free(zero_table);
}
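
// the zero_table freed above only influences how the first accumulation into
// each gradient is emitted (a direct set instead of an add with zero); the
// gradient tensors themselves live in the context and remain valid after this
// function returns.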

struct ggml_cgraph ggml_build_backward(struct ggml_context * ctx, struct ggml_cgraph * gf, bool keep) {
    struct ggml_cgraph result = *gf;
    ggml_build_backward_expand(ctx, gf, &result, keep);
    return result;
}

struct ggml_cgraph * ggml_new_graph(struct ggml_context * ctx) {
    struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_GRAPH, GGML_GRAPH_SIZE);
    struct ggml_cgraph * cgraph = (struct ggml_cgraph *) ((char *) ctx->mem_buffer + obj->offs);

    *cgraph = (struct ggml_cgraph) {
        /*.n_nodes      =*/ 0,
        /*.n_leafs      =*/ 0,
        /*.nodes        =*/ { NULL },
        /*.grads        =*/ { NULL },
        /*.leafs        =*/ { NULL },
        /*.hash_table   =*/ { NULL },
        /*.order        =*/ GGML_CGRAPH_EVAL_ORDER_LEFT_TO_RIGHT,
        /*.perf_runs    =*/ 0,
        /*.perf_cycles  =*/ 0,
        /*.perf_time_us =*/ 0,
    };

    return cgraph;
}

struct ggml_cgraph * ggml_build_forward_ctx(struct ggml_context * ctx, struct ggml_tensor * tensor) {
    struct ggml_cgraph * cgraph = ggml_new_graph(ctx);
    ggml_build_forward_impl(cgraph, tensor, false);
    return cgraph;
}

size_t ggml_graph_overhead(void) {
    return GGML_OBJECT_SIZE + GGML_PAD(GGML_GRAPH_SIZE, GGML_MEM_ALIGN);
}

//
// thread data
//
// synchronization is done via busy loops
// I tried using spin locks, but I'm not sure how to use them correctly - the things I tried were slower than busy loops
//

#ifdef __APPLE__

//#include <os/lock.h>
//
//typedef os_unfair_lock ggml_lock_t;
//
//#define ggml_lock_init(x)    UNUSED(x)
//#define ggml_lock_destroy(x) UNUSED(x)
//#define ggml_lock_lock       os_unfair_lock_lock
//#define ggml_lock_unlock     os_unfair_lock_unlock
//
//#define GGML_LOCK_INITIALIZER OS_UNFAIR_LOCK_INIT

typedef int ggml_lock_t;

#define ggml_lock_init(x)    UNUSED(x)
#define ggml_lock_destroy(x) UNUSED(x)
#define ggml_lock_lock(x)    UNUSED(x)
#define ggml_lock_unlock(x)  UNUSED(x)

#define GGML_LOCK_INITIALIZER 0

typedef pthread_t ggml_thread_t;

#define ggml_thread_create pthread_create
#define ggml_thread_join   pthread_join

#else

//typedef pthread_spinlock_t ggml_lock_t;

//#define ggml_lock_init(x) pthread_spin_init(x, PTHREAD_PROCESS_PRIVATE)
//#define ggml_lock_destroy pthread_spin_destroy
//#define ggml_lock_lock    pthread_spin_lock
//#define ggml_lock_unlock  pthread_spin_unlock

typedef int ggml_lock_t;

#define ggml_lock_init(x)    UNUSED(x)
#define ggml_lock_destroy(x) UNUSED(x)
#if defined(__x86_64__) || (defined(_MSC_VER) && defined(_M_AMD64))
#define ggml_lock_lock(x)    _mm_pause()
#else
#define ggml_lock_lock(x)    UNUSED(x)
#endif
#define ggml_lock_unlock(x)  UNUSED(x)

#define GGML_LOCK_INITIALIZER 0

typedef pthread_t ggml_thread_t;

#define ggml_thread_create pthread_create
#define ggml_thread_join   pthread_join

#endif

// Android's libc implementation "bionic" does not support setting affinity
#if defined(__linux__) && !defined(__BIONIC__)
static void set_numa_thread_affinity(int thread_n, int n_threads) {
    if (!ggml_is_numa()) {
        return;
    }

    // run this thread on the NUMA node with index thread_n / (threads per node)
    const int node_num = thread_n / ((n_threads + g_state.numa.n_nodes - 1) / g_state.numa.n_nodes);
    struct ggml_numa_node * node = &g_state.numa.nodes[node_num];
    size_t setsize = CPU_ALLOC_SIZE(g_state.numa.total_cpus);

    cpu_set_t * cpus = CPU_ALLOC(g_state.numa.total_cpus);
    CPU_ZERO_S(setsize, cpus);
    for (size_t i = 0; i < node->n_cpus; ++i) {
        CPU_SET_S(node->cpus[i], setsize, cpus);
    }

    int rv = pthread_setaffinity_np(pthread_self(), setsize, cpus);
    if (rv) {
        fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n", strerror(rv));
    }

    CPU_FREE(cpus);
}

static void clear_numa_thread_affinity(void) {
    if (!ggml_is_numa()) {
        return;
    }

    size_t setsize = CPU_ALLOC_SIZE(g_state.numa.total_cpus);

    cpu_set_t * cpus = CPU_ALLOC(g_state.numa.total_cpus);
    CPU_ZERO_S(setsize, cpus);
    for (unsigned i = 0; i < g_state.numa.total_cpus; ++i) {
        CPU_SET_S(i, setsize, cpus);
    }

    int rv = pthread_setaffinity_np(pthread_self(), setsize, cpus);
    if (rv) {
        fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n", strerror(rv));
    }

    CPU_FREE(cpus);
}
#else
// TODO: Windows etc.
// (the linux implementation may also work on BSD, someone should test)
static void set_numa_thread_affinity(int thread_n, int n_threads) { UNUSED(thread_n); UNUSED(n_threads); }
static void clear_numa_thread_affinity(void) {}
#endif

struct ggml_compute_state_shared {
    const struct ggml_cgraph * cgraph;
    const struct ggml_cplan  * cplan;

    int64_t perf_node_start_cycles;
    int64_t perf_node_start_time_us;

    const int n_threads;

    // synchronization primitives
    atomic_int n_active; // num active threads
    atomic_int node_n;   // active graph node

    bool (*abort_callback)(void * data); // abort ggml_graph_compute when true
    void * abort_callback_data;
};

struct ggml_compute_state {
    ggml_thread_t thrd;
    int ith;
    struct ggml_compute_state_shared * shared;
};

static void ggml_graph_compute_perf_stats_node(struct ggml_tensor * node, const struct ggml_compute_state_shared * st) {
    int64_t cycles_cur  = ggml_perf_cycles()  - st->perf_node_start_cycles;
    int64_t time_us_cur = ggml_perf_time_us() - st->perf_node_start_time_us;

    node->perf_runs++;
    node->perf_cycles  += cycles_cur;
    node->perf_time_us += time_us_cur;
}

static thread_ret_t ggml_graph_compute_thread(void * data) {
    struct ggml_compute_state * state = (struct ggml_compute_state *) data;

    const struct ggml_cgraph * cgraph = state->shared->cgraph;
    const struct ggml_cplan  * cplan  = state->shared->cplan;

    const int * n_tasks_arr = cplan->n_tasks;
    const int   n_threads   = state->shared->n_threads;

    set_numa_thread_affinity(state->ith, n_threads);

    int node_n = -1;

    while (true) {
        if (cplan->abort_callback && cplan->abort_callback(cplan->abort_callback_data)) {
            state->shared->node_n += 1;
            return (thread_ret_t) GGML_EXIT_ABORTED;
        }
        if (atomic_fetch_sub(&state->shared->n_active, 1) == 1) {
            // all other threads are finished and spinning
            // do finalize and init here so we don't have to synchronize again
            struct ggml_compute_params params = {
                /*.type  =*/ GGML_TASK_FINALIZE,
                /*.ith   =*/ 0,
                /*.nth   =*/ 0,
                /*.wsize =*/ cplan->work_size,
                /*.wdata =*/ cplan->work_data,
            };

            if (node_n != -1) {
                /* FINALIZE */
                struct ggml_tensor * node = state->shared->cgraph->nodes[node_n];
                if (GGML_OP_HAS_FINALIZE[node->op]) {
                    params.nth = n_tasks_arr[node_n];
                    ggml_compute_forward(&params, node);
                }
                ggml_graph_compute_perf_stats_node(node, state->shared);
            }

            // distribute new work or execute it directly if there is only 1 task
            while (++node_n < cgraph->n_nodes) {
                GGML_PRINT_DEBUG_5("%s: %d/%d\n", __func__, node_n, cgraph->n_nodes);

                struct ggml_tensor * node = cgraph->nodes[node_n];
                const int n_tasks = n_tasks_arr[node_n];

                state->shared->perf_node_start_cycles  = ggml_perf_cycles();
                state->shared->perf_node_start_time_us = ggml_perf_time_us();

                params.nth = n_tasks;

                /* INIT */
                if (GGML_OP_HAS_INIT[node->op]) {
                    params.type = GGML_TASK_INIT;
                    ggml_compute_forward(&params, node);
                }

                if (n_tasks == 1) {
                    // TODO: maybe push node_n to the atomic but if other threads see n_tasks is 1,
                    //       they do something more efficient than spinning (?)
                    params.type = GGML_TASK_COMPUTE;
                    ggml_compute_forward(&params, node);

                    if (GGML_OP_HAS_FINALIZE[node->op]) {
                        params.type = GGML_TASK_FINALIZE;
                        ggml_compute_forward(&params, node);
                    }

                    ggml_graph_compute_perf_stats_node(node, state->shared);
                } else {
                    break;
                }

                if (cplan->abort_callback && cplan->abort_callback(cplan->abort_callback_data)) {
                    break;
                }
            }

            atomic_store(&state->shared->n_active, n_threads);
            atomic_store(&state->shared->node_n,   node_n);
        } else {
            // wait for other threads to finish
            const int last = node_n;
            while (true) {
                // TODO: this sched_yield can have significant impact on the performance - either positive or negative
                //       depending on the workload and the operating system.
                //       since it is not clear what is the best approach, it should potentially become user-configurable
                //       ref: https://github.com/ggerganov/ggml/issues/291
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
                sched_yield();
#endif

                node_n = atomic_load(&state->shared->node_n);
                if (node_n != last) break;
            }
        }

        // check if we should stop
        if (node_n >= cgraph->n_nodes) break;

        /* COMPUTE */
        struct ggml_tensor * node = cgraph->nodes[node_n];
        const int n_tasks = n_tasks_arr[node_n];

        struct ggml_compute_params params = {
            /*.type  =*/ GGML_TASK_COMPUTE,
            /*.ith   =*/ state->ith,
            /*.nth   =*/ n_tasks,
            /*.wsize =*/ cplan->work_size,
            /*.wdata =*/ cplan->work_data,
        };

        if (state->ith < n_tasks) {
            ggml_compute_forward(&params, node);
        }
    }

    return GGML_EXIT_SUCCESS;
}
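
// coordination protocol of ggml_graph_compute_thread, in brief: n_active
// counts the threads still working on the current node. the last thread to
// decrement it (atomic_fetch_sub returning 1) runs FINALIZE for the node,
// then advances through the graph running INIT (and, for single-task nodes,
// COMPUTE + FINALIZE as well) until it reaches a node that needs more than
// one task; it then resets n_active and publishes the new node_n, which the
// spinning threads pick up and compute in parallel.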

struct ggml_cplan ggml_graph_plan(struct ggml_cgraph * cgraph, int n_threads) {
    if (n_threads <= 0) {
        n_threads = GGML_DEFAULT_N_THREADS;
    }

    size_t work_size = 0;

    struct ggml_cplan cplan;
    memset(&cplan, 0, sizeof(struct ggml_cplan));

    // thread scheduling for the different operations + work buffer size estimation
    for (int i = 0; i < cgraph->n_nodes; i++) {
        int n_tasks = 1;

        struct ggml_tensor * node = cgraph->nodes[i];

        switch (node->op) {
            case GGML_OP_CPY:
            case GGML_OP_DUP:
                {
                    n_tasks = n_threads;

                    size_t cur = 0;
                    if (ggml_is_quantized(node->type)) {
                        cur = ggml_type_size(GGML_TYPE_F32) * node->ne[0] * n_tasks;
                    }

                    work_size = MAX(work_size, cur);
                } break;
            case GGML_OP_ADD:
            case GGML_OP_ADD1:
                {
                    n_tasks = n_threads;

                    size_t cur = 0;
                    if (ggml_is_quantized(node->src[0]->type)) {
                        cur = ggml_type_size(GGML_TYPE_F32) * node->src[0]->ne[0] * n_tasks;
                    }

                    work_size = MAX(work_size, cur);
                } break;
            case GGML_OP_ACC:
                {
                    n_tasks = n_threads;

                    size_t cur = 0;
                    if (ggml_is_quantized(node->src[0]->type)) {
                        cur = ggml_type_size(GGML_TYPE_F32) * node->src[1]->ne[0] * n_tasks;
                    }

                    work_size = MAX(work_size, cur);
                } break;
            case GGML_OP_SUB:
            case GGML_OP_DIV:
            case GGML_OP_SQR:
            case GGML_OP_SQRT:
            case GGML_OP_LOG:
            case GGML_OP_SUM:
            case GGML_OP_SUM_ROWS:
            case GGML_OP_MEAN:
            case GGML_OP_ARGMAX:
            case GGML_OP_REPEAT:
            case GGML_OP_REPEAT_BACK:
                {
                    n_tasks = 1;
                } break;
            case GGML_OP_UNARY:
                {
                    switch (ggml_get_unary_op(node)) {
                        case GGML_UNARY_OP_ABS:
                        case GGML_UNARY_OP_SGN:
                        case GGML_UNARY_OP_NEG:
                        case GGML_UNARY_OP_STEP:
                        case GGML_UNARY_OP_TANH:
                        case GGML_UNARY_OP_ELU:
                        case GGML_UNARY_OP_RELU:
                            {
                                n_tasks = 1;
                            } break;
                        case GGML_UNARY_OP_GELU:
                        case GGML_UNARY_OP_GELU_QUICK:
                        case GGML_UNARY_OP_SILU:
                            {
                                n_tasks = n_threads;
                            } break;
                    }
                } break;
            case GGML_OP_SILU_BACK:
            case GGML_OP_MUL:
            case GGML_OP_NORM:
            case GGML_OP_RMS_NORM:
            case GGML_OP_RMS_NORM_BACK:
            case GGML_OP_GROUP_NORM:
                {
                    n_tasks = n_threads;
                } break;
            case GGML_OP_CONCAT:
            case GGML_OP_MUL_MAT:
                {
                    n_tasks = n_threads;

                    // TODO: use different scheduling for different matrix sizes
                    //const int nr0 = ggml_nrows(node->src[0]);
                    //const int nr1 = ggml_nrows(node->src[1]);

                    //n_tasks = MIN(n_threads, MAX(1, nr0/128));
                    //printf("nr0 = %8d, nr1 = %8d, nr0*nr1 = %8d, n_tasks=%d\n", nr0, nr1, nr0*nr1, n_tasks);

                    size_t cur = 0;
                    const enum ggml_type vec_dot_type = type_traits[node->src[0]->type].vec_dot_type;

#if defined(GGML_USE_CUBLAS)
                    if (ggml_cuda_can_mul_mat(node->src[0], node->src[1], node)) {
                        n_tasks = 1; // TODO: this actually is doing nothing
                                     //       the threads are still spinning
                    } else
#elif defined(GGML_USE_CLBLAST)
                    if (ggml_cl_can_mul_mat(node->src[0], node->src[1], node)) {
                        n_tasks = 1; // TODO: this actually is doing nothing
                                     //       the threads are still spinning
                        cur = ggml_cl_mul_mat_get_wsize(node->src[0], node->src[1], node);
                    } else
#endif
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
                    if (ggml_compute_forward_mul_mat_use_blas(node->src[0], node->src[1], node)) {
                        n_tasks = 1; // TODO: this actually is doing nothing
                                     //       the threads are still spinning
                        if (node->src[0]->type != GGML_TYPE_F32) {
                            // here we need memory just for single 2D matrix from src0
                            cur = ggml_type_size(GGML_TYPE_F32)*(node->src[0]->ne[0]*node->src[0]->ne[1]);
                        }
                    } else
#endif
                    if (node->src[1]->type != vec_dot_type) {
                        cur = ggml_type_size(vec_dot_type)*ggml_nelements(node->src[1])/ggml_blck_size(vec_dot_type);
                    } else {
                        cur = 0;
                    }

                    work_size = MAX(work_size, cur);
                } break;
            case GGML_OP_OUT_PROD:
                {
                    n_tasks = n_threads;

                    size_t cur = 0;
                    if (ggml_is_quantized(node->src[0]->type)) {
                        cur = ggml_type_size(GGML_TYPE_F32) * node->src[0]->ne[0] * n_tasks;
                    }

                    work_size = MAX(work_size, cur);
                } break;
            case GGML_OP_SCALE:
                {
                    n_tasks = 1;
                } break;
            case GGML_OP_SET:
            case GGML_OP_CONT:
            case GGML_OP_RESHAPE:
            case GGML_OP_VIEW:
            case GGML_OP_PERMUTE:
            case GGML_OP_TRANSPOSE:
            case GGML_OP_GET_ROWS:
            case GGML_OP_GET_ROWS_BACK:
            case GGML_OP_DIAG:
                {
                    n_tasks = 1;
                } break;
            case GGML_OP_DIAG_MASK_ZERO:
            case GGML_OP_DIAG_MASK_INF:
            case GGML_OP_SOFT_MAX:
            case GGML_OP_SOFT_MAX_BACK:
            case GGML_OP_ROPE:
            case GGML_OP_ROPE_BACK:
            case GGML_OP_ADD_REL_POS:
                {
                    n_tasks = n_threads;
                } break;
            case GGML_OP_ALIBI:
                {
                    n_tasks = 1; //TODO
                } break;
            case GGML_OP_CLAMP:
                {
                    n_tasks = 1; //TODO
                } break;
            case GGML_OP_CONV_1D:
                {
                    n_tasks = n_threads;

                    GGML_ASSERT(node->src[0]->ne[3] == 1);
                    GGML_ASSERT(node->src[1]->ne[2] == 1);
                    GGML_ASSERT(node->src[1]->ne[3] == 1);

                    size_t cur = 0;
                    const int nk = node->src[0]->ne[0];

                    if (node->src[0]->type == GGML_TYPE_F16 &&
                        node->src[1]->type == GGML_TYPE_F32) {
                        cur = sizeof(ggml_fp16_t)*(
                            nk*ggml_up32(node->src[0]->ne[1])*node->src[0]->ne[2] +
                            ( 2*(nk/2) + node->src[1]->ne[0])*node->src[1]->ne[1]
                        );
                    } else if (node->src[0]->type == GGML_TYPE_F32 &&
                               node->src[1]->type == GGML_TYPE_F32) {
                        cur = sizeof(float)*(
                            nk*ggml_up32(node->src[0]->ne[1])*node->src[0]->ne[2] +
                            ( 2*(nk/2) + node->src[1]->ne[0])*node->src[1]->ne[1]
                        );
                    } else {
                        GGML_ASSERT(false);
                    }

                    work_size = MAX(work_size, cur);
                } break;
            case GGML_OP_CONV_2D:
                {
                    n_tasks = n_threads;

                    const int64_t ne00 = node->src[0]->ne[0]; // W
                    const int64_t ne01 = node->src[0]->ne[1]; // H
                    const int64_t ne02 = node->src[0]->ne[2]; // C
                    const int64_t ne03 = node->src[0]->ne[3]; // N

                    const int64_t ne10 = node->src[1]->ne[0]; // W
                    const int64_t ne11 = node->src[1]->ne[1]; // H
                    const int64_t ne12 = node->src[1]->ne[2]; // C

                    const int64_t ne0 = node->ne[0];
                    const int64_t ne1 = node->ne[1];
                    const int64_t ne2 = node->ne[2];
                    const int64_t nk  = ne00*ne01;
                    const int64_t ew0 = nk * ne02;

                    UNUSED(ne03);
                    UNUSED(ne2);

                    size_t cur = 0;

                    if (node->src[0]->type == GGML_TYPE_F16 &&
                        node->src[1]->type == GGML_TYPE_F32) {
                        cur = sizeof(ggml_fp16_t)*(ne0*ne1*ew0);
                    } else if (node->src[0]->type == GGML_TYPE_F32 &&
                               node->src[1]->type == GGML_TYPE_F32) {
                        cur = sizeof(float)* (ne10*ne11*ne12);
                    } else {
                        GGML_ASSERT(false);
                    }

                    work_size = MAX(work_size, cur);
                } break;
            case GGML_OP_CONV_TRANSPOSE_2D:
                {
                    n_tasks = n_threads;

                    const int64_t ne00 = node->src[0]->ne[0]; // W
                    const int64_t ne01 = node->src[0]->ne[1]; // H
                    const int64_t ne02 = node->src[0]->ne[2]; // Channels Out
                    const int64_t ne03 = node->src[0]->ne[3]; // Channels In

                    const int64_t ne10 = node->src[1]->ne[0]; // W
                    const int64_t ne11 = node->src[1]->ne[1]; // H
                    const int64_t ne12 = node->src[1]->ne[2]; // Channels In

                    size_t cur = 0;
                    cur += sizeof(ggml_fp16_t)*ne00*ne01*ne02*ne03;
                    cur += sizeof(ggml_fp16_t)*ne10*ne11*ne12;

                    work_size = MAX(work_size, cur);
                } break;
            case GGML_OP_POOL_1D:
            case GGML_OP_POOL_2D:
                {
                    n_tasks = 1;
                } break;
            case GGML_OP_UPSCALE:
                {
                    n_tasks = n_threads;
                } break;
            case GGML_OP_FLASH_ATTN:
                {
                    n_tasks = n_threads;

                    size_t cur = 0;

                    const int64_t ne11 = ggml_up(node->src[1]->ne[1], GGML_SOFT_MAX_UNROLL);

                    if (node->src[1]->type == GGML_TYPE_F32) {
                        cur  = sizeof(float)*ne11*n_tasks; // TODO: this can become (n_tasks-1)
                        cur += sizeof(float)*ne11*n_tasks; // this is overestimated by x2
                    }

                    if (node->src[1]->type == GGML_TYPE_F16) {
                        cur  = sizeof(float)*ne11*n_tasks; // TODO: this can become (n_tasks-1)
                        cur += sizeof(float)*ne11*n_tasks; // this is overestimated by x2
                    }

                    work_size = MAX(work_size, cur);
                } break;
            case GGML_OP_FLASH_FF:
                {
                    n_tasks = n_threads;

                    size_t cur = 0;

                    if (node->src[1]->type == GGML_TYPE_F32) {
                        cur  = sizeof(float)*node->src[1]->ne[1]*n_tasks; // TODO: this can become (n_tasks-1)
                        cur += sizeof(float)*node->src[1]->ne[1]*n_tasks; // this is overestimated by x2
                    }

                    if (node->src[1]->type == GGML_TYPE_F16) {
                        cur  = sizeof(float)*node->src[1]->ne[1]*n_tasks; // TODO: this can become (n_tasks-1)
                        cur += sizeof(float)*node->src[1]->ne[1]*n_tasks; // this is overestimated by x2
                    }

                    work_size = MAX(work_size, cur);
                } break;
            case GGML_OP_FLASH_ATTN_BACK:
                {
                    n_tasks = n_threads;

                    size_t cur = 0;

                    const int64_t    D = node->src[0]->ne[0];
                    const int64_t ne11 = ggml_up(node->src[1]->ne[1], GGML_SOFT_MAX_UNROLL);
                    const int64_t mxDn = MAX(D, ne11) * 2; // *2 because of S and SM in ggml_compute_forward_flash_attn_back

                    if (node->src[1]->type == GGML_TYPE_F32) {
                        cur  = sizeof(float)*mxDn*n_tasks; // TODO: this can become (n_tasks-1)
                        cur += sizeof(float)*mxDn*n_tasks; // this is overestimated by x2
                    }

                    if (node->src[1]->type == GGML_TYPE_F16) {
                        cur  = sizeof(float)*mxDn*n_tasks; // TODO: this can become (n_tasks-1)
                        cur += sizeof(float)*mxDn*n_tasks; // this is overestimated by x2
                    }

                    work_size = MAX(work_size, cur);
                } break;
            case GGML_OP_WIN_PART:
            case GGML_OP_WIN_UNPART:
            case GGML_OP_GET_REL_POS:
            case GGML_OP_MAP_UNARY:
            case GGML_OP_MAP_BINARY:
            case GGML_OP_MAP_CUSTOM1_F32:
            case GGML_OP_MAP_CUSTOM2_F32:
            case GGML_OP_MAP_CUSTOM3_F32:
                {
                    n_tasks = 1;
                } break;
            case GGML_OP_MAP_CUSTOM1:
                {
                    struct ggml_map_custom1_op_params * p = (struct ggml_map_custom1_op_params *) node->op_params;
                    if (p->n_tasks == GGML_N_TASKS_MAX) {
                        n_tasks = n_threads;
                    } else {
                        n_tasks = MIN(p->n_tasks, n_threads);
                    }
                } break;
            case GGML_OP_MAP_CUSTOM2:
                {
                    struct ggml_map_custom2_op_params * p = (struct ggml_map_custom2_op_params *) node->op_params;
                    if (p->n_tasks == GGML_N_TASKS_MAX) {
                        n_tasks = n_threads;
                    } else {
                        n_tasks = MIN(p->n_tasks, n_threads);
                    }
                } break;
            case GGML_OP_MAP_CUSTOM3:
                {
                    struct ggml_map_custom3_op_params * p = (struct ggml_map_custom3_op_params *) node->op_params;
                    if (p->n_tasks == GGML_N_TASKS_MAX) {
                        n_tasks = n_threads;
                    } else {
                        n_tasks = MIN(p->n_tasks, n_threads);
                    }
                } break;
            case GGML_OP_CROSS_ENTROPY_LOSS:
                {
                    n_tasks = n_threads;

                    size_t cur = ggml_type_size(node->type)*(n_tasks + node->src[0]->ne[0]*n_tasks);

                    work_size = MAX(work_size, cur);
                } break;
            case GGML_OP_CROSS_ENTROPY_LOSS_BACK:
  15097. {
  15098. n_tasks = n_threads;
  15099. } break;
  15100. case GGML_OP_NONE:
  15101. {
  15102. n_tasks = 1;
  15103. } break;
  15104. case GGML_OP_COUNT:
  15105. {
  15106. GGML_ASSERT(false);
  15107. } break;
  15108. }
  15109. cplan.n_tasks[i] = n_tasks;
  15110. }
  15111. if (work_size > 0) {
  15112. work_size += CACHE_LINE_SIZE*(n_threads - 1);
  15113. }
  15114. cplan.n_threads = n_threads;
  15115. cplan.work_size = work_size;
  15116. cplan.work_data = NULL;
  15117. return cplan;
  15118. }
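
// computes the graph nodes using the given plan
//
// illustrative usage sketch (assuming a caller-managed work buffer; the
// malloc/free here is just one option - see ggml_graph_compute_with_ctx()
// below for a context-managed buffer):
//
//   struct ggml_cplan cplan = ggml_graph_plan(graph, n_threads);
//   uint8_t * work = malloc(cplan.work_size);
//   cplan.work_data = work; // required whenever work_size > 0
//   ggml_graph_compute(graph, &cplan);
//   free(work);
//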
int ggml_graph_compute(struct ggml_cgraph * cgraph, struct ggml_cplan * cplan) {
    {
        GGML_ASSERT(cplan);
        GGML_ASSERT(cplan->n_threads > 0);

        if (cplan->work_size > 0) {
            GGML_ASSERT(cplan->work_data);
        }

        for (int i = 0; i < cgraph->n_nodes; ++i) {
            if (cgraph->nodes[i]->op != GGML_OP_NONE) {
                GGML_ASSERT(cplan->n_tasks[i] > 0);
            }
        }
    }

    const int n_threads = cplan->n_threads;

    struct ggml_compute_state_shared state_shared = {
        /*.cgraph                  =*/ cgraph,
        /*.cgraph_plan             =*/ cplan,
        /*.perf_node_start_cycles  =*/ 0,
        /*.perf_node_start_time_us =*/ 0,
        /*.n_threads               =*/ n_threads,
        /*.n_active                =*/ n_threads,
        /*.node_n                  =*/ -1,
        /*.abort_callback          =*/ NULL,
        /*.abort_callback_data     =*/ NULL,
    };
    struct ggml_compute_state * workers = alloca(sizeof(struct ggml_compute_state)*n_threads);

    // create thread pool
    if (n_threads > 1) {
        for (int j = 1; j < n_threads; ++j) {
            workers[j] = (struct ggml_compute_state) {
                .thrd   = 0,
                .ith    = j,
                .shared = &state_shared,
            };

            const int rc = ggml_thread_create(&workers[j].thrd, NULL, ggml_graph_compute_thread, &workers[j]);
            GGML_ASSERT(rc == 0);
            UNUSED(rc);
        }
    }

    workers[0].ith = 0;
    workers[0].shared = &state_shared;

    const int64_t perf_start_cycles  = ggml_perf_cycles();
    const int64_t perf_start_time_us = ggml_perf_time_us();

    // this is a work thread too
    int compute_status = (size_t) ggml_graph_compute_thread(&workers[0]);

    // don't leave affinity set on the main thread
    clear_numa_thread_affinity();

    // join or kill thread pool
    if (n_threads > 1) {
        for (int j = 1; j < n_threads; j++) {
            const int rc = ggml_thread_join(workers[j].thrd, NULL);
            GGML_ASSERT(rc == 0);
        }
    }

    // performance stats (graph)
    {
        int64_t perf_cycles_cur  = ggml_perf_cycles()  - perf_start_cycles;
        int64_t perf_time_us_cur = ggml_perf_time_us() - perf_start_time_us;

        cgraph->perf_runs++;
        cgraph->perf_cycles  += perf_cycles_cur;
        cgraph->perf_time_us += perf_time_us_cur;

        GGML_PRINT_DEBUG("%s: perf (%d) - cpu = %.3f / %.3f ms, wall = %.3f / %.3f ms\n",
                __func__, cgraph->perf_runs,
                (double) perf_cycles_cur      / (double) ggml_cycles_per_ms(),
                (double) cgraph->perf_cycles  / (double) ggml_cycles_per_ms() / (double) cgraph->perf_runs,
                (double) perf_time_us_cur     / 1000.0,
                (double) cgraph->perf_time_us / 1000.0 / cgraph->perf_runs);
    }

    return compute_status;
}
void ggml_graph_reset(struct ggml_cgraph * cgraph) {
    for (int i = 0; i < cgraph->n_nodes; i++) {
        struct ggml_tensor * grad = cgraph->grads[i];

        if (grad) {
            ggml_set_zero(grad);
        }
    }
}
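
// convenience wrapper around ggml_graph_plan() + ggml_graph_compute(): the
// work buffer is allocated as a GGML_OBJECT_WORK_BUFFER object inside ctx,
// so the caller does not have to manage it explicitly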
void ggml_graph_compute_with_ctx(struct ggml_context * ctx, struct ggml_cgraph * cgraph, int n_threads) {
    struct ggml_cplan cplan = ggml_graph_plan(cgraph, n_threads);

    struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_WORK_BUFFER, cplan.work_size);

    cplan.work_data = (uint8_t *)ctx->mem_buffer + obj->offs;

    ggml_graph_compute(cgraph, &cplan);
}
struct ggml_tensor * ggml_graph_get_tensor(struct ggml_cgraph * cgraph, const char * name) {
    for (int i = 0; i < cgraph->n_leafs; i++) {
        struct ggml_tensor * leaf = cgraph->leafs[i];

        if (strcmp(leaf->name, name) == 0) {
            return leaf;
        }
    }

    for (int i = 0; i < cgraph->n_nodes; i++) {
        struct ggml_tensor * node = cgraph->nodes[i];

        if (strcmp(node->name, name) == 0) {
            return node;
        }
    }

    return NULL;
}
static void ggml_graph_export_leaf(const struct ggml_tensor * tensor, FILE * fout) {
    const int64_t * ne = tensor->ne;
    const size_t  * nb = tensor->nb;

    fprintf(fout, "%-6s %-12s %8d %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %16zu %16zu %16zu %16zu %16p %32s\n",
            ggml_type_name(tensor->type),
            ggml_op_name  (tensor->op),
            tensor->n_dims,
            ne[0], ne[1], ne[2], ne[3],
            nb[0], nb[1], nb[2], nb[3],
            tensor->data,
            tensor->name);
}

static void ggml_graph_export_node(const struct ggml_tensor * tensor, const char * arg, FILE * fout) {
    const int64_t * ne = tensor->ne;
    const size_t  * nb = tensor->nb;

    fprintf(fout, "%-6s %-6s %-12s %8d %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %16zu %16zu %16zu %16zu %16p %32s\n",
            arg,
            ggml_type_name(tensor->type),
            ggml_op_name  (tensor->op),
            tensor->n_dims,
            ne[0], ne[1], ne[2], ne[3],
            nb[0], nb[1], nb[2], nb[3],
            tensor->data,
            tensor->name);
}
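
// binary layout written below (and read back by ggml_graph_import()):
//
//   header : magic, version, n_leafs, n_nodes (uint32) + eval size (uint64)
//   leafs  : type, op, n_dims, ne/nb per dim, name, op_params, raw data
//   nodes  : same metadata, followed by GGML_MAX_SRC int32 source indices
//            (leaf index, GGML_MAX_NODES + node index, or -1 if unused)
//
// a human-readable summary of the graph is also printed to stdout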
void ggml_graph_export(const struct ggml_cgraph * cgraph, const char * fname) {
    uint64_t size_eval = 0;

    // compute size of intermediate results
    // TODO: does not take into account scratch buffers !!!!
    for (int i = 0; i < cgraph->n_nodes; ++i) {
        size_eval += ggml_nbytes_pad(cgraph->nodes[i]);
    }

    // print
    {
        FILE * fout = stdout;

        fprintf(fout, "\n");
        fprintf(fout, "%-16s %8x\n",         "magic",   GGML_FILE_MAGIC);
        fprintf(fout, "%-16s %8d\n",         "version", GGML_FILE_VERSION);
        fprintf(fout, "%-16s %8d\n",         "leafs",   cgraph->n_leafs);
        fprintf(fout, "%-16s %8d\n",         "nodes",   cgraph->n_nodes);
        fprintf(fout, "%-16s %" PRIu64 "\n", "eval",    size_eval);

        // header
        fprintf(fout, "\n");
        fprintf(fout, "%-6s %-12s %8s %8s %8s %8s %8s %16s %16s %16s %16s %16s %16s\n",
                "TYPE", "OP", "NDIMS", "NE0", "NE1", "NE2", "NE3", "NB0", "NB1", "NB2", "NB3", "DATA", "NAME");

        for (int i = 0; i < cgraph->n_leafs; ++i) {
            ggml_graph_export_leaf(cgraph->leafs[i], fout);

            GGML_ASSERT(cgraph->leafs[i]->op     == GGML_OP_NONE);
            GGML_ASSERT(cgraph->leafs[i]->src[0] == NULL);
            GGML_ASSERT(cgraph->leafs[i]->src[1] == NULL);
        }

        // header
        fprintf(fout, "\n");
        fprintf(fout, "%-6s %-6s %-12s %8s %8s %8s %8s %8s %16s %16s %16s %16s %8s %16s %16s\n",
                "ARG", "TYPE", "OP", "NDIMS", "NE0", "NE1", "NE2", "NE3", "NB0", "NB1", "NB2", "NB3", "NTASKS", "DATA", "NAME");

        for (int i = 0; i < cgraph->n_nodes; ++i) {
            ggml_graph_export_node(cgraph->nodes[i], "DST", fout);

            for (int j = 0; j < GGML_MAX_SRC; ++j) {
                if (cgraph->nodes[i]->src[j]) {
                    ggml_graph_export_node(cgraph->nodes[i]->src[j], "SRC", fout);
                }
            }

            fprintf(fout, "\n");
        }

        fprintf(fout, "\n");
    }

    // write binary data
    {
        FILE * fout = fopen(fname, "wb");

        if (!fout) {
            fprintf(stderr, "%s: failed to open %s\n", __func__, fname);
            return;
        }

        // header
        {
            const uint32_t magic   = GGML_FILE_MAGIC;
            const uint32_t version = GGML_FILE_VERSION;
            const uint32_t n_leafs = cgraph->n_leafs;
            const uint32_t nodes   = cgraph->n_nodes;

            fwrite(&magic,     sizeof(uint32_t), 1, fout);
            fwrite(&version,   sizeof(uint32_t), 1, fout);
            fwrite(&n_leafs,   sizeof(uint32_t), 1, fout);
            fwrite(&nodes,     sizeof(uint32_t), 1, fout);
            fwrite(&size_eval, sizeof(uint64_t), 1, fout);
        }

        // leafs
        {
            for (int i = 0; i < cgraph->n_leafs; ++i) {
                const struct ggml_tensor * tensor = cgraph->leafs[i];

                const uint32_t type   = tensor->type;
                const uint32_t op     = tensor->op;
                const uint32_t n_dims = tensor->n_dims;

                fwrite(&type,   sizeof(uint32_t), 1, fout);
                fwrite(&op,     sizeof(uint32_t), 1, fout);
                fwrite(&n_dims, sizeof(uint32_t), 1, fout);

                for (int j = 0; j < GGML_MAX_DIMS; ++j) {
                    const uint64_t ne = tensor->ne[j];
                    const uint64_t nb = tensor->nb[j];

                    fwrite(&ne, sizeof(uint64_t), 1, fout);
                    fwrite(&nb, sizeof(uint64_t), 1, fout);
                }

                fwrite(tensor->name,      sizeof(char), GGML_MAX_NAME,      fout);
                fwrite(tensor->op_params, sizeof(char), GGML_MAX_OP_PARAMS, fout);

                // dump the data
                // TODO: pad this to 32 byte boundary
                {
                    const size_t size = ggml_nbytes(tensor);

                    fwrite(tensor->data, sizeof(char), size, fout);
                }
            }
        }

        // nodes
        {
            for (int i = 0; i < cgraph->n_nodes; ++i) {
                const struct ggml_tensor * tensor = cgraph->nodes[i];

                const uint32_t type   = tensor->type;
                const uint32_t op     = tensor->op;
                const uint32_t n_dims = tensor->n_dims;

                fwrite(&type,   sizeof(uint32_t), 1, fout);
                fwrite(&op,     sizeof(uint32_t), 1, fout);
                fwrite(&n_dims, sizeof(uint32_t), 1, fout);

                for (int j = 0; j < GGML_MAX_DIMS; ++j) {
                    const uint64_t ne = tensor->ne[j];
                    const uint64_t nb = tensor->nb[j];

                    fwrite(&ne, sizeof(uint64_t), 1, fout);
                    fwrite(&nb, sizeof(uint64_t), 1, fout);
                }

                fwrite(tensor->name,      sizeof(char), GGML_MAX_NAME,      fout);
                fwrite(tensor->op_params, sizeof(char), GGML_MAX_OP_PARAMS, fout);

                // output the op arguments
                {
                    struct ggml_tensor * args[GGML_MAX_SRC] = { NULL };

                    for (int j = 0; j < GGML_MAX_SRC; ++j) {
                        args[j] = tensor->src[j];
                    }

                    for (int j = 0; j < GGML_MAX_SRC; ++j) {
                        if (args[j]) {
                            int32_t idx = -1;

                            // check if leaf
                            {
                                for (int k = 0; k < cgraph->n_leafs; ++k) {
                                    if (args[j] == cgraph->leafs[k]) {
                                        idx = k;
                                        break;
                                    }
                                }
                            }

                            // check if node
                            if (idx == -1) {
                                for (int k = 0; k < cgraph->n_nodes; ++k) {
                                    if (args[j] == cgraph->nodes[k]) {
                                        idx = GGML_MAX_NODES + k;
                                        break;
                                    }
                                }
                            }

                            if (idx == -1) {
                                fprintf(stderr, "%s: failed to find tensor, arg = %d, node = %d\n", __func__, j, i);
                                return;
                            }

                            fwrite(&idx, sizeof(int32_t), 1, fout);
                        } else {
                            const int32_t nul = -1;

                            fwrite(&nul, sizeof(int32_t), 1, fout);
                        }
                    }
                }
            }
        }

        fclose(fout);
    }
}
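
// counterpart of ggml_graph_export(): *ctx_data receives a context holding
// the raw file contents and *ctx_eval a context holding the reconstructed
// tensors; both contexts are created here and owned by the caller afterwards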
struct ggml_cgraph ggml_graph_import(const char * fname, struct ggml_context ** ctx_data, struct ggml_context ** ctx_eval) {
    assert(*ctx_data == NULL);
    assert(*ctx_eval == NULL);

    struct ggml_cgraph result = { 0 };

    struct ggml_tensor * data = NULL;

    // read file into data
    {
        FILE * fin = fopen(fname, "rb");
        if (!fin) {
            fprintf(stderr, "%s: failed to open %s\n", __func__, fname);
            return result;
        }

        size_t fsize = 0;

        fseek(fin, 0, SEEK_END);
        fsize = ftell(fin);
        fseek(fin, 0, SEEK_SET);

        // create the data context
        {
            const size_t overhead = 1*ggml_tensor_overhead();

            struct ggml_init_params params = {
                .mem_size   = fsize + overhead,
                .mem_buffer = NULL,
                .no_alloc   = false,
            };

            *ctx_data = ggml_init(params);

            if (!*ctx_data) {
                fprintf(stderr, "%s: failed to create ggml context\n", __func__);
                fclose(fin);
                return result;
            }
        }

        data = ggml_new_tensor_1d(*ctx_data, GGML_TYPE_I8, fsize);

        {
            const size_t ret = fread(data->data, sizeof(char), fsize, fin);
            if (ret != fsize) {
                fprintf(stderr, "%s: failed to read %s\n", __func__, fname);
                fclose(fin);
                return result;
            }
        }

        fclose(fin);
    }

    // populate result
    {
        char * ptr = (char *) data->data;

        const uint32_t magic = *(const uint32_t *) ptr; ptr += sizeof(magic);

        if (magic != GGML_FILE_MAGIC) {
            fprintf(stderr, "%s: invalid magic number, got %08x\n", __func__, magic);
            return result;
        }

        const uint32_t version = *(const uint32_t *) ptr; ptr += sizeof(version);

        if (version != GGML_FILE_VERSION) {
            fprintf(stderr, "%s: invalid version number\n", __func__);
            return result;
        }

        const uint32_t n_leafs   = *(const uint32_t *) ptr; ptr += sizeof(n_leafs);
        const uint32_t n_nodes   = *(const uint32_t *) ptr; ptr += sizeof(n_nodes);
        const uint64_t size_eval = *(const uint64_t *) ptr; ptr += sizeof(size_eval);

        result.n_leafs = n_leafs;
        result.n_nodes = n_nodes;

        // create the data context
        {
            const size_t overhead = (n_leafs + n_nodes)*ggml_tensor_overhead();

            struct ggml_init_params params = {
                .mem_size   = size_eval + overhead,
                .mem_buffer = NULL,
                .no_alloc   = true,
            };

            *ctx_eval = ggml_init(params);

            if (!*ctx_eval) {
                fprintf(stderr, "%s: failed to create ggml context\n", __func__);
                return result;
            }
        }

        // leafs
        {
            uint32_t type;
            uint32_t op;
            uint32_t n_dims;

            for (uint32_t i = 0; i < n_leafs; ++i) {
                type   = *(const uint32_t *) ptr; ptr += sizeof(type);
                op     = *(const uint32_t *) ptr; ptr += sizeof(op);
                n_dims = *(const uint32_t *) ptr; ptr += sizeof(n_dims);

                int64_t ne[GGML_MAX_DIMS];
                size_t  nb[GGML_MAX_DIMS];

                for (int j = 0; j < GGML_MAX_DIMS; ++j) {
                    uint64_t ne_cur;
                    uint64_t nb_cur;

                    ne_cur = *(const uint64_t *) ptr; ptr += sizeof(ne_cur);
                    nb_cur = *(const uint64_t *) ptr; ptr += sizeof(nb_cur);

                    ne[j] = ne_cur;
                    nb[j] = nb_cur;
                }

                struct ggml_tensor * tensor = ggml_new_tensor(*ctx_eval, (enum ggml_type) type, n_dims, ne);

                tensor->op = (enum ggml_op) op;

                memcpy(tensor->name,      ptr, GGML_MAX_NAME);      ptr += GGML_MAX_NAME;
                memcpy(tensor->op_params, ptr, GGML_MAX_OP_PARAMS); ptr += GGML_MAX_OP_PARAMS;

                tensor->data = (void *) ptr;

                for (int j = 0; j < GGML_MAX_DIMS; ++j) {
                    tensor->nb[j] = nb[j];
                }

                result.leafs[i] = tensor;

                ptr += ggml_nbytes(tensor);

                fprintf(stderr, "%s: loaded leaf %d: '%16s', %3d dims, %9zu bytes\n", __func__, i, tensor->name, n_dims, ggml_nbytes(tensor));
            }
        }

        ggml_set_no_alloc(*ctx_eval, false);

        // nodes
        {
            uint32_t type;
            uint32_t op;
            uint32_t n_dims;

            for (uint32_t i = 0; i < n_nodes; ++i) {
                type   = *(const uint32_t *) ptr; ptr += sizeof(type);
                op     = *(const uint32_t *) ptr; ptr += sizeof(op);
                n_dims = *(const uint32_t *) ptr; ptr += sizeof(n_dims);

                enum ggml_op eop = (enum ggml_op) op;

                int64_t ne[GGML_MAX_DIMS];
                size_t  nb[GGML_MAX_DIMS];

                for (int j = 0; j < GGML_MAX_DIMS; ++j) {
                    uint64_t ne_cur;
                    uint64_t nb_cur;

                    ne_cur = *(const uint64_t *) ptr; ptr += sizeof(ne_cur);
                    nb_cur = *(const uint64_t *) ptr; ptr += sizeof(nb_cur);

                    ne[j] = ne_cur;
                    nb[j] = nb_cur;
                }

                const char * ptr_name      = ptr; ptr += GGML_MAX_NAME;
                const char * ptr_op_params = ptr; ptr += GGML_MAX_OP_PARAMS;

                const int32_t * ptr_arg_idx = (const int32_t *) ptr; ptr += GGML_MAX_SRC*sizeof(int32_t);

                struct ggml_tensor * args[GGML_MAX_SRC] = { NULL };

                // parse args
                for (int j = 0; j < GGML_MAX_SRC; ++j) {
                    const int32_t arg_idx = ptr_arg_idx[j];

                    if (arg_idx == -1) {
                        continue;
                    }

                    if (arg_idx < GGML_MAX_NODES) {
                        args[j] = result.leafs[arg_idx];
                    } else {
                        args[j] = result.nodes[arg_idx - GGML_MAX_NODES];
                    }
                }

                // create the tensor
                // "view" operations are handled differently
                // TODO: handle inplace ops - currently a copy is always made

                struct ggml_tensor * tensor = NULL;

                switch (eop) {
                    // TODO: implement other view ops
                    case GGML_OP_RESHAPE:
                        {
                            tensor = ggml_reshape_4d(*ctx_eval, args[0], ne[0], ne[1], ne[2], ne[3]);
                        } break;
                    case GGML_OP_VIEW:
                        {
                            tensor = ggml_view_4d(*ctx_eval, args[0], ne[0], ne[1], ne[2], ne[3], 0, 0, 0, 0);

                            size_t offs;
                            memcpy(&offs, ptr_op_params, sizeof(offs));

                            tensor->data = ((char *) tensor->data) + offs;
                        } break;
                    case GGML_OP_TRANSPOSE:
                        {
                            tensor = ggml_transpose(*ctx_eval, args[0]);
                        } break;
                    case GGML_OP_PERMUTE:
                        {
                            tensor = ggml_view_4d(*ctx_eval, args[0], ne[0], ne[1], ne[2], ne[3], 0, 0, 0, 0);
                        } break;
                    default:
                        {
                            tensor = ggml_new_tensor(*ctx_eval, (enum ggml_type) type, n_dims, ne);

                            tensor->op = eop;
                        } break;
                }

                memcpy(tensor->name,      ptr_name,      GGML_MAX_NAME);
                memcpy(tensor->op_params, ptr_op_params, GGML_MAX_OP_PARAMS);

                for (int j = 0; j < GGML_MAX_DIMS; ++j) {
                    tensor->nb[j] = nb[j];
                }

                for (int j = 0; j < GGML_MAX_SRC; ++j) {
                    tensor->src[j] = args[j];
                }

                result.nodes[i] = tensor;

                fprintf(stderr, "%s: loaded node %d: '%16s', %3d dims, %9zu bytes\n", __func__, i, tensor->name, n_dims, ggml_nbytes(tensor));
            }
        }
    }

    return result;
}
void ggml_graph_print(const struct ggml_cgraph * cgraph) {
    int64_t perf_total_per_op_us[GGML_OP_COUNT] = {0};

    GGML_PRINT("=== GRAPH ===\n");

    GGML_PRINT("n_nodes = %d\n", cgraph->n_nodes);
    for (int i = 0; i < cgraph->n_nodes; i++) {
        struct ggml_tensor * node = cgraph->nodes[i];

        perf_total_per_op_us[node->op] += MAX(1, node->perf_time_us);

        GGML_PRINT(" - %3d: [ %5" PRId64 ", %5" PRId64 ", %5" PRId64 "] %16s %s (%3d) cpu = %7.3f / %7.3f ms, wall = %7.3f / %7.3f ms\n",
                i,
                node->ne[0], node->ne[1], node->ne[2],
                ggml_op_name(node->op), node->is_param ? "x" : node->grad ? "g" : " ", node->perf_runs,
                (double) node->perf_cycles  / (double) ggml_cycles_per_ms(),
                (double) node->perf_cycles  / (double) ggml_cycles_per_ms() / (double) node->perf_runs,
                (double) node->perf_time_us / 1000.0,
                (double) node->perf_time_us / 1000.0 / node->perf_runs);
    }

    GGML_PRINT("n_leafs = %d\n", cgraph->n_leafs);
    for (int i = 0; i < cgraph->n_leafs; i++) {
        struct ggml_tensor * node = cgraph->leafs[i];

        GGML_PRINT(" - %3d: [ %5" PRId64 ", %5" PRId64 "] %8s %16s\n",
                i,
                node->ne[0], node->ne[1],
                ggml_op_name(node->op),
                ggml_get_name(node));
    }

    for (int i = 0; i < GGML_OP_COUNT; i++) {
        if (perf_total_per_op_us[i] == 0) {
            continue;
        }

        GGML_PRINT("perf_total_per_op_us[%16s] = %7.3f ms\n", ggml_op_name(i), (double) perf_total_per_op_us[i] / 1000.0);
    }

    GGML_PRINT("========================================\n");
}
// check if node is part of the graph
static bool ggml_graph_find(const struct ggml_cgraph * cgraph, const struct ggml_tensor * node) {
    if (cgraph == NULL) {
        return true;
    }

    for (int i = 0; i < cgraph->n_nodes; i++) {
        if (cgraph->nodes[i] == node) {
            return true;
        }
    }

    return false;
}

static struct ggml_tensor * ggml_graph_get_parent(const struct ggml_cgraph * cgraph, const struct ggml_tensor * node) {
    for (int i = 0; i < cgraph->n_nodes; i++) {
        struct ggml_tensor * parent = cgraph->nodes[i];

        if (parent->grad == node) {
            return parent;
        }
    }

    return NULL;
}

static void ggml_graph_dump_dot_node_edge(FILE * fp, const struct ggml_cgraph * gb, struct ggml_tensor * node, struct ggml_tensor * parent, const char * label) {
    struct ggml_tensor * gparent  = ggml_graph_get_parent(gb, node);
    struct ggml_tensor * gparent0 = ggml_graph_get_parent(gb, parent);

    fprintf(fp, "  \"%p\":%s -> \"%p\":%s [ arrowhead = %s; style = %s; label = \"%s\"; ]\n",
            gparent0 ? (void *) gparent0 : (void *) parent,
            gparent0 ? "g" : "x",
            gparent  ? (void *) gparent  : (void *) node,
            gparent  ? "g" : "x",
            gparent  ? "empty" : "vee",
            gparent  ? "dashed" : "solid",
            label);
}

static void ggml_graph_dump_dot_leaf_edge(FILE * fp, struct ggml_tensor * node, struct ggml_tensor * parent, const char * label) {
    fprintf(fp, "  \"%p\":%s -> \"%p\":%s [ label = \"%s\"; ]\n",
            (void *) parent, "x",
            (void *) node,   "x",
            label);
}
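
// dump the graph in graphviz dot format; gf may be NULL
//
// illustrative usage (file names are arbitrary):
//
//   ggml_graph_dump_dot(gb, gf, "graph.dot");
//   // render with: dot -Tpng graph.dot -o graph.png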
void ggml_graph_dump_dot(const struct ggml_cgraph * gb, const struct ggml_cgraph * gf, const char * filename) {
    char color[16];

    FILE * fp = fopen(filename, "w");
    GGML_ASSERT(fp);

    fprintf(fp, "digraph G {\n");
    fprintf(fp, "  newrank = true;\n");
    fprintf(fp, "  rankdir = LR;\n");

    for (int i = 0; i < gb->n_nodes; i++) {
        struct ggml_tensor * node = gb->nodes[i];

        if (ggml_graph_get_parent(gb, node) != NULL) {
            continue;
        }

        if (node->is_param) {
            snprintf(color, sizeof(color), "yellow");
        } else if (node->grad) {
            if (ggml_graph_find(gf, node)) {
                snprintf(color, sizeof(color), "green");
            } else {
                snprintf(color, sizeof(color), "lightblue");
            }
        } else {
            snprintf(color, sizeof(color), "white");
        }

        fprintf(fp, "  \"%p\" [ "
                    "style = filled; fillcolor = %s; shape = record; "
                    "label=\"",
                (void *) node, color);

        if (strlen(node->name) > 0) {
            fprintf(fp, "%s (%s)|", node->name, ggml_type_name(node->type));
        } else {
            fprintf(fp, "(%s)|", ggml_type_name(node->type));
        }

        if (node->n_dims == 2) {
            fprintf(fp, "%d [%" PRId64 ", %" PRId64 "] | <x>%s", i, node->ne[0], node->ne[1], ggml_op_symbol(node->op));
        } else {
            fprintf(fp, "%d [%" PRId64 ", %" PRId64 ", %" PRId64 "] | <x>%s", i, node->ne[0], node->ne[1], node->ne[2], ggml_op_symbol(node->op));
        }

        if (node->grad) {
            fprintf(fp, " | <g>%s\"; ]\n", ggml_op_symbol(node->grad->op));
        } else {
            fprintf(fp, "\"; ]\n");
        }
    }

    for (int i = 0; i < gb->n_leafs; i++) {
        struct ggml_tensor * node = gb->leafs[i];

        snprintf(color, sizeof(color), "pink");

        fprintf(fp, "  \"%p\" [ "
                    "style = filled; fillcolor = %s; shape = record; "
                    "label=\"<x>",
                (void *) node, color);

        if (strlen(node->name) > 0) {
            fprintf(fp, "%s (%s)|", node->name, ggml_type_name(node->type));
        } else {
            fprintf(fp, "(%s)|", ggml_type_name(node->type));
        }

        fprintf(fp, "CONST %d [%" PRId64 ", %" PRId64 "]", i, node->ne[0], node->ne[1]);
        if (ggml_nelements(node) < 5) {
            fprintf(fp, " | (");
            for (int j = 0; j < ggml_nelements(node); j++) {
                if (node->type == GGML_TYPE_I8 || node->type == GGML_TYPE_I16 || node->type == GGML_TYPE_I32) {
                    fprintf(fp, "%d", ggml_get_i32_1d(node, j));
                }
                else if (node->type == GGML_TYPE_F32 || node->type == GGML_TYPE_F16) {
                    fprintf(fp, "%.1e", (double)ggml_get_f32_1d(node, j));
                }
                else {
                    fprintf(fp, "#");
                }
                if (j < ggml_nelements(node) - 1) {
                    fprintf(fp, ", ");
                }
            }
            fprintf(fp, ")");
        }
        fprintf(fp, "\"; ]\n");
    }

    for (int i = 0; i < gb->n_nodes; i++) {
        struct ggml_tensor * node = gb->nodes[i];

        for (int j = 0; j < GGML_MAX_SRC; j++) {
            if (node->src[j]) {
                char label[16];
                snprintf(label, sizeof(label), "src %d", j);
                ggml_graph_dump_dot_node_edge(fp, gb, node, node->src[j], label);
            }
        }
    }

    for (int i = 0; i < gb->n_leafs; i++) {
        struct ggml_tensor * node = gb->leafs[i];

        for (int j = 0; j < GGML_MAX_SRC; j++) {
            if (node->src[j]) {
                char label[16];
                snprintf(label, sizeof(label), "src %d", j);
                ggml_graph_dump_dot_leaf_edge(fp, node, node->src[j], label);
            }
        }
    }

    fprintf(fp, "}\n");

    fclose(fp);

    GGML_PRINT("%s: dot -Tpng %s -o %s.png && open %s.png\n", __func__, filename, filename, filename);
}
////////////////////////////////////////////////////////////////////////////////
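
// helpers that move parameter values between the flat float arrays used by
// the optimizers below and the (possibly multi-dimensional) parameter
// tensors; elements are visited in the order the tensors appear in ps[]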
static void ggml_opt_set_params(int np, struct ggml_tensor * const ps[], const float * x) {
    int i = 0;
    for (int p = 0; p < np; ++p) {
        const int64_t ne = ggml_nelements(ps[p]);
        // TODO: add function to set tensor from array
        for (int64_t j = 0; j < ne; ++j) {
            ggml_set_f32_1d(ps[p], j, x[i++]);
        }
    }
}

static void ggml_opt_get_params(int np, struct ggml_tensor * const ps[], float * x) {
    int i = 0;
    for (int p = 0; p < np; ++p) {
        const int64_t ne = ggml_nelements(ps[p]);
        // TODO: add function to get all elements at once
        for (int64_t j = 0; j < ne; ++j) {
            x[i++] = ggml_get_f32_1d(ps[p], j);
        }
    }
}

static void ggml_opt_get_grad(int np, struct ggml_tensor * const ps[], float * g) {
    int64_t i = 0;
    for (int p = 0; p < np; ++p) {
        const int64_t ne = ggml_nelements(ps[p]);
        // TODO: add function to get all elements at once
        for (int64_t j = 0; j < ne; ++j) {
            g[i++] = ggml_get_f32_1d(ps[p]->grad, j);
        }
    }
}

static void ggml_opt_acc_grad(int np, struct ggml_tensor * const ps[], float * g, float scale) {
    int64_t i = 0;
    for (int p = 0; p < np; ++p) {
        const int64_t ne = ggml_nelements(ps[p]);
        // TODO: add function to get all elements at once
        for (int64_t j = 0; j < ne; ++j) {
            g[i++] += ggml_get_f32_1d(ps[p]->grad, j) * scale;
        }
    }
}
//
// ADAM
//
//   ref: https://arxiv.org/pdf/1412.6980.pdf
//
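// for each parameter x with accumulated (and optionally clipped) gradient g,
// the update performed below is the standard bias-corrected Adam step with
// an AdamW-style weight decay (sketch of the arithmetic in the code):
//
//   m  = beta1*m + (1 - beta1)*g
//   v  = beta2*v + (1 - beta2)*g*g
//   mh = alpha*sched * m/(1 - beta1^t)
//   vh = sqrt(v/(1 - beta2^t)) + eps
//   x  = x*(1 - p_decay) - mh/vh
//
// where p_decay is non-zero only for tensors with at least decay_min_ndim
// dimensions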
static enum ggml_opt_result ggml_opt_adam(
        struct ggml_context * ctx,
        struct ggml_opt_context * opt,
        struct ggml_opt_params params,
        struct ggml_tensor * f,
        struct ggml_cgraph * gf,
        struct ggml_cgraph * gb,
        ggml_opt_callback callback,
        void * callback_data) {
    GGML_ASSERT(ggml_is_scalar(f));

    // these will store the parameters we want to optimize
    struct ggml_tensor * ps[GGML_MAX_PARAMS];

    int np = 0;
    int64_t nx = 0;
    for (int i = 0; i < gf->n_nodes; ++i) {
        if (gf->nodes[i]->is_param) {
            GGML_PRINT_DEBUG("found param %d: grad->op = %d\n", np, gf->nodes[i]->grad->op);

            GGML_ASSERT(np < GGML_MAX_PARAMS);

            ps[np++] = gf->nodes[i];
            nx += ggml_nelements(gf->nodes[i]);
        }
    }

    if ((opt->params.type != params.type) || (opt->nx != nx) || (opt->params.past != params.past)) {
        int iter = opt->iter;
        ggml_opt_init(opt->ctx, opt, params, nx);
        opt->iter = iter;
    }

    // constants
    float sched = params.adam.sched;
    const float alpha = params.adam.alpha;
    const float decay = params.adam.decay * alpha;
    const float beta1 = params.adam.beta1;
    const float beta2 = params.adam.beta2;
    const float eps   = params.adam.eps;
    const float gclip = params.adam.gclip;
    const int decay_min_ndim = params.adam.decay_min_ndim;
    const int n_accum = MAX(1, params.n_gradient_accumulation);
    const float accum_norm = 1.0f / (float) n_accum;

    float * g = opt->adam.g->data;  // gradients
    float * m = opt->adam.m->data;  // first moment
    float * v = opt->adam.v->data;  // second moment

    float * pf = params.past > 0 ? opt->adam.pf->data : NULL; // past function values

    struct ggml_cplan cplan = ggml_graph_plan(gb, params.n_threads);
    struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_WORK_BUFFER, cplan.work_size);
    cplan.work_data = (uint8_t *)ctx->mem_buffer + obj->offs;

    bool cancel = false;

    // compute the function value
    float fx = 0;
    ggml_set_zero(opt->adam.g);
    for (int accum_step = 0; accum_step < n_accum; ++accum_step) {
        if (callback) {
            callback(callback_data, accum_step, &sched, &cancel);
            if (cancel) {
                break;
            }
        }
        // ggml_graph_reset  (gf);
        ggml_set_f32      (f->grad, 1.0f);
        ggml_graph_compute(gb, &cplan);
        ggml_opt_acc_grad(np, ps, g, accum_norm);
        fx += ggml_get_f32_1d(f, 0);
    }
    if (cancel) {
        return GGML_OPT_DID_NOT_CONVERGE;
    }
    fx *= accum_norm;

    opt->adam.fx_prev = fx;
    opt->adam.fx_best = opt->adam.fx_prev;
    if (pf) {
        pf[opt->iter % params.past] = opt->adam.fx_prev;
    }

    opt->loss_before = opt->adam.fx_prev;
    opt->loss_after  = opt->adam.fx_prev;

    // initialize
    if (opt->just_initialized) {
        opt->adam.n_no_improvement = 0;
        opt->just_initialized = false;
    }

    float * fx_best = &opt->adam.fx_best;
    float * fx_prev = &opt->adam.fx_prev;
    int * n_no_improvement = &opt->adam.n_no_improvement;

    int iter0 = opt->iter;

    // run the optimizer
    for (int t = 0; t < params.adam.n_iter; ++t) {
        if (cancel) {
            break;
        }
        opt->iter = iter0 + t + 1;
        GGML_PRINT_DEBUG  ("=== iter %d ===\n", t);

        GGML_PRINT_DEBUG  ("f      = %10.6f\n", ggml_get_f32_1d(f, 0));
        GGML_PRINT_DEBUG_5("df/dx0 = %10.6f\n", ggml_get_f32_1d(ps[0]->grad, 0));
        GGML_PRINT_DEBUG_5("df/dx1 = %10.6f\n", ggml_get_f32_1d(ps[1]->grad, 0));

        for (int i = 0; i < np; ++i) {
            GGML_PRINT_DEBUG("param %d: %10.6f, g = %10.6f\n", i,
                    ggml_get_f32_1d(ps[i], 0), ggml_get_f32_1d(ps[i]->grad, 0));
        }

        const int64_t t_start_wall = ggml_time_us();
        const int64_t t_start_cpu  = ggml_cycles();
        UNUSED(t_start_wall);
        UNUSED(t_start_cpu);

        {
            float gnorm = 1.0f;
            if (gclip > 0.0f) {
                // gradient clipping
                ggml_float sum = 0.0;
                for (int64_t i = 0; i < nx; ++i) {
                    sum += (ggml_float)(g[i]*g[i]);
                }
                ggml_float norm = sqrt(sum);
                if (norm > (ggml_float) gclip) {
                    gnorm = (float) ((ggml_float) gclip / norm);
                }
            }
            const float beta1h = alpha*sched/(1.0f - powf(beta1, opt->iter));
            const float beta2h =        1.0f/(1.0f - powf(beta2, opt->iter));
            int64_t i = 0;
            for (int p = 0; p < np; ++p) {
                const int64_t ne = ggml_nelements(ps[p]);
                const float p_decay = ((ps[p]->n_dims >= decay_min_ndim) ? decay : 0.0f) * sched;
                for (int64_t j = 0; j < ne; ++j) {
                    float x  = ggml_get_f32_1d(ps[p], j);
                    float g_ = g[i]*gnorm;
                    m[i] = m[i]*beta1 +    g_*(1.0f - beta1);
                    v[i] = v[i]*beta2 + g_*g_*(1.0f - beta2);
                    float mh = m[i]*beta1h;
                    float vh = v[i]*beta2h;
                    vh = sqrtf(vh) + eps;
                    x  = x*(1.0f - p_decay) - mh/vh;
                    ggml_set_f32_1d(ps[p], j, x);
                    ++i;
                }
            }
        }

        fx = 0;
        ggml_set_zero(opt->adam.g);
        for (int accum_step = 0; accum_step < n_accum; ++accum_step) {
            if (callback) {
                callback(callback_data, accum_step, &sched, &cancel);
                if (cancel) {
                    break;
                }
            }
            // ggml_graph_reset  (gf);
            ggml_set_f32      (f->grad, 1.0f);
            ggml_graph_compute(gb, &cplan);
            ggml_opt_acc_grad(np, ps, g, accum_norm);
            fx += ggml_get_f32_1d(f, 0);
        }
        if (cancel) {
            break;
        }
        fx *= accum_norm;

        opt->loss_after = fx;

        // check convergence
        if (fabsf(fx - fx_prev[0])/fx < params.adam.eps_f) {
            GGML_PRINT_DEBUG("converged\n");

            return GGML_OPT_OK;
        }

        // delta-based convergence test
        if (pf != NULL) {
            // need at least params.past iterations to start checking for convergence
            if (params.past <= iter0 + t) {
                const float rate = (pf[(iter0 + t)%params.past] - fx)/fx;

                if (fabsf(rate) < params.delta) {
                    return GGML_OPT_OK;
                }
            }

            pf[(iter0 + t)%params.past] = fx;
        }

        // check for improvement
        if (params.max_no_improvement > 0) {
            if (fx_best[0] > fx) {
                fx_best[0] = fx;
                n_no_improvement[0] = 0;
            } else {
                ++n_no_improvement[0];

                if (n_no_improvement[0] >= params.max_no_improvement) {
                    return GGML_OPT_OK;
                }
            }
        }

        fx_prev[0] = fx;

        {
            const int64_t t_end_cpu = ggml_cycles();
            GGML_PRINT_DEBUG("time iter:      %5.3f s\n", ((float)(t_end_cpu - t_start_cpu))/CLOCKS_PER_SEC);
            UNUSED(t_end_cpu);

            const int64_t t_end_wall = ggml_time_us();
            GGML_PRINT_DEBUG("wall time iter: %5.3f s\n", (t_end_wall - t_start_wall)/1e6);
            UNUSED(t_end_wall);
        }
    }

    return GGML_OPT_DID_NOT_CONVERGE;
}
//
// L-BFGS
//
// the L-BFGS implementation below is based on the following implementation:
//
//   https://github.com/chokkan/liblbfgs
//

struct ggml_lbfgs_iteration_data {
    float alpha;
    float ys;
    float * s;
    float * y;
};
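
// backtracking line search along direction d, starting from the previous
// point xp with initial step *step; the step is shrunk (dec) or grown (inc)
// until the Armijo condition - and, for the Wolfe variants, the
// corresponding curvature condition - holds; returns the number of function
// evaluations on success or a negative GGML_LINESEARCH_* code on failure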
static enum ggml_opt_result linesearch_backtracking(
        const struct ggml_opt_params * params,
        int nx,
        float * x,
        float * fx,
        float * g,
        float * d,
        float * step,
        const float * xp,
        struct ggml_tensor * f,
        struct ggml_cgraph * gb,
        struct ggml_cplan  * cplan,
        const int np,
        struct ggml_tensor * ps[],
        bool * cancel,
        ggml_opt_callback callback,
        void * callback_data) {
    int count = 0;

    float width  = 0.0f;
    float dg     = 0.0f;
    float finit  = 0.0f;
    float dginit = 0.0f;
    float dgtest = 0.0f;

    const float dec = 0.5f;
    const float inc = 2.1f;

    const int n_accum = MAX(1, params->n_gradient_accumulation);
    const float accum_norm = 1.0f / (float) n_accum;

    if (*step <= 0.f) {
        return GGML_LINESEARCH_INVALID_PARAMETERS;
    }

    // compute the initial gradient in the search direction
    ggml_vec_dot_f32(nx, &dginit, g, d);

    // make sure that d points to a descent direction
    if (0 < dginit) {
        return GGML_LINESEARCH_FAIL;
    }

    // initialize local variables
    finit = *fx;
    dgtest = params->lbfgs.ftol*dginit;

    while (!*cancel) {
        ggml_vec_cpy_f32(nx, x, xp);
        ggml_vec_mad_f32(nx, x, d, *step);

        // evaluate the function and gradient values
        {
            ggml_opt_set_params(np, ps, x);

            *fx = 0;
            memset(g, 0, sizeof(float)*nx);
            for (int accum_step = 0; accum_step < n_accum; ++accum_step) {
                if (callback) {
                    // L-BFGS does not support learning rate -> ignore learning schedule
                    float sched = 0;
                    callback(callback_data, accum_step, &sched, cancel);
                    if (*cancel) {
                        break;
                    }
                }
                // ggml_graph_reset  (gf);
                ggml_set_f32      (f->grad, 1.0f);
                ggml_graph_compute(gb, cplan);
                ggml_opt_acc_grad(np, ps, g, accum_norm);
                *fx += ggml_get_f32_1d(f, 0);
            }
            if (*cancel) {
                break;
            }
            *fx *= accum_norm;
        }

        ++count;

        if (*fx > finit + (*step)*dgtest) {
            width = dec;
        } else {
            // Armijo condition is satisfied
            if (params->lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_ARMIJO) {
                return count;
            }

            ggml_vec_dot_f32(nx, &dg, g, d);

            // check the Wolfe condition
            if (dg < params->lbfgs.wolfe * dginit) {
                width = inc;
            } else {
                if(params->lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_WOLFE) {
                    // regular Wolfe conditions
                    return count;
                }

                if(dg > -params->lbfgs.wolfe*dginit) {
                    width = dec;
                } else {
                    // strong Wolfe condition (GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE)
                    return count;
                }
            }
        }

        if (*step < params->lbfgs.min_step) {
            return GGML_LINESEARCH_MINIMUM_STEP;
        }
        if (*step > params->lbfgs.max_step) {
            return GGML_LINESEARCH_MAXIMUM_STEP;
        }
        if (params->lbfgs.max_linesearch <= count) {
            return GGML_LINESEARCH_MAXIMUM_ITERATIONS;
        }

        (*step) *= width;
    }

    GGML_UNREACHABLE();
}
static enum ggml_opt_result ggml_opt_lbfgs(
        struct ggml_context * ctx,
        struct ggml_opt_context * opt,
        struct ggml_opt_params params,
        struct ggml_tensor * f,
        struct ggml_cgraph * gf,
        struct ggml_cgraph * gb,
        ggml_opt_callback callback,
        void * callback_data) {
    if (params.lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_WOLFE ||
        params.lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE) {
        if (params.lbfgs.wolfe <= params.lbfgs.ftol || 1.f <= params.lbfgs.wolfe) {
            return GGML_OPT_INVALID_WOLFE;
        }
    }

    const int m = params.lbfgs.m;

    // these will store the parameters we want to optimize
    struct ggml_tensor * ps[GGML_MAX_PARAMS];

    int np = 0;
    int nx = 0;
    for (int i = 0; i < gf->n_nodes; ++i) {
        if (gf->nodes[i]->is_param) {
            GGML_PRINT_DEBUG("found param %d: grad->op = %d\n", np, gf->nodes[i]->grad->op);

            GGML_ASSERT(np < GGML_MAX_PARAMS);

            ps[np++] = gf->nodes[i];
            nx += ggml_nelements(gf->nodes[i]);
        }
    }

    if ((opt->params.type != params.type) || (opt->nx != nx) || (opt->params.past != params.past) || (opt->params.lbfgs.m != params.lbfgs.m)) {
        int iter = opt->iter;
        ggml_opt_init(ctx, opt, params, nx);
        opt->iter = iter;
    }

    struct ggml_cplan cplan = ggml_graph_plan(gb, params.n_threads);
    struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_WORK_BUFFER, cplan.work_size);
    cplan.work_data = (uint8_t *)ctx->mem_buffer + obj->offs;

    float * x  = opt->lbfgs.x->data;  // current parameters
    float * xp = opt->lbfgs.xp->data; // previous parameters
    float * g  = opt->lbfgs.g->data;  // current gradient
    float * gp = opt->lbfgs.gp->data; // previous gradient
    float * d  = opt->lbfgs.d->data;  // search direction

    float * pf = params.past > 0 ? opt->lbfgs.pf->data : NULL; // past function values

    const int n_accum = MAX(1, params.n_gradient_accumulation);
    const float accum_norm = 1.0f / (float) n_accum;

    float fx    = 0.0f; // cost function value
    float xnorm = 0.0f; // ||x||
    float gnorm = 0.0f; // ||g||

    // initialize x from the graph nodes
    ggml_opt_get_params(np, ps, x);

    // the L-BFGS memory
    float * lm_alpha = opt->lbfgs.lmal->data;
    float * lm_ys    = opt->lbfgs.lmys->data;
    float * lm_s     = opt->lbfgs.lms->data;
    float * lm_y     = opt->lbfgs.lmy->data;

    bool cancel = false;

    // evaluate the function value and its gradient
    {
        ggml_opt_set_params(np, ps, x);

        fx = 0;
        memset(g, 0, sizeof(float)*nx);
        for (int accum_step = 0; accum_step < n_accum; ++accum_step) {
            if (callback) {
                // L-BFGS does not support learning rate -> ignore learning schedule
                float sched = 0;
                callback(callback_data, accum_step, &sched, &cancel);
                if (cancel) {
                    break;
                }
            }
            // ggml_graph_reset  (gf);
            ggml_set_f32      (f->grad, 1.0f);
            ggml_graph_compute(gb, &cplan);
            ggml_opt_acc_grad(np, ps, g, accum_norm);
            fx += ggml_get_f32_1d(f, 0);
        }
        if (cancel) {
            return GGML_OPT_DID_NOT_CONVERGE;
        }
        fx *= accum_norm;

        opt->loss_before = fx;
        opt->loss_after  = fx;
    }

    // search direction = -gradient
    ggml_vec_neg_f32(nx, d, g);

    // ||x||, ||g||
    ggml_vec_norm_f32(nx, &xnorm, x);
    ggml_vec_norm_f32(nx, &gnorm, g);

    if (xnorm < 1.0f) {
        xnorm = 1.0f;
    }

    // already optimized
    if (gnorm/xnorm <= params.lbfgs.eps) {
        return GGML_OPT_OK;
    }

    if (opt->just_initialized) {
        if (pf) {
            pf[0] = fx;
        }
        opt->lbfgs.fx_best = fx;

        // initial step
        ggml_vec_norm_inv_f32(nx, &opt->lbfgs.step, d);
        opt->lbfgs.j   = 0;
        opt->lbfgs.k   = 1;
        opt->lbfgs.end = 0;
        opt->lbfgs.n_no_improvement = 0;
        opt->just_initialized = false;
    }

    float * fx_best        = &opt->lbfgs.fx_best;
    float * step           = &opt->lbfgs.step;
    int * j                = &opt->lbfgs.j;
    int * k                = &opt->lbfgs.k;
    int * end              = &opt->lbfgs.end;
    int * n_no_improvement = &opt->lbfgs.n_no_improvement;

    int ls    = 0;
    int bound = 0;

    float ys   = 0.0f;
    float yy   = 0.0f;
    float beta = 0.0f;

    int it = 0;

    while (true) {
        // store the current position and gradient vectors
        ggml_vec_cpy_f32(nx, xp, x);
        ggml_vec_cpy_f32(nx, gp, g);

        ls = linesearch_backtracking(&params, nx, x, &fx, g, d, step, xp, f, gb, &cplan, np, ps, &cancel, callback, callback_data);
        if (cancel) {
            return GGML_OPT_DID_NOT_CONVERGE;
        }
        if (ls < 0) {
            // linesearch failed - go back to the previous point and return
            ggml_vec_cpy_f32(nx, x, xp);
            ggml_vec_cpy_f32(nx, g, gp);

            return ls;
        }

        opt->loss_after = fx;

        ggml_vec_norm_f32(nx, &xnorm, x);
        ggml_vec_norm_f32(nx, &gnorm, g);

        GGML_PRINT_DEBUG("f = %10.6f\n", ggml_get_f32_1d(f, 0));

        if (xnorm < 1.0f) {
            xnorm = 1.0f;
        }
        if (gnorm/xnorm <= params.lbfgs.eps) {
            // converged
            return GGML_OPT_OK;
        }

        // delta-based convergence test
        if (pf != NULL) {
            // need at least params.past iterations to start checking for convergence
            if (params.past <= k[0]) {
                const float rate = (pf[k[0]%params.past] - fx)/fx;

                if (fabsf(rate) < params.delta) {
                    return GGML_OPT_OK;
                }
            }

            pf[k[0]%params.past] = fx;
        }

        // check for improvement
        if (params.max_no_improvement > 0) {
            if (fx < fx_best[0]) {
                fx_best[0] = fx;
                n_no_improvement[0] = 0;
            } else {
                n_no_improvement[0]++;

                if (n_no_improvement[0] >= params.max_no_improvement) {
                    return GGML_OPT_OK;
                }
            }
        }

        if (params.lbfgs.n_iter != 0 && params.lbfgs.n_iter < it + 1) {
            // reached the maximum number of iterations
            return GGML_OPT_DID_NOT_CONVERGE;
        }

        // update vectors s and y:
        //   s_{k+1} = x_{k+1} - x_{k} = \step * d_{k}.
        //   y_{k+1} = g_{k+1} - g_{k}.
        //
        ggml_vec_sub_f32(nx, &lm_s[end[0]*nx], x, xp);
        ggml_vec_sub_f32(nx, &lm_y[end[0]*nx], g, gp);

        // compute scalars ys and yy:
        //   ys = y^t \cdot s -> 1 / \rho.
        //   yy = y^t \cdot y.
        //
        ggml_vec_dot_f32(nx, &ys, &lm_y[end[0]*nx], &lm_s[end[0]*nx]);
        ggml_vec_dot_f32(nx, &yy, &lm_y[end[0]*nx], &lm_y[end[0]*nx]);

        lm_ys[end[0]] = ys;

        // find new search direction
        //   ref: https://en.wikipedia.org/wiki/Limited-memory_BFGS

        bound = (m <= k[0]) ? m : k[0];
        k[0]++;
        it++;
        end[0] = (end[0] + 1)%m;

        // initialize search direction with -g
        ggml_vec_neg_f32(nx, d, g);

        j[0] = end[0];
        for (int i = 0; i < bound; ++i) {
            j[0] = (j[0] + m - 1) % m;
            // \alpha_{j} = \rho_{j} s^{t}_{j} \cdot q_{k+1}
            ggml_vec_dot_f32(nx, &lm_alpha[j[0]], &lm_s[j[0]*nx], d);
            lm_alpha[j[0]] /= lm_ys[j[0]];
            // q_{i} = q_{i+1} - \alpha_{i} y_{i}
            ggml_vec_mad_f32(nx, d, &lm_y[j[0]*nx], -lm_alpha[j[0]]);
        }

        ggml_vec_scale_f32(nx, d, ys/yy);

        for (int i = 0; i < bound; ++i) {
            // \beta_{j} = \rho_{j} y^t_{j} \cdot \gamma_{i}
            ggml_vec_dot_f32(nx, &beta, &lm_y[j[0]*nx], d);
            beta /= lm_ys[j[0]];
            // \gamma_{i+1} = \gamma_{i} + (\alpha_{j} - \beta_{j}) s_{j}
            ggml_vec_mad_f32(nx, d, &lm_s[j[0]*nx], lm_alpha[j[0]] - beta);
            j[0] = (j[0] + 1)%m;
        }

        step[0] = 1.0;
    }

    GGML_UNREACHABLE();
}
struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type) {
    struct ggml_opt_params result;

    switch (type) {
        case GGML_OPT_ADAM:
            {
                result = (struct ggml_opt_params) {
                    .type      = GGML_OPT_ADAM,
                    .n_threads = 1,
                    .past      = 0,
                    .delta     = 1e-5f,

                    .max_no_improvement = 100,

                    .print_forward_graph  = true,
                    .print_backward_graph = true,

                    .n_gradient_accumulation = 1,

                    .adam = {
                        .n_iter = 10000,
                        .sched  = 1.000f,
                        .decay  = 0.0f,
                        .decay_min_ndim = 2,
                        .alpha  = 0.001f,
                        .beta1  = 0.9f,
                        .beta2  = 0.999f,
                        .eps    = 1e-8f,
                        .eps_f  = 1e-5f,
                        .eps_g  = 1e-3f,
                        .gclip  = 0.0f,
                    },
                };
            } break;
        case GGML_OPT_LBFGS:
            {
                result = (struct ggml_opt_params) {
                    .type      = GGML_OPT_LBFGS,
                    .n_threads = 1,
                    .past      = 0,
                    .delta     = 1e-5f,

                    .max_no_improvement = 0,

                    .print_forward_graph  = true,
                    .print_backward_graph = true,

                    .n_gradient_accumulation = 1,

                    .lbfgs = {
                        .m              = 6,
                        .n_iter         = 100,
                        .max_linesearch = 20,

                        .eps      = 1e-5f,
                        .ftol     = 1e-4f,
                        .wolfe    = 0.9f,
                        .min_step = 1e-20f,
                        .max_step = 1e+20f,

                        .linesearch = GGML_LINESEARCH_DEFAULT,
                    },
                };
            } break;
    }

    return result;
}
GGML_API void ggml_opt_init(
        struct ggml_context * ctx,
        struct ggml_opt_context * opt,
        struct ggml_opt_params params,
        int64_t nx) {
    opt->ctx = ctx;
    opt->params = params;
    opt->iter = 0;
    opt->nx = nx;
    opt->just_initialized = true;
    if (opt->ctx == NULL) {
        struct ggml_init_params ctx_opt_params;
        if (opt->params.type == GGML_OPT_ADAM) {
            ctx_opt_params.mem_size = GGML_MEM_ALIGN*3 + ggml_tensor_overhead()*3 + ggml_type_size(GGML_TYPE_F32)*nx*3;
            if (opt->params.past > 0) {
                ctx_opt_params.mem_size += GGML_MEM_ALIGN + ggml_tensor_overhead() + ggml_type_size(GGML_TYPE_F32)*opt->params.past;
            }
        } else if (opt->params.type == GGML_OPT_LBFGS) {
            ctx_opt_params.mem_size = GGML_MEM_ALIGN*9 + ggml_tensor_overhead()*9 + ggml_type_size(GGML_TYPE_F32)*(nx*5 + opt->params.lbfgs.m*2 + nx*opt->params.lbfgs.m*2);
            if (opt->params.past > 0) {
                ctx_opt_params.mem_size += GGML_MEM_ALIGN + ggml_tensor_overhead() + ggml_type_size(GGML_TYPE_F32)*opt->params.past;
            }
        }
        ctx_opt_params.mem_buffer = NULL;
        ctx_opt_params.no_alloc   = false;

        opt->ctx = ggml_init(ctx_opt_params);
    }
    switch (opt->params.type) {
        case GGML_OPT_ADAM:
            {
                opt->adam.g  = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
                opt->adam.m  = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
                opt->adam.v  = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
                opt->adam.pf = params.past > 0
                    ? ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, params.past)
                    : NULL;
                ggml_set_zero(opt->adam.m);
                ggml_set_zero(opt->adam.v);
                if (opt->adam.pf) {
                    ggml_set_zero(opt->adam.pf);
                }
            } break;
        case GGML_OPT_LBFGS:
            {
                opt->lbfgs.x  = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
                opt->lbfgs.xp = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
                opt->lbfgs.g  = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
                opt->lbfgs.gp = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
                opt->lbfgs.d  = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
                opt->lbfgs.pf = params.past > 0
                    ? ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, params.past)
                    : NULL;
                opt->lbfgs.lmal = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, params.lbfgs.m);
                opt->lbfgs.lmys = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, params.lbfgs.m);
                opt->lbfgs.lms  = ggml_new_tensor_2d(opt->ctx, GGML_TYPE_F32, nx, params.lbfgs.m);
                opt->lbfgs.lmy  = ggml_new_tensor_2d(opt->ctx, GGML_TYPE_F32, nx, params.lbfgs.m);
                ggml_set_zero(opt->lbfgs.x);
                ggml_set_zero(opt->lbfgs.xp);
                ggml_set_zero(opt->lbfgs.g);
                ggml_set_zero(opt->lbfgs.gp);
                ggml_set_zero(opt->lbfgs.d);
                if (opt->lbfgs.pf) {
                    ggml_set_zero(opt->lbfgs.pf);
                }
                ggml_set_zero(opt->lbfgs.lmal);
                ggml_set_zero(opt->lbfgs.lmys);
                ggml_set_zero(opt->lbfgs.lms);
                ggml_set_zero(opt->lbfgs.lmy);
            } break;
    }
}
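
// minimal usage sketch (illustrative; assumes f depends on tensors marked
// with ggml_set_param() so that gradients can be computed):
//
//   struct ggml_opt_params params = ggml_opt_default_params(GGML_OPT_ADAM);
//   enum ggml_opt_result res = ggml_opt(NULL, params, f); // NULL: a temporary context is created
//   // res == GGML_OPT_OK on convergence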
enum ggml_opt_result ggml_opt(
        struct ggml_context * ctx,
        struct ggml_opt_params params,
        struct ggml_tensor * f) {
    bool free_ctx = false;
    if (ctx == NULL) {
        struct ggml_init_params params_ctx = {
            .mem_size   = 16*1024*1024,
            .mem_buffer = NULL,
            .no_alloc   = false,
        };

        ctx = ggml_init(params_ctx);
        if (ctx == NULL) {
            return GGML_OPT_NO_CONTEXT;
        }

        free_ctx = true;
    }

    enum ggml_opt_result result = GGML_OPT_OK;

    struct ggml_opt_context * opt = (struct ggml_opt_context *) alloca(sizeof(struct ggml_opt_context));

    ggml_opt_init(ctx, opt, params, 0);
    result = ggml_opt_resume(ctx, opt, f);

    if (free_ctx) {
        ggml_free(ctx);
    }

    return result;
}
enum ggml_opt_result ggml_opt_resume(
        struct ggml_context * ctx,
        struct ggml_opt_context * opt,
        struct ggml_tensor * f) {

    // build forward + backward compute graphs
    struct ggml_tensor * gfbuf = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, sizeof(struct ggml_cgraph) / ggml_type_size(GGML_TYPE_I32) + (sizeof(struct ggml_cgraph) % ggml_type_size(GGML_TYPE_I32) ? 1 : 0));
    struct ggml_tensor * gbbuf = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, sizeof(struct ggml_cgraph) / ggml_type_size(GGML_TYPE_I32) + (sizeof(struct ggml_cgraph) % ggml_type_size(GGML_TYPE_I32) ? 1 : 0));

    struct ggml_cgraph * gf = (struct ggml_cgraph *) gfbuf->data;
    struct ggml_cgraph * gb = (struct ggml_cgraph *) gbbuf->data;

    *gf = ggml_build_forward (f);
    *gb = ggml_build_backward(ctx, gf, true);

    return ggml_opt_resume_g(ctx, opt, f, gf, gb, NULL, NULL);
}
  16488. enum ggml_opt_result ggml_opt_resume_g(
  16489. struct ggml_context * ctx,
  16490. struct ggml_opt_context * opt,
  16491. struct ggml_tensor * f,
  16492. struct ggml_cgraph * gf,
  16493. struct ggml_cgraph * gb,
  16494. ggml_opt_callback callback,
  16495. void * callback_data) {
  16496. // build forward + backward compute graphs
  16497. enum ggml_opt_result result = GGML_OPT_OK;
  16498. switch (opt->params.type) {
  16499. case GGML_OPT_ADAM:
  16500. {
  16501. result = ggml_opt_adam(ctx, opt, opt->params, f, gf, gb, callback, callback_data);
  16502. } break;
  16503. case GGML_OPT_LBFGS:
  16504. {
  16505. result = ggml_opt_lbfgs(ctx, opt, opt->params, f, gf, gb, callback, callback_data);
  16506. } break;
  16507. }
  16508. if (opt->params.print_forward_graph) {
  16509. ggml_graph_print (gf);
  16510. ggml_graph_dump_dot(gf, NULL, "opt-forward.dot");
  16511. }
  16512. if (opt->params.print_backward_graph) {
  16513. ggml_graph_print (gb);
  16514. ggml_graph_dump_dot(gb, gf, "opt-backward.dot");
  16515. }
  16516. return result;
  16517. }
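// Illustrative sketch (assumption, not part of the file): when optimization is driven
// in stages, the optimizer state can be kept in a ggml_opt_context and the forward and
// backward graphs reused via ggml_opt_resume_g. `ctx`, `x`, `f`, `gf`, `gb` are placeholders.
//
//     struct ggml_opt_context opt;
//     ggml_opt_init(ctx, &opt, ggml_opt_default_params(GGML_OPT_ADAM), ggml_nelements(x));
//
//     // first stage: ggml_opt_resume builds the graphs internally
//     ggml_opt_resume(ctx, &opt, f);
//     // later stages: reuse previously built graphs gf/gb, no callback
//     ggml_opt_resume_g(ctx, &opt, f, gf, gb, NULL, NULL);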
  16518. ////////////////////////////////////////////////////////////////////////////////
  16519. size_t ggml_quantize_q4_0(const float * src, void * dst, int n, int k, int64_t * hist) {
  16520. assert(k % QK4_0 == 0);
  16521. const int nb = k / QK4_0;
  16522. for (int b = 0; b < n; b += k) {
  16523. block_q4_0 * restrict y = (block_q4_0 *) dst + b/QK4_0;
  16524. quantize_row_q4_0_reference(src + b, y, k);
  16525. for (int i = 0; i < nb; i++) {
  16526. for (int j = 0; j < QK4_0; j += 2) {
  16527. const uint8_t vi0 = y[i].qs[j/2] & 0x0F;
  16528. const uint8_t vi1 = y[i].qs[j/2] >> 4;
  16529. hist[vi0]++;
  16530. hist[vi1]++;
  16531. }
  16532. }
  16533. }
  16534. return (n/QK4_0*sizeof(block_q4_0));
  16535. }
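// Illustrative sketch (not part of ggml.c): quantizing a float buffer to Q4_0 and
// collecting the 16-bin histogram. The buffer size N is an assumption; it must be a
// multiple of QK4_0 for this layout.
//
//     enum { N = 4096 };
//     float   src[N];                                   // filled by the caller
//     uint8_t dst[N/QK4_0*sizeof(block_q4_0)];
//     int64_t hist[16] = {0};
//
//     size_t bytes = ggml_quantize_q4_0(src, dst, N, N, hist);
//     // bytes == N/QK4_0*sizeof(block_q4_0); hist[] counts the 4-bit quant values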
  16536. size_t ggml_quantize_q4_1(const float * src, void * dst, int n, int k, int64_t * hist) {
  16537. assert(k % QK4_1 == 0);
  16538. const int nb = k / QK4_1;
  16539. for (int b = 0; b < n; b += k) {
  16540. block_q4_1 * restrict y = (block_q4_1 *) dst + b/QK4_1;
  16541. quantize_row_q4_1_reference(src + b, y, k);
  16542. for (int i = 0; i < nb; i++) {
  16543. for (int j = 0; j < QK4_1; j += 2) {
  16544. const uint8_t vi0 = y[i].qs[j/2] & 0x0F;
  16545. const uint8_t vi1 = y[i].qs[j/2] >> 4;
  16546. hist[vi0]++;
  16547. hist[vi1]++;
  16548. }
  16549. }
  16550. }
  16551. return (n/QK4_1*sizeof(block_q4_1));
  16552. }
  16553. size_t ggml_quantize_q5_0(const float * src, void * dst, int n, int k, int64_t * hist) {
  16554. assert(k % QK5_0 == 0);
  16555. const int nb = k / QK5_0;
  16556. for (int b = 0; b < n; b += k) {
  16557. block_q5_0 * restrict y = (block_q5_0 *)dst + b/QK5_0;
  16558. quantize_row_q5_0_reference(src + b, y, k);
  16559. for (int i = 0; i < nb; i++) {
  16560. uint32_t qh;
  16561. memcpy(&qh, &y[i].qh, sizeof(qh));
  16562. for (int j = 0; j < QK5_0; j += 2) {
  16563. const uint8_t vh0 = ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4;
  16564. const uint8_t vh1 = ((qh & (1u << (j + 16))) >> (j + 12));
16565. // fold the 5-bit quant values into 16 histogram bins
  16566. const uint8_t vi0 = ((y[i].qs[j/2] & 0x0F) | vh0) / 2;
  16567. const uint8_t vi1 = ((y[i].qs[j/2] >> 4) | vh1) / 2;
  16568. hist[vi0]++;
  16569. hist[vi1]++;
  16570. }
  16571. }
  16572. }
  16573. return (n/QK5_0*sizeof(block_q5_0));
  16574. }
  16575. size_t ggml_quantize_q5_1(const float * src, void * dst, int n, int k, int64_t * hist) {
  16576. assert(k % QK5_1 == 0);
  16577. const int nb = k / QK5_1;
  16578. for (int b = 0; b < n; b += k) {
  16579. block_q5_1 * restrict y = (block_q5_1 *)dst + b/QK5_1;
  16580. quantize_row_q5_1_reference(src + b, y, k);
  16581. for (int i = 0; i < nb; i++) {
  16582. uint32_t qh;
  16583. memcpy(&qh, &y[i].qh, sizeof(qh));
  16584. for (int j = 0; j < QK5_1; j += 2) {
  16585. const uint8_t vh0 = ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4;
  16586. const uint8_t vh1 = ((qh & (1u << (j + 16))) >> (j + 12));
16587. // fold the 5-bit quant values into 16 histogram bins
  16588. const uint8_t vi0 = ((y[i].qs[j/2] & 0x0F) | vh0) / 2;
  16589. const uint8_t vi1 = ((y[i].qs[j/2] >> 4) | vh1) / 2;
  16590. hist[vi0]++;
  16591. hist[vi1]++;
  16592. }
  16593. }
  16594. }
  16595. return (n/QK5_1*sizeof(block_q5_1));
  16596. }
  16597. size_t ggml_quantize_q8_0(const float * src, void * dst, int n, int k, int64_t * hist) {
  16598. assert(k % QK8_0 == 0);
  16599. const int nb = k / QK8_0;
  16600. for (int b = 0; b < n; b += k) {
  16601. block_q8_0 * restrict y = (block_q8_0 *)dst + b/QK8_0;
  16602. quantize_row_q8_0_reference(src + b, y, k);
  16603. for (int i = 0; i < nb; i++) {
  16604. for (int j = 0; j < QK8_0; ++j) {
  16605. const int8_t vi = y[i].qs[j];
  16606. hist[vi/16 + 8]++;
  16607. }
  16608. }
  16609. }
  16610. return (n/QK8_0*sizeof(block_q8_0));
  16611. }
  16612. size_t ggml_quantize_chunk(enum ggml_type type, const float * src, void * dst, int start, int n, int64_t * hist) {
  16613. size_t result = 0;
  16614. switch (type) {
  16615. case GGML_TYPE_Q4_0:
  16616. {
  16617. GGML_ASSERT(start % QK4_0 == 0);
  16618. block_q4_0 * block = (block_q4_0*)dst + start / QK4_0;
  16619. result = ggml_quantize_q4_0(src + start, block, n, n, hist);
  16620. } break;
  16621. case GGML_TYPE_Q4_1:
  16622. {
  16623. GGML_ASSERT(start % QK4_1 == 0);
  16624. block_q4_1 * block = (block_q4_1*)dst + start / QK4_1;
  16625. result = ggml_quantize_q4_1(src + start, block, n, n, hist);
  16626. } break;
  16627. case GGML_TYPE_Q5_0:
  16628. {
  16629. GGML_ASSERT(start % QK5_0 == 0);
  16630. block_q5_0 * block = (block_q5_0*)dst + start / QK5_0;
  16631. result = ggml_quantize_q5_0(src + start, block, n, n, hist);
  16632. } break;
  16633. case GGML_TYPE_Q5_1:
  16634. {
  16635. GGML_ASSERT(start % QK5_1 == 0);
  16636. block_q5_1 * block = (block_q5_1*)dst + start / QK5_1;
  16637. result = ggml_quantize_q5_1(src + start, block, n, n, hist);
  16638. } break;
  16639. case GGML_TYPE_Q8_0:
  16640. {
  16641. GGML_ASSERT(start % QK8_0 == 0);
  16642. block_q8_0 * block = (block_q8_0*)dst + start / QK8_0;
  16643. result = ggml_quantize_q8_0(src + start, block, n, n, hist);
  16644. } break;
  16645. #ifdef GGML_USE_K_QUANTS
  16646. case GGML_TYPE_Q2_K:
  16647. {
  16648. GGML_ASSERT(start % QK_K == 0);
  16649. block_q2_K * block = (block_q2_K*)dst + start / QK_K;
  16650. result = ggml_quantize_q2_K(src + start, block, n, n, hist);
  16651. } break;
  16652. case GGML_TYPE_Q3_K:
  16653. {
  16654. GGML_ASSERT(start % QK_K == 0);
  16655. block_q3_K * block = (block_q3_K*)dst + start / QK_K;
  16656. result = ggml_quantize_q3_K(src + start, block, n, n, hist);
  16657. } break;
  16658. case GGML_TYPE_Q4_K:
  16659. {
  16660. GGML_ASSERT(start % QK_K == 0);
  16661. block_q4_K * block = (block_q4_K*)dst + start / QK_K;
  16662. result = ggml_quantize_q4_K(src + start, block, n, n, hist);
  16663. } break;
  16664. case GGML_TYPE_Q5_K:
  16665. {
  16666. GGML_ASSERT(start % QK_K == 0);
  16667. block_q5_K * block = (block_q5_K*)dst + start / QK_K;
  16668. result = ggml_quantize_q5_K(src + start, block, n, n, hist);
  16669. } break;
  16670. case GGML_TYPE_Q6_K:
  16671. {
  16672. GGML_ASSERT(start % QK_K == 0);
  16673. block_q6_K * block = (block_q6_K*)dst + start / QK_K;
  16674. result = ggml_quantize_q6_K(src + start, block, n, n, hist);
  16675. } break;
  16676. #endif
  16677. case GGML_TYPE_F16:
  16678. {
  16679. int elemsize = sizeof(ggml_fp16_t);
  16680. ggml_fp32_to_fp16_row(src + start, (ggml_fp16_t *)dst + start, n);
  16681. result = n * elemsize;
  16682. } break;
  16683. case GGML_TYPE_F32:
  16684. {
  16685. int elemsize = sizeof(float);
  16686. result = n * elemsize;
  16687. memcpy((uint8_t *)dst + start * elemsize, src + start, result);
  16688. } break;
  16689. default:
  16690. assert(false);
  16691. }
  16692. return result;
  16693. }
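// Illustrative sketch (assumption): quantizing a row-major matrix in row-sized chunks
// with ggml_quantize_chunk, e.g. to split the work between threads. `nrows` and
// `n_per_row` are placeholders; `start` is an element offset into both src and dst.
//
//     int64_t hist[16] = {0};
//     size_t  total = 0;
//     for (int row = 0; row < nrows; ++row) {
//         const int start = row * n_per_row;
//         total += ggml_quantize_chunk(GGML_TYPE_Q4_0, src, dst, start, n_per_row, hist);
//     }
//     // total is the number of bytes written into dst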
  16694. ////////////////////////////////////////////////////////////////////////////////
  16695. struct gguf_str {
  16696. uint64_t n; // GGUFv2
  16697. char * data;
  16698. };
  16699. static const size_t GGUF_TYPE_SIZE[GGUF_TYPE_COUNT] = {
  16700. [GGUF_TYPE_UINT8] = sizeof(uint8_t),
  16701. [GGUF_TYPE_INT8] = sizeof(int8_t),
  16702. [GGUF_TYPE_UINT16] = sizeof(uint16_t),
  16703. [GGUF_TYPE_INT16] = sizeof(int16_t),
  16704. [GGUF_TYPE_UINT32] = sizeof(uint32_t),
  16705. [GGUF_TYPE_INT32] = sizeof(int32_t),
  16706. [GGUF_TYPE_FLOAT32] = sizeof(float),
  16707. [GGUF_TYPE_BOOL] = sizeof(bool),
  16708. [GGUF_TYPE_STRING] = sizeof(struct gguf_str),
  16709. [GGUF_TYPE_UINT64] = sizeof(uint64_t),
  16710. [GGUF_TYPE_INT64] = sizeof(int64_t),
  16711. [GGUF_TYPE_FLOAT64] = sizeof(double),
  16712. [GGUF_TYPE_ARRAY] = 0, // undefined
  16713. };
  16714. static_assert(GGUF_TYPE_COUNT == 13, "GGUF_TYPE_COUNT != 13");
  16715. static const char * GGUF_TYPE_NAME[GGUF_TYPE_COUNT] = {
  16716. [GGUF_TYPE_UINT8] = "u8",
  16717. [GGUF_TYPE_INT8] = "i8",
  16718. [GGUF_TYPE_UINT16] = "u16",
  16719. [GGUF_TYPE_INT16] = "i16",
  16720. [GGUF_TYPE_UINT32] = "u32",
  16721. [GGUF_TYPE_INT32] = "i32",
  16722. [GGUF_TYPE_FLOAT32] = "f32",
  16723. [GGUF_TYPE_BOOL] = "bool",
  16724. [GGUF_TYPE_STRING] = "str",
  16725. [GGUF_TYPE_ARRAY] = "arr",
  16726. [GGUF_TYPE_UINT64] = "u64",
  16727. [GGUF_TYPE_INT64] = "i64",
  16728. [GGUF_TYPE_FLOAT64] = "f64",
  16729. };
  16730. static_assert(GGUF_TYPE_COUNT == 13, "GGUF_TYPE_COUNT != 13");
  16731. union gguf_value {
  16732. uint8_t uint8;
  16733. int8_t int8;
  16734. uint16_t uint16;
  16735. int16_t int16;
  16736. uint32_t uint32;
  16737. int32_t int32;
  16738. float float32;
  16739. uint64_t uint64;
  16740. int64_t int64;
  16741. double float64;
  16742. bool bool_;
  16743. struct gguf_str str;
  16744. struct {
  16745. enum gguf_type type;
  16746. uint64_t n; // GGUFv2
  16747. void * data;
  16748. } arr;
  16749. };
  16750. struct gguf_kv {
  16751. struct gguf_str key;
  16752. enum gguf_type type;
  16753. union gguf_value value;
  16754. };
  16755. struct gguf_header {
  16756. uint32_t magic;
  16757. uint32_t version;
  16758. uint64_t n_tensors; // GGUFv2
  16759. uint64_t n_kv; // GGUFv2
  16760. };
  16761. struct gguf_tensor_info {
  16762. struct gguf_str name;
  16763. uint32_t n_dims;
  16764. uint64_t ne[GGML_MAX_DIMS];
  16765. enum ggml_type type;
  16766. uint64_t offset; // offset from start of `data`, must be a multiple of `ALIGNMENT`
  16767. // for writing API
  16768. const void * data;
  16769. size_t size;
  16770. };
  16771. struct gguf_context {
  16772. struct gguf_header header;
  16773. struct gguf_kv * kv;
  16774. struct gguf_tensor_info * infos;
  16775. size_t alignment;
  16776. size_t offset; // offset of `data` from beginning of file
  16777. size_t size; // size of `data` in bytes
  16778. //uint8_t * padding;
  16779. void * data;
  16780. };
  16781. static bool gguf_fread_el(FILE * file, void * dst, size_t size, size_t * offset) {
  16782. const size_t n = fread(dst, 1, size, file);
  16783. *offset += n;
  16784. return n == size;
  16785. }
  16786. // NOTE: temporary handling of GGUFv1 >> remove after Oct 2023
  16787. static bool gguf_fread_str_cur(FILE * file, struct gguf_str * p, size_t * offset) {
  16788. p->n = 0;
  16789. p->data = NULL;
  16790. bool ok = true;
  16791. ok = ok && gguf_fread_el(file, &p->n, sizeof(p->n), offset); p->data = calloc(p->n + 1, 1);
  16792. ok = ok && gguf_fread_el(file, p->data, p->n, offset);
  16793. return ok;
  16794. }
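// GGUFv1 stored string lengths as 32-bit values, while v2 uses the 64-bit gguf_str::n
// directly; gguf_fread_str_v1 below widens the 32-bit length before reading the bytes.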
  16795. static bool gguf_fread_str_v1(FILE * file, struct gguf_str * p, size_t * offset) {
  16796. p->n = 0;
  16797. p->data = NULL;
  16798. bool ok = true;
  16799. uint32_t n = 0;
  16800. ok = ok && gguf_fread_el(file, &n, sizeof(n), offset); p->data = calloc(n + 1, 1); p->n = n;
  16801. ok = ok && gguf_fread_el(file, p->data, p->n, offset);
  16802. return ok;
  16803. }
  16804. struct gguf_context * gguf_init_empty(void) {
  16805. struct gguf_context * ctx = GGML_ALIGNED_MALLOC(sizeof(struct gguf_context));
  16806. ctx->header.magic = GGUF_MAGIC;
  16807. ctx->header.version = GGUF_VERSION;
  16808. ctx->header.n_tensors = 0;
  16809. ctx->header.n_kv = 0;
  16810. ctx->kv = NULL;
  16811. ctx->infos = NULL;
  16812. ctx->alignment = GGUF_DEFAULT_ALIGNMENT;
  16813. ctx->offset = 0;
  16814. ctx->size = 0;
  16815. ctx->data = NULL;
  16816. return ctx;
  16817. }
  16818. struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_params params) {
  16819. FILE * file = fopen(fname, "rb");
  16820. if (!file) {
  16821. return NULL;
  16822. }
  16823. // offset from start of file
  16824. size_t offset = 0;
  16825. uint32_t magic = 0;
  16826. // check the magic before making allocations
  16827. {
  16828. gguf_fread_el(file, &magic, sizeof(magic), &offset);
  16829. if (magic != GGUF_MAGIC) {
  16830. fprintf(stderr, "%s: invalid magic number %08x\n", __func__, magic);
  16831. fclose(file);
  16832. return NULL;
  16833. }
  16834. }
  16835. bool ok = true;
  16836. struct gguf_context * ctx = GGML_ALIGNED_MALLOC(sizeof(struct gguf_context));
  16837. // read the header
  16838. {
  16839. ctx->header.magic = magic;
  16840. ctx->kv = NULL;
  16841. ctx->infos = NULL;
  16842. ctx->data = NULL;
  16843. ok = ok && gguf_fread_el(file, &ctx->header.version, sizeof(ctx->header.version), &offset);
  16844. if (ctx->header.version == 1) {
  16845. // NOTE: temporary handling of GGUFv1 >> remove after Oct 2023
  16846. uint32_t n_tensors = 0;
  16847. uint32_t n_kv = 0;
  16848. ok = ok && gguf_fread_el(file, &n_tensors, sizeof(n_tensors), &offset);
  16849. ok = ok && gguf_fread_el(file, &n_kv, sizeof(n_kv), &offset);
  16850. ctx->header.n_tensors = n_tensors;
  16851. ctx->header.n_kv = n_kv;
  16852. } else {
  16853. ok = ok && gguf_fread_el(file, &ctx->header.n_tensors, sizeof(ctx->header.n_tensors), &offset);
  16854. ok = ok && gguf_fread_el(file, &ctx->header.n_kv, sizeof(ctx->header.n_kv), &offset);
  16855. }
  16856. if (!ok) {
  16857. fprintf(stderr, "%s: failed to read header\n", __func__);
  16858. fclose(file);
  16859. gguf_free(ctx);
  16860. return NULL;
  16861. }
  16862. }
  16863. // NOTE: temporary handling of GGUFv1 >> remove after Oct 2023
  16864. bool (* gguf_fread_str)(FILE *, struct gguf_str *, size_t *) = gguf_fread_str_cur;
  16865. if (ctx->header.version == 1) {
  16866. gguf_fread_str = gguf_fread_str_v1;
  16867. }
  16868. // read the kv pairs
  16869. {
  16870. ctx->kv = malloc(ctx->header.n_kv * sizeof(struct gguf_kv));
  16871. for (uint32_t i = 0; i < ctx->header.n_kv; ++i) {
  16872. struct gguf_kv * kv = &ctx->kv[i];
  16873. //fprintf(stderr, "%s: reading kv %d\n", __func__, i);
  16874. ok = ok && gguf_fread_str(file, &kv->key, &offset);
  16875. ok = ok && gguf_fread_el (file, &kv->type, sizeof(kv->type), &offset);
  16876. //fprintf(stderr, "%s: reading kv with key %s\n", __func__, kv->key.data);
  16877. switch (kv->type) {
  16878. case GGUF_TYPE_UINT8: ok = ok && gguf_fread_el (file, &kv->value.uint8, sizeof(kv->value.uint8), &offset); break;
  16879. case GGUF_TYPE_INT8: ok = ok && gguf_fread_el (file, &kv->value.int8, sizeof(kv->value.int8), &offset); break;
  16880. case GGUF_TYPE_UINT16: ok = ok && gguf_fread_el (file, &kv->value.uint16, sizeof(kv->value.uint16), &offset); break;
  16881. case GGUF_TYPE_INT16: ok = ok && gguf_fread_el (file, &kv->value.int16, sizeof(kv->value.int16), &offset); break;
  16882. case GGUF_TYPE_UINT32: ok = ok && gguf_fread_el (file, &kv->value.uint32, sizeof(kv->value.uint32), &offset); break;
  16883. case GGUF_TYPE_INT32: ok = ok && gguf_fread_el (file, &kv->value.int32, sizeof(kv->value.int32), &offset); break;
  16884. case GGUF_TYPE_FLOAT32: ok = ok && gguf_fread_el (file, &kv->value.float32, sizeof(kv->value.float32), &offset); break;
  16885. case GGUF_TYPE_UINT64: ok = ok && gguf_fread_el (file, &kv->value.uint64, sizeof(kv->value.uint64), &offset); break;
  16886. case GGUF_TYPE_INT64: ok = ok && gguf_fread_el (file, &kv->value.int64, sizeof(kv->value.int64), &offset); break;
  16887. case GGUF_TYPE_FLOAT64: ok = ok && gguf_fread_el (file, &kv->value.float64, sizeof(kv->value.float64), &offset); break;
  16888. case GGUF_TYPE_BOOL: ok = ok && gguf_fread_el (file, &kv->value.bool_, sizeof(kv->value.bool_), &offset); break;
  16889. case GGUF_TYPE_STRING: ok = ok && gguf_fread_str(file, &kv->value.str, &offset); break;
  16890. case GGUF_TYPE_ARRAY:
  16891. {
  16892. ok = ok && gguf_fread_el(file, &kv->value.arr.type, sizeof(kv->value.arr.type), &offset);
  16893. if (ctx->header.version == 1) {
  16894. // NOTE: temporary handling of GGUFv1 >> remove after Oct 2023
  16895. uint32_t n = 0;
  16896. ok = ok && gguf_fread_el(file, &n, sizeof(n), &offset);
  16897. kv->value.arr.n = n;
  16898. } else {
  16899. ok = ok && gguf_fread_el(file, &kv->value.arr.n, sizeof(kv->value.arr.n), &offset);
  16900. }
  16901. switch (kv->value.arr.type) {
  16902. case GGUF_TYPE_UINT8:
  16903. case GGUF_TYPE_INT8:
  16904. case GGUF_TYPE_UINT16:
  16905. case GGUF_TYPE_INT16:
  16906. case GGUF_TYPE_UINT32:
  16907. case GGUF_TYPE_INT32:
  16908. case GGUF_TYPE_FLOAT32:
  16909. case GGUF_TYPE_UINT64:
  16910. case GGUF_TYPE_INT64:
  16911. case GGUF_TYPE_FLOAT64:
  16912. case GGUF_TYPE_BOOL:
  16913. {
  16914. kv->value.arr.data = malloc(kv->value.arr.n * GGUF_TYPE_SIZE[kv->value.arr.type]);
  16915. ok = ok && gguf_fread_el(file, kv->value.arr.data, kv->value.arr.n * GGUF_TYPE_SIZE[kv->value.arr.type], &offset);
  16916. } break;
  16917. case GGUF_TYPE_STRING:
  16918. {
  16919. kv->value.arr.data = malloc(kv->value.arr.n * sizeof(struct gguf_str));
  16920. for (uint32_t j = 0; j < kv->value.arr.n; ++j) {
  16921. ok = ok && gguf_fread_str(file, &((struct gguf_str *) kv->value.arr.data)[j], &offset);
  16922. }
  16923. } break;
  16924. case GGUF_TYPE_ARRAY:
  16925. case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type"); break;
  16926. }
  16927. } break;
  16928. case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type");
  16929. }
  16930. if (!ok) {
  16931. break;
  16932. }
  16933. }
  16934. if (!ok) {
  16935. fprintf(stderr, "%s: failed to read key-value pairs\n", __func__);
  16936. fclose(file);
  16937. gguf_free(ctx);
  16938. return NULL;
  16939. }
  16940. }
  16941. // read the tensor infos
  16942. {
  16943. ctx->infos = malloc(ctx->header.n_tensors * sizeof(struct gguf_tensor_info));
  16944. for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) {
  16945. struct gguf_tensor_info * info = &ctx->infos[i];
  16946. for (int j = 0; j < GGML_MAX_DIMS; ++j) {
  16947. info->ne[j] = 1;
  16948. }
  16949. ok = ok && gguf_fread_str(file, &info->name, &offset);
  16950. ok = ok && gguf_fread_el (file, &info->n_dims, sizeof(info->n_dims), &offset);
  16951. for (uint32_t j = 0; j < info->n_dims; ++j) {
  16952. if (ctx->header.version == 1) {
  16953. // NOTE: temporary handling of GGUFv1 >> remove after Oct 2023
  16954. uint32_t t = 0;
  16955. ok = ok && gguf_fread_el(file, &t, sizeof(t), &offset);
  16956. info->ne[j] = t;
  16957. } else {
  16958. ok = ok && gguf_fread_el(file, &info->ne[j], sizeof(info->ne[j]), &offset);
  16959. }
  16960. }
  16961. ok = ok && gguf_fread_el (file, &info->type, sizeof(info->type), &offset);
  16962. ok = ok && gguf_fread_el (file, &info->offset, sizeof(info->offset), &offset);
  16963. if (!ok) {
  16964. fprintf(stderr, "%s: failed to read tensor info\n", __func__);
  16965. fclose(file);
  16966. gguf_free(ctx);
  16967. return NULL;
  16968. }
  16969. }
  16970. }
  16971. ctx->alignment = GGUF_DEFAULT_ALIGNMENT;
  16972. int alignment_idx = gguf_find_key(ctx, "general.alignment");
  16973. if (alignment_idx != -1) {
  16974. ctx->alignment = gguf_get_val_u32(ctx, alignment_idx);
  16975. }
  16976. // we require the data section to be aligned, so take into account any padding
  16977. {
  16978. const size_t offset_pad = offset % ctx->alignment;
  16979. if (offset_pad != 0) {
  16980. offset += ctx->alignment - offset_pad;
  16981. fseek(file, offset, SEEK_SET);
  16982. }
  16983. }
  16984. // store the current file offset - this is where the data section starts
  16985. ctx->offset = offset;
  16986. // compute the total size of the data section, taking into account the alignment
  16987. {
  16988. ctx->size = 0;
  16989. for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) {
  16990. struct gguf_tensor_info * info = &ctx->infos[i];
  16991. const int64_t ne =
  16992. (int64_t) info->ne[0] *
  16993. (int64_t) info->ne[1] *
  16994. (int64_t) info->ne[2] *
  16995. (int64_t) info->ne[3];
  16996. if (ne % ggml_blck_size(info->type) != 0) {
  16997. fprintf(stderr, "%s: tensor '%s' number of elements (%" PRId64 ") is not a multiple of block size (%d)\n",
  16998. __func__, info->name.data, ne, ggml_blck_size(info->type));
  16999. fclose(file);
  17000. gguf_free(ctx);
  17001. return NULL;
  17002. }
  17003. const size_t size_cur = (ne*ggml_type_size(info->type))/ggml_blck_size(info->type);
  17004. ctx->size += GGML_PAD(size_cur, ctx->alignment);
  17005. }
  17006. }
  17007. // load the tensor data only if requested
  17008. if (params.ctx != NULL) {
  17009. // if the provided gguf_context is no_alloc, then we create "empty" tensors and do not read the binary blob
  17010. // otherwise, we load the binary blob into the created ggml_context as well, and point the "data" members of
  17011. // the ggml_tensor structs to the appropriate locations in the binary blob
  17012. // compute the exact size needed for the new ggml_context
  17013. const size_t mem_size =
  17014. params.no_alloc ?
  17015. (ctx->header.n_tensors )*ggml_tensor_overhead() :
  17016. (ctx->header.n_tensors + 1)*ggml_tensor_overhead() + ctx->size;
  17017. struct ggml_init_params pdata = {
  17018. .mem_size = mem_size,
  17019. .mem_buffer = NULL,
  17020. .no_alloc = params.no_alloc,
  17021. };
  17022. *params.ctx = ggml_init(pdata);
  17023. struct ggml_context * ctx_data = *params.ctx;
  17024. struct ggml_tensor * data = NULL;
  17025. if (!params.no_alloc) {
  17026. data = ggml_new_tensor_1d(ctx_data, GGML_TYPE_I8, ctx->size);
  17027. ok = ok && data != NULL;
  17028. // read the binary blob with the tensor data
  17029. ok = ok && gguf_fread_el(file, data->data, ctx->size, &offset);
  17030. if (!ok) {
  17031. fprintf(stderr, "%s: failed to read tensor data\n", __func__);
  17032. fclose(file);
  17033. ggml_free(ctx_data);
  17034. gguf_free(ctx);
  17035. return NULL;
  17036. }
  17037. ctx->data = data->data;
  17038. }
  17039. ggml_set_no_alloc(ctx_data, true);
  17040. // create the tensors
  17041. for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) {
  17042. const int64_t ne[GGML_MAX_DIMS] = {
  17043. ctx->infos[i].ne[0],
  17044. ctx->infos[i].ne[1],
  17045. ctx->infos[i].ne[2],
  17046. ctx->infos[i].ne[3],
  17047. };
  17048. struct ggml_tensor * cur = ggml_new_tensor(ctx_data, ctx->infos[i].type, ctx->infos[i].n_dims, ne);
  17049. ok = ok && cur != NULL;
  17050. ggml_set_name(cur, ctx->infos[i].name.data);
  17051. if (!ok) {
  17052. break;
  17053. }
  17054. // point the data member to the appropriate location in the binary blob using the tensor infos
  17055. if (!params.no_alloc) {
  17056. //cur->data = (char *) data->data + ctx->infos[i].offset - ctx->offset; // offset from start of file
  17057. cur->data = (char *) data->data + ctx->infos[i].offset; // offset from data
  17058. }
  17059. }
  17060. if (!ok) {
  17061. fprintf(stderr, "%s: failed to read the tensor data\n", __func__);
  17062. fclose(file);
  17063. ggml_free(ctx_data);
  17064. gguf_free(ctx);
  17065. return NULL;
  17066. }
  17067. ggml_set_no_alloc(ctx_data, params.no_alloc);
  17068. }
  17069. fclose(file);
  17070. return ctx;
  17071. }
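// Illustrative sketch (not part of ggml.c): loading a GGUF file together with its
// tensor data. The file name is a placeholder; with .no_alloc = false the binary blob
// is read into `ctx_data` and each tensor's data pointer is set up to point into it.
//
//     struct ggml_context * ctx_data = NULL;
//     struct gguf_init_params ip = {
//         /*.no_alloc =*/ false,
//         /*.ctx      =*/ &ctx_data,
//     };
//     struct gguf_context * gctx = gguf_init_from_file("model.gguf", ip);
//     if (gctx) {
//         for (int i = 0; i < gguf_get_n_tensors(gctx); ++i) {
//             const char * name = gguf_get_tensor_name(gctx, i);
//             struct ggml_tensor * t = ggml_get_tensor(ctx_data, name);
//             // ... use t ...
//         }
//         gguf_free(gctx);
//         ggml_free(ctx_data);
//     }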
  17072. void gguf_free(struct gguf_context * ctx) {
  17073. if (ctx == NULL) {
  17074. return;
  17075. }
  17076. if (ctx->kv) {
  17077. // free string memory - not great..
  17078. for (uint32_t i = 0; i < ctx->header.n_kv; ++i) {
  17079. struct gguf_kv * kv = &ctx->kv[i];
  17080. if (kv->key.data) {
  17081. free(kv->key.data);
  17082. }
  17083. if (kv->type == GGUF_TYPE_STRING) {
  17084. if (kv->value.str.data) {
  17085. free(kv->value.str.data);
  17086. }
  17087. }
  17088. if (kv->type == GGUF_TYPE_ARRAY) {
  17089. if (kv->value.arr.data) {
  17090. if (kv->value.arr.type == GGUF_TYPE_STRING) {
  17091. for (uint32_t j = 0; j < kv->value.arr.n; ++j) {
  17092. struct gguf_str * str = &((struct gguf_str *) kv->value.arr.data)[j];
  17093. if (str->data) {
  17094. free(str->data);
  17095. }
  17096. }
  17097. }
  17098. free(kv->value.arr.data);
  17099. }
  17100. }
  17101. }
  17102. free(ctx->kv);
  17103. }
  17104. if (ctx->infos) {
  17105. for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) {
  17106. struct gguf_tensor_info * info = &ctx->infos[i];
  17107. if (info->name.data) {
  17108. free(info->name.data);
  17109. }
  17110. }
  17111. free(ctx->infos);
  17112. }
  17113. GGML_ALIGNED_FREE(ctx);
  17114. }
  17115. const char * gguf_type_name(enum gguf_type type) {
  17116. return GGUF_TYPE_NAME[type];
  17117. }
  17118. int gguf_get_version(const struct gguf_context * ctx) {
  17119. return ctx->header.version;
  17120. }
  17121. size_t gguf_get_alignment(const struct gguf_context * ctx) {
  17122. return ctx->alignment;
  17123. }
  17124. size_t gguf_get_data_offset(const struct gguf_context * ctx) {
  17125. return ctx->offset;
  17126. }
  17127. void * gguf_get_data(const struct gguf_context * ctx) {
  17128. return ctx->data;
  17129. }
  17130. int gguf_get_n_kv(const struct gguf_context * ctx) {
  17131. return ctx->header.n_kv;
  17132. }
  17133. int gguf_find_key(const struct gguf_context * ctx, const char * key) {
  17134. // return -1 if key not found
  17135. int keyfound = -1;
  17136. const int n_kv = gguf_get_n_kv(ctx);
  17137. for (int i = 0; i < n_kv; ++i) {
  17138. if (strcmp(key, gguf_get_key(ctx, i)) == 0) {
  17139. keyfound = i;
  17140. break;
  17141. }
  17142. }
  17143. return keyfound;
  17144. }
  17145. const char * gguf_get_key(const struct gguf_context * ctx, int key_id) {
  17146. return ctx->kv[key_id].key.data;
  17147. }
  17148. enum gguf_type gguf_get_kv_type(const struct gguf_context * ctx, int key_id) {
  17149. return ctx->kv[key_id].type;
  17150. }
  17151. enum gguf_type gguf_get_arr_type(const struct gguf_context * ctx, int key_id) {
  17152. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_ARRAY);
  17153. return ctx->kv[key_id].value.arr.type;
  17154. }
  17155. const void * gguf_get_arr_data(const struct gguf_context * ctx, int key_id) {
  17156. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_ARRAY);
  17157. return ctx->kv[key_id].value.arr.data;
  17158. }
  17159. const char * gguf_get_arr_str(const struct gguf_context * ctx, int key_id, int i) {
  17160. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_ARRAY);
  17161. struct gguf_kv * kv = &ctx->kv[key_id];
  17162. struct gguf_str * str = &((struct gguf_str *) kv->value.arr.data)[i];
  17163. return str->data;
  17164. }
  17165. int gguf_get_arr_n(const struct gguf_context * ctx, int key_id) {
  17166. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_ARRAY);
  17167. return ctx->kv[key_id].value.arr.n;
  17168. }
  17169. uint8_t gguf_get_val_u8(const struct gguf_context * ctx, int key_id) {
  17170. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_UINT8);
  17171. return ctx->kv[key_id].value.uint8;
  17172. }
  17173. int8_t gguf_get_val_i8(const struct gguf_context * ctx, int key_id) {
  17174. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_INT8);
  17175. return ctx->kv[key_id].value.int8;
  17176. }
  17177. uint16_t gguf_get_val_u16(const struct gguf_context * ctx, int key_id) {
  17178. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_UINT16);
  17179. return ctx->kv[key_id].value.uint16;
  17180. }
  17181. int16_t gguf_get_val_i16(const struct gguf_context * ctx, int key_id) {
  17182. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_INT16);
  17183. return ctx->kv[key_id].value.int16;
  17184. }
  17185. uint32_t gguf_get_val_u32(const struct gguf_context * ctx, int key_id) {
  17186. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_UINT32);
  17187. return ctx->kv[key_id].value.uint32;
  17188. }
  17189. int32_t gguf_get_val_i32(const struct gguf_context * ctx, int key_id) {
  17190. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_INT32);
  17191. return ctx->kv[key_id].value.int32;
  17192. }
  17193. float gguf_get_val_f32(const struct gguf_context * ctx, int key_id) {
  17194. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_FLOAT32);
  17195. return ctx->kv[key_id].value.float32;
  17196. }
  17197. uint64_t gguf_get_val_u64(const struct gguf_context * ctx, int key_id) {
  17198. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_UINT64);
  17199. return ctx->kv[key_id].value.uint64;
  17200. }
  17201. int64_t gguf_get_val_i64(const struct gguf_context * ctx, int key_id) {
  17202. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_INT64);
  17203. return ctx->kv[key_id].value.int64;
  17204. }
  17205. double gguf_get_val_f64(const struct gguf_context * ctx, int key_id) {
  17206. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_FLOAT64);
  17207. return ctx->kv[key_id].value.float64;
  17208. }
  17209. bool gguf_get_val_bool(const struct gguf_context * ctx, int key_id) {
  17210. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_BOOL);
  17211. return ctx->kv[key_id].value.bool_;
  17212. }
  17213. const char * gguf_get_val_str(const struct gguf_context * ctx, int key_id) {
  17214. GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_STRING);
  17215. return ctx->kv[key_id].value.str.data;
  17216. }
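// Illustrative sketch (assumption): looking up key-value pairs by name and reading
// them with the typed getters. The key names are examples only; callers should check
// the type before calling a getter, since the getters assert on a type mismatch.
//
//     const int kid = gguf_find_key(gctx, "general.architecture");
//     if (kid >= 0 && gguf_get_kv_type(gctx, kid) == GGUF_TYPE_STRING) {
//         const char * arch = gguf_get_val_str(gctx, kid);
//     }
//     const int aid = gguf_find_key(gctx, "general.alignment");
//     if (aid >= 0 && gguf_get_kv_type(gctx, aid) == GGUF_TYPE_UINT32) {
//         uint32_t align = gguf_get_val_u32(gctx, aid);
//     }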
  17217. int gguf_get_n_tensors(const struct gguf_context * ctx) {
  17218. return ctx->header.n_tensors;
  17219. }
  17220. int gguf_find_tensor(const struct gguf_context * ctx, const char * name) {
  17221. // return -1 if tensor not found
  17222. int tensorfound = -1;
  17223. const int n_tensors = gguf_get_n_tensors(ctx);
  17224. for (int i = 0; i < n_tensors; ++i) {
  17225. if (strcmp(name, gguf_get_tensor_name(ctx, i)) == 0) {
  17226. tensorfound = i;
  17227. break;
  17228. }
  17229. }
  17230. return tensorfound;
  17231. }
  17232. size_t gguf_get_tensor_offset(const struct gguf_context * ctx, int i) {
  17233. return ctx->infos[i].offset;
  17234. }
  17235. char * gguf_get_tensor_name(const struct gguf_context * ctx, int i) {
  17236. return ctx->infos[i].name.data;
  17237. }
  17238. // returns the index
  17239. static int gguf_get_or_add_key(struct gguf_context * ctx, const char * key) {
  17240. const int idx = gguf_find_key(ctx, key);
  17241. if (idx >= 0) {
  17242. return idx;
  17243. }
  17244. const int n_kv = gguf_get_n_kv(ctx);
  17245. ctx->kv = realloc(ctx->kv, (n_kv + 1) * sizeof(struct gguf_kv));
  17246. ctx->kv[n_kv].key.n = strlen(key);
  17247. ctx->kv[n_kv].key.data = strdup(key);
  17248. ctx->header.n_kv++;
  17249. return n_kv;
  17250. }
  17251. void gguf_set_val_u8(struct gguf_context * ctx, const char * key, uint8_t val) {
  17252. const int idx = gguf_get_or_add_key(ctx, key);
  17253. ctx->kv[idx].type = GGUF_TYPE_UINT8;
  17254. ctx->kv[idx].value.uint8 = val;
  17255. }
  17256. void gguf_set_val_i8(struct gguf_context * ctx, const char * key, int8_t val) {
  17257. const int idx = gguf_get_or_add_key(ctx, key);
  17258. ctx->kv[idx].type = GGUF_TYPE_INT8;
  17259. ctx->kv[idx].value.int8 = val;
  17260. }
  17261. void gguf_set_val_u16(struct gguf_context * ctx, const char * key, uint16_t val) {
  17262. const int idx = gguf_get_or_add_key(ctx, key);
  17263. ctx->kv[idx].type = GGUF_TYPE_UINT16;
  17264. ctx->kv[idx].value.uint16 = val;
  17265. }
  17266. void gguf_set_val_i16(struct gguf_context * ctx, const char * key, int16_t val) {
  17267. const int idx = gguf_get_or_add_key(ctx, key);
  17268. ctx->kv[idx].type = GGUF_TYPE_INT16;
  17269. ctx->kv[idx].value.int16 = val;
  17270. }
  17271. void gguf_set_val_u32(struct gguf_context * ctx, const char * key, uint32_t val) {
  17272. const int idx = gguf_get_or_add_key(ctx, key);
  17273. ctx->kv[idx].type = GGUF_TYPE_UINT32;
  17274. ctx->kv[idx].value.uint32 = val;
  17275. }
  17276. void gguf_set_val_i32(struct gguf_context * ctx, const char * key, int32_t val) {
  17277. const int idx = gguf_get_or_add_key(ctx, key);
  17278. ctx->kv[idx].type = GGUF_TYPE_INT32;
  17279. ctx->kv[idx].value.int32 = val;
  17280. }
  17281. void gguf_set_val_f32(struct gguf_context * ctx, const char * key, float val) {
  17282. const int idx = gguf_get_or_add_key(ctx, key);
  17283. ctx->kv[idx].type = GGUF_TYPE_FLOAT32;
  17284. ctx->kv[idx].value.float32 = val;
  17285. }
  17286. void gguf_set_val_u64(struct gguf_context * ctx, const char * key, uint64_t val) {
  17287. const int idx = gguf_get_or_add_key(ctx, key);
  17288. ctx->kv[idx].type = GGUF_TYPE_UINT64;
  17289. ctx->kv[idx].value.uint64 = val;
  17290. }
  17291. void gguf_set_val_i64(struct gguf_context * ctx, const char * key, int64_t val) {
  17292. const int idx = gguf_get_or_add_key(ctx, key);
  17293. ctx->kv[idx].type = GGUF_TYPE_INT64;
  17294. ctx->kv[idx].value.int64 = val;
  17295. }
  17296. void gguf_set_val_f64(struct gguf_context * ctx, const char * key, double val) {
  17297. const int idx = gguf_get_or_add_key(ctx, key);
  17298. ctx->kv[idx].type = GGUF_TYPE_FLOAT64;
  17299. ctx->kv[idx].value.float64 = val;
  17300. }
  17301. void gguf_set_val_bool(struct gguf_context * ctx, const char * key, bool val) {
  17302. const int idx = gguf_get_or_add_key(ctx, key);
  17303. ctx->kv[idx].type = GGUF_TYPE_BOOL;
  17304. ctx->kv[idx].value.bool_ = val;
  17305. }
  17306. void gguf_set_val_str(struct gguf_context * ctx, const char * key, const char * val) {
  17307. const int idx = gguf_get_or_add_key(ctx, key);
  17308. ctx->kv[idx].type = GGUF_TYPE_STRING;
  17309. ctx->kv[idx].value.str.n = strlen(val);
  17310. ctx->kv[idx].value.str.data = strdup(val);
  17311. }
  17312. void gguf_set_arr_data(struct gguf_context * ctx, const char * key, enum gguf_type type, const void * data, int n) {
  17313. const int idx = gguf_get_or_add_key(ctx, key);
  17314. ctx->kv[idx].type = GGUF_TYPE_ARRAY;
  17315. ctx->kv[idx].value.arr.type = type;
  17316. ctx->kv[idx].value.arr.n = n;
  17317. ctx->kv[idx].value.arr.data = malloc(n*GGUF_TYPE_SIZE[type]);
  17318. memcpy(ctx->kv[idx].value.arr.data, data, n*GGUF_TYPE_SIZE[type]);
  17319. }
  17320. void gguf_set_arr_str(struct gguf_context * ctx, const char * key, const char ** data, int n) {
  17321. const int idx = gguf_get_or_add_key(ctx, key);
  17322. ctx->kv[idx].type = GGUF_TYPE_ARRAY;
  17323. ctx->kv[idx].value.arr.type = GGUF_TYPE_STRING;
  17324. ctx->kv[idx].value.arr.n = n;
  17325. ctx->kv[idx].value.arr.data = malloc(n*sizeof(struct gguf_str));
  17326. for (int i = 0; i < n; i++) {
  17327. struct gguf_str * str = &((struct gguf_str *)ctx->kv[idx].value.arr.data)[i];
  17328. str->n = strlen(data[i]);
  17329. str->data = strdup(data[i]);
  17330. }
  17331. }
  17332. // set or add KV pairs from another context
  17333. void gguf_set_kv(struct gguf_context * ctx, struct gguf_context * src) {
  17334. for (uint32_t i = 0; i < src->header.n_kv; i++) {
  17335. switch (src->kv[i].type) {
  17336. case GGUF_TYPE_UINT8: gguf_set_val_u8 (ctx, src->kv[i].key.data, src->kv[i].value.uint8); break;
  17337. case GGUF_TYPE_INT8: gguf_set_val_i8 (ctx, src->kv[i].key.data, src->kv[i].value.int8); break;
  17338. case GGUF_TYPE_UINT16: gguf_set_val_u16 (ctx, src->kv[i].key.data, src->kv[i].value.uint16); break;
  17339. case GGUF_TYPE_INT16: gguf_set_val_i16 (ctx, src->kv[i].key.data, src->kv[i].value.int16); break;
  17340. case GGUF_TYPE_UINT32: gguf_set_val_u32 (ctx, src->kv[i].key.data, src->kv[i].value.uint32); break;
  17341. case GGUF_TYPE_INT32: gguf_set_val_i32 (ctx, src->kv[i].key.data, src->kv[i].value.int32); break;
  17342. case GGUF_TYPE_FLOAT32: gguf_set_val_f32 (ctx, src->kv[i].key.data, src->kv[i].value.float32); break;
  17343. case GGUF_TYPE_UINT64: gguf_set_val_u64 (ctx, src->kv[i].key.data, src->kv[i].value.uint64); break;
  17344. case GGUF_TYPE_INT64: gguf_set_val_i64 (ctx, src->kv[i].key.data, src->kv[i].value.int64); break;
  17345. case GGUF_TYPE_FLOAT64: gguf_set_val_f64 (ctx, src->kv[i].key.data, src->kv[i].value.float64); break;
  17346. case GGUF_TYPE_BOOL: gguf_set_val_bool(ctx, src->kv[i].key.data, src->kv[i].value.bool_); break;
  17347. case GGUF_TYPE_STRING: gguf_set_val_str (ctx, src->kv[i].key.data, src->kv[i].value.str.data); break;
  17348. case GGUF_TYPE_ARRAY:
  17349. {
  17350. if (src->kv[i].value.arr.type == GGUF_TYPE_STRING) {
  17351. const char ** data = malloc(src->kv[i].value.arr.n*sizeof(char *));
  17352. for (uint32_t j = 0; j < src->kv[i].value.arr.n; j++) {
  17353. data[j] = ((struct gguf_str *)src->kv[i].value.arr.data)[j].data;
  17354. }
  17355. gguf_set_arr_str(ctx, src->kv[i].key.data, data, src->kv[i].value.arr.n);
  17356. free(data);
  17357. } else if (src->kv[i].value.arr.type == GGUF_TYPE_ARRAY) {
  17358. GGML_ASSERT(false && "nested arrays not supported");
  17359. } else {
  17360. gguf_set_arr_data(ctx, src->kv[i].key.data, src->kv[i].value.arr.type, src->kv[i].value.arr.data, src->kv[i].value.arr.n);
  17361. }
  17362. } break;
  17363. case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type"); break;
  17364. }
  17365. }
  17366. }
  17367. void gguf_add_tensor(
  17368. struct gguf_context * ctx,
  17369. const struct ggml_tensor * tensor) {
  17370. const int idx = ctx->header.n_tensors;
  17371. ctx->infos = realloc(ctx->infos, (idx + 1)*sizeof(struct gguf_tensor_info));
  17372. ctx->infos[idx].name.n = strlen(tensor->name);
  17373. ctx->infos[idx].name.data = strdup(tensor->name);
  17374. for (int i = 0; i < GGML_MAX_DIMS; ++i) {
  17375. ctx->infos[idx].ne[i] = 1;
  17376. }
  17377. ctx->infos[idx].n_dims = tensor->n_dims;
  17378. for (int i = 0; i < tensor->n_dims; i++) {
  17379. ctx->infos[idx].ne[i] = tensor->ne[i];
  17380. }
  17381. ctx->infos[idx].type = tensor->type;
  17382. ctx->infos[idx].offset = 0;
  17383. ctx->infos[idx].data = tensor->data;
  17384. ctx->infos[idx].size = ggml_nbytes(tensor);
  17385. if (ctx->header.n_tensors > 0) {
  17386. ctx->infos[idx].offset = ctx->infos[idx - 1].offset + GGML_PAD(ctx->infos[idx - 1].size, ctx->alignment);
  17387. }
  17388. ctx->header.n_tensors++;
  17389. }
  17390. void gguf_set_tensor_type(struct gguf_context * ctx, const char * name, enum ggml_type type) {
  17391. const int idx = gguf_find_tensor(ctx, name);
  17392. if (idx < 0) {
  17393. GGML_ASSERT(false && "tensor not found");
  17394. }
  17395. ctx->infos[idx].type = type;
  17396. }
  17397. void gguf_set_tensor_data(struct gguf_context * ctx, const char * name, const void * data, size_t size) {
  17398. const int idx = gguf_find_tensor(ctx, name);
  17399. if (idx < 0) {
  17400. GGML_ASSERT(false && "tensor not found");
  17401. }
  17402. ctx->infos[idx].data = data;
  17403. ctx->infos[idx].size = size;
  17404. // update offsets
  17405. for (uint32_t i = idx + 1; i < ctx->header.n_tensors; ++i) {
  17406. ctx->infos[i].offset = ctx->infos[i - 1].offset + GGML_PAD(ctx->infos[i - 1].size, ctx->alignment);
  17407. }
  17408. }
  17409. //static void gguf_fwrite_str(FILE * file, const struct gguf_str * val) {
  17410. // fwrite(&val->n, sizeof(val->n), 1, file);
  17411. // fwrite(val->data, sizeof(char), val->n, file);
  17412. //}
  17413. //
  17414. //static void gguf_fwrite_el(FILE * file, const void * val, size_t size) {
  17415. // fwrite(val, sizeof(char), size, file);
  17416. //}
  17417. struct gguf_buf {
  17418. void * data;
  17419. size_t size;
  17420. size_t offset;
  17421. };
  17422. static struct gguf_buf gguf_buf_init(size_t size) {
  17423. struct gguf_buf buf = {
  17424. /*buf.data =*/ size == 0 ? NULL : malloc(size),
  17425. /*buf.size =*/ size,
  17426. /*buf.offset =*/ 0,
  17427. };
  17428. return buf;
  17429. }
  17430. static void gguf_buf_free(struct gguf_buf buf) {
  17431. if (buf.data) {
  17432. free(buf.data);
  17433. }
  17434. }
  17435. static void gguf_buf_grow(struct gguf_buf * buf, size_t size) {
  17436. if (buf->offset + size > buf->size) {
  17437. buf->size = 1.5*(buf->offset + size);
  17438. if (buf->data) {
  17439. buf->data = realloc(buf->data, buf->size);
  17440. }
  17441. }
  17442. }
  17443. static void gguf_bwrite_str(struct gguf_buf * buf, const struct gguf_str * val) {
  17444. gguf_buf_grow(buf, sizeof(val->n) + val->n);
  17445. if (buf->data) {
  17446. memcpy((char *) buf->data + buf->offset, &val->n, sizeof(val->n));
  17447. }
  17448. buf->offset += sizeof(val->n);
  17449. if (buf->data) {
  17450. memcpy((char *) buf->data + buf->offset, val->data, val->n);
  17451. }
  17452. buf->offset += val->n;
  17453. }
  17454. static void gguf_bwrite_el(struct gguf_buf * buf, const void * val, size_t el_size) {
  17455. gguf_buf_grow(buf, el_size);
  17456. if (buf->data) {
  17457. memcpy((char *) buf->data + buf->offset, val, el_size);
  17458. }
  17459. buf->offset += el_size;
  17460. }
  17461. static void gguf_write_to_buf(const struct gguf_context * ctx, struct gguf_buf * buf, bool only_meta) {
  17462. // write header
  17463. gguf_bwrite_el(buf, &ctx->header.magic, sizeof(ctx->header.magic));
  17464. gguf_bwrite_el(buf, &ctx->header.version, sizeof(ctx->header.version));
  17465. gguf_bwrite_el(buf, &ctx->header.n_tensors, sizeof(ctx->header.n_tensors));
  17466. gguf_bwrite_el(buf, &ctx->header.n_kv, sizeof(ctx->header.n_kv));
  17467. // write key-value pairs
  17468. for (uint32_t i = 0; i < ctx->header.n_kv; ++i) {
  17469. struct gguf_kv * kv = &ctx->kv[i];
  17470. gguf_bwrite_str(buf, &kv->key);
  17471. gguf_bwrite_el (buf, &kv->type, sizeof(kv->type));
  17472. switch (kv->type) {
17473. case GGUF_TYPE_UINT8: gguf_bwrite_el (buf, &kv->value.uint8, sizeof(kv->value.uint8) ); break;
  17474. case GGUF_TYPE_INT8: gguf_bwrite_el (buf, &kv->value.int8, sizeof(kv->value.int8) ); break;
  17475. case GGUF_TYPE_UINT16: gguf_bwrite_el (buf, &kv->value.uint16, sizeof(kv->value.uint16) ); break;
  17476. case GGUF_TYPE_INT16: gguf_bwrite_el (buf, &kv->value.int16, sizeof(kv->value.int16) ); break;
  17477. case GGUF_TYPE_UINT32: gguf_bwrite_el (buf, &kv->value.uint32, sizeof(kv->value.uint32) ); break;
  17478. case GGUF_TYPE_INT32: gguf_bwrite_el (buf, &kv->value.int32, sizeof(kv->value.int32) ); break;
  17479. case GGUF_TYPE_FLOAT32: gguf_bwrite_el (buf, &kv->value.float32, sizeof(kv->value.float32)); break;
  17480. case GGUF_TYPE_UINT64: gguf_bwrite_el (buf, &kv->value.uint64, sizeof(kv->value.uint64) ); break;
  17481. case GGUF_TYPE_INT64: gguf_bwrite_el (buf, &kv->value.int64, sizeof(kv->value.int64) ); break;
  17482. case GGUF_TYPE_FLOAT64: gguf_bwrite_el (buf, &kv->value.float64, sizeof(kv->value.float64)); break;
  17483. case GGUF_TYPE_BOOL: gguf_bwrite_el (buf, &kv->value.bool_, sizeof(kv->value.bool_) ); break;
  17484. case GGUF_TYPE_STRING: gguf_bwrite_str(buf, &kv->value.str ); break;
  17485. case GGUF_TYPE_ARRAY:
  17486. {
  17487. gguf_bwrite_el(buf, &kv->value.arr.type, sizeof(kv->value.arr.type));
  17488. gguf_bwrite_el(buf, &kv->value.arr.n, sizeof(kv->value.arr.n) );
  17489. switch (kv->value.arr.type) {
  17490. case GGUF_TYPE_UINT8:
  17491. case GGUF_TYPE_INT8:
  17492. case GGUF_TYPE_UINT16:
  17493. case GGUF_TYPE_INT16:
  17494. case GGUF_TYPE_UINT32:
  17495. case GGUF_TYPE_INT32:
  17496. case GGUF_TYPE_FLOAT32:
  17497. case GGUF_TYPE_UINT64:
  17498. case GGUF_TYPE_INT64:
  17499. case GGUF_TYPE_FLOAT64:
  17500. case GGUF_TYPE_BOOL:
  17501. {
  17502. gguf_bwrite_el(buf, kv->value.arr.data, kv->value.arr.n * GGUF_TYPE_SIZE[kv->value.arr.type]);
  17503. } break;
  17504. case GGUF_TYPE_STRING:
  17505. {
  17506. for (uint32_t j = 0; j < kv->value.arr.n; ++j) {
  17507. gguf_bwrite_str(buf, &((struct gguf_str *) kv->value.arr.data)[j]);
  17508. }
  17509. } break;
  17510. case GGUF_TYPE_ARRAY:
  17511. case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type"); break;
  17512. }
  17513. } break;
  17514. case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type");
  17515. }
  17516. }
  17517. // write tensor infos
  17518. for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) {
  17519. struct gguf_tensor_info * info = &ctx->infos[i];
  17520. gguf_bwrite_str(buf, &info->name);
  17521. gguf_bwrite_el (buf, &info->n_dims, sizeof(info->n_dims));
  17522. for (uint32_t j = 0; j < info->n_dims; ++j) {
  17523. gguf_bwrite_el(buf, &info->ne[j], sizeof(info->ne[j]));
  17524. }
  17525. gguf_bwrite_el(buf, &info->type, sizeof(info->type));
  17526. gguf_bwrite_el(buf, &info->offset, sizeof(info->offset));
  17527. }
  17528. // we require the data section to be aligned, so take into account any padding
  17529. {
  17530. const size_t offset = buf->offset;
  17531. const size_t offset_pad = GGML_PAD(offset, ctx->alignment);
  17532. if (offset_pad != offset) {
  17533. uint8_t pad = 0;
  17534. for (size_t i = 0; i < offset_pad - offset; ++i) {
  17535. gguf_bwrite_el(buf, &pad, sizeof(pad));
  17536. }
  17537. }
  17538. }
  17539. if (only_meta) {
  17540. return;
  17541. }
  17542. size_t offset = 0;
  17543. // write tensor data
  17544. for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) {
  17545. struct gguf_tensor_info * info = &ctx->infos[i];
  17546. const size_t size = info->size;
  17547. const size_t size_pad = GGML_PAD(size, ctx->alignment);
  17548. gguf_bwrite_el(buf, info->data, size);
  17549. if (size_pad != size) {
  17550. uint8_t pad = 0;
  17551. for (size_t j = 0; j < size_pad - size; ++j) {
  17552. gguf_bwrite_el(buf, &pad, sizeof(pad));
  17553. }
  17554. }
  17555. GGML_ASSERT(offset == info->offset);
  17556. offset += size_pad;
  17557. }
  17558. }
  17559. void gguf_write_to_file(const struct gguf_context * ctx, const char * fname, bool only_meta) {
  17560. FILE * file = fopen(fname, "wb");
  17561. if (!file) {
  17562. GGML_ASSERT(false && "failed to open file for writing");
  17563. }
  17564. struct gguf_buf buf = gguf_buf_init(16*1024);
  17565. gguf_write_to_buf(ctx, &buf, only_meta);
  17566. fwrite(buf.data, 1, buf.offset, file);
  17567. gguf_buf_free(buf);
  17568. fclose(file);
  17569. }
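// Illustrative sketch (not part of ggml.c): building a GGUF file from scratch. The
// key names, file name and the tensor `t` are placeholders; `t` is assumed to live in
// some ggml_context and to have been named with ggml_set_name.
//
//     struct gguf_context * gctx = gguf_init_empty();
//     gguf_set_val_str(gctx, "general.name", "example");
//     gguf_set_val_u32(gctx, "example.count", 42);
//     gguf_add_tensor(gctx, t);                     // records metadata and the data pointer
//     gguf_write_to_file(gctx, "out.gguf", false);  // false: also write the tensor data
//     gguf_free(gctx);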
  17570. size_t gguf_get_meta_size(const struct gguf_context * ctx) {
  17571. // no allocs - only compute size
  17572. struct gguf_buf buf = gguf_buf_init(0);
  17573. gguf_write_to_buf(ctx, &buf, true);
  17574. return buf.offset;
  17575. }
  17576. void gguf_get_meta_data(const struct gguf_context * ctx, void * data) {
  17577. struct gguf_buf buf = gguf_buf_init(16*1024);
  17578. gguf_write_to_buf(ctx, &buf, true);
  17579. memcpy(data, buf.data, buf.offset);
  17580. gguf_buf_free(buf);
  17581. }
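// Illustrative sketch (assumption): when only the serialized metadata is needed
// (e.g. to prepend it to a separately mapped data blob), the caller can size a
// buffer with gguf_get_meta_size and fill it with gguf_get_meta_data.
//
//     const size_t meta_size = gguf_get_meta_size(gctx);
//     void * meta = malloc(meta_size);
//     gguf_get_meta_data(gctx, meta);
//     // ... write `meta` somewhere ...
//     free(meta);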
  17582. ////////////////////////////////////////////////////////////////////////////////
  17583. int ggml_cpu_has_avx(void) {
  17584. #if defined(__AVX__)
  17585. return 1;
  17586. #else
  17587. return 0;
  17588. #endif
  17589. }
  17590. int ggml_cpu_has_avx2(void) {
  17591. #if defined(__AVX2__)
  17592. return 1;
  17593. #else
  17594. return 0;
  17595. #endif
  17596. }
  17597. int ggml_cpu_has_avx512(void) {
  17598. #if defined(__AVX512F__)
  17599. return 1;
  17600. #else
  17601. return 0;
  17602. #endif
  17603. }
  17604. int ggml_cpu_has_avx512_vbmi(void) {
  17605. #if defined(__AVX512VBMI__)
  17606. return 1;
  17607. #else
  17608. return 0;
  17609. #endif
  17610. }
  17611. int ggml_cpu_has_avx512_vnni(void) {
  17612. #if defined(__AVX512VNNI__)
  17613. return 1;
  17614. #else
  17615. return 0;
  17616. #endif
  17617. }
  17618. int ggml_cpu_has_fma(void) {
  17619. #if defined(__FMA__)
  17620. return 1;
  17621. #else
  17622. return 0;
  17623. #endif
  17624. }
  17625. int ggml_cpu_has_neon(void) {
  17626. #if defined(__ARM_NEON)
  17627. return 1;
  17628. #else
  17629. return 0;
  17630. #endif
  17631. }
  17632. int ggml_cpu_has_arm_fma(void) {
  17633. #if defined(__ARM_FEATURE_FMA)
  17634. return 1;
  17635. #else
  17636. return 0;
  17637. #endif
  17638. }
  17639. int ggml_cpu_has_metal(void) {
  17640. #if defined(GGML_USE_METAL)
  17641. return 1;
  17642. #else
  17643. return 0;
  17644. #endif
  17645. }
  17646. int ggml_cpu_has_f16c(void) {
  17647. #if defined(__F16C__)
  17648. return 1;
  17649. #else
  17650. return 0;
  17651. #endif
  17652. }
  17653. int ggml_cpu_has_fp16_va(void) {
  17654. #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
  17655. return 1;
  17656. #else
  17657. return 0;
  17658. #endif
  17659. }
  17660. int ggml_cpu_has_wasm_simd(void) {
  17661. #if defined(__wasm_simd128__)
  17662. return 1;
  17663. #else
  17664. return 0;
  17665. #endif
  17666. }
  17667. int ggml_cpu_has_blas(void) {
  17668. #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) || defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST)
  17669. return 1;
  17670. #else
  17671. return 0;
  17672. #endif
  17673. }
  17674. int ggml_cpu_has_cublas(void) {
  17675. #if defined(GGML_USE_CUBLAS)
  17676. return 1;
  17677. #else
  17678. return 0;
  17679. #endif
  17680. }
  17681. int ggml_cpu_has_clblast(void) {
  17682. #if defined(GGML_USE_CLBLAST)
  17683. return 1;
  17684. #else
  17685. return 0;
  17686. #endif
  17687. }
  17688. int ggml_cpu_has_gpublas(void) {
  17689. return ggml_cpu_has_cublas() || ggml_cpu_has_clblast();
  17690. }
  17691. int ggml_cpu_has_sse3(void) {
  17692. #if defined(__SSE3__)
  17693. return 1;
  17694. #else
  17695. return 0;
  17696. #endif
  17697. }
  17698. int ggml_cpu_has_ssse3(void) {
  17699. #if defined(__SSSE3__)
  17700. return 1;
  17701. #else
  17702. return 0;
  17703. #endif
  17704. }
  17705. int ggml_cpu_has_vsx(void) {
  17706. #if defined(__POWER9_VECTOR__)
  17707. return 1;
  17708. #else
  17709. return 0;
  17710. #endif
  17711. }
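// Illustrative sketch (not part of ggml.c): callers typically report the compiled-in
// features by combining these predicates, e.g.:
//
//     printf("AVX = %d | AVX2 = %d | AVX512 = %d | FMA = %d | NEON = %d | F16C = %d | BLAS = %d\n",
//            ggml_cpu_has_avx(), ggml_cpu_has_avx2(), ggml_cpu_has_avx512(),
//            ggml_cpu_has_fma(), ggml_cpu_has_neon(), ggml_cpu_has_f16c(),
//            ggml_cpu_has_blas());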
  17712. ////////////////////////////////////////////////////////////////////////////////