ggml.c (669 KB, ~14,300 lines)
430914310143111431214313143141431514316143171431814319143201432114322143231432414325143261432714328143291433014331143321433314334143351433614337143381433914340143411434214343143441434514346143471434814349143501435114352143531435414355143561435714358143591436014361143621436314364143651436614367143681436914370143711437214373143741437514376143771437814379143801438114382143831438414385143861438714388143891439014391143921439314394143951439614397143981439914400144011440214403144041440514406144071440814409144101441114412144131441414415144161441714418144191442014421144221442314424144251442614427144281442914430144311443214433144341443514436144371443814439144401444114442144431444414445144461444714448144491445014451144521445314454144551445614457144581445914460144611446214463144641446514466144671446814469144701447114472144731447414475144761447714478144791448014481144821448314484144851448614487144881448914490144911449214493144941449514496144971449814499145001450114502145031450414505145061450714508145091451014511145121451314514145151451614517145181451914520145211452214523145241452514526145271452814529145301453114532145331453414535145361453714538145391454014541145421454314544145451454614547145481454914550145511455214553145541455514556145571455814559145601456114562145631456414565145661456714568145691457014571145721457314574145751457614577145781457914580145811458214583145841458514586145871458814589145901459114592145931459414595145961459714598145991460014601146021460314604146051460614607146081460914610146111461214613146141461514616146171461814619146201462114622146231462414625146261462714628146291463014631146321463314634146351463614637146381463914640146411464214643146441464514646146471464814649146501465114652146531465414655146561465714658146591466014661146621466314664146651466614667146681466914670146711467214673146741467514676146771467814679146801468114682146831468414685146861468714688146891469014691146921469314694146951469614697146981469914700147011470214703147041470514706147071470814709147101471114712147131471414715147161471714718147191472014721147221472314724147251472614727147281472914730147311473214733147341473514736147371473814739147401474114742147431474414745147461474714748147491475014751147521475314754147551475614757147581475914760147611476214763147641476514766147671476814769147701477114772147731477414775147761477714778147791478014781147821478314784147851478614787147881478914790147911479214793147941479514796147971479814799148001480114802148031480414805148061480714808148091481014811148121481314814148151481614817148181481914820148211482214823148241482514826148271482814829148301483114832148331483414835148361483714838148391484014841148421484314844148451484614847148481484914850148511485214853148541485514856148571485814859148601486114862148631486414865148661486714868148691487014871148721487314874148751487614877148781487914880148811488214883148841488514886148871488814889148901489114892148931489414895148961489714898148991490014901149021490314904149051490614907149081490914910149111491214913149141491514916149171491814919149201492114922149231492414925149261492714928149291493014931149321493314934149351493614937149381493914940149411494214943149441494514946149471494814949149501495114952149531495414955149561495714958149591496014961149621496314964149651496614967149681496914970149711497214973149741497514976149771497814979149801498114982149831498414985149861498714988149891499014991149921499314994149951499614997149981499915000150011500215003150041500515006150071500815009150101501115012150131501415015150161501715018150191
502015021150221502315024150251502615027150281502915030150311503215033150341503515036150371503815039150401504115042150431504415045150461504715048150491505015051150521505315054150551505615057150581505915060150611506215063150641506515066150671506815069150701507115072150731507415075150761507715078150791508015081150821508315084150851508615087150881508915090150911509215093150941509515096150971509815099151001510115102151031510415105151061510715108151091511015111151121511315114151151511615117151181511915120151211512215123151241512515126151271512815129151301513115132151331513415135151361513715138151391514015141151421514315144151451514615147151481514915150151511515215153151541515515156151571515815159151601516115162151631516415165151661516715168151691517015171151721517315174151751517615177151781517915180151811518215183151841518515186151871518815189151901519115192151931519415195151961519715198151991520015201152021520315204152051520615207152081520915210152111521215213152141521515216152171521815219152201522115222152231522415225152261522715228152291523015231152321523315234152351523615237152381523915240152411524215243152441524515246152471524815249152501525115252152531525415255152561525715258152591526015261152621526315264152651526615267152681526915270152711527215273152741527515276152771527815279152801528115282152831528415285152861528715288152891529015291152921529315294152951529615297152981529915300153011530215303153041530515306153071530815309153101531115312153131531415315153161531715318153191532015321153221532315324153251532615327153281532915330153311533215333153341533515336153371533815339153401534115342153431534415345153461534715348153491535015351153521535315354153551535615357153581535915360153611536215363153641536515366153671536815369153701537115372153731537415375153761537715378153791538015381153821538315384153851538615387153881538915390153911539215393153941539515396153971539815399154001540115402154031540415405154061540715408154091541015411154121541315414154151541615417154181541915420154211542215423154241542515426154271542815429154301543115432154331543415435154361543715438154391544015441154421544315444154451544615447154481544915450154511545215453154541545515456154571545815459154601546115462154631546415465154661546715468154691547015471154721547315474154751547615477154781547915480154811548215483154841548515486154871548815489154901549115492154931549415495154961549715498154991550015501155021550315504155051550615507155081550915510155111551215513155141551515516155171551815519155201552115522155231552415525155261552715528155291553015531155321553315534155351553615537155381553915540155411554215543155441554515546155471554815549155501555115552155531555415555155561555715558155591556015561155621556315564155651556615567155681556915570155711557215573155741557515576155771557815579155801558115582155831558415585155861558715588155891559015591155921559315594155951559615597155981559915600156011560215603156041560515606156071560815609156101561115612156131561415615156161561715618156191562015621156221562315624156251562615627156281562915630156311563215633156341563515636156371563815639156401564115642156431564415645156461564715648156491565015651156521565315654156551565615657156581565915660156611566215663156641566515666156671566815669156701567115672156731567415675156761567715678156791568015681156821568315684156851568615687156881568915690156911569215693156941569515696156971569815699157001570115702157031570415705157061570715708157091571015711157121571315714157151571615717157181571915720157211572215723157241572515726157271572815729157301
573115732157331573415735157361573715738157391574015741157421574315744157451574615747157481574915750157511575215753157541575515756157571575815759157601576115762157631576415765157661576715768157691577015771157721577315774157751577615777157781577915780157811578215783157841578515786157871578815789157901579115792157931579415795157961579715798157991580015801158021580315804158051580615807158081580915810158111581215813158141581515816158171581815819158201582115822158231582415825158261582715828158291583015831158321583315834158351583615837158381583915840158411584215843158441584515846158471584815849158501585115852158531585415855158561585715858158591586015861158621586315864158651586615867158681586915870158711587215873158741587515876158771587815879158801588115882158831588415885158861588715888158891589015891158921589315894158951589615897158981589915900159011590215903159041590515906159071590815909159101591115912159131591415915159161591715918159191592015921159221592315924159251592615927159281592915930159311593215933159341593515936159371593815939159401594115942159431594415945159461594715948159491595015951159521595315954159551595615957159581595915960159611596215963159641596515966159671596815969159701597115972159731597415975159761597715978159791598015981159821598315984159851598615987159881598915990159911599215993159941599515996159971599815999160001600116002160031600416005160061600716008160091601016011160121601316014160151601616017160181601916020160211602216023160241602516026160271602816029160301603116032160331603416035160361603716038160391604016041160421604316044160451604616047160481604916050160511605216053160541605516056160571605816059160601606116062160631606416065160661606716068160691607016071160721607316074160751607616077160781607916080160811608216083160841608516086160871608816089160901609116092160931609416095160961609716098160991610016101161021610316104161051610616107161081610916110161111611216113161141611516116161171611816119161201612116122161231612416125161261612716128161291613016131161321613316134161351613616137161381613916140161411614216143161441614516146161471614816149161501615116152161531615416155161561615716158161591616016161161621616316164161651616616167161681616916170161711617216173161741617516176161771617816179161801618116182161831618416185161861618716188161891619016191161921619316194161951619616197161981619916200162011620216203162041620516206162071620816209162101621116212162131621416215162161621716218162191622016221162221622316224162251622616227162281622916230162311623216233162341623516236162371623816239162401624116242162431624416245162461624716248162491625016251162521625316254162551625616257162581625916260162611626216263162641626516266162671626816269162701627116272162731627416275162761627716278162791628016281162821628316284162851628616287162881628916290162911629216293162941629516296162971629816299163001630116302163031630416305163061630716308163091631016311163121631316314163151631616317163181631916320163211632216323163241632516326163271632816329163301633116332163331633416335163361633716338163391634016341163421634316344163451634616347163481634916350163511635216353163541635516356163571635816359163601636116362163631636416365163661636716368163691637016371163721637316374163751637616377163781637916380163811638216383163841638516386163871638816389163901639116392163931639416395163961639716398163991640016401164021640316404164051640616407164081640916410164111641216413164141641516416164171641816419164201642116422164231642416425164261642716428164291643016431164321643316434164351643616437164381643916440164411
644216443164441644516446164471644816449164501645116452164531645416455164561645716458164591646016461164621646316464164651646616467164681646916470164711647216473164741647516476164771647816479164801648116482164831648416485164861648716488164891649016491164921649316494164951649616497164981649916500165011650216503165041650516506165071650816509165101651116512165131651416515165161651716518165191652016521165221652316524165251652616527165281652916530165311653216533165341653516536165371653816539165401654116542165431654416545165461654716548165491655016551165521655316554165551655616557165581655916560165611656216563165641656516566165671656816569165701657116572165731657416575165761657716578165791658016581165821658316584165851658616587165881658916590165911659216593165941659516596165971659816599166001660116602166031660416605166061660716608166091661016611166121661316614166151661616617166181661916620166211662216623166241662516626166271662816629166301663116632166331663416635166361663716638166391664016641166421664316644166451664616647166481664916650166511665216653166541665516656166571665816659166601666116662166631666416665166661666716668166691667016671166721667316674166751667616677166781667916680166811668216683166841668516686166871668816689166901669116692166931669416695166961669716698166991670016701167021670316704167051670616707167081670916710167111671216713167141671516716167171671816719167201672116722167231672416725167261672716728167291673016731167321673316734167351673616737167381673916740167411674216743167441674516746167471674816749167501675116752167531675416755167561675716758167591676016761167621676316764167651676616767167681676916770167711677216773167741677516776167771677816779167801678116782167831678416785167861678716788167891679016791167921679316794167951679616797167981679916800168011680216803168041680516806168071680816809168101681116812168131681416815168161681716818168191682016821168221682316824168251682616827168281682916830168311683216833168341683516836168371683816839168401684116842168431684416845168461684716848168491685016851168521685316854168551685616857168581685916860168611686216863168641686516866168671686816869168701687116872168731687416875168761687716878168791688016881168821688316884168851688616887168881688916890168911689216893168941689516896168971689816899169001690116902169031690416905169061690716908169091691016911169121691316914169151691616917169181691916920169211692216923169241692516926169271692816929169301693116932169331693416935169361693716938169391694016941169421694316944169451694616947169481694916950169511695216953169541695516956169571695816959169601696116962169631696416965169661696716968169691697016971169721697316974169751697616977169781697916980169811698216983169841698516986169871698816989169901699116992169931699416995169961699716998169991700017001170021700317004170051700617007170081700917010170111701217013170141701517016170171701817019170201702117022170231702417025170261702717028170291703017031170321703317034170351703617037170381703917040170411704217043170441704517046170471704817049170501705117052170531705417055170561705717058170591706017061170621706317064170651706617067170681706917070170711707217073170741707517076170771707817079170801708117082170831708417085170861708717088170891709017091170921709317094170951709617097170981709917100171011710217103171041710517106171071710817109171101711117112171131711417115171161711717118171191712017121171221712317124171251712617127171281712917130171311713217133171341713517136171371713817139171401714117142171431714417145171461714717148171491715017151171521
715317154171551715617157171581715917160171611716217163171641716517166171671716817169171701717117172171731717417175171761717717178171791718017181171821718317184171851718617187171881718917190171911719217193171941719517196171971719817199172001720117202172031720417205172061720717208172091721017211172121721317214172151721617217172181721917220172211722217223172241722517226172271722817229172301723117232172331723417235172361723717238172391724017241172421724317244172451724617247172481724917250172511725217253172541725517256172571725817259172601726117262172631726417265172661726717268172691727017271172721727317274172751727617277172781727917280172811728217283172841728517286172871728817289172901729117292172931729417295172961729717298172991730017301173021730317304173051730617307173081730917310173111731217313173141731517316173171731817319173201732117322173231732417325173261732717328173291733017331173321733317334173351733617337173381733917340173411734217343173441734517346173471734817349173501735117352173531735417355173561735717358173591736017361173621736317364173651736617367173681736917370173711737217373173741737517376173771737817379173801738117382173831738417385173861738717388173891739017391173921739317394173951739617397173981739917400174011740217403174041740517406174071740817409174101741117412174131741417415174161741717418174191742017421174221742317424174251742617427174281742917430174311743217433174341743517436174371743817439174401744117442174431744417445174461744717448174491745017451174521745317454174551745617457174581745917460174611746217463174641746517466174671746817469174701747117472174731747417475174761747717478174791748017481174821748317484174851748617487174881748917490174911749217493174941749517496174971749817499175001750117502175031750417505175061750717508175091751017511175121751317514175151751617517175181751917520175211752217523175241752517526175271752817529175301753117532175331753417535175361753717538175391754017541175421754317544175451754617547175481754917550175511755217553175541755517556175571755817559175601756117562175631756417565175661756717568175691757017571175721757317574175751757617577175781757917580175811758217583175841758517586175871758817589175901759117592175931759417595175961759717598175991760017601176021760317604176051760617607176081760917610176111761217613176141761517616176171761817619176201762117622176231762417625176261762717628176291763017631176321763317634176351763617637176381763917640176411764217643176441764517646176471764817649176501765117652176531765417655176561765717658176591766017661176621766317664176651766617667176681766917670176711767217673176741767517676176771767817679176801768117682176831768417685176861768717688176891769017691176921769317694176951769617697176981769917700177011770217703177041770517706177071770817709177101771117712177131771417715177161771717718177191772017721177221772317724177251772617727177281772917730177311773217733177341773517736177371773817739177401774117742177431774417745177461774717748177491775017751177521775317754177551775617757177581775917760177611776217763177641776517766177671776817769177701777117772177731777417775177761777717778177791778017781177821778317784177851778617787177881778917790177911779217793177941779517796177971779817799178001780117802178031780417805178061780717808178091781017811178121781317814178151781617817178181781917820178211782217823178241782517826178271782817829178301783117832178331783417835178361783717838178391784017841178421784317844178451784617847178481784917850178511785217853178541785517856178571785817859178601786117862178631
786417865178661786717868178691787017871178721787317874178751787617877178781787917880178811788217883178841788517886178871788817889178901789117892178931789417895178961789717898178991790017901179021790317904179051790617907179081790917910179111791217913179141791517916179171791817919179201792117922179231792417925179261792717928179291793017931179321793317934179351793617937179381793917940179411794217943179441794517946179471794817949179501795117952179531795417955179561795717958179591796017961179621796317964179651796617967179681796917970179711797217973179741797517976179771797817979179801798117982179831798417985179861798717988179891799017991179921799317994179951799617997179981799918000180011800218003180041800518006180071800818009180101801118012180131801418015180161801718018180191802018021180221802318024180251802618027180281802918030180311803218033180341803518036180371803818039180401804118042180431804418045180461804718048180491805018051180521805318054180551805618057180581805918060180611806218063180641806518066180671806818069180701807118072180731807418075180761807718078180791808018081180821808318084180851808618087180881808918090180911809218093180941809518096180971809818099181001810118102181031810418105181061810718108181091811018111181121811318114181151811618117181181811918120181211812218123181241812518126181271812818129181301813118132181331813418135181361813718138181391814018141181421814318144181451814618147181481814918150181511815218153181541815518156181571815818159181601816118162181631816418165181661816718168181691817018171181721817318174181751817618177181781817918180181811818218183181841818518186181871818818189181901819118192181931819418195181961819718198181991820018201182021820318204182051820618207182081820918210182111821218213182141821518216182171821818219182201822118222182231822418225182261822718228182291823018231182321823318234182351823618237182381823918240182411824218243182441824518246182471824818249182501825118252182531825418255182561825718258182591826018261182621826318264182651826618267182681826918270182711827218273182741827518276182771827818279182801828118282182831828418285182861828718288182891829018291182921829318294182951829618297182981829918300183011830218303183041830518306183071830818309183101831118312183131831418315183161831718318183191832018321183221832318324183251832618327183281832918330183311833218333183341833518336183371833818339183401834118342183431834418345183461834718348183491835018351183521835318354183551835618357183581835918360183611836218363183641836518366183671836818369183701837118372183731837418375183761837718378183791838018381183821838318384183851838618387183881838918390183911839218393183941839518396183971839818399184001840118402184031840418405184061840718408184091841018411184121841318414184151841618417184181841918420184211842218423184241842518426184271842818429184301843118432184331843418435184361843718438184391844018441184421844318444184451844618447184481844918450184511845218453184541845518456184571845818459184601846118462184631846418465184661846718468184691847018471184721847318474184751847618477184781847918480184811848218483184841848518486184871848818489184901849118492184931849418495184961849718498184991850018501185021850318504185051850618507185081850918510185111851218513185141851518516185171851818519185201852118522185231852418525185261852718528185291853018531185321853318534185351853618537185381853918540185411854218543185441854518546185471854818549185501855118552185531855418555185561855718558185591856018561185621856318564185651856618567185681856918570185711857218573185741
857518576185771857818579185801858118582185831858418585185861858718588185891859018591185921859318594185951859618597185981859918600186011860218603186041860518606186071860818609186101861118612186131861418615186161861718618186191862018621186221862318624186251862618627186281862918630186311863218633186341863518636186371863818639186401864118642186431864418645186461864718648186491865018651186521865318654186551865618657186581865918660186611866218663186641866518666186671866818669186701867118672186731867418675186761867718678186791868018681186821868318684186851868618687186881868918690186911869218693186941869518696186971869818699187001870118702187031870418705187061870718708187091871018711187121871318714187151871618717187181871918720187211872218723187241872518726187271872818729187301873118732187331873418735187361873718738187391874018741187421874318744187451874618747187481874918750187511875218753187541875518756187571875818759187601876118762187631876418765187661876718768187691877018771187721877318774187751877618777187781877918780187811878218783187841878518786187871878818789187901879118792187931879418795187961879718798187991880018801188021880318804188051880618807188081880918810188111881218813188141881518816188171881818819188201882118822188231882418825188261882718828188291883018831188321883318834188351883618837188381883918840188411884218843188441884518846188471884818849188501885118852188531885418855188561885718858188591886018861188621886318864188651886618867188681886918870188711887218873188741887518876188771887818879188801888118882188831888418885188861888718888188891889018891188921889318894188951889618897188981889918900189011890218903189041890518906189071890818909189101891118912189131891418915189161891718918189191892018921189221892318924189251892618927189281892918930189311893218933189341893518936189371893818939189401894118942189431894418945189461894718948189491895018951189521895318954189551895618957189581895918960189611896218963189641896518966189671896818969189701897118972189731897418975189761897718978189791898018981189821898318984189851898618987189881898918990189911899218993189941899518996189971899818999190001900119002190031900419005190061900719008190091901019011190121901319014190151901619017190181901919020190211902219023190241902519026190271902819029190301903119032190331903419035190361903719038190391904019041190421904319044190451904619047190481904919050190511905219053190541905519056190571905819059190601906119062190631906419065190661906719068190691907019071190721907319074190751907619077190781907919080190811908219083190841908519086190871908819089190901909119092190931909419095190961909719098190991910019101191021910319104191051910619107191081910919110191111911219113191141911519116191171911819119191201912119122191231912419125191261912719128191291913019131191321913319134191351913619137191381913919140191411914219143191441914519146191471914819149191501915119152191531915419155191561915719158191591916019161191621916319164191651916619167191681916919170191711917219173191741917519176191771917819179191801918119182191831918419185191861918719188191891919019191191921919319194191951919619197191981919919200192011920219203192041920519206192071920819209192101921119212192131921419215192161921719218192191922019221192221922319224192251922619227192281922919230192311923219233192341923519236192371923819239192401924119242192431924419245192461924719248192491925019251192521925319254192551925619257192581925919260192611926219263192641926519266192671926819269192701927119272192731927419275192761927719278192791928019281192821928319284192851
928619287192881928919290192911929219293192941929519296192971929819299193001930119302193031930419305193061930719308193091931019311193121931319314193151931619317193181931919320193211932219323193241932519326193271932819329193301933119332193331933419335193361933719338193391934019341193421934319344193451934619347193481934919350193511935219353193541935519356193571935819359193601936119362193631936419365193661936719368193691937019371193721937319374193751937619377193781937919380193811938219383193841938519386193871938819389193901939119392193931939419395193961939719398193991940019401194021940319404194051940619407194081940919410194111941219413194141941519416194171941819419194201942119422194231942419425194261942719428194291943019431194321943319434194351943619437194381943919440194411944219443194441944519446194471944819449194501945119452194531945419455194561945719458194591946019461194621946319464194651946619467194681946919470194711947219473194741947519476194771947819479194801948119482194831948419485194861948719488194891949019491194921949319494194951949619497194981949919500195011950219503195041950519506195071950819509195101951119512195131951419515195161951719518195191952019521195221952319524195251952619527195281952919530195311953219533195341953519536195371953819539195401954119542195431954419545195461954719548195491955019551195521955319554195551955619557195581955919560195611956219563195641956519566195671956819569195701957119572195731957419575195761957719578195791958019581195821958319584195851958619587195881958919590195911959219593195941959519596195971959819599196001960119602196031960419605196061960719608196091961019611196121961319614196151961619617196181961919620196211962219623196241962519626196271962819629196301963119632196331963419635196361963719638196391964019641196421964319644196451964619647196481964919650196511965219653196541965519656196571965819659196601966119662196631966419665196661966719668196691967019671196721967319674196751967619677196781967919680196811968219683196841968519686196871968819689196901969119692196931969419695196961969719698196991970019701197021970319704197051970619707197081970919710197111971219713197141971519716197171971819719197201972119722197231972419725197261972719728197291973019731197321973319734197351973619737197381973919740197411974219743197441974519746197471974819749197501975119752197531975419755197561975719758197591976019761197621976319764197651976619767197681976919770197711977219773197741977519776197771977819779197801978119782197831978419785197861978719788197891979019791197921979319794197951979619797197981979919800198011980219803198041980519806198071980819809198101981119812198131981419815198161981719818198191982019821198221982319824198251982619827198281982919830198311983219833198341983519836198371983819839198401984119842198431984419845198461984719848198491985019851198521985319854198551985619857198581985919860198611986219863198641986519866198671986819869198701987119872198731987419875198761987719878198791988019881198821988319884198851988619887198881988919890198911989219893198941989519896198971989819899199001990119902199031990419905199061990719908199091991019911199121991319914199151991619917199181991919920199211992219923199241992519926199271992819929199301993119932199331993419935199361993719938199391994019941199421994319944199451994619947199481994919950199511995219953199541995519956199571995819959199601996119962199631996419965199661996719968199691997019971199721997319974199751997619977199781997919980199811998219983199841998519986199871998819989199901999119992199931999419995199961
999719998199992000020001200022000320004200052000620007200082000920010200112001220013200142001520016200172001820019200202002120022200232002420025200262002720028200292003020031200322003320034200352003620037200382003920040200412004220043200442004520046200472004820049200502005120052200532005420055200562005720058200592006020061200622006320064200652006620067200682006920070200712007220073200742007520076200772007820079200802008120082200832008420085200862008720088200892009020091200922009320094200952009620097200982009920100201012010220103201042010520106201072010820109201102011120112201132011420115201162011720118201192012020121201222012320124201252012620127201282012920130201312013220133201342013520136201372013820139201402014120142201432014420145201462014720148201492015020151201522015320154201552015620157201582015920160201612016220163201642016520166201672016820169201702017120172201732017420175201762017720178201792018020181201822018320184201852018620187201882018920190201912019220193201942019520196201972019820199202002020120202202032020420205202062020720208202092021020211202122021320214202152021620217202182021920220202212022220223202242022520226202272022820229202302023120232202332023420235202362023720238202392024020241202422024320244202452024620247202482024920250202512025220253202542025520256202572025820259202602026120262202632026420265202662026720268202692027020271202722027320274202752027620277202782027920280202812028220283202842028520286202872028820289202902029120292202932029420295202962029720298202992030020301203022030320304203052030620307203082030920310203112031220313203142031520316203172031820319203202032120322203232032420325203262032720328203292033020331203322033320334203352033620337203382033920340203412034220343203442034520346203472034820349203502035120352203532035420355203562035720358203592036020361203622036320364203652036620367203682036920370203712037220373203742037520376203772037820379203802038120382203832038420385203862038720388203892039020391203922039320394203952039620397203982039920400204012040220403204042040520406204072040820409204102041120412204132041420415204162041720418204192042020421204222042320424204252042620427204282042920430204312043220433204342043520436204372043820439204402044120442204432044420445204462044720448204492045020451204522045320454204552045620457204582045920460204612046220463204642046520466204672046820469204702047120472204732047420475204762047720478204792048020481204822048320484204852048620487204882048920490204912049220493204942049520496204972049820499205002050120502205032050420505205062050720508205092051020511205122051320514205152051620517205182051920520205212052220523205242052520526205272052820529205302053120532205332053420535205362053720538205392054020541205422054320544205452054620547205482054920550205512055220553205542055520556205572055820559205602056120562205632056420565205662056720568205692057020571205722057320574205752057620577205782057920580205812058220583205842058520586205872058820589205902059120592205932059420595205962059720598205992060020601206022060320604206052060620607206082060920610206112061220613206142061520616206172061820619206202062120622206232062420625206262062720628206292063020631206322063320634206352063620637206382063920640206412064220643206442064520646206472064820649206502065120652206532065420655206562065720658206592066020661206622066320664206652066620667206682066920670206712067220673206742067520676206772067820679206802068120682206832068420685206862068720688206892069020691206922069320694206952069620697206982069920700207012070220703207042070520706207072
07082070920710207112071220713207142071520716207172071820719207202072120722207232072420725207262072720728207292073020731207322073320734207352073620737207382073920740207412074220743207442074520746207472074820749207502075120752207532075420755207562075720758207592076020761207622076320764207652076620767207682076920770207712077220773207742077520776207772077820779207802078120782207832078420785207862078720788207892079020791207922079320794207952079620797207982079920800208012080220803208042080520806208072080820809208102081120812208132081420815208162081720818208192082020821208222082320824
#define _CRT_SECURE_NO_DEPRECATE // Disables ridiculous "unsafe" warnings on Windows

#include "ggml.h"

#ifdef GGML_USE_K_QUANTS
#include "k_quants.h"
#endif

#if defined(_MSC_VER) || defined(__MINGW32__)
#include <malloc.h> // using malloc.h with MSC/MINGW
#elif !defined(__FreeBSD__) && !defined(__NetBSD__) && !defined(__OpenBSD__)
#include <alloca.h>
#endif

#include <assert.h>
#include <errno.h>
#include <time.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>
#include <float.h>
#include <limits.h>
#include <stdarg.h>
#include <signal.h>

#ifdef GGML_USE_METAL
#include <unistd.h>
#endif

// static_assert should be a #define, but if it's not,
// fall back to the _Static_assert C11 keyword.
// if C99 - static_assert is noop
// ref: https://stackoverflow.com/a/53923785/4039976
#ifndef static_assert
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201100L)
#define static_assert(cond, msg) _Static_assert(cond, msg)
#else
#define static_assert(cond, msg) struct global_scope_noop_trick
#endif
#endif

#if defined(_MSC_VER)
// disable "possible loss of data" to avoid hundreds of casts
// we should just be careful :)
#pragma warning(disable: 4244 4267)

// disable POSIX deprecation warnings
// these functions are never going away, anyway
#pragma warning(disable: 4996)
#endif

#if defined(_WIN32)

#include <windows.h>

typedef volatile LONG atomic_int;
typedef atomic_int atomic_bool;

static void atomic_store(atomic_int * ptr, LONG val) {
    InterlockedExchange(ptr, val);
}
static LONG atomic_load(atomic_int * ptr) {
    return InterlockedCompareExchange(ptr, 0, 0);
}
static LONG atomic_fetch_add(atomic_int * ptr, LONG inc) {
    return InterlockedExchangeAdd(ptr, inc);
}
static LONG atomic_fetch_sub(atomic_int * ptr, LONG dec) {
    return atomic_fetch_add(ptr, -(dec));
}

typedef HANDLE pthread_t;

typedef DWORD thread_ret_t;
static int pthread_create(pthread_t * out, void * unused, thread_ret_t(*func)(void *), void * arg) {
    (void) unused;
    HANDLE handle = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE) func, arg, 0, NULL);
    if (handle == NULL)
    {
        return EAGAIN;
    }

    *out = handle;
    return 0;
}

static int pthread_join(pthread_t thread, void * unused) {
    (void) unused;
    return (int) WaitForSingleObject(thread, INFINITE);
}

static int sched_yield (void) {
    Sleep (0);
    return 0;
}
#else
#include <pthread.h>
#include <stdatomic.h>

typedef void * thread_ret_t;

#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

#endif
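// Example (illustrative sketch, not part of the original file): the shim above lets the
// same spawn/join code build on both Windows and POSIX. ggml_example_worker and
// ggml_example_spawn_join are hypothetical names used only for this sketch.
#if 0
static thread_ret_t ggml_example_worker(void * arg) {
    (void) arg;               // the worker body would go here
    return (thread_ret_t) 0;  // DWORD on Windows, void * elsewhere
}

static void ggml_example_spawn_join(void) {
    pthread_t th;
    if (pthread_create(&th, NULL, ggml_example_worker, NULL) == 0) {
        pthread_join(th, NULL);
    }
}
#endif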
#ifdef GGML_USE_CPU_HBM
#include <hbwmalloc.h>
#endif

// __FMA__ and __F16C__ are not defined in MSVC, however they are implied with AVX2/AVX512
#if defined(_MSC_VER) && (defined(__AVX2__) || defined(__AVX512F__))
#ifndef __FMA__
#define __FMA__
#endif
#ifndef __F16C__
#define __F16C__
#endif
#ifndef __SSE3__
#define __SSE3__
#endif
#endif

/*#define GGML_PERF*/
#define GGML_DEBUG 0
#define GGML_GELU_FP16
#define GGML_GELU_QUICK_FP16
#define GGML_SILU_FP16
// #define GGML_CROSS_ENTROPY_EXP_FP16
// #define GGML_FLASH_ATTN_EXP_FP16

#define GGML_SOFT_MAX_UNROLL 4
#define GGML_VEC_DOT_UNROLL 2
#ifdef GGML_USE_ACCELERATE
// uncomment to use vDSP for soft max computation
// note: not sure if it is actually faster
//#define GGML_SOFT_MAX_ACCELERATE
#endif

//
// logging
//

#if (GGML_DEBUG >= 1)
#define GGML_PRINT_DEBUG(...) printf(__VA_ARGS__)
#else
#define GGML_PRINT_DEBUG(...)
#endif

#if (GGML_DEBUG >= 5)
#define GGML_PRINT_DEBUG_5(...) printf(__VA_ARGS__)
#else
#define GGML_PRINT_DEBUG_5(...)
#endif

#if (GGML_DEBUG >= 10)
#define GGML_PRINT_DEBUG_10(...) printf(__VA_ARGS__)
#else
#define GGML_PRINT_DEBUG_10(...)
#endif

#define GGML_PRINT(...) printf(__VA_ARGS__)

//
// end of logging block
//

#if defined(_MSC_VER) || defined(__MINGW32__)
#define GGML_ALIGNED_MALLOC(size) _aligned_malloc(size, GGML_MEM_ALIGN)
#define GGML_ALIGNED_FREE(ptr)    _aligned_free(ptr)
#else
inline static void * ggml_aligned_malloc(size_t size) {
    if (size == 0) {
        GGML_PRINT("WARNING: Behavior may be unexpected when allocating 0 bytes for ggml_aligned_malloc!\n");
        return NULL;
    }
    void * aligned_memory = NULL;
#ifdef GGML_USE_CPU_HBM
    int result = hbw_posix_memalign(&aligned_memory, 16, size);
#elif GGML_USE_METAL
    int result = posix_memalign(&aligned_memory, sysconf(_SC_PAGESIZE), size);
#else
    int result = posix_memalign(&aligned_memory, GGML_MEM_ALIGN, size);
#endif
    if (result != 0) {
        // Handle allocation failure
        const char *error_desc = "unknown allocation error";
        switch (result) {
            case EINVAL:
                error_desc = "invalid alignment value";
                break;
            case ENOMEM:
                error_desc = "insufficient memory";
                break;
        }
        GGML_PRINT("%s: %s (attempted to allocate %6.2f MB)\n", __func__, error_desc, size/(1024.0*1024.0));
        return NULL;
    }
    return aligned_memory;
}
#define GGML_ALIGNED_MALLOC(size) ggml_aligned_malloc(size)
#ifdef GGML_USE_CPU_HBM
#define GGML_ALIGNED_FREE(ptr)    if(NULL != ptr) hbw_free(ptr)
#else
#define GGML_ALIGNED_FREE(ptr)    free(ptr)
#endif
#endif
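// Example (illustrative sketch, not part of the original file): a buffer obtained via
// GGML_ALIGNED_MALLOC must be released with GGML_ALIGNED_FREE so that the matching
// deallocator (_aligned_free, hbw_free or free) is used for the branch selected above.
// ggml_example_aligned_buffer is a hypothetical name used only for this sketch.
#if 0
static void ggml_example_aligned_buffer(void) {
    float * buf = (float *) GGML_ALIGNED_MALLOC(1024*sizeof(float));
    if (buf != NULL) {
        buf[0] = 1.0f;  // use the aligned buffer
        GGML_ALIGNED_FREE(buf);
    }
}
#endif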
#define UNUSED GGML_UNUSED

#define SWAP(x, y, T) do { T SWAP = x; x = y; y = SWAP; } while (0)

//
// tensor access macros
//

#define GGML_TENSOR_UNARY_OP_LOCALS \
    GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne); \
    GGML_TENSOR_LOCALS(size_t,  nb0, src0, nb); \
    GGML_TENSOR_LOCALS(int64_t, ne,  dst,  ne); \
    GGML_TENSOR_LOCALS(size_t,  nb,  dst,  nb);

#define GGML_TENSOR_BINARY_OP_LOCALS \
    GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne); \
    GGML_TENSOR_LOCALS(size_t,  nb0, src0, nb); \
    GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne); \
    GGML_TENSOR_LOCALS(size_t,  nb1, src1, nb); \
    GGML_TENSOR_LOCALS(int64_t, ne,  dst,  ne); \
    GGML_TENSOR_LOCALS(size_t,  nb,  dst,  nb);
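// For reference (illustrative note, not part of the original file): GGML_TENSOR_LOCALS
// comes from ggml.h and, as far as this version goes, declares one const local per
// dimension of a 4-D tensor - e.g. GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) appears
// to yield ne00..ne03 taken from src0->ne[0..3] - so the unary/binary macros above give
// op bodies ne00/nb00 (src0), ne10/nb10 (src1) and ne0/nb0 (dst) without hand-written loads.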
#if defined(GGML_USE_ACCELERATE)
#include <Accelerate/Accelerate.h>
#if defined(GGML_USE_CLBLAST) // allow usage of CLBlast alongside Accelerate functions
#include "ggml-opencl.h"
#endif
#elif defined(GGML_USE_OPENBLAS)
#if defined(GGML_BLAS_USE_MKL)
#include <mkl.h>
#else
#include <cblas.h>
#endif
#elif defined(GGML_USE_CUBLAS)
#include "ggml-cuda.h"
#elif defined(GGML_USE_CLBLAST)
#include "ggml-opencl.h"
#endif

#undef MIN
#undef MAX

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

// floating point type used to accumulate sums
typedef double ggml_float;

// 16-bit float
// on Arm, we use __fp16
// on x86, we use uint16_t
#if defined(__ARM_NEON) && !defined(_MSC_VER)

// if YCM cannot find <arm_neon.h>, make a symbolic link to it, for example:
//
//   $ ln -sfn /Library/Developer/CommandLineTools/usr/lib/clang/13.1.6/include/arm_neon.h ./src/
//
#include <arm_neon.h>

#define GGML_COMPUTE_FP16_TO_FP32(x) ((float) (x))
#define GGML_COMPUTE_FP32_TO_FP16(x) (x)

#define GGML_FP16_TO_FP32(x) ((float) (x))
#define GGML_FP32_TO_FP16(x) (x)

#else

#ifdef __wasm_simd128__
#include <wasm_simd128.h>
#else
#ifdef __POWER9_VECTOR__
#include <altivec.h>
#undef bool
#define bool _Bool
#else
#if defined(_MSC_VER) || defined(__MINGW32__)
#include <intrin.h>
#else
#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__) || defined(__SSE3__)
#if !defined(__riscv)
#include <immintrin.h>
#endif
#endif
#endif
#endif
#endif
#endif

#ifdef __riscv_v_intrinsic
#include <riscv_vector.h>
#endif

#ifdef __F16C__

#ifdef _MSC_VER
#define GGML_COMPUTE_FP16_TO_FP32(x) _mm_cvtss_f32(_mm_cvtph_ps(_mm_cvtsi32_si128(x)))
#define GGML_COMPUTE_FP32_TO_FP16(x) _mm_extract_epi16(_mm_cvtps_ph(_mm_set_ss(x), 0), 0)
#else
#define GGML_COMPUTE_FP16_TO_FP32(x) _cvtsh_ss(x)
#define GGML_COMPUTE_FP32_TO_FP16(x) _cvtss_sh(x, 0)
#endif

#elif defined(__POWER9_VECTOR__)

#define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
#define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x)
/* the inline asm below is about 12% faster than the lookup method */
#define GGML_FP16_TO_FP32(x) GGML_COMPUTE_FP16_TO_FP32(x)
#define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x)

static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) {
    register float f;
    register double d;
    __asm__(
        "mtfprd %0,%2\n"
        "xscvhpdp %0,%0\n"
        "frsp %1,%0\n" :
        /* temp */ "=d"(d),
        /* out */  "=f"(f):
        /* in */   "r"(h));
    return f;
}

static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) {
    register double d;
    register ggml_fp16_t r;
    __asm__( /* xscvdphp can work on double or single precision */
        "xscvdphp %0,%2\n"
        "mffprd %1,%0\n" :
        /* temp */ "=d"(d),
        /* out */  "=r"(r):
        /* in */   "f"(f));
    return r;
}

#else

// FP16 <-> FP32
// ref: https://github.com/Maratyszcza/FP16

static inline float fp32_from_bits(uint32_t w) {
    union {
        uint32_t as_bits;
        float as_value;
    } fp32;
    fp32.as_bits = w;
    return fp32.as_value;
}

static inline uint32_t fp32_to_bits(float f) {
    union {
        float as_value;
        uint32_t as_bits;
    } fp32;
    fp32.as_value = f;
    return fp32.as_bits;
}

static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) {
    const uint32_t w = (uint32_t) h << 16;
    const uint32_t sign = w & UINT32_C(0x80000000);
    const uint32_t two_w = w + w;

    const uint32_t exp_offset = UINT32_C(0xE0) << 23;
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)
    const float exp_scale = 0x1.0p-112f;
#else
    const float exp_scale = fp32_from_bits(UINT32_C(0x7800000));
#endif
    const float normalized_value = fp32_from_bits((two_w >> 4) + exp_offset) * exp_scale;

    const uint32_t magic_mask = UINT32_C(126) << 23;
    const float magic_bias = 0.5f;
    const float denormalized_value = fp32_from_bits((two_w >> 17) | magic_mask) - magic_bias;

    const uint32_t denormalized_cutoff = UINT32_C(1) << 27;
    const uint32_t result = sign |
        (two_w < denormalized_cutoff ? fp32_to_bits(denormalized_value) : fp32_to_bits(normalized_value));
    return fp32_from_bits(result);
}

static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) {
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)
    const float scale_to_inf = 0x1.0p+112f;
    const float scale_to_zero = 0x1.0p-110f;
#else
    const float scale_to_inf = fp32_from_bits(UINT32_C(0x77800000));
    const float scale_to_zero = fp32_from_bits(UINT32_C(0x08800000));
#endif
    float base = (fabsf(f) * scale_to_inf) * scale_to_zero;

    const uint32_t w = fp32_to_bits(f);
    const uint32_t shl1_w = w + w;
    const uint32_t sign = w & UINT32_C(0x80000000);
    uint32_t bias = shl1_w & UINT32_C(0xFF000000);
    if (bias < UINT32_C(0x71000000)) {
        bias = UINT32_C(0x71000000);
    }

    base = fp32_from_bits((bias >> 1) + UINT32_C(0x07800000)) + base;
    const uint32_t bits = fp32_to_bits(base);
    const uint32_t exp_bits = (bits >> 13) & UINT32_C(0x00007C00);
    const uint32_t mantissa_bits = bits & UINT32_C(0x00000FFF);
    const uint32_t nonsign = exp_bits + mantissa_bits;
    return (sign >> 16) | (shl1_w > UINT32_C(0xFF000000) ? UINT16_C(0x7E00) : nonsign);
}

#define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
#define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x)

#endif // __F16C__

#endif // __ARM_NEON
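// Example (illustrative sketch, not part of the original file): round-tripping a value
// through the compute conversions selected above; the exact code path depends on the
// branch taken (NEON cast, F16C intrinsics, POWER9 asm or the portable bit tricks).
// ggml_example_fp16_roundtrip is a hypothetical name used only for this sketch.
#if 0
static void ggml_example_fp16_roundtrip(void) {
    const float x = 3.14159f;
    const ggml_fp16_t h = GGML_COMPUTE_FP32_TO_FP16(x);
    const float y = GGML_COMPUTE_FP16_TO_FP32(h);
    // y should be x rounded to the nearest representable half-precision value (~3.1406)
    (void) y;
}
#endif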
//
// global data
//

// precomputed gelu table for f16 (128 KB)
static ggml_fp16_t table_gelu_f16[1 << 16];

// precomputed quick gelu table for f16 (128 KB)
static ggml_fp16_t table_gelu_quick_f16[1 << 16];

// precomputed silu table for f16 (128 KB)
static ggml_fp16_t table_silu_f16[1 << 16];

// precomputed exp table for f16 (128 KB)
static ggml_fp16_t table_exp_f16[1 << 16];

// precomputed f32 table for f16 (256 KB)
static float table_f32_f16[1 << 16];

#if defined(__ARM_NEON) || defined(__wasm_simd128__)
#define B1(c,s,n) 0x ## n ## c , 0x ## n ## s
#define B2(c,s,n) B1(c,s,n ## c), B1(c,s,n ## s)
#define B3(c,s,n) B2(c,s,n ## c), B2(c,s,n ## s)
#define B4(c,s,n) B3(c,s,n ## c), B3(c,s,n ## s)
#define B5(c,s,n) B4(c,s,n ## c), B4(c,s,n ## s)
#define B6(c,s,n) B5(c,s,n ## c), B5(c,s,n ## s)
#define B7(c,s,n) B6(c,s,n ## c), B6(c,s,n ## s)
#define B8(c,s  ) B7(c,s,     c), B7(c,s,     s)

// precomputed tables for expanding 8bits to 8 bytes:
static const uint64_t table_b2b_0[1 << 8] = { B8(00, 10) }; // ( b) << 4
static const uint64_t table_b2b_1[1 << 8] = { B8(10, 00) }; // (!b) << 4
#endif
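// For reference (illustrative note, not part of the original file): on a little-endian
// target, byte j of table_b2b_0[k] is ((k >> j) & 1) << 4 and byte j of table_b2b_1[k]
// is its complement ((~k >> j) & 1) << 4 - i.e. each 8-bit index is spread into eight
// 0x00/0x10 bytes selected by its bits, matching the "( b) << 4" / "(!b) << 4" notes above.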
// On ARM NEON, it's quicker to directly convert x -> x instead of calling into ggml_lookup_fp16_to_fp32,
// so we define GGML_FP16_TO_FP32 and GGML_FP32_TO_FP16 elsewhere for NEON.
// This is also true for POWER9.
#if !defined(GGML_FP16_TO_FP32) || !defined(GGML_FP32_TO_FP16)

inline static float ggml_lookup_fp16_to_fp32(ggml_fp16_t f) {
    uint16_t s;
    memcpy(&s, &f, sizeof(uint16_t));
    return table_f32_f16[s];
}

#define GGML_FP16_TO_FP32(x) ggml_lookup_fp16_to_fp32(x)
#define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x)

#endif

// note: do not use these inside ggml.c
// these are meant to be used via the ggml.h API
float ggml_fp16_to_fp32(ggml_fp16_t x) {
    return (float) GGML_FP16_TO_FP32(x);
}

ggml_fp16_t ggml_fp32_to_fp16(float x) {
    return GGML_FP32_TO_FP16(x);
}

void ggml_fp16_to_fp32_row(const ggml_fp16_t * x, float * y, int n) {
    for (int i = 0; i < n; i++) {
        y[i] = GGML_FP16_TO_FP32(x[i]);
    }
}

void ggml_fp32_to_fp16_row(const float * x, ggml_fp16_t * y, int n) {
    int i = 0;
#if defined(__F16C__)
    for (; i + 7 < n; i += 8) {
        __m256 x_vec = _mm256_loadu_ps(x + i);
        __m128i y_vec = _mm256_cvtps_ph(x_vec, _MM_FROUND_TO_NEAREST_INT);
        _mm_storeu_si128((__m128i *)(y + i), y_vec);
    }
    for(; i + 3 < n; i += 4) {
        __m128 x_vec = _mm_loadu_ps(x + i);
        __m128i y_vec = _mm_cvtps_ph(x_vec, _MM_FROUND_TO_NEAREST_INT);
        _mm_storel_epi64((__m128i *)(y + i), y_vec);
    }
#endif
    for (; i < n; i++) {
        y[i] = GGML_FP32_TO_FP16(x[i]);
    }
}
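// Example (illustrative sketch, not part of the original file): converting a short row of
// floats to half precision and back with the helpers above. Note that on the lookup path
// the fp16 -> fp32 table is filled in during ggml_init(), so real callers initialize ggml
// first. ggml_example_fp16_row is a hypothetical name used only for this sketch.
#if 0
static void ggml_example_fp16_row(void) {
    const float src[4] = { 0.0f, 0.5f, 1.0f, 2.0f };
    ggml_fp16_t tmp[4];
    float       dst[4];
    ggml_fp32_to_fp16_row(src, tmp, 4);
    ggml_fp16_to_fp32_row(tmp, dst, 4);
    // dst should match src exactly here, since these values are representable in fp16
}
#endif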
//
// timing
//

#if defined(_MSC_VER) || defined(__MINGW32__)
static int64_t timer_freq, timer_start;
void ggml_time_init(void) {
    LARGE_INTEGER t;
    QueryPerformanceFrequency(&t);
    timer_freq = t.QuadPart;

    // The multiplication by 1000 or 1000000 below can cause an overflow if timer_freq
    // and the uptime are high enough.
    // We subtract the program start time to reduce the likelihood of that happening.
    QueryPerformanceCounter(&t);
    timer_start = t.QuadPart;
}
int64_t ggml_time_ms(void) {
    LARGE_INTEGER t;
    QueryPerformanceCounter(&t);
    return ((t.QuadPart-timer_start) * 1000) / timer_freq;
}
int64_t ggml_time_us(void) {
    LARGE_INTEGER t;
    QueryPerformanceCounter(&t);
    return ((t.QuadPart-timer_start) * 1000000) / timer_freq;
}
#else
void ggml_time_init(void) {}
int64_t ggml_time_ms(void) {
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (int64_t)ts.tv_sec*1000 + (int64_t)ts.tv_nsec/1000000;
}

int64_t ggml_time_us(void) {
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (int64_t)ts.tv_sec*1000000 + (int64_t)ts.tv_nsec/1000;
}
#endif

int64_t ggml_cycles(void) {
    return clock();
}

int64_t ggml_cycles_per_ms(void) {
    return CLOCKS_PER_SEC/1000;
}

#ifdef GGML_PERF
#define ggml_perf_time_ms()       ggml_time_ms()
#define ggml_perf_time_us()       ggml_time_us()
#define ggml_perf_cycles()        ggml_cycles()
#define ggml_perf_cycles_per_ms() ggml_cycles_per_ms()
#else
#define ggml_perf_time_ms()       0
#define ggml_perf_time_us()       0
#define ggml_perf_cycles()        0
#define ggml_perf_cycles_per_ms() 0
#endif
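// Example (illustrative sketch, not part of the original file): timing a region with the
// wall-clock helpers above; when GGML_PERF is not defined the ggml_perf_* macros compile
// to 0, so per-op instrumentation costs nothing. ggml_example_timing is a hypothetical
// name used only for this sketch.
#if 0
static void ggml_example_timing(void) {
    ggml_time_init();                 // needed once on Windows to set up QueryPerformanceCounter
    const int64_t t0 = ggml_time_us();
    // ... work to be measured ...
    const int64_t t1 = ggml_time_us();
    GGML_PRINT("elapsed: %" PRId64 " us\n", t1 - t0);
}
#endif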
//
// cache line
//

#if defined(__cpp_lib_hardware_interference_size)
#define CACHE_LINE_SIZE hardware_destructive_interference_size
#else
#if defined(__POWER9_VECTOR__)
#define CACHE_LINE_SIZE 128
#else
#define CACHE_LINE_SIZE 64
#endif
#endif

static const size_t CACHE_LINE_SIZE_F32 = CACHE_LINE_SIZE/sizeof(float);

//
// quantization
//

#define MM256_SET_M128I(a, b) _mm256_insertf128_si256(_mm256_castsi128_si256(b), (a), 1)

#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__)
// multiply int8_t, add results pairwise twice
static inline __m128i mul_sum_i8_pairs(const __m128i x, const __m128i y) {
    // Get absolute values of x vectors
    const __m128i ax = _mm_sign_epi8(x, x);
    // Sign the values of the y vectors
    const __m128i sy = _mm_sign_epi8(y, x);
    // Perform multiplication and create 16-bit values
    const __m128i dot = _mm_maddubs_epi16(ax, sy);
    const __m128i ones = _mm_set1_epi16(1);
    return _mm_madd_epi16(ones, dot);
}
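// For reference (illustrative note, not part of the original file): ignoring i16
// saturation at the extreme value -128, each 32-bit lane k of the result equals
//     x[4k]*y[4k] + x[4k+1]*y[4k+1] + x[4k+2]*y[4k+2] + x[4k+3]*y[4k+3]
// over the signed 8-bit lanes of x and y; the abs/sign shuffle above only moves the
// signs onto y so that the unsigned-by-signed _mm_maddubs_epi16 can be used.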
  530. #if __AVX__ || __AVX2__ || __AVX512F__
  531. // horizontally add 8 floats
  532. static inline float hsum_float_8(const __m256 x) {
  533. __m128 res = _mm256_extractf128_ps(x, 1);
  534. res = _mm_add_ps(res, _mm256_castps256_ps128(x));
  535. res = _mm_add_ps(res, _mm_movehl_ps(res, res));
  536. res = _mm_add_ss(res, _mm_movehdup_ps(res));
  537. return _mm_cvtss_f32(res);
  538. }
  539. // horizontally add 8 int32_t
  540. static inline int hsum_i32_8(const __m256i a) {
  541. const __m128i sum128 = _mm_add_epi32(_mm256_castsi256_si128(a), _mm256_extractf128_si256(a, 1));
  542. const __m128i hi64 = _mm_unpackhi_epi64(sum128, sum128);
  543. const __m128i sum64 = _mm_add_epi32(hi64, sum128);
  544. const __m128i hi32 = _mm_shuffle_epi32(sum64, _MM_SHUFFLE(2, 3, 0, 1));
  545. return _mm_cvtsi128_si32(_mm_add_epi32(sum64, hi32));
  546. }
  547. // horizontally add 4 int32_t
  548. static inline int hsum_i32_4(const __m128i a) {
  549. const __m128i hi64 = _mm_unpackhi_epi64(a, a);
  550. const __m128i sum64 = _mm_add_epi32(hi64, a);
  551. const __m128i hi32 = _mm_shuffle_epi32(sum64, _MM_SHUFFLE(2, 3, 0, 1));
  552. return _mm_cvtsi128_si32(_mm_add_epi32(sum64, hi32));
  553. }
  554. #if defined(__AVX2__) || defined(__AVX512F__)
  555. // spread 32 bits to 32 bytes { 0x00, 0xFF }
  556. static inline __m256i bytes_from_bits_32(const uint8_t * x) {
  557. uint32_t x32;
  558. memcpy(&x32, x, sizeof(uint32_t));
  559. const __m256i shuf_mask = _mm256_set_epi64x(
  560. 0x0303030303030303, 0x0202020202020202,
  561. 0x0101010101010101, 0x0000000000000000);
  562. __m256i bytes = _mm256_shuffle_epi8(_mm256_set1_epi32(x32), shuf_mask);
  563. const __m256i bit_mask = _mm256_set1_epi64x(0x7fbfdfeff7fbfdfe);
  564. bytes = _mm256_or_si256(bytes, bit_mask);
  565. return _mm256_cmpeq_epi8(bytes, _mm256_set1_epi64x(-1));
  566. }
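// How the bit spread above works: the shuffle broadcasts each source byte to 8 output
// bytes, the OR sets every bit except the single bit each lane tests, and the compare
// against -1 turns "bit set" into 0xFF and "bit clear" into 0x00.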
  567. // Unpack 32 4-bit fields into 32 bytes
569. // The output vector contains 32 bytes, each one in the [0, 15] interval
  569. static inline __m256i bytes_from_nibbles_32(const uint8_t * rsi)
  570. {
  571. const __m128i tmp = _mm_loadu_si128((const __m128i *)rsi);
  572. const __m256i bytes = MM256_SET_M128I(_mm_srli_epi16(tmp, 4), tmp);
  573. const __m256i lowMask = _mm256_set1_epi8( 0xF );
  574. return _mm256_and_si256(lowMask, bytes);
  575. }
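// Worked example: a source byte 0xB4 contributes two output bytes, 0x04 from its low
// nibble (first half of the block) and 0x0B from its high nibble (second half), since
// the 16 high nibbles land in the upper 128-bit lane.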
  576. // add int16_t pairwise and return as float vector
  577. static inline __m256 sum_i16_pairs_float(const __m256i x) {
  578. const __m256i ones = _mm256_set1_epi16(1);
  579. const __m256i summed_pairs = _mm256_madd_epi16(ones, x);
  580. return _mm256_cvtepi32_ps(summed_pairs);
  581. }
  582. static inline __m256 mul_sum_us8_pairs_float(const __m256i ax, const __m256i sy) {
  583. #if __AVXVNNI__
  584. const __m256i zero = _mm256_setzero_si256();
  585. const __m256i summed_pairs = _mm256_dpbusd_epi32(zero, ax, sy);
  586. return _mm256_cvtepi32_ps(summed_pairs);
  587. #else
  588. // Perform multiplication and create 16-bit values
  589. const __m256i dot = _mm256_maddubs_epi16(ax, sy);
  590. return sum_i16_pairs_float(dot);
  591. #endif
  592. }
  593. // multiply int8_t, add results pairwise twice and return as float vector
  594. static inline __m256 mul_sum_i8_pairs_float(const __m256i x, const __m256i y) {
  595. #if __AVXVNNIINT8__
  596. const __m256i zero = _mm256_setzero_si256();
  597. const __m256i summed_pairs = _mm256_dpbssd_epi32(zero, x, y);
  598. return _mm256_cvtepi32_ps(summed_pairs);
  599. #else
  600. // Get absolute values of x vectors
  601. const __m256i ax = _mm256_sign_epi8(x, x);
603. // Apply the sign of the x values to the y values
  603. const __m256i sy = _mm256_sign_epi8(y, x);
  604. return mul_sum_us8_pairs_float(ax, sy);
  605. #endif
  606. }
  607. static inline __m128i packNibbles( __m256i bytes )
  608. {
  609. // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh
  610. #if __AVX512F__
  611. const __m256i bytes_srli_4 = _mm256_srli_epi16(bytes, 4); // 0000_0000_abcd_0000
  612. bytes = _mm256_or_si256(bytes, bytes_srli_4); // 0000_abcd_abcd_efgh
  613. return _mm256_cvtepi16_epi8(bytes); // abcd_efgh
  614. #else
  615. const __m256i lowByte = _mm256_set1_epi16( 0xFF );
  616. __m256i high = _mm256_andnot_si256( lowByte, bytes );
  617. __m256i low = _mm256_and_si256( lowByte, bytes );
  618. high = _mm256_srli_epi16( high, 4 );
  619. bytes = _mm256_or_si256( low, high );
  620. // Compress uint16_t lanes into bytes
  621. __m128i r0 = _mm256_castsi256_si128( bytes );
  622. __m128i r1 = _mm256_extracti128_si256( bytes, 1 );
  623. return _mm_packus_epi16( r0, r1 );
  624. #endif
  625. }
  626. #elif defined(__AVX__)
  627. // spread 32 bits to 32 bytes { 0x00, 0xFF }
  628. static inline __m256i bytes_from_bits_32(const uint8_t * x) {
  629. uint32_t x32;
  630. memcpy(&x32, x, sizeof(uint32_t));
  631. const __m128i shuf_maskl = _mm_set_epi64x(0x0101010101010101, 0x0000000000000000);
  632. const __m128i shuf_maskh = _mm_set_epi64x(0x0303030303030303, 0x0202020202020202);
  633. __m128i bytesl = _mm_shuffle_epi8(_mm_set1_epi32(x32), shuf_maskl);
  634. __m128i bytesh = _mm_shuffle_epi8(_mm_set1_epi32(x32), shuf_maskh);
  635. const __m128i bit_mask = _mm_set1_epi64x(0x7fbfdfeff7fbfdfe);
  636. bytesl = _mm_or_si128(bytesl, bit_mask);
  637. bytesh = _mm_or_si128(bytesh, bit_mask);
  638. bytesl = _mm_cmpeq_epi8(bytesl, _mm_set1_epi64x(-1));
  639. bytesh = _mm_cmpeq_epi8(bytesh, _mm_set1_epi64x(-1));
  640. return MM256_SET_M128I(bytesh, bytesl);
  641. }
  642. // Unpack 32 4-bit fields into 32 bytes
643. // The output vector contains 32 bytes, each one in the [0, 15] interval
  644. static inline __m256i bytes_from_nibbles_32(const uint8_t * rsi)
  645. {
  646. // Load 16 bytes from memory
  647. __m128i tmpl = _mm_loadu_si128((const __m128i *)rsi);
  648. __m128i tmph = _mm_srli_epi16(tmpl, 4);
  649. const __m128i lowMask = _mm_set1_epi8(0xF);
  650. tmpl = _mm_and_si128(lowMask, tmpl);
  651. tmph = _mm_and_si128(lowMask, tmph);
  652. return MM256_SET_M128I(tmph, tmpl);
  653. }
  654. // add int16_t pairwise and return as float vector
  655. static inline __m256 sum_i16_pairs_float(const __m128i xh, const __m128i xl) {
  656. const __m128i ones = _mm_set1_epi16(1);
  657. const __m128i summed_pairsl = _mm_madd_epi16(ones, xl);
  658. const __m128i summed_pairsh = _mm_madd_epi16(ones, xh);
  659. const __m256i summed_pairs = MM256_SET_M128I(summed_pairsh, summed_pairsl);
  660. return _mm256_cvtepi32_ps(summed_pairs);
  661. }
  662. static inline __m256 mul_sum_us8_pairs_float(const __m256i ax, const __m256i sy) {
  663. const __m128i axl = _mm256_castsi256_si128(ax);
  664. const __m128i axh = _mm256_extractf128_si256(ax, 1);
  665. const __m128i syl = _mm256_castsi256_si128(sy);
  666. const __m128i syh = _mm256_extractf128_si256(sy, 1);
  667. // Perform multiplication and create 16-bit values
  668. const __m128i dotl = _mm_maddubs_epi16(axl, syl);
  669. const __m128i doth = _mm_maddubs_epi16(axh, syh);
  670. return sum_i16_pairs_float(doth, dotl);
  671. }
  672. // multiply int8_t, add results pairwise twice and return as float vector
  673. static inline __m256 mul_sum_i8_pairs_float(const __m256i x, const __m256i y) {
  674. const __m128i xl = _mm256_castsi256_si128(x);
  675. const __m128i xh = _mm256_extractf128_si256(x, 1);
  676. const __m128i yl = _mm256_castsi256_si128(y);
  677. const __m128i yh = _mm256_extractf128_si256(y, 1);
  678. // Get absolute values of x vectors
  679. const __m128i axl = _mm_sign_epi8(xl, xl);
  680. const __m128i axh = _mm_sign_epi8(xh, xh);
681. // Apply the sign of the x values to the y values
  682. const __m128i syl = _mm_sign_epi8(yl, xl);
  683. const __m128i syh = _mm_sign_epi8(yh, xh);
  684. // Perform multiplication and create 16-bit values
  685. const __m128i dotl = _mm_maddubs_epi16(axl, syl);
  686. const __m128i doth = _mm_maddubs_epi16(axh, syh);
  687. return sum_i16_pairs_float(doth, dotl);
  688. }
  689. static inline __m128i packNibbles( __m128i bytes1, __m128i bytes2 )
  690. {
  691. // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh
  692. const __m128i lowByte = _mm_set1_epi16( 0xFF );
  693. __m128i high = _mm_andnot_si128( lowByte, bytes1 );
  694. __m128i low = _mm_and_si128( lowByte, bytes1 );
  695. high = _mm_srli_epi16( high, 4 );
  696. bytes1 = _mm_or_si128( low, high );
  697. high = _mm_andnot_si128( lowByte, bytes2 );
  698. low = _mm_and_si128( lowByte, bytes2 );
  699. high = _mm_srli_epi16( high, 4 );
  700. bytes2 = _mm_or_si128( low, high );
  701. return _mm_packus_epi16( bytes1, bytes2);
  702. }
  703. #endif
  704. #elif defined(__SSSE3__)
  705. // horizontally add 4x4 floats
  706. static inline float hsum_float_4x4(const __m128 a, const __m128 b, const __m128 c, const __m128 d) {
  707. __m128 res_0 =_mm_hadd_ps(a, b);
  708. __m128 res_1 =_mm_hadd_ps(c, d);
  709. __m128 res =_mm_hadd_ps(res_0, res_1);
  710. res =_mm_hadd_ps(res, res);
  711. res =_mm_hadd_ps(res, res);
  712. return _mm_cvtss_f32(res);
  713. }
  714. #endif // __AVX__ || __AVX2__ || __AVX512F__
  715. #endif // defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__)
  716. #if defined(__ARM_NEON)
  717. #if !defined(__aarch64__)
  718. inline static int32_t vaddvq_s32(int32x4_t v) {
  719. return vgetq_lane_s32(v, 0) + vgetq_lane_s32(v, 1) + vgetq_lane_s32(v, 2) + vgetq_lane_s32(v, 3);
  720. }
  721. inline static float vaddvq_f32(float32x4_t v) {
  722. return vgetq_lane_f32(v, 0) + vgetq_lane_f32(v, 1) + vgetq_lane_f32(v, 2) + vgetq_lane_f32(v, 3);
  723. }
  724. inline static float vmaxvq_f32(float32x4_t v) {
  725. return
  726. MAX(MAX(vgetq_lane_f32(v, 0), vgetq_lane_f32(v, 1)),
  727. MAX(vgetq_lane_f32(v, 2), vgetq_lane_f32(v, 3)));
  728. }
  729. inline static int32x4_t vcvtnq_s32_f32(float32x4_t v) {
  730. int32x4_t res;
  731. res[0] = roundf(vgetq_lane_f32(v, 0));
  732. res[1] = roundf(vgetq_lane_f32(v, 1));
  733. res[2] = roundf(vgetq_lane_f32(v, 2));
  734. res[3] = roundf(vgetq_lane_f32(v, 3));
  735. return res;
  736. }
  737. #endif
  738. #endif
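// The shims above emulate the AArch64-only horizontal intrinsics (vaddvq_*, vmaxvq_f32,
// vcvtnq_s32_f32) lane by lane so the same NEON code paths also build on 32-bit ARM.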
  739. #define QK4_0 32
  740. typedef struct {
  741. ggml_fp16_t d; // delta
  742. uint8_t qs[QK4_0 / 2]; // nibbles / quants
  743. } block_q4_0;
  744. static_assert(sizeof(block_q4_0) == sizeof(ggml_fp16_t) + QK4_0 / 2, "wrong q4_0 block size/padding");
  745. #define QK4_1 32
  746. typedef struct {
  747. ggml_fp16_t d; // delta
  748. ggml_fp16_t m; // min
  749. uint8_t qs[QK4_1 / 2]; // nibbles / quants
  750. } block_q4_1;
  751. static_assert(sizeof(block_q4_1) == 2 * sizeof(ggml_fp16_t) + QK4_1 / 2, "wrong q4_1 block size/padding");
  752. #define QK5_0 32
  753. typedef struct {
  754. ggml_fp16_t d; // delta
  755. uint8_t qh[4]; // 5-th bit of quants
  756. uint8_t qs[QK5_0 / 2]; // nibbles / quants
  757. } block_q5_0;
  758. static_assert(sizeof(block_q5_0) == sizeof(ggml_fp16_t) + sizeof(uint32_t) + QK5_0 / 2, "wrong q5_0 block size/padding");
  759. #define QK5_1 32
  760. typedef struct {
  761. ggml_fp16_t d; // delta
  762. ggml_fp16_t m; // min
  763. uint8_t qh[4]; // 5-th bit of quants
  764. uint8_t qs[QK5_1 / 2]; // nibbles / quants
  765. } block_q5_1;
  766. static_assert(sizeof(block_q5_1) == 2 * sizeof(ggml_fp16_t) + sizeof(uint32_t) + QK5_1 / 2, "wrong q5_1 block size/padding");
  767. #define QK8_0 32
  768. typedef struct {
  769. ggml_fp16_t d; // delta
  770. int8_t qs[QK8_0]; // quants
  771. } block_q8_0;
  772. static_assert(sizeof(block_q8_0) == sizeof(ggml_fp16_t) + QK8_0, "wrong q8_0 block size/padding");
  773. #define QK8_1 32
  774. typedef struct {
  775. float d; // delta
  776. float s; // d * sum(qs[i])
  777. int8_t qs[QK8_1]; // quants
  778. } block_q8_1;
  779. static_assert(sizeof(block_q8_1) == 2*sizeof(float) + QK8_1, "wrong q8_1 block size/padding");
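// Storage cost per 32-element block, derived from the struct layouts above:
//   q4_0: 2 + 16          = 18 bytes ->  4.5 bits/weight
//   q4_1: 2 + 2 + 16      = 20 bytes ->  5.0 bits/weight
//   q5_0: 2 + 4 + 16      = 22 bytes ->  5.5 bits/weight
//   q5_1: 2 + 2 + 4 + 16  = 24 bytes ->  6.0 bits/weight
//   q8_0: 2 + 32          = 34 bytes ->  8.5 bits/weight
//   q8_1: 4 + 4 + 32      = 40 bytes -> 10.0 bits/weight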
  780. // reference implementation for deterministic creation of model files
  781. static void quantize_row_q4_0_reference(const float * restrict x, block_q4_0 * restrict y, int k) {
  782. static const int qk = QK4_0;
  783. assert(k % qk == 0);
  784. const int nb = k / qk;
  785. for (int i = 0; i < nb; i++) {
  786. float amax = 0.0f; // absolute max
  787. float max = 0.0f;
  788. for (int j = 0; j < qk; j++) {
  789. const float v = x[i*qk + j];
  790. if (amax < fabsf(v)) {
  791. amax = fabsf(v);
  792. max = v;
  793. }
  794. }
  795. const float d = max / -8;
  796. const float id = d ? 1.0f/d : 0.0f;
  797. y[i].d = GGML_FP32_TO_FP16(d);
  798. for (int j = 0; j < qk/2; ++j) {
  799. const float x0 = x[i*qk + 0 + j]*id;
  800. const float x1 = x[i*qk + qk/2 + j]*id;
  801. const uint8_t xi0 = MIN(15, (int8_t)(x0 + 8.5f));
  802. const uint8_t xi1 = MIN(15, (int8_t)(x1 + 8.5f));
  803. y[i].qs[j] = xi0;
  804. y[i].qs[j] |= xi1 << 4;
  805. }
  806. }
  807. }
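// Worked example for one q4_0 block: if the value with the largest magnitude is
// max = -2.0f, then d = -2.0f / -8 = 0.25f and id = 4.0f. An input x = 1.0f maps to
// MIN(15, (int8_t)(1.0f*4.0f + 8.5f)) = 12, which dequantizes back to (12 - 8)*d = 1.0f.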
  808. static void quantize_row_q4_0(const float * restrict x, void * restrict y, int k) {
  809. quantize_row_q4_0_reference(x, y, k);
  810. }
  811. static void quantize_row_q4_1_reference(const float * restrict x, block_q4_1 * restrict y, int k) {
  812. const int qk = QK4_1;
  813. assert(k % qk == 0);
  814. const int nb = k / qk;
  815. for (int i = 0; i < nb; i++) {
  816. float min = FLT_MAX;
  817. float max = -FLT_MAX;
  818. for (int j = 0; j < qk; j++) {
  819. const float v = x[i*qk + j];
  820. if (v < min) min = v;
  821. if (v > max) max = v;
  822. }
  823. const float d = (max - min) / ((1 << 4) - 1);
  824. const float id = d ? 1.0f/d : 0.0f;
  825. y[i].d = GGML_FP32_TO_FP16(d);
  826. y[i].m = GGML_FP32_TO_FP16(min);
  827. for (int j = 0; j < qk/2; ++j) {
  828. const float x0 = (x[i*qk + 0 + j] - min)*id;
  829. const float x1 = (x[i*qk + qk/2 + j] - min)*id;
  830. const uint8_t xi0 = MIN(15, (int8_t)(x0 + 0.5f));
  831. const uint8_t xi1 = MIN(15, (int8_t)(x1 + 0.5f));
  832. y[i].qs[j] = xi0;
  833. y[i].qs[j] |= xi1 << 4;
  834. }
  835. }
  836. }
  837. static void quantize_row_q4_1(const float * restrict x, void * restrict y, int k) {
  838. quantize_row_q4_1_reference(x, y, k);
  839. }
  840. static void quantize_row_q5_0_reference(const float * restrict x, block_q5_0 * restrict y, int k) {
  841. static const int qk = QK5_0;
  842. assert(k % qk == 0);
  843. const int nb = k / qk;
  844. for (int i = 0; i < nb; i++) {
  845. float amax = 0.0f; // absolute max
  846. float max = 0.0f;
  847. for (int j = 0; j < qk; j++) {
  848. const float v = x[i*qk + j];
  849. if (amax < fabsf(v)) {
  850. amax = fabsf(v);
  851. max = v;
  852. }
  853. }
  854. const float d = max / -16;
  855. const float id = d ? 1.0f/d : 0.0f;
  856. y[i].d = GGML_FP32_TO_FP16(d);
  857. uint32_t qh = 0;
  858. for (int j = 0; j < qk/2; ++j) {
  859. const float x0 = x[i*qk + 0 + j]*id;
  860. const float x1 = x[i*qk + qk/2 + j]*id;
  861. const uint8_t xi0 = MIN(31, (int8_t)(x0 + 16.5f));
  862. const uint8_t xi1 = MIN(31, (int8_t)(x1 + 16.5f));
  863. y[i].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4);
  864. // get the 5-th bit and store it in qh at the right position
  865. qh |= ((xi0 & 0x10) >> 4) << (j + 0);
  866. qh |= ((xi1 & 0x10) >> 4) << (j + qk/2);
  867. }
  868. memcpy(&y[i].qh, &qh, sizeof(qh));
  869. }
  870. }
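// Packing note: each 5-bit quant is split into a low nibble stored in qs and a high bit
// collected in the 32-bit qh word; bit j holds the high bit of element j and bit j + 16
// that of element j + 16, mirroring the nibble layout.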
  871. static void quantize_row_q5_0(const float * restrict x, void * restrict y, int k) {
  872. quantize_row_q5_0_reference(x, y, k);
  873. }
  874. static void quantize_row_q5_1_reference(const float * restrict x, block_q5_1 * restrict y, int k) {
  875. const int qk = QK5_1;
  876. assert(k % qk == 0);
  877. const int nb = k / qk;
  878. for (int i = 0; i < nb; i++) {
  879. float min = FLT_MAX;
  880. float max = -FLT_MAX;
  881. for (int j = 0; j < qk; j++) {
  882. const float v = x[i*qk + j];
  883. if (v < min) min = v;
  884. if (v > max) max = v;
  885. }
  886. const float d = (max - min) / ((1 << 5) - 1);
  887. const float id = d ? 1.0f/d : 0.0f;
  888. y[i].d = GGML_FP32_TO_FP16(d);
  889. y[i].m = GGML_FP32_TO_FP16(min);
  890. uint32_t qh = 0;
  891. for (int j = 0; j < qk/2; ++j) {
  892. const float x0 = (x[i*qk + 0 + j] - min)*id;
  893. const float x1 = (x[i*qk + qk/2 + j] - min)*id;
  894. const uint8_t xi0 = (uint8_t)(x0 + 0.5f);
  895. const uint8_t xi1 = (uint8_t)(x1 + 0.5f);
  896. y[i].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4);
  897. // get the 5-th bit and store it in qh at the right position
  898. qh |= ((xi0 & 0x10) >> 4) << (j + 0);
  899. qh |= ((xi1 & 0x10) >> 4) << (j + qk/2);
  900. }
  901. memcpy(&y[i].qh, &qh, sizeof(y[i].qh));
  902. }
  903. }
  904. static void quantize_row_q5_1(const float * restrict x, void * restrict y, int k) {
  905. quantize_row_q5_1_reference(x, y, k);
  906. }
  907. // reference implementation for deterministic creation of model files
  908. static void quantize_row_q8_0_reference(const float * restrict x, block_q8_0 * restrict y, int k) {
  909. assert(k % QK8_0 == 0);
  910. const int nb = k / QK8_0;
  911. for (int i = 0; i < nb; i++) {
  912. float amax = 0.0f; // absolute max
  913. for (int j = 0; j < QK8_0; j++) {
  914. const float v = x[i*QK8_0 + j];
  915. amax = MAX(amax, fabsf(v));
  916. }
  917. const float d = amax / ((1 << 7) - 1);
  918. const float id = d ? 1.0f/d : 0.0f;
  919. y[i].d = GGML_FP32_TO_FP16(d);
  920. for (int j = 0; j < QK8_0; ++j) {
  921. const float x0 = x[i*QK8_0 + j]*id;
  922. y[i].qs[j] = roundf(x0);
  923. }
  924. }
  925. }
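// Here the scale is symmetric: d = amax / 127, q = round(x/d) lies in [-127, 127], and
// dequantization is simply x ~= q*d (see dequantize_row_q8_0 below).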
  926. static void quantize_row_q8_0(const float * restrict x, void * restrict vy, int k) {
  927. assert(QK8_0 == 32);
  928. assert(k % QK8_0 == 0);
  929. const int nb = k / QK8_0;
  930. block_q8_0 * restrict y = vy;
  931. #if defined(__ARM_NEON)
  932. for (int i = 0; i < nb; i++) {
  933. float32x4_t srcv [8];
  934. float32x4_t asrcv[8];
  935. float32x4_t amaxv[8];
  936. for (int j = 0; j < 8; j++) srcv[j] = vld1q_f32(x + i*32 + 4*j);
  937. for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[j]);
  938. for (int j = 0; j < 4; j++) amaxv[2*j] = vmaxq_f32(asrcv[2*j], asrcv[2*j+1]);
  939. for (int j = 0; j < 2; j++) amaxv[4*j] = vmaxq_f32(amaxv[4*j], amaxv[4*j+2]);
  940. for (int j = 0; j < 1; j++) amaxv[8*j] = vmaxq_f32(amaxv[8*j], amaxv[8*j+4]);
  941. const float amax = vmaxvq_f32(amaxv[0]);
  942. const float d = amax / ((1 << 7) - 1);
  943. const float id = d ? 1.0f/d : 0.0f;
  944. y[i].d = GGML_FP32_TO_FP16(d);
  945. for (int j = 0; j < 8; j++) {
  946. const float32x4_t v = vmulq_n_f32(srcv[j], id);
  947. const int32x4_t vi = vcvtnq_s32_f32(v);
  948. y[i].qs[4*j + 0] = vgetq_lane_s32(vi, 0);
  949. y[i].qs[4*j + 1] = vgetq_lane_s32(vi, 1);
  950. y[i].qs[4*j + 2] = vgetq_lane_s32(vi, 2);
  951. y[i].qs[4*j + 3] = vgetq_lane_s32(vi, 3);
  952. }
  953. }
  954. #elif defined(__wasm_simd128__)
  955. for (int i = 0; i < nb; i++) {
  956. v128_t srcv [8];
  957. v128_t asrcv[8];
  958. v128_t amaxv[8];
  959. for (int j = 0; j < 8; j++) srcv[j] = wasm_v128_load(x + i*32 + 4*j);
  960. for (int j = 0; j < 8; j++) asrcv[j] = wasm_f32x4_abs(srcv[j]);
  961. for (int j = 0; j < 4; j++) amaxv[2*j] = wasm_f32x4_max(asrcv[2*j], asrcv[2*j+1]);
  962. for (int j = 0; j < 2; j++) amaxv[4*j] = wasm_f32x4_max(amaxv[4*j], amaxv[4*j+2]);
  963. for (int j = 0; j < 1; j++) amaxv[8*j] = wasm_f32x4_max(amaxv[8*j], amaxv[8*j+4]);
  964. const float amax = MAX(MAX(wasm_f32x4_extract_lane(amaxv[0], 0),
  965. wasm_f32x4_extract_lane(amaxv[0], 1)),
  966. MAX(wasm_f32x4_extract_lane(amaxv[0], 2),
  967. wasm_f32x4_extract_lane(amaxv[0], 3)));
  968. const float d = amax / ((1 << 7) - 1);
  969. const float id = d ? 1.0f/d : 0.0f;
  970. y[i].d = GGML_FP32_TO_FP16(d);
  971. for (int j = 0; j < 8; j++) {
  972. const v128_t v = wasm_f32x4_mul(srcv[j], wasm_f32x4_splat(id));
  973. const v128_t vi = wasm_i32x4_trunc_sat_f32x4(v);
  974. y[i].qs[4*j + 0] = wasm_i32x4_extract_lane(vi, 0);
  975. y[i].qs[4*j + 1] = wasm_i32x4_extract_lane(vi, 1);
  976. y[i].qs[4*j + 2] = wasm_i32x4_extract_lane(vi, 2);
  977. y[i].qs[4*j + 3] = wasm_i32x4_extract_lane(vi, 3);
  978. }
  979. }
  980. #elif defined(__AVX2__) || defined(__AVX__)
  981. for (int i = 0; i < nb; i++) {
  982. // Load elements into 4 AVX vectors
  983. __m256 v0 = _mm256_loadu_ps( x );
  984. __m256 v1 = _mm256_loadu_ps( x + 8 );
  985. __m256 v2 = _mm256_loadu_ps( x + 16 );
  986. __m256 v3 = _mm256_loadu_ps( x + 24 );
  987. x += 32;
  988. // Compute max(abs(e)) for the block
  989. const __m256 signBit = _mm256_set1_ps( -0.0f );
  990. __m256 maxAbs = _mm256_andnot_ps( signBit, v0 );
  991. maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) );
  992. maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) );
  993. maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) );
  994. __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) );
  995. max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) );
  996. max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) );
  997. const float maxScalar = _mm_cvtss_f32( max4 );
  998. // Quantize these floats
  999. const float d = maxScalar / 127.f;
  1000. y[i].d = GGML_FP32_TO_FP16(d);
  1001. const float id = ( maxScalar != 0.0f ) ? 127.f / maxScalar : 0.0f;
  1002. const __m256 mul = _mm256_set1_ps( id );
  1003. // Apply the multiplier
  1004. v0 = _mm256_mul_ps( v0, mul );
  1005. v1 = _mm256_mul_ps( v1, mul );
  1006. v2 = _mm256_mul_ps( v2, mul );
  1007. v3 = _mm256_mul_ps( v3, mul );
  1008. // Round to nearest integer
  1009. v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST );
  1010. v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST );
  1011. v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST );
  1012. v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST );
  1013. // Convert floats to integers
  1014. __m256i i0 = _mm256_cvtps_epi32( v0 );
  1015. __m256i i1 = _mm256_cvtps_epi32( v1 );
  1016. __m256i i2 = _mm256_cvtps_epi32( v2 );
  1017. __m256i i3 = _mm256_cvtps_epi32( v3 );
  1018. #if defined(__AVX2__)
  1019. // Convert int32 to int16
  1020. i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15
  1021. i2 = _mm256_packs_epi32( i2, i3 ); // 16, 17, 18, 19, 24, 25, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31
  1022. // Convert int16 to int8
  1023. i0 = _mm256_packs_epi16( i0, i2 ); // 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27, 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31
  1024. // We got our precious signed bytes, but the order is now wrong
  1025. // These AVX2 pack instructions process 16-byte pieces independently
1026. // The following instruction fixes the order
  1027. const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 );
  1028. i0 = _mm256_permutevar8x32_epi32( i0, perm );
  1029. _mm256_storeu_si256((__m256i *)y[i].qs, i0);
  1030. #else
1031. // AVX (without AVX2) lacks the 256-bit integer instructions used above,
1032. // so we split the registers in half and use their 128-bit SSE counterparts
  1033. __m128i ni0 = _mm256_castsi256_si128( i0 );
  1034. __m128i ni1 = _mm256_extractf128_si256( i0, 1);
  1035. __m128i ni2 = _mm256_castsi256_si128( i1 );
  1036. __m128i ni3 = _mm256_extractf128_si256( i1, 1);
  1037. __m128i ni4 = _mm256_castsi256_si128( i2 );
  1038. __m128i ni5 = _mm256_extractf128_si256( i2, 1);
  1039. __m128i ni6 = _mm256_castsi256_si128( i3 );
  1040. __m128i ni7 = _mm256_extractf128_si256( i3, 1);
  1041. // Convert int32 to int16
  1042. ni0 = _mm_packs_epi32( ni0, ni1 );
  1043. ni2 = _mm_packs_epi32( ni2, ni3 );
  1044. ni4 = _mm_packs_epi32( ni4, ni5 );
  1045. ni6 = _mm_packs_epi32( ni6, ni7 );
  1046. // Convert int16 to int8
  1047. ni0 = _mm_packs_epi16( ni0, ni2 );
  1048. ni4 = _mm_packs_epi16( ni4, ni6 );
  1049. _mm_storeu_si128((__m128i *)(y[i].qs + 0), ni0);
  1050. _mm_storeu_si128((__m128i *)(y[i].qs + 16), ni4);
  1051. #endif
  1052. }
  1053. #else
  1054. // scalar
  1055. quantize_row_q8_0_reference(x, y, k);
  1056. #endif
  1057. }
  1058. // reference implementation for deterministic creation of model files
  1059. static void quantize_row_q8_1_reference(const float * restrict x, block_q8_1 * restrict y, int k) {
  1060. assert(QK8_1 == 32);
  1061. assert(k % QK8_1 == 0);
  1062. const int nb = k / QK8_1;
  1063. for (int i = 0; i < nb; i++) {
  1064. float amax = 0.0f; // absolute max
  1065. for (int j = 0; j < QK8_1; j++) {
  1066. const float v = x[i*QK8_1 + j];
  1067. amax = MAX(amax, fabsf(v));
  1068. }
  1069. const float d = amax / ((1 << 7) - 1);
  1070. const float id = d ? 1.0f/d : 0.0f;
  1071. y[i].d = d;
  1072. int sum = 0;
  1073. for (int j = 0; j < QK8_1/2; ++j) {
  1074. const float v0 = x[i*QK8_1 + j]*id;
  1075. const float v1 = x[i*QK8_1 + QK8_1/2 + j]*id;
  1076. y[i].qs[ j] = roundf(v0);
  1077. y[i].qs[QK8_1/2 + j] = roundf(v1);
  1078. sum += y[i].qs[ j];
  1079. sum += y[i].qs[QK8_1/2 + j];
  1080. }
  1081. y[i].s = sum*d;
  1082. }
  1083. }
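// The precomputed s = d * sum(qs) lets the asymmetric q4_1/q5_1 dot products account for
// their per-block minimum m via a single m*s term instead of re-summing the activations
// inside the inner loop.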
  1084. static void quantize_row_q8_1(const float * restrict x, void * restrict vy, int k) {
  1085. assert(k % QK8_1 == 0);
  1086. const int nb = k / QK8_1;
  1087. block_q8_1 * restrict y = vy;
  1088. #if defined(__ARM_NEON)
  1089. for (int i = 0; i < nb; i++) {
  1090. float32x4_t srcv [8];
  1091. float32x4_t asrcv[8];
  1092. float32x4_t amaxv[8];
  1093. for (int j = 0; j < 8; j++) srcv[j] = vld1q_f32(x + i*32 + 4*j);
  1094. for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[j]);
  1095. for (int j = 0; j < 4; j++) amaxv[2*j] = vmaxq_f32(asrcv[2*j], asrcv[2*j+1]);
  1096. for (int j = 0; j < 2; j++) amaxv[4*j] = vmaxq_f32(amaxv[4*j], amaxv[4*j+2]);
  1097. for (int j = 0; j < 1; j++) amaxv[8*j] = vmaxq_f32(amaxv[8*j], amaxv[8*j+4]);
  1098. const float amax = vmaxvq_f32(amaxv[0]);
  1099. const float d = amax / ((1 << 7) - 1);
  1100. const float id = d ? 1.0f/d : 0.0f;
  1101. y[i].d = d;
  1102. int32x4_t accv = vdupq_n_s32(0);
  1103. for (int j = 0; j < 8; j++) {
  1104. const float32x4_t v = vmulq_n_f32(srcv[j], id);
  1105. const int32x4_t vi = vcvtnq_s32_f32(v);
  1106. y[i].qs[4*j + 0] = vgetq_lane_s32(vi, 0);
  1107. y[i].qs[4*j + 1] = vgetq_lane_s32(vi, 1);
  1108. y[i].qs[4*j + 2] = vgetq_lane_s32(vi, 2);
  1109. y[i].qs[4*j + 3] = vgetq_lane_s32(vi, 3);
  1110. accv = vaddq_s32(accv, vi);
  1111. }
  1112. y[i].s = d * vaddvq_s32(accv);
  1113. }
  1114. #elif defined(__wasm_simd128__)
  1115. for (int i = 0; i < nb; i++) {
  1116. v128_t srcv [8];
  1117. v128_t asrcv[8];
  1118. v128_t amaxv[8];
  1119. for (int j = 0; j < 8; j++) srcv[j] = wasm_v128_load(x + i*32 + 4*j);
  1120. for (int j = 0; j < 8; j++) asrcv[j] = wasm_f32x4_abs(srcv[j]);
  1121. for (int j = 0; j < 4; j++) amaxv[2*j] = wasm_f32x4_max(asrcv[2*j], asrcv[2*j+1]);
  1122. for (int j = 0; j < 2; j++) amaxv[4*j] = wasm_f32x4_max(amaxv[4*j], amaxv[4*j+2]);
  1123. for (int j = 0; j < 1; j++) amaxv[8*j] = wasm_f32x4_max(amaxv[8*j], amaxv[8*j+4]);
  1124. const float amax = MAX(MAX(wasm_f32x4_extract_lane(amaxv[0], 0),
  1125. wasm_f32x4_extract_lane(amaxv[0], 1)),
  1126. MAX(wasm_f32x4_extract_lane(amaxv[0], 2),
  1127. wasm_f32x4_extract_lane(amaxv[0], 3)));
  1128. const float d = amax / ((1 << 7) - 1);
  1129. const float id = d ? 1.0f/d : 0.0f;
  1130. y[i].d = d;
  1131. v128_t accv = wasm_i32x4_splat(0);
  1132. for (int j = 0; j < 8; j++) {
  1133. const v128_t v = wasm_f32x4_mul(srcv[j], wasm_f32x4_splat(id));
  1134. const v128_t vi = wasm_i32x4_trunc_sat_f32x4(v);
  1135. y[i].qs[4*j + 0] = wasm_i32x4_extract_lane(vi, 0);
  1136. y[i].qs[4*j + 1] = wasm_i32x4_extract_lane(vi, 1);
  1137. y[i].qs[4*j + 2] = wasm_i32x4_extract_lane(vi, 2);
  1138. y[i].qs[4*j + 3] = wasm_i32x4_extract_lane(vi, 3);
  1139. accv = wasm_i32x4_add(accv, vi);
  1140. }
  1141. y[i].s = d * (wasm_i32x4_extract_lane(accv, 0) +
  1142. wasm_i32x4_extract_lane(accv, 1) +
  1143. wasm_i32x4_extract_lane(accv, 2) +
  1144. wasm_i32x4_extract_lane(accv, 3));
  1145. }
  1146. #elif defined(__AVX2__) || defined(__AVX__)
  1147. for (int i = 0; i < nb; i++) {
  1148. // Load elements into 4 AVX vectors
  1149. __m256 v0 = _mm256_loadu_ps( x );
  1150. __m256 v1 = _mm256_loadu_ps( x + 8 );
  1151. __m256 v2 = _mm256_loadu_ps( x + 16 );
  1152. __m256 v3 = _mm256_loadu_ps( x + 24 );
  1153. x += 32;
  1154. // Compute max(abs(e)) for the block
  1155. const __m256 signBit = _mm256_set1_ps( -0.0f );
  1156. __m256 maxAbs = _mm256_andnot_ps( signBit, v0 );
  1157. maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) );
  1158. maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) );
  1159. maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) );
  1160. __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) );
  1161. max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) );
  1162. max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) );
  1163. const float maxScalar = _mm_cvtss_f32( max4 );
  1164. // Quantize these floats
  1165. const float d = maxScalar / 127.f;
  1166. y[i].d = d;
  1167. const float id = ( maxScalar != 0.0f ) ? 127.f / maxScalar : 0.0f;
  1168. const __m256 mul = _mm256_set1_ps( id );
  1169. // Apply the multiplier
  1170. v0 = _mm256_mul_ps( v0, mul );
  1171. v1 = _mm256_mul_ps( v1, mul );
  1172. v2 = _mm256_mul_ps( v2, mul );
  1173. v3 = _mm256_mul_ps( v3, mul );
  1174. // Round to nearest integer
  1175. v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST );
  1176. v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST );
  1177. v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST );
  1178. v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST );
  1179. // Convert floats to integers
  1180. __m256i i0 = _mm256_cvtps_epi32( v0 );
  1181. __m256i i1 = _mm256_cvtps_epi32( v1 );
  1182. __m256i i2 = _mm256_cvtps_epi32( v2 );
  1183. __m256i i3 = _mm256_cvtps_epi32( v3 );
  1184. #if defined(__AVX2__)
  1185. // Compute the sum of the quants and set y[i].s
  1186. y[i].s = d * hsum_i32_8(_mm256_add_epi32(_mm256_add_epi32(i0, i1), _mm256_add_epi32(i2, i3)));
  1187. // Convert int32 to int16
  1188. i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15
  1189. i2 = _mm256_packs_epi32( i2, i3 ); // 16, 17, 18, 19, 24, 25, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31
  1190. // Convert int16 to int8
  1191. i0 = _mm256_packs_epi16( i0, i2 ); // 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27, 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31
  1192. // We got our precious signed bytes, but the order is now wrong
  1193. // These AVX2 pack instructions process 16-byte pieces independently
1194. // The following instruction fixes the order
  1195. const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 );
  1196. i0 = _mm256_permutevar8x32_epi32( i0, perm );
  1197. _mm256_storeu_si256((__m256i *)y[i].qs, i0);
  1198. #else
1199. // AVX (without AVX2) lacks the 256-bit integer instructions used above,
1200. // so we split the registers in half and use their 128-bit SSE counterparts
  1201. __m128i ni0 = _mm256_castsi256_si128( i0 );
  1202. __m128i ni1 = _mm256_extractf128_si256( i0, 1);
  1203. __m128i ni2 = _mm256_castsi256_si128( i1 );
  1204. __m128i ni3 = _mm256_extractf128_si256( i1, 1);
  1205. __m128i ni4 = _mm256_castsi256_si128( i2 );
  1206. __m128i ni5 = _mm256_extractf128_si256( i2, 1);
  1207. __m128i ni6 = _mm256_castsi256_si128( i3 );
  1208. __m128i ni7 = _mm256_extractf128_si256( i3, 1);
  1209. // Compute the sum of the quants and set y[i].s
  1210. const __m128i s0 = _mm_add_epi32(_mm_add_epi32(ni0, ni1), _mm_add_epi32(ni2, ni3));
  1211. const __m128i s1 = _mm_add_epi32(_mm_add_epi32(ni4, ni5), _mm_add_epi32(ni6, ni7));
  1212. y[i].s = d * hsum_i32_4(_mm_add_epi32(s0, s1));
  1213. // Convert int32 to int16
  1214. ni0 = _mm_packs_epi32( ni0, ni1 );
  1215. ni2 = _mm_packs_epi32( ni2, ni3 );
  1216. ni4 = _mm_packs_epi32( ni4, ni5 );
  1217. ni6 = _mm_packs_epi32( ni6, ni7 );
  1218. // Convert int16 to int8
  1219. ni0 = _mm_packs_epi16( ni0, ni2 );
  1220. ni4 = _mm_packs_epi16( ni4, ni6 );
  1221. _mm_storeu_si128((__m128i *)(y[i].qs + 0), ni0);
  1222. _mm_storeu_si128((__m128i *)(y[i].qs + 16), ni4);
  1223. #endif
  1224. }
  1225. #else
  1226. // scalar
  1227. quantize_row_q8_1_reference(x, y, k);
  1228. #endif
  1229. }
  1230. static void dequantize_row_q4_0(const block_q4_0 * restrict x, float * restrict y, int k) {
  1231. static const int qk = QK4_0;
  1232. assert(k % qk == 0);
  1233. const int nb = k / qk;
  1234. for (int i = 0; i < nb; i++) {
  1235. const float d = GGML_FP16_TO_FP32(x[i].d);
  1236. for (int j = 0; j < qk/2; ++j) {
  1237. const int x0 = (x[i].qs[j] & 0x0F) - 8;
  1238. const int x1 = (x[i].qs[j] >> 4) - 8;
  1239. y[i*qk + j + 0 ] = x0*d;
  1240. y[i*qk + j + qk/2] = x1*d;
  1241. }
  1242. }
  1243. }
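// Note the in-block layout shared by all the nibble formats: the low nibble of byte j
// holds element j and the high nibble holds element j + qk/2, so each byte contributes
// one element to each half of the 32-element block.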
  1244. static void dequantize_row_q4_1(const block_q4_1 * restrict x, float * restrict y, int k) {
  1245. static const int qk = QK4_1;
  1246. assert(k % qk == 0);
  1247. const int nb = k / qk;
  1248. for (int i = 0; i < nb; i++) {
  1249. const float d = GGML_FP16_TO_FP32(x[i].d);
  1250. const float m = GGML_FP16_TO_FP32(x[i].m);
  1251. for (int j = 0; j < qk/2; ++j) {
  1252. const int x0 = (x[i].qs[j] & 0x0F);
  1253. const int x1 = (x[i].qs[j] >> 4);
  1254. y[i*qk + j + 0 ] = x0*d + m;
  1255. y[i*qk + j + qk/2] = x1*d + m;
  1256. }
  1257. }
  1258. }
  1259. static void dequantize_row_q5_0(const block_q5_0 * restrict x, float * restrict y, int k) {
  1260. static const int qk = QK5_0;
  1261. assert(k % qk == 0);
  1262. const int nb = k / qk;
  1263. for (int i = 0; i < nb; i++) {
  1264. const float d = GGML_FP16_TO_FP32(x[i].d);
  1265. uint32_t qh;
  1266. memcpy(&qh, x[i].qh, sizeof(qh));
  1267. for (int j = 0; j < qk/2; ++j) {
  1268. const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10;
  1269. const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10;
  1270. const int32_t x0 = ((x[i].qs[j] & 0x0F) | xh_0) - 16;
  1271. const int32_t x1 = ((x[i].qs[j] >> 4) | xh_1) - 16;
  1272. y[i*qk + j + 0 ] = x0*d;
  1273. y[i*qk + j + qk/2] = x1*d;
  1274. }
  1275. }
  1276. }
  1277. static void dequantize_row_q5_1(const block_q5_1 * restrict x, float * restrict y, int k) {
  1278. static const int qk = QK5_1;
  1279. assert(k % qk == 0);
  1280. const int nb = k / qk;
  1281. for (int i = 0; i < nb; i++) {
  1282. const float d = GGML_FP16_TO_FP32(x[i].d);
  1283. const float m = GGML_FP16_TO_FP32(x[i].m);
  1284. uint32_t qh;
  1285. memcpy(&qh, x[i].qh, sizeof(qh));
  1286. for (int j = 0; j < qk/2; ++j) {
  1287. const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10;
  1288. const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10;
  1289. const int x0 = (x[i].qs[j] & 0x0F) | xh_0;
  1290. const int x1 = (x[i].qs[j] >> 4) | xh_1;
  1291. y[i*qk + j + 0 ] = x0*d + m;
  1292. y[i*qk + j + qk/2] = x1*d + m;
  1293. }
  1294. }
  1295. }
  1296. static void dequantize_row_q8_0(const void * restrict vx, float * restrict y, int k) {
  1297. static const int qk = QK8_0;
  1298. assert(k % qk == 0);
  1299. const int nb = k / qk;
  1300. const block_q8_0 * restrict x = vx;
  1301. for (int i = 0; i < nb; i++) {
  1302. const float d = GGML_FP16_TO_FP32(x[i].d);
  1303. for (int j = 0; j < qk; ++j) {
  1304. y[i*qk + j] = x[i].qs[j]*d;
  1305. }
  1306. }
  1307. }
  1308. static void ggml_vec_dot_f32(const int n, float * restrict s, const float * restrict x, const float * restrict y);
  1309. static void ggml_vec_dot_f16(const int n, float * restrict s, ggml_fp16_t * restrict x, ggml_fp16_t * restrict y);
  1310. static void ggml_vec_dot_q4_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy);
  1311. static void ggml_vec_dot_q4_1_q8_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy);
  1312. static void ggml_vec_dot_q5_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy);
  1313. static void ggml_vec_dot_q5_1_q8_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy);
  1314. static void ggml_vec_dot_q8_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy);
  1315. static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = {
  1316. [GGML_TYPE_I8] = {
  1317. .type_name = "i8",
  1318. .blck_size = 1,
  1319. .type_size = sizeof(int8_t),
  1320. .is_quantized = false,
  1321. },
  1322. [GGML_TYPE_I16] = {
  1323. .type_name = "i16",
  1324. .blck_size = 1,
  1325. .type_size = sizeof(int16_t),
  1326. .is_quantized = false,
  1327. },
  1328. [GGML_TYPE_I32] = {
  1329. .type_name = "i32",
  1330. .blck_size = 1,
  1331. .type_size = sizeof(int32_t),
  1332. .is_quantized = false,
  1333. },
  1334. [GGML_TYPE_F32] = {
  1335. .type_name = "f32",
  1336. .blck_size = 1,
  1337. .type_size = sizeof(float),
  1338. .is_quantized = false,
  1339. .vec_dot = (ggml_vec_dot_t) ggml_vec_dot_f32,
  1340. .vec_dot_type = GGML_TYPE_F32,
  1341. },
  1342. [GGML_TYPE_F16] = {
  1343. .type_name = "f16",
  1344. .blck_size = 1,
  1345. .type_size = sizeof(ggml_fp16_t),
  1346. .is_quantized = false,
  1347. .to_float = (ggml_to_float_t) ggml_fp16_to_fp32_row,
  1348. .from_float = (ggml_from_float_t) ggml_fp32_to_fp16_row,
  1349. .from_float_reference = (ggml_from_float_t) ggml_fp32_to_fp16_row,
  1350. .vec_dot = (ggml_vec_dot_t) ggml_vec_dot_f16,
  1351. .vec_dot_type = GGML_TYPE_F16,
  1352. },
  1353. [GGML_TYPE_Q4_0] = {
  1354. .type_name = "q4_0",
  1355. .blck_size = QK4_0,
  1356. .type_size = sizeof(block_q4_0),
  1357. .is_quantized = true,
  1358. .to_float = (ggml_to_float_t) dequantize_row_q4_0,
  1359. .from_float = quantize_row_q4_0,
  1360. .from_float_reference = (ggml_from_float_t) quantize_row_q4_0_reference,
  1361. .vec_dot = ggml_vec_dot_q4_0_q8_0,
  1362. .vec_dot_type = GGML_TYPE_Q8_0,
  1363. },
  1364. [GGML_TYPE_Q4_1] = {
  1365. .type_name = "q4_1",
  1366. .blck_size = QK4_1,
  1367. .type_size = sizeof(block_q4_1),
  1368. .is_quantized = true,
  1369. .to_float = (ggml_to_float_t) dequantize_row_q4_1,
  1370. .from_float = quantize_row_q4_1,
  1371. .from_float_reference = (ggml_from_float_t) quantize_row_q4_1_reference,
  1372. .vec_dot = ggml_vec_dot_q4_1_q8_1,
  1373. .vec_dot_type = GGML_TYPE_Q8_1,
  1374. },
  1375. [GGML_TYPE_Q5_0] = {
  1376. .type_name = "q5_0",
  1377. .blck_size = QK5_0,
  1378. .type_size = sizeof(block_q5_0),
  1379. .is_quantized = true,
  1380. .to_float = (ggml_to_float_t) dequantize_row_q5_0,
  1381. .from_float = quantize_row_q5_0,
  1382. .from_float_reference = (ggml_from_float_t) quantize_row_q5_0_reference,
  1383. .vec_dot = ggml_vec_dot_q5_0_q8_0,
  1384. .vec_dot_type = GGML_TYPE_Q8_0,
  1385. },
  1386. [GGML_TYPE_Q5_1] = {
  1387. .type_name = "q5_1",
  1388. .blck_size = QK5_1,
  1389. .type_size = sizeof(block_q5_1),
  1390. .is_quantized = true,
  1391. .to_float = (ggml_to_float_t) dequantize_row_q5_1,
  1392. .from_float = quantize_row_q5_1,
  1393. .from_float_reference = (ggml_from_float_t) quantize_row_q5_1_reference,
  1394. .vec_dot = ggml_vec_dot_q5_1_q8_1,
  1395. .vec_dot_type = GGML_TYPE_Q8_1,
  1396. },
  1397. [GGML_TYPE_Q8_0] = {
  1398. .type_name = "q8_0",
  1399. .blck_size = QK8_0,
  1400. .type_size = sizeof(block_q8_0),
  1401. .is_quantized = true,
  1402. .to_float = dequantize_row_q8_0,
  1403. .from_float = quantize_row_q8_0,
  1404. .from_float_reference = (ggml_from_float_t) quantize_row_q8_0_reference,
  1405. .vec_dot = ggml_vec_dot_q8_0_q8_0,
  1406. .vec_dot_type = GGML_TYPE_Q8_0,
  1407. },
  1408. [GGML_TYPE_Q8_1] = {
  1409. .type_name = "q8_1",
  1410. .blck_size = QK8_1,
  1411. .type_size = sizeof(block_q8_1),
  1412. .is_quantized = true,
  1413. .from_float = quantize_row_q8_1,
  1414. .from_float_reference = (ggml_from_float_t) quantize_row_q8_1_reference,
  1415. .vec_dot_type = GGML_TYPE_Q8_1,
  1416. },
  1417. #ifdef GGML_USE_K_QUANTS
  1418. [GGML_TYPE_Q2_K] = {
  1419. .type_name = "q2_K",
  1420. .blck_size = QK_K,
  1421. .type_size = sizeof(block_q2_K),
  1422. .is_quantized = true,
  1423. .to_float = (ggml_to_float_t) dequantize_row_q2_K,
  1424. .from_float = quantize_row_q2_K,
  1425. .from_float_reference = (ggml_from_float_t) quantize_row_q2_K_reference,
  1426. .vec_dot = ggml_vec_dot_q2_K_q8_K,
  1427. .vec_dot_type = GGML_TYPE_Q8_K,
  1428. },
  1429. [GGML_TYPE_Q3_K] = {
  1430. .type_name = "q3_K",
  1431. .blck_size = QK_K,
  1432. .type_size = sizeof(block_q3_K),
  1433. .is_quantized = true,
  1434. .to_float = (ggml_to_float_t) dequantize_row_q3_K,
  1435. .from_float = quantize_row_q3_K,
  1436. .from_float_reference = (ggml_from_float_t) quantize_row_q3_K_reference,
  1437. .vec_dot = ggml_vec_dot_q3_K_q8_K,
  1438. .vec_dot_type = GGML_TYPE_Q8_K,
  1439. },
  1440. [GGML_TYPE_Q4_K] = {
  1441. .type_name = "q4_K",
  1442. .blck_size = QK_K,
  1443. .type_size = sizeof(block_q4_K),
  1444. .is_quantized = true,
  1445. .to_float = (ggml_to_float_t) dequantize_row_q4_K,
  1446. .from_float = quantize_row_q4_K,
  1447. .from_float_reference = (ggml_from_float_t) quantize_row_q4_K_reference,
  1448. .vec_dot = ggml_vec_dot_q4_K_q8_K,
  1449. .vec_dot_type = GGML_TYPE_Q8_K,
  1450. },
  1451. [GGML_TYPE_Q5_K] = {
  1452. .type_name = "q5_K",
  1453. .blck_size = QK_K,
  1454. .type_size = sizeof(block_q5_K),
  1455. .is_quantized = true,
  1456. .to_float = (ggml_to_float_t) dequantize_row_q5_K,
  1457. .from_float = quantize_row_q5_K,
  1458. .from_float_reference = (ggml_from_float_t) quantize_row_q5_K_reference,
  1459. .vec_dot = ggml_vec_dot_q5_K_q8_K,
  1460. .vec_dot_type = GGML_TYPE_Q8_K,
  1461. },
  1462. [GGML_TYPE_Q6_K] = {
  1463. .type_name = "q6_K",
  1464. .blck_size = QK_K,
  1465. .type_size = sizeof(block_q6_K),
  1466. .is_quantized = true,
  1467. .to_float = (ggml_to_float_t) dequantize_row_q6_K,
  1468. .from_float = quantize_row_q6_K,
  1469. .from_float_reference = (ggml_from_float_t) quantize_row_q6_K_reference,
  1470. .vec_dot = ggml_vec_dot_q6_K_q8_K,
  1471. .vec_dot_type = GGML_TYPE_Q8_K,
  1472. },
  1473. [GGML_TYPE_Q8_K] = {
  1474. .type_name = "q8_K",
  1475. .blck_size = QK_K,
  1476. .type_size = sizeof(block_q8_K),
  1477. .is_quantized = true,
  1478. .from_float = quantize_row_q8_K,
  1479. }
  1480. #endif
  1481. };
  1482. // For internal test use
  1483. ggml_type_traits_t ggml_internal_get_type_traits(enum ggml_type type) {
  1484. GGML_ASSERT(type < GGML_TYPE_COUNT);
  1485. return type_traits[type];
  1486. }
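// Illustrative sketch (test-style usage, not code from this file): round-tripping one
// block through a quantized type via the traits table.
//
//   float src[QK8_0] = { 0 };   // fill with real data in practice
//   float dst[QK8_0];
//   block_q8_0 tmp;
//   const ggml_type_traits_t qt = ggml_internal_get_type_traits(GGML_TYPE_Q8_0);
//   qt.from_float(src, &tmp, QK8_0);   // quantize one block
//   qt.to_float  (&tmp, dst, QK8_0);   // dequantize it back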
  1487. //
  1488. // simd mappings
  1489. //
  1490. // we define a common set of C macros which map to specific intrinsics based on the current architecture
  1491. // we then implement the fundamental computation operations below using only these macros
1492. // adding support for new architectures requires defining the corresponding SIMD macros
  1493. //
  1494. // GGML_F32_STEP / GGML_F16_STEP
  1495. // number of elements to process in a single step
  1496. //
  1497. // GGML_F32_EPR / GGML_F16_EPR
  1498. // number of elements to fit in a single register
  1499. //
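// Illustrative sketch (not code from this file) of how the macros compose, e.g. in a
// dot product over n floats:
//
//   GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO };
//   const int np = (n & ~(GGML_F32_STEP - 1));
//   for (int i = 0; i < np; i += GGML_F32_STEP) {
//       for (int j = 0; j < GGML_F32_ARR; j++) {
//           GGML_F32_VEC ax = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR);
//           GGML_F32_VEC ay = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
//           sum[j] = GGML_F32_VEC_FMA(sum[j], ax, ay);
//       }
//   }
//   float res;
//   GGML_F32_VEC_REDUCE(res, sum);
//   // remaining elements (i = np .. n-1) are handled with scalar code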
  1500. #if defined(__ARM_NEON) && defined(__ARM_FEATURE_FMA)
  1501. #define GGML_SIMD
  1502. // F32 NEON
  1503. #define GGML_F32_STEP 16
  1504. #define GGML_F32_EPR 4
  1505. #define GGML_F32x4 float32x4_t
  1506. #define GGML_F32x4_ZERO vdupq_n_f32(0.0f)
  1507. #define GGML_F32x4_SET1(x) vdupq_n_f32(x)
  1508. #define GGML_F32x4_LOAD vld1q_f32
  1509. #define GGML_F32x4_STORE vst1q_f32
  1510. #define GGML_F32x4_FMA(a, b, c) vfmaq_f32(a, b, c)
  1511. #define GGML_F32x4_ADD vaddq_f32
  1512. #define GGML_F32x4_MUL vmulq_f32
  1513. #define GGML_F32x4_REDUCE_ONE(x) vaddvq_f32(x)
  1514. #define GGML_F32x4_REDUCE(res, x) \
  1515. { \
  1516. int offset = GGML_F32_ARR >> 1; \
  1517. for (int i = 0; i < offset; ++i) { \
  1518. x[i] = vaddq_f32(x[i], x[offset+i]); \
  1519. } \
  1520. offset >>= 1; \
  1521. for (int i = 0; i < offset; ++i) { \
  1522. x[i] = vaddq_f32(x[i], x[offset+i]); \
  1523. } \
  1524. offset >>= 1; \
  1525. for (int i = 0; i < offset; ++i) { \
  1526. x[i] = vaddq_f32(x[i], x[offset+i]); \
  1527. } \
  1528. res = GGML_F32x4_REDUCE_ONE(x[0]); \
  1529. }
  1530. #define GGML_F32_VEC GGML_F32x4
  1531. #define GGML_F32_VEC_ZERO GGML_F32x4_ZERO
  1532. #define GGML_F32_VEC_SET1 GGML_F32x4_SET1
  1533. #define GGML_F32_VEC_LOAD GGML_F32x4_LOAD
  1534. #define GGML_F32_VEC_STORE GGML_F32x4_STORE
  1535. #define GGML_F32_VEC_FMA GGML_F32x4_FMA
  1536. #define GGML_F32_VEC_ADD GGML_F32x4_ADD
  1537. #define GGML_F32_VEC_MUL GGML_F32x4_MUL
  1538. #define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE
  1539. // F16 NEON
  1540. #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
  1541. #define GGML_F16_STEP 32
  1542. #define GGML_F16_EPR 8
  1543. #define GGML_F16x8 float16x8_t
  1544. #define GGML_F16x8_ZERO vdupq_n_f16(0.0f)
  1545. #define GGML_F16x8_SET1(x) vdupq_n_f16(x)
  1546. #define GGML_F16x8_LOAD vld1q_f16
  1547. #define GGML_F16x8_STORE vst1q_f16
  1548. #define GGML_F16x8_FMA(a, b, c) vfmaq_f16(a, b, c)
  1549. #define GGML_F16x8_ADD vaddq_f16
  1550. #define GGML_F16x8_MUL vmulq_f16
  1551. #define GGML_F16x8_REDUCE(res, x) \
  1552. { \
  1553. int offset = GGML_F16_ARR >> 1; \
  1554. for (int i = 0; i < offset; ++i) { \
  1555. x[i] = vaddq_f16(x[i], x[offset+i]); \
  1556. } \
  1557. offset >>= 1; \
  1558. for (int i = 0; i < offset; ++i) { \
  1559. x[i] = vaddq_f16(x[i], x[offset+i]); \
  1560. } \
  1561. offset >>= 1; \
  1562. for (int i = 0; i < offset; ++i) { \
  1563. x[i] = vaddq_f16(x[i], x[offset+i]); \
  1564. } \
  1565. const float32x4_t t0 = vcvt_f32_f16(vget_low_f16 (x[0])); \
  1566. const float32x4_t t1 = vcvt_f32_f16(vget_high_f16(x[0])); \
  1567. res = (ggml_float) vaddvq_f32(vaddq_f32(t0, t1)); \
  1568. }
  1569. #define GGML_F16_VEC GGML_F16x8
  1570. #define GGML_F16_VEC_ZERO GGML_F16x8_ZERO
  1571. #define GGML_F16_VEC_SET1 GGML_F16x8_SET1
  1572. #define GGML_F16_VEC_LOAD(p, i) GGML_F16x8_LOAD(p)
  1573. #define GGML_F16_VEC_STORE(p, r, i) GGML_F16x8_STORE(p, r[i])
  1574. #define GGML_F16_VEC_FMA GGML_F16x8_FMA
  1575. #define GGML_F16_VEC_ADD GGML_F16x8_ADD
  1576. #define GGML_F16_VEC_MUL GGML_F16x8_MUL
  1577. #define GGML_F16_VEC_REDUCE GGML_F16x8_REDUCE
  1578. #else
  1579. // if FP16 vector arithmetic is not supported, we use FP32 instead
  1580. // and take advantage of the vcvt_ functions to convert to/from FP16
  1581. #define GGML_F16_STEP 16
  1582. #define GGML_F16_EPR 4
  1583. #define GGML_F32Cx4 float32x4_t
  1584. #define GGML_F32Cx4_ZERO vdupq_n_f32(0.0f)
  1585. #define GGML_F32Cx4_SET1(x) vdupq_n_f32(x)
  1586. #define GGML_F32Cx4_LOAD(x) vcvt_f32_f16(vld1_f16(x))
  1587. #define GGML_F32Cx4_STORE(x, y) vst1_f16(x, vcvt_f16_f32(y))
  1588. #define GGML_F32Cx4_FMA(a, b, c) vfmaq_f32(a, b, c)
  1589. #define GGML_F32Cx4_ADD vaddq_f32
  1590. #define GGML_F32Cx4_MUL vmulq_f32
  1591. #define GGML_F32Cx4_REDUCE GGML_F32x4_REDUCE
  1592. #define GGML_F16_VEC GGML_F32Cx4
  1593. #define GGML_F16_VEC_ZERO GGML_F32Cx4_ZERO
  1594. #define GGML_F16_VEC_SET1 GGML_F32Cx4_SET1
  1595. #define GGML_F16_VEC_LOAD(p, i) GGML_F32Cx4_LOAD(p)
  1596. #define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx4_STORE(p, r[i])
  1597. #define GGML_F16_VEC_FMA GGML_F32Cx4_FMA
  1598. #define GGML_F16_VEC_ADD GGML_F32Cx4_ADD
  1599. #define GGML_F16_VEC_MUL GGML_F32Cx4_MUL
  1600. #define GGML_F16_VEC_REDUCE GGML_F32Cx4_REDUCE
  1601. #endif
  1602. #elif defined(__AVX__)
  1603. #define GGML_SIMD
  1604. // F32 AVX
  1605. #define GGML_F32_STEP 32
  1606. #define GGML_F32_EPR 8
  1607. #define GGML_F32x8 __m256
  1608. #define GGML_F32x8_ZERO _mm256_setzero_ps()
  1609. #define GGML_F32x8_SET1(x) _mm256_set1_ps(x)
  1610. #define GGML_F32x8_LOAD _mm256_loadu_ps
  1611. #define GGML_F32x8_STORE _mm256_storeu_ps
  1612. #if defined(__FMA__)
  1613. #define GGML_F32x8_FMA(a, b, c) _mm256_fmadd_ps(b, c, a)
  1614. #else
  1615. #define GGML_F32x8_FMA(a, b, c) _mm256_add_ps(_mm256_mul_ps(b, c), a)
  1616. #endif
  1617. #define GGML_F32x8_ADD _mm256_add_ps
  1618. #define GGML_F32x8_MUL _mm256_mul_ps
  1619. #define GGML_F32x8_REDUCE(res, x) \
  1620. { \
  1621. int offset = GGML_F32_ARR >> 1; \
  1622. for (int i = 0; i < offset; ++i) { \
  1623. x[i] = _mm256_add_ps(x[i], x[offset+i]); \
  1624. } \
  1625. offset >>= 1; \
  1626. for (int i = 0; i < offset; ++i) { \
  1627. x[i] = _mm256_add_ps(x[i], x[offset+i]); \
  1628. } \
  1629. offset >>= 1; \
  1630. for (int i = 0; i < offset; ++i) { \
  1631. x[i] = _mm256_add_ps(x[i], x[offset+i]); \
  1632. } \
  1633. const __m128 t0 = _mm_add_ps(_mm256_castps256_ps128(x[0]), \
  1634. _mm256_extractf128_ps(x[0], 1)); \
  1635. const __m128 t1 = _mm_hadd_ps(t0, t0); \
  1636. res = _mm_cvtss_f32(_mm_hadd_ps(t1, t1)); \
  1637. }
  1638. // TODO: is this optimal ?
  1639. #define GGML_F32_VEC GGML_F32x8
  1640. #define GGML_F32_VEC_ZERO GGML_F32x8_ZERO
  1641. #define GGML_F32_VEC_SET1 GGML_F32x8_SET1
  1642. #define GGML_F32_VEC_LOAD GGML_F32x8_LOAD
  1643. #define GGML_F32_VEC_STORE GGML_F32x8_STORE
  1644. #define GGML_F32_VEC_FMA GGML_F32x8_FMA
  1645. #define GGML_F32_VEC_ADD GGML_F32x8_ADD
  1646. #define GGML_F32_VEC_MUL GGML_F32x8_MUL
  1647. #define GGML_F32_VEC_REDUCE GGML_F32x8_REDUCE
  1648. // F16 AVX
  1649. #define GGML_F16_STEP 32
  1650. #define GGML_F16_EPR 8
  1651. // F16 arithmetic is not supported by AVX, so we use F32 instead
  1652. #define GGML_F32Cx8 __m256
  1653. #define GGML_F32Cx8_ZERO _mm256_setzero_ps()
  1654. #define GGML_F32Cx8_SET1(x) _mm256_set1_ps(x)
  1655. #if defined(__F16C__)
  1656. // the _mm256_cvt intrinsics require F16C
  1657. #define GGML_F32Cx8_LOAD(x) _mm256_cvtph_ps(_mm_loadu_si128((__m128i *)(x)))
  1658. #define GGML_F32Cx8_STORE(x, y) _mm_storeu_si128((__m128i *)(x), _mm256_cvtps_ph(y, 0))
  1659. #else
  1660. static inline __m256 __avx_f32cx8_load(ggml_fp16_t *x) {
  1661. float tmp[8];
  1662. for (int i = 0; i < 8; i++) {
  1663. tmp[i] = GGML_FP16_TO_FP32(x[i]);
  1664. }
  1665. return _mm256_loadu_ps(tmp);
  1666. }
  1667. static inline void __avx_f32cx8_store(ggml_fp16_t *x, __m256 y) {
  1668. float arr[8];
  1669. _mm256_storeu_ps(arr, y);
  1670. for (int i = 0; i < 8; i++)
  1671. x[i] = GGML_FP32_TO_FP16(arr[i]);
  1672. }
  1673. #define GGML_F32Cx8_LOAD(x) __avx_f32cx8_load(x)
  1674. #define GGML_F32Cx8_STORE(x, y) __avx_f32cx8_store(x, y)
  1675. #endif
  1676. #define GGML_F32Cx8_FMA GGML_F32x8_FMA
  1677. #define GGML_F32Cx8_ADD _mm256_add_ps
  1678. #define GGML_F32Cx8_MUL _mm256_mul_ps
  1679. #define GGML_F32Cx8_REDUCE GGML_F32x8_REDUCE
  1680. #define GGML_F16_VEC GGML_F32Cx8
  1681. #define GGML_F16_VEC_ZERO GGML_F32Cx8_ZERO
  1682. #define GGML_F16_VEC_SET1 GGML_F32Cx8_SET1
  1683. #define GGML_F16_VEC_LOAD(p, i) GGML_F32Cx8_LOAD(p)
  1684. #define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx8_STORE(p, r[i])
  1685. #define GGML_F16_VEC_FMA GGML_F32Cx8_FMA
  1686. #define GGML_F16_VEC_ADD GGML_F32Cx8_ADD
  1687. #define GGML_F16_VEC_MUL GGML_F32Cx8_MUL
  1688. #define GGML_F16_VEC_REDUCE GGML_F32Cx8_REDUCE
  1689. #elif defined(__POWER9_VECTOR__)
  1690. #define GGML_SIMD
  1691. // F32 POWER9
  1692. #define GGML_F32_STEP 32
  1693. #define GGML_F32_EPR 4
  1694. #define GGML_F32x4 vector float
  1695. #define GGML_F32x4_ZERO 0.0f
  1696. #define GGML_F32x4_SET1 vec_splats
  1697. #define GGML_F32x4_LOAD(p) vec_xl(0, p)
  1698. #define GGML_F32x4_STORE(p, r) vec_xst(r, 0, p)
  1699. #define GGML_F32x4_FMA(a, b, c) vec_madd(b, c, a)
  1700. #define GGML_F32x4_ADD vec_add
  1701. #define GGML_F32x4_MUL vec_mul
  1702. #define GGML_F32x4_REDUCE(res, x) \
  1703. { \
  1704. int offset = GGML_F32_ARR >> 1; \
  1705. for (int i = 0; i < offset; ++i) { \
  1706. x[i] = vec_add(x[i], x[offset+i]); \
  1707. } \
  1708. offset >>= 1; \
  1709. for (int i = 0; i < offset; ++i) { \
  1710. x[i] = vec_add(x[i], x[offset+i]); \
  1711. } \
  1712. offset >>= 1; \
  1713. for (int i = 0; i < offset; ++i) { \
  1714. x[i] = vec_add(x[i], x[offset+i]); \
  1715. } \
  1716. res = vec_extract(x[0], 0) + \
  1717. vec_extract(x[0], 1) + \
  1718. vec_extract(x[0], 2) + \
  1719. vec_extract(x[0], 3); \
  1720. }
  1721. #define GGML_F32_VEC GGML_F32x4
  1722. #define GGML_F32_VEC_ZERO GGML_F32x4_ZERO
  1723. #define GGML_F32_VEC_SET1 GGML_F32x4_SET1
  1724. #define GGML_F32_VEC_LOAD GGML_F32x4_LOAD
  1725. #define GGML_F32_VEC_STORE GGML_F32x4_STORE
  1726. #define GGML_F32_VEC_FMA GGML_F32x4_FMA
  1727. #define GGML_F32_VEC_ADD GGML_F32x4_ADD
  1728. #define GGML_F32_VEC_MUL GGML_F32x4_MUL
  1729. #define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE
  1730. // F16 POWER9
  1731. #define GGML_F16_STEP GGML_F32_STEP
  1732. #define GGML_F16_EPR GGML_F32_EPR
  1733. #define GGML_F16_VEC GGML_F32x4
  1734. #define GGML_F16_VEC_ZERO GGML_F32x4_ZERO
  1735. #define GGML_F16_VEC_SET1 GGML_F32x4_SET1
  1736. #define GGML_F16_VEC_FMA GGML_F32x4_FMA
  1737. #define GGML_F16_VEC_REDUCE GGML_F32x4_REDUCE
  1738. // Use vec_xl, not vec_ld, in case the load address is not aligned.
  1739. #define GGML_F16_VEC_LOAD(p, i) (i & 0x1) ? \
  1740. vec_extract_fp32_from_shorth(vec_xl(0, p - GGML_F16_EPR)) : \
  1741. vec_extract_fp32_from_shortl(vec_xl(0, p))
  1742. #define GGML_ENDIAN_BYTE(i) ((unsigned char *)&(uint16_t){1})[i]
  1743. #define GGML_F16_VEC_STORE(p, r, i) \
  1744. if (i & 0x1) \
  1745. vec_xst(vec_pack_to_short_fp32(r[i - GGML_ENDIAN_BYTE(1)], \
  1746. r[i - GGML_ENDIAN_BYTE(0)]), \
  1747. 0, p - GGML_F16_EPR)
  1748. #elif defined(__wasm_simd128__)
  1749. #define GGML_SIMD
  1750. // F32 WASM
  1751. #define GGML_F32_STEP 16
  1752. #define GGML_F32_EPR 4
  1753. #define GGML_F32x4 v128_t
  1754. #define GGML_F32x4_ZERO wasm_f32x4_splat(0.0f)
  1755. #define GGML_F32x4_SET1(x) wasm_f32x4_splat(x)
  1756. #define GGML_F32x4_LOAD wasm_v128_load
  1757. #define GGML_F32x4_STORE wasm_v128_store
  1758. #define GGML_F32x4_FMA(a, b, c) wasm_f32x4_add(wasm_f32x4_mul(b, c), a)
  1759. #define GGML_F32x4_ADD wasm_f32x4_add
  1760. #define GGML_F32x4_MUL wasm_f32x4_mul
  1761. #define GGML_F32x4_REDUCE(res, x) \
  1762. { \
  1763. int offset = GGML_F32_ARR >> 1; \
  1764. for (int i = 0; i < offset; ++i) { \
  1765. x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
  1766. } \
  1767. offset >>= 1; \
  1768. for (int i = 0; i < offset; ++i) { \
  1769. x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
  1770. } \
  1771. offset >>= 1; \
  1772. for (int i = 0; i < offset; ++i) { \
  1773. x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
  1774. } \
  1775. res = wasm_f32x4_extract_lane(x[0], 0) + \
  1776. wasm_f32x4_extract_lane(x[0], 1) + \
  1777. wasm_f32x4_extract_lane(x[0], 2) + \
  1778. wasm_f32x4_extract_lane(x[0], 3); \
  1779. }
  1780. #define GGML_F32_VEC GGML_F32x4
  1781. #define GGML_F32_VEC_ZERO GGML_F32x4_ZERO
  1782. #define GGML_F32_VEC_SET1 GGML_F32x4_SET1
  1783. #define GGML_F32_VEC_LOAD GGML_F32x4_LOAD
  1784. #define GGML_F32_VEC_STORE GGML_F32x4_STORE
  1785. #define GGML_F32_VEC_FMA GGML_F32x4_FMA
  1786. #define GGML_F32_VEC_ADD GGML_F32x4_ADD
  1787. #define GGML_F32_VEC_MUL GGML_F32x4_MUL
  1788. #define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE
  1789. // F16 WASM
  1790. #define GGML_F16_STEP 16
  1791. #define GGML_F16_EPR 4
  1792. inline static v128_t __wasm_f16x4_load(const ggml_fp16_t * p) {
  1793. float tmp[4];
  1794. tmp[0] = GGML_FP16_TO_FP32(p[0]);
  1795. tmp[1] = GGML_FP16_TO_FP32(p[1]);
  1796. tmp[2] = GGML_FP16_TO_FP32(p[2]);
  1797. tmp[3] = GGML_FP16_TO_FP32(p[3]);
  1798. return wasm_v128_load(tmp);
  1799. }
  1800. inline static void __wasm_f16x4_store(ggml_fp16_t * p, v128_t x) {
  1801. float tmp[4];
  1802. wasm_v128_store(tmp, x);
  1803. p[0] = GGML_FP32_TO_FP16(tmp[0]);
  1804. p[1] = GGML_FP32_TO_FP16(tmp[1]);
  1805. p[2] = GGML_FP32_TO_FP16(tmp[2]);
  1806. p[3] = GGML_FP32_TO_FP16(tmp[3]);
  1807. }
  1808. #define GGML_F16x4 v128_t
  1809. #define GGML_F16x4_ZERO wasm_f32x4_splat(0.0f)
  1810. #define GGML_F16x4_SET1(x) wasm_f32x4_splat(x)
  1811. #define GGML_F16x4_LOAD(x) __wasm_f16x4_load(x)
  1812. #define GGML_F16x4_STORE(x, y) __wasm_f16x4_store(x, y)
  1813. #define GGML_F16x4_FMA GGML_F32x4_FMA
  1814. #define GGML_F16x4_ADD wasm_f32x4_add
  1815. #define GGML_F16x4_MUL wasm_f32x4_mul
  1816. #define GGML_F16x4_REDUCE(res, x) \
  1817. { \
  1818. int offset = GGML_F16_ARR >> 1; \
  1819. for (int i = 0; i < offset; ++i) { \
  1820. x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
  1821. } \
  1822. offset >>= 1; \
  1823. for (int i = 0; i < offset; ++i) { \
  1824. x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
  1825. } \
  1826. offset >>= 1; \
  1827. for (int i = 0; i < offset; ++i) { \
  1828. x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
  1829. } \
  1830. res = wasm_f32x4_extract_lane(x[0], 0) + \
  1831. wasm_f32x4_extract_lane(x[0], 1) + \
  1832. wasm_f32x4_extract_lane(x[0], 2) + \
  1833. wasm_f32x4_extract_lane(x[0], 3); \
  1834. }
  1835. #define GGML_F16_VEC GGML_F16x4
  1836. #define GGML_F16_VEC_ZERO GGML_F16x4_ZERO
  1837. #define GGML_F16_VEC_SET1 GGML_F16x4_SET1
  1838. #define GGML_F16_VEC_LOAD(p, i) GGML_F16x4_LOAD(p)
  1839. #define GGML_F16_VEC_STORE(p, r, i) GGML_F16x4_STORE(p, r[i])
  1840. #define GGML_F16_VEC_FMA GGML_F16x4_FMA
  1841. #define GGML_F16_VEC_ADD GGML_F16x4_ADD
  1842. #define GGML_F16_VEC_MUL GGML_F16x4_MUL
  1843. #define GGML_F16_VEC_REDUCE GGML_F16x4_REDUCE
  1844. #elif defined(__SSE3__)
  1845. #define GGML_SIMD
  1846. // F32 SSE
  1847. #define GGML_F32_STEP 32
  1848. #define GGML_F32_EPR 4
  1849. #define GGML_F32x4 __m128
  1850. #define GGML_F32x4_ZERO _mm_setzero_ps()
  1851. #define GGML_F32x4_SET1(x) _mm_set1_ps(x)
  1852. #define GGML_F32x4_LOAD _mm_loadu_ps
  1853. #define GGML_F32x4_STORE _mm_storeu_ps
  1854. #if defined(__FMA__)
// TODO: verify that this FMA mapping works correctly
  1856. #define GGML_F32x4_FMA(a, b, c) _mm_fmadd_ps(b, c, a)
  1857. #else
  1858. #define GGML_F32x4_FMA(a, b, c) _mm_add_ps(_mm_mul_ps(b, c), a)
  1859. #endif
  1860. #define GGML_F32x4_ADD _mm_add_ps
  1861. #define GGML_F32x4_MUL _mm_mul_ps
  1862. #define GGML_F32x4_REDUCE(res, x) \
  1863. { \
  1864. int offset = GGML_F32_ARR >> 1; \
  1865. for (int i = 0; i < offset; ++i) { \
  1866. x[i] = _mm_add_ps(x[i], x[offset+i]); \
  1867. } \
  1868. offset >>= 1; \
  1869. for (int i = 0; i < offset; ++i) { \
  1870. x[i] = _mm_add_ps(x[i], x[offset+i]); \
  1871. } \
  1872. offset >>= 1; \
  1873. for (int i = 0; i < offset; ++i) { \
  1874. x[i] = _mm_add_ps(x[i], x[offset+i]); \
  1875. } \
  1876. const __m128 t0 = _mm_hadd_ps(x[0], x[0]); \
  1877. res = _mm_cvtss_f32(_mm_hadd_ps(t0, t0)); \
  1878. }
// TODO: is this optimal?
  1880. #define GGML_F32_VEC GGML_F32x4
  1881. #define GGML_F32_VEC_ZERO GGML_F32x4_ZERO
  1882. #define GGML_F32_VEC_SET1 GGML_F32x4_SET1
  1883. #define GGML_F32_VEC_LOAD GGML_F32x4_LOAD
  1884. #define GGML_F32_VEC_STORE GGML_F32x4_STORE
  1885. #define GGML_F32_VEC_FMA GGML_F32x4_FMA
  1886. #define GGML_F32_VEC_ADD GGML_F32x4_ADD
  1887. #define GGML_F32_VEC_MUL GGML_F32x4_MUL
  1888. #define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE
  1889. // F16 SSE
  1890. #define GGML_F16_STEP 32
  1891. #define GGML_F16_EPR 4
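// there is no native F16 support in SSE, so 4 fp16 values are converted to/from f32 around each load/store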
  1892. static inline __m128 __sse_f16x4_load(ggml_fp16_t *x) {
  1893. float tmp[4];
  1894. tmp[0] = GGML_FP16_TO_FP32(x[0]);
  1895. tmp[1] = GGML_FP16_TO_FP32(x[1]);
  1896. tmp[2] = GGML_FP16_TO_FP32(x[2]);
  1897. tmp[3] = GGML_FP16_TO_FP32(x[3]);
  1898. return _mm_loadu_ps(tmp);
  1899. }
  1900. static inline void __sse_f16x4_store(ggml_fp16_t *x, __m128 y) {
  1901. float arr[4];
  1902. _mm_storeu_ps(arr, y);
  1903. x[0] = GGML_FP32_TO_FP16(arr[0]);
  1904. x[1] = GGML_FP32_TO_FP16(arr[1]);
  1905. x[2] = GGML_FP32_TO_FP16(arr[2]);
  1906. x[3] = GGML_FP32_TO_FP16(arr[3]);
  1907. }
  1908. #define GGML_F32Cx4 __m128
  1909. #define GGML_F32Cx4_ZERO _mm_setzero_ps()
  1910. #define GGML_F32Cx4_SET1(x) _mm_set1_ps(x)
  1911. #define GGML_F32Cx4_LOAD(x) __sse_f16x4_load(x)
  1912. #define GGML_F32Cx4_STORE(x, y) __sse_f16x4_store(x, y)
  1913. #define GGML_F32Cx4_FMA GGML_F32x4_FMA
  1914. #define GGML_F32Cx4_ADD _mm_add_ps
  1915. #define GGML_F32Cx4_MUL _mm_mul_ps
  1916. #define GGML_F32Cx4_REDUCE GGML_F32x4_REDUCE
  1917. #define GGML_F16_VEC GGML_F32Cx4
  1918. #define GGML_F16_VEC_ZERO GGML_F32Cx4_ZERO
  1919. #define GGML_F16_VEC_SET1 GGML_F32Cx4_SET1
  1920. #define GGML_F16_VEC_LOAD(p, i) GGML_F32Cx4_LOAD(p)
  1921. #define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx4_STORE(p, r[i])
  1922. #define GGML_F16_VEC_FMA GGML_F32Cx4_FMA
  1923. #define GGML_F16_VEC_ADD GGML_F32Cx4_ADD
  1924. #define GGML_F16_VEC_MUL GGML_F32Cx4_MUL
  1925. #define GGML_F16_VEC_REDUCE GGML_F32Cx4_REDUCE
  1926. #endif
  1927. // GGML_F32_ARR / GGML_F16_ARR
  1928. // number of registers to use per step
  1929. #ifdef GGML_SIMD
  1930. #define GGML_F32_ARR (GGML_F32_STEP/GGML_F32_EPR)
  1931. #define GGML_F16_ARR (GGML_F16_STEP/GGML_F16_EPR)
  1932. #endif
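// e.g. in the SSE3 branch above: GGML_F32_STEP = 32 and GGML_F32_EPR = 4, so GGML_F32_ARR = 8
// independent accumulators are used per step of the main loops below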
  1933. //
  1934. // fundamental operations
  1935. //
  1936. inline static void ggml_vec_set_i8(const int n, int8_t * x, const int8_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
  1937. inline static void ggml_vec_set_i16(const int n, int16_t * x, const int16_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
  1938. inline static void ggml_vec_set_i32(const int n, int32_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
  1939. inline static void ggml_vec_set_f16(const int n, ggml_fp16_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
  1940. inline static void ggml_vec_add_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i] + y[i]; }
  1941. inline static void ggml_vec_add1_f32(const int n, float * z, const float * x, const float v) { for (int i = 0; i < n; ++i) z[i] = x[i] + v; }
  1942. inline static void ggml_vec_acc_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] += x[i]; }
  1943. inline static void ggml_vec_acc1_f32(const int n, float * y, const float v) { for (int i = 0; i < n; ++i) y[i] += v; }
  1944. inline static void ggml_vec_sub_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i] - y[i]; }
  1945. inline static void ggml_vec_set_f32 (const int n, float * x, const float v) { for (int i = 0; i < n; ++i) x[i] = v; }
  1946. inline static void ggml_vec_cpy_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i]; }
  1947. inline static void ggml_vec_neg_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = -x[i]; }
  1948. inline static void ggml_vec_mul_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]*y[i]; }
  1949. inline static void ggml_vec_div_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]/y[i]; }
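// ggml_vec_dot_f32: s = sum_i x[i]*y[i] over n elements
// the SIMD path processes GGML_F32_STEP elements per iteration; the tail is handled with scalar code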
  1950. static void ggml_vec_dot_f32(const int n, float * restrict s, const float * restrict x, const float * restrict y) {
  1951. #ifdef GGML_SIMD
  1952. float sumf = 0.0f;
  1953. const int np = (n & ~(GGML_F32_STEP - 1));
  1954. GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO };
  1955. GGML_F32_VEC ax[GGML_F32_ARR];
  1956. GGML_F32_VEC ay[GGML_F32_ARR];
  1957. for (int i = 0; i < np; i += GGML_F32_STEP) {
  1958. for (int j = 0; j < GGML_F32_ARR; j++) {
  1959. ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR);
  1960. ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
  1961. sum[j] = GGML_F32_VEC_FMA(sum[j], ax[j], ay[j]);
  1962. }
  1963. }
  1964. // reduce sum0..sum3 to sum0
  1965. GGML_F32_VEC_REDUCE(sumf, sum);
  1966. // leftovers
  1967. for (int i = np; i < n; ++i) {
  1968. sumf += x[i]*y[i];
  1969. }
  1970. #else
  1971. // scalar
  1972. ggml_float sumf = 0.0;
  1973. for (int i = 0; i < n; ++i) {
  1974. sumf += (ggml_float)(x[i]*y[i]);
  1975. }
  1976. #endif
  1977. *s = sumf;
  1978. }
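// ggml_vec_dot_f16: dot product of two fp16 vectors, accumulated as ggml_float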
  1979. static void ggml_vec_dot_f16(const int n, float * restrict s, ggml_fp16_t * restrict x, ggml_fp16_t * restrict y) {
  1980. ggml_float sumf = 0.0;
  1981. #if defined(GGML_SIMD)
  1982. const int np = (n & ~(GGML_F16_STEP - 1));
  1983. GGML_F16_VEC sum[GGML_F16_ARR] = { GGML_F16_VEC_ZERO };
  1984. GGML_F16_VEC ax[GGML_F16_ARR];
  1985. GGML_F16_VEC ay[GGML_F16_ARR];
  1986. for (int i = 0; i < np; i += GGML_F16_STEP) {
  1987. for (int j = 0; j < GGML_F16_ARR; j++) {
  1988. ax[j] = GGML_F16_VEC_LOAD(x + i + j*GGML_F16_EPR, j);
  1989. ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j);
  1990. sum[j] = GGML_F16_VEC_FMA(sum[j], ax[j], ay[j]);
  1991. }
  1992. }
  1993. // reduce sum0..sum3 to sum0
  1994. GGML_F16_VEC_REDUCE(sumf, sum);
  1995. // leftovers
  1996. for (int i = np; i < n; ++i) {
  1997. sumf += (ggml_float)(GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i]));
  1998. }
  1999. #else
  2000. for (int i = 0; i < n; ++i) {
  2001. sumf += (ggml_float)(GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i]));
  2002. }
  2003. #endif
  2004. *s = sumf;
  2005. }
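// ggml_vec_dot_q4_0_q8_0: dot product of a q4_0 row (x) with a q8_0 row (y)
// per block: the packed 4-bit quants of x are expanded, offset to [-8, 7] and multiplied with the
// int8 quants of y; the integer sum is then scaled by d_x*d_y (see the scalar fallback at the end)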
  2006. static void ggml_vec_dot_q4_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
  2007. const int qk = QK8_0;
  2008. const int nb = n / qk;
  2009. assert(n % qk == 0);
  2010. const block_q4_0 * restrict x = vx;
  2011. const block_q8_0 * restrict y = vy;
  2012. #if defined(__ARM_NEON)
  2013. float32x4_t sumv0 = vdupq_n_f32(0.0f);
  2014. float32x4_t sumv1 = vdupq_n_f32(0.0f);
  2015. GGML_ASSERT(nb % 2 == 0); // TODO: handle odd nb
  2016. for (int i = 0; i < nb; i += 2) {
  2017. const block_q4_0 * restrict x0 = &x[i + 0];
  2018. const block_q4_0 * restrict x1 = &x[i + 1];
  2019. const block_q8_0 * restrict y0 = &y[i + 0];
  2020. const block_q8_0 * restrict y1 = &y[i + 1];
  2021. const uint8x16_t m4b = vdupq_n_u8(0x0F);
  2022. const int8x16_t s8b = vdupq_n_s8(0x8);
  2023. const uint8x16_t v0_0 = vld1q_u8(x0->qs);
  2024. const uint8x16_t v0_1 = vld1q_u8(x1->qs);
  2025. // 4-bit -> 8-bit
  2026. const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
  2027. const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
  2028. const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
  2029. const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
  2030. // sub 8
  2031. const int8x16_t v0_0ls = vsubq_s8(v0_0l, s8b);
  2032. const int8x16_t v0_0hs = vsubq_s8(v0_0h, s8b);
  2033. const int8x16_t v0_1ls = vsubq_s8(v0_1l, s8b);
  2034. const int8x16_t v0_1hs = vsubq_s8(v0_1h, s8b);
  2035. // load y
  2036. const int8x16_t v1_0l = vld1q_s8(y0->qs);
  2037. const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
  2038. const int8x16_t v1_1l = vld1q_s8(y1->qs);
  2039. const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);
  2040. #if defined(__ARM_FEATURE_DOTPROD)
  2041. // dot product into int32x4_t
  2042. const int32x4_t p_0 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_0ls, v1_0l), v0_0hs, v1_0h);
  2043. const int32x4_t p_1 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_1ls, v1_1l), v0_1hs, v1_1h);
  2044. sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
  2045. sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
  2046. #else
  2047. const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0ls), vget_low_s8 (v1_0l));
  2048. const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0ls), vget_high_s8(v1_0l));
  2049. const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hs), vget_low_s8 (v1_0h));
  2050. const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hs), vget_high_s8(v1_0h));
  2051. const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1ls), vget_low_s8 (v1_1l));
  2052. const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1ls), vget_high_s8(v1_1l));
  2053. const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1hs), vget_low_s8 (v1_1h));
  2054. const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1hs), vget_high_s8(v1_1h));
  2055. const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h));
  2056. const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h));
  2057. const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h));
  2058. const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h));
  2059. sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
  2060. sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
  2061. #endif
  2062. }
  2063. *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
  2064. #elif defined(__AVX2__)
  2065. // Initialize accumulator with zeros
  2066. __m256 acc = _mm256_setzero_ps();
  2067. // Main loop
  2068. for (int i = 0; i < nb; ++i) {
  2069. /* Compute combined scale for the block */
  2070. const __m256 d = _mm256_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) );
  2071. __m256i bx = bytes_from_nibbles_32(x[i].qs);
// Now we have a vector with bytes in the [0 .. 15] interval. Offset them into the [-8 .. +7] interval.
  2073. const __m256i off = _mm256_set1_epi8( 8 );
  2074. bx = _mm256_sub_epi8( bx, off );
  2075. __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);
  2076. const __m256 q = mul_sum_i8_pairs_float(bx, by);
  2077. /* Multiply q with scale and accumulate */
  2078. acc = _mm256_fmadd_ps( d, q, acc );
  2079. }
  2080. *s = hsum_float_8(acc);
  2081. #elif defined(__AVX__)
  2082. // Initialize accumulator with zeros
  2083. __m256 acc = _mm256_setzero_ps();
  2084. // Main loop
  2085. for (int i = 0; i < nb; ++i) {
  2086. // Compute combined scale for the block
  2087. const __m256 d = _mm256_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) );
  2088. const __m128i lowMask = _mm_set1_epi8(0xF);
  2089. const __m128i off = _mm_set1_epi8(8);
  2090. const __m128i tmp = _mm_loadu_si128((const __m128i *)x[i].qs);
  2091. __m128i bx = _mm_and_si128(lowMask, tmp);
  2092. __m128i by = _mm_loadu_si128((const __m128i *)y[i].qs);
  2093. bx = _mm_sub_epi8(bx, off);
  2094. const __m128i i32_0 = mul_sum_i8_pairs(bx, by);
  2095. bx = _mm_and_si128(lowMask, _mm_srli_epi64(tmp, 4));
  2096. by = _mm_loadu_si128((const __m128i *)(y[i].qs + 16));
  2097. bx = _mm_sub_epi8(bx, off);
  2098. const __m128i i32_1 = mul_sum_i8_pairs(bx, by);
  2099. // Convert int32_t to float
  2100. __m256 p = _mm256_cvtepi32_ps(MM256_SET_M128I(i32_0, i32_1));
  2101. // Apply the scale, and accumulate
  2102. acc = _mm256_add_ps(_mm256_mul_ps( d, p ), acc);
  2103. }
  2104. *s = hsum_float_8(acc);
  2105. #elif defined(__SSSE3__)
  2106. // set constants
  2107. const __m128i lowMask = _mm_set1_epi8(0xF);
  2108. const __m128i off = _mm_set1_epi8(8);
  2109. // Initialize accumulator with zeros
  2110. __m128 acc_0 = _mm_setzero_ps();
  2111. __m128 acc_1 = _mm_setzero_ps();
  2112. __m128 acc_2 = _mm_setzero_ps();
  2113. __m128 acc_3 = _mm_setzero_ps();
  2114. // First round without accumulation
  2115. {
  2116. _mm_prefetch(&x[0] + sizeof(block_q4_0), _MM_HINT_T0);
  2117. _mm_prefetch(&y[0] + sizeof(block_q8_0), _MM_HINT_T0);
  2118. // Compute combined scale for the block 0 and 1
  2119. const __m128 d_0_1 = _mm_set1_ps( GGML_FP16_TO_FP32(x[0].d) * GGML_FP16_TO_FP32(y[0].d) );
  2120. const __m128i tmp_0_1 = _mm_loadu_si128((const __m128i *)x[0].qs);
  2121. __m128i bx_0 = _mm_and_si128(lowMask, tmp_0_1);
  2122. __m128i by_0 = _mm_loadu_si128((const __m128i *)y[0].qs);
  2123. bx_0 = _mm_sub_epi8(bx_0, off);
  2124. const __m128i i32_0 = mul_sum_i8_pairs(bx_0, by_0);
  2125. __m128i bx_1 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_0_1, 4));
  2126. __m128i by_1 = _mm_loadu_si128((const __m128i *)(y[0].qs + 16));
  2127. bx_1 = _mm_sub_epi8(bx_1, off);
  2128. const __m128i i32_1 = mul_sum_i8_pairs(bx_1, by_1);
  2129. _mm_prefetch(&x[1] + sizeof(block_q4_0), _MM_HINT_T0);
  2130. _mm_prefetch(&y[1] + sizeof(block_q8_0), _MM_HINT_T0);
  2131. // Compute combined scale for the block 2 and 3
  2132. const __m128 d_2_3 = _mm_set1_ps( GGML_FP16_TO_FP32(x[1].d) * GGML_FP16_TO_FP32(y[1].d) );
  2133. const __m128i tmp_2_3 = _mm_loadu_si128((const __m128i *)x[1].qs);
  2134. __m128i bx_2 = _mm_and_si128(lowMask, tmp_2_3);
  2135. __m128i by_2 = _mm_loadu_si128((const __m128i *)y[1].qs);
  2136. bx_2 = _mm_sub_epi8(bx_2, off);
  2137. const __m128i i32_2 = mul_sum_i8_pairs(bx_2, by_2);
  2138. __m128i bx_3 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_2_3, 4));
  2139. __m128i by_3 = _mm_loadu_si128((const __m128i *)(y[1].qs + 16));
  2140. bx_3 = _mm_sub_epi8(bx_3, off);
  2141. const __m128i i32_3 = mul_sum_i8_pairs(bx_3, by_3);
  2142. // Convert int32_t to float
  2143. __m128 p0 = _mm_cvtepi32_ps(i32_0);
  2144. __m128 p1 = _mm_cvtepi32_ps(i32_1);
  2145. __m128 p2 = _mm_cvtepi32_ps(i32_2);
  2146. __m128 p3 = _mm_cvtepi32_ps(i32_3);
  2147. // Apply the scale
  2148. acc_0 = _mm_mul_ps( d_0_1, p0 );
  2149. acc_1 = _mm_mul_ps( d_0_1, p1 );
  2150. acc_2 = _mm_mul_ps( d_2_3, p2 );
  2151. acc_3 = _mm_mul_ps( d_2_3, p3 );
  2152. }
  2153. // Main loop
  2154. GGML_ASSERT(nb % 2 == 0); // TODO: handle odd nb
  2155. for (int i = 2; i < nb; i+=2) {
  2156. _mm_prefetch(&x[i] + sizeof(block_q4_0), _MM_HINT_T0);
  2157. _mm_prefetch(&y[i] + sizeof(block_q8_0), _MM_HINT_T0);
  2158. // Compute combined scale for the block 0 and 1
  2159. const __m128 d_0_1 = _mm_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) );
  2160. const __m128i tmp_0_1 = _mm_loadu_si128((const __m128i *)x[i].qs);
  2161. __m128i bx_0 = _mm_and_si128(lowMask, tmp_0_1);
  2162. __m128i by_0 = _mm_loadu_si128((const __m128i *)y[i].qs);
  2163. bx_0 = _mm_sub_epi8(bx_0, off);
  2164. const __m128i i32_0 = mul_sum_i8_pairs(bx_0, by_0);
  2165. __m128i bx_1 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_0_1, 4));
  2166. __m128i by_1 = _mm_loadu_si128((const __m128i *)(y[i].qs + 16));
  2167. bx_1 = _mm_sub_epi8(bx_1, off);
  2168. const __m128i i32_1 = mul_sum_i8_pairs(bx_1, by_1);
  2169. _mm_prefetch(&x[i] + 2 * sizeof(block_q4_0), _MM_HINT_T0);
  2170. _mm_prefetch(&y[i] + 2 * sizeof(block_q8_0), _MM_HINT_T0);
  2171. // Compute combined scale for the block 2 and 3
  2172. const __m128 d_2_3 = _mm_set1_ps( GGML_FP16_TO_FP32(x[i + 1].d) * GGML_FP16_TO_FP32(y[i + 1].d) );
  2173. const __m128i tmp_2_3 = _mm_loadu_si128((const __m128i *)x[i + 1].qs);
  2174. __m128i bx_2 = _mm_and_si128(lowMask, tmp_2_3);
  2175. __m128i by_2 = _mm_loadu_si128((const __m128i *)y[i + 1].qs);
  2176. bx_2 = _mm_sub_epi8(bx_2, off);
  2177. const __m128i i32_2 = mul_sum_i8_pairs(bx_2, by_2);
  2178. __m128i bx_3 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_2_3, 4));
  2179. __m128i by_3 = _mm_loadu_si128((const __m128i *)(y[i + 1].qs + 16));
  2180. bx_3 = _mm_sub_epi8(bx_3, off);
  2181. const __m128i i32_3 = mul_sum_i8_pairs(bx_3, by_3);
  2182. // Convert int32_t to float
  2183. __m128 p0 = _mm_cvtepi32_ps(i32_0);
  2184. __m128 p1 = _mm_cvtepi32_ps(i32_1);
  2185. __m128 p2 = _mm_cvtepi32_ps(i32_2);
  2186. __m128 p3 = _mm_cvtepi32_ps(i32_3);
  2187. // Apply the scale
  2188. __m128 p0_d = _mm_mul_ps( d_0_1, p0 );
  2189. __m128 p1_d = _mm_mul_ps( d_0_1, p1 );
  2190. __m128 p2_d = _mm_mul_ps( d_2_3, p2 );
  2191. __m128 p3_d = _mm_mul_ps( d_2_3, p3 );
// Accumulate
  2193. acc_0 = _mm_add_ps(p0_d, acc_0);
  2194. acc_1 = _mm_add_ps(p1_d, acc_1);
  2195. acc_2 = _mm_add_ps(p2_d, acc_2);
  2196. acc_3 = _mm_add_ps(p3_d, acc_3);
  2197. }
  2198. *s = hsum_float_4x4(acc_0, acc_1, acc_2, acc_3);
  2199. #elif defined(__riscv_v_intrinsic)
  2200. float sumf = 0.0;
  2201. size_t vl = __riscv_vsetvl_e8m1(qk/2);
  2202. for (int i = 0; i < nb; i++) {
  2203. vuint8m1_t tx = __riscv_vle8_v_u8m1(x[i].qs, vl);
  2204. vint8m1_t y0 = __riscv_vle8_v_i8m1(y[i].qs, vl);
  2205. vint8m1_t y1 = __riscv_vle8_v_i8m1(y[i].qs+16, vl);
  2206. vuint8m1_t x_a = __riscv_vand_vx_u8m1(tx, 0x0F, vl);
  2207. vuint8m1_t x_l = __riscv_vsrl_vx_u8m1(tx, 0x04, vl);
  2208. vint8m1_t x_ai = __riscv_vreinterpret_v_u8m1_i8m1(x_a);
  2209. vint8m1_t x_li = __riscv_vreinterpret_v_u8m1_i8m1(x_l);
  2210. vint8m1_t v0 = __riscv_vsub_vx_i8m1(x_ai, 8, vl);
  2211. vint8m1_t v1 = __riscv_vsub_vx_i8m1(x_li, 8, vl);
  2212. vint16m2_t vec_mul1 = __riscv_vwmul_vv_i16m2(v0, y0, vl);
  2213. vint16m2_t vec_mul2 = __riscv_vwmul_vv_i16m2(v1, y1, vl);
  2214. vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl);
  2215. vint32m1_t vs1 = __riscv_vwredsum_vs_i16m2_i32m1(vec_mul1, vec_zero, vl);
  2216. vint32m1_t vs2 = __riscv_vwredsum_vs_i16m2_i32m1(vec_mul2, vec_zero, vl);
  2217. int sumi = __riscv_vmv_x_s_i32m1_i32(vs1);
  2218. sumi += __riscv_vmv_x_s_i32m1_i32(vs2);
  2219. sumf += sumi*GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d);
  2220. }
  2221. *s = sumf;
  2222. #else
  2223. // scalar
  2224. float sumf = 0.0;
  2225. for (int i = 0; i < nb; i++) {
  2226. int sumi = 0;
  2227. for (int j = 0; j < qk/2; ++j) {
  2228. const int v0 = (x[i].qs[j] & 0x0F) - 8;
  2229. const int v1 = (x[i].qs[j] >> 4) - 8;
  2230. sumi += (v0 * y[i].qs[j]) + (v1 * y[i].qs[j + qk/2]);
  2231. }
  2232. sumf += sumi*GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d);
  2233. }
  2234. *s = sumf;
  2235. #endif
  2236. }
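// ggml_vec_dot_q4_1_q8_1: like q4_0 x q8_0, but the q4_1 quants stay unsigned in [0, 15] and each
// block adds a correction term m_x * s_y coming from the block minimum (see the scalar fallback)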
  2237. static void ggml_vec_dot_q4_1_q8_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
  2238. const int qk = QK8_1;
  2239. const int nb = n / qk;
  2240. assert(n % qk == 0);
  2241. const block_q4_1 * restrict x = vx;
  2242. const block_q8_1 * restrict y = vy;
  2243. // TODO: add WASM SIMD
  2244. #if defined(__ARM_NEON)
  2245. float32x4_t sumv0 = vdupq_n_f32(0.0f);
  2246. float32x4_t sumv1 = vdupq_n_f32(0.0f);
  2247. float summs = 0;
  2248. GGML_ASSERT(nb % 2 == 0); // TODO: handle odd nb
  2249. for (int i = 0; i < nb; i += 2) {
  2250. const block_q4_1 * restrict x0 = &x[i + 0];
  2251. const block_q4_1 * restrict x1 = &x[i + 1];
  2252. const block_q8_1 * restrict y0 = &y[i + 0];
  2253. const block_q8_1 * restrict y1 = &y[i + 1];
  2254. summs += GGML_FP16_TO_FP32(x0->m) * y0->s + GGML_FP16_TO_FP32(x1->m) * y1->s;
  2255. const uint8x16_t m4b = vdupq_n_u8(0x0F);
  2256. const uint8x16_t v0_0 = vld1q_u8(x0->qs);
  2257. const uint8x16_t v0_1 = vld1q_u8(x1->qs);
  2258. // 4-bit -> 8-bit
  2259. const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
  2260. const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
  2261. const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
  2262. const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
  2263. // load y
  2264. const int8x16_t v1_0l = vld1q_s8(y0->qs);
  2265. const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
  2266. const int8x16_t v1_1l = vld1q_s8(y1->qs);
  2267. const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);
  2268. #if defined(__ARM_FEATURE_DOTPROD)
  2269. // dot product into int32x4_t
  2270. const int32x4_t p_0 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_0l, v1_0l), v0_0h, v1_0h);
  2271. const int32x4_t p_1 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_1l, v1_1l), v0_1h, v1_1h);
  2272. sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), GGML_FP16_TO_FP32(x0->d)*y0->d);
  2273. sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_FP16_TO_FP32(x1->d)*y1->d);
  2274. #else
  2275. const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0l), vget_low_s8 (v1_0l));
  2276. const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0l), vget_high_s8(v1_0l));
  2277. const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0h), vget_low_s8 (v1_0h));
  2278. const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0h), vget_high_s8(v1_0h));
  2279. const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1l), vget_low_s8 (v1_1l));
  2280. const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1l), vget_high_s8(v1_1l));
  2281. const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1h), vget_low_s8 (v1_1h));
  2282. const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1h), vget_high_s8(v1_1h));
  2283. const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h));
  2284. const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h));
  2285. const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h));
  2286. const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h));
  2287. sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), GGML_FP16_TO_FP32(x0->d)*y0->d);
  2288. sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), GGML_FP16_TO_FP32(x1->d)*y1->d);
  2289. #endif
  2290. }
  2291. *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs;
  2292. #elif defined(__AVX2__) || defined(__AVX__)
  2293. // Initialize accumulator with zeros
  2294. __m256 acc = _mm256_setzero_ps();
  2295. float summs = 0;
  2296. // Main loop
  2297. for (int i = 0; i < nb; ++i) {
  2298. const float d0 = GGML_FP16_TO_FP32(x[i].d);
  2299. const float d1 = y[i].d;
  2300. summs += GGML_FP16_TO_FP32(x[i].m) * y[i].s;
  2301. const __m256 d0v = _mm256_set1_ps( d0 );
  2302. const __m256 d1v = _mm256_set1_ps( d1 );
  2303. // Compute combined scales
  2304. const __m256 d0d1 = _mm256_mul_ps( d0v, d1v );
  2305. // Load 16 bytes, and unpack 4 bit fields into bytes, making 32 bytes
  2306. const __m256i bx = bytes_from_nibbles_32(x[i].qs);
  2307. const __m256i by = _mm256_loadu_si256( (const __m256i *)y[i].qs );
  2308. const __m256 xy = mul_sum_us8_pairs_float(bx, by);
  2309. // Accumulate d0*d1*x*y
  2310. #if defined(__AVX2__)
  2311. acc = _mm256_fmadd_ps( d0d1, xy, acc );
  2312. #else
  2313. acc = _mm256_add_ps( _mm256_mul_ps( d0d1, xy ), acc );
  2314. #endif
  2315. }
  2316. *s = hsum_float_8(acc) + summs;
  2317. #elif defined(__riscv_v_intrinsic)
  2318. float sumf = 0.0;
  2319. size_t vl = __riscv_vsetvl_e8m1(qk/2);
  2320. for (int i = 0; i < nb; i++) {
  2321. vuint8m1_t tx = __riscv_vle8_v_u8m1(x[i].qs, vl);
  2322. vint8m1_t y0 = __riscv_vle8_v_i8m1(y[i].qs, vl);
  2323. vint8m1_t y1 = __riscv_vle8_v_i8m1(y[i].qs+16, vl);
  2324. vuint8m1_t x_a = __riscv_vand_vx_u8m1(tx, 0x0F, vl);
  2325. vuint8m1_t x_l = __riscv_vsrl_vx_u8m1(tx, 0x04, vl);
  2326. vint8m1_t v0 = __riscv_vreinterpret_v_u8m1_i8m1(x_a);
  2327. vint8m1_t v1 = __riscv_vreinterpret_v_u8m1_i8m1(x_l);
  2328. vint16m2_t vec_mul1 = __riscv_vwmul_vv_i16m2(v0, y0, vl);
  2329. vint16m2_t vec_mul2 = __riscv_vwmul_vv_i16m2(v1, y1, vl);
  2330. vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl);
  2331. vint32m1_t vs1 = __riscv_vwredsum_vs_i16m2_i32m1(vec_mul1, vec_zero, vl);
  2332. vint32m1_t vs2 = __riscv_vwredsum_vs_i16m2_i32m1(vec_mul2, vec_zero, vl);
  2333. int sumi = __riscv_vmv_x_s_i32m1_i32(vs1);
  2334. sumi += __riscv_vmv_x_s_i32m1_i32(vs2);
  2335. sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s;
  2336. }
  2337. *s = sumf;
  2338. #else
  2339. // scalar
  2340. float sumf = 0.0;
  2341. for (int i = 0; i < nb; i++) {
  2342. int sumi = 0;
  2343. for (int j = 0; j < qk/2; ++j) {
  2344. const int v0 = (x[i].qs[j] & 0x0F);
  2345. const int v1 = (x[i].qs[j] >> 4);
  2346. sumi += (v0 * y[i].qs[j]) + (v1 * y[i].qs[j + qk/2]);
  2347. }
  2348. sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s;
  2349. }
  2350. *s = sumf;
  2351. #endif
  2352. }
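// ggml_vec_dot_q5_0_q8_0: the 5th bit of each quant is packed separately in qh; it is re-inserted
// into the 4-bit values (giving [-16, 15] after the offset) before the int8 multiply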
  2353. static void ggml_vec_dot_q5_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
  2354. const int qk = QK8_0;
  2355. const int nb = n / qk;
  2356. assert(n % qk == 0);
  2357. assert(qk == QK5_0);
  2358. const block_q5_0 * restrict x = vx;
  2359. const block_q8_0 * restrict y = vy;
  2360. #if defined(__ARM_NEON)
  2361. float32x4_t sumv0 = vdupq_n_f32(0.0f);
  2362. float32x4_t sumv1 = vdupq_n_f32(0.0f);
  2363. uint32_t qh0;
  2364. uint32_t qh1;
  2365. uint64_t tmp0[4];
  2366. uint64_t tmp1[4];
  2367. GGML_ASSERT(nb % 2 == 0); // TODO: handle odd nb
  2368. for (int i = 0; i < nb; i += 2) {
  2369. const block_q5_0 * restrict x0 = &x[i];
  2370. const block_q5_0 * restrict x1 = &x[i + 1];
  2371. const block_q8_0 * restrict y0 = &y[i];
  2372. const block_q8_0 * restrict y1 = &y[i + 1];
  2373. const uint8x16_t m4b = vdupq_n_u8(0x0F);
  2374. // extract the 5th bit via lookup table ((!b) << 4)
  2375. memcpy(&qh0, x0->qh, sizeof(qh0));
  2376. memcpy(&qh1, x1->qh, sizeof(qh1));
  2377. tmp0[0] = table_b2b_1[(qh0 >> 0) & 0xFF];
  2378. tmp0[1] = table_b2b_1[(qh0 >> 8) & 0xFF];
  2379. tmp0[2] = table_b2b_1[(qh0 >> 16) & 0xFF];
  2380. tmp0[3] = table_b2b_1[(qh0 >> 24) ];
  2381. tmp1[0] = table_b2b_1[(qh1 >> 0) & 0xFF];
  2382. tmp1[1] = table_b2b_1[(qh1 >> 8) & 0xFF];
  2383. tmp1[2] = table_b2b_1[(qh1 >> 16) & 0xFF];
  2384. tmp1[3] = table_b2b_1[(qh1 >> 24) ];
  2385. const int8x16_t qhl0 = vld1q_s8((const int8_t *)(tmp0 + 0));
  2386. const int8x16_t qhh0 = vld1q_s8((const int8_t *)(tmp0 + 2));
  2387. const int8x16_t qhl1 = vld1q_s8((const int8_t *)(tmp1 + 0));
  2388. const int8x16_t qhh1 = vld1q_s8((const int8_t *)(tmp1 + 2));
  2389. const uint8x16_t v0_0 = vld1q_u8(x0->qs);
  2390. const uint8x16_t v0_1 = vld1q_u8(x1->qs);
  2391. // 4-bit -> 8-bit
  2392. int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
  2393. int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
  2394. int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
  2395. int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
  2396. // add high bit and sub 16 (equivalent to sub 0x10 when bit is zero)
  2397. const int8x16_t v0_0lf = vsubq_s8(v0_0l, qhl0);
  2398. const int8x16_t v0_0hf = vsubq_s8(v0_0h, qhh0);
  2399. const int8x16_t v0_1lf = vsubq_s8(v0_1l, qhl1);
  2400. const int8x16_t v0_1hf = vsubq_s8(v0_1h, qhh1);
  2401. // load y
  2402. const int8x16_t v1_0l = vld1q_s8(y0->qs);
  2403. const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
  2404. const int8x16_t v1_1l = vld1q_s8(y1->qs);
  2405. const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);
  2406. #if defined(__ARM_FEATURE_DOTPROD)
  2407. sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(
  2408. vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l),
  2409. vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
  2410. sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(
  2411. vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l),
  2412. vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
  2413. #else
  2414. const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0lf), vget_low_s8 (v1_0l));
  2415. const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0lf), vget_high_s8(v1_0l));
  2416. const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hf), vget_low_s8 (v1_0h));
  2417. const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hf), vget_high_s8(v1_0h));
  2418. const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1lf), vget_low_s8 (v1_1l));
  2419. const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1lf), vget_high_s8(v1_1l));
  2420. const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1hf), vget_low_s8 (v1_1h));
  2421. const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1hf), vget_high_s8(v1_1h));
  2422. const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h));
  2423. const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h));
  2424. const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h));
  2425. const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h));
  2426. sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
  2427. sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
  2428. #endif
  2429. }
  2430. *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
  2431. #elif defined(__wasm_simd128__)
  2432. v128_t sumv = wasm_f32x4_splat(0.0f);
  2433. uint32_t qh;
  2434. uint64_t tmp[4];
  2435. // TODO: check if unrolling this is better
  2436. for (int i = 0; i < nb; ++i) {
  2437. const block_q5_0 * restrict x0 = &x[i];
  2438. const block_q8_0 * restrict y0 = &y[i];
  2439. const v128_t m4b = wasm_i8x16_splat(0x0F);
  2440. // extract the 5th bit
  2441. memcpy(&qh, x0->qh, sizeof(qh));
  2442. tmp[0] = table_b2b_1[(qh >> 0) & 0xFF];
  2443. tmp[1] = table_b2b_1[(qh >> 8) & 0xFF];
  2444. tmp[2] = table_b2b_1[(qh >> 16) & 0xFF];
  2445. tmp[3] = table_b2b_1[(qh >> 24) ];
  2446. const v128_t qhl = wasm_v128_load(tmp + 0);
  2447. const v128_t qhh = wasm_v128_load(tmp + 2);
  2448. const v128_t v0 = wasm_v128_load(x0->qs);
  2449. // 4-bit -> 8-bit
  2450. const v128_t v0l = wasm_v128_and (v0, m4b);
  2451. const v128_t v0h = wasm_u8x16_shr(v0, 4);
  2452. // add high bit and sub 16 (equivalent to sub 0x10 when bit is zero)
  2453. const v128_t v0lf = wasm_i8x16_sub(v0l, qhl);
  2454. const v128_t v0hf = wasm_i8x16_sub(v0h, qhh);
  2455. // load y
  2456. const v128_t v1l = wasm_v128_load(y0->qs);
  2457. const v128_t v1h = wasm_v128_load(y0->qs + 16);
  2458. // int8x16 -> int16x8
  2459. const v128_t v0lfl = wasm_i16x8_extend_low_i8x16 (v0lf);
  2460. const v128_t v0lfh = wasm_i16x8_extend_high_i8x16(v0lf);
  2461. const v128_t v0hfl = wasm_i16x8_extend_low_i8x16 (v0hf);
  2462. const v128_t v0hfh = wasm_i16x8_extend_high_i8x16(v0hf);
  2463. const v128_t v1ll = wasm_i16x8_extend_low_i8x16 (v1l);
  2464. const v128_t v1lh = wasm_i16x8_extend_high_i8x16(v1l);
  2465. const v128_t v1hl = wasm_i16x8_extend_low_i8x16 (v1h);
  2466. const v128_t v1hh = wasm_i16x8_extend_high_i8x16(v1h);
  2467. // dot product
  2468. sumv = wasm_f32x4_add(sumv, wasm_f32x4_mul(wasm_f32x4_convert_i32x4(
  2469. wasm_i32x4_add(
  2470. wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0lfl, v1ll),
  2471. wasm_i32x4_dot_i16x8(v0lfh, v1lh)),
  2472. wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0hfl, v1hl),
  2473. wasm_i32x4_dot_i16x8(v0hfh, v1hh)))),
  2474. wasm_f32x4_splat(GGML_FP16_TO_FP32(x0->d) * GGML_FP16_TO_FP32(y0->d))));
  2475. }
  2476. *s = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) +
  2477. wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3);
  2478. #elif defined(__AVX2__)
  2479. // Initialize accumulator with zeros
  2480. __m256 acc = _mm256_setzero_ps();
  2481. // Main loop
  2482. for (int i = 0; i < nb; i++) {
  2483. /* Compute combined scale for the block */
  2484. const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d));
  2485. __m256i bx = bytes_from_nibbles_32(x[i].qs);
  2486. __m256i bxhi = bytes_from_bits_32(x[i].qh);
  2487. bxhi = _mm256_andnot_si256(bxhi, _mm256_set1_epi8((char)0xF0));
  2488. bx = _mm256_or_si256(bx, bxhi);
  2489. __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);
  2490. const __m256 q = mul_sum_i8_pairs_float(bx, by);
  2491. /* Multiply q with scale and accumulate */
  2492. acc = _mm256_fmadd_ps(d, q, acc);
  2493. }
  2494. *s = hsum_float_8(acc);
  2495. #elif defined(__AVX__)
  2496. // Initialize accumulator with zeros
  2497. __m256 acc = _mm256_setzero_ps();
  2498. __m128i mask = _mm_set1_epi8((char)0xF0);
  2499. // Main loop
  2500. for (int i = 0; i < nb; i++) {
  2501. /* Compute combined scale for the block */
  2502. const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d));
  2503. __m256i bx = bytes_from_nibbles_32(x[i].qs);
  2504. const __m256i bxhi = bytes_from_bits_32(x[i].qh);
  2505. __m128i bxhil = _mm256_castsi256_si128(bxhi);
  2506. __m128i bxhih = _mm256_extractf128_si256(bxhi, 1);
  2507. bxhil = _mm_andnot_si128(bxhil, mask);
  2508. bxhih = _mm_andnot_si128(bxhih, mask);
  2509. __m128i bxl = _mm256_castsi256_si128(bx);
  2510. __m128i bxh = _mm256_extractf128_si256(bx, 1);
  2511. bxl = _mm_or_si128(bxl, bxhil);
  2512. bxh = _mm_or_si128(bxh, bxhih);
  2513. bx = MM256_SET_M128I(bxh, bxl);
  2514. const __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);
  2515. const __m256 q = mul_sum_i8_pairs_float(bx, by);
  2516. /* Multiply q with scale and accumulate */
  2517. acc = _mm256_add_ps(_mm256_mul_ps(d, q), acc);
  2518. }
  2519. *s = hsum_float_8(acc);
  2520. #elif defined(__riscv_v_intrinsic)
  2521. float sumf = 0.0;
  2522. uint32_t qh;
  2523. // These temp values are for masking and shift operations
  2524. uint32_t temp_1[16] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
  2525. uint32_t temp_2[16] = {0x1, 0x2, 0x4, 0x8, 0x10, 0x20, 0x40, 0x80,
  2526. 0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000};
  2527. size_t vl = __riscv_vsetvl_e8m1(qk/2);
  2528. for (int i = 0; i < nb; i++) {
  2529. memcpy(&qh, x[i].qh, sizeof(uint32_t));
  2530. // temporary registers
  2531. vuint32m4_t vt_1 = __riscv_vle32_v_u32m4(temp_2, vl);
  2532. vuint32m4_t vt_2 = __riscv_vle32_v_u32m4(temp_1, vl);
  2533. vuint32m4_t vt_3 = __riscv_vsll_vx_u32m4(vt_1, 16, vl);
  2534. vuint32m4_t vt_4 = __riscv_vadd_vx_u32m4(vt_2, 12, vl);
  2535. // ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4;
  2536. vuint32m4_t xha_0 = __riscv_vand_vx_u32m4(vt_1, qh, vl);
  2537. vuint32m4_t xhr_0 = __riscv_vsrl_vv_u32m4(xha_0, vt_2, vl);
  2538. vuint32m4_t xhl_0 = __riscv_vsll_vx_u32m4(xhr_0, 4, vl);
  2539. // ((qh & (1u << (j + 16))) >> (j + 12));
  2540. vuint32m4_t xha_1 = __riscv_vand_vx_u32m4(vt_3, qh, vl);
  2541. vuint32m4_t xhl_1 = __riscv_vsrl_vv_u32m4(xha_1, vt_4, vl);
  2542. // narrowing
  2543. vuint16m2_t xhc_0 = __riscv_vncvt_x_x_w_u16m2(xhl_0, vl);
  2544. vuint8m1_t xh_0 = __riscv_vncvt_x_x_w_u8m1(xhc_0, vl);
  2545. vuint16m2_t xhc_1 = __riscv_vncvt_x_x_w_u16m2(xhl_1, vl);
  2546. vuint8m1_t xh_1 = __riscv_vncvt_x_x_w_u8m1(xhc_1, vl);
  2547. // load
  2548. vuint8m1_t tx = __riscv_vle8_v_u8m1(x[i].qs, vl);
  2549. vint8m1_t y0 = __riscv_vle8_v_i8m1(y[i].qs, vl);
  2550. vint8m1_t y1 = __riscv_vle8_v_i8m1(y[i].qs+16, vl);
  2551. vuint8m1_t x_at = __riscv_vand_vx_u8m1(tx, 0x0F, vl);
  2552. vuint8m1_t x_lt = __riscv_vsrl_vx_u8m1(tx, 0x04, vl);
  2553. vuint8m1_t x_a = __riscv_vor_vv_u8m1(x_at, xh_0, vl);
  2554. vuint8m1_t x_l = __riscv_vor_vv_u8m1(x_lt, xh_1, vl);
  2555. vint8m1_t x_ai = __riscv_vreinterpret_v_u8m1_i8m1(x_a);
  2556. vint8m1_t x_li = __riscv_vreinterpret_v_u8m1_i8m1(x_l);
  2557. vint8m1_t v0 = __riscv_vsub_vx_i8m1(x_ai, 16, vl);
  2558. vint8m1_t v1 = __riscv_vsub_vx_i8m1(x_li, 16, vl);
  2559. vint16m2_t vec_mul1 = __riscv_vwmul_vv_i16m2(v0, y0, vl);
  2560. vint16m2_t vec_mul2 = __riscv_vwmul_vv_i16m2(v1, y1, vl);
  2561. vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl);
  2562. vint32m1_t vs1 = __riscv_vwredsum_vs_i16m2_i32m1(vec_mul1, vec_zero, vl);
  2563. vint32m1_t vs2 = __riscv_vwredsum_vs_i16m2_i32m1(vec_mul2, vec_zero, vl);
  2564. int sumi = __riscv_vmv_x_s_i32m1_i32(vs1);
  2565. sumi += __riscv_vmv_x_s_i32m1_i32(vs2);
  2566. sumf += (GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d)) * sumi;
  2567. }
  2568. *s = sumf;
  2569. #else
  2570. // scalar
  2571. float sumf = 0.0;
  2572. for (int i = 0; i < nb; i++) {
  2573. uint32_t qh;
  2574. memcpy(&qh, x[i].qh, sizeof(qh));
  2575. int sumi = 0;
  2576. for (int j = 0; j < qk/2; ++j) {
  2577. const uint8_t xh_0 = ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4;
  2578. const uint8_t xh_1 = ((qh & (1u << (j + 16))) >> (j + 12));
  2579. const int32_t x0 = ((x[i].qs[j] & 0x0F) | xh_0) - 16;
  2580. const int32_t x1 = ((x[i].qs[j] >> 4) | xh_1) - 16;
  2581. sumi += (x0 * y[i].qs[j]) + (x1 * y[i].qs[j + qk/2]);
  2582. }
  2583. sumf += (GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d)) * sumi;
  2584. }
  2585. *s = sumf;
  2586. #endif
  2587. }
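// ggml_vec_dot_q5_1_q8_1: 5-bit unsigned quants (high bit from qh) with a per-block minimum,
// so each block contributes d_x*d_y*sumi + m_x*s_y as in the q4_1 case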
  2588. static void ggml_vec_dot_q5_1_q8_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
  2589. const int qk = QK8_1;
  2590. const int nb = n / qk;
  2591. assert(n % qk == 0);
  2592. assert(qk == QK5_1);
  2593. const block_q5_1 * restrict x = vx;
  2594. const block_q8_1 * restrict y = vy;
  2595. #if defined(__ARM_NEON)
  2596. float32x4_t sumv0 = vdupq_n_f32(0.0f);
  2597. float32x4_t sumv1 = vdupq_n_f32(0.0f);
  2598. float summs0 = 0.0f;
  2599. float summs1 = 0.0f;
  2600. uint32_t qh0;
  2601. uint32_t qh1;
  2602. uint64_t tmp0[4];
  2603. uint64_t tmp1[4];
  2604. GGML_ASSERT(nb % 2 == 0); // TODO: handle odd nb
  2605. for (int i = 0; i < nb; i += 2) {
  2606. const block_q5_1 * restrict x0 = &x[i];
  2607. const block_q5_1 * restrict x1 = &x[i + 1];
  2608. const block_q8_1 * restrict y0 = &y[i];
  2609. const block_q8_1 * restrict y1 = &y[i + 1];
  2610. const uint8x16_t m4b = vdupq_n_u8(0x0F);
  2611. summs0 += GGML_FP16_TO_FP32(x0->m) * y0->s;
  2612. summs1 += GGML_FP16_TO_FP32(x1->m) * y1->s;
  2613. // extract the 5th bit via lookup table ((b) << 4)
  2614. memcpy(&qh0, x0->qh, sizeof(qh0));
  2615. memcpy(&qh1, x1->qh, sizeof(qh1));
  2616. tmp0[0] = table_b2b_0[(qh0 >> 0) & 0xFF];
  2617. tmp0[1] = table_b2b_0[(qh0 >> 8) & 0xFF];
  2618. tmp0[2] = table_b2b_0[(qh0 >> 16) & 0xFF];
  2619. tmp0[3] = table_b2b_0[(qh0 >> 24) ];
  2620. tmp1[0] = table_b2b_0[(qh1 >> 0) & 0xFF];
  2621. tmp1[1] = table_b2b_0[(qh1 >> 8) & 0xFF];
  2622. tmp1[2] = table_b2b_0[(qh1 >> 16) & 0xFF];
  2623. tmp1[3] = table_b2b_0[(qh1 >> 24) ];
  2624. const int8x16_t qhl0 = vld1q_s8((const int8_t *)(tmp0 + 0));
  2625. const int8x16_t qhh0 = vld1q_s8((const int8_t *)(tmp0 + 2));
  2626. const int8x16_t qhl1 = vld1q_s8((const int8_t *)(tmp1 + 0));
  2627. const int8x16_t qhh1 = vld1q_s8((const int8_t *)(tmp1 + 2));
  2628. const uint8x16_t v0_0 = vld1q_u8(x0->qs);
  2629. const uint8x16_t v0_1 = vld1q_u8(x1->qs);
  2630. // 4-bit -> 8-bit
  2631. const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
  2632. const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
  2633. const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
  2634. const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
  2635. // add high bit
  2636. const int8x16_t v0_0lf = vorrq_s8(v0_0l, qhl0);
  2637. const int8x16_t v0_0hf = vorrq_s8(v0_0h, qhh0);
  2638. const int8x16_t v0_1lf = vorrq_s8(v0_1l, qhl1);
  2639. const int8x16_t v0_1hf = vorrq_s8(v0_1h, qhh1);
  2640. // load y
  2641. const int8x16_t v1_0l = vld1q_s8(y0->qs);
  2642. const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
  2643. const int8x16_t v1_1l = vld1q_s8(y1->qs);
  2644. const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);
  2645. #if defined(__ARM_FEATURE_DOTPROD)
  2646. sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(
  2647. vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l),
  2648. vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), GGML_FP16_TO_FP32(x0->d)*y0->d);
  2649. sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(
  2650. vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l),
  2651. vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_FP16_TO_FP32(x1->d)*y1->d);
  2652. #else
  2653. const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0lf), vget_low_s8 (v1_0l));
  2654. const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0lf), vget_high_s8(v1_0l));
  2655. const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hf), vget_low_s8 (v1_0h));
  2656. const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hf), vget_high_s8(v1_0h));
  2657. const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1lf), vget_low_s8 (v1_1l));
  2658. const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1lf), vget_high_s8(v1_1l));
  2659. const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1hf), vget_low_s8 (v1_1h));
  2660. const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1hf), vget_high_s8(v1_1h));
  2661. const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h));
  2662. const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h));
  2663. const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h));
  2664. const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h));
  2665. sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), GGML_FP16_TO_FP32(x0->d)*y0->d);
  2666. sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), GGML_FP16_TO_FP32(x1->d)*y1->d);
  2667. #endif
  2668. }
  2669. *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs0 + summs1;
  2670. #elif defined(__wasm_simd128__)
  2671. v128_t sumv = wasm_f32x4_splat(0.0f);
  2672. float summs = 0.0f;
  2673. uint32_t qh;
  2674. uint64_t tmp[4];
  2675. // TODO: check if unrolling this is better
  2676. for (int i = 0; i < nb; ++i) {
  2677. const block_q5_1 * restrict x0 = &x[i];
  2678. const block_q8_1 * restrict y0 = &y[i];
  2679. summs += GGML_FP16_TO_FP32(x0->m) * y0->s;
  2680. const v128_t m4b = wasm_i8x16_splat(0x0F);
  2681. // extract the 5th bit
  2682. memcpy(&qh, x0->qh, sizeof(qh));
  2683. tmp[0] = table_b2b_0[(qh >> 0) & 0xFF];
  2684. tmp[1] = table_b2b_0[(qh >> 8) & 0xFF];
  2685. tmp[2] = table_b2b_0[(qh >> 16) & 0xFF];
  2686. tmp[3] = table_b2b_0[(qh >> 24) ];
  2687. const v128_t qhl = wasm_v128_load(tmp + 0);
  2688. const v128_t qhh = wasm_v128_load(tmp + 2);
  2689. const v128_t v0 = wasm_v128_load(x0->qs);
  2690. // 4-bit -> 8-bit
  2691. const v128_t v0l = wasm_v128_and (v0, m4b);
  2692. const v128_t v0h = wasm_u8x16_shr(v0, 4);
  2693. // add high bit
  2694. const v128_t v0lf = wasm_v128_or(v0l, qhl);
  2695. const v128_t v0hf = wasm_v128_or(v0h, qhh);
  2696. // load y
  2697. const v128_t v1l = wasm_v128_load(y0->qs);
  2698. const v128_t v1h = wasm_v128_load(y0->qs + 16);
  2699. // int8x16 -> int16x8
  2700. const v128_t v0lfl = wasm_i16x8_extend_low_i8x16 (v0lf);
  2701. const v128_t v0lfh = wasm_i16x8_extend_high_i8x16(v0lf);
  2702. const v128_t v0hfl = wasm_i16x8_extend_low_i8x16 (v0hf);
  2703. const v128_t v0hfh = wasm_i16x8_extend_high_i8x16(v0hf);
  2704. const v128_t v1ll = wasm_i16x8_extend_low_i8x16 (v1l);
  2705. const v128_t v1lh = wasm_i16x8_extend_high_i8x16(v1l);
  2706. const v128_t v1hl = wasm_i16x8_extend_low_i8x16 (v1h);
  2707. const v128_t v1hh = wasm_i16x8_extend_high_i8x16(v1h);
  2708. // dot product
  2709. sumv = wasm_f32x4_add(sumv,
  2710. wasm_f32x4_mul(wasm_f32x4_convert_i32x4(wasm_i32x4_add(
  2711. wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0lfl, v1ll),
  2712. wasm_i32x4_dot_i16x8(v0lfh, v1lh)),
  2713. wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0hfl, v1hl),
  2714. wasm_i32x4_dot_i16x8(v0hfh, v1hh)))),
  2715. wasm_f32x4_splat(GGML_FP16_TO_FP32(x0->d) * y0->d)));
  2716. }
  2717. *s = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) +
  2718. wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3) + summs;
  2719. #elif defined(__AVX2__)
  2720. // Initialize accumulator with zeros
  2721. __m256 acc = _mm256_setzero_ps();
  2722. float summs = 0.0f;
  2723. // Main loop
  2724. for (int i = 0; i < nb; i++) {
  2725. const __m256 dx = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d));
  2726. summs += GGML_FP16_TO_FP32(x[i].m) * y[i].s;
  2727. __m256i bx = bytes_from_nibbles_32(x[i].qs);
  2728. __m256i bxhi = bytes_from_bits_32(x[i].qh);
  2729. bxhi = _mm256_and_si256(bxhi, _mm256_set1_epi8(0x10));
  2730. bx = _mm256_or_si256(bx, bxhi);
  2731. const __m256 dy = _mm256_set1_ps(y[i].d);
  2732. const __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);
  2733. const __m256 q = mul_sum_us8_pairs_float(bx, by);
  2734. acc = _mm256_fmadd_ps(q, _mm256_mul_ps(dx, dy), acc);
  2735. }
  2736. *s = hsum_float_8(acc) + summs;
  2737. #elif defined(__AVX__)
  2738. // Initialize accumulator with zeros
  2739. __m256 acc = _mm256_setzero_ps();
  2740. __m128i mask = _mm_set1_epi8(0x10);
  2741. float summs = 0.0f;
  2742. // Main loop
  2743. for (int i = 0; i < nb; i++) {
  2744. const __m256 dx = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d));
  2745. summs += GGML_FP16_TO_FP32(x[i].m) * y[i].s;
  2746. __m256i bx = bytes_from_nibbles_32(x[i].qs);
  2747. const __m256i bxhi = bytes_from_bits_32(x[i].qh);
  2748. __m128i bxhil = _mm256_castsi256_si128(bxhi);
  2749. __m128i bxhih = _mm256_extractf128_si256(bxhi, 1);
  2750. bxhil = _mm_and_si128(bxhil, mask);
  2751. bxhih = _mm_and_si128(bxhih, mask);
  2752. __m128i bxl = _mm256_castsi256_si128(bx);
  2753. __m128i bxh = _mm256_extractf128_si256(bx, 1);
  2754. bxl = _mm_or_si128(bxl, bxhil);
  2755. bxh = _mm_or_si128(bxh, bxhih);
  2756. bx = MM256_SET_M128I(bxh, bxl);
  2757. const __m256 dy = _mm256_set1_ps(y[i].d);
  2758. const __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);
  2759. const __m256 q = mul_sum_us8_pairs_float(bx, by);
  2760. acc = _mm256_add_ps(_mm256_mul_ps(q, _mm256_mul_ps(dx, dy)), acc);
  2761. }
  2762. *s = hsum_float_8(acc) + summs;
  2763. #elif defined(__riscv_v_intrinsic)
  2764. float sumf = 0.0;
  2765. uint32_t qh;
  2766. // These temp values are for shift operations
  2767. uint32_t temp_1[16] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
  2768. size_t vl = __riscv_vsetvl_e8m1(qk/2);
  2769. for (int i = 0; i < nb; i++) {
  2770. memcpy(&qh, x[i].qh, sizeof(uint32_t));
  2771. // temporary registers
  2772. vuint32m4_t vt_1 = __riscv_vle32_v_u32m4(temp_1, vl);
  2773. vuint32m4_t vt_2 = __riscv_vadd_vx_u32m4(vt_1, 12, vl);
  2774. // load qh
  2775. vuint32m4_t vqh = __riscv_vmv_v_x_u32m4(qh, vl);
  2776. // ((qh >> (j + 0)) << 4) & 0x10;
  2777. vuint32m4_t xhr_0 = __riscv_vsrl_vv_u32m4(vqh, vt_1, vl);
  2778. vuint32m4_t xhl_0 = __riscv_vsll_vx_u32m4(xhr_0, 4, vl);
  2779. vuint32m4_t xha_0 = __riscv_vand_vx_u32m4(xhl_0, 0x10, vl);
  2780. // ((qh >> (j + 12)) ) & 0x10;
  2781. vuint32m4_t xhr_1 = __riscv_vsrl_vv_u32m4(vqh, vt_2, vl);
  2782. vuint32m4_t xha_1 = __riscv_vand_vx_u32m4(xhr_1, 0x10, vl);
  2783. // narrowing
  2784. vuint16m2_t xhc_0 = __riscv_vncvt_x_x_w_u16m2(xha_0, vl);
  2785. vuint8m1_t xh_0 = __riscv_vncvt_x_x_w_u8m1(xhc_0, vl);
  2786. vuint16m2_t xhc_1 = __riscv_vncvt_x_x_w_u16m2(xha_1, vl);
  2787. vuint8m1_t xh_1 = __riscv_vncvt_x_x_w_u8m1(xhc_1, vl);
  2788. // load
  2789. vuint8m1_t tx = __riscv_vle8_v_u8m1(x[i].qs, vl);
  2790. vint8m1_t y0 = __riscv_vle8_v_i8m1(y[i].qs, vl);
  2791. vint8m1_t y1 = __riscv_vle8_v_i8m1(y[i].qs+16, vl);
  2792. vuint8m1_t x_at = __riscv_vand_vx_u8m1(tx, 0x0F, vl);
  2793. vuint8m1_t x_lt = __riscv_vsrl_vx_u8m1(tx, 0x04, vl);
  2794. vuint8m1_t x_a = __riscv_vor_vv_u8m1(x_at, xh_0, vl);
  2795. vuint8m1_t x_l = __riscv_vor_vv_u8m1(x_lt, xh_1, vl);
  2796. vint8m1_t v0 = __riscv_vreinterpret_v_u8m1_i8m1(x_a);
  2797. vint8m1_t v1 = __riscv_vreinterpret_v_u8m1_i8m1(x_l);
  2798. vint16m2_t vec_mul1 = __riscv_vwmul_vv_i16m2(v0, y0, vl);
  2799. vint16m2_t vec_mul2 = __riscv_vwmul_vv_i16m2(v1, y1, vl);
  2800. vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl);
  2801. vint32m1_t vs1 = __riscv_vwredsum_vs_i16m2_i32m1(vec_mul1, vec_zero, vl);
  2802. vint32m1_t vs2 = __riscv_vwredsum_vs_i16m2_i32m1(vec_mul2, vec_zero, vl);
  2803. int sumi = __riscv_vmv_x_s_i32m1_i32(vs1);
  2804. sumi += __riscv_vmv_x_s_i32m1_i32(vs2);
  2805. sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s;
  2806. }
  2807. *s = sumf;
  2808. #else
  2809. // scalar
  2810. float sumf = 0.0;
  2811. for (int i = 0; i < nb; i++) {
  2812. uint32_t qh;
  2813. memcpy(&qh, x[i].qh, sizeof(qh));
  2814. int sumi = 0;
  2815. for (int j = 0; j < qk/2; ++j) {
  2816. const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10;
  2817. const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10;
  2818. const int32_t x0 = (x[i].qs[j] & 0xF) | xh_0;
  2819. const int32_t x1 = (x[i].qs[j] >> 4) | xh_1;
  2820. sumi += (x0 * y[i].qs[j]) + (x1 * y[i].qs[j + qk/2]);
  2821. }
  2822. sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s;
  2823. }
  2824. *s = sumf;
  2825. #endif
  2826. }
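// ggml_vec_dot_q8_0_q8_0: both rows are already int8, so each block is a plain integer dot product
// scaled by d_x*d_y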
  2827. static void ggml_vec_dot_q8_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
  2828. const int qk = QK8_0;
  2829. const int nb = n / qk;
  2830. assert(n % qk == 0);
  2831. const block_q8_0 * restrict x = vx;
  2832. const block_q8_0 * restrict y = vy;
  2833. #if defined(__ARM_NEON)
  2834. float32x4_t sumv0 = vdupq_n_f32(0.0f);
  2835. float32x4_t sumv1 = vdupq_n_f32(0.0f);
  2836. GGML_ASSERT(nb % 2 == 0); // TODO: handle odd nb
  2837. for (int i = 0; i < nb; i += 2) {
  2838. const block_q8_0 * restrict x0 = &x[i + 0];
  2839. const block_q8_0 * restrict x1 = &x[i + 1];
  2840. const block_q8_0 * restrict y0 = &y[i + 0];
  2841. const block_q8_0 * restrict y1 = &y[i + 1];
  2842. const int8x16_t x0_0 = vld1q_s8(x0->qs);
  2843. const int8x16_t x0_1 = vld1q_s8(x0->qs + 16);
  2844. const int8x16_t x1_0 = vld1q_s8(x1->qs);
  2845. const int8x16_t x1_1 = vld1q_s8(x1->qs + 16);
  2846. // load y
  2847. const int8x16_t y0_0 = vld1q_s8(y0->qs);
  2848. const int8x16_t y0_1 = vld1q_s8(y0->qs + 16);
  2849. const int8x16_t y1_0 = vld1q_s8(y1->qs);
  2850. const int8x16_t y1_1 = vld1q_s8(y1->qs + 16);
  2851. #if defined(__ARM_FEATURE_DOTPROD)
  2852. sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(
  2853. vdotq_s32(vdupq_n_s32(0), x0_0, y0_0),
  2854. vdotq_s32(vdupq_n_s32(0), x0_1, y0_1))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
  2855. sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(
  2856. vdotq_s32(vdupq_n_s32(0), x1_0, y1_0),
  2857. vdotq_s32(vdupq_n_s32(0), x1_1, y1_1))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
  2858. #else
  2859. const int16x8_t p0_0 = vmull_s8(vget_low_s8 (x0_0), vget_low_s8 (y0_0));
  2860. const int16x8_t p0_1 = vmull_s8(vget_high_s8(x0_0), vget_high_s8(y0_0));
  2861. const int16x8_t p0_2 = vmull_s8(vget_low_s8 (x0_1), vget_low_s8 (y0_1));
  2862. const int16x8_t p0_3 = vmull_s8(vget_high_s8(x0_1), vget_high_s8(y0_1));
  2863. const int16x8_t p1_0 = vmull_s8(vget_low_s8 (x1_0), vget_low_s8 (y1_0));
  2864. const int16x8_t p1_1 = vmull_s8(vget_high_s8(x1_0), vget_high_s8(y1_0));
  2865. const int16x8_t p1_2 = vmull_s8(vget_low_s8 (x1_1), vget_low_s8 (y1_1));
  2866. const int16x8_t p1_3 = vmull_s8(vget_high_s8(x1_1), vget_high_s8(y1_1));
  2867. const int32x4_t p0 = vaddq_s32(vpaddlq_s16(p0_0), vpaddlq_s16(p0_1));
  2868. const int32x4_t p1 = vaddq_s32(vpaddlq_s16(p0_2), vpaddlq_s16(p0_3));
  2869. const int32x4_t p2 = vaddq_s32(vpaddlq_s16(p1_0), vpaddlq_s16(p1_1));
  2870. const int32x4_t p3 = vaddq_s32(vpaddlq_s16(p1_2), vpaddlq_s16(p1_3));
  2871. sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(p0, p1)), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
  2872. sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(p2, p3)), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
  2873. #endif
  2874. }
  2875. *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
  2876. #elif defined(__AVX2__) || defined(__AVX__)
  2877. // Initialize accumulator with zeros
  2878. __m256 acc = _mm256_setzero_ps();
  2879. // Main loop
  2880. for (int i = 0; i < nb; ++i) {
  2881. // Compute combined scale for the block
  2882. const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d));
  2883. __m256i bx = _mm256_loadu_si256((const __m256i *)x[i].qs);
  2884. __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);
  2885. const __m256 q = mul_sum_i8_pairs_float(bx, by);
  2886. // Multiply q with scale and accumulate
  2887. #if defined(__AVX2__)
  2888. acc = _mm256_fmadd_ps( d, q, acc );
  2889. #else
  2890. acc = _mm256_add_ps( _mm256_mul_ps( d, q ), acc );
  2891. #endif
  2892. }
  2893. *s = hsum_float_8(acc);
  2894. #elif defined(__riscv_v_intrinsic)
  2895. float sumf = 0.0;
  2896. size_t vl = __riscv_vsetvl_e8m1(qk);
  2897. for (int i = 0; i < nb; i++) {
  2898. // load elements
  2899. vint8m1_t bx = __riscv_vle8_v_i8m1(x[i].qs, vl);
  2900. vint8m1_t by = __riscv_vle8_v_i8m1(y[i].qs, vl);
  2901. vint16m2_t vw_mul = __riscv_vwmul_vv_i16m2(bx, by, vl);
  2902. vint32m1_t v_zero = __riscv_vmv_v_x_i32m1(0, vl);
  2903. vint32m1_t v_sum = __riscv_vwredsum_vs_i16m2_i32m1(vw_mul, v_zero, vl);
  2904. int sumi = __riscv_vmv_x_s_i32m1_i32(v_sum);
  2905. sumf += sumi*(GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d));
  2906. }
  2907. *s = sumf;
  2908. #else
  2909. // scalar
  2910. float sumf = 0.0;
  2911. for (int i = 0; i < nb; i++) {
  2912. int sumi = 0;
  2913. for (int j = 0; j < qk; j++) {
  2914. sumi += x[i].qs[j]*y[i].qs[j];
  2915. }
  2916. sumf += sumi*(GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d));
  2917. }
  2918. *s = sumf;
  2919. #endif
  2920. }
  2921. // compute GGML_VEC_DOT_UNROLL dot products at once
  2922. // xs - x row stride in bytes
  2923. inline static void ggml_vec_dot_f16_unroll(const int n, const int xs, float * restrict s, void * restrict xv, ggml_fp16_t * restrict y) {
  2924. ggml_float sumf[GGML_VEC_DOT_UNROLL] = { 0.0 };
  2925. ggml_fp16_t * restrict x[GGML_VEC_DOT_UNROLL];
  2926. for (int i = 0; i < GGML_VEC_DOT_UNROLL; ++i) {
  2927. x[i] = (ggml_fp16_t *) ((char *) xv + i*xs);
  2928. }
  2929. #if defined(GGML_SIMD)
  2930. const int np = (n & ~(GGML_F16_STEP - 1));
  2931. GGML_F16_VEC sum[GGML_VEC_DOT_UNROLL][GGML_F16_ARR] = { { GGML_F16_VEC_ZERO } };
  2932. GGML_F16_VEC ax[GGML_F16_ARR];
  2933. GGML_F16_VEC ay[GGML_F16_ARR];
  2934. for (int i = 0; i < np; i += GGML_F16_STEP) {
  2935. for (int j = 0; j < GGML_F16_ARR; j++) {
  2936. ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j);
  2937. for (int k = 0; k < GGML_VEC_DOT_UNROLL; ++k) {
  2938. ax[j] = GGML_F16_VEC_LOAD(x[k] + i + j*GGML_F16_EPR, j);
  2939. sum[k][j] = GGML_F16_VEC_FMA(sum[k][j], ax[j], ay[j]);
  2940. }
  2941. }
  2942. }
  2943. // reduce sum0..sum3 to sum0
  2944. for (int k = 0; k < GGML_VEC_DOT_UNROLL; ++k) {
  2945. GGML_F16_VEC_REDUCE(sumf[k], sum[k]);
  2946. }
  2947. // leftovers
  2948. for (int i = np; i < n; ++i) {
  2949. for (int j = 0; j < GGML_VEC_DOT_UNROLL; ++j) {
  2950. sumf[j] += (ggml_float)(GGML_FP16_TO_FP32(x[j][i])*GGML_FP16_TO_FP32(y[i]));
  2951. }
  2952. }
  2953. #else
  2954. for (int i = 0; i < n; ++i) {
  2955. for (int j = 0; j < GGML_VEC_DOT_UNROLL; ++j) {
  2956. sumf[j] += (ggml_float)(GGML_FP16_TO_FP32(x[j][i])*GGML_FP16_TO_FP32(y[i]));
  2957. }
  2958. }
  2959. #endif
  2960. for (int i = 0; i < GGML_VEC_DOT_UNROLL; ++i) {
  2961. s[i] = sumf[i];
  2962. }
  2963. }
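// ggml_vec_mad_f32: y[i] += x[i]*v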
  2964. inline static void ggml_vec_mad_f32(const int n, float * restrict y, const float * restrict x, const float v) {
  2965. #if defined(GGML_SIMD)
  2966. const int np = (n & ~(GGML_F32_STEP - 1));
  2967. GGML_F32_VEC vx = GGML_F32_VEC_SET1(v);
  2968. GGML_F32_VEC ax[GGML_F32_ARR];
  2969. GGML_F32_VEC ay[GGML_F32_ARR];
  2970. for (int i = 0; i < np; i += GGML_F32_STEP) {
  2971. for (int j = 0; j < GGML_F32_ARR; j++) {
  2972. ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR);
  2973. ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
  2974. ay[j] = GGML_F32_VEC_FMA(ay[j], ax[j], vx);
  2975. GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]);
  2976. }
  2977. }
  2978. // leftovers
  2979. for (int i = np; i < n; ++i) {
  2980. y[i] += x[i]*v;
  2981. }
  2982. #else
  2983. // scalar
  2984. for (int i = 0; i < n; ++i) {
  2985. y[i] += x[i]*v;
  2986. }
  2987. #endif
  2988. }
  2989. //inline static void ggml_vec_scale_f32(const int n, float * y, const float v) { for (int i = 0; i < n; ++i) y[i] *= v; }
  2990. inline static void ggml_vec_scale_f32(const int n, float * y, const float v) {
  2991. #if defined(GGML_USE_ACCELERATE)
  2992. vDSP_vsmul(y, 1, &v, y, 1, n);
  2993. #elif defined(GGML_SIMD)
  2994. const int np = (n & ~(GGML_F32_STEP - 1));
  2995. GGML_F32_VEC vx = GGML_F32_VEC_SET1(v);
  2996. GGML_F32_VEC ay[GGML_F32_ARR];
  2997. for (int i = 0; i < np; i += GGML_F32_STEP) {
  2998. for (int j = 0; j < GGML_F32_ARR; j++) {
  2999. ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
  3000. ay[j] = GGML_F32_VEC_MUL(ay[j], vx);
  3001. GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]);
  3002. }
  3003. }
  3004. // leftovers
  3005. for (int i = np; i < n; ++i) {
  3006. y[i] *= v;
  3007. }
  3008. #else
  3009. // scalar
  3010. for (int i = 0; i < n; ++i) {
  3011. y[i] *= v;
  3012. }
  3013. #endif
  3014. }
  3015. inline static void ggml_vec_norm_f32 (const int n, float * s, const float * x) { ggml_vec_dot_f32(n, s, x, x); *s = sqrtf(*s); }
  3016. inline static void ggml_vec_sqr_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i]*x[i]; }
  3017. inline static void ggml_vec_sqrt_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = sqrtf(x[i]); }
  3018. inline static void ggml_vec_log_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = logf(x[i]); }
  3019. inline static void ggml_vec_abs_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = fabsf(x[i]); }
  3020. inline static void ggml_vec_sgn_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 1.f : ((x[i] < 0.f) ? -1.f : 0.f); }
  3021. inline static void ggml_vec_step_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 1.f : 0.f; }
  3022. inline static void ggml_vec_tanh_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = tanhf(x[i]); }
  3023. inline static void ggml_vec_elu_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? x[i] : expf(x[i])-1; }
  3024. inline static void ggml_vec_relu_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? x[i] : 0.f; }
  3025. static const float GELU_COEF_A = 0.044715f;
  3026. static const float GELU_QUICK_COEF = -1.702f;
  3027. static const float SQRT_2_OVER_PI = 0.79788456080286535587989211986876f;
  3028. inline static float ggml_gelu_f32(float x) {
  3029. return 0.5f*x*(1.0f + tanhf(SQRT_2_OVER_PI*x*(1.0f + GELU_COEF_A*x*x)));
  3030. }
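// Note: ggml_gelu_f32 implements the tanh approximation of GELU:
//   gelu(x) = x*Phi(x) = 0.5*x*(1 + erf(x/sqrt(2)))
//          ~= 0.5*x*(1 + tanh(sqrt(2/pi)*(x + GELU_COEF_A*x^3)))
// with SQRT_2_OVER_PI = sqrt(2/pi) and GELU_COEF_A = 0.044715.
// For example, gelu(1.0f) evaluates to roughly 0.841 with both the exact and the
// approximate form.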
  3031. inline static void ggml_vec_gelu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
  3032. const uint16_t * i16 = (const uint16_t *) x;
  3033. for (int i = 0; i < n; ++i) {
  3034. y[i] = table_gelu_f16[i16[i]];
  3035. }
  3036. }
  3037. #ifdef GGML_GELU_FP16
  3038. inline static void ggml_vec_gelu_f32(const int n, float * y, const float * x) {
  3039. uint16_t t;
  3040. for (int i = 0; i < n; ++i) {
  3041. ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
  3042. memcpy(&t, &fp16, sizeof(uint16_t));
  3043. y[i] = GGML_FP16_TO_FP32(table_gelu_f16[t]);
  3044. }
  3045. }
  3046. #else
  3047. inline static void ggml_vec_gelu_f32(const int n, float * y, const float * x) {
  3048. for (int i = 0; i < n; ++i) {
  3049. y[i] = ggml_gelu_f32(x[i]);
  3050. }
  3051. }
  3052. #endif
  3053. inline static float ggml_gelu_quick_f32(float x) {
  3054. return x*(1.0f/(1.0f+expf(GELU_QUICK_COEF*x)));
  3055. }
  3056. //inline static void ggml_vec_gelu_quick_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
  3057. // const uint16_t * i16 = (const uint16_t *) x;
  3058. // for (int i = 0; i < n; ++i) {
  3059. // y[i] = table_gelu_quick_f16[i16[i]];
  3060. // }
  3061. //}
  3062. #ifdef GGML_GELU_QUICK_FP16
  3063. inline static void ggml_vec_gelu_quick_f32(const int n, float * y, const float * x) {
  3064. uint16_t t;
  3065. for (int i = 0; i < n; ++i) {
  3066. ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
  3067. memcpy(&t, &fp16, sizeof(uint16_t));
  3068. y[i] = GGML_FP16_TO_FP32(table_gelu_quick_f16[t]);
  3069. }
  3070. }
  3071. #else
  3072. inline static void ggml_vec_gelu_quick_f32(const int n, float * y, const float * x) {
  3073. for (int i = 0; i < n; ++i) {
  3074. y[i] = ggml_gelu_quick_f32(x[i]);
  3075. }
  3076. }
  3077. #endif
  3078. // Sigmoid Linear Unit (SiLU) function
  3079. inline static float ggml_silu_f32(float x) {
  3080. return x/(1.0f + expf(-x));
  3081. }
  3082. //inline static void ggml_vec_silu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
  3083. // const uint16_t * i16 = (const uint16_t *) x;
  3084. // for (int i = 0; i < n; ++i) {
  3085. // y[i] = table_silu_f16[i16[i]];
  3086. // }
  3087. //}
  3088. #ifdef GGML_SILU_FP16
  3089. inline static void ggml_vec_silu_f32(const int n, float * y, const float * x) {
  3090. uint16_t t;
  3091. for (int i = 0; i < n; ++i) {
  3092. ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
  3093. memcpy(&t, &fp16, sizeof(uint16_t));
  3094. y[i] = GGML_FP16_TO_FP32(table_silu_f16[t]);
  3095. }
  3096. }
  3097. #else
  3098. inline static void ggml_vec_silu_f32(const int n, float * y, const float * x) {
  3099. for (int i = 0; i < n; ++i) {
  3100. y[i] = ggml_silu_f32(x[i]);
  3101. }
  3102. }
  3103. #endif
  3104. inline static float ggml_silu_backward_f32(float x, float dy) {
  3105. const float s = 1.0f/(1.0f + expf(-x));
  3106. return dy*s*(1.0f + x*(1.0f - s));
  3107. }
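// Derivation of the expression above: with s = sigma(x) = 1/(1 + exp(-x)),
//   silu(x)  = x*s
//   silu'(x) = s + x*s*(1 - s) = s*(1 + x*(1 - s))
// so the gradient propagated to x is dy*s*(1 + x*(1 - s)), which is exactly what
// ggml_silu_backward_f32 returns.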
  3108. #ifdef GGML_SILU_FP16
  3109. inline static void ggml_vec_silu_backward_f32(const int n, float * dx, const float * x, const float * dy) {
  3110. for (int i = 0; i < n; ++i) {
3111. // the forward pass did not use x[i] directly but its f16-rounded equivalent,
3112. // so take the derivative at the f16-rounded value of x[i]:
  3113. ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
  3114. float usedx = GGML_FP16_TO_FP32(fp16);
  3115. dx[i] = ggml_silu_backward_f32(usedx, dy[i]);
  3116. }
  3117. }
  3118. #else
  3119. inline static void ggml_vec_silu_backward_f32(const int n, float * dx, const float * x, const float * dy) {
  3120. for (int i = 0; i < n; ++i) {
  3121. dx[i] = ggml_silu_backward_f32(x[i], dy[i]);
  3122. }
  3123. }
  3124. #endif
  3125. inline static void ggml_vec_sum_f32(const int n, float * s, const float * x) {
  3126. #ifndef GGML_USE_ACCELERATE
  3127. ggml_float sum = 0.0;
  3128. for (int i = 0; i < n; ++i) {
  3129. sum += (ggml_float)x[i];
  3130. }
  3131. *s = sum;
  3132. #else
  3133. vDSP_sve(x, 1, s, n);
  3134. #endif
  3135. }
  3136. inline static void ggml_vec_sum_f32_ggf(const int n, ggml_float * s, const float * x) {
  3137. ggml_float sum = 0.0;
  3138. for (int i = 0; i < n; ++i) {
  3139. sum += (ggml_float)x[i];
  3140. }
  3141. *s = sum;
  3142. }
  3143. inline static void ggml_vec_sum_f16_ggf(const int n, float * s, const ggml_fp16_t * x) {
  3144. float sum = 0.0f;
  3145. for (int i = 0; i < n; ++i) {
  3146. sum += GGML_FP16_TO_FP32(x[i]);
  3147. }
  3148. *s = sum;
  3149. }
  3150. inline static void ggml_vec_max_f32(const int n, float * s, const float * x) {
  3151. #ifndef GGML_USE_ACCELERATE
  3152. float max = -INFINITY;
  3153. for (int i = 0; i < n; ++i) {
  3154. max = MAX(max, x[i]);
  3155. }
  3156. *s = max;
  3157. #else
  3158. vDSP_maxv(x, 1, s, n);
  3159. #endif
  3160. }
  3161. inline static void ggml_vec_norm_inv_f32(const int n, float * s, const float * x) {
  3162. ggml_vec_norm_f32(n, s, x);
  3163. *s = 1.f/(*s);
  3164. }
  3165. inline static void ggml_vec_argmax_f32(const int n, int * s, const float * x) {
  3166. float max = -INFINITY;
  3167. int idx = 0;
  3168. for (int i = 0; i < n; ++i) {
  3169. max = MAX(max, x[i]);
  3170. if (max == x[i]) { idx = i; }
  3171. }
  3172. *s = idx;
  3173. }
  3174. //
  3175. // data types
  3176. //
  3177. static const char * GGML_OP_NAME[GGML_OP_COUNT] = {
  3178. "NONE",
  3179. "DUP",
  3180. "ADD",
  3181. "ADD1",
  3182. "ACC",
  3183. "SUB",
  3184. "MUL",
  3185. "DIV",
  3186. "SQR",
  3187. "SQRT",
  3188. "LOG",
  3189. "SUM",
  3190. "SUM_ROWS",
  3191. "MEAN",
  3192. "ARGMAX",
  3193. "REPEAT",
  3194. "REPEAT_BACK",
  3195. "CONCAT",
  3196. "SILU_BACK",
  3197. "NORM",
  3198. "RMS_NORM",
  3199. "RMS_NORM_BACK",
  3200. "GROUP_NORM",
  3201. "MUL_MAT",
  3202. "OUT_PROD",
  3203. "SCALE",
  3204. "SET",
  3205. "CPY",
  3206. "CONT",
  3207. "RESHAPE",
  3208. "VIEW",
  3209. "PERMUTE",
  3210. "TRANSPOSE",
  3211. "GET_ROWS",
  3212. "GET_ROWS_BACK",
  3213. "DIAG",
  3214. "DIAG_MASK_INF",
  3215. "DIAG_MASK_ZERO",
  3216. "SOFT_MAX",
  3217. "SOFT_MAX_BACK",
  3218. "ROPE",
  3219. "ROPE_BACK",
  3220. "ALIBI",
  3221. "CLAMP",
  3222. "CONV_1D",
  3223. "CONV_2D",
  3224. "CONV_TRANSPOSE_2D",
  3225. "POOL_1D",
  3226. "POOL_2D",
  3227. "UPSCALE",
  3228. "FLASH_ATTN",
  3229. "FLASH_FF",
  3230. "FLASH_ATTN_BACK",
  3231. "WIN_PART",
  3232. "WIN_UNPART",
  3233. "GET_REL_POS",
  3234. "ADD_REL_POS",
  3235. "UNARY",
  3236. "MAP_UNARY",
  3237. "MAP_BINARY",
  3238. "MAP_CUSTOM1_F32",
  3239. "MAP_CUSTOM2_F32",
  3240. "MAP_CUSTOM3_F32",
  3241. "MAP_CUSTOM1",
  3242. "MAP_CUSTOM2",
  3243. "MAP_CUSTOM3",
  3244. "CROSS_ENTROPY_LOSS",
  3245. "CROSS_ENTROPY_LOSS_BACK",
  3246. };
  3247. static_assert(GGML_OP_COUNT == 68, "GGML_OP_COUNT != 68");
  3248. static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = {
  3249. "none",
  3250. "x",
  3251. "x+y",
  3252. "x+y",
  3253. "view(x,nb,offset)+=y->x",
  3254. "x-y",
  3255. "x*y",
  3256. "x/y",
  3257. "x^2",
  3258. "√x",
  3259. "log(x)",
  3260. "Σx",
  3261. "Σx_k",
  3262. "Σx/n",
  3263. "argmax(x)",
  3264. "repeat(x)",
  3265. "repeat_back(x)",
  3266. "concat(x, y)",
  3267. "silu_back(x)",
  3268. "norm(x)",
  3269. "rms_norm(x)",
  3270. "rms_norm_back(x)",
  3271. "group_norm(x)",
  3272. "X*Y",
  3273. "X*Y",
  3274. "x*v",
  3275. "y-\\>view(x)",
  3276. "x-\\>y",
  3277. "cont(x)",
  3278. "reshape(x)",
  3279. "view(x)",
  3280. "permute(x)",
  3281. "transpose(x)",
  3282. "get_rows(x)",
  3283. "get_rows_back(x)",
  3284. "diag(x)",
  3285. "diag_mask_inf(x)",
  3286. "diag_mask_zero(x)",
  3287. "soft_max(x)",
  3288. "soft_max_back(x)",
  3289. "rope(x)",
  3290. "rope_back(x)",
  3291. "alibi(x)",
  3292. "clamp(x)",
  3293. "conv_1d(x)",
  3294. "conv_2d(x)",
  3295. "conv_transpose_2d(x)",
  3296. "pool_1d(x)",
  3297. "pool_2d(x)",
  3298. "upscale(x)",
  3299. "flash_attn(x)",
  3300. "flash_ff(x)",
  3301. "flash_attn_back(x)",
  3302. "win_part(x)",
  3303. "win_unpart(x)",
  3304. "get_rel_pos(x)",
  3305. "add_rel_pos(x)",
  3306. "unary(x)",
  3307. "f(x)",
  3308. "f(x,y)",
  3309. "custom_f32(x)",
  3310. "custom_f32(x,y)",
  3311. "custom_f32(x,y,z)",
  3312. "custom(x)",
  3313. "custom(x,y)",
  3314. "custom(x,y,z)",
  3315. "cross_entropy_loss(x,y)",
  3316. "cross_entropy_loss_back(x,y)",
  3317. };
  3318. static_assert(GGML_OP_COUNT == 68, "GGML_OP_COUNT != 68");
  3319. static_assert(GGML_OP_POOL_COUNT == 2, "GGML_OP_POOL_COUNT != 2");
  3320. static_assert(sizeof(struct ggml_object)%GGML_MEM_ALIGN == 0, "ggml_object size must be a multiple of GGML_MEM_ALIGN");
  3321. static_assert(sizeof(struct ggml_tensor)%GGML_MEM_ALIGN == 0, "ggml_tensor size must be a multiple of GGML_MEM_ALIGN");
3322. // WARN:
3323. // Misconfiguration can lead to problems that are hard to reason about:
3324. // * At best it crashes or outputs nonsense.
3325. // * At worst the output is only slightly wrong and hard to notice.
3326. //
3327. // An op has to enable INIT or FINALIZE when any of its branches needs that pass.
3328. // Take care with compile options (e.g., GGML_USE_xxx).
  3329. static bool GGML_OP_HAS_INIT [GGML_OP_COUNT] = { 0 };
  3330. static bool GGML_OP_HAS_FINALIZE[GGML_OP_COUNT] = { 0 };
  3331. static void ggml_setup_op_has_task_pass(void) {
  3332. { // INIT
  3333. bool * p = GGML_OP_HAS_INIT;
  3334. p[GGML_OP_ACC ] = true;
  3335. p[GGML_OP_MUL_MAT ] = true;
  3336. p[GGML_OP_OUT_PROD ] = true;
  3337. p[GGML_OP_SET ] = true;
  3338. p[GGML_OP_GET_ROWS_BACK ] = true;
  3339. p[GGML_OP_DIAG_MASK_INF ] = true;
  3340. p[GGML_OP_DIAG_MASK_ZERO ] = true;
  3341. p[GGML_OP_CONV_1D ] = true;
  3342. p[GGML_OP_CONV_2D ] = true;
  3343. p[GGML_OP_CONV_TRANSPOSE_2D ] = true;
  3344. p[GGML_OP_FLASH_ATTN_BACK ] = true;
  3345. p[GGML_OP_CROSS_ENTROPY_LOSS ] = true;
  3346. p[GGML_OP_ADD_REL_POS ] = true;
  3347. }
  3348. { // FINALIZE
  3349. bool * p = GGML_OP_HAS_FINALIZE;
  3350. p[GGML_OP_CROSS_ENTROPY_LOSS ] = true;
  3351. }
  3352. }
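// Ops flagged above get an extra GGML_TASK_INIT pass before the main
// GGML_TASK_COMPUTE pass (and GGML_TASK_FINALIZE after it) when the graph is
// computed. As an illustration only, a hypothetical new op that must zero or
// pre-pack its destination before the parallel compute pass would register here:
//
//   p[GGML_OP_MY_NEW_OP] = true; // GGML_OP_MY_NEW_OP is a placeholder, not a real op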
  3353. //
  3354. // ggml context
  3355. //
  3356. struct ggml_context {
  3357. size_t mem_size;
  3358. void * mem_buffer;
  3359. bool mem_buffer_owned;
  3360. bool no_alloc;
  3361. bool no_alloc_save; // this is used to save the no_alloc state when using scratch buffers
  3362. int n_objects;
  3363. struct ggml_object * objects_begin;
  3364. struct ggml_object * objects_end;
  3365. struct ggml_scratch scratch;
  3366. struct ggml_scratch scratch_save;
  3367. };
  3368. struct ggml_context_container {
  3369. bool used;
  3370. struct ggml_context context;
  3371. };
  3372. //
  3373. // NUMA support
  3374. //
  3375. #define GGML_NUMA_MAX_NODES 8
  3376. #define GGML_NUMA_MAX_CPUS 512
  3377. struct ggml_numa_node {
  3378. uint32_t cpus[GGML_NUMA_MAX_CPUS]; // hardware threads on this node
  3379. uint32_t n_cpus;
  3380. };
  3381. struct ggml_numa_nodes {
  3382. struct ggml_numa_node nodes[GGML_NUMA_MAX_NODES];
  3383. uint32_t n_nodes;
  3384. uint32_t total_cpus; // hardware threads on system
  3385. };
  3386. //
  3387. // ggml state
  3388. //
  3389. struct ggml_state {
  3390. struct ggml_context_container contexts[GGML_MAX_CONTEXTS];
  3391. struct ggml_numa_nodes numa;
  3392. };
  3393. // global state
  3394. static struct ggml_state g_state;
  3395. static atomic_int g_state_barrier = 0;
  3396. // barrier via spin lock
  3397. inline static void ggml_critical_section_start(void) {
  3398. int processing = atomic_fetch_add(&g_state_barrier, 1);
  3399. while (processing > 0) {
  3400. // wait for other threads to finish
  3401. atomic_fetch_sub(&g_state_barrier, 1);
  3402. sched_yield(); // TODO: reconsider this
  3403. processing = atomic_fetch_add(&g_state_barrier, 1);
  3404. }
  3405. }
3406. // TODO: make this execute automatically somehow
  3407. // some sort of "sentry" mechanism
  3408. inline static void ggml_critical_section_end(void) {
  3409. atomic_fetch_sub(&g_state_barrier, 1);
  3410. }
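// Usage sketch (illustrative only, guarded with #if 0): the spin-lock barrier above
// serializes short critical sections that touch the shared g_state, as done in
// ggml_init and ggml_free further below.
#if 0
static void example_critical_section(void) {
    ggml_critical_section_start();
    // ... read or modify shared global state here ...
    ggml_critical_section_end();
}
#endif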
  3411. void ggml_numa_init(void) {
  3412. if (g_state.numa.n_nodes > 0) {
  3413. fprintf(stderr, "ggml_numa_init: NUMA already initialized\n");
  3414. return;
  3415. }
  3416. #ifdef __linux__
  3417. struct stat st;
  3418. char path[256];
  3419. int rv;
  3420. // enumerate nodes
  3421. while (g_state.numa.n_nodes < GGML_NUMA_MAX_NODES) {
  3422. rv = snprintf(path, sizeof(path), "/sys/devices/system/node/node%u", g_state.numa.n_nodes);
  3423. GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path));
  3424. if (stat(path, &st) != 0) { break; }
  3425. ++g_state.numa.n_nodes;
  3426. }
  3427. // enumerate CPUs
  3428. while (g_state.numa.total_cpus < GGML_NUMA_MAX_CPUS) {
  3429. rv = snprintf(path, sizeof(path), "/sys/devices/system/cpu/cpu%u", g_state.numa.total_cpus);
  3430. GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path));
  3431. if (stat(path, &st) != 0) { break; }
  3432. ++g_state.numa.total_cpus;
  3433. }
  3434. GGML_PRINT_DEBUG("found %u numa nodes, %u CPUs\n", g_state.numa.n_nodes, g_state.numa.total_cpus);
  3435. if (g_state.numa.n_nodes < 1 || g_state.numa.total_cpus < 1) {
  3436. g_state.numa.n_nodes = 0;
  3437. return;
  3438. }
  3439. for (uint32_t n = 0; n < g_state.numa.n_nodes; ++n) {
  3440. struct ggml_numa_node * node = &g_state.numa.nodes[n];
  3441. GGML_PRINT_DEBUG("CPUs on node %u:", n);
  3442. node->n_cpus = 0;
  3443. for (uint32_t c = 0; c < g_state.numa.total_cpus; ++c) {
  3444. rv = snprintf(path, sizeof(path), "/sys/devices/system/node/node%u/cpu%u", n, c);
  3445. GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path));
  3446. if (stat(path, &st) == 0) {
  3447. node->cpus[node->n_cpus++] = c;
  3448. GGML_PRINT_DEBUG(" %u", c);
  3449. }
  3450. }
  3451. GGML_PRINT_DEBUG("\n");
  3452. }
  3453. if (ggml_is_numa()) {
  3454. FILE *fptr = fopen("/proc/sys/kernel/numa_balancing", "r");
  3455. if (fptr != NULL) {
  3456. char buf[42];
  3457. if (fgets(buf, sizeof(buf), fptr) && strncmp(buf, "0\n", sizeof(buf)) != 0) {
  3458. GGML_PRINT("WARNING: /proc/sys/kernel/numa_balancing is enabled, this has been observed to impair performance\n");
  3459. }
  3460. fclose(fptr);
  3461. }
  3462. }
  3463. #else
  3464. // TODO
  3465. #endif
  3466. }
  3467. bool ggml_is_numa(void) {
  3468. return g_state.numa.n_nodes > 1;
  3469. }
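// Usage sketch (illustrative only, guarded with #if 0): NUMA support is opt-in; a
// host application would typically call ggml_numa_init() once at startup and can
// then query ggml_is_numa() to decide whether thread/memory pinning is worthwhile.
#if 0
static void example_numa_setup(void) {
    ggml_numa_init(); // enumerates nodes and CPUs (Linux sysfs only in this version)
    if (ggml_is_numa()) {
        // more than one NUMA node detected
    }
}
#endif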
  3470. ////////////////////////////////////////////////////////////////////////////////
  3471. void ggml_print_object(const struct ggml_object * obj) {
  3472. GGML_PRINT(" - ggml_object: type = %d, offset = %zu, size = %zu, next = %p\n",
  3473. obj->type, obj->offs, obj->size, (const void *) obj->next);
  3474. }
  3475. void ggml_print_objects(const struct ggml_context * ctx) {
  3476. struct ggml_object * obj = ctx->objects_begin;
  3477. GGML_PRINT("%s: objects in context %p:\n", __func__, (const void *) ctx);
  3478. while (obj != NULL) {
  3479. ggml_print_object(obj);
  3480. obj = obj->next;
  3481. }
  3482. GGML_PRINT("%s: --- end ---\n", __func__);
  3483. }
  3484. int64_t ggml_nelements(const struct ggml_tensor * tensor) {
  3485. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  3486. return tensor->ne[0]*tensor->ne[1]*tensor->ne[2]*tensor->ne[3];
  3487. }
  3488. int64_t ggml_nrows(const struct ggml_tensor * tensor) {
  3489. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  3490. return tensor->ne[1]*tensor->ne[2]*tensor->ne[3];
  3491. }
  3492. size_t ggml_nbytes(const struct ggml_tensor * tensor) {
  3493. size_t nbytes;
  3494. size_t blck_size = ggml_blck_size(tensor->type);
  3495. if (blck_size == 1) {
  3496. nbytes = ggml_type_size(tensor->type);
  3497. for (int i = 0; i < GGML_MAX_DIMS; ++i) {
  3498. nbytes += (tensor->ne[i] - 1)*tensor->nb[i];
  3499. }
  3500. }
  3501. else {
  3502. nbytes = tensor->ne[0]*tensor->nb[0]/blck_size;
  3503. for (int i = 1; i < GGML_MAX_DIMS; ++i) {
  3504. nbytes += (tensor->ne[i] - 1)*tensor->nb[i];
  3505. }
  3506. }
  3507. return nbytes;
  3508. }
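// Worked example (assuming a contiguous layout): a 4x3 GGML_TYPE_F32 tensor has
// blck_size == 1, ne = { 4, 3, 1, 1 } and nb = { 4, 16, 48, 48 }, so
//   nbytes = 4 + (4-1)*4 + (3-1)*16 + 0 + 0 = 48 bytes = 4*3*sizeof(float)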
  3509. size_t ggml_nbytes_pad(const struct ggml_tensor * tensor) {
  3510. return GGML_PAD(ggml_nbytes(tensor), GGML_MEM_ALIGN);
  3511. }
  3512. size_t ggml_nbytes_split(const struct ggml_tensor * tensor, int nrows_split) {
  3513. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  3514. return (nrows_split*tensor->ne[0]*ggml_type_size(tensor->type))/ggml_blck_size(tensor->type);
  3515. }
  3516. int ggml_blck_size(enum ggml_type type) {
  3517. return type_traits[type].blck_size;
  3518. }
  3519. size_t ggml_type_size(enum ggml_type type) {
  3520. return type_traits[type].type_size;
  3521. }
  3522. float ggml_type_sizef(enum ggml_type type) {
  3523. return ((float)(type_traits[type].type_size))/type_traits[type].blck_size;
  3524. }
  3525. const char * ggml_type_name(enum ggml_type type) {
  3526. return type_traits[type].type_name;
  3527. }
  3528. bool ggml_is_quantized(enum ggml_type type) {
  3529. return type_traits[type].is_quantized;
  3530. }
  3531. const char * ggml_op_name(enum ggml_op op) {
  3532. return GGML_OP_NAME[op];
  3533. }
  3534. const char * ggml_op_symbol(enum ggml_op op) {
  3535. return GGML_OP_SYMBOL[op];
  3536. }
  3537. size_t ggml_element_size(const struct ggml_tensor * tensor) {
  3538. return ggml_type_size(tensor->type);
  3539. }
  3540. static inline bool ggml_is_scalar(const struct ggml_tensor * tensor) {
  3541. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  3542. return tensor->ne[0] == 1 && tensor->ne[1] == 1 && tensor->ne[2] == 1 && tensor->ne[3] == 1;
  3543. }
  3544. static inline bool ggml_is_vector(const struct ggml_tensor * tensor) {
  3545. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  3546. return tensor->ne[1] == 1 && tensor->ne[2] == 1 && tensor->ne[3] == 1;
  3547. }
  3548. static inline bool ggml_is_matrix(const struct ggml_tensor * tensor) {
  3549. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  3550. return tensor->ne[2] == 1 && tensor->ne[3] == 1;
  3551. }
  3552. static inline bool ggml_can_mul_mat(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
  3553. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  3554. return (t0->ne[0] == t1->ne[0]) &&
  3555. (t1->ne[2]%t0->ne[2] == 0) && // verify t0 is broadcastable
  3556. (t1->ne[3]%t0->ne[3] == 0);
  3557. }
  3558. static inline bool ggml_can_out_prod(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
  3559. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  3560. return
  3561. (t0->ne[1] == t1->ne[1]) &&
  3562. (t0->ne[2] == t1->ne[2]) &&
  3563. (t0->ne[3] == t1->ne[3]);
  3564. }
  3565. enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype) {
  3566. enum ggml_type wtype = GGML_TYPE_COUNT;
  3567. switch (ftype) {
  3568. case GGML_FTYPE_ALL_F32: wtype = GGML_TYPE_F32; break;
  3569. case GGML_FTYPE_MOSTLY_F16: wtype = GGML_TYPE_F16; break;
  3570. case GGML_FTYPE_MOSTLY_Q4_0: wtype = GGML_TYPE_Q4_0; break;
  3571. case GGML_FTYPE_MOSTLY_Q4_1: wtype = GGML_TYPE_Q4_1; break;
  3572. case GGML_FTYPE_MOSTLY_Q5_0: wtype = GGML_TYPE_Q5_0; break;
  3573. case GGML_FTYPE_MOSTLY_Q5_1: wtype = GGML_TYPE_Q5_1; break;
  3574. case GGML_FTYPE_MOSTLY_Q8_0: wtype = GGML_TYPE_Q8_0; break;
  3575. case GGML_FTYPE_MOSTLY_Q2_K: wtype = GGML_TYPE_Q2_K; break;
  3576. case GGML_FTYPE_MOSTLY_Q3_K: wtype = GGML_TYPE_Q3_K; break;
  3577. case GGML_FTYPE_MOSTLY_Q4_K: wtype = GGML_TYPE_Q4_K; break;
  3578. case GGML_FTYPE_MOSTLY_Q5_K: wtype = GGML_TYPE_Q5_K; break;
  3579. case GGML_FTYPE_MOSTLY_Q6_K: wtype = GGML_TYPE_Q6_K; break;
  3580. case GGML_FTYPE_UNKNOWN: wtype = GGML_TYPE_COUNT; break;
  3581. case GGML_FTYPE_MOSTLY_Q4_1_SOME_F16: wtype = GGML_TYPE_COUNT; break;
  3582. }
  3583. GGML_ASSERT(wtype != GGML_TYPE_COUNT);
  3584. return wtype;
  3585. }
  3586. size_t ggml_tensor_overhead(void) {
  3587. return GGML_OBJECT_SIZE + GGML_TENSOR_SIZE;
  3588. }
  3589. bool ggml_is_transposed(const struct ggml_tensor * tensor) {
  3590. return tensor->nb[0] > tensor->nb[1];
  3591. }
  3592. bool ggml_is_contiguous(const struct ggml_tensor * tensor) {
  3593. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  3594. return
  3595. tensor->nb[0] == ggml_type_size(tensor->type) &&
  3596. tensor->nb[1] == (tensor->nb[0]*tensor->ne[0])/ggml_blck_size(tensor->type) &&
  3597. tensor->nb[2] == tensor->nb[1]*tensor->ne[1] &&
  3598. tensor->nb[3] == tensor->nb[2]*tensor->ne[2];
  3599. }
  3600. static inline bool ggml_is_contiguous_except_dim_1(const struct ggml_tensor * tensor) {
  3601. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  3602. return
  3603. tensor->nb[0] == ggml_type_size(tensor->type) &&
  3604. tensor->nb[2] == tensor->nb[1]*tensor->ne[1] &&
  3605. tensor->nb[3] == tensor->nb[2]*tensor->ne[2];
  3606. }
  3607. bool ggml_is_permuted(const struct ggml_tensor * tensor) {
  3608. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  3609. return tensor->nb[0] > tensor->nb[1] || tensor->nb[1] > tensor->nb[2] || tensor->nb[2] > tensor->nb[3];
  3610. }
  3611. static inline bool ggml_is_padded_1d(const struct ggml_tensor * tensor) {
  3612. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  3613. return
  3614. tensor->nb[0] == ggml_type_size(tensor->type) &&
  3615. tensor->nb[2] == tensor->nb[1]*tensor->ne[1] &&
  3616. tensor->nb[3] == tensor->nb[2]*tensor->ne[2];
  3617. }
  3618. bool ggml_are_same_shape(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
  3619. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  3620. return
  3621. (t0->ne[0] == t1->ne[0] ) &&
  3622. (t0->ne[1] == t1->ne[1] ) &&
  3623. (t0->ne[2] == t1->ne[2] ) &&
  3624. (t0->ne[3] == t1->ne[3] );
  3625. }
3626. // check if t1 can be represented as a repetition of t0
  3627. static inline bool ggml_can_repeat(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
  3628. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  3629. return
  3630. (t1->ne[0]%t0->ne[0] == 0) &&
  3631. (t1->ne[1]%t0->ne[1] == 0) &&
  3632. (t1->ne[2]%t0->ne[2] == 0) &&
  3633. (t1->ne[3]%t0->ne[3] == 0);
  3634. }
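// For example, t0 with ne = { 2, 3, 1, 1 } can repeat into t1 with ne = { 4, 6, 1, 1 }
// (every dimension of t1 is a multiple of the corresponding dimension of t0),
// but not into t1 with ne = { 5, 6, 1, 1 }.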
  3635. static inline bool ggml_can_repeat_rows(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
  3636. static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
  3637. return (t0->ne[0] == t1->ne[0]) && ggml_can_repeat(t0, t1);
  3638. }
  3639. static inline int ggml_up32(int n) {
  3640. return (n + 31) & ~31;
  3641. }
  3642. //static inline int ggml_up64(int n) {
  3643. // return (n + 63) & ~63;
  3644. //}
  3645. static inline int ggml_up(int n, int m) {
  3646. // assert m is a power of 2
  3647. GGML_ASSERT((m & (m - 1)) == 0);
  3648. return (n + m - 1) & ~(m - 1);
  3649. }
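// For example: ggml_up32(33) == 64, ggml_up(13, 8) == 16, ggml_up(16, 8) == 16.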
  3650. // assert that pointer is aligned to GGML_MEM_ALIGN
  3651. #define ggml_assert_aligned(ptr) \
  3652. GGML_ASSERT(((uintptr_t) (ptr))%GGML_MEM_ALIGN == 0)
  3653. ////////////////////////////////////////////////////////////////////////////////
  3654. struct ggml_context * ggml_init(struct ggml_init_params params) {
  3655. // make this function thread safe
  3656. ggml_critical_section_start();
  3657. static bool is_first_call = true;
  3658. if (is_first_call) {
  3659. // initialize time system (required on Windows)
  3660. ggml_time_init();
  3661. // initialize GELU, Quick GELU, SILU and EXP F32 tables
  3662. {
  3663. const uint64_t t_start = ggml_time_us(); UNUSED(t_start);
  3664. ggml_fp16_t ii;
  3665. for (int i = 0; i < (1 << 16); ++i) {
  3666. uint16_t ui = i;
  3667. memcpy(&ii, &ui, sizeof(ii));
  3668. const float f = table_f32_f16[i] = GGML_COMPUTE_FP16_TO_FP32(ii);
  3669. table_gelu_f16[i] = GGML_FP32_TO_FP16(ggml_gelu_f32(f));
  3670. table_gelu_quick_f16[i] = GGML_FP32_TO_FP16(ggml_gelu_quick_f32(f));
  3671. table_silu_f16[i] = GGML_FP32_TO_FP16(ggml_silu_f32(f));
  3672. table_exp_f16[i] = GGML_FP32_TO_FP16(expf(f));
  3673. }
  3674. const uint64_t t_end = ggml_time_us(); UNUSED(t_end);
  3675. GGML_PRINT_DEBUG("%s: GELU, Quick GELU, SILU and EXP tables initialized in %f ms\n", __func__, (t_end - t_start)/1000.0f);
  3676. }
  3677. // initialize g_state
  3678. {
  3679. const uint64_t t_start = ggml_time_us(); UNUSED(t_start);
  3680. g_state = (struct ggml_state) {
  3681. /*.contexts =*/ { { 0 } },
  3682. /*.numa =*/ {
  3683. .n_nodes = 0,
  3684. .total_cpus = 0,
  3685. },
  3686. };
  3687. for (int i = 0; i < GGML_MAX_CONTEXTS; ++i) {
  3688. g_state.contexts[i].used = false;
  3689. }
  3690. const uint64_t t_end = ggml_time_us(); UNUSED(t_end);
  3691. GGML_PRINT_DEBUG("%s: g_state initialized in %f ms\n", __func__, (t_end - t_start)/1000.0f);
  3692. }
  3693. #if defined(GGML_USE_CUBLAS)
  3694. ggml_init_cublas();
  3695. #elif defined(GGML_USE_CLBLAST)
  3696. ggml_cl_init();
  3697. #endif
  3698. ggml_setup_op_has_task_pass();
  3699. is_first_call = false;
  3700. }
  3701. // find non-used context in g_state
  3702. struct ggml_context * ctx = NULL;
  3703. for (int i = 0; i < GGML_MAX_CONTEXTS; i++) {
  3704. if (!g_state.contexts[i].used) {
  3705. g_state.contexts[i].used = true;
  3706. ctx = &g_state.contexts[i].context;
  3707. GGML_PRINT_DEBUG("%s: found unused context %d\n", __func__, i);
  3708. break;
  3709. }
  3710. }
  3711. if (ctx == NULL) {
  3712. GGML_PRINT_DEBUG("%s: no unused context found\n", __func__);
  3713. ggml_critical_section_end();
  3714. return NULL;
  3715. }
3716. // allow calling ggml_init with a size of 0
  3717. if (params.mem_size == 0) {
  3718. params.mem_size = GGML_MEM_ALIGN;
  3719. }
  3720. const size_t mem_size = params.mem_buffer ? params.mem_size : GGML_PAD(params.mem_size, GGML_MEM_ALIGN);
  3721. *ctx = (struct ggml_context) {
  3722. /*.mem_size =*/ mem_size,
  3723. /*.mem_buffer =*/ params.mem_buffer ? params.mem_buffer : GGML_ALIGNED_MALLOC(mem_size),
  3724. /*.mem_buffer_owned =*/ params.mem_buffer ? false : true,
  3725. /*.no_alloc =*/ params.no_alloc,
  3726. /*.no_alloc_save =*/ params.no_alloc,
  3727. /*.n_objects =*/ 0,
  3728. /*.objects_begin =*/ NULL,
  3729. /*.objects_end =*/ NULL,
  3730. /*.scratch =*/ { 0, 0, NULL, },
  3731. /*.scratch_save =*/ { 0, 0, NULL, },
  3732. };
  3733. GGML_ASSERT(ctx->mem_buffer != NULL);
  3734. ggml_assert_aligned(ctx->mem_buffer);
  3735. GGML_PRINT_DEBUG("%s: context initialized\n", __func__);
  3736. ggml_critical_section_end();
  3737. return ctx;
  3738. }
  3739. void ggml_free(struct ggml_context * ctx) {
  3740. // make this function thread safe
  3741. ggml_critical_section_start();
  3742. bool found = false;
  3743. for (int i = 0; i < GGML_MAX_CONTEXTS; i++) {
  3744. if (&g_state.contexts[i].context == ctx) {
  3745. g_state.contexts[i].used = false;
  3746. GGML_PRINT_DEBUG("%s: context %d has been freed. memory used = %zu\n",
  3747. __func__, i, ggml_used_mem(ctx));
  3748. if (ctx->mem_buffer_owned) {
  3749. GGML_ALIGNED_FREE(ctx->mem_buffer);
  3750. }
  3751. found = true;
  3752. break;
  3753. }
  3754. }
  3755. if (!found) {
  3756. GGML_PRINT_DEBUG("%s: context not found\n", __func__);
  3757. }
  3758. ggml_critical_section_end();
  3759. }
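// Usage sketch (illustrative only, guarded with #if 0): the typical lifecycle of a
// context is init -> allocate tensors -> compute -> free; the 16 MB arena size is
// an arbitrary example value.
#if 0
static void example_context_lifecycle(void) {
    struct ggml_init_params params = {
        /*.mem_size   =*/ 16*1024*1024, // all allocations below come from this arena
        /*.mem_buffer =*/ NULL,         // let ggml allocate (and later free) the buffer
        /*.no_alloc   =*/ false,
    };
    struct ggml_context * ctx = ggml_init(params);
    struct ggml_tensor * a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 4, 3);
    ggml_set_f32(a, 1.0f);
    ggml_free(ctx); // releases the arena and everything allocated from it
}
#endif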
  3760. size_t ggml_used_mem(const struct ggml_context * ctx) {
  3761. return ctx->objects_end == NULL ? 0 : ctx->objects_end->offs + ctx->objects_end->size;
  3762. }
  3763. size_t ggml_set_scratch(struct ggml_context * ctx, struct ggml_scratch scratch) {
  3764. const size_t result = ctx->scratch.data ? ctx->scratch.offs : 0;
  3765. ctx->scratch = scratch;
  3766. return result;
  3767. }
  3768. bool ggml_get_no_alloc(struct ggml_context * ctx) {
  3769. return ctx->no_alloc;
  3770. }
  3771. void ggml_set_no_alloc(struct ggml_context * ctx, bool no_alloc) {
  3772. ctx->no_alloc = no_alloc;
  3773. }
  3774. void * ggml_get_mem_buffer(const struct ggml_context * ctx) {
  3775. return ctx->mem_buffer;
  3776. }
  3777. size_t ggml_get_mem_size(const struct ggml_context * ctx) {
  3778. return ctx->mem_size;
  3779. }
  3780. size_t ggml_get_max_tensor_size(const struct ggml_context * ctx) {
  3781. size_t max_size = 0;
  3782. struct ggml_object * obj = ctx->objects_begin;
  3783. while (obj != NULL) {
  3784. if (obj->type == GGML_OBJECT_TENSOR) {
  3785. struct ggml_tensor * tensor = (struct ggml_tensor *) ((char *) ctx->mem_buffer + obj->offs);
  3786. const size_t size = ggml_nbytes(tensor);
  3787. if (max_size < size) {
  3788. max_size = size;
  3789. }
  3790. }
  3791. obj = obj->next;
  3792. }
  3793. return max_size;
  3794. }
  3795. // IMPORTANT:
  3796. // when creating "opt" tensors, always save and load the scratch buffer
3797. // this is an error-prone process, but it is necessary to support in-place
  3798. // operators when using scratch buffers
  3799. // TODO: implement a better way
  3800. static void ggml_scratch_save(struct ggml_context * ctx) {
  3801. // this is needed to allow opt tensors to store their data
  3802. // TODO: again, need to find a better way
  3803. ctx->no_alloc_save = ctx->no_alloc;
  3804. ctx->no_alloc = false;
  3805. ctx->scratch_save = ctx->scratch;
  3806. ctx->scratch.data = NULL;
  3807. }
  3808. static void ggml_scratch_load(struct ggml_context * ctx) {
  3809. ctx->no_alloc = ctx->no_alloc_save;
  3810. ctx->scratch = ctx->scratch_save;
  3811. }
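// The intended pattern (the one used by ggml_new_i32/ggml_new_f32 further below) is
// to wrap the creation of small "opt" tensors so that they are allocated from the
// main pool instead of the scratch buffer:
//
//   ggml_scratch_save(ctx);
//   struct ggml_tensor * t = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
//   ggml_scratch_load(ctx);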
  3812. ////////////////////////////////////////////////////////////////////////////////
  3813. static struct ggml_object * ggml_new_object(struct ggml_context * ctx, enum ggml_object_type type, size_t size) {
  3814. // always insert objects at the end of the context's memory pool
  3815. struct ggml_object * obj_cur = ctx->objects_end;
  3816. const size_t cur_offs = obj_cur == NULL ? 0 : obj_cur->offs;
  3817. const size_t cur_size = obj_cur == NULL ? 0 : obj_cur->size;
  3818. const size_t cur_end = cur_offs + cur_size;
  3819. // align to GGML_MEM_ALIGN
  3820. size_t size_needed = GGML_PAD(size, GGML_MEM_ALIGN);
  3821. char * const mem_buffer = ctx->mem_buffer;
  3822. struct ggml_object * const obj_new = (struct ggml_object *)(mem_buffer + cur_end);
  3823. if (cur_end + size_needed + GGML_OBJECT_SIZE > ctx->mem_size) {
  3824. GGML_PRINT("%s: not enough space in the context's memory pool (needed %zu, available %zu)\n",
  3825. __func__, cur_end + size_needed, ctx->mem_size);
  3826. assert(false);
  3827. return NULL;
  3828. }
  3829. *obj_new = (struct ggml_object) {
  3830. .offs = cur_end + GGML_OBJECT_SIZE,
  3831. .size = size_needed,
  3832. .next = NULL,
  3833. .type = type,
  3834. };
  3835. ggml_assert_aligned(mem_buffer + obj_new->offs);
  3836. if (obj_cur != NULL) {
  3837. obj_cur->next = obj_new;
  3838. } else {
  3839. // this is the first object in this context
  3840. ctx->objects_begin = obj_new;
  3841. }
  3842. ctx->objects_end = obj_new;
  3843. //printf("%s: inserted new object at %zu, size = %zu\n", __func__, cur_end, obj_new->size);
  3844. return obj_new;
  3845. }
  3846. static struct ggml_tensor * ggml_new_tensor_impl(
  3847. struct ggml_context * ctx,
  3848. enum ggml_type type,
  3849. int n_dims,
  3850. const int64_t * ne,
  3851. struct ggml_tensor * view_src,
  3852. size_t view_offs) {
  3853. assert(n_dims >= 1 && n_dims <= GGML_MAX_DIMS);
  3854. // find the base tensor and absolute offset
  3855. if (view_src != NULL && view_src->view_src != NULL) {
  3856. view_offs += view_src->view_offs;
  3857. view_src = view_src->view_src;
  3858. }
  3859. size_t data_size = ggml_type_size(type)*(ne[0]/ggml_blck_size(type));
  3860. for (int i = 1; i < n_dims; i++) {
  3861. data_size *= ne[i];
  3862. }
  3863. GGML_ASSERT(view_src == NULL || data_size + view_offs <= ggml_nbytes(view_src));
  3864. void * data = view_src != NULL ? view_src->data : NULL;
  3865. if (data != NULL) {
  3866. data = (char *) data + view_offs;
  3867. }
  3868. size_t obj_alloc_size = 0;
  3869. if (view_src == NULL && !ctx->no_alloc) {
  3870. if (ctx->scratch.data != NULL) {
  3871. // allocate tensor data in the scratch buffer
  3872. if (ctx->scratch.offs + data_size > ctx->scratch.size) {
  3873. GGML_PRINT("%s: not enough space in the scratch memory pool (needed %zu, available %zu)\n",
  3874. __func__, ctx->scratch.offs + data_size, ctx->scratch.size);
  3875. assert(false);
  3876. return NULL;
  3877. }
  3878. data = (char * const) ctx->scratch.data + ctx->scratch.offs;
  3879. ctx->scratch.offs += data_size;
  3880. } else {
  3881. // allocate tensor data in the context's memory pool
  3882. obj_alloc_size = data_size;
  3883. }
  3884. }
  3885. struct ggml_object * const obj_new = ggml_new_object(ctx, GGML_OBJECT_TENSOR, GGML_TENSOR_SIZE + obj_alloc_size);
  3886. // TODO: for recoverable errors, we would need to free the data allocated from the scratch buffer here
  3887. struct ggml_tensor * const result = (struct ggml_tensor *)((char *)ctx->mem_buffer + obj_new->offs);
  3888. *result = (struct ggml_tensor) {
  3889. /*.type =*/ type,
  3890. /*.backend =*/ GGML_BACKEND_CPU,
  3891. /*.n_dims =*/ n_dims,
  3892. /*.ne =*/ { 1, 1, 1, 1 },
  3893. /*.nb =*/ { 0, 0, 0, 0 },
  3894. /*.op =*/ GGML_OP_NONE,
  3895. /*.op_params =*/ { 0 },
  3896. /*.is_param =*/ false,
  3897. /*.grad =*/ NULL,
  3898. /*.src =*/ { NULL },
  3899. /*.perf_runs =*/ 0,
  3900. /*.perf_cycles =*/ 0,
  3901. /*.perf_time_us =*/ 0,
  3902. /*.view_src =*/ view_src,
  3903. /*.view_offs =*/ view_offs,
  3904. /*.data =*/ obj_alloc_size > 0 ? (void *)(result + 1) : data,
  3905. /*.name =*/ { 0 },
  3906. /*.extra =*/ NULL,
  3907. /*.padding =*/ { 0 },
  3908. };
  3909. // TODO: this should not be needed as long as we don't rely on aligned SIMD loads
  3910. //ggml_assert_aligned(result->data);
  3911. for (int i = 0; i < n_dims; i++) {
  3912. result->ne[i] = ne[i];
  3913. }
  3914. result->nb[0] = ggml_type_size(type);
  3915. result->nb[1] = result->nb[0]*(result->ne[0]/ggml_blck_size(type));
  3916. for (int i = 2; i < GGML_MAX_DIMS; i++) {
  3917. result->nb[i] = result->nb[i - 1]*result->ne[i - 1];
  3918. }
  3919. ctx->n_objects++;
  3920. return result;
  3921. }
  3922. struct ggml_tensor * ggml_new_tensor(
  3923. struct ggml_context * ctx,
  3924. enum ggml_type type,
  3925. int n_dims,
  3926. const int64_t * ne) {
  3927. return ggml_new_tensor_impl(ctx, type, n_dims, ne, NULL, 0);
  3928. }
  3929. struct ggml_tensor * ggml_new_tensor_1d(
  3930. struct ggml_context * ctx,
  3931. enum ggml_type type,
  3932. int64_t ne0) {
  3933. return ggml_new_tensor(ctx, type, 1, &ne0);
  3934. }
  3935. struct ggml_tensor * ggml_new_tensor_2d(
  3936. struct ggml_context * ctx,
  3937. enum ggml_type type,
  3938. int64_t ne0,
  3939. int64_t ne1) {
  3940. const int64_t ne[2] = { ne0, ne1 };
  3941. return ggml_new_tensor(ctx, type, 2, ne);
  3942. }
  3943. struct ggml_tensor * ggml_new_tensor_3d(
  3944. struct ggml_context * ctx,
  3945. enum ggml_type type,
  3946. int64_t ne0,
  3947. int64_t ne1,
  3948. int64_t ne2) {
  3949. const int64_t ne[3] = { ne0, ne1, ne2 };
  3950. return ggml_new_tensor(ctx, type, 3, ne);
  3951. }
  3952. struct ggml_tensor * ggml_new_tensor_4d(
  3953. struct ggml_context * ctx,
  3954. enum ggml_type type,
  3955. int64_t ne0,
  3956. int64_t ne1,
  3957. int64_t ne2,
  3958. int64_t ne3) {
  3959. const int64_t ne[4] = { ne0, ne1, ne2, ne3 };
  3960. return ggml_new_tensor(ctx, type, 4, ne);
  3961. }
  3962. struct ggml_tensor * ggml_new_i32(struct ggml_context * ctx, int32_t value) {
  3963. ggml_scratch_save(ctx);
  3964. struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 1);
  3965. ggml_scratch_load(ctx);
  3966. ggml_set_i32(result, value);
  3967. return result;
  3968. }
  3969. struct ggml_tensor * ggml_new_f32(struct ggml_context * ctx, float value) {
  3970. ggml_scratch_save(ctx);
  3971. struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
  3972. ggml_scratch_load(ctx);
  3973. ggml_set_f32(result, value);
  3974. return result;
  3975. }
  3976. struct ggml_tensor * ggml_dup_tensor(struct ggml_context * ctx, const struct ggml_tensor * src) {
  3977. return ggml_new_tensor(ctx, src->type, src->n_dims, src->ne);
  3978. }
  3979. static void ggml_set_op_params(struct ggml_tensor * tensor, const void * params, size_t params_size) {
  3980. GGML_ASSERT(tensor != NULL); // silence -Warray-bounds warnings
  3981. assert(params_size <= GGML_MAX_OP_PARAMS);
  3982. memcpy(tensor->op_params, params, params_size);
  3983. }
  3984. static int32_t ggml_get_op_params_i32(const struct ggml_tensor * tensor, uint32_t i) {
  3985. assert(i < GGML_MAX_OP_PARAMS / sizeof(int32_t));
  3986. return ((const int32_t *)(tensor->op_params))[i];
  3987. }
  3988. static void ggml_set_op_params_i32(struct ggml_tensor * tensor, uint32_t i, int32_t value) {
  3989. assert(i < GGML_MAX_OP_PARAMS / sizeof(int32_t));
  3990. ((int32_t *)(tensor->op_params))[i] = value;
  3991. }
  3992. struct ggml_tensor * ggml_set_zero(struct ggml_tensor * tensor) {
  3993. memset(tensor->data, 0, ggml_nbytes(tensor));
  3994. return tensor;
  3995. }
  3996. struct ggml_tensor * ggml_set_i32 (struct ggml_tensor * tensor, int32_t value) {
  3997. const int n = ggml_nrows(tensor);
  3998. const int nc = tensor->ne[0];
  3999. const size_t n1 = tensor->nb[1];
  4000. char * const data = tensor->data;
  4001. switch (tensor->type) {
  4002. case GGML_TYPE_I8:
  4003. {
  4004. assert(tensor->nb[0] == sizeof(int8_t));
  4005. for (int i = 0; i < n; i++) {
  4006. ggml_vec_set_i8(nc, (int8_t *)(data + i*n1), value);
  4007. }
  4008. } break;
  4009. case GGML_TYPE_I16:
  4010. {
  4011. assert(tensor->nb[0] == sizeof(int16_t));
  4012. for (int i = 0; i < n; i++) {
  4013. ggml_vec_set_i16(nc, (int16_t *)(data + i*n1), value);
  4014. }
  4015. } break;
  4016. case GGML_TYPE_I32:
  4017. {
  4018. assert(tensor->nb[0] == sizeof(int32_t));
  4019. for (int i = 0; i < n; i++) {
  4020. ggml_vec_set_i32(nc, (int32_t *)(data + i*n1), value);
  4021. }
  4022. } break;
  4023. case GGML_TYPE_F16:
  4024. {
  4025. assert(tensor->nb[0] == sizeof(ggml_fp16_t));
  4026. for (int i = 0; i < n; i++) {
  4027. ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), GGML_FP32_TO_FP16(value));
  4028. }
  4029. } break;
  4030. case GGML_TYPE_F32:
  4031. {
  4032. assert(tensor->nb[0] == sizeof(float));
  4033. for (int i = 0; i < n; i++) {
  4034. ggml_vec_set_f32(nc, (float *)(data + i*n1), value);
  4035. }
  4036. } break;
  4037. default:
  4038. {
  4039. GGML_ASSERT(false);
  4040. } break;
  4041. }
  4042. return tensor;
  4043. }
  4044. struct ggml_tensor * ggml_set_f32(struct ggml_tensor * tensor, float value) {
  4045. const int n = ggml_nrows(tensor);
  4046. const int nc = tensor->ne[0];
  4047. const size_t n1 = tensor->nb[1];
  4048. char * const data = tensor->data;
  4049. switch (tensor->type) {
  4050. case GGML_TYPE_I8:
  4051. {
  4052. assert(tensor->nb[0] == sizeof(int8_t));
  4053. for (int i = 0; i < n; i++) {
  4054. ggml_vec_set_i8(nc, (int8_t *)(data + i*n1), value);
  4055. }
  4056. } break;
  4057. case GGML_TYPE_I16:
  4058. {
  4059. assert(tensor->nb[0] == sizeof(int16_t));
  4060. for (int i = 0; i < n; i++) {
  4061. ggml_vec_set_i16(nc, (int16_t *)(data + i*n1), value);
  4062. }
  4063. } break;
  4064. case GGML_TYPE_I32:
  4065. {
  4066. assert(tensor->nb[0] == sizeof(int32_t));
  4067. for (int i = 0; i < n; i++) {
  4068. ggml_vec_set_i32(nc, (int32_t *)(data + i*n1), value);
  4069. }
  4070. } break;
  4071. case GGML_TYPE_F16:
  4072. {
  4073. assert(tensor->nb[0] == sizeof(ggml_fp16_t));
  4074. for (int i = 0; i < n; i++) {
  4075. ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), GGML_FP32_TO_FP16(value));
  4076. }
  4077. } break;
  4078. case GGML_TYPE_F32:
  4079. {
  4080. assert(tensor->nb[0] == sizeof(float));
  4081. for (int i = 0; i < n; i++) {
  4082. ggml_vec_set_f32(nc, (float *)(data + i*n1), value);
  4083. }
  4084. } break;
  4085. default:
  4086. {
  4087. GGML_ASSERT(false);
  4088. } break;
  4089. }
  4090. return tensor;
  4091. }
  4092. int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i) {
  4093. switch (tensor->type) {
  4094. case GGML_TYPE_I8:
  4095. {
  4096. GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
  4097. return ((int8_t *)(tensor->data))[i];
  4098. } break;
  4099. case GGML_TYPE_I16:
  4100. {
  4101. GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
  4102. return ((int16_t *)(tensor->data))[i];
  4103. } break;
  4104. case GGML_TYPE_I32:
  4105. {
  4106. GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
  4107. return ((int32_t *)(tensor->data))[i];
  4108. } break;
  4109. case GGML_TYPE_F16:
  4110. {
  4111. GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
  4112. return GGML_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]);
  4113. } break;
  4114. case GGML_TYPE_F32:
  4115. {
  4116. GGML_ASSERT(tensor->nb[0] == sizeof(float));
  4117. return ((float *)(tensor->data))[i];
  4118. } break;
  4119. default:
  4120. {
  4121. GGML_ASSERT(false);
  4122. } break;
  4123. }
  4124. return 0.0f;
  4125. }
  4126. void ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value) {
  4127. switch (tensor->type) {
  4128. case GGML_TYPE_I8:
  4129. {
  4130. GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
  4131. ((int8_t *)(tensor->data))[i] = value;
  4132. } break;
  4133. case GGML_TYPE_I16:
  4134. {
  4135. GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
  4136. ((int16_t *)(tensor->data))[i] = value;
  4137. } break;
  4138. case GGML_TYPE_I32:
  4139. {
  4140. GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
  4141. ((int32_t *)(tensor->data))[i] = value;
  4142. } break;
  4143. case GGML_TYPE_F16:
  4144. {
  4145. GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
  4146. ((ggml_fp16_t *)(tensor->data))[i] = GGML_FP32_TO_FP16(value);
  4147. } break;
  4148. case GGML_TYPE_F32:
  4149. {
  4150. GGML_ASSERT(tensor->nb[0] == sizeof(float));
  4151. ((float *)(tensor->data))[i] = value;
  4152. } break;
  4153. default:
  4154. {
  4155. GGML_ASSERT(false);
  4156. } break;
  4157. }
  4158. }
  4159. float ggml_get_f32_1d(const struct ggml_tensor * tensor, int i) {
  4160. switch (tensor->type) {
  4161. case GGML_TYPE_I8:
  4162. {
  4163. GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
  4164. return ((int8_t *)(tensor->data))[i];
  4165. } break;
  4166. case GGML_TYPE_I16:
  4167. {
  4168. GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
  4169. return ((int16_t *)(tensor->data))[i];
  4170. } break;
  4171. case GGML_TYPE_I32:
  4172. {
  4173. GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
  4174. return ((int32_t *)(tensor->data))[i];
  4175. } break;
  4176. case GGML_TYPE_F16:
  4177. {
  4178. GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
  4179. return GGML_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]);
  4180. } break;
  4181. case GGML_TYPE_F32:
  4182. {
  4183. GGML_ASSERT(tensor->nb[0] == sizeof(float));
  4184. return ((float *)(tensor->data))[i];
  4185. } break;
  4186. default:
  4187. {
  4188. GGML_ASSERT(false);
  4189. } break;
  4190. }
  4191. return 0.0f;
  4192. }
  4193. void ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value) {
  4194. switch (tensor->type) {
  4195. case GGML_TYPE_I8:
  4196. {
  4197. GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
  4198. ((int8_t *)(tensor->data))[i] = value;
  4199. } break;
  4200. case GGML_TYPE_I16:
  4201. {
  4202. GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
  4203. ((int16_t *)(tensor->data))[i] = value;
  4204. } break;
  4205. case GGML_TYPE_I32:
  4206. {
  4207. GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
  4208. ((int32_t *)(tensor->data))[i] = value;
  4209. } break;
  4210. case GGML_TYPE_F16:
  4211. {
  4212. GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
  4213. ((ggml_fp16_t *)(tensor->data))[i] = GGML_FP32_TO_FP16(value);
  4214. } break;
  4215. case GGML_TYPE_F32:
  4216. {
  4217. GGML_ASSERT(tensor->nb[0] == sizeof(float));
  4218. ((float *)(tensor->data))[i] = value;
  4219. } break;
  4220. default:
  4221. {
  4222. GGML_ASSERT(false);
  4223. } break;
  4224. }
  4225. }
  4226. void * ggml_get_data(const struct ggml_tensor * tensor) {
  4227. return tensor->data;
  4228. }
  4229. float * ggml_get_data_f32(const struct ggml_tensor * tensor) {
  4230. assert(tensor->type == GGML_TYPE_F32);
  4231. return (float *)(tensor->data);
  4232. }
  4233. enum ggml_unary_op ggml_get_unary_op(const struct ggml_tensor * tensor) {
  4234. GGML_ASSERT(tensor->op == GGML_OP_UNARY);
  4235. return (enum ggml_unary_op) ggml_get_op_params_i32(tensor, 0);
  4236. }
  4237. const char * ggml_get_name(const struct ggml_tensor * tensor) {
  4238. return tensor->name;
  4239. }
  4240. struct ggml_tensor * ggml_set_name(struct ggml_tensor * tensor, const char * name) {
  4241. strncpy(tensor->name, name, sizeof(tensor->name));
  4242. tensor->name[sizeof(tensor->name) - 1] = '\0';
  4243. return tensor;
  4244. }
  4245. struct ggml_tensor * ggml_format_name(struct ggml_tensor * tensor, const char * fmt, ...) {
  4246. va_list args;
  4247. va_start(args, fmt);
  4248. vsnprintf(tensor->name, sizeof(tensor->name), fmt, args);
  4249. va_end(args);
  4250. return tensor;
  4251. }
  4252. struct ggml_tensor * ggml_view_tensor(
  4253. struct ggml_context * ctx,
  4254. struct ggml_tensor * src) {
  4255. struct ggml_tensor * result = ggml_new_tensor_impl(ctx, src->type, src->n_dims, src->ne, src, 0);
  4256. ggml_format_name(result, "%s (view)", src->name);
  4257. for (int i = 0; i < GGML_MAX_DIMS; i++) {
  4258. result->nb[i] = src->nb[i];
  4259. }
  4260. return result;
  4261. }
  4262. struct ggml_tensor * ggml_get_tensor(struct ggml_context * ctx, const char * name) {
  4263. struct ggml_object * obj = ctx->objects_begin;
  4264. char * const mem_buffer = ctx->mem_buffer;
  4265. while (obj != NULL) {
  4266. if (obj->type == GGML_OBJECT_TENSOR) {
  4267. struct ggml_tensor * cur = (struct ggml_tensor *)(mem_buffer + obj->offs);
  4268. if (strcmp(cur->name, name) == 0) {
  4269. return cur;
  4270. }
  4271. }
  4272. obj = obj->next;
  4273. }
  4274. return NULL;
  4275. }
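// Usage sketch (illustrative only, guarded with #if 0): names assigned with
// ggml_set_name (or ggml_format_name) can later be used to look a tensor up again.
#if 0
static void example_named_lookup(struct ggml_context * ctx) {
    struct ggml_tensor * t = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 8);
    ggml_set_name(t, "embeddings");
    // ... later ...
    struct ggml_tensor * same = ggml_get_tensor(ctx, "embeddings"); // same == t
    (void) same;
}
#endif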
  4276. ////////////////////////////////////////////////////////////////////////////////
  4277. // ggml_dup
  4278. static struct ggml_tensor * ggml_dup_impl(
  4279. struct ggml_context * ctx,
  4280. struct ggml_tensor * a,
  4281. bool inplace) {
  4282. bool is_node = false;
  4283. if (!inplace && (a->grad)) {
  4284. is_node = true;
  4285. }
  4286. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4287. result->op = GGML_OP_DUP;
  4288. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4289. result->src[0] = a;
  4290. return result;
  4291. }
  4292. struct ggml_tensor * ggml_dup(
  4293. struct ggml_context * ctx,
  4294. struct ggml_tensor * a) {
  4295. return ggml_dup_impl(ctx, a, false);
  4296. }
  4297. struct ggml_tensor * ggml_dup_inplace(
  4298. struct ggml_context * ctx,
  4299. struct ggml_tensor * a) {
  4300. return ggml_dup_impl(ctx, a, true);
  4301. }
  4302. // ggml_add
  4303. static struct ggml_tensor * ggml_add_impl(
  4304. struct ggml_context * ctx,
  4305. struct ggml_tensor * a,
  4306. struct ggml_tensor * b,
  4307. bool inplace) {
  4308. // TODO: support less-strict constraint
  4309. // GGML_ASSERT(ggml_can_repeat(b, a));
  4310. GGML_ASSERT(ggml_can_repeat_rows(b, a));
  4311. bool is_node = false;
  4312. if (!inplace && (a->grad || b->grad)) {
  4313. // TODO: support backward pass for broadcasting
  4314. GGML_ASSERT(ggml_are_same_shape(a, b));
  4315. is_node = true;
  4316. }
  4317. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4318. result->op = GGML_OP_ADD;
  4319. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4320. result->src[0] = a;
  4321. result->src[1] = b;
  4322. return result;
  4323. }
  4324. struct ggml_tensor * ggml_add(
  4325. struct ggml_context * ctx,
  4326. struct ggml_tensor * a,
  4327. struct ggml_tensor * b) {
  4328. return ggml_add_impl(ctx, a, b, false);
  4329. }
  4330. struct ggml_tensor * ggml_add_inplace(
  4331. struct ggml_context * ctx,
  4332. struct ggml_tensor * a,
  4333. struct ggml_tensor * b) {
  4334. return ggml_add_impl(ctx, a, b, true);
  4335. }
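// Note on the is_node/grad pattern used by ggml_add_impl (and by the other op
// constructors that follow): the result only gets a gradient tensor when one of
// its sources already has one, i.e. when it is part of a graph that will be
// differentiated. A plain forward-only use such as
//
//   struct ggml_tensor * c = ggml_add(ctx, a, b);
//
// leaves c->grad == NULL because neither a->grad nor b->grad is set.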
  4336. // ggml_add1
  4337. static struct ggml_tensor * ggml_add1_impl(
  4338. struct ggml_context * ctx,
  4339. struct ggml_tensor * a,
  4340. struct ggml_tensor * b,
  4341. bool inplace) {
  4342. GGML_ASSERT(ggml_is_scalar(b));
  4343. GGML_ASSERT(ggml_is_padded_1d(a));
  4344. bool is_node = false;
  4345. if (a->grad || b->grad) {
  4346. is_node = true;
  4347. }
  4348. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4349. result->op = GGML_OP_ADD1;
  4350. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4351. result->src[0] = a;
  4352. result->src[1] = b;
  4353. return result;
  4354. }
  4355. struct ggml_tensor * ggml_add1(
  4356. struct ggml_context * ctx,
  4357. struct ggml_tensor * a,
  4358. struct ggml_tensor * b) {
  4359. return ggml_add1_impl(ctx, a, b, false);
  4360. }
  4361. struct ggml_tensor * ggml_add1_inplace(
  4362. struct ggml_context * ctx,
  4363. struct ggml_tensor * a,
  4364. struct ggml_tensor * b) {
  4365. return ggml_add1_impl(ctx, a, b, true);
  4366. }
  4367. // ggml_acc
  4368. static struct ggml_tensor * ggml_acc_impl(
  4369. struct ggml_context * ctx,
  4370. struct ggml_tensor * a,
  4371. struct ggml_tensor * b,
  4372. size_t nb1,
  4373. size_t nb2,
  4374. size_t nb3,
  4375. size_t offset,
  4376. bool inplace) {
  4377. GGML_ASSERT(ggml_nelements(b) <= ggml_nelements(a));
  4378. GGML_ASSERT(ggml_is_contiguous(a));
  4379. GGML_ASSERT(a->type == GGML_TYPE_F32);
  4380. GGML_ASSERT(b->type == GGML_TYPE_F32);
  4381. bool is_node = false;
  4382. if (!inplace && (a->grad || b->grad)) {
  4383. is_node = true;
  4384. }
  4385. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4386. int32_t params[] = { nb1, nb2, nb3, offset, inplace ? 1 : 0 };
  4387. ggml_set_op_params(result, params, sizeof(params));
  4388. result->op = GGML_OP_ACC;
  4389. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4390. result->src[0] = a;
  4391. result->src[1] = b;
  4392. return result;
  4393. }
  4394. struct ggml_tensor * ggml_acc(
  4395. struct ggml_context * ctx,
  4396. struct ggml_tensor * a,
  4397. struct ggml_tensor * b,
  4398. size_t nb1,
  4399. size_t nb2,
  4400. size_t nb3,
  4401. size_t offset) {
  4402. return ggml_acc_impl(ctx, a, b, nb1, nb2, nb3, offset, false);
  4403. }
  4404. struct ggml_tensor * ggml_acc_inplace(
  4405. struct ggml_context * ctx,
  4406. struct ggml_tensor * a,
  4407. struct ggml_tensor * b,
  4408. size_t nb1,
  4409. size_t nb2,
  4410. size_t nb3,
  4411. size_t offset) {
  4412. return ggml_acc_impl(ctx, a, b, nb1, nb2, nb3, offset, true);
  4413. }
  4414. // ggml_sub
  4415. static struct ggml_tensor * ggml_sub_impl(
  4416. struct ggml_context * ctx,
  4417. struct ggml_tensor * a,
  4418. struct ggml_tensor * b,
  4419. bool inplace) {
  4420. GGML_ASSERT(ggml_are_same_shape(a, b));
  4421. bool is_node = false;
  4422. if (!inplace && (a->grad || b->grad)) {
  4423. is_node = true;
  4424. }
  4425. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4426. result->op = GGML_OP_SUB;
  4427. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4428. result->src[0] = a;
  4429. result->src[1] = b;
  4430. return result;
  4431. }
  4432. struct ggml_tensor * ggml_sub(
  4433. struct ggml_context * ctx,
  4434. struct ggml_tensor * a,
  4435. struct ggml_tensor * b) {
  4436. return ggml_sub_impl(ctx, a, b, false);
  4437. }
  4438. struct ggml_tensor * ggml_sub_inplace(
  4439. struct ggml_context * ctx,
  4440. struct ggml_tensor * a,
  4441. struct ggml_tensor * b) {
  4442. return ggml_sub_impl(ctx, a, b, true);
  4443. }
  4444. // ggml_mul
  4445. static struct ggml_tensor * ggml_mul_impl(
  4446. struct ggml_context * ctx,
  4447. struct ggml_tensor * a,
  4448. struct ggml_tensor * b,
  4449. bool inplace) {
  4450. // TODO: support less-strict constraint
  4451. // GGML_ASSERT(ggml_can_repeat(b, a));
  4452. GGML_ASSERT(ggml_can_repeat_rows(b, a));
  4453. bool is_node = false;
  4454. if (!inplace && (a->grad || b->grad)) {
  4455. // TODO: support backward pass for broadcasting
  4456. GGML_ASSERT(ggml_are_same_shape(a, b));
  4457. is_node = true;
  4458. }
  4459. if (inplace) {
  4460. GGML_ASSERT(!is_node);
  4461. }
  4462. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4463. result->op = GGML_OP_MUL;
  4464. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4465. result->src[0] = a;
  4466. result->src[1] = b;
  4467. return result;
  4468. }
  4469. struct ggml_tensor * ggml_mul(
  4470. struct ggml_context * ctx,
  4471. struct ggml_tensor * a,
  4472. struct ggml_tensor * b) {
  4473. return ggml_mul_impl(ctx, a, b, false);
  4474. }
  4475. struct ggml_tensor * ggml_mul_inplace(
  4476. struct ggml_context * ctx,
  4477. struct ggml_tensor * a,
  4478. struct ggml_tensor * b) {
  4479. return ggml_mul_impl(ctx, a, b, true);
  4480. }
  4481. // ggml_div
  4482. static struct ggml_tensor * ggml_div_impl(
  4483. struct ggml_context * ctx,
  4484. struct ggml_tensor * a,
  4485. struct ggml_tensor * b,
  4486. bool inplace) {
  4487. GGML_ASSERT(ggml_are_same_shape(a, b));
  4488. bool is_node = false;
  4489. if (!inplace && (a->grad || b->grad)) {
  4490. is_node = true;
  4491. }
  4492. if (inplace) {
  4493. GGML_ASSERT(!is_node);
  4494. }
  4495. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4496. result->op = GGML_OP_DIV;
  4497. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4498. result->src[0] = a;
  4499. result->src[1] = b;
  4500. return result;
  4501. }
  4502. struct ggml_tensor * ggml_div(
  4503. struct ggml_context * ctx,
  4504. struct ggml_tensor * a,
  4505. struct ggml_tensor * b) {
  4506. return ggml_div_impl(ctx, a, b, false);
  4507. }
  4508. struct ggml_tensor * ggml_div_inplace(
  4509. struct ggml_context * ctx,
  4510. struct ggml_tensor * a,
  4511. struct ggml_tensor * b) {
  4512. return ggml_div_impl(ctx, a, b, true);
  4513. }
  4514. // ggml_sqr
  4515. static struct ggml_tensor * ggml_sqr_impl(
  4516. struct ggml_context * ctx,
  4517. struct ggml_tensor * a,
  4518. bool inplace) {
  4519. bool is_node = false;
  4520. if (!inplace && (a->grad)) {
  4521. is_node = true;
  4522. }
  4523. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4524. result->op = GGML_OP_SQR;
  4525. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4526. result->src[0] = a;
  4527. return result;
  4528. }
  4529. struct ggml_tensor * ggml_sqr(
  4530. struct ggml_context * ctx,
  4531. struct ggml_tensor * a) {
  4532. return ggml_sqr_impl(ctx, a, false);
  4533. }
  4534. struct ggml_tensor * ggml_sqr_inplace(
  4535. struct ggml_context * ctx,
  4536. struct ggml_tensor * a) {
  4537. return ggml_sqr_impl(ctx, a, true);
  4538. }
  4539. // ggml_sqrt
  4540. static struct ggml_tensor * ggml_sqrt_impl(
  4541. struct ggml_context * ctx,
  4542. struct ggml_tensor * a,
  4543. bool inplace) {
  4544. bool is_node = false;
  4545. if (!inplace && (a->grad)) {
  4546. is_node = true;
  4547. }
  4548. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4549. result->op = GGML_OP_SQRT;
  4550. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4551. result->src[0] = a;
  4552. return result;
  4553. }
  4554. struct ggml_tensor * ggml_sqrt(
  4555. struct ggml_context * ctx,
  4556. struct ggml_tensor * a) {
  4557. return ggml_sqrt_impl(ctx, a, false);
  4558. }
  4559. struct ggml_tensor * ggml_sqrt_inplace(
  4560. struct ggml_context * ctx,
  4561. struct ggml_tensor * a) {
  4562. return ggml_sqrt_impl(ctx, a, true);
  4563. }
  4564. // ggml_log
  4565. static struct ggml_tensor * ggml_log_impl(
  4566. struct ggml_context * ctx,
  4567. struct ggml_tensor * a,
  4568. bool inplace) {
  4569. bool is_node = false;
  4570. if (!inplace && (a->grad)) {
  4571. is_node = true;
  4572. }
  4573. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4574. result->op = GGML_OP_LOG;
  4575. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4576. result->src[0] = a;
  4577. return result;
  4578. }
  4579. struct ggml_tensor * ggml_log(
  4580. struct ggml_context * ctx,
  4581. struct ggml_tensor * a) {
  4582. return ggml_log_impl(ctx, a, false);
  4583. }
  4584. struct ggml_tensor * ggml_log_inplace(
  4585. struct ggml_context * ctx,
  4586. struct ggml_tensor * a) {
  4587. return ggml_log_impl(ctx, a, true);
  4588. }
  4589. // ggml_sum
  4590. struct ggml_tensor * ggml_sum(
  4591. struct ggml_context * ctx,
  4592. struct ggml_tensor * a) {
  4593. bool is_node = false;
  4594. if (a->grad) {
  4595. is_node = true;
  4596. }
  4597. struct ggml_tensor * result = ggml_new_tensor_1d(ctx, a->type, 1);
  4598. result->op = GGML_OP_SUM;
  4599. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4600. result->src[0] = a;
  4601. return result;
  4602. }
  4603. // ggml_sum_rows
  4604. struct ggml_tensor * ggml_sum_rows(
  4605. struct ggml_context * ctx,
  4606. struct ggml_tensor * a) {
  4607. bool is_node = false;
  4608. if (a->grad) {
  4609. is_node = true;
  4610. }
  4611. int64_t ne[4] = {1,1,1,1};
  4612. for (int i=1; i<a->n_dims; ++i) {
  4613. ne[i] = a->ne[i];
  4614. }
  4615. struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, a->n_dims, ne);
  4616. result->op = GGML_OP_SUM_ROWS;
  4617. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4618. result->src[0] = a;
  4619. return result;
  4620. }
  4621. // ggml_mean
  4622. struct ggml_tensor * ggml_mean(
  4623. struct ggml_context * ctx,
  4624. struct ggml_tensor * a) {
  4625. bool is_node = false;
  4626. if (a->grad) {
  4627. GGML_ASSERT(false); // TODO: implement
  4628. is_node = true;
  4629. }
  4630. int64_t ne[GGML_MAX_DIMS] = { 1, a->ne[1], a->ne[2], a->ne[3] };
  4631. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, a->n_dims, ne);
  4632. result->op = GGML_OP_MEAN;
  4633. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4634. result->src[0] = a;
  4635. return result;
  4636. }
  4637. // ggml_argmax
  4638. struct ggml_tensor * ggml_argmax(
  4639. struct ggml_context * ctx,
  4640. struct ggml_tensor * a) {
  4641. GGML_ASSERT(ggml_is_matrix(a));
  4642. bool is_node = false;
  4643. if (a->grad) {
  4644. GGML_ASSERT(false);
  4645. is_node = true;
  4646. }
  4647. int64_t ne[GGML_MAX_DIMS] = { a->ne[1], 1, 1, 1 };
  4648. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_I32, a->n_dims, ne);
  4649. result->op = GGML_OP_ARGMAX;
  4650. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4651. result->src[0] = a;
  4652. return result;
  4653. }
  4654. // ggml_repeat
  4655. struct ggml_tensor * ggml_repeat(
  4656. struct ggml_context * ctx,
  4657. struct ggml_tensor * a,
  4658. struct ggml_tensor * b) {
  4659. GGML_ASSERT(ggml_can_repeat(a, b));
  4660. bool is_node = false;
  4661. if (a->grad) {
  4662. is_node = true;
  4663. }
  4664. struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, b->n_dims, b->ne);
  4665. result->op = GGML_OP_REPEAT;
  4666. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4667. result->src[0] = a;
  4668. result->src[1] = b;
  4669. return result;
  4670. }
  4671. // ggml_repeat_back
  4672. struct ggml_tensor * ggml_repeat_back(
  4673. struct ggml_context * ctx,
  4674. struct ggml_tensor * a,
  4675. struct ggml_tensor * b) {
  4676. GGML_ASSERT(ggml_can_repeat(b, a));
  4677. bool is_node = false;
  4678. if (a->grad) {
  4679. is_node = true;
  4680. }
  4681. if (ggml_are_same_shape(a, b) && !is_node) {
  4682. return a;
  4683. }
  4684. struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, b->n_dims, b->ne);
  4685. result->op = GGML_OP_REPEAT_BACK;
  4686. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4687. result->src[0] = a;
  4688. result->src[1] = b;
  4689. return result;
  4690. }
  4691. // ggml_concat
  4692. struct ggml_tensor * ggml_concat(
  4693. struct ggml_context* ctx,
  4694. struct ggml_tensor* a,
  4695. struct ggml_tensor* b) {
  4696. GGML_ASSERT(a->ne[0] == b->ne[0] && a->ne[1] == b->ne[1] && a->ne[3] == b->ne[3]);
  4697. bool is_node = false;
  4698. if (a->grad || b->grad) {
  4699. is_node = true;
  4700. }
  4701. struct ggml_tensor * result = ggml_new_tensor_4d(ctx, a->type, a->ne[0], a->ne[1], a->ne[2] + b->ne[2], a->ne[3]);
  4702. result->op = GGML_OP_CONCAT;
  4703. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4704. result->src[0] = a;
  4705. result->src[1] = b;
  4706. return result;
  4707. }
  4708. // ggml_abs
  4709. struct ggml_tensor * ggml_abs(
  4710. struct ggml_context * ctx,
  4711. struct ggml_tensor * a) {
  4712. return ggml_unary(ctx, a, GGML_UNARY_OP_ABS);
  4713. }
  4714. struct ggml_tensor * ggml_abs_inplace(
  4715. struct ggml_context * ctx,
  4716. struct ggml_tensor * a) {
  4717. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_ABS);
  4718. }
  4719. // ggml_sgn
  4720. struct ggml_tensor * ggml_sgn(
  4721. struct ggml_context * ctx,
  4722. struct ggml_tensor * a) {
  4723. return ggml_unary(ctx, a, GGML_UNARY_OP_SGN);
  4724. }
  4725. struct ggml_tensor * ggml_sgn_inplace(
  4726. struct ggml_context * ctx,
  4727. struct ggml_tensor * a) {
  4728. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_SGN);
  4729. }
  4730. // ggml_neg
  4731. struct ggml_tensor * ggml_neg(
  4732. struct ggml_context * ctx,
  4733. struct ggml_tensor * a) {
  4734. return ggml_unary(ctx, a, GGML_UNARY_OP_NEG);
  4735. }
  4736. struct ggml_tensor * ggml_neg_inplace(
  4737. struct ggml_context * ctx,
  4738. struct ggml_tensor * a) {
  4739. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_NEG);
  4740. }
  4741. // ggml_step
  4742. struct ggml_tensor * ggml_step(
  4743. struct ggml_context * ctx,
  4744. struct ggml_tensor * a) {
  4745. return ggml_unary(ctx, a, GGML_UNARY_OP_STEP);
  4746. }
  4747. struct ggml_tensor * ggml_step_inplace(
  4748. struct ggml_context * ctx,
  4749. struct ggml_tensor * a) {
  4750. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_STEP);
  4751. }
  4752. // ggml_tanh
  4753. struct ggml_tensor * ggml_tanh(
  4754. struct ggml_context * ctx,
  4755. struct ggml_tensor * a) {
  4756. return ggml_unary(ctx, a, GGML_UNARY_OP_TANH);
  4757. }
  4758. struct ggml_tensor * ggml_tanh_inplace(
  4759. struct ggml_context * ctx,
  4760. struct ggml_tensor * a) {
  4761. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_TANH);
  4762. }
  4763. // ggml_elu
  4764. struct ggml_tensor * ggml_elu(
  4765. struct ggml_context * ctx,
  4766. struct ggml_tensor * a) {
  4767. return ggml_unary(ctx, a, GGML_UNARY_OP_ELU);
  4768. }
  4769. struct ggml_tensor * ggml_elu_inplace(
  4770. struct ggml_context * ctx,
  4771. struct ggml_tensor * a) {
  4772. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_ELU);
  4773. }
  4774. // ggml_relu
  4775. struct ggml_tensor * ggml_relu(
  4776. struct ggml_context * ctx,
  4777. struct ggml_tensor * a) {
  4778. return ggml_unary(ctx, a, GGML_UNARY_OP_RELU);
  4779. }
  4780. struct ggml_tensor * ggml_relu_inplace(
  4781. struct ggml_context * ctx,
  4782. struct ggml_tensor * a) {
  4783. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_RELU);
  4784. }
  4785. // ggml_gelu
  4786. struct ggml_tensor * ggml_gelu(
  4787. struct ggml_context * ctx,
  4788. struct ggml_tensor * a) {
  4789. return ggml_unary(ctx, a, GGML_UNARY_OP_GELU);
  4790. }
  4791. struct ggml_tensor * ggml_gelu_inplace(
  4792. struct ggml_context * ctx,
  4793. struct ggml_tensor * a) {
  4794. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_GELU);
  4795. }
  4796. // ggml_gelu_quick
  4797. struct ggml_tensor * ggml_gelu_quick(
  4798. struct ggml_context * ctx,
  4799. struct ggml_tensor * a) {
  4800. return ggml_unary(ctx, a, GGML_UNARY_OP_GELU_QUICK);
  4801. }
  4802. struct ggml_tensor * ggml_gelu_quick_inplace(
  4803. struct ggml_context * ctx,
  4804. struct ggml_tensor * a) {
  4805. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_GELU_QUICK);
  4806. }
  4807. // ggml_silu
  4808. struct ggml_tensor * ggml_silu(
  4809. struct ggml_context * ctx,
  4810. struct ggml_tensor * a) {
  4811. return ggml_unary(ctx, a, GGML_UNARY_OP_SILU);
  4812. }
  4813. struct ggml_tensor * ggml_silu_inplace(
  4814. struct ggml_context * ctx,
  4815. struct ggml_tensor * a) {
  4816. return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_SILU);
  4817. }
  4818. // ggml_silu_back
  4819. struct ggml_tensor * ggml_silu_back(
  4820. struct ggml_context * ctx,
  4821. struct ggml_tensor * a,
  4822. struct ggml_tensor * b) {
  4823. bool is_node = false;
  4824. if (a->grad || b->grad) {
  4825. // TODO: implement backward
  4826. is_node = true;
  4827. }
  4828. struct ggml_tensor * result = ggml_dup_tensor(ctx, a);
  4829. result->op = GGML_OP_SILU_BACK;
  4830. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4831. result->src[0] = a;
  4832. result->src[1] = b;
  4833. return result;
  4834. }
  4835. // ggml_norm
  4836. static struct ggml_tensor * ggml_norm_impl(
  4837. struct ggml_context * ctx,
  4838. struct ggml_tensor * a,
  4839. float eps,
  4840. bool inplace) {
  4841. bool is_node = false;
  4842. if (!inplace && (a->grad)) {
  4843. GGML_ASSERT(false); // TODO: implement backward
  4844. is_node = true;
  4845. }
  4846. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4847. ggml_set_op_params(result, &eps, sizeof(eps));
  4848. result->op = GGML_OP_NORM;
  4849. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4850. result->src[0] = a;
  4851. return result;
  4852. }
  4853. struct ggml_tensor * ggml_norm(
  4854. struct ggml_context * ctx,
  4855. struct ggml_tensor * a,
  4856. float eps) {
  4857. return ggml_norm_impl(ctx, a, eps, false);
  4858. }
  4859. struct ggml_tensor * ggml_norm_inplace(
  4860. struct ggml_context * ctx,
  4861. struct ggml_tensor * a,
  4862. float eps) {
  4863. return ggml_norm_impl(ctx, a, eps, true);
  4864. }
  4865. // ggml_rms_norm
  4866. static struct ggml_tensor * ggml_rms_norm_impl(
  4867. struct ggml_context * ctx,
  4868. struct ggml_tensor * a,
  4869. float eps,
  4870. bool inplace) {
  4871. bool is_node = false;
  4872. if (!inplace && (a->grad)) {
  4873. is_node = true;
  4874. }
  4875. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4876. ggml_set_op_params(result, &eps, sizeof(eps));
  4877. result->op = GGML_OP_RMS_NORM;
  4878. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4879. result->src[0] = a;
  4880. return result;
  4881. }
  4882. struct ggml_tensor * ggml_rms_norm(
  4883. struct ggml_context * ctx,
  4884. struct ggml_tensor * a,
  4885. float eps) {
  4886. return ggml_rms_norm_impl(ctx, a, eps, false);
  4887. }
  4888. struct ggml_tensor * ggml_rms_norm_inplace(
  4889. struct ggml_context * ctx,
  4890. struct ggml_tensor * a,
  4891. float eps) {
  4892. return ggml_rms_norm_impl(ctx, a, eps, true);
  4893. }
  4894. // ggml_rms_norm_back
  4895. struct ggml_tensor * ggml_rms_norm_back(
  4896. struct ggml_context * ctx,
  4897. struct ggml_tensor * a,
  4898. struct ggml_tensor * b,
  4899. float eps) {
  4900. bool is_node = false;
  4901. if (a->grad) {
  4902. // TODO: implement backward
  4903. is_node = true;
  4904. }
  4905. struct ggml_tensor * result = ggml_dup_tensor(ctx, a);
  4906. ggml_set_op_params(result, &eps, sizeof(eps));
  4907. result->op = GGML_OP_RMS_NORM_BACK;
  4908. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4909. result->src[0] = a;
  4910. result->src[1] = b;
  4911. return result;
  4912. }
  4913. // ggml_group_norm
  4914. static struct ggml_tensor * ggml_group_norm_impl(
  4915. struct ggml_context * ctx,
  4916. struct ggml_tensor * a,
  4917. int n_groups,
  4918. bool inplace) {
  4919. bool is_node = false;
  4920. if (!inplace && (a->grad)) {
  4921. GGML_ASSERT(false); // TODO: implement backward
  4922. is_node = true;
  4923. }
  4924. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4925. result->op = GGML_OP_GROUP_NORM;
  4926. result->op_params[0] = n_groups;
  4927. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4928. result->src[0] = a;
  4929. result->src[1] = NULL; // TODO: maybe store epsilon here?
  4930. return result;
  4931. }
  4932. struct ggml_tensor * ggml_group_norm(
  4933. struct ggml_context * ctx,
  4934. struct ggml_tensor * a,
  4935. int n_groups) {
  4936. return ggml_group_norm_impl(ctx, a, n_groups, false);
  4937. }
  4938. struct ggml_tensor * ggml_group_norm_inplace(
  4939. struct ggml_context * ctx,
  4940. struct ggml_tensor * a,
  4941. int n_groups) {
  4942. return ggml_group_norm_impl(ctx, a, n_groups, true);
  4943. }
  4944. // ggml_mul_mat
  4945. struct ggml_tensor * ggml_mul_mat(
  4946. struct ggml_context * ctx,
  4947. struct ggml_tensor * a,
  4948. struct ggml_tensor * b) {
  4949. GGML_ASSERT(ggml_can_mul_mat(a, b));
  4950. GGML_ASSERT(!ggml_is_transposed(a));
  4951. bool is_node = false;
  4952. if (a->grad || b->grad) {
  4953. is_node = true;
  4954. }
  4955. const int64_t ne[4] = { a->ne[1], b->ne[1], b->ne[2], b->ne[3] };
  4956. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, MAX(a->n_dims, b->n_dims), ne);
  4957. result->op = GGML_OP_MUL_MAT;
  4958. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4959. result->src[0] = a;
  4960. result->src[1] = b;
  4961. return result;
  4962. }
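// shape convention: ggml_mul_mat requires a->ne[0] == b->ne[0] (the shared inner
// dimension comes first), so for a with ne = [k, n] and b with ne = [k, m] the
// result has ne = [n, m]; the batch dimensions ne[2]/ne[3] of the result are taken from b.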
  4963. // ggml_out_prod
  4964. struct ggml_tensor * ggml_out_prod(
  4965. struct ggml_context * ctx,
  4966. struct ggml_tensor * a,
  4967. struct ggml_tensor * b) {
  4968. GGML_ASSERT(ggml_can_out_prod(a, b));
  4969. GGML_ASSERT(!ggml_is_transposed(a));
  4970. bool is_node = false;
  4971. if (a->grad || b->grad) {
  4972. is_node = true;
  4973. }
  4974. const int64_t ne[4] = { a->ne[0], b->ne[0], a->ne[2], b->ne[3] };
  4975. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, MIN(a->n_dims, b->n_dims), ne);
  4976. result->op = GGML_OP_OUT_PROD;
  4977. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4978. result->src[0] = a;
  4979. result->src[1] = b;
  4980. return result;
  4981. }
  4982. // ggml_scale
  4983. static struct ggml_tensor * ggml_scale_impl(
  4984. struct ggml_context * ctx,
  4985. struct ggml_tensor * a,
  4986. struct ggml_tensor * b,
  4987. bool inplace) {
  4988. GGML_ASSERT(ggml_is_scalar(b));
  4989. GGML_ASSERT(ggml_is_padded_1d(a));
  4990. bool is_node = false;
  4991. if (a->grad || b->grad) {
  4992. is_node = true;
  4993. }
  4994. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  4995. result->op = GGML_OP_SCALE;
  4996. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  4997. result->src[0] = a;
  4998. result->src[1] = b;
  4999. return result;
  5000. }
  5001. struct ggml_tensor * ggml_scale(
  5002. struct ggml_context * ctx,
  5003. struct ggml_tensor * a,
  5004. struct ggml_tensor * b) {
  5005. return ggml_scale_impl(ctx, a, b, false);
  5006. }
  5007. struct ggml_tensor * ggml_scale_inplace(
  5008. struct ggml_context * ctx,
  5009. struct ggml_tensor * a,
  5010. struct ggml_tensor * b) {
  5011. return ggml_scale_impl(ctx, a, b, true);
  5012. }
  5013. // ggml_set
  5014. static struct ggml_tensor * ggml_set_impl(
  5015. struct ggml_context * ctx,
  5016. struct ggml_tensor * a,
  5017. struct ggml_tensor * b,
  5018. size_t nb1,
  5019. size_t nb2,
  5020. size_t nb3,
  5021. size_t offset,
  5022. bool inplace) {
  5023. GGML_ASSERT(ggml_nelements(a) >= ggml_nelements(b));
  5024. bool is_node = false;
  5025. if (a->grad || b->grad) {
  5026. is_node = true;
  5027. }
  5028. // make a view of the destination
  5029. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  5030. int32_t params[] = { nb1, nb2, nb3, offset, inplace ? 1 : 0 };
  5031. ggml_set_op_params(result, params, sizeof(params));
  5032. result->op = GGML_OP_SET;
  5033. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5034. result->src[0] = a;
  5035. result->src[1] = b;
  5036. return result;
  5037. }
  5038. struct ggml_tensor * ggml_set(
  5039. struct ggml_context * ctx,
  5040. struct ggml_tensor * a,
  5041. struct ggml_tensor * b,
  5042. size_t nb1,
  5043. size_t nb2,
  5044. size_t nb3,
  5045. size_t offset) {
  5046. return ggml_set_impl(ctx, a, b, nb1, nb2, nb3, offset, false);
  5047. }
  5048. struct ggml_tensor * ggml_set_inplace(
  5049. struct ggml_context * ctx,
  5050. struct ggml_tensor * a,
  5051. struct ggml_tensor * b,
  5052. size_t nb1,
  5053. size_t nb2,
  5054. size_t nb3,
  5055. size_t offset) {
  5056. return ggml_set_impl(ctx, a, b, nb1, nb2, nb3, offset, true);
  5057. }
  5058. struct ggml_tensor * ggml_set_1d(
  5059. struct ggml_context * ctx,
  5060. struct ggml_tensor * a,
  5061. struct ggml_tensor * b,
  5062. size_t offset) {
  5063. return ggml_set_impl(ctx, a, b, a->nb[1], a->nb[2], a->nb[3], offset, false);
  5064. }
  5065. struct ggml_tensor * ggml_set_1d_inplace(
  5066. struct ggml_context * ctx,
  5067. struct ggml_tensor * a,
  5068. struct ggml_tensor * b,
  5069. size_t offset) {
  5070. return ggml_set_impl(ctx, a, b, a->nb[1], a->nb[2], a->nb[3], offset, true);
  5071. }
  5072. struct ggml_tensor * ggml_set_2d(
  5073. struct ggml_context * ctx,
  5074. struct ggml_tensor * a,
  5075. struct ggml_tensor * b,
  5076. size_t nb1,
  5077. size_t offset) {
  5078. return ggml_set_impl(ctx, a, b, nb1, a->nb[2], a->nb[3], offset, false);
  5079. }
  5080. struct ggml_tensor * ggml_set_2d_inplace(
  5081. struct ggml_context * ctx,
  5082. struct ggml_tensor * a,
  5083. struct ggml_tensor * b,
  5084. size_t nb1,
  5085. size_t offset) {
5086. return ggml_set_impl(ctx, a, b, nb1, a->nb[2], a->nb[3], offset, true);
  5087. }
  5088. // ggml_cpy
  5089. static struct ggml_tensor * ggml_cpy_impl(
  5090. struct ggml_context * ctx,
  5091. struct ggml_tensor * a,
  5092. struct ggml_tensor * b,
  5093. bool inplace) {
  5094. GGML_ASSERT(ggml_nelements(a) == ggml_nelements(b));
  5095. bool is_node = false;
  5096. if (!inplace && (a->grad || b->grad)) {
  5097. is_node = true;
  5098. }
  5099. // make a view of the destination
  5100. struct ggml_tensor * result = ggml_view_tensor(ctx, b);
  5101. if (strlen(b->name) > 0) {
  5102. ggml_format_name(result, "%s (copy of %s)", b->name, a->name);
  5103. } else {
  5104. ggml_format_name(result, "%s (copy)", a->name);
  5105. }
  5106. result->op = GGML_OP_CPY;
  5107. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5108. result->src[0] = a;
  5109. result->src[1] = b;
  5110. return result;
  5111. }
  5112. struct ggml_tensor * ggml_cpy(
  5113. struct ggml_context * ctx,
  5114. struct ggml_tensor * a,
  5115. struct ggml_tensor * b) {
  5116. return ggml_cpy_impl(ctx, a, b, false);
  5117. }
  5118. struct ggml_tensor * ggml_cpy_inplace(
  5119. struct ggml_context * ctx,
  5120. struct ggml_tensor * a,
  5121. struct ggml_tensor * b) {
  5122. return ggml_cpy_impl(ctx, a, b, true);
  5123. }
  5124. // ggml_cont
  5125. static struct ggml_tensor * ggml_cont_impl(
  5126. struct ggml_context * ctx,
  5127. struct ggml_tensor * a,
  5128. bool inplace) {
  5129. bool is_node = false;
  5130. if (!inplace && a->grad) {
  5131. is_node = true;
  5132. }
  5133. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  5134. ggml_format_name(result, "%s (cont)", a->name);
  5135. result->op = GGML_OP_CONT;
  5136. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5137. result->src[0] = a;
  5138. return result;
  5139. }
  5140. struct ggml_tensor * ggml_cont(
  5141. struct ggml_context * ctx,
  5142. struct ggml_tensor * a) {
  5143. return ggml_cont_impl(ctx, a, false);
  5144. }
  5145. struct ggml_tensor * ggml_cont_inplace(
  5146. struct ggml_context * ctx,
  5147. struct ggml_tensor * a) {
  5148. return ggml_cont_impl(ctx, a, true);
  5149. }
  5150. // ggml_reshape
  5151. struct ggml_tensor * ggml_reshape(
  5152. struct ggml_context * ctx,
  5153. struct ggml_tensor * a,
  5154. struct ggml_tensor * b) {
  5155. GGML_ASSERT(ggml_is_contiguous(a));
  5156. GGML_ASSERT(ggml_is_contiguous(b));
  5157. GGML_ASSERT(ggml_nelements(a) == ggml_nelements(b));
  5158. bool is_node = false;
  5159. if (a->grad) {
  5160. is_node = true;
  5161. }
  5162. if (b->grad) {
  5163. // gradient propagation is not supported
  5164. //GGML_ASSERT(false);
  5165. }
  5166. struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, b->n_dims, b->ne, a, 0);
  5167. ggml_format_name(result, "%s (reshaped)", a->name);
  5168. result->op = GGML_OP_RESHAPE;
  5169. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5170. result->src[0] = a;
  5171. return result;
  5172. }
  5173. struct ggml_tensor * ggml_reshape_1d(
  5174. struct ggml_context * ctx,
  5175. struct ggml_tensor * a,
  5176. int64_t ne0) {
  5177. GGML_ASSERT(ggml_is_contiguous(a));
  5178. GGML_ASSERT(ggml_nelements(a) == ne0);
  5179. bool is_node = false;
  5180. if (a->grad) {
  5181. is_node = true;
  5182. }
  5183. const int64_t ne[1] = { ne0 };
  5184. struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 1, ne, a, 0);
  5185. ggml_format_name(result, "%s (reshaped)", a->name);
  5186. result->op = GGML_OP_RESHAPE;
  5187. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5188. result->src[0] = a;
  5189. return result;
  5190. }
  5191. struct ggml_tensor * ggml_reshape_2d(
  5192. struct ggml_context * ctx,
  5193. struct ggml_tensor * a,
  5194. int64_t ne0,
  5195. int64_t ne1) {
  5196. GGML_ASSERT(ggml_is_contiguous(a));
  5197. GGML_ASSERT(ggml_nelements(a) == ne0*ne1);
  5198. bool is_node = false;
  5199. if (a->grad) {
  5200. is_node = true;
  5201. }
  5202. const int64_t ne[2] = { ne0, ne1 };
  5203. struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 2, ne, a, 0);
  5204. ggml_format_name(result, "%s (reshaped)", a->name);
  5205. result->op = GGML_OP_RESHAPE;
  5206. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5207. result->src[0] = a;
  5208. return result;
  5209. }
  5210. struct ggml_tensor * ggml_reshape_3d(
  5211. struct ggml_context * ctx,
  5212. struct ggml_tensor * a,
  5213. int64_t ne0,
  5214. int64_t ne1,
  5215. int64_t ne2) {
  5216. GGML_ASSERT(ggml_is_contiguous(a));
  5217. GGML_ASSERT(ggml_nelements(a) == ne0*ne1*ne2);
  5218. bool is_node = false;
  5219. if (a->grad) {
  5220. is_node = true;
  5221. }
  5222. const int64_t ne[3] = { ne0, ne1, ne2 };
  5223. struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 3, ne, a, 0);
  5224. ggml_format_name(result, "%s (reshaped)", a->name);
  5225. result->op = GGML_OP_RESHAPE;
  5226. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5227. result->src[0] = a;
  5228. return result;
  5229. }
  5230. struct ggml_tensor * ggml_reshape_4d(
  5231. struct ggml_context * ctx,
  5232. struct ggml_tensor * a,
  5233. int64_t ne0,
  5234. int64_t ne1,
  5235. int64_t ne2,
  5236. int64_t ne3) {
  5237. GGML_ASSERT(ggml_is_contiguous(a));
  5238. GGML_ASSERT(ggml_nelements(a) == ne0*ne1*ne2*ne3);
  5239. bool is_node = false;
  5240. if (a->grad) {
  5241. is_node = true;
  5242. }
  5243. const int64_t ne[4] = { ne0, ne1, ne2, ne3 };
  5244. struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 4, ne, a, 0);
  5245. ggml_format_name(result, "%s (reshaped)", a->name);
  5246. result->op = GGML_OP_RESHAPE;
  5247. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5248. result->src[0] = a;
  5249. return result;
  5250. }
  5251. static struct ggml_tensor * ggml_view_impl(
  5252. struct ggml_context * ctx,
  5253. struct ggml_tensor * a,
  5254. int n_dims,
  5255. const int64_t * ne,
  5256. size_t offset) {
  5257. bool is_node = false;
  5258. if (a->grad) {
  5259. is_node = true;
  5260. }
  5261. struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, n_dims, ne, a, offset);
  5262. ggml_format_name(result, "%s (view)", a->name);
  5263. ggml_set_op_params(result, &offset, sizeof(offset));
  5264. result->op = GGML_OP_VIEW;
  5265. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5266. result->src[0] = a;
  5267. return result;
  5268. }
  5269. // ggml_view_1d
  5270. struct ggml_tensor * ggml_view_1d(
  5271. struct ggml_context * ctx,
  5272. struct ggml_tensor * a,
  5273. int64_t ne0,
  5274. size_t offset) {
  5275. struct ggml_tensor * result = ggml_view_impl(ctx, a, 1, &ne0, offset);
  5276. return result;
  5277. }
  5278. // ggml_view_2d
  5279. struct ggml_tensor * ggml_view_2d(
  5280. struct ggml_context * ctx,
  5281. struct ggml_tensor * a,
  5282. int64_t ne0,
  5283. int64_t ne1,
  5284. size_t nb1,
  5285. size_t offset) {
  5286. const int64_t ne[2] = { ne0, ne1 };
  5287. struct ggml_tensor * result = ggml_view_impl(ctx, a, 2, ne, offset);
  5288. result->nb[1] = nb1;
  5289. result->nb[2] = result->nb[1]*ne1;
  5290. result->nb[3] = result->nb[2];
  5291. return result;
  5292. }
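// note: nb1 is the byte stride between consecutive rows of the view, usually the
// parent's own row stride when slicing a sub-matrix. illustrative sketch (assumed names):
//   // view the first n_rows rows of a contiguous 2D tensor src:
//   // struct ggml_tensor * v = ggml_view_2d(ctx, src, src->ne[0], n_rows, src->nb[1], 0);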
  5293. // ggml_view_3d
  5294. struct ggml_tensor * ggml_view_3d(
  5295. struct ggml_context * ctx,
  5296. struct ggml_tensor * a,
  5297. int64_t ne0,
  5298. int64_t ne1,
  5299. int64_t ne2,
  5300. size_t nb1,
  5301. size_t nb2,
  5302. size_t offset) {
  5303. const int64_t ne[3] = { ne0, ne1, ne2 };
  5304. struct ggml_tensor * result = ggml_view_impl(ctx, a, 3, ne, offset);
  5305. result->nb[1] = nb1;
  5306. result->nb[2] = nb2;
  5307. result->nb[3] = result->nb[2]*ne2;
  5308. return result;
  5309. }
  5310. // ggml_view_4d
  5311. struct ggml_tensor * ggml_view_4d(
  5312. struct ggml_context * ctx,
  5313. struct ggml_tensor * a,
  5314. int64_t ne0,
  5315. int64_t ne1,
  5316. int64_t ne2,
  5317. int64_t ne3,
  5318. size_t nb1,
  5319. size_t nb2,
  5320. size_t nb3,
  5321. size_t offset) {
  5322. const int64_t ne[4] = { ne0, ne1, ne2, ne3 };
  5323. struct ggml_tensor * result = ggml_view_impl(ctx, a, 4, ne, offset);
  5324. result->nb[1] = nb1;
  5325. result->nb[2] = nb2;
  5326. result->nb[3] = nb3;
  5327. return result;
  5328. }
  5329. // ggml_permute
  5330. struct ggml_tensor * ggml_permute(
  5331. struct ggml_context * ctx,
  5332. struct ggml_tensor * a,
  5333. int axis0,
  5334. int axis1,
  5335. int axis2,
  5336. int axis3) {
  5337. GGML_ASSERT(axis0 >= 0 && axis0 < GGML_MAX_DIMS);
  5338. GGML_ASSERT(axis1 >= 0 && axis1 < GGML_MAX_DIMS);
  5339. GGML_ASSERT(axis2 >= 0 && axis2 < GGML_MAX_DIMS);
  5340. GGML_ASSERT(axis3 >= 0 && axis3 < GGML_MAX_DIMS);
  5341. GGML_ASSERT(axis0 != axis1);
  5342. GGML_ASSERT(axis0 != axis2);
  5343. GGML_ASSERT(axis0 != axis3);
  5344. GGML_ASSERT(axis1 != axis2);
  5345. GGML_ASSERT(axis1 != axis3);
  5346. GGML_ASSERT(axis2 != axis3);
  5347. bool is_node = false;
  5348. if (a->grad) {
  5349. is_node = true;
  5350. }
  5351. struct ggml_tensor * result = ggml_view_tensor(ctx, a);
  5352. ggml_format_name(result, "%s (permuted)", a->name);
  5353. int ne[GGML_MAX_DIMS];
  5354. int nb[GGML_MAX_DIMS];
  5355. ne[axis0] = a->ne[0];
  5356. ne[axis1] = a->ne[1];
  5357. ne[axis2] = a->ne[2];
  5358. ne[axis3] = a->ne[3];
  5359. nb[axis0] = a->nb[0];
  5360. nb[axis1] = a->nb[1];
  5361. nb[axis2] = a->nb[2];
  5362. nb[axis3] = a->nb[3];
  5363. result->ne[0] = ne[0];
  5364. result->ne[1] = ne[1];
  5365. result->ne[2] = ne[2];
  5366. result->ne[3] = ne[3];
  5367. result->nb[0] = nb[0];
  5368. result->nb[1] = nb[1];
  5369. result->nb[2] = nb[2];
  5370. result->nb[3] = nb[3];
  5371. result->op = GGML_OP_PERMUTE;
  5372. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5373. result->src[0] = a;
  5374. int32_t params[] = { axis0, axis1, axis2, axis3 };
  5375. ggml_set_op_params(result, params, sizeof(params));
  5376. return result;
  5377. }
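// note: each axis argument gives the destination position of the corresponding source
// dimension, i.e. result->ne[axisN] = a->ne[N]. illustrative usage (assumed names):
//   // swap the first two dimensions (same effect as ggml_transpose):
//   // struct ggml_tensor * t = ggml_permute(ctx, a, 1, 0, 2, 3);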
  5378. // ggml_transpose
  5379. struct ggml_tensor * ggml_transpose(
  5380. struct ggml_context * ctx,
  5381. struct ggml_tensor * a) {
  5382. bool is_node = false;
  5383. if (a->grad) {
  5384. is_node = true;
  5385. }
  5386. struct ggml_tensor * result = ggml_view_tensor(ctx, a);
  5387. ggml_format_name(result, "%s (transposed)", a->name);
  5388. result->ne[0] = a->ne[1];
  5389. result->ne[1] = a->ne[0];
  5390. result->nb[0] = a->nb[1];
  5391. result->nb[1] = a->nb[0];
  5392. result->op = GGML_OP_TRANSPOSE;
  5393. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5394. result->src[0] = a;
  5395. return result;
  5396. }
  5397. // ggml_get_rows
  5398. struct ggml_tensor * ggml_get_rows(
  5399. struct ggml_context * ctx,
  5400. struct ggml_tensor * a,
  5401. struct ggml_tensor * b) {
  5402. GGML_ASSERT(ggml_is_matrix(a) && ggml_is_vector(b) && b->type == GGML_TYPE_I32);
  5403. bool is_node = false;
  5404. if (a->grad || b->grad) {
  5405. is_node = true;
  5406. }
  5407. // TODO: implement non F32 return
  5408. //struct ggml_tensor * result = ggml_new_tensor_2d(ctx, a->type, a->ne[0], b->ne[0]);
  5409. struct ggml_tensor * result = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, a->ne[0], b->ne[0]);
  5410. result->op = GGML_OP_GET_ROWS;
  5411. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5412. result->src[0] = a;
  5413. result->src[1] = b;
  5414. return result;
  5415. }
  5416. // ggml_get_rows_back
  5417. struct ggml_tensor * ggml_get_rows_back(
  5418. struct ggml_context * ctx,
  5419. struct ggml_tensor * a,
  5420. struct ggml_tensor * b,
  5421. struct ggml_tensor * c) {
  5422. GGML_ASSERT(ggml_is_matrix(a) && ggml_is_vector(b) && b->type == GGML_TYPE_I32);
  5423. GGML_ASSERT(ggml_is_matrix(c) && (a->ne[0] == c->ne[0]));
  5424. bool is_node = false;
  5425. if (a->grad || b->grad) {
  5426. is_node = true;
  5427. }
  5428. // TODO: implement non F32 return
  5429. //struct ggml_tensor * result = ggml_new_tensor_2d(ctx, a->type, a->ne[0], b->ne[0]);
  5430. struct ggml_tensor * result = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, c->ne[0], c->ne[1]);
  5431. result->op = GGML_OP_GET_ROWS_BACK;
  5432. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5433. result->src[0] = a;
  5434. result->src[1] = b;
  5435. result->src[2] = c;
  5436. return result;
  5437. }
  5438. // ggml_diag
  5439. struct ggml_tensor * ggml_diag(
  5440. struct ggml_context * ctx,
  5441. struct ggml_tensor * a) {
  5442. GGML_ASSERT(a->ne[1] == 1);
  5443. bool is_node = false;
  5444. if (a->grad) {
  5445. is_node = true;
  5446. }
  5447. const int64_t ne[4] = { a->ne[0], a->ne[0], a->ne[2], a->ne[3] };
  5448. struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, MAX(a->n_dims, 2), ne);
  5449. result->op = GGML_OP_DIAG;
  5450. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5451. result->src[0] = a;
  5452. return result;
  5453. }
  5454. // ggml_diag_mask_inf
  5455. static struct ggml_tensor * ggml_diag_mask_inf_impl(
  5456. struct ggml_context * ctx,
  5457. struct ggml_tensor * a,
  5458. int n_past,
  5459. bool inplace) {
  5460. bool is_node = false;
  5461. if (a->grad) {
  5462. is_node = true;
  5463. }
  5464. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  5465. int32_t params[] = { n_past };
  5466. ggml_set_op_params(result, params, sizeof(params));
  5467. result->op = GGML_OP_DIAG_MASK_INF;
  5468. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5469. result->src[0] = a;
  5470. return result;
  5471. }
  5472. struct ggml_tensor * ggml_diag_mask_inf(
  5473. struct ggml_context * ctx,
  5474. struct ggml_tensor * a,
  5475. int n_past) {
  5476. return ggml_diag_mask_inf_impl(ctx, a, n_past, false);
  5477. }
  5478. struct ggml_tensor * ggml_diag_mask_inf_inplace(
  5479. struct ggml_context * ctx,
  5480. struct ggml_tensor * a,
  5481. int n_past) {
  5482. return ggml_diag_mask_inf_impl(ctx, a, n_past, true);
  5483. }
  5484. // ggml_diag_mask_zero
  5485. static struct ggml_tensor * ggml_diag_mask_zero_impl(
  5486. struct ggml_context * ctx,
  5487. struct ggml_tensor * a,
  5488. int n_past,
  5489. bool inplace) {
  5490. bool is_node = false;
  5491. if (a->grad) {
  5492. is_node = true;
  5493. }
  5494. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  5495. int32_t params[] = { n_past };
  5496. ggml_set_op_params(result, params, sizeof(params));
  5497. result->op = GGML_OP_DIAG_MASK_ZERO;
  5498. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5499. result->src[0] = a;
  5500. return result;
  5501. }
  5502. struct ggml_tensor * ggml_diag_mask_zero(
  5503. struct ggml_context * ctx,
  5504. struct ggml_tensor * a,
  5505. int n_past) {
  5506. return ggml_diag_mask_zero_impl(ctx, a, n_past, false);
  5507. }
  5508. struct ggml_tensor * ggml_diag_mask_zero_inplace(
  5509. struct ggml_context * ctx,
  5510. struct ggml_tensor * a,
  5511. int n_past) {
  5512. return ggml_diag_mask_zero_impl(ctx, a, n_past, true);
  5513. }
  5514. // ggml_soft_max
  5515. static struct ggml_tensor * ggml_soft_max_impl(
  5516. struct ggml_context * ctx,
  5517. struct ggml_tensor * a,
  5518. bool inplace) {
  5519. bool is_node = false;
  5520. if (a->grad) {
  5521. is_node = true;
  5522. }
  5523. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  5524. result->op = GGML_OP_SOFT_MAX;
  5525. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5526. result->src[0] = a;
  5527. return result;
  5528. }
  5529. struct ggml_tensor * ggml_soft_max(
  5530. struct ggml_context * ctx,
  5531. struct ggml_tensor * a) {
  5532. return ggml_soft_max_impl(ctx, a, false);
  5533. }
  5534. struct ggml_tensor * ggml_soft_max_inplace(
  5535. struct ggml_context * ctx,
  5536. struct ggml_tensor * a) {
  5537. return ggml_soft_max_impl(ctx, a, true);
  5538. }
  5539. // ggml_soft_max_back
  5540. static struct ggml_tensor * ggml_soft_max_back_impl(
  5541. struct ggml_context * ctx,
  5542. struct ggml_tensor * a,
  5543. struct ggml_tensor * b,
  5544. bool inplace) {
  5545. bool is_node = false;
  5546. if (a->grad || b->grad) {
5547. is_node = true; // TODO: implement backward pass
  5548. }
  5549. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  5550. result->op = GGML_OP_SOFT_MAX_BACK;
  5551. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5552. result->src[0] = a;
  5553. result->src[1] = b;
  5554. return result;
  5555. }
  5556. struct ggml_tensor * ggml_soft_max_back(
  5557. struct ggml_context * ctx,
  5558. struct ggml_tensor * a,
  5559. struct ggml_tensor * b) {
  5560. return ggml_soft_max_back_impl(ctx, a, b, false);
  5561. }
  5562. struct ggml_tensor * ggml_soft_max_back_inplace(
  5563. struct ggml_context * ctx,
  5564. struct ggml_tensor * a,
  5565. struct ggml_tensor * b) {
  5566. return ggml_soft_max_back_impl(ctx, a, b, true);
  5567. }
  5568. // ggml_rope
  5569. static struct ggml_tensor * ggml_rope_impl(
  5570. struct ggml_context * ctx,
  5571. struct ggml_tensor * a,
  5572. int n_past,
  5573. int n_dims,
  5574. int mode,
  5575. int n_ctx,
  5576. float freq_base,
  5577. float freq_scale,
  5578. float xpos_base,
  5579. bool xpos_down,
  5580. bool inplace) {
  5581. GGML_ASSERT(n_past >= 0);
  5582. bool is_node = false;
  5583. if (a->grad) {
  5584. is_node = true;
  5585. }
  5586. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  5587. int32_t params[8] = { n_past, n_dims, mode, n_ctx };
  5588. memcpy(params + 4, &freq_base, sizeof(float));
  5589. memcpy(params + 5, &freq_scale, sizeof(float));
  5590. memcpy(params + 6, &xpos_base, sizeof(float));
  5591. memcpy(params + 7, &xpos_down, sizeof(bool));
  5592. ggml_set_op_params(result, params, sizeof(params));
  5593. result->op = GGML_OP_ROPE;
  5594. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5595. result->src[0] = a;
  5596. return result;
  5597. }
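// note: the op parameters mix integers and floats in a single int32_t array; the float
// (and bool) values are bit-copied in with memcpy and read back the same way in the
// compute and backward passes, so no numeric conversion takes place.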
  5598. struct ggml_tensor * ggml_rope(
  5599. struct ggml_context * ctx,
  5600. struct ggml_tensor * a,
  5601. int n_past,
  5602. int n_dims,
  5603. int mode,
  5604. int n_ctx) {
  5605. return ggml_rope_impl(ctx, a, n_past, n_dims, mode, n_ctx, 10000.0f, 1.0f, 0.0f, false, false);
  5606. }
  5607. struct ggml_tensor * ggml_rope_inplace(
  5608. struct ggml_context * ctx,
  5609. struct ggml_tensor * a,
  5610. int n_past,
  5611. int n_dims,
  5612. int mode,
  5613. int n_ctx) {
  5614. return ggml_rope_impl(ctx, a, n_past, n_dims, mode, n_ctx, 10000.0f, 1.0f, 0.0f, false, true);
  5615. }
  5616. struct ggml_tensor * ggml_rope_custom(
  5617. struct ggml_context * ctx,
  5618. struct ggml_tensor * a,
  5619. int n_past,
  5620. int n_dims,
  5621. int mode,
  5622. int n_ctx,
  5623. float freq_base,
  5624. float freq_scale) {
  5625. return ggml_rope_impl(ctx, a, n_past, n_dims, mode, n_ctx, freq_base, freq_scale, 0.0f, false, false);
  5626. }
  5627. struct ggml_tensor * ggml_rope_custom_inplace(
  5628. struct ggml_context * ctx,
  5629. struct ggml_tensor * a,
  5630. int n_past,
  5631. int n_dims,
  5632. int mode,
  5633. int n_ctx,
  5634. float freq_base,
  5635. float freq_scale) {
  5636. return ggml_rope_impl(ctx, a, n_past, n_dims, mode, n_ctx, freq_base, freq_scale, 0.0f, false, true);
  5637. }
  5638. struct ggml_tensor * ggml_rope_xpos_inplace(
  5639. struct ggml_context * ctx,
  5640. struct ggml_tensor * a,
  5641. int n_past,
  5642. int n_dims,
  5643. float base,
  5644. bool down) {
  5645. return ggml_rope_impl(ctx, a, n_past, n_dims, 0, 0, 10000.0f, 1.0f, base, down, true);
  5646. }
  5647. // ggml_rope_back
  5648. struct ggml_tensor * ggml_rope_back(
  5649. struct ggml_context * ctx,
  5650. struct ggml_tensor * a,
  5651. int n_past,
  5652. int n_dims,
  5653. int mode,
  5654. int n_ctx,
  5655. float freq_base,
  5656. float freq_scale,
  5657. float xpos_base,
  5658. bool xpos_down) {
  5659. GGML_ASSERT(n_past >= 0);
  5660. GGML_ASSERT((mode & 4) == 0 && "ggml_rope_back() for ChatGLM not implemented yet");
  5661. bool is_node = false;
  5662. if (a->grad) {
  5663. is_node = false; // TODO: implement backward
  5664. }
  5665. struct ggml_tensor * result = ggml_dup_tensor(ctx, a);
  5666. int32_t params[8] = { n_past, n_dims, mode, n_ctx };
  5667. memcpy(params + 4, &freq_base, sizeof(float));
  5668. memcpy(params + 5, &freq_scale, sizeof(float));
  5669. memcpy(params + 6, &xpos_base, sizeof(float));
  5670. memcpy(params + 7, &xpos_down, sizeof(bool));
  5671. ggml_set_op_params(result, params, sizeof(params));
  5672. result->op = GGML_OP_ROPE_BACK;
  5673. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5674. result->src[0] = a;
  5675. return result;
  5676. }
  5677. // ggml_alibi
  5678. struct ggml_tensor * ggml_alibi(
  5679. struct ggml_context * ctx,
  5680. struct ggml_tensor * a,
  5681. int n_past,
  5682. int n_head,
  5683. float bias_max) {
  5684. GGML_ASSERT(n_past >= 0);
  5685. bool is_node = false;
  5686. if (a->grad) {
  5687. GGML_ASSERT(false); // TODO: implement backward
  5688. is_node = true;
  5689. }
5690. // TODO: when implementing backward, fix this:
  5691. //struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  5692. struct ggml_tensor * result = ggml_view_tensor(ctx, a);
  5693. int32_t op_params[3] = { n_past, n_head };
  5694. memcpy(op_params + 2, &bias_max, sizeof(float));
  5695. ggml_set_op_params(result, op_params, sizeof(op_params));
  5696. result->op = GGML_OP_ALIBI;
  5697. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5698. result->src[0] = a;
  5699. return result;
  5700. }
  5701. // ggml_clamp
  5702. struct ggml_tensor * ggml_clamp(
  5703. struct ggml_context * ctx,
  5704. struct ggml_tensor * a,
  5705. float min,
  5706. float max) {
  5707. bool is_node = false;
  5708. if (a->grad) {
  5709. GGML_ASSERT(false); // TODO: implement backward
  5710. is_node = true;
  5711. }
5712. // TODO: when implementing backward, fix this:
  5713. struct ggml_tensor * result = ggml_view_tensor(ctx, a);
  5714. float params[] = { min, max };
  5715. ggml_set_op_params(result, params, sizeof(params));
  5716. result->op = GGML_OP_CLAMP;
  5717. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5718. result->src[0] = a;
  5719. return result;
  5720. }
  5721. // ggml_conv_1d
  5722. static int64_t ggml_calc_conv_output_size(int64_t ins, int64_t ks, int s, int p, int d) {
  5723. return (ins + 2 * p - d * (ks - 1) - 1) / s + 1;
  5724. }
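// e.g. ins=10, ks=3, s=1, p=0, d=1 -> (10 + 0 - 1*(3-1) - 1)/1 + 1 = 8 output elements;
// with s=2, p=1 the same input gives (10 + 2 - 2 - 1)/2 + 1 = 5.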
  5725. GGML_API struct ggml_tensor * ggml_conv_1d(
  5726. struct ggml_context * ctx,
  5727. struct ggml_tensor * a,
  5728. struct ggml_tensor * b,
  5729. int s0,
  5730. int p0,
  5731. int d0) {
  5732. GGML_ASSERT(ggml_is_matrix(b));
  5733. GGML_ASSERT(a->ne[1] == b->ne[1]);
  5734. bool is_node = false;
  5735. if (a->grad || b->grad) {
  5736. GGML_ASSERT(false); // TODO: implement backward
  5737. is_node = true;
  5738. }
  5739. const int64_t ne[4] = {
  5740. ggml_calc_conv_output_size(b->ne[0], a->ne[0], s0, p0, d0),
  5741. a->ne[2], 1, 1,
  5742. };
  5743. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 2, ne);
  5744. int32_t params[] = { s0, p0, d0 };
  5745. ggml_set_op_params(result, params, sizeof(params));
  5746. result->op = GGML_OP_CONV_1D;
  5747. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5748. result->src[0] = a;
  5749. result->src[1] = b;
  5750. return result;
  5751. }
  5752. // ggml_conv_1d_ph
  5753. struct ggml_tensor* ggml_conv_1d_ph(
  5754. struct ggml_context * ctx,
  5755. struct ggml_tensor * a,
  5756. struct ggml_tensor * b,
  5757. int s,
  5758. int d) {
  5759. return ggml_conv_1d(ctx, a, b, s, a->ne[0] / 2, d);
  5760. }
  5761. // ggml_conv_2d
  5762. struct ggml_tensor * ggml_conv_2d(
  5763. struct ggml_context * ctx,
  5764. struct ggml_tensor * a,
  5765. struct ggml_tensor * b,
  5766. int s0,
  5767. int s1,
  5768. int p0,
  5769. int p1,
  5770. int d0,
  5771. int d1) {
  5772. GGML_ASSERT(a->ne[2] == b->ne[2]);
  5773. bool is_node = false;
  5774. if (a->grad || b->grad) {
  5775. GGML_ASSERT(false); // TODO: implement backward
  5776. is_node = true;
  5777. }
  5778. const int64_t ne[4] = {
  5779. ggml_calc_conv_output_size(b->ne[0], a->ne[0], s0, p0, d0),
  5780. ggml_calc_conv_output_size(b->ne[1], a->ne[1], s1, p1, d1),
  5781. a->ne[3], b->ne[3],
  5782. };
  5783. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);
  5784. int32_t params[] = { s0, s1, p0, p1, d0, d1 };
  5785. ggml_set_op_params(result, params, sizeof(params));
  5786. result->op = GGML_OP_CONV_2D;
  5787. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5788. result->src[0] = a;
  5789. result->src[1] = b;
  5790. return result;
  5791. }
  5792. // ggml_conv_2d_sk_p0
  5793. struct ggml_tensor * ggml_conv_2d_sk_p0(
  5794. struct ggml_context * ctx,
  5795. struct ggml_tensor * a,
  5796. struct ggml_tensor * b) {
  5797. return ggml_conv_2d(ctx, a, b, a->ne[0], a->ne[1], 0, 0, 1, 1);
  5798. }
  5799. // ggml_conv_2d_s1_ph
  5800. struct ggml_tensor * ggml_conv_2d_s1_ph(
  5801. struct ggml_context * ctx,
  5802. struct ggml_tensor * a,
  5803. struct ggml_tensor * b) {
  5804. return ggml_conv_2d(ctx, a, b, 1, 1, a->ne[0] / 2, a->ne[1] / 2, 1, 1);
  5805. }
  5806. // ggml_conv_transpose_2d_p0
  5807. static int64_t ggml_calc_conv_transpose_output_size(int64_t ins, int64_t ks, int s, int p) {
  5808. return (ins - 1) * s - 2 * p + ks;
  5809. }
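// e.g. ins=5, ks=3, s=2, p=0 -> (5 - 1)*2 - 0 + 3 = 11 output elements
// (the inverse of the size reduction of a strided convolution).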
  5810. struct ggml_tensor * ggml_conv_transpose_2d_p0(
  5811. struct ggml_context * ctx,
  5812. struct ggml_tensor * a,
  5813. struct ggml_tensor * b,
  5814. int stride) {
  5815. GGML_ASSERT(a->ne[3] == b->ne[2]);
  5816. bool is_node = false;
  5817. if (a->grad || b->grad) {
  5818. GGML_ASSERT(false); // TODO: implement backward
  5819. is_node = true;
  5820. }
  5821. const int64_t ne[4] = {
  5822. ggml_calc_conv_transpose_output_size(b->ne[0], a->ne[0], stride, 0 /*p0*/),
  5823. ggml_calc_conv_transpose_output_size(b->ne[1], a->ne[1], stride, 0 /*p1*/),
  5824. a->ne[2], b->ne[3],
  5825. };
  5826. struct ggml_tensor* result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);
  5827. ggml_set_op_params_i32(result, 0, stride);
  5828. result->op = GGML_OP_CONV_TRANSPOSE_2D;
  5829. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5830. result->src[0] = a;
  5831. result->src[1] = b;
  5832. return result;
  5833. }
  5834. // ggml_pool_*
  5835. static int64_t ggml_calc_pool_output_size(int64_t ins, int ks, int s, int p) {
  5836. return (ins + 2 * p - ks) / s + 1;
  5837. }
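// e.g. ins=8, ks=2, s=2, p=0 -> (8 + 0 - 2)/2 + 1 = 4 pooled elements.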
  5838. // ggml_pool_1d
  5839. struct ggml_tensor * ggml_pool_1d(
  5840. struct ggml_context * ctx,
  5841. struct ggml_tensor * a,
  5842. enum ggml_op_pool op,
  5843. int k0,
  5844. int s0,
  5845. int p0) {
  5846. bool is_node = false;
  5847. if (a->grad) {
  5848. GGML_ASSERT(false); // TODO: implement backward
  5849. is_node = true;
  5850. }
  5851. const int64_t ne[3] = {
  5852. ggml_calc_pool_output_size(a->ne[0], k0, s0, p0),
  5853. a->ne[1],
  5854. };
  5855. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 2, ne);
  5856. int32_t params[] = { op, k0, s0, p0 };
  5857. ggml_set_op_params(result, params, sizeof(params));
  5858. result->op = GGML_OP_POOL_1D;
  5859. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5860. result->src[0] = a;
  5861. return result;
  5862. }
  5863. // ggml_pool_2d
  5864. struct ggml_tensor * ggml_pool_2d(
  5865. struct ggml_context * ctx,
  5866. struct ggml_tensor * a,
  5867. enum ggml_op_pool op,
  5868. int k0,
  5869. int k1,
  5870. int s0,
  5871. int s1,
  5872. int p0,
  5873. int p1) {
  5874. bool is_node = false;
  5875. if (a->grad) {
  5876. GGML_ASSERT(false); // TODO: implement backward
  5877. is_node = true;
  5878. }
  5879. const int64_t ne[3] = {
  5880. ggml_calc_pool_output_size(a->ne[0], k0, s0, p0),
  5881. ggml_calc_pool_output_size(a->ne[1], k1, s1, p1),
  5882. a->ne[2],
  5883. };
  5884. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 3, ne);
  5885. int32_t params[] = { op, k0, k1, s0, s1, p0, p1 };
  5886. ggml_set_op_params(result, params, sizeof(params));
  5887. result->op = GGML_OP_POOL_2D;
  5888. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5889. result->src[0] = a;
  5890. return result;
  5891. }
  5892. // ggml_upscale
  5893. static struct ggml_tensor * ggml_upscale_impl(
  5894. struct ggml_context * ctx,
  5895. struct ggml_tensor * a,
  5896. int scale_factor) {
  5897. bool is_node = false;
  5898. if (a->grad) {
  5899. GGML_ASSERT(false); // TODO: implement backward
  5900. is_node = true;
  5901. }
  5902. struct ggml_tensor * result = ggml_new_tensor_4d(ctx, a->type,
  5903. a->ne[0] * scale_factor,
  5904. a->ne[1] * scale_factor,
  5905. a->ne[2], a->ne[3]);
  5906. result->op = GGML_OP_UPSCALE;
  5907. result->op_params[0] = scale_factor;
  5908. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5909. result->src[0] = a;
  5910. result->src[1] = NULL;
  5911. return result;
  5912. }
  5913. struct ggml_tensor * ggml_upscale(
  5914. struct ggml_context * ctx,
  5915. struct ggml_tensor * a,
  5916. int scale_factor) {
  5917. return ggml_upscale_impl(ctx, a, scale_factor);
  5918. }
  5919. // ggml_flash_attn
  5920. struct ggml_tensor * ggml_flash_attn(
  5921. struct ggml_context * ctx,
  5922. struct ggml_tensor * q,
  5923. struct ggml_tensor * k,
  5924. struct ggml_tensor * v,
  5925. bool masked) {
  5926. GGML_ASSERT(ggml_can_mul_mat(k, q));
  5927. // TODO: check if vT can be multiplied by (k*qT)
  5928. bool is_node = false;
  5929. if (q->grad || k->grad || v->grad) {
  5930. is_node = true;
  5931. }
  5932. //struct ggml_tensor * result = ggml_dup_tensor(ctx, q);
  5933. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, q->n_dims, q->ne);
  5934. int32_t t = masked ? 1 : 0;
  5935. ggml_set_op_params(result, &t, sizeof(t));
  5936. result->op = GGML_OP_FLASH_ATTN;
  5937. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5938. result->src[0] = q;
  5939. result->src[1] = k;
  5940. result->src[2] = v;
  5941. return result;
  5942. }
  5943. // ggml_flash_ff
  5944. struct ggml_tensor * ggml_flash_ff(
  5945. struct ggml_context * ctx,
  5946. struct ggml_tensor * a,
  5947. struct ggml_tensor * b0,
  5948. struct ggml_tensor * b1,
  5949. struct ggml_tensor * c0,
  5950. struct ggml_tensor * c1) {
  5951. GGML_ASSERT(ggml_can_mul_mat(b0, a));
  5952. // TODO: more checks
  5953. bool is_node = false;
  5954. if (a->grad || b0->grad || b1->grad || c0->grad || c1->grad) {
  5955. is_node = true;
  5956. }
  5957. //struct ggml_tensor * result = ggml_dup_tensor(ctx, a);
  5958. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, a->n_dims, a->ne);
  5959. result->op = GGML_OP_FLASH_FF;
  5960. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  5961. result->src[0] = a;
  5962. result->src[1] = b0;
  5963. result->src[2] = b1;
  5964. result->src[3] = c0;
  5965. result->src[4] = c1;
  5966. return result;
  5967. }
  5968. // ggml_flash_attn_back
  5969. struct ggml_tensor * ggml_flash_attn_back(
  5970. struct ggml_context * ctx,
  5971. struct ggml_tensor * q,
  5972. struct ggml_tensor * k,
  5973. struct ggml_tensor * v,
  5974. struct ggml_tensor * d,
  5975. bool masked) {
  5976. GGML_ASSERT(ggml_can_mul_mat(k, q));
  5977. // TODO: check if vT can be multiplied by (k*qT)
  5978. // d shape [D,N,ne2,ne3]
  5979. // q shape [D,N,ne2,ne3]
  5980. // k shape [D,M,ne2,ne3]
  5981. // v shape [M,D,ne2,ne3]
  5982. const int64_t D = q->ne[0];
  5983. const int64_t N = q->ne[1];
  5984. const int64_t M = k->ne[1];
  5985. const int64_t ne2 = q->ne[2];
  5986. const int64_t ne3 = q->ne[3];
  5987. GGML_ASSERT(k->ne[0] == D);
  5988. GGML_ASSERT(v->ne[0] == M);
  5989. GGML_ASSERT(v->ne[1] == D);
  5990. GGML_ASSERT(d->ne[0] == D);
  5991. GGML_ASSERT(d->ne[1] == N);
  5992. GGML_ASSERT(k->ne[2] == ne2);
  5993. GGML_ASSERT(k->ne[3] == ne3);
  5994. GGML_ASSERT(v->ne[2] == ne2);
  5995. GGML_ASSERT(v->ne[3] == ne3);
  5996. GGML_ASSERT(d->ne[2] == ne2);
  5997. GGML_ASSERT(d->ne[3] == ne3);
  5998. bool is_node = false;
  5999. if (q->grad || k->grad || v->grad) {
  6000. // when using this operation (in backwards pass) these grads are set.
  6001. // we don't want to create (big) grad of our result, so is_node is false.
  6002. is_node = false;
  6003. }
6004. // store gradients of q, k and v as contiguous tensors concatenated in result.
  6005. // q shape[D,N,ne2,ne3] ; k shape [D,M,ne2,ne3] ; v shape [M,D,ne2,ne3]
  6006. // gradq->data = result->data
  6007. // gradk->data = result->data + nb0*D*N*ne2*ne3
  6008. // gradv->data = result->data + nb0*D*N*ne2*ne3 + nb0*D*M*ne2*ne3
  6009. // note: v and gradv are actually transposed, i.e. v->ne[0] != D.
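// e.g. with D=64, N=32, M=48 each (ne2,ne3) slice holds 64*32 floats for gradq,
// 64*48 for gradk and 48*64 for gradv, matching ne[1] = N + 2*M = 128 below.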
  6010. int64_t ne[4] = {D,M+N+M,ne2,ne3};
  6011. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);
  6012. int32_t masked_i = masked ? 1 : 0;
  6013. ggml_set_op_params(result, &masked_i, sizeof(masked_i));
  6014. result->op = GGML_OP_FLASH_ATTN_BACK;
  6015. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  6016. result->src[0] = q;
  6017. result->src[1] = k;
  6018. result->src[2] = v;
  6019. result->src[3] = d;
  6020. return result;
  6021. }
  6022. // ggml_win_part
  6023. struct ggml_tensor * ggml_win_part(
  6024. struct ggml_context * ctx,
  6025. struct ggml_tensor * a,
  6026. int w) {
  6027. GGML_ASSERT(a->ne[3] == 1);
  6028. GGML_ASSERT(a->type == GGML_TYPE_F32);
  6029. bool is_node = false;
  6030. if (a->grad) {
  6031. GGML_ASSERT(false); // TODO: implement backward
  6032. is_node = true;
  6033. }
  6034. // padding
  6035. const int px = (w - a->ne[1]%w)%w;
  6036. const int py = (w - a->ne[2]%w)%w;
  6037. const int npx = (px + a->ne[1])/w;
  6038. const int npy = (py + a->ne[2])/w;
  6039. const int np = npx*npy;
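// e.g. w=8 with a 14x14 spatial grid (a->ne[1] = a->ne[2] = 14): px = py = 2 and
// npx = npy = 2, so the input is padded to 16x16 and split into np = 4 windows of 8x8.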
  6040. const int64_t ne[4] = { a->ne[0], w, w, np, };
  6041. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);
  6042. int32_t params[] = { npx, npy, w };
  6043. ggml_set_op_params(result, params, sizeof(params));
  6044. result->op = GGML_OP_WIN_PART;
  6045. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  6046. result->src[0] = a;
  6047. return result;
  6048. }
  6049. // ggml_win_unpart
  6050. struct ggml_tensor * ggml_win_unpart(
  6051. struct ggml_context * ctx,
  6052. struct ggml_tensor * a,
  6053. int w0,
  6054. int h0,
  6055. int w) {
  6056. GGML_ASSERT(a->type == GGML_TYPE_F32);
  6057. bool is_node = false;
  6058. if (a->grad) {
  6059. GGML_ASSERT(false); // TODO: implement backward
  6060. is_node = true;
  6061. }
  6062. const int64_t ne[4] = { a->ne[0], w0, h0, 1, };
  6063. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 3, ne);
  6064. int32_t params[] = { w };
  6065. ggml_set_op_params(result, params, sizeof(params));
  6066. result->op = GGML_OP_WIN_UNPART;
  6067. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  6068. result->src[0] = a;
  6069. return result;
  6070. }
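// worked example (illustrative numbers): for an input a with ne = {C, 14, 14, 1} and w = 7,
// px = py = 0, npx = npy = 2, np = 4, so ggml_win_part returns a tensor of shape {C, 7, 7, 4}.
// ggml_win_unpart(ctx, parts, 14, 14, 7) then restores the original {C, 14, 14, 1} layout.
// with a 13x13 input the padding kicks in: px = py = 1 and still npx = npy = 2, np = 4.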
  6071. // ggml_get_rel_pos
  6072. struct ggml_tensor * ggml_get_rel_pos(
  6073. struct ggml_context * ctx,
  6074. struct ggml_tensor * a,
  6075. int qh,
  6076. int kh) {
  6077. GGML_ASSERT(qh == kh);
  6078. GGML_ASSERT(2*MAX(qh, kh) - 1 == a->ne[1]);
  6079. bool is_node = false;
  6080. if (a->grad) {
  6081. GGML_ASSERT(false); // TODO: implement backward
  6082. is_node = true;
  6083. }
  6084. const int64_t ne[4] = { a->ne[0], kh, qh, 1, };
  6085. struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F16, 3, ne);
  6086. result->op = GGML_OP_GET_REL_POS;
  6087. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  6088. result->src[0] = a;
  6089. result->src[1] = NULL;
  6090. return result;
  6091. }
  6092. // ggml_add_rel_pos
  6093. static struct ggml_tensor * ggml_add_rel_pos_impl(
  6094. struct ggml_context * ctx,
  6095. struct ggml_tensor * a,
  6096. struct ggml_tensor * pw,
  6097. struct ggml_tensor * ph,
  6098. bool inplace) {
  6099. GGML_ASSERT(ggml_are_same_shape(pw, ph));
  6100. GGML_ASSERT(ggml_is_contiguous(a));
  6101. GGML_ASSERT(ggml_is_contiguous(pw));
  6102. GGML_ASSERT(ggml_is_contiguous(ph));
  6103. GGML_ASSERT(ph->type == GGML_TYPE_F32);
  6104. GGML_ASSERT(pw->type == GGML_TYPE_F32);
  6105. GGML_ASSERT(pw->ne[3] == a->ne[2]);
  6106. GGML_ASSERT(pw->ne[0]*pw->ne[0] == a->ne[0]);
  6107. GGML_ASSERT(pw->ne[1]*pw->ne[2] == a->ne[1]);
  6108. bool is_node = false;
  6109. if (!inplace && (a->grad || pw->grad || ph->grad)) {
  6110. is_node = true;
  6111. }
  6112. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  6113. ggml_set_op_params_i32(result, 0, inplace ? 1 : 0);
  6114. result->op = GGML_OP_ADD_REL_POS;
  6115. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  6116. result->src[0] = a;
  6117. result->src[1] = pw;
  6118. result->src[2] = ph;
  6119. return result;
  6120. }
  6121. struct ggml_tensor * ggml_add_rel_pos(
  6122. struct ggml_context * ctx,
  6123. struct ggml_tensor * a,
  6124. struct ggml_tensor * pw,
  6125. struct ggml_tensor * ph) {
  6126. return ggml_add_rel_pos_impl(ctx, a, pw, ph, false);
  6127. }
  6128. struct ggml_tensor * ggml_add_rel_pos_inplace(
  6129. struct ggml_context * ctx,
  6130. struct ggml_tensor * a,
  6131. struct ggml_tensor * pw,
  6132. struct ggml_tensor * ph) {
  6133. return ggml_add_rel_pos_impl(ctx, a, pw, ph, true);
  6134. }
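// shape example (restating the asserts above with concrete, illustrative numbers):
// if pw and ph have ne = {8, 14, 14, 12}, then a must satisfy
//   a->ne[0] == 8*8 == 64, a->ne[1] == 14*14 == 196, a->ne[2] == 12.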
6135. // ggml_unary
  6136. static struct ggml_tensor * ggml_unary_impl(
  6137. struct ggml_context * ctx,
  6138. struct ggml_tensor * a,
  6139. enum ggml_unary_op op,
  6140. bool inplace) {
  6141. bool is_node = false;
  6142. if (!inplace && (a->grad)) {
  6143. is_node = true;
  6144. }
  6145. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  6146. ggml_set_op_params_i32(result, 0, (int32_t) op);
  6147. result->op = GGML_OP_UNARY;
  6148. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  6149. result->src[0] = a;
  6150. return result;
  6151. }
  6152. struct ggml_tensor * ggml_unary(
  6153. struct ggml_context * ctx,
  6154. struct ggml_tensor * a,
  6155. enum ggml_unary_op op) {
  6156. return ggml_unary_impl(ctx, a, op, false);
  6157. }
  6158. struct ggml_tensor * ggml_unary_inplace(
  6159. struct ggml_context * ctx,
  6160. struct ggml_tensor * a,
  6161. enum ggml_unary_op op) {
  6162. return ggml_unary_impl(ctx, a, op, true);
  6163. }
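// usage sketch (hedged): the per-element operation is selected via the enum, e.g.
//   struct ggml_tensor * y = ggml_unary(ctx, x, GGML_UNARY_OP_GELU);
// which records GGML_OP_UNARY with the chosen unary op stored in op_params[0].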
  6164. // ggml_map_unary
  6165. static struct ggml_tensor * ggml_map_unary_impl_f32(
  6166. struct ggml_context * ctx,
  6167. struct ggml_tensor * a,
  6168. const ggml_unary_op_f32_t fun,
  6169. bool inplace) {
  6170. bool is_node = false;
  6171. if (!inplace && a->grad) {
  6172. is_node = true;
  6173. }
  6174. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  6175. ggml_set_op_params(result, (const void *) &fun, sizeof(fun));
  6176. result->op = GGML_OP_MAP_UNARY;
  6177. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  6178. result->src[0] = a;
  6179. return result;
  6180. }
  6181. struct ggml_tensor * ggml_map_unary_f32(
  6182. struct ggml_context * ctx,
  6183. struct ggml_tensor * a,
  6184. const ggml_unary_op_f32_t fun) {
  6185. return ggml_map_unary_impl_f32(ctx, a, fun, false);
  6186. }
  6187. struct ggml_tensor * ggml_map_unary_inplace_f32(
  6188. struct ggml_context * ctx,
  6189. struct ggml_tensor * a,
  6190. const ggml_unary_op_f32_t fun) {
  6191. return ggml_map_unary_impl_f32(ctx, a, fun, true);
  6192. }
  6193. // ggml_map_binary
  6194. static struct ggml_tensor * ggml_map_binary_impl_f32(
  6195. struct ggml_context * ctx,
  6196. struct ggml_tensor * a,
  6197. struct ggml_tensor * b,
  6198. const ggml_binary_op_f32_t fun,
  6199. bool inplace) {
  6200. GGML_ASSERT(ggml_are_same_shape(a, b));
  6201. bool is_node = false;
  6202. if (!inplace && (a->grad || b->grad)) {
  6203. is_node = true;
  6204. }
  6205. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  6206. ggml_set_op_params(result, (const void *) &fun, sizeof(fun));
  6207. result->op = GGML_OP_MAP_BINARY;
  6208. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  6209. result->src[0] = a;
  6210. result->src[1] = b;
  6211. return result;
  6212. }
  6213. struct ggml_tensor * ggml_map_binary_f32(
  6214. struct ggml_context * ctx,
  6215. struct ggml_tensor * a,
  6216. struct ggml_tensor * b,
  6217. const ggml_binary_op_f32_t fun) {
  6218. return ggml_map_binary_impl_f32(ctx, a, b, fun, false);
  6219. }
  6220. struct ggml_tensor * ggml_map_binary_inplace_f32(
  6221. struct ggml_context * ctx,
  6222. struct ggml_tensor * a,
  6223. struct ggml_tensor * b,
  6224. const ggml_binary_op_f32_t fun) {
  6225. return ggml_map_binary_impl_f32(ctx, a, b, fun, true);
  6226. }
  6227. // ggml_map_custom1_f32
  6228. static struct ggml_tensor * ggml_map_custom1_impl_f32(
  6229. struct ggml_context * ctx,
  6230. struct ggml_tensor * a,
  6231. const ggml_custom1_op_f32_t fun,
  6232. bool inplace) {
  6233. bool is_node = false;
  6234. if (!inplace && a->grad) {
  6235. is_node = true;
  6236. }
  6237. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  6238. ggml_set_op_params(result, (const void *) &fun, sizeof(fun));
  6239. result->op = GGML_OP_MAP_CUSTOM1_F32;
  6240. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  6241. result->src[0] = a;
  6242. return result;
  6243. }
  6244. struct ggml_tensor * ggml_map_custom1_f32(
  6245. struct ggml_context * ctx,
  6246. struct ggml_tensor * a,
  6247. const ggml_custom1_op_f32_t fun) {
  6248. return ggml_map_custom1_impl_f32(ctx, a, fun, false);
  6249. }
  6250. struct ggml_tensor * ggml_map_custom1_inplace_f32(
  6251. struct ggml_context * ctx,
  6252. struct ggml_tensor * a,
  6253. const ggml_custom1_op_f32_t fun) {
  6254. return ggml_map_custom1_impl_f32(ctx, a, fun, true);
  6255. }
  6256. // ggml_map_custom2_f32
  6257. static struct ggml_tensor * ggml_map_custom2_impl_f32(
  6258. struct ggml_context * ctx,
  6259. struct ggml_tensor * a,
  6260. struct ggml_tensor * b,
  6261. const ggml_custom2_op_f32_t fun,
  6262. bool inplace) {
  6263. bool is_node = false;
  6264. if (!inplace && (a->grad || b->grad)) {
  6265. is_node = true;
  6266. }
  6267. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  6268. ggml_set_op_params(result, (const void *) &fun, sizeof(fun));
  6269. result->op = GGML_OP_MAP_CUSTOM2_F32;
  6270. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  6271. result->src[0] = a;
  6272. result->src[1] = b;
  6273. return result;
  6274. }
  6275. struct ggml_tensor * ggml_map_custom2_f32(
  6276. struct ggml_context * ctx,
  6277. struct ggml_tensor * a,
  6278. struct ggml_tensor * b,
  6279. const ggml_custom2_op_f32_t fun) {
  6280. return ggml_map_custom2_impl_f32(ctx, a, b, fun, false);
  6281. }
  6282. struct ggml_tensor * ggml_map_custom2_inplace_f32(
  6283. struct ggml_context * ctx,
  6284. struct ggml_tensor * a,
  6285. struct ggml_tensor * b,
  6286. const ggml_custom2_op_f32_t fun) {
  6287. return ggml_map_custom2_impl_f32(ctx, a, b, fun, true);
  6288. }
  6289. // ggml_map_custom3_f32
  6290. static struct ggml_tensor * ggml_map_custom3_impl_f32(
  6291. struct ggml_context * ctx,
  6292. struct ggml_tensor * a,
  6293. struct ggml_tensor * b,
  6294. struct ggml_tensor * c,
  6295. const ggml_custom3_op_f32_t fun,
  6296. bool inplace) {
  6297. bool is_node = false;
  6298. if (!inplace && (a->grad || b->grad || c->grad)) {
  6299. is_node = true;
  6300. }
  6301. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  6302. ggml_set_op_params(result, (const void *) &fun, sizeof(fun));
  6303. result->op = GGML_OP_MAP_CUSTOM3_F32;
  6304. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  6305. result->src[0] = a;
  6306. result->src[1] = b;
  6307. result->src[2] = c;
  6308. return result;
  6309. }
  6310. struct ggml_tensor * ggml_map_custom3_f32(
  6311. struct ggml_context * ctx,
  6312. struct ggml_tensor * a,
  6313. struct ggml_tensor * b,
  6314. struct ggml_tensor * c,
  6315. const ggml_custom3_op_f32_t fun) {
  6316. return ggml_map_custom3_impl_f32(ctx, a, b, c, fun, false);
  6317. }
  6318. struct ggml_tensor * ggml_map_custom3_inplace_f32(
  6319. struct ggml_context * ctx,
  6320. struct ggml_tensor * a,
  6321. struct ggml_tensor * b,
  6322. struct ggml_tensor * c,
  6323. const ggml_custom3_op_f32_t fun) {
  6324. return ggml_map_custom3_impl_f32(ctx, a, b, c, fun, true);
  6325. }
  6326. // ggml_map_custom1
  6327. struct ggml_map_custom1_op_params {
  6328. ggml_custom1_op_t fun;
  6329. int n_tasks;
  6330. void * userdata;
  6331. };
  6332. static struct ggml_tensor * ggml_map_custom1_impl(
  6333. struct ggml_context * ctx,
  6334. struct ggml_tensor * a,
  6335. const ggml_custom1_op_t fun,
  6336. int n_tasks,
  6337. void * userdata,
  6338. bool inplace) {
  6339. GGML_ASSERT(n_tasks == GGML_N_TASKS_MAX || n_tasks > 0);
  6340. bool is_node = false;
  6341. if (!inplace && a->grad) {
  6342. is_node = true;
  6343. }
  6344. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  6345. struct ggml_map_custom1_op_params params = {
  6346. /*.fun =*/ fun,
  6347. /*.n_tasks =*/ n_tasks,
  6348. /*.userdata =*/ userdata
  6349. };
  6350. ggml_set_op_params(result, (const void *) &params, sizeof(params));
  6351. result->op = GGML_OP_MAP_CUSTOM1;
  6352. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  6353. result->src[0] = a;
  6354. return result;
  6355. }
  6356. struct ggml_tensor * ggml_map_custom1(
  6357. struct ggml_context * ctx,
  6358. struct ggml_tensor * a,
  6359. const ggml_custom1_op_t fun,
  6360. int n_tasks,
  6361. void * userdata) {
  6362. return ggml_map_custom1_impl(ctx, a, fun, n_tasks, userdata, false);
  6363. }
  6364. struct ggml_tensor * ggml_map_custom1_inplace(
  6365. struct ggml_context * ctx,
  6366. struct ggml_tensor * a,
  6367. const ggml_custom1_op_t fun,
  6368. int n_tasks,
  6369. void * userdata) {
  6370. return ggml_map_custom1_impl(ctx, a, fun, n_tasks, userdata, true);
  6371. }
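// usage sketch (hedged, illustrative helper name): the callback receives the thread index
// and thread count and is expected to split the work itself, e.g. by rows:
//
//   static void scale_by_two(struct ggml_tensor * dst, const struct ggml_tensor * a,
//                            int ith, int nth, void * userdata) {
//       const int nr  = ggml_nrows(a);
//       const int dr  = (nr + nth - 1)/nth;   // rows per thread
//       const int ir0 = dr*ith;
//       const int ir1 = MIN(ir0 + dr, nr);
//       for (int ir = ir0; ir < ir1; ++ir) {
//           const float * s = (const float *) ((const char *) a->data   + ir*a->nb[1]);
//           float       * d = (float       *) ((char       *) dst->data + ir*dst->nb[1]);
//           for (int64_t i = 0; i < a->ne[0]; ++i) {
//               d[i] = 2.0f*s[i];
//           }
//       }
//   }
//
//   struct ggml_tensor * y = ggml_map_custom1(ctx, x, scale_by_two, GGML_N_TASKS_MAX, NULL);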
  6372. // ggml_map_custom2
  6373. struct ggml_map_custom2_op_params {
  6374. ggml_custom2_op_t fun;
  6375. int n_tasks;
  6376. void * userdata;
  6377. };
  6378. static struct ggml_tensor * ggml_map_custom2_impl(
  6379. struct ggml_context * ctx,
  6380. struct ggml_tensor * a,
  6381. struct ggml_tensor * b,
  6382. const ggml_custom2_op_t fun,
  6383. int n_tasks,
  6384. void * userdata,
  6385. bool inplace) {
  6386. GGML_ASSERT(n_tasks == GGML_N_TASKS_MAX || n_tasks > 0);
  6387. bool is_node = false;
  6388. if (!inplace && (a->grad || b->grad)) {
  6389. is_node = true;
  6390. }
  6391. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  6392. struct ggml_map_custom2_op_params params = {
  6393. /*.fun =*/ fun,
  6394. /*.n_tasks =*/ n_tasks,
  6395. /*.userdata =*/ userdata
  6396. };
  6397. ggml_set_op_params(result, (const void *) &params, sizeof(params));
  6398. result->op = GGML_OP_MAP_CUSTOM2;
  6399. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  6400. result->src[0] = a;
  6401. result->src[1] = b;
  6402. return result;
  6403. }
  6404. struct ggml_tensor * ggml_map_custom2(
  6405. struct ggml_context * ctx,
  6406. struct ggml_tensor * a,
  6407. struct ggml_tensor * b,
  6408. const ggml_custom2_op_t fun,
  6409. int n_tasks,
  6410. void * userdata) {
  6411. return ggml_map_custom2_impl(ctx, a, b, fun, n_tasks, userdata, false);
  6412. }
  6413. struct ggml_tensor * ggml_map_custom2_inplace(
  6414. struct ggml_context * ctx,
  6415. struct ggml_tensor * a,
  6416. struct ggml_tensor * b,
  6417. const ggml_custom2_op_t fun,
  6418. int n_tasks,
  6419. void * userdata) {
  6420. return ggml_map_custom2_impl(ctx, a, b, fun, n_tasks, userdata, true);
  6421. }
  6422. // ggml_map_custom3
  6423. struct ggml_map_custom3_op_params {
  6424. ggml_custom3_op_t fun;
  6425. int n_tasks;
  6426. void * userdata;
  6427. };
  6428. static struct ggml_tensor * ggml_map_custom3_impl(
  6429. struct ggml_context * ctx,
  6430. struct ggml_tensor * a,
  6431. struct ggml_tensor * b,
  6432. struct ggml_tensor * c,
  6433. const ggml_custom3_op_t fun,
  6434. int n_tasks,
  6435. void * userdata,
  6436. bool inplace) {
  6437. GGML_ASSERT(n_tasks == GGML_N_TASKS_MAX || n_tasks > 0);
  6438. bool is_node = false;
  6439. if (!inplace && (a->grad || b->grad || c->grad)) {
  6440. is_node = true;
  6441. }
  6442. struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
  6443. struct ggml_map_custom3_op_params params = {
  6444. /*.fun =*/ fun,
  6445. /*.n_tasks =*/ n_tasks,
  6446. /*.userdata =*/ userdata
  6447. };
  6448. ggml_set_op_params(result, (const void *) &params, sizeof(params));
  6449. result->op = GGML_OP_MAP_CUSTOM3;
  6450. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  6451. result->src[0] = a;
  6452. result->src[1] = b;
  6453. result->src[2] = c;
  6454. return result;
  6455. }
  6456. struct ggml_tensor * ggml_map_custom3(
  6457. struct ggml_context * ctx,
  6458. struct ggml_tensor * a,
  6459. struct ggml_tensor * b,
  6460. struct ggml_tensor * c,
  6461. const ggml_custom3_op_t fun,
  6462. int n_tasks,
  6463. void * userdata) {
  6464. return ggml_map_custom3_impl(ctx, a, b, c, fun, n_tasks, userdata, false);
  6465. }
  6466. struct ggml_tensor * ggml_map_custom3_inplace(
  6467. struct ggml_context * ctx,
  6468. struct ggml_tensor * a,
  6469. struct ggml_tensor * b,
  6470. struct ggml_tensor * c,
  6471. const ggml_custom3_op_t fun,
  6472. int n_tasks,
  6473. void * userdata) {
  6474. return ggml_map_custom3_impl(ctx, a, b, c, fun, n_tasks, userdata, true);
  6475. }
  6476. // ggml_cross_entropy_loss
  6477. struct ggml_tensor * ggml_cross_entropy_loss(
  6478. struct ggml_context * ctx,
  6479. struct ggml_tensor * a,
  6480. struct ggml_tensor * b) {
  6481. GGML_ASSERT(ggml_are_same_shape(a, b));
  6482. bool is_node = false;
  6483. if (a->grad || b->grad) {
  6484. is_node = true;
  6485. }
  6486. struct ggml_tensor * result = ggml_new_tensor_1d(ctx, a->type, 1);
  6487. result->op = GGML_OP_CROSS_ENTROPY_LOSS;
  6488. result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
  6489. result->src[0] = a;
  6490. result->src[1] = b;
  6491. return result;
  6492. }
  6493. // ggml_cross_entropy_loss_back
  6494. struct ggml_tensor * ggml_cross_entropy_loss_back(
  6495. struct ggml_context * ctx,
  6496. struct ggml_tensor * a,
  6497. struct ggml_tensor * b,
  6498. struct ggml_tensor * c) {
  6499. GGML_ASSERT(ggml_are_same_shape(a, b));
  6500. GGML_ASSERT(ggml_is_scalar(c));
  6501. struct ggml_tensor * result = ggml_dup_tensor(ctx, a);
  6502. result->op = GGML_OP_CROSS_ENTROPY_LOSS_BACK;
  6503. result->grad = NULL;
  6504. result->src[0] = a;
  6505. result->src[1] = b;
  6506. result->src[2] = c;
  6507. return result;
  6508. }
  6509. ////////////////////////////////////////////////////////////////////////////////
  6510. void ggml_set_param(
  6511. struct ggml_context * ctx,
  6512. struct ggml_tensor * tensor) {
  6513. tensor->is_param = true;
  6514. GGML_ASSERT(tensor->grad == NULL);
  6515. tensor->grad = ggml_dup_tensor(ctx, tensor);
  6516. }
  6517. // ggml_compute_forward_dup
  6518. static void ggml_compute_forward_dup_same_cont(
  6519. const struct ggml_compute_params * params,
  6520. const struct ggml_tensor * src0,
  6521. struct ggml_tensor * dst) {
  6522. GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));
  6523. GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));
  6524. GGML_ASSERT(src0->type == dst->type);
  6525. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  6526. return;
  6527. }
  6528. const size_t nb00 = src0->nb[0];
  6529. const size_t nb0 = dst->nb[0];
  6530. const int ith = params->ith; // thread index
  6531. const int nth = params->nth; // number of threads
  6532. // parallelize by elements
  6533. const int ne = ggml_nelements(dst);
  6534. const int dr = (ne + nth - 1) / nth;
  6535. const int ie0 = dr * ith;
  6536. const int ie1 = MIN(ie0 + dr, ne);
  6537. if (ie0 < ie1) {
  6538. memcpy(
  6539. ((char *) dst->data + ie0*nb0),
  6540. ((char *) src0->data + ie0*nb00),
  6541. (ie1 - ie0) * ggml_type_size(src0->type));
  6542. }
  6543. }
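// worked example (illustrative numbers): with ne = 10 elements and nth = 4 threads,
// dr = 3, so the threads copy element ranges [0,3), [3,6), [6,9) and [9,10) respectively.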
  6544. static void ggml_compute_forward_dup_f16(
  6545. const struct ggml_compute_params * params,
  6546. const struct ggml_tensor * src0,
  6547. struct ggml_tensor * dst) {
  6548. GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));
  6549. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  6550. return;
  6551. }
  6552. GGML_TENSOR_UNARY_OP_LOCALS;
  6553. const int ith = params->ith; // thread index
  6554. const int nth = params->nth; // number of threads
  6555. if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst) && src0->type == dst->type) {
  6556. ggml_compute_forward_dup_same_cont(params, src0, dst);
  6557. return;
  6558. }
  6559. // parallelize by rows
  6560. const int nr = ne01;
  6561. // number of rows per thread
  6562. const int dr = (nr + nth - 1) / nth;
  6563. // row range for this thread
  6564. const int ir0 = dr * ith;
  6565. const int ir1 = MIN(ir0 + dr, nr);
  6566. if (src0->type == dst->type &&
  6567. ne00 == ne0 &&
  6568. nb00 == ggml_type_size(src0->type) && nb0 == ggml_type_size(dst->type)) {
  6569. // copy by rows
  6570. const size_t rs = ne00*nb00;
  6571. for (int64_t i03 = 0; i03 < ne03; i03++) {
  6572. for (int64_t i02 = 0; i02 < ne02; i02++) {
  6573. for (int64_t i01 = ir0; i01 < ir1; i01++) {
  6574. memcpy(
  6575. ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3),
  6576. ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03),
  6577. rs);
  6578. }
  6579. }
  6580. }
  6581. return;
  6582. }
  6583. // TODO: add more special-case implementations for tensor shapes/strides that can benefit from memcpy
  6584. if (ggml_is_contiguous(dst)) {
  6585. if (nb00 == sizeof(ggml_fp16_t)) {
  6586. if (dst->type == GGML_TYPE_F16) {
  6587. size_t id = 0;
  6588. const size_t rs = ne00 * nb00;
  6589. char * dst_ptr = (char *) dst->data;
  6590. for (int i03 = 0; i03 < ne03; i03++) {
  6591. for (int i02 = 0; i02 < ne02; i02++) {
  6592. id += rs * ir0;
  6593. for (int i01 = ir0; i01 < ir1; i01++) {
  6594. const char * src0_ptr = (char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03;
  6595. memcpy(dst_ptr + id, src0_ptr, rs);
  6596. id += rs;
  6597. }
  6598. id += rs * (ne01 - ir1);
  6599. }
  6600. }
  6601. } else if (dst->type == GGML_TYPE_F32) {
  6602. size_t id = 0;
  6603. float * dst_ptr = (float *) dst->data;
  6604. for (int i03 = 0; i03 < ne03; i03++) {
  6605. for (int i02 = 0; i02 < ne02; i02++) {
  6606. id += ne00 * ir0;
  6607. for (int i01 = ir0; i01 < ir1; i01++) {
  6608. const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
  6609. for (int i00 = 0; i00 < ne00; i00++) {
  6610. dst_ptr[id] = GGML_FP16_TO_FP32(src0_ptr[i00]);
  6611. id++;
  6612. }
  6613. }
  6614. id += ne00 * (ne01 - ir1);
  6615. }
  6616. }
  6617. } else if (type_traits[dst->type].from_float) {
  6618. ggml_from_float_t const quantize_row_q = type_traits[dst->type].from_float;
  6619. float * src0_f32 = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith;
  6620. size_t id = 0;
  6621. size_t rs = nb0 * (ne00 / ggml_blck_size(dst->type));
  6622. char * dst_ptr = (char *) dst->data;
  6623. for (int i03 = 0; i03 < ne03; i03++) {
  6624. for (int i02 = 0; i02 < ne02; i02++) {
  6625. id += rs * ir0;
  6626. for (int i01 = ir0; i01 < ir1; i01++) {
  6627. const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
  6628. for (int i00 = 0; i00 < ne00; i00++) {
  6629. src0_f32[i00] = GGML_FP16_TO_FP32(src0_ptr[i00]);
  6630. }
  6631. quantize_row_q(src0_f32, dst_ptr + id, ne00);
  6632. id += rs;
  6633. }
  6634. id += rs * (ne01 - ir1);
  6635. }
  6636. }
  6637. } else {
  6638. GGML_ASSERT(false); // TODO: implement
  6639. }
  6640. } else {
  6641. //printf("%s: this is not optimal - fix me\n", __func__);
  6642. if (dst->type == GGML_TYPE_F32) {
  6643. size_t id = 0;
  6644. float * dst_ptr = (float *) dst->data;
  6645. for (int i03 = 0; i03 < ne03; i03++) {
  6646. for (int i02 = 0; i02 < ne02; i02++) {
  6647. id += ne00 * ir0;
  6648. for (int i01 = ir0; i01 < ir1; i01++) {
  6649. for (int i00 = 0; i00 < ne00; i00++) {
  6650. const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  6651. dst_ptr[id] = GGML_FP16_TO_FP32(*src0_ptr);
  6652. id++;
  6653. }
  6654. }
  6655. id += ne00 * (ne01 - ir1);
  6656. }
  6657. }
  6658. } else if (dst->type == GGML_TYPE_F16) {
  6659. size_t id = 0;
  6660. ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data;
  6661. for (int i03 = 0; i03 < ne03; i03++) {
  6662. for (int i02 = 0; i02 < ne02; i02++) {
  6663. id += ne00 * ir0;
  6664. for (int i01 = ir0; i01 < ir1; i01++) {
  6665. for (int i00 = 0; i00 < ne00; i00++) {
  6666. const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  6667. dst_ptr[id] = *src0_ptr;
  6668. id++;
  6669. }
  6670. }
  6671. id += ne00 * (ne01 - ir1);
  6672. }
  6673. }
  6674. } else {
  6675. GGML_ASSERT(false); // TODO: implement
  6676. }
  6677. }
  6678. return;
  6679. }
  6680. // dst counters
  6681. int64_t i10 = 0;
  6682. int64_t i11 = 0;
  6683. int64_t i12 = 0;
  6684. int64_t i13 = 0;
  6685. if (dst->type == GGML_TYPE_F16) {
  6686. for (int64_t i03 = 0; i03 < ne03; i03++) {
  6687. for (int64_t i02 = 0; i02 < ne02; i02++) {
  6688. i10 += ne00 * ir0;
  6689. while (i10 >= ne0) {
  6690. i10 -= ne0;
  6691. if (++i11 == ne1) {
  6692. i11 = 0;
  6693. if (++i12 == ne2) {
  6694. i12 = 0;
  6695. if (++i13 == ne3) {
  6696. i13 = 0;
  6697. }
  6698. }
  6699. }
  6700. }
  6701. for (int64_t i01 = ir0; i01 < ir1; i01++) {
  6702. for (int64_t i00 = 0; i00 < ne00; i00++) {
  6703. const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  6704. char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3);
  6705. memcpy(dst_ptr, src0_ptr, sizeof(ggml_fp16_t));
  6706. if (++i10 == ne00) {
  6707. i10 = 0;
  6708. if (++i11 == ne01) {
  6709. i11 = 0;
  6710. if (++i12 == ne02) {
  6711. i12 = 0;
  6712. if (++i13 == ne03) {
  6713. i13 = 0;
  6714. }
  6715. }
  6716. }
  6717. }
  6718. }
  6719. }
  6720. i10 += ne00 * (ne01 - ir1);
  6721. while (i10 >= ne0) {
  6722. i10 -= ne0;
  6723. if (++i11 == ne1) {
  6724. i11 = 0;
  6725. if (++i12 == ne2) {
  6726. i12 = 0;
  6727. if (++i13 == ne3) {
  6728. i13 = 0;
  6729. }
  6730. }
  6731. }
  6732. }
  6733. }
  6734. }
  6735. } else if (dst->type == GGML_TYPE_F32) {
  6736. for (int64_t i03 = 0; i03 < ne03; i03++) {
  6737. for (int64_t i02 = 0; i02 < ne02; i02++) {
  6738. i10 += ne00 * ir0;
  6739. while (i10 >= ne0) {
  6740. i10 -= ne0;
  6741. if (++i11 == ne1) {
  6742. i11 = 0;
  6743. if (++i12 == ne2) {
  6744. i12 = 0;
  6745. if (++i13 == ne3) {
  6746. i13 = 0;
  6747. }
  6748. }
  6749. }
  6750. }
  6751. for (int64_t i01 = ir0; i01 < ir1; i01++) {
  6752. for (int64_t i00 = 0; i00 < ne00; i00++) {
  6753. const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  6754. char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3);
  6755. *(float *) dst_ptr = GGML_FP16_TO_FP32(*(const ggml_fp16_t *) src0_ptr);
  6756. if (++i10 == ne0) {
  6757. i10 = 0;
  6758. if (++i11 == ne1) {
  6759. i11 = 0;
  6760. if (++i12 == ne2) {
  6761. i12 = 0;
  6762. if (++i13 == ne3) {
  6763. i13 = 0;
  6764. }
  6765. }
  6766. }
  6767. }
  6768. }
  6769. }
  6770. i10 += ne00 * (ne01 - ir1);
  6771. while (i10 >= ne0) {
  6772. i10 -= ne0;
  6773. if (++i11 == ne1) {
  6774. i11 = 0;
  6775. if (++i12 == ne2) {
  6776. i12 = 0;
  6777. if (++i13 == ne3) {
  6778. i13 = 0;
  6779. }
  6780. }
  6781. }
  6782. }
  6783. }
  6784. }
  6785. } else {
  6786. GGML_ASSERT(false); // TODO: implement
  6787. }
  6788. }
  6789. static void ggml_compute_forward_dup_f32(
  6790. const struct ggml_compute_params * params,
  6791. const struct ggml_tensor * src0,
  6792. struct ggml_tensor * dst) {
  6793. GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));
  6794. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  6795. return;
  6796. }
  6797. GGML_TENSOR_UNARY_OP_LOCALS;
  6798. const int ith = params->ith; // thread index
  6799. const int nth = params->nth; // number of threads
  6800. if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst) && src0->type == dst->type) {
  6801. ggml_compute_forward_dup_same_cont(params, src0, dst);
  6802. return;
  6803. }
  6804. // parallelize by rows
  6805. const int nr = ne01;
  6806. // number of rows per thread
  6807. const int dr = (nr + nth - 1) / nth;
  6808. // row range for this thread
  6809. const int ir0 = dr * ith;
  6810. const int ir1 = MIN(ir0 + dr, nr);
  6811. if (src0->type == dst->type &&
  6812. ne00 == ne0 &&
  6813. nb00 == ggml_type_size(src0->type) && nb0 == ggml_type_size(dst->type)) {
  6814. // copy by rows
  6815. const size_t rs = ne00*nb00;
  6816. for (int64_t i03 = 0; i03 < ne03; i03++) {
  6817. for (int64_t i02 = 0; i02 < ne02; i02++) {
  6818. for (int64_t i01 = ir0; i01 < ir1; i01++) {
  6819. memcpy(
  6820. ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3),
  6821. ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03),
  6822. rs);
  6823. }
  6824. }
  6825. }
  6826. return;
  6827. }
  6828. if (ggml_is_contiguous(dst)) {
  6829. // TODO: simplify
  6830. if (nb00 == sizeof(float)) {
  6831. if (dst->type == GGML_TYPE_F32) {
  6832. size_t id = 0;
  6833. const size_t rs = ne00 * nb00;
  6834. char * dst_ptr = (char *) dst->data;
  6835. for (int i03 = 0; i03 < ne03; i03++) {
  6836. for (int i02 = 0; i02 < ne02; i02++) {
  6837. id += rs * ir0;
  6838. for (int i01 = ir0; i01 < ir1; i01++) {
  6839. const char * src0_ptr = (char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03;
  6840. memcpy(dst_ptr + id, src0_ptr, rs);
  6841. id += rs;
  6842. }
  6843. id += rs * (ne01 - ir1);
  6844. }
  6845. }
  6846. } else if (type_traits[dst->type].from_float) {
  6847. ggml_from_float_t const quantize_row_q = type_traits[dst->type].from_float;
  6848. size_t id = 0;
  6849. size_t rs = nb0 * (ne00 / ggml_blck_size(dst->type));
  6850. char * dst_ptr = (char *) dst->data;
  6851. for (int i03 = 0; i03 < ne03; i03++) {
  6852. for (int i02 = 0; i02 < ne02; i02++) {
  6853. id += rs * ir0;
  6854. for (int i01 = ir0; i01 < ir1; i01++) {
  6855. const float * src0_ptr = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
  6856. quantize_row_q(src0_ptr, dst_ptr + id, ne00);
  6857. id += rs;
  6858. }
  6859. id += rs * (ne01 - ir1);
  6860. }
  6861. }
  6862. } else {
  6863. GGML_ASSERT(false); // TODO: implement
  6864. }
  6865. } else {
  6866. //printf("%s: this is not optimal - fix me\n", __func__);
  6867. if (dst->type == GGML_TYPE_F32) {
  6868. size_t id = 0;
  6869. float * dst_ptr = (float *) dst->data;
  6870. for (int i03 = 0; i03 < ne03; i03++) {
  6871. for (int i02 = 0; i02 < ne02; i02++) {
  6872. id += ne00 * ir0;
  6873. for (int i01 = ir0; i01 < ir1; i01++) {
  6874. for (int i00 = 0; i00 < ne00; i00++) {
  6875. const float * src0_ptr = (float *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  6876. dst_ptr[id] = *src0_ptr;
  6877. id++;
  6878. }
  6879. }
  6880. id += ne00 * (ne01 - ir1);
  6881. }
  6882. }
  6883. } else if (dst->type == GGML_TYPE_F16) {
  6884. size_t id = 0;
  6885. ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data;
  6886. for (int i03 = 0; i03 < ne03; i03++) {
  6887. for (int i02 = 0; i02 < ne02; i02++) {
  6888. id += ne00 * ir0;
  6889. for (int i01 = ir0; i01 < ir1; i01++) {
  6890. for (int i00 = 0; i00 < ne00; i00++) {
  6891. const float * src0_ptr = (float *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  6892. dst_ptr[id] = GGML_FP32_TO_FP16(*src0_ptr);
  6893. id++;
  6894. }
  6895. }
  6896. id += ne00 * (ne01 - ir1);
  6897. }
  6898. }
  6899. } else {
  6900. GGML_ASSERT(false); // TODO: implement
  6901. }
  6902. }
  6903. return;
  6904. }
  6905. // dst counters
  6906. int64_t i10 = 0;
  6907. int64_t i11 = 0;
  6908. int64_t i12 = 0;
  6909. int64_t i13 = 0;
  6910. if (dst->type == GGML_TYPE_F32) {
  6911. for (int64_t i03 = 0; i03 < ne03; i03++) {
  6912. for (int64_t i02 = 0; i02 < ne02; i02++) {
  6913. i10 += ne00 * ir0;
  6914. while (i10 >= ne0) {
  6915. i10 -= ne0;
  6916. if (++i11 == ne1) {
  6917. i11 = 0;
  6918. if (++i12 == ne2) {
  6919. i12 = 0;
  6920. if (++i13 == ne3) {
  6921. i13 = 0;
  6922. }
  6923. }
  6924. }
  6925. }
  6926. for (int64_t i01 = ir0; i01 < ir1; i01++) {
  6927. for (int64_t i00 = 0; i00 < ne00; i00++) {
  6928. const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  6929. char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3);
  6930. memcpy(dst_ptr, src0_ptr, sizeof(float));
  6931. if (++i10 == ne0) {
  6932. i10 = 0;
  6933. if (++i11 == ne1) {
  6934. i11 = 0;
  6935. if (++i12 == ne2) {
  6936. i12 = 0;
  6937. if (++i13 == ne3) {
  6938. i13 = 0;
  6939. }
  6940. }
  6941. }
  6942. }
  6943. }
  6944. }
  6945. i10 += ne00 * (ne01 - ir1);
  6946. while (i10 >= ne0) {
  6947. i10 -= ne0;
  6948. if (++i11 == ne1) {
  6949. i11 = 0;
  6950. if (++i12 == ne2) {
  6951. i12 = 0;
  6952. if (++i13 == ne3) {
  6953. i13 = 0;
  6954. }
  6955. }
  6956. }
  6957. }
  6958. }
  6959. }
  6960. } else if (dst->type == GGML_TYPE_F16) {
  6961. for (int64_t i03 = 0; i03 < ne03; i03++) {
  6962. for (int64_t i02 = 0; i02 < ne02; i02++) {
  6963. i10 += ne00 * ir0;
  6964. while (i10 >= ne0) {
  6965. i10 -= ne0;
  6966. if (++i11 == ne1) {
  6967. i11 = 0;
  6968. if (++i12 == ne2) {
  6969. i12 = 0;
  6970. if (++i13 == ne3) {
  6971. i13 = 0;
  6972. }
  6973. }
  6974. }
  6975. }
  6976. for (int64_t i01 = ir0; i01 < ir1; i01++) {
  6977. for (int64_t i00 = 0; i00 < ne00; i00++) {
  6978. const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
  6979. char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3);
  6980. *(ggml_fp16_t *) dst_ptr = GGML_FP32_TO_FP16(*(const float *) src0_ptr);
  6981. if (++i10 == ne0) {
  6982. i10 = 0;
  6983. if (++i11 == ne1) {
  6984. i11 = 0;
  6985. if (++i12 == ne2) {
  6986. i12 = 0;
  6987. if (++i13 == ne3) {
  6988. i13 = 0;
  6989. }
  6990. }
  6991. }
  6992. }
  6993. }
  6994. }
  6995. i10 += ne00 * (ne01 - ir1);
  6996. while (i10 >= ne0) {
  6997. i10 -= ne0;
  6998. if (++i11 == ne1) {
  6999. i11 = 0;
  7000. if (++i12 == ne2) {
  7001. i12 = 0;
  7002. if (++i13 == ne3) {
  7003. i13 = 0;
  7004. }
  7005. }
  7006. }
  7007. }
  7008. }
  7009. }
  7010. } else {
  7011. GGML_ASSERT(false); // TODO: implement
  7012. }
  7013. }
  7014. static void ggml_compute_forward_dup(
  7015. const struct ggml_compute_params * params,
  7016. const struct ggml_tensor * src0,
  7017. struct ggml_tensor * dst) {
  7018. if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst) && src0->type == dst->type) {
  7019. ggml_compute_forward_dup_same_cont(params, src0, dst);
  7020. return;
  7021. }
  7022. switch (src0->type) {
  7023. case GGML_TYPE_F16:
  7024. {
  7025. ggml_compute_forward_dup_f16(params, src0, dst);
  7026. } break;
  7027. case GGML_TYPE_F32:
  7028. {
  7029. ggml_compute_forward_dup_f32(params, src0, dst);
  7030. } break;
  7031. default:
  7032. {
  7033. GGML_ASSERT(false);
  7034. } break;
  7035. }
  7036. }
  7037. // ggml_compute_forward_add
  7038. static void ggml_compute_forward_add_f32(
  7039. const struct ggml_compute_params * params,
  7040. const struct ggml_tensor * src0,
  7041. const struct ggml_tensor * src1,
  7042. struct ggml_tensor * dst) {
  7043. GGML_ASSERT(ggml_can_repeat_rows(src1, src0) && ggml_are_same_shape(src0, dst));
  7044. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7045. return;
  7046. }
  7047. const int ith = params->ith;
  7048. const int nth = params->nth;
  7049. const int nr = ggml_nrows(src0);
  7050. GGML_TENSOR_BINARY_OP_LOCALS;
  7051. GGML_ASSERT( nb0 == sizeof(float));
  7052. GGML_ASSERT(nb00 == sizeof(float));
  7053. // rows per thread
  7054. const int dr = (nr + nth - 1)/nth;
  7055. // row range for this thread
  7056. const int ir0 = dr*ith;
  7057. const int ir1 = MIN(ir0 + dr, nr);
  7058. if (nb10 == sizeof(float)) {
  7059. for (int ir = ir0; ir < ir1; ++ir) {
  7060. // src1 is broadcastable across src0 and dst in i1, i2, i3
  7061. const int64_t i03 = ir/(ne02*ne01);
  7062. const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
  7063. const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);
  7064. const int64_t i13 = i03 % ne13;
  7065. const int64_t i12 = i02 % ne12;
  7066. const int64_t i11 = i01 % ne11;
  7067. float * dst_ptr = (float *) ((char *) dst->data + i03*nb3 + i02*nb2 + i01*nb1 );
  7068. float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);
  7069. float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11);
  7070. #ifdef GGML_USE_ACCELERATE
  7071. vDSP_vadd(src0_ptr, 1, src1_ptr, 1, dst_ptr, 1, ne00);
  7072. #else
  7073. ggml_vec_add_f32(ne00, dst_ptr, src0_ptr, src1_ptr);
  7074. #endif
  7075. // }
  7076. // }
  7077. }
  7078. } else {
  7079. // src1 is not contiguous
  7080. for (int ir = ir0; ir < ir1; ++ir) {
  7081. // src1 is broadcastable across src0 and dst in i1, i2, i3
  7082. const int64_t i03 = ir/(ne02*ne01);
  7083. const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
  7084. const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);
  7085. const int64_t i13 = i03 % ne13;
  7086. const int64_t i12 = i02 % ne12;
  7087. const int64_t i11 = i01 % ne11;
  7088. float * dst_ptr = (float *) ((char *) dst->data + i03*nb3 + i02*nb2 + i01*nb1 );
  7089. float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);
  7090. for (int i0 = 0; i0 < ne0; i0++) {
  7091. float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + i0*nb10);
  7092. dst_ptr[i0] = src0_ptr[i0] + *src1_ptr;
  7093. }
  7094. }
  7095. }
  7096. }
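// broadcast example (illustrative shapes): with src0->ne = {n, 4, 2, 1} and
// src1->ne = {n, 1, 2, 1}, ne11 = 1 and ne12 = 2, so i11 = i01 % 1 == 0 for every row,
// while i12 = i02 % 2 follows src0; each src1 row is reused across the 4 rows of src0.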
  7097. static void ggml_compute_forward_add_f16_f32(
  7098. const struct ggml_compute_params * params,
  7099. const struct ggml_tensor * src0,
  7100. const struct ggml_tensor * src1,
  7101. struct ggml_tensor * dst) {
  7102. GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
  7103. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7104. return;
  7105. }
  7106. const int ith = params->ith;
  7107. const int nth = params->nth;
  7108. const int nr = ggml_nrows(src0);
  7109. GGML_TENSOR_BINARY_OP_LOCALS;
  7110. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  7111. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  7112. GGML_ASSERT(dst->type == GGML_TYPE_F16);
  7113. GGML_ASSERT( nb0 == sizeof(ggml_fp16_t));
  7114. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  7115. // rows per thread
  7116. const int dr = (nr + nth - 1)/nth;
  7117. // row range for this thread
  7118. const int ir0 = dr*ith;
  7119. const int ir1 = MIN(ir0 + dr, nr);
  7120. if (nb10 == sizeof(float)) {
  7121. for (int ir = ir0; ir < ir1; ++ir) {
  7122. // src0, src1 and dst are same shape => same indices
  7123. const int i3 = ir/(ne2*ne1);
  7124. const int i2 = (ir - i3*ne2*ne1)/ne1;
  7125. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  7126. ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1);
  7127. ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
  7128. float * src1_ptr = (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11);
  7129. for (int i = 0; i < ne0; i++) {
  7130. dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + src1_ptr[i]);
  7131. }
  7132. }
  7133. }
  7134. else {
  7135. // src1 is not contiguous
  7136. GGML_ASSERT(false);
  7137. }
  7138. }
  7139. static void ggml_compute_forward_add_f16_f16(
  7140. const struct ggml_compute_params * params,
  7141. const struct ggml_tensor * src0,
  7142. const struct ggml_tensor * src1,
  7143. struct ggml_tensor * dst) {
  7144. GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
  7145. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7146. return;
  7147. }
  7148. const int ith = params->ith;
  7149. const int nth = params->nth;
  7150. const int nr = ggml_nrows(src0);
  7151. GGML_TENSOR_BINARY_OP_LOCALS;
  7152. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  7153. GGML_ASSERT(src1->type == GGML_TYPE_F16);
  7154. GGML_ASSERT(dst->type == GGML_TYPE_F16);
  7155. GGML_ASSERT( nb0 == sizeof(ggml_fp16_t));
  7156. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  7157. // rows per thread
  7158. const int dr = (nr + nth - 1)/nth;
  7159. // row range for this thread
  7160. const int ir0 = dr*ith;
  7161. const int ir1 = MIN(ir0 + dr, nr);
  7162. if (nb10 == sizeof(ggml_fp16_t)) {
  7163. for (int ir = ir0; ir < ir1; ++ir) {
  7164. // src0, src1 and dst are same shape => same indices
  7165. const int i3 = ir/(ne2*ne1);
  7166. const int i2 = (ir - i3*ne2*ne1)/ne1;
  7167. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  7168. ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1);
  7169. ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
  7170. ggml_fp16_t * src1_ptr = (ggml_fp16_t *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11);
  7171. for (int i = 0; i < ne0; i++) {
  7172. dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + GGML_FP16_TO_FP32(src1_ptr[i]));
  7173. }
  7174. }
  7175. }
  7176. else {
  7177. // src1 is not contiguous
  7178. GGML_ASSERT(false);
  7179. }
  7180. }
  7181. static void ggml_compute_forward_add_q_f32(
  7182. const struct ggml_compute_params * params,
  7183. const struct ggml_tensor * src0,
  7184. const struct ggml_tensor * src1,
  7185. struct ggml_tensor * dst) {
  7186. GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
  7187. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7188. return;
  7189. }
  7190. const int nr = ggml_nrows(src0);
  7191. GGML_TENSOR_BINARY_OP_LOCALS;
  7192. const int ith = params->ith;
  7193. const int nth = params->nth;
  7194. const enum ggml_type type = src0->type;
  7195. ggml_to_float_t const dequantize_row_q = type_traits[type].to_float;
  7196. ggml_from_float_t const quantize_row_q = type_traits[type].from_float;
  7197. // we don't support permuted src0 or src1
  7198. GGML_ASSERT(nb00 == ggml_type_size(type));
  7199. GGML_ASSERT(nb10 == sizeof(float));
  7200. // dst cannot be transposed or permuted
  7201. GGML_ASSERT(nb0 <= nb1);
  7202. GGML_ASSERT(nb1 <= nb2);
  7203. GGML_ASSERT(nb2 <= nb3);
  7204. GGML_ASSERT(ggml_is_quantized(src0->type));
  7205. GGML_ASSERT(dst->type == src0->type);
  7206. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  7207. // rows per thread
  7208. const int dr = (nr + nth - 1)/nth;
  7209. // row range for this thread
  7210. const int ir0 = dr*ith;
  7211. const int ir1 = MIN(ir0 + dr, nr);
  7212. float * wdata = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith;
  7213. for (int ir = ir0; ir < ir1; ++ir) {
  7214. // src0 indices
  7215. const int i03 = ir/(ne02*ne01);
  7216. const int i02 = (ir - i03*ne02*ne01)/ne01;
  7217. const int i01 = (ir - i03*ne02*ne01 - i02*ne01);
  7218. // src1 and dst are same shape as src0 => same indices
  7219. const int i13 = i03;
  7220. const int i12 = i02;
  7221. const int i11 = i01;
  7222. const int i3 = i03;
  7223. const int i2 = i02;
  7224. const int i1 = i01;
  7225. void * src0_row = (void *) ((char *) src0->data + (i01*nb01 + i02*nb02 + i03*nb03));
  7226. float * src1_row = (float *)((char *) src1->data + (i11*nb11 + i12*nb12 + i13*nb13));
  7227. void * dst_row = (void *) ((char *) dst->data + ( i1*nb1 + i2*nb2 + i3*nb3));
  7228. assert(ne00 % 32 == 0);
7229. // dequantize row from src0 to temp buffer
  7230. dequantize_row_q(src0_row, wdata, ne00);
  7231. // add src1
  7232. ggml_vec_acc_f32(ne00, wdata, src1_row);
  7233. // quantize row to dst
  7234. quantize_row_q(wdata, dst_row, ne00);
  7235. }
  7236. }
  7237. static void ggml_compute_forward_add(
  7238. const struct ggml_compute_params * params,
  7239. const struct ggml_tensor * src0,
  7240. const struct ggml_tensor * src1,
  7241. struct ggml_tensor * dst) {
  7242. switch (src0->type) {
  7243. case GGML_TYPE_F32:
  7244. {
  7245. ggml_compute_forward_add_f32(params, src0, src1, dst);
  7246. } break;
  7247. case GGML_TYPE_F16:
  7248. {
  7249. if (src1->type == GGML_TYPE_F16) {
  7250. ggml_compute_forward_add_f16_f16(params, src0, src1, dst);
  7251. }
  7252. else if (src1->type == GGML_TYPE_F32) {
  7253. ggml_compute_forward_add_f16_f32(params, src0, src1, dst);
  7254. }
  7255. else {
  7256. GGML_ASSERT(false);
  7257. }
  7258. } break;
  7259. case GGML_TYPE_Q4_0:
  7260. case GGML_TYPE_Q4_1:
  7261. case GGML_TYPE_Q5_0:
  7262. case GGML_TYPE_Q5_1:
  7263. case GGML_TYPE_Q8_0:
  7264. case GGML_TYPE_Q2_K:
  7265. case GGML_TYPE_Q3_K:
  7266. case GGML_TYPE_Q4_K:
  7267. case GGML_TYPE_Q5_K:
  7268. case GGML_TYPE_Q6_K:
  7269. {
  7270. ggml_compute_forward_add_q_f32(params, src0, src1, dst);
  7271. } break;
  7272. default:
  7273. {
  7274. GGML_ASSERT(false);
  7275. } break;
  7276. }
  7277. }
  7278. // ggml_compute_forward_add1
  7279. static void ggml_compute_forward_add1_f32(
  7280. const struct ggml_compute_params * params,
  7281. const struct ggml_tensor * src0,
  7282. const struct ggml_tensor * src1,
  7283. struct ggml_tensor * dst) {
  7284. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  7285. GGML_ASSERT(ggml_is_scalar(src1));
  7286. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7287. return;
  7288. }
  7289. const int ith = params->ith;
  7290. const int nth = params->nth;
  7291. const int nr = ggml_nrows(src0);
  7292. GGML_TENSOR_UNARY_OP_LOCALS;
  7293. GGML_ASSERT( nb0 == sizeof(float));
  7294. GGML_ASSERT(nb00 == sizeof(float));
  7295. // rows per thread
  7296. const int dr = (nr + nth - 1)/nth;
  7297. // row range for this thread
  7298. const int ir0 = dr*ith;
  7299. const int ir1 = MIN(ir0 + dr, nr);
  7300. for (int ir = ir0; ir < ir1; ++ir) {
  7301. // src0 and dst are same shape => same indices
  7302. const int i3 = ir/(ne2*ne1);
  7303. const int i2 = (ir - i3*ne2*ne1)/ne1;
  7304. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  7305. #ifdef GGML_USE_ACCELERATE
  7306. UNUSED(ggml_vec_add1_f32);
  7307. vDSP_vadd(
  7308. (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01), 1,
  7309. (float *) ((char *) src1->data), 0,
  7310. (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ), 1,
  7311. ne0);
  7312. #else
  7313. ggml_vec_add1_f32(ne0,
  7314. (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ),
  7315. (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01),
  7316. *(float *) src1->data);
  7317. #endif
  7318. }
  7319. }
  7320. static void ggml_compute_forward_add1_f16_f32(
  7321. const struct ggml_compute_params * params,
  7322. const struct ggml_tensor * src0,
  7323. const struct ggml_tensor * src1,
  7324. struct ggml_tensor * dst) {
  7325. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  7326. GGML_ASSERT(ggml_is_scalar(src1));
  7327. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7328. return;
  7329. }
  7330. // scalar to add
  7331. const float v = *(float *) src1->data;
  7332. const int ith = params->ith;
  7333. const int nth = params->nth;
  7334. const int nr = ggml_nrows(src0);
  7335. GGML_TENSOR_UNARY_OP_LOCALS;
  7336. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  7337. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  7338. GGML_ASSERT(dst->type == GGML_TYPE_F16);
  7339. GGML_ASSERT( nb0 == sizeof(ggml_fp16_t));
  7340. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  7341. // rows per thread
  7342. const int dr = (nr + nth - 1)/nth;
  7343. // row range for this thread
  7344. const int ir0 = dr*ith;
  7345. const int ir1 = MIN(ir0 + dr, nr);
  7346. for (int ir = ir0; ir < ir1; ++ir) {
  7347. // src0 and dst are same shape => same indices
  7348. const int i3 = ir/(ne2*ne1);
  7349. const int i2 = (ir - i3*ne2*ne1)/ne1;
  7350. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  7351. ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 );
  7352. ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
  7353. for (int i = 0; i < ne0; i++) {
  7354. dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + v);
  7355. }
  7356. }
  7357. }
  7358. static void ggml_compute_forward_add1_f16_f16(
  7359. const struct ggml_compute_params * params,
  7360. const struct ggml_tensor * src0,
  7361. const struct ggml_tensor * src1,
  7362. struct ggml_tensor * dst) {
  7363. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  7364. GGML_ASSERT(ggml_is_scalar(src1));
  7365. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7366. return;
  7367. }
  7368. // scalar to add
  7369. const float v = GGML_FP16_TO_FP32(*(ggml_fp16_t *) src1->data);
  7370. const int ith = params->ith;
  7371. const int nth = params->nth;
  7372. const int nr = ggml_nrows(src0);
  7373. GGML_TENSOR_UNARY_OP_LOCALS;
  7374. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  7375. GGML_ASSERT(src1->type == GGML_TYPE_F16);
  7376. GGML_ASSERT(dst->type == GGML_TYPE_F16);
  7377. GGML_ASSERT( nb0 == sizeof(ggml_fp16_t));
  7378. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  7379. // rows per thread
  7380. const int dr = (nr + nth - 1)/nth;
  7381. // row range for this thread
  7382. const int ir0 = dr*ith;
  7383. const int ir1 = MIN(ir0 + dr, nr);
  7384. for (int ir = ir0; ir < ir1; ++ir) {
  7385. // src0 and dst are same shape => same indices
  7386. const int i3 = ir/(ne2*ne1);
  7387. const int i2 = (ir - i3*ne2*ne1)/ne1;
  7388. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  7389. ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 );
  7390. ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
  7391. for (int i = 0; i < ne0; i++) {
  7392. dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + v);
  7393. }
  7394. }
  7395. }
  7396. static void ggml_compute_forward_add1_q_f32(
  7397. const struct ggml_compute_params * params,
  7398. const struct ggml_tensor * src0,
  7399. const struct ggml_tensor * src1,
  7400. struct ggml_tensor * dst) {
  7401. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  7402. GGML_ASSERT(ggml_is_scalar(src1));
  7403. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7404. return;
  7405. }
  7406. // scalar to add
  7407. const float v = *(float *) src1->data;
  7408. const int ith = params->ith;
  7409. const int nth = params->nth;
  7410. const int nr = ggml_nrows(src0);
  7411. GGML_TENSOR_UNARY_OP_LOCALS;
  7412. const enum ggml_type type = src0->type;
  7413. ggml_to_float_t const dequantize_row_q = type_traits[type].to_float;
  7414. ggml_from_float_t const quantize_row_q = type_traits[type].from_float;
  7415. // we don't support permuted src0
  7416. GGML_ASSERT(nb00 == ggml_type_size(type));
  7417. // dst cannot be transposed or permuted
  7418. GGML_ASSERT(nb0 <= nb1);
  7419. GGML_ASSERT(nb1 <= nb2);
  7420. GGML_ASSERT(nb2 <= nb3);
  7421. GGML_ASSERT(ggml_is_quantized(src0->type));
  7422. GGML_ASSERT(dst->type == src0->type);
  7423. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  7424. // rows per thread
  7425. const int dr = (nr + nth - 1)/nth;
  7426. // row range for this thread
  7427. const int ir0 = dr*ith;
  7428. const int ir1 = MIN(ir0 + dr, nr);
  7429. float * wdata = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32) * ith;
  7430. for (int ir = ir0; ir < ir1; ++ir) {
  7431. // src0 and dst are same shape => same indices
  7432. const int i3 = ir/(ne2*ne1);
  7433. const int i2 = (ir - i3*ne2*ne1)/ne1;
  7434. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  7435. void * src0_row = (void *) ((char *) src0->data + (i1*nb01 + i2*nb02 + i3*nb03));
7436. void * dst_row = (void *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3 ));
  7437. assert(ne0 % 32 == 0);
7438. // dequantize row from src0 to temp buffer
7439. dequantize_row_q(src0_row, wdata, ne0);
7440. // add the scalar value from src1
7441. ggml_vec_acc1_f32(ne0, wdata, v);
  7442. // quantize row to dst
  7443. quantize_row_q(wdata, dst_row, ne0);
  7444. }
  7445. }
  7446. static void ggml_compute_forward_add1(
  7447. const struct ggml_compute_params * params,
  7448. const struct ggml_tensor * src0,
  7449. const struct ggml_tensor * src1,
  7450. struct ggml_tensor * dst) {
  7451. switch (src0->type) {
  7452. case GGML_TYPE_F32:
  7453. {
  7454. ggml_compute_forward_add1_f32(params, src0, src1, dst);
  7455. } break;
  7456. case GGML_TYPE_F16:
  7457. {
  7458. if (src1->type == GGML_TYPE_F16) {
  7459. ggml_compute_forward_add1_f16_f16(params, src0, src1, dst);
  7460. }
  7461. else if (src1->type == GGML_TYPE_F32) {
  7462. ggml_compute_forward_add1_f16_f32(params, src0, src1, dst);
  7463. }
  7464. else {
  7465. GGML_ASSERT(false);
  7466. }
  7467. } break;
  7468. case GGML_TYPE_Q4_0:
  7469. case GGML_TYPE_Q4_1:
  7470. case GGML_TYPE_Q5_0:
  7471. case GGML_TYPE_Q5_1:
  7472. case GGML_TYPE_Q8_0:
  7473. case GGML_TYPE_Q8_1:
  7474. case GGML_TYPE_Q2_K:
  7475. case GGML_TYPE_Q3_K:
  7476. case GGML_TYPE_Q4_K:
  7477. case GGML_TYPE_Q5_K:
  7478. case GGML_TYPE_Q6_K:
  7479. {
  7480. ggml_compute_forward_add1_q_f32(params, src0, src1, dst);
  7481. } break;
  7482. default:
  7483. {
  7484. GGML_ASSERT(false);
  7485. } break;
  7486. }
  7487. }
  7488. // ggml_compute_forward_acc
  7489. static void ggml_compute_forward_acc_f32(
  7490. const struct ggml_compute_params * params,
  7491. const struct ggml_tensor * src0,
  7492. const struct ggml_tensor * src1,
  7493. struct ggml_tensor * dst) {
  7494. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  7495. GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));
7496. // view src0 and dst with these strides and data offset in bytes during acc
7497. // nb0 is implicitly element_size because src0 and dst are contiguous
  7498. size_t nb1 = ((int32_t *) dst->op_params)[0];
  7499. size_t nb2 = ((int32_t *) dst->op_params)[1];
  7500. size_t nb3 = ((int32_t *) dst->op_params)[2];
  7501. size_t offset = ((int32_t *) dst->op_params)[3];
  7502. bool inplace = (bool) ((int32_t *) dst->op_params)[4];
  7503. if (!inplace && (params->type == GGML_TASK_INIT)) {
  7504. // memcpy needs to be synchronized across threads to avoid race conditions.
  7505. // => do it in INIT phase
  7506. memcpy(
  7507. ((char *) dst->data),
  7508. ((char *) src0->data),
  7509. ggml_nbytes(dst));
  7510. }
  7511. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7512. return;
  7513. }
  7514. const int ith = params->ith;
  7515. const int nth = params->nth;
  7516. const int nr = ggml_nrows(src1);
  7517. const int nc = src1->ne[0];
  7518. GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne);
  7519. GGML_TENSOR_LOCALS(size_t, nb1, src1, nb);
  7520. // src0 and dst as viewed during acc
  7521. const size_t nb0 = ggml_element_size(src0);
  7522. const size_t nb00 = nb0;
  7523. const size_t nb01 = nb1;
  7524. const size_t nb02 = nb2;
  7525. const size_t nb03 = nb3;
  7526. GGML_ASSERT(offset + (ne10 == 0 ? 0 : ne10-1)*nb0 + (ne11 == 0 ? 0 : ne11-1)*nb1 + (ne12 == 0 ? 0 : ne12-1)*nb2 + (ne13 == 0 ? 0 : ne13-1)*nb3 < ggml_nbytes(dst));
  7527. GGML_ASSERT(offset + (ne10 == 0 ? 0 : ne10-1)*nb00 + (ne11 == 0 ? 0 : ne11-1)*nb01 + (ne12 == 0 ? 0 : ne12-1)*nb02 + (ne13 == 0 ? 0 : ne13-1)*nb03 < ggml_nbytes(src0));
  7528. GGML_ASSERT(nb10 == sizeof(float));
  7529. // rows per thread
  7530. const int dr = (nr + nth - 1)/nth;
  7531. // row range for this thread
  7532. const int ir0 = dr*ith;
  7533. const int ir1 = MIN(ir0 + dr, nr);
  7534. for (int ir = ir0; ir < ir1; ++ir) {
  7535. // src0 and dst are viewed with shape of src1 and offset
  7536. // => same indices
  7537. const int i3 = ir/(ne12*ne11);
  7538. const int i2 = (ir - i3*ne12*ne11)/ne11;
  7539. const int i1 = (ir - i3*ne12*ne11 - i2*ne11);
  7540. #ifdef GGML_USE_ACCELERATE
  7541. vDSP_vadd(
  7542. (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + offset), 1,
  7543. (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11), 1,
  7544. (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + offset), 1, nc);
  7545. #else
  7546. ggml_vec_add_f32(nc,
  7547. (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + offset),
  7548. (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + offset),
  7549. (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11));
  7550. #endif
  7551. }
  7552. }
  7553. static void ggml_compute_forward_acc(
  7554. const struct ggml_compute_params * params,
  7555. const struct ggml_tensor * src0,
  7556. const struct ggml_tensor * src1,
  7557. struct ggml_tensor * dst) {
  7558. switch (src0->type) {
  7559. case GGML_TYPE_F32:
  7560. {
  7561. ggml_compute_forward_acc_f32(params, src0, src1, dst);
  7562. } break;
  7563. case GGML_TYPE_F16:
  7564. case GGML_TYPE_Q4_0:
  7565. case GGML_TYPE_Q4_1:
  7566. case GGML_TYPE_Q5_0:
  7567. case GGML_TYPE_Q5_1:
  7568. case GGML_TYPE_Q8_0:
  7569. case GGML_TYPE_Q8_1:
  7570. case GGML_TYPE_Q2_K:
  7571. case GGML_TYPE_Q3_K:
  7572. case GGML_TYPE_Q4_K:
  7573. case GGML_TYPE_Q5_K:
  7574. case GGML_TYPE_Q6_K:
  7575. default:
  7576. {
  7577. GGML_ASSERT(false);
  7578. } break;
  7579. }
  7580. }
  7581. // ggml_compute_forward_sub
  7582. static void ggml_compute_forward_sub_f32(
  7583. const struct ggml_compute_params * params,
  7584. const struct ggml_tensor * src0,
  7585. const struct ggml_tensor * src1,
  7586. struct ggml_tensor * dst) {
  7587. assert(params->ith == 0);
  7588. assert(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
  7589. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7590. return;
  7591. }
  7592. const int nr = ggml_nrows(src0);
  7593. GGML_TENSOR_BINARY_OP_LOCALS;
  7594. GGML_ASSERT( nb0 == sizeof(float));
  7595. GGML_ASSERT(nb00 == sizeof(float));
  7596. if (nb10 == sizeof(float)) {
  7597. for (int ir = 0; ir < nr; ++ir) {
  7598. // src0, src1 and dst are same shape => same indices
  7599. const int i3 = ir/(ne2*ne1);
  7600. const int i2 = (ir - i3*ne2*ne1)/ne1;
  7601. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  7602. #ifdef GGML_USE_ACCELERATE
  7603. vDSP_vsub(
  7604. (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11), 1,
  7605. (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01), 1,
  7606. (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ), 1,
  7607. ne0);
  7608. #else
  7609. ggml_vec_sub_f32(ne0,
  7610. (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ),
  7611. (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01),
  7612. (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11));
  7613. #endif
  7614. // }
  7615. // }
  7616. }
  7617. } else {
  7618. // src1 is not contiguous
  7619. for (int ir = 0; ir < nr; ++ir) {
  7620. // src0, src1 and dst are same shape => same indices
  7621. const int i3 = ir/(ne2*ne1);
  7622. const int i2 = (ir - i3*ne2*ne1)/ne1;
  7623. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  7624. float * dst_ptr = (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 );
  7625. float * src0_ptr = (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
  7626. for (int i0 = 0; i0 < ne0; i0++) {
  7627. float * src1_ptr = (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11 + i0*nb10);
  7628. dst_ptr[i0] = src0_ptr[i0] - *src1_ptr;
  7629. }
  7630. }
  7631. }
  7632. }
  7633. static void ggml_compute_forward_sub(
  7634. const struct ggml_compute_params * params,
  7635. const struct ggml_tensor * src0,
  7636. const struct ggml_tensor * src1,
  7637. struct ggml_tensor * dst) {
  7638. switch (src0->type) {
  7639. case GGML_TYPE_F32:
  7640. {
  7641. ggml_compute_forward_sub_f32(params, src0, src1, dst);
  7642. } break;
  7643. default:
  7644. {
  7645. GGML_ASSERT(false);
  7646. } break;
  7647. }
  7648. }
// ggml_compute_forward_mul

static void ggml_compute_forward_mul_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_can_repeat_rows(src1, src0) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

#ifdef GGML_USE_CLBLAST
    if (src1->backend == GGML_BACKEND_GPU) {
        if (ith == 0) {
            ggml_cl_mul(src0, src1, dst);
        }
        return;
    }
#endif

    const int64_t nr = ggml_nrows(src0);

    GGML_TENSOR_BINARY_OP_LOCALS;

    GGML_ASSERT( nb0 == sizeof(float));
    GGML_ASSERT(nb00 == sizeof(float));
    GGML_ASSERT(ne00 == ne10);

    if (nb10 == sizeof(float)) {
        for (int64_t ir = ith; ir < nr; ir += nth) {
            // src0 and dst are same shape => same indices
            const int64_t i03 = ir/(ne02*ne01);
            const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
            const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);

            const int64_t i13 = i03 % ne13;
            const int64_t i12 = i02 % ne12;
            const int64_t i11 = i01 % ne11;
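            // src1 is broadcast over src0/dst: the modulo wraps the src1 indices so that
            // the same src1 row is reused for every repeat along dims 1..3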
            float * dst_ptr  = (float *) ((char *) dst->data  + i03*nb3  + i02*nb2  + i01*nb1 );
            float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);
            float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11);

#ifdef GGML_USE_ACCELERATE
            UNUSED(ggml_vec_mul_f32);

            vDSP_vmul(src0_ptr, 1, src1_ptr, 1, dst_ptr, 1, ne00);
#else
            ggml_vec_mul_f32(ne00, dst_ptr, src0_ptr, src1_ptr);
#endif
        }
    } else {
        // src1 is not contiguous
        for (int64_t ir = ith; ir < nr; ir += nth) {
            // src0 and dst are same shape => same indices
            // src1 is broadcastable across src0 and dst in i1, i2, i3
            const int64_t i03 = ir/(ne02*ne01);
            const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
            const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);

            const int64_t i13 = i03 % ne13;
            const int64_t i12 = i02 % ne12;
            const int64_t i11 = i01 % ne11;

            float * dst_ptr  = (float *) ((char *) dst->data  + i03*nb3  + i02*nb2  + i01*nb1 );
            float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);

            for (int64_t i0 = 0; i0 < ne00; i0++) {
                float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + i0*nb10);

                dst_ptr[i0] = src0_ptr[i0] * (*src1_ptr);
            }
        }
    }
}
static void ggml_compute_forward_mul(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(src1->type == GGML_TYPE_F32 && "only f32 src1 supported for now");

    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_mul_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
  7732. // ggml_compute_forward_div
  7733. static void ggml_compute_forward_div_f32(
  7734. const struct ggml_compute_params * params,
  7735. const struct ggml_tensor * src0,
  7736. const struct ggml_tensor * src1,
  7737. struct ggml_tensor * dst) {
  7738. assert(params->ith == 0);
  7739. assert(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
  7740. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7741. return;
  7742. }
  7743. const int nr = ggml_nrows(src0);
  7744. GGML_TENSOR_BINARY_OP_LOCALS;
  7745. GGML_ASSERT( nb0 == sizeof(float));
  7746. GGML_ASSERT(nb00 == sizeof(float));
  7747. if (nb10 == sizeof(float)) {
  7748. for (int ir = 0; ir < nr; ++ir) {
  7749. // src0, src1 and dst are same shape => same indices
  7750. const int i3 = ir/(ne2*ne1);
  7751. const int i2 = (ir - i3*ne2*ne1)/ne1;
  7752. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  7753. #ifdef GGML_USE_ACCELERATE
  7754. UNUSED(ggml_vec_div_f32);
  7755. vDSP_vdiv(
  7756. (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11), 1,
  7757. (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01), 1,
  7758. (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ), 1,
  7759. ne0);
  7760. #else
  7761. ggml_vec_div_f32(ne0,
  7762. (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ),
  7763. (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01),
  7764. (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11));
  7765. #endif
  7766. // }
  7767. // }
  7768. }
  7769. } else {
  7770. // src1 is not contiguous
  7771. for (int ir = 0; ir < nr; ++ir) {
  7772. // src0, src1 and dst are same shape => same indices
  7773. const int i3 = ir/(ne2*ne1);
  7774. const int i2 = (ir - i3*ne2*ne1)/ne1;
  7775. const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
  7776. float * dst_ptr = (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 );
  7777. float * src0_ptr = (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
  7778. for (int i0 = 0; i0 < ne0; i0++) {
  7779. float * src1_ptr = (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11 + i0*nb10);
  7780. dst_ptr[i0] = src0_ptr[i0] / (*src1_ptr);
  7781. }
  7782. }
  7783. }
  7784. }
  7785. static void ggml_compute_forward_div(
  7786. const struct ggml_compute_params * params,
  7787. const struct ggml_tensor * src0,
  7788. const struct ggml_tensor * src1,
  7789. struct ggml_tensor * dst) {
  7790. switch (src0->type) {
  7791. case GGML_TYPE_F32:
  7792. {
  7793. ggml_compute_forward_div_f32(params, src0, src1, dst);
  7794. } break;
  7795. default:
  7796. {
  7797. GGML_ASSERT(false);
  7798. } break;
  7799. }
  7800. }
  7801. // ggml_compute_forward_sqr
  7802. static void ggml_compute_forward_sqr_f32(
  7803. const struct ggml_compute_params * params,
  7804. const struct ggml_tensor * src0,
  7805. struct ggml_tensor * dst) {
  7806. assert(params->ith == 0);
  7807. assert(ggml_are_same_shape(src0, dst));
  7808. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7809. return;
  7810. }
  7811. const int n = ggml_nrows(src0);
  7812. const int nc = src0->ne[0];
  7813. assert( dst->nb[0] == sizeof(float));
  7814. assert(src0->nb[0] == sizeof(float));
  7815. for (int i = 0; i < n; i++) {
  7816. ggml_vec_sqr_f32(nc,
  7817. (float *) ((char *) dst->data + i*( dst->nb[1])),
  7818. (float *) ((char *) src0->data + i*(src0->nb[1])));
  7819. }
  7820. }
  7821. static void ggml_compute_forward_sqr(
  7822. const struct ggml_compute_params * params,
  7823. const struct ggml_tensor * src0,
  7824. struct ggml_tensor * dst) {
  7825. switch (src0->type) {
  7826. case GGML_TYPE_F32:
  7827. {
  7828. ggml_compute_forward_sqr_f32(params, src0, dst);
  7829. } break;
  7830. default:
  7831. {
  7832. GGML_ASSERT(false);
  7833. } break;
  7834. }
  7835. }
  7836. // ggml_compute_forward_sqrt
  7837. static void ggml_compute_forward_sqrt_f32(
  7838. const struct ggml_compute_params * params,
  7839. const struct ggml_tensor * src0,
  7840. struct ggml_tensor * dst) {
  7841. assert(params->ith == 0);
  7842. assert(ggml_are_same_shape(src0, dst));
  7843. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7844. return;
  7845. }
  7846. const int n = ggml_nrows(src0);
  7847. const int nc = src0->ne[0];
  7848. assert( dst->nb[0] == sizeof(float));
  7849. assert(src0->nb[0] == sizeof(float));
  7850. for (int i = 0; i < n; i++) {
  7851. ggml_vec_sqrt_f32(nc,
  7852. (float *) ((char *) dst->data + i*( dst->nb[1])),
  7853. (float *) ((char *) src0->data + i*(src0->nb[1])));
  7854. }
  7855. }
  7856. static void ggml_compute_forward_sqrt(
  7857. const struct ggml_compute_params * params,
  7858. const struct ggml_tensor * src0,
  7859. struct ggml_tensor * dst) {
  7860. switch (src0->type) {
  7861. case GGML_TYPE_F32:
  7862. {
  7863. ggml_compute_forward_sqrt_f32(params, src0, dst);
  7864. } break;
  7865. default:
  7866. {
  7867. GGML_ASSERT(false);
  7868. } break;
  7869. }
  7870. }
  7871. // ggml_compute_forward_log
  7872. static void ggml_compute_forward_log_f32(
  7873. const struct ggml_compute_params * params,
  7874. const struct ggml_tensor * src0,
  7875. struct ggml_tensor * dst) {
  7876. GGML_ASSERT(params->ith == 0);
  7877. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  7878. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7879. return;
  7880. }
  7881. const int n = ggml_nrows(src0);
  7882. const int nc = src0->ne[0];
  7883. GGML_ASSERT( dst->nb[0] == sizeof(float));
  7884. GGML_ASSERT(src0->nb[0] == sizeof(float));
  7885. for (int i = 0; i < n; i++) {
  7886. ggml_vec_log_f32(nc,
  7887. (float *) ((char *) dst->data + i*( dst->nb[1])),
  7888. (float *) ((char *) src0->data + i*(src0->nb[1])));
  7889. }
  7890. }
  7891. static void ggml_compute_forward_log(
  7892. const struct ggml_compute_params * params,
  7893. const struct ggml_tensor * src0,
  7894. struct ggml_tensor * dst) {
  7895. switch (src0->type) {
  7896. case GGML_TYPE_F32:
  7897. {
  7898. ggml_compute_forward_log_f32(params, src0, dst);
  7899. } break;
  7900. default:
  7901. {
  7902. GGML_ASSERT(false);
  7903. } break;
  7904. }
  7905. }
// ggml_compute_forward_sum

static void ggml_compute_forward_sum_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_is_scalar(dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    assert(src0->nb[0] == sizeof(float));

    GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne);
    GGML_TENSOR_LOCALS(size_t, nb0, src0, nb);
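    // accumulate the per-row sums in the wider ggml_float type so long rows lose less
    // precision before the final value is stored back into dst as a float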
    ggml_float sum     = 0;
    ggml_float row_sum = 0;

    for (int64_t i03 = 0; i03 < ne03; i03++) {
        for (int64_t i02 = 0; i02 < ne02; i02++) {
            for (int64_t i01 = 0; i01 < ne01; i01++) {
                ggml_vec_sum_f32_ggf(ne00,
                        &row_sum,
                        (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03));
                sum += row_sum;
            }
        }
    }
    ((float *) dst->data)[0] = sum;
}
  7934. static void ggml_compute_forward_sum_f16(
  7935. const struct ggml_compute_params * params,
  7936. const struct ggml_tensor * src0,
  7937. struct ggml_tensor * dst) {
  7938. assert(params->ith == 0);
  7939. assert(ggml_is_scalar(dst));
  7940. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7941. return;
  7942. }
  7943. assert(src0->nb[0] == sizeof(ggml_fp16_t));
  7944. GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne);
  7945. GGML_TENSOR_LOCALS(size_t, nb0, src0, nb);
  7946. float sum = 0;
  7947. float row_sum = 0;
  7948. for (int64_t i03 = 0; i03 < ne03; i03++) {
  7949. for (int64_t i02 = 0; i02 < ne02; i02++) {
  7950. for (int64_t i01 = 0; i01 < ne01; i01++) {
  7951. ggml_vec_sum_f16_ggf(ne00,
  7952. &row_sum,
  7953. (ggml_fp16_t *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03));
  7954. sum += row_sum;
  7955. }
  7956. }
  7957. }
  7958. ((ggml_fp16_t *) dst->data)[0] = GGML_FP32_TO_FP16(sum);
  7959. }
  7960. static void ggml_compute_forward_sum(
  7961. const struct ggml_compute_params * params,
  7962. const struct ggml_tensor * src0,
  7963. struct ggml_tensor * dst) {
  7964. switch (src0->type) {
  7965. case GGML_TYPE_F32:
  7966. {
  7967. ggml_compute_forward_sum_f32(params, src0, dst);
  7968. } break;
  7969. case GGML_TYPE_F16:
  7970. {
  7971. ggml_compute_forward_sum_f16(params, src0, dst);
  7972. } break;
  7973. default:
  7974. {
  7975. GGML_ASSERT(false);
  7976. } break;
  7977. }
  7978. }
  7979. // ggml_compute_forward_sum_rows
  7980. static void ggml_compute_forward_sum_rows_f32(
  7981. const struct ggml_compute_params * params,
  7982. const struct ggml_tensor * src0,
  7983. struct ggml_tensor * dst) {
  7984. GGML_ASSERT(params->ith == 0);
  7985. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  7986. return;
  7987. }
  7988. GGML_ASSERT(src0->nb[0] == sizeof(float));
  7989. GGML_ASSERT(dst->nb[0] == sizeof(float));
  7990. GGML_TENSOR_UNARY_OP_LOCALS;
  7991. GGML_ASSERT(ne0 == 1);
  7992. GGML_ASSERT(ne1 == ne01);
  7993. GGML_ASSERT(ne2 == ne02);
  7994. GGML_ASSERT(ne3 == ne03);
  7995. for (int64_t i3 = 0; i3 < ne03; i3++) {
  7996. for (int64_t i2 = 0; i2 < ne02; i2++) {
  7997. for (int64_t i1 = 0; i1 < ne01; i1++) {
  7998. float * src_row = (float *) ((char *) src0->data + i1*nb01 + i2*nb02 + i3*nb03);
  7999. float * dst_row = (float *) ((char *) dst->data + i1*nb1 + i2*nb2 + i3*nb3);
  8000. float row_sum = 0;
  8001. ggml_vec_sum_f32(ne00, &row_sum, src_row);
  8002. dst_row[0] = row_sum;
  8003. }
  8004. }
  8005. }
  8006. }
  8007. static void ggml_compute_forward_sum_rows(
  8008. const struct ggml_compute_params * params,
  8009. const struct ggml_tensor * src0,
  8010. struct ggml_tensor * dst) {
  8011. switch (src0->type) {
  8012. case GGML_TYPE_F32:
  8013. {
  8014. ggml_compute_forward_sum_rows_f32(params, src0, dst);
  8015. } break;
  8016. default:
  8017. {
  8018. GGML_ASSERT(false);
  8019. } break;
  8020. }
  8021. }
  8022. // ggml_compute_forward_mean
  8023. static void ggml_compute_forward_mean_f32(
  8024. const struct ggml_compute_params * params,
  8025. const struct ggml_tensor * src0,
  8026. struct ggml_tensor * dst) {
  8027. assert(params->ith == 0);
  8028. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8029. return;
  8030. }
  8031. assert(src0->nb[0] == sizeof(float));
  8032. GGML_TENSOR_UNARY_OP_LOCALS;
  8033. assert(ne0 == 1);
  8034. assert(ne1 == ne01);
  8035. assert(ne2 == ne02);
  8036. assert(ne3 == ne03);
  8037. UNUSED(ne0);
  8038. UNUSED(ne1);
  8039. UNUSED(ne2);
  8040. UNUSED(ne3);
  8041. for (int64_t i03 = 0; i03 < ne03; i03++) {
  8042. for (int64_t i02 = 0; i02 < ne02; i02++) {
  8043. for (int64_t i01 = 0; i01 < ne01; i01++) {
  8044. ggml_vec_sum_f32(ne00,
  8045. (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3),
  8046. (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03));
  8047. *(float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3) /= (float) ne00;
  8048. }
  8049. }
  8050. }
  8051. }
  8052. static void ggml_compute_forward_mean(
  8053. const struct ggml_compute_params * params,
  8054. const struct ggml_tensor * src0,
  8055. struct ggml_tensor * dst) {
  8056. switch (src0->type) {
  8057. case GGML_TYPE_F32:
  8058. {
  8059. ggml_compute_forward_mean_f32(params, src0, dst);
  8060. } break;
  8061. default:
  8062. {
  8063. GGML_ASSERT(false);
  8064. } break;
  8065. }
  8066. }
// ggml_compute_forward_argmax

static void ggml_compute_forward_argmax_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    assert(src0->nb[0] == sizeof(float));
    assert(dst->nb[0] == sizeof(float));

    const int64_t ne00 = src0->ne[0];
    const int64_t ne01 = src0->ne[1];

    const size_t nb01 = src0->nb[1];
    const size_t nb0 = dst->nb[0];
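    // dst stores one int32 value per row of src0: the index of that row's maximum element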
    for (int64_t i1 = 0; i1 < ne01; i1++) {
        float * src = (float *) ((char *) src0->data + i1*nb01);
        int32_t * dst_ = (int32_t *) ((char *) dst->data + i1*nb0);
        int v = 0;
        ggml_vec_argmax_f32(ne00, &v, src);
        dst_[0] = v;
    }
}
  8090. static void ggml_compute_forward_argmax(
  8091. const struct ggml_compute_params * params,
  8092. const struct ggml_tensor * src0,
  8093. struct ggml_tensor * dst) {
  8094. switch (src0->type) {
  8095. case GGML_TYPE_F32:
  8096. {
  8097. ggml_compute_forward_argmax_f32(params, src0, dst);
  8098. } break;
  8099. default:
  8100. {
  8101. GGML_ASSERT(false);
  8102. } break;
  8103. }
  8104. }
// ggml_compute_forward_repeat

static void ggml_compute_forward_repeat_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(params->ith == 0);
    GGML_ASSERT(ggml_can_repeat(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    GGML_TENSOR_UNARY_OP_LOCALS;

    // guaranteed to be an integer due to the check in ggml_can_repeat
    const int nr0 = (int)(ne0/ne00);
    const int nr1 = (int)(ne1/ne01);
    const int nr2 = (int)(ne2/ne02);
    const int nr3 = (int)(ne3/ne03);
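    // nr0..nr3 are the repeat counts along each dimension; the loops below copy all of
    // src0 once per (i0, i1, i2, i3) repeat combination into the corresponding dst tile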
    // TODO: support for transposed / permuted tensors
    GGML_ASSERT(nb0  == sizeof(float));
    GGML_ASSERT(nb00 == sizeof(float));

    // TODO: maybe this is not optimal?
    for (int i3 = 0; i3 < nr3; i3++) {
        for (int k3 = 0; k3 < ne03; k3++) {
            for (int i2 = 0; i2 < nr2; i2++) {
                for (int k2 = 0; k2 < ne02; k2++) {
                    for (int i1 = 0; i1 < nr1; i1++) {
                        for (int k1 = 0; k1 < ne01; k1++) {
                            for (int i0 = 0; i0 < nr0; i0++) {
                                ggml_vec_cpy_f32(ne00,
                                        (float *) ((char *) dst->data  + (i3*ne03 + k3)*nb3  + (i2*ne02 + k2)*nb2  + (i1*ne01 + k1)*nb1  + (i0*ne00)*nb0),
                                        (float *) ((char *) src0->data + (          k3)*nb03 + (          k2)*nb02 + (          k1)*nb01));
                            }
                        }
                    }
                }
            }
        }
    }
}
  8143. static void ggml_compute_forward_repeat(
  8144. const struct ggml_compute_params * params,
  8145. const struct ggml_tensor * src0,
  8146. struct ggml_tensor * dst) {
  8147. switch (src0->type) {
  8148. case GGML_TYPE_F32:
  8149. {
  8150. ggml_compute_forward_repeat_f32(params, src0, dst);
  8151. } break;
  8152. default:
  8153. {
  8154. GGML_ASSERT(false);
  8155. } break;
  8156. }
  8157. }
  8158. // ggml_compute_forward_repeat_back
  8159. static void ggml_compute_forward_repeat_back_f32(
  8160. const struct ggml_compute_params * params,
  8161. const struct ggml_tensor * src0,
  8162. struct ggml_tensor * dst) {
  8163. GGML_ASSERT(params->ith == 0);
  8164. GGML_ASSERT(ggml_can_repeat(dst, src0));
  8165. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8166. return;
  8167. }
  8168. GGML_TENSOR_UNARY_OP_LOCALS;
  8169. // guaranteed to be an integer due to the check in ggml_can_repeat
  8170. const int nr0 = (int)(ne00/ne0);
  8171. const int nr1 = (int)(ne01/ne1);
  8172. const int nr2 = (int)(ne02/ne2);
  8173. const int nr3 = (int)(ne03/ne3);
  8174. // TODO: support for transposed / permuted tensors
  8175. GGML_ASSERT(nb0 == sizeof(float));
  8176. GGML_ASSERT(nb00 == sizeof(float));
  8177. if (ggml_is_contiguous(dst)) {
  8178. ggml_vec_set_f32(ne0*ne1*ne2*ne3, dst->data, 0);
  8179. } else {
  8180. for (int k3 = 0; k3 < ne3; k3++) {
  8181. for (int k2 = 0; k2 < ne2; k2++) {
  8182. for (int k1 = 0; k1 < ne1; k1++) {
  8183. ggml_vec_set_f32(ne0,
  8184. (float *) ((char *) dst->data + k1*nb1 + k2*nb2 + k3*nb3),
  8185. 0);
  8186. }
  8187. }
  8188. }
  8189. }
  8190. // TODO: maybe this is not optimal?
  8191. for (int i3 = 0; i3 < nr3; i3++) {
  8192. for (int k3 = 0; k3 < ne3; k3++) {
  8193. for (int i2 = 0; i2 < nr2; i2++) {
  8194. for (int k2 = 0; k2 < ne2; k2++) {
  8195. for (int i1 = 0; i1 < nr1; i1++) {
  8196. for (int k1 = 0; k1 < ne1; k1++) {
  8197. for (int i0 = 0; i0 < nr0; i0++) {
  8198. ggml_vec_acc_f32(ne0,
  8199. (float *) ((char *) dst->data + ( k3)*nb3 + ( k2)*nb2 + ( k1)*nb1),
  8200. (float *) ((char *) src0->data + (i3*ne3 + k3)*nb03 + (i2*ne2 + k2)*nb02 + (i1*ne1 + k1)*nb01 + (i0*ne0)*nb00));
  8201. }
  8202. }
  8203. }
  8204. }
  8205. }
  8206. }
  8207. }
  8208. }
  8209. static void ggml_compute_forward_repeat_back(
  8210. const struct ggml_compute_params * params,
  8211. const struct ggml_tensor * src0,
  8212. struct ggml_tensor * dst) {
  8213. switch (src0->type) {
  8214. case GGML_TYPE_F32:
  8215. {
  8216. ggml_compute_forward_repeat_back_f32(params, src0, dst);
  8217. } break;
  8218. default:
  8219. {
  8220. GGML_ASSERT(false);
  8221. } break;
  8222. }
  8223. }
  8224. // ggml_compute_forward_concat
  8225. static void ggml_compute_forward_concat_f32(
  8226. const struct ggml_compute_params * params,
  8227. const struct ggml_tensor * src0,
  8228. const struct ggml_tensor * src1,
  8229. struct ggml_tensor * dst) {
  8230. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8231. return;
  8232. }
  8233. GGML_ASSERT(src0->nb[0] == sizeof(float));
  8234. const int ith = params->ith;
  8235. GGML_TENSOR_BINARY_OP_LOCALS;
  8236. // TODO: support for transposed / permuted tensors
  8237. GGML_ASSERT(nb0 == sizeof(float));
  8238. GGML_ASSERT(nb00 == sizeof(float));
  8239. GGML_ASSERT(nb10 == sizeof(float));
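    // dst is src0 and src1 concatenated along dimension 2: positions with i2 < ne02 are copied
    // from src0, the remaining ones from src1 with the channel index shifted down by ne02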
  8240. for (int i3 = 0; i3 < ne3; i3++) {
  8241. for (int i2 = ith; i2 < ne2; i2++) {
  8242. if (i2 < ne02) { // src0
  8243. for (int i1 = 0; i1 < ne1; i1++) {
  8244. for (int i0 = 0; i0 < ne0; i0++) {
  8245. const float * x = (float *)((char *) src0->data + i0 * nb00 + i1 * nb01 + i2 * nb02 + i3 * nb03);
  8246. float * y = (float *)((char *)dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3);
  8247. *y = *x;
  8248. }
  8249. }
  8250. } // src1
  8251. else {
  8252. for (int i1 = 0; i1 < ne1; i1++) {
  8253. for (int i0 = 0; i0 < ne0; i0++) {
  8254. const float * x = (float *)((char *) src1->data + i0 * nb10 + i1 * nb11 + (i2 - ne02) * nb12 + i3 * nb13);
  8255. float * y = (float *)((char *)dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3);
  8256. *y = *x;
  8257. }
  8258. }
  8259. }
  8260. }
  8261. }
  8262. }
  8263. static void ggml_compute_forward_concat(
  8264. const struct ggml_compute_params* params,
  8265. const struct ggml_tensor* src0,
  8266. const struct ggml_tensor* src1,
  8267. struct ggml_tensor* dst) {
  8268. switch (src0->type) {
  8269. case GGML_TYPE_F32:
  8270. {
  8271. ggml_compute_forward_concat_f32(params, src0, src1, dst);
  8272. } break;
  8273. default:
  8274. {
  8275. GGML_ASSERT(false);
  8276. } break;
  8277. }
  8278. }
  8279. // ggml_compute_forward_abs
  8280. static void ggml_compute_forward_abs_f32(
  8281. const struct ggml_compute_params * params,
  8282. const struct ggml_tensor * src0,
  8283. struct ggml_tensor * dst) {
  8284. assert(params->ith == 0);
  8285. assert(ggml_are_same_shape(src0, dst));
  8286. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8287. return;
  8288. }
  8289. const int n = ggml_nrows(src0);
  8290. const int nc = src0->ne[0];
  8291. assert(dst->nb[0] == sizeof(float));
  8292. assert(src0->nb[0] == sizeof(float));
  8293. for (int i = 0; i < n; i++) {
  8294. ggml_vec_abs_f32(nc,
  8295. (float *) ((char *) dst->data + i*( dst->nb[1])),
  8296. (float *) ((char *) src0->data + i*(src0->nb[1])));
  8297. }
  8298. }
  8299. static void ggml_compute_forward_abs(
  8300. const struct ggml_compute_params * params,
  8301. const struct ggml_tensor * src0,
  8302. struct ggml_tensor * dst) {
  8303. switch (src0->type) {
  8304. case GGML_TYPE_F32:
  8305. {
  8306. ggml_compute_forward_abs_f32(params, src0, dst);
  8307. } break;
  8308. default:
  8309. {
  8310. GGML_ASSERT(false);
  8311. } break;
  8312. }
  8313. }
  8314. // ggml_compute_forward_sgn
  8315. static void ggml_compute_forward_sgn_f32(
  8316. const struct ggml_compute_params * params,
  8317. const struct ggml_tensor * src0,
  8318. struct ggml_tensor * dst) {
  8319. assert(params->ith == 0);
  8320. assert(ggml_are_same_shape(src0, dst));
  8321. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8322. return;
  8323. }
  8324. const int n = ggml_nrows(src0);
  8325. const int nc = src0->ne[0];
  8326. assert(dst->nb[0] == sizeof(float));
  8327. assert(src0->nb[0] == sizeof(float));
  8328. for (int i = 0; i < n; i++) {
  8329. ggml_vec_sgn_f32(nc,
  8330. (float *) ((char *) dst->data + i*( dst->nb[1])),
  8331. (float *) ((char *) src0->data + i*(src0->nb[1])));
  8332. }
  8333. }
  8334. static void ggml_compute_forward_sgn(
  8335. const struct ggml_compute_params * params,
  8336. const struct ggml_tensor * src0,
  8337. struct ggml_tensor * dst) {
  8338. switch (src0->type) {
  8339. case GGML_TYPE_F32:
  8340. {
  8341. ggml_compute_forward_sgn_f32(params, src0, dst);
  8342. } break;
  8343. default:
  8344. {
  8345. GGML_ASSERT(false);
  8346. } break;
  8347. }
  8348. }
  8349. // ggml_compute_forward_neg
  8350. static void ggml_compute_forward_neg_f32(
  8351. const struct ggml_compute_params * params,
  8352. const struct ggml_tensor * src0,
  8353. struct ggml_tensor * dst) {
  8354. assert(params->ith == 0);
  8355. assert(ggml_are_same_shape(src0, dst));
  8356. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8357. return;
  8358. }
  8359. const int n = ggml_nrows(src0);
  8360. const int nc = src0->ne[0];
  8361. assert(dst->nb[0] == sizeof(float));
  8362. assert(src0->nb[0] == sizeof(float));
  8363. for (int i = 0; i < n; i++) {
  8364. ggml_vec_neg_f32(nc,
  8365. (float *) ((char *) dst->data + i*( dst->nb[1])),
  8366. (float *) ((char *) src0->data + i*(src0->nb[1])));
  8367. }
  8368. }
  8369. static void ggml_compute_forward_neg(
  8370. const struct ggml_compute_params * params,
  8371. const struct ggml_tensor * src0,
  8372. struct ggml_tensor * dst) {
  8373. switch (src0->type) {
  8374. case GGML_TYPE_F32:
  8375. {
  8376. ggml_compute_forward_neg_f32(params, src0, dst);
  8377. } break;
  8378. default:
  8379. {
  8380. GGML_ASSERT(false);
  8381. } break;
  8382. }
  8383. }
  8384. // ggml_compute_forward_step
  8385. static void ggml_compute_forward_step_f32(
  8386. const struct ggml_compute_params * params,
  8387. const struct ggml_tensor * src0,
  8388. struct ggml_tensor * dst) {
  8389. assert(params->ith == 0);
  8390. assert(ggml_are_same_shape(src0, dst));
  8391. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8392. return;
  8393. }
  8394. const int n = ggml_nrows(src0);
  8395. const int nc = src0->ne[0];
  8396. assert(dst->nb[0] == sizeof(float));
  8397. assert(src0->nb[0] == sizeof(float));
  8398. for (int i = 0; i < n; i++) {
  8399. ggml_vec_step_f32(nc,
  8400. (float *) ((char *) dst->data + i*( dst->nb[1])),
  8401. (float *) ((char *) src0->data + i*(src0->nb[1])));
  8402. }
  8403. }
  8404. static void ggml_compute_forward_step(
  8405. const struct ggml_compute_params * params,
  8406. const struct ggml_tensor * src0,
  8407. struct ggml_tensor * dst) {
  8408. switch (src0->type) {
  8409. case GGML_TYPE_F32:
  8410. {
  8411. ggml_compute_forward_step_f32(params, src0, dst);
  8412. } break;
  8413. default:
  8414. {
  8415. GGML_ASSERT(false);
  8416. } break;
  8417. }
  8418. }
  8419. // ggml_compute_forward_tanh
  8420. static void ggml_compute_forward_tanh_f32(
  8421. const struct ggml_compute_params * params,
  8422. const struct ggml_tensor * src0,
  8423. struct ggml_tensor * dst) {
  8424. assert(params->ith == 0);
  8425. assert(ggml_are_same_shape(src0, dst));
  8426. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8427. return;
  8428. }
  8429. const int n = ggml_nrows(src0);
  8430. const int nc = src0->ne[0];
  8431. assert(dst->nb[0] == sizeof(float));
  8432. assert(src0->nb[0] == sizeof(float));
  8433. for (int i = 0; i < n; i++) {
  8434. ggml_vec_tanh_f32(nc,
  8435. (float *) ((char *) dst->data + i*( dst->nb[1])),
  8436. (float *) ((char *) src0->data + i*(src0->nb[1])));
  8437. }
  8438. }
  8439. static void ggml_compute_forward_tanh(
  8440. const struct ggml_compute_params * params,
  8441. const struct ggml_tensor * src0,
  8442. struct ggml_tensor * dst) {
  8443. switch (src0->type) {
  8444. case GGML_TYPE_F32:
  8445. {
  8446. ggml_compute_forward_tanh_f32(params, src0, dst);
  8447. } break;
  8448. default:
  8449. {
  8450. GGML_ASSERT(false);
  8451. } break;
  8452. }
  8453. }
  8454. // ggml_compute_forward_elu
  8455. static void ggml_compute_forward_elu_f32(
  8456. const struct ggml_compute_params * params,
  8457. const struct ggml_tensor * src0,
  8458. struct ggml_tensor * dst) {
  8459. assert(params->ith == 0);
  8460. assert(ggml_are_same_shape(src0, dst));
  8461. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8462. return;
  8463. }
  8464. const int n = ggml_nrows(src0);
  8465. const int nc = src0->ne[0];
  8466. assert(dst->nb[0] == sizeof(float));
  8467. assert(src0->nb[0] == sizeof(float));
  8468. for (int i = 0; i < n; i++) {
  8469. ggml_vec_elu_f32(nc,
  8470. (float *) ((char *) dst->data + i*( dst->nb[1])),
  8471. (float *) ((char *) src0->data + i*(src0->nb[1])));
  8472. }
  8473. }
  8474. static void ggml_compute_forward_elu(
  8475. const struct ggml_compute_params * params,
  8476. const struct ggml_tensor * src0,
  8477. struct ggml_tensor * dst) {
  8478. switch (src0->type) {
  8479. case GGML_TYPE_F32:
  8480. {
  8481. ggml_compute_forward_elu_f32(params, src0, dst);
  8482. } break;
  8483. default:
  8484. {
  8485. GGML_ASSERT(false);
  8486. } break;
  8487. }
  8488. }
  8489. // ggml_compute_forward_relu
  8490. static void ggml_compute_forward_relu_f32(
  8491. const struct ggml_compute_params * params,
  8492. const struct ggml_tensor * src0,
  8493. struct ggml_tensor * dst) {
  8494. assert(params->ith == 0);
  8495. assert(ggml_are_same_shape(src0, dst));
  8496. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8497. return;
  8498. }
  8499. const int n = ggml_nrows(src0);
  8500. const int nc = src0->ne[0];
  8501. assert(dst->nb[0] == sizeof(float));
  8502. assert(src0->nb[0] == sizeof(float));
  8503. for (int i = 0; i < n; i++) {
  8504. ggml_vec_relu_f32(nc,
  8505. (float *) ((char *) dst->data + i*( dst->nb[1])),
  8506. (float *) ((char *) src0->data + i*(src0->nb[1])));
  8507. }
  8508. }
  8509. static void ggml_compute_forward_relu(
  8510. const struct ggml_compute_params * params,
  8511. const struct ggml_tensor * src0,
  8512. struct ggml_tensor * dst) {
  8513. switch (src0->type) {
  8514. case GGML_TYPE_F32:
  8515. {
  8516. ggml_compute_forward_relu_f32(params, src0, dst);
  8517. } break;
  8518. default:
  8519. {
  8520. GGML_ASSERT(false);
  8521. } break;
  8522. }
  8523. }
  8524. // ggml_compute_forward_gelu
  8525. static void ggml_compute_forward_gelu_f32(
  8526. const struct ggml_compute_params * params,
  8527. const struct ggml_tensor * src0,
  8528. struct ggml_tensor * dst) {
  8529. GGML_ASSERT(ggml_is_contiguous_except_dim_1(src0));
  8530. GGML_ASSERT(ggml_is_contiguous_except_dim_1(dst));
  8531. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  8532. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8533. return;
  8534. }
  8535. const int ith = params->ith;
  8536. const int nth = params->nth;
  8537. const int nc = src0->ne[0];
  8538. const int nr = ggml_nrows(src0);
  8539. // rows per thread
  8540. const int dr = (nr + nth - 1)/nth;
  8541. // row range for this thread
  8542. const int ir0 = dr*ith;
  8543. const int ir1 = MIN(ir0 + dr, nr);
  8544. for (int i1 = ir0; i1 < ir1; i1++) {
  8545. ggml_vec_gelu_f32(nc,
  8546. (float *) ((char *) dst->data + i1*( dst->nb[1])),
  8547. (float *) ((char *) src0->data + i1*(src0->nb[1])));
  8548. #ifndef NDEBUG
  8549. for (int k = 0; k < nc; k++) {
  8550. const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k];
  8551. UNUSED(x);
  8552. assert(!isnan(x));
  8553. assert(!isinf(x));
  8554. }
  8555. #endif
  8556. }
  8557. }
  8558. static void ggml_compute_forward_gelu(
  8559. const struct ggml_compute_params * params,
  8560. const struct ggml_tensor * src0,
  8561. struct ggml_tensor * dst) {
  8562. switch (src0->type) {
  8563. case GGML_TYPE_F32:
  8564. {
  8565. ggml_compute_forward_gelu_f32(params, src0, dst);
  8566. } break;
  8567. default:
  8568. {
  8569. GGML_ASSERT(false);
  8570. } break;
  8571. }
  8572. }
  8573. // ggml_compute_forward_gelu_quick
  8574. static void ggml_compute_forward_gelu_quick_f32(
  8575. const struct ggml_compute_params * params,
  8576. const struct ggml_tensor * src0,
  8577. struct ggml_tensor * dst) {
  8578. GGML_ASSERT(ggml_is_contiguous_except_dim_1(src0));
  8579. GGML_ASSERT(ggml_is_contiguous_except_dim_1(dst));
  8580. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  8581. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8582. return;
  8583. }
  8584. const int ith = params->ith;
  8585. const int nth = params->nth;
  8586. const int nc = src0->ne[0];
  8587. const int nr = ggml_nrows(src0);
  8588. // rows per thread
  8589. const int dr = (nr + nth - 1)/nth;
  8590. // row range for this thread
  8591. const int ir0 = dr*ith;
  8592. const int ir1 = MIN(ir0 + dr, nr);
  8593. for (int i1 = ir0; i1 < ir1; i1++) {
  8594. ggml_vec_gelu_quick_f32(nc,
  8595. (float *) ((char *) dst->data + i1*( dst->nb[1])),
  8596. (float *) ((char *) src0->data + i1*(src0->nb[1])));
  8597. #ifndef NDEBUG
  8598. for (int k = 0; k < nc; k++) {
  8599. const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k];
  8600. UNUSED(x);
  8601. assert(!isnan(x));
  8602. assert(!isinf(x));
  8603. }
  8604. #endif
  8605. }
  8606. }
  8607. static void ggml_compute_forward_gelu_quick(
  8608. const struct ggml_compute_params * params,
  8609. const struct ggml_tensor * src0,
  8610. struct ggml_tensor * dst) {
  8611. switch (src0->type) {
  8612. case GGML_TYPE_F32:
  8613. {
  8614. ggml_compute_forward_gelu_quick_f32(params, src0, dst);
  8615. } break;
  8616. default:
  8617. {
  8618. GGML_ASSERT(false);
  8619. } break;
  8620. }
  8621. }
  8622. // ggml_compute_forward_silu
  8623. static void ggml_compute_forward_silu_f32(
  8624. const struct ggml_compute_params * params,
  8625. const struct ggml_tensor * src0,
  8626. struct ggml_tensor * dst) {
  8627. GGML_ASSERT(ggml_is_contiguous_except_dim_1(src0));
  8628. GGML_ASSERT(ggml_is_contiguous_except_dim_1(dst));
  8629. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  8630. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8631. return;
  8632. }
  8633. const int ith = params->ith;
  8634. const int nth = params->nth;
  8635. const int nc = src0->ne[0];
  8636. const int nr = ggml_nrows(src0);
  8637. // rows per thread
  8638. const int dr = (nr + nth - 1)/nth;
  8639. // row range for this thread
  8640. const int ir0 = dr*ith;
  8641. const int ir1 = MIN(ir0 + dr, nr);
  8642. for (int i1 = ir0; i1 < ir1; i1++) {
  8643. ggml_vec_silu_f32(nc,
  8644. (float *) ((char *) dst->data + i1*( dst->nb[1])),
  8645. (float *) ((char *) src0->data + i1*(src0->nb[1])));
  8646. #ifndef NDEBUG
  8647. for (int k = 0; k < nc; k++) {
  8648. const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k];
  8649. UNUSED(x);
  8650. assert(!isnan(x));
  8651. assert(!isinf(x));
  8652. }
  8653. #endif
  8654. }
  8655. }
  8656. static void ggml_compute_forward_silu(
  8657. const struct ggml_compute_params * params,
  8658. const struct ggml_tensor * src0,
  8659. struct ggml_tensor * dst) {
  8660. switch (src0->type) {
  8661. case GGML_TYPE_F32:
  8662. {
  8663. ggml_compute_forward_silu_f32(params, src0, dst);
  8664. } break;
  8665. default:
  8666. {
  8667. GGML_ASSERT(false);
  8668. } break;
  8669. }
  8670. }
  8671. // ggml_compute_forward_silu_back
  8672. static void ggml_compute_forward_silu_back_f32(
  8673. const struct ggml_compute_params * params,
  8674. const struct ggml_tensor * src0,
  8675. const struct ggml_tensor * grad,
  8676. struct ggml_tensor * dst) {
  8677. GGML_ASSERT(ggml_is_contiguous_except_dim_1(grad));
  8678. GGML_ASSERT(ggml_is_contiguous_except_dim_1(src0));
  8679. GGML_ASSERT(ggml_is_contiguous_except_dim_1(dst));
  8680. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  8681. GGML_ASSERT(ggml_are_same_shape(src0, grad));
  8682. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8683. return;
  8684. }
  8685. const int ith = params->ith;
  8686. const int nth = params->nth;
  8687. const int nc = src0->ne[0];
  8688. const int nr = ggml_nrows(src0);
  8689. // rows per thread
  8690. const int dr = (nr + nth - 1)/nth;
  8691. // row range for this thread
  8692. const int ir0 = dr*ith;
  8693. const int ir1 = MIN(ir0 + dr, nr);
  8694. for (int i1 = ir0; i1 < ir1; i1++) {
  8695. ggml_vec_silu_backward_f32(nc,
  8696. (float *) ((char *) dst->data + i1*( dst->nb[1])),
  8697. (float *) ((char *) src0->data + i1*(src0->nb[1])),
  8698. (float *) ((char *) grad->data + i1*(grad->nb[1])));
  8699. #ifndef NDEBUG
  8700. for (int k = 0; k < nc; k++) {
  8701. const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k];
  8702. UNUSED(x);
  8703. assert(!isnan(x));
  8704. assert(!isinf(x));
  8705. }
  8706. #endif
  8707. }
  8708. }
  8709. static void ggml_compute_forward_silu_back(
  8710. const struct ggml_compute_params * params,
  8711. const struct ggml_tensor * src0,
  8712. const struct ggml_tensor * grad,
  8713. struct ggml_tensor * dst) {
  8714. switch (src0->type) {
  8715. case GGML_TYPE_F32:
  8716. {
  8717. ggml_compute_forward_silu_back_f32(params, src0, grad, dst);
  8718. } break;
  8719. default:
  8720. {
  8721. GGML_ASSERT(false);
  8722. } break;
  8723. }
  8724. }
// ggml_compute_forward_norm

static void ggml_compute_forward_norm_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    GGML_ASSERT(src0->nb[0] == sizeof(float));

    const int ith = params->ith;
    const int nth = params->nth;

    GGML_TENSOR_UNARY_OP_LOCALS;

    float eps;
    memcpy(&eps, dst->op_params, sizeof(float));
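    // two-pass normalization per row of ne00 elements: the first pass computes the row mean,
    // the second centers the values and accumulates the variance, then y is scaled by 1/sqrt(var + eps)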
    // TODO: optimize
    for (int64_t i03 = 0; i03 < ne03; i03++) {
        for (int64_t i02 = 0; i02 < ne02; i02++) {
            for (int64_t i01 = ith; i01 < ne01; i01 += nth) {
                const float * x = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);

                ggml_float sum = 0.0;
                for (int64_t i00 = 0; i00 < ne00; i00++) {
                    sum += (ggml_float)x[i00];
                }

                float mean = sum/ne00;

                float * y = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3);

                ggml_float sum2 = 0.0;
                for (int64_t i00 = 0; i00 < ne00; i00++) {
                    float v = x[i00] - mean;
                    y[i00] = v;
                    sum2 += (ggml_float)(v*v);
                }

                float variance = sum2/ne00;
                const float scale = 1.0f/sqrtf(variance + eps);

                ggml_vec_scale_f32(ne00, y, scale);
            }
        }
    }
}
  8764. static void ggml_compute_forward_norm(
  8765. const struct ggml_compute_params * params,
  8766. const struct ggml_tensor * src0,
  8767. struct ggml_tensor * dst) {
  8768. switch (src0->type) {
  8769. case GGML_TYPE_F32:
  8770. {
  8771. ggml_compute_forward_norm_f32(params, src0, dst);
  8772. } break;
  8773. default:
  8774. {
  8775. GGML_ASSERT(false);
  8776. } break;
  8777. }
  8778. }
// ggml_compute_forward_rms_norm

static void ggml_compute_forward_rms_norm_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    GGML_ASSERT(src0->nb[0] == sizeof(float));

    const int ith = params->ith;
    const int nth = params->nth;

    GGML_TENSOR_UNARY_OP_LOCALS;

    float eps;
    memcpy(&eps, dst->op_params, sizeof(float));
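    // RMS norm skips the mean subtraction: each row is simply scaled by 1/sqrt(mean(x^2) + eps)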
    // TODO: optimize
    for (int64_t i03 = 0; i03 < ne03; i03++) {
        for (int64_t i02 = 0; i02 < ne02; i02++) {
            for (int64_t i01 = ith; i01 < ne01; i01 += nth) {
                const float * x = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);

                ggml_float sum = 0.0;
                for (int64_t i00 = 0; i00 < ne00; i00++) {
                    sum += (ggml_float)(x[i00] * x[i00]);
                }

                const float mean = sum/ne00;

                float * y = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3);

                memcpy(y, x, ne00 * sizeof(float));

                const float scale = 1.0f/sqrtf(mean + eps);

                ggml_vec_scale_f32(ne00, y, scale);
            }
        }
    }
}
  8815. static void ggml_compute_forward_rms_norm(
  8816. const struct ggml_compute_params * params,
  8817. const struct ggml_tensor * src0,
  8818. struct ggml_tensor * dst) {
  8819. switch (src0->type) {
  8820. case GGML_TYPE_F32:
  8821. {
  8822. ggml_compute_forward_rms_norm_f32(params, src0, dst);
  8823. } break;
  8824. default:
  8825. {
  8826. GGML_ASSERT(false);
  8827. } break;
  8828. }
  8829. }
  8830. static void ggml_compute_forward_rms_norm_back_f32(
  8831. const struct ggml_compute_params * params,
  8832. const struct ggml_tensor * src0,
  8833. const struct ggml_tensor * src1,
  8834. struct ggml_tensor * dst) {
  8835. GGML_ASSERT(ggml_are_same_shape(src0, dst) && ggml_are_same_shape(src0, src1));
  8836. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  8837. return;
  8838. }
  8839. GGML_ASSERT(src0->nb[0] == sizeof(float));
  8840. const int ith = params->ith;
  8841. const int nth = params->nth;
  8842. GGML_TENSOR_BINARY_OP_LOCALS;
  8843. float eps;
  8844. memcpy(&eps, dst->op_params, sizeof(float));
  8845. // TODO: optimize
  8846. for (int64_t i03 = 0; i03 < ne03; i03++) {
  8847. for (int64_t i02 = 0; i02 < ne02; i02++) {
  8848. for (int64_t i01 = ith; i01 < ne01; i01 += nth) {
  8849. // src1 is same shape as src0 => same indices
  8850. const int64_t i11 = i01;
  8851. const int64_t i12 = i02;
  8852. const int64_t i13 = i03;
  8853. const float * x = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
  8854. const float * dz = (float *) ((char *) src1->data + i11*nb11 + i12*nb12 + i13*nb13);
  8855. ggml_float sum_xx = 0.0;
  8856. ggml_float sum_xdz = 0.0;
  8857. for (int64_t i00 = 0; i00 < ne00; i00++) {
  8858. sum_xx += (ggml_float)(x[i00] * x[i00]);
  8859. sum_xdz += (ggml_float)(x[i00] * dz[i00]);
  8860. }
  8861. //const float mean = (float)(sum_xx)/ne00;
  8862. const float mean_eps = (float)(sum_xx)/ne00 + eps;
  8863. const float sum_eps = (float)(sum_xx) + eps*ne00;
  8864. //const float mean_xdz = (float)(sum_xdz)/ne00;
  8865. // we could cache rms from forward pass to improve performance.
  8866. // to do this implement ggml_rms and compose ggml_rms_norm using ggml_rms.
  8867. //const float rms = sqrtf(mean_eps);
  8868. const float rrms = 1.0f / sqrtf(mean_eps);
  8869. //const float scale = -rrms/(ne00 * mean_eps); // -1/(n*rms**3)
  8870. {
  8871. // z = rms_norm(x)
  8872. //
  8873. // rms_norm(src0) =
  8874. // scale(
  8875. // src0,
  8876. // div(
  8877. // 1,
  8878. // sqrt(
  8879. // add(
  8880. // scale(
  8881. // sum(
  8882. // sqr(
  8883. // src0)),
  8884. // (1.0/N)),
  8885. // eps))));
  8886. // postorder:
  8887. // ## op args grad
  8888. // 00 param src0 grad[#00]
  8889. // 01 const 1
  8890. // 02 sqr (#00) grad[#02]
  8891. // 03 sum (#02) grad[#03]
  8892. // 04 const 1/N
  8893. // 05 scale (#03, #04) grad[#05]
  8894. // 06 const eps
  8895. // 07 add (#05, #06) grad[#07]
  8896. // 08 sqrt (#07) grad[#08]
  8897. // 09 div (#01,#08) grad[#09]
  8898. // 10 scale (#00,#09) grad[#10]
  8899. //
  8900. // backward pass, given grad[#10]
  8901. // #10: scale
  8902. // grad[#00] += scale(grad[#10],#09)
  8903. // grad[#09] += sum(mul(grad[#10],#00))
  8904. // #09: div
  8905. // grad[#08] += neg(mul(grad[#09], div(#09,#08)))
  8906. // #08: sqrt
  8907. // grad[#07] += mul(grad[#08], div(0.5, #08))
  8908. // #07: add
  8909. // grad[#05] += grad[#07]
  8910. // #05: scale
  8911. // grad[#03] += scale(grad[#05],#04)
  8912. // #03: sum
  8913. // grad[#02] += repeat(grad[#03], #02)
  8914. // #02:
  8915. // grad[#00] += scale(mul(#00, grad[#02]), 2.0)
  8916. //
  8917. // substitute and simplify:
  8918. // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, grad[#02]), 2.0)
  8919. // grad[#02] = repeat(grad[#03], #02)
  8920. // grad[#02] = repeat(scale(grad[#05],#04), #02)
  8921. // grad[#02] = repeat(scale(grad[#07],#04), #02)
  8922. // grad[#02] = repeat(scale(mul(grad[#08], div(0.5, #08)),#04), #02)
  8923. // grad[#02] = repeat(scale(mul(neg(mul(grad[#09], div(#09,#08))), div(0.5, #08)),#04), #02)
  8924. // grad[#02] = repeat(scale(mul(neg(mul(sum(mul(grad[#10],#00)), div(#09,#08))), div(0.5, #08)),#04), #02)
  8925. // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(#09,#08) * div(0.5, #08) * (1/N)), #02)
  8926. // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(div(#01,#08),#08) * div(0.5, #08) * (1/N)), #02)
  8927. // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(1,#08*#08) * div(0.5, #08) * (1/N)), #02)
  8928. // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N)), #02)
  8929. // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, grad[#02]), 2.0)
  8930. // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, repeat(-(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N)), #02)), 2.0)
  8931. // grad[#00] = scale(grad(#10), #09) + scale(scale(#00, -(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N))), 2.0)
  8932. // grad[#00] = scale(grad(#10), #09) + scale(#00, -(sum(mul(grad[#10],#00)) * div(1,#07) * div(1,#08) * (1/N)))
  8933. // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(1,#07*#08) * (-1/N))
  8934. // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(1,#07*#08) * (-1/N))
  8935. // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(1,mean_eps*rms) * (-1/N))
  8936. // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*mean_eps))
  8937. // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*(sum_xx/N+eps)))
  8938. // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*sum_xx+rms*N*eps))
  8939. // grad[#00] = scale(dz, rrms) + scale(x, sum(mul(dz,x)) * div(-1,rms*N*mean_eps))
  8940. // grad[#00] = scale(dz, rrms) + scale(x, sum_xdz * div(-1,rms*N*mean_eps))
  8941. // a = b*c + d*e
  8942. // a = b*c*f/f + d*e*f/f
  8943. // a = (b*c*f + d*e*f)*(1/f)
  8944. // a = (b*c*(1/c) + d*e*(1/c))*(1/(1/c))
  8945. // a = (b + d*e/c)*c
  8946. // b = dz, c = rrms, d = x, e = sum_xdz * div(-1,rms*N*mean_eps)
  8947. // a = (dz + x*sum_xdz * div(-1,rms*N*mean_eps)/rrms)*rrms
  8948. // a = (dz + x*sum_xdz * div(-1,rms*N*mean_eps)*rms)*rrms
  8949. // a = (dz + x*sum_xdz * div(-rms,rms*N*mean_eps))*rrms
  8950. // a = (dz + x*sum_xdz * div(-1,N*mean_eps))*rrms
  8951. // a = (dz + x*div(-sum_xdz,N*mean_eps))*rrms
  8952. // a = (dz + x*div(-mean_xdz,mean_eps))*rrms
  8953. // grad[#00] = scale(dz + scale(x, div(-mean_xdz,mean_eps)),rrms)
  8954. // grad[#00] = scale(dz + scale(x, -mean_xdz/mean_eps),rrms)
  8955. // dx = scale(dz + scale(x, -mean_xdz/mean_eps),rrms)
  8956. }
  8957. // dx = scale(dz + scale(x, -mean_xdz/mean_eps),rrms)
  8958. // post-order:
  8959. // dx := x
  8960. // dx := scale(dx,-mean_xdz/mean_eps)
  8961. // dx := add(dx, dz)
  8962. // dx := scale(dx, rrms)
  8963. float * dx = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3);
  8964. ggml_vec_cpy_f32 (ne00, dx, x);
  8965. // ggml_vec_scale_f32(ne00, dx, -mean_xdz/mean_eps);
  8966. ggml_vec_scale_f32(ne00, dx, (float)(-sum_xdz)/sum_eps);
  8967. ggml_vec_acc_f32 (ne00, dx, dz);
  8968. ggml_vec_scale_f32(ne00, dx, rrms);
  8969. }
  8970. }
  8971. }
  8972. }
  8973. static void ggml_compute_forward_rms_norm_back(
  8974. const struct ggml_compute_params * params,
  8975. const struct ggml_tensor * src0,
  8976. const struct ggml_tensor * src1,
  8977. struct ggml_tensor * dst) {
  8978. switch (src0->type) {
  8979. case GGML_TYPE_F32:
  8980. {
  8981. ggml_compute_forward_rms_norm_back_f32(params, src0, src1, dst);
  8982. } break;
  8983. default:
  8984. {
  8985. GGML_ASSERT(false);
  8986. } break;
  8987. }
  8988. }
// ggml_compute_forward_group_norm

static void ggml_compute_forward_group_norm_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    GGML_ASSERT(src0->nb[0] == sizeof(float));

    const int ith = params->ith;
    const int nth = params->nth;

    GGML_TENSOR_UNARY_OP_LOCALS;

    const float eps = 1e-6f; // TODO: make this a parameter

    // TODO: optimize
    int n_channels = src0->ne[2];
    int n_groups = dst->op_params[0];
    int n_channels_per_group = (n_channels + n_groups - 1) / n_groups;
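    // channels (dim 2) are split into n_groups groups of up to n_channels_per_group channels
    // (the last group may be smaller); mean and variance are computed per group and per batch
    // element (dim 3) over all values in dims 0 and 1 of that group's channels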
    for (int i = ith; i < n_groups; i += nth) {
        int start = i * n_channels_per_group;
        int end = start + n_channels_per_group;
        if (end > n_channels) {
            end = n_channels;
        }
        int step = end - start;

        for (int64_t i03 = 0; i03 < ne03; i03++) {
            ggml_float sum = 0.0;
            for (int64_t i02 = start; i02 < end; i02++) {
                for (int64_t i01 = 0; i01 < ne01; i01++) {
                    const float * x = (float *)((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);

                    for (int64_t i00 = 0; i00 < ne00; i00++) {
                        sum += (ggml_float)x[i00];
                    }
                }
            }
            float mean = sum / (ne00*ne01*step);

            ggml_float sum2 = 0.0;
            for (int64_t i02 = start; i02 < end; i02++) {
                for (int64_t i01 = 0; i01 < ne01; i01++) {
                    const float * x = (float *)((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);

                    float * y = (float *)((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3);

                    for (int64_t i00 = 0; i00 < ne00; i00++) {
                        float v = x[i00] - mean;
                        y[i00] = v;
                        sum2 += (ggml_float)(v*v);
                    }
                }
            }
            float variance = sum2 / (ne00*ne01*step);
            const float scale = 1.0f / sqrtf(variance + eps);

            for (int64_t i02 = start; i02 < end; i02++) {
                for (int64_t i01 = 0; i01 < ne01; i01++) {
                    float * y = (float *)((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3);
                    ggml_vec_scale_f32(ne00, y, scale);
                }
            }
        }
    }
}
  9048. static void ggml_compute_forward_group_norm(
  9049. const struct ggml_compute_params * params,
  9050. const struct ggml_tensor * src0,
  9051. struct ggml_tensor * dst) {
  9052. switch (src0->type) {
  9053. case GGML_TYPE_F32:
  9054. {
  9055. ggml_compute_forward_group_norm_f32(params, src0, dst);
  9056. } break;
  9057. default:
  9058. {
  9059. GGML_ASSERT(false);
  9060. } break;
  9061. }
  9062. }
// ggml_compute_forward_mul_mat

#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
// helper function to determine if it is better to use BLAS or not
// for large matrices, BLAS is faster
static bool ggml_compute_forward_mul_mat_use_blas(
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    //const int64_t ne00 = src0->ne[0];
    //const int64_t ne01 = src0->ne[1];

    const int64_t ne10 = src1->ne[0];

    const int64_t ne0 = dst->ne[0];
    const int64_t ne1 = dst->ne[1];

    // TODO: find the optimal values for these
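    // heuristic: only hand the work to BLAS when both operands are contiguous and all three
    // GEMM dimensions (ne0, ne1 and the shared dimension ne10) are at least 32; below that,
    // the conversion and call overhead tends to outweigh the BLAS throughput advantage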
    if (ggml_is_contiguous(src0) &&
        ggml_is_contiguous(src1) &&
        (ne0 >= 32 && ne1 >= 32 && ne10 >= 32)) {

        /*printf("BLAS: %d %d %d %d %d\n", ne0, ne1, ne10, ne00, ne01);*/
        return true;
    }

    return false;
}
#endif
  9086. static void ggml_compute_forward_mul_mat(
  9087. const struct ggml_compute_params * params,
  9088. const struct ggml_tensor * src0,
  9089. const struct ggml_tensor * src1,
  9090. struct ggml_tensor * dst) {
  9091. int64_t t0 = ggml_perf_time_us();
  9092. UNUSED(t0);
  9093. GGML_TENSOR_BINARY_OP_LOCALS;
  9094. const int ith = params->ith;
  9095. const int nth = params->nth;
  9096. const enum ggml_type type = src0->type;
  9097. const bool src1_cont = ggml_is_contiguous(src1);
  9098. ggml_vec_dot_t const vec_dot = type_traits[type].vec_dot;
  9099. enum ggml_type const vec_dot_type = type_traits[type].vec_dot_type;
  9100. ggml_from_float_t const from_float_to_vec_dot = type_traits[vec_dot_type].from_float;
  9101. GGML_ASSERT(ne0 == ne01);
  9102. GGML_ASSERT(ne1 == ne11);
  9103. GGML_ASSERT(ne2 == ne12);
  9104. GGML_ASSERT(ne3 == ne13);
  9105. // we don't support permuted src0 or src1
  9106. GGML_ASSERT(nb00 == ggml_type_size(type));
  9107. GGML_ASSERT(nb10 == sizeof(float));
  9108. // dst cannot be transposed or permuted
  9109. GGML_ASSERT(nb0 == sizeof(float));
  9110. GGML_ASSERT(nb0 <= nb1);
  9111. GGML_ASSERT(nb1 <= nb2);
  9112. GGML_ASSERT(nb2 <= nb3);
  9113. // broadcast factors
  9114. const int64_t r2 = ne12/ne02;
  9115. const int64_t r3 = ne13/ne03;
  9116. // nb01 >= nb00 - src0 is not transposed
  9117. // compute by src0 rows
  9118. #if defined(GGML_USE_CLBLAST)
  9119. if (ggml_cl_can_mul_mat(src0, src1, dst)) {
  9120. // TODO: handle case when src0 is broadcast-able into src1 across 2nd,3rd dimension
  9121. // ref: https://github.com/ggerganov/ggml/pull/224
  9122. GGML_ASSERT(ne02 == ne12);
  9123. GGML_ASSERT(ne03 == ne13);
  9124. if (params->ith == 0 && params->type == GGML_TASK_COMPUTE) {
  9125. ggml_cl_mul_mat(src0, src1, dst, params->wdata, params->wsize);
  9126. }
  9127. return;
  9128. }
  9129. #endif
  9130. #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
  9131. if (ggml_compute_forward_mul_mat_use_blas(src0, src1, dst)) {
  9132. if (params->ith != 0) {
  9133. return;
  9134. }
  9135. if (params->type == GGML_TASK_INIT) {
  9136. return;
  9137. }
  9138. if (params->type == GGML_TASK_FINALIZE) {
  9139. return;
  9140. }
  9141. for (int64_t i13 = 0; i13 < ne13; i13++) {
  9142. for (int64_t i12 = 0; i12 < ne12; i12++) {
  9143. // broadcast src0 into src1 across 2nd,3rd dimension
  9144. const int64_t i03 = i13/r3;
  9145. const int64_t i02 = i12/r2;
  9146. const void * x = (char *) src0->data + i02*nb02 + i03*nb03;
  9147. const float * y = (float *) ((char *) src1->data + i12*nb12 + i13*nb13);
  9148. float * d = (float *) ((char *) dst->data + i12*nb2 + i13*nb3);
  9149. if (type != GGML_TYPE_F32) {
  9150. float * const wdata = params->wdata;
  9151. ggml_to_float_t const to_float = type_traits[type].to_float;
  9152. size_t id = 0;
  9153. for (int64_t i01 = 0; i01 < ne01; ++i01) {
  9154. to_float((const char *) x + i01*nb01, wdata + id, ne00);
  9155. id += ne00;
  9156. }
  9157. assert(id*sizeof(float) <= params->wsize);
  9158. x = wdata;
  9159. }
  9160. cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans,
  9161. ne11, ne01, ne10,
  9162. 1.0f, y, ne10,
  9163. x, ne00,
  9164. 0.0f, d, ne01);
  9165. }
  9166. }
  9167. //printf("CBLAS = %f ms, %d x %d x %d x %d\n", (ggml_perf_time_us() - t0)/1000.0, ne0, ne1, ne2, ne3);
  9168. return;
  9169. }
  9170. #endif
  9171. if (params->type == GGML_TASK_INIT) {
  9172. if (src1->type != vec_dot_type) {
  9173. char * wdata = params->wdata;
  9174. const size_t row_size = ne10*ggml_type_size(vec_dot_type)/ggml_blck_size(vec_dot_type);
  9175. for (int64_t i13 = 0; i13 < ne13; ++i13) {
  9176. for (int64_t i12 = 0; i12 < ne12; ++i12) {
  9177. for (int64_t i11 = 0; i11 < ne11; ++i11) {
  9178. from_float_to_vec_dot((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11), (void *) wdata, ne10);
  9179. wdata += row_size;
  9180. }
  9181. }
  9182. }
  9183. }
  9184. return;
  9185. }
  9186. if (params->type == GGML_TASK_FINALIZE) {
  9187. return;
  9188. }
  9189. const void * wdata = (src1->type == vec_dot_type) ? src1->data : params->wdata;
  9190. const size_t row_size = ne10*ggml_type_size(vec_dot_type)/ggml_blck_size(vec_dot_type);
  9191. const int64_t nr0 = ne01; // src0 rows
  9192. const int64_t nr1 = ne11*ne12*ne13; // src1 rows
  9193. //printf("nr0 = %lld, nr1 = %lld\n", nr0, nr1);
  9194. // distribute the thread work across the inner or outer loop based on which one is larger
  9195. const int64_t nth0 = nr0 > nr1 ? nth : 1; // parallelize by src0 rows
  9196. const int64_t nth1 = nr0 > nr1 ? 1 : nth; // parallelize by src1 rows
  9197. const int64_t ith0 = ith % nth0;
  9198. const int64_t ith1 = ith / nth0;
  9199. const int64_t dr0 = (nr0 + nth0 - 1)/nth0;
  9200. const int64_t dr1 = (nr1 + nth1 - 1)/nth1;
  9201. const int64_t ir010 = dr0*ith0;
  9202. const int64_t ir011 = MIN(ir010 + dr0, nr0);
  9203. const int64_t ir110 = dr1*ith1;
  9204. const int64_t ir111 = MIN(ir110 + dr1, nr1);
  9205. //printf("ir010 = %6lld, ir011 = %6lld, ir110 = %6lld, ir111 = %6lld\n", ir010, ir011, ir110, ir111);
  9206. // threads with no work simply yield (not sure if it helps)
  9207. if (ir010 >= ir011 || ir110 >= ir111) {
  9208. sched_yield();
  9209. return;
  9210. }
  9211. assert(ne12 % ne02 == 0);
  9212. assert(ne13 % ne03 == 0);
  9213. // block-tiling attempt
  9214. const int64_t blck_0 = 16;
  9215. const int64_t blck_1 = 16;
  9216. // attempt to reduce false-sharing (does not seem to make a difference)
  9217. float tmp[16];
  9218. for (int64_t iir1 = ir110; iir1 < ir111; iir1 += blck_1) {
  9219. for (int64_t iir0 = ir010; iir0 < ir011; iir0 += blck_0) {
  9220. for (int64_t ir1 = iir1; ir1 < iir1 + blck_1 && ir1 < ir111; ++ir1) {
  9221. const int64_t i13 = (ir1/(ne12*ne11));
  9222. const int64_t i12 = (ir1 - i13*ne12*ne11)/ne11;
  9223. const int64_t i11 = (ir1 - i13*ne12*ne11 - i12*ne11);
  9224. // broadcast src0 into src1
  9225. const int64_t i03 = i13/r3;
  9226. const int64_t i02 = i12/r2;
  9227. const int64_t i1 = i11;
  9228. const int64_t i2 = i12;
  9229. const int64_t i3 = i13;
  9230. const char * src0_row = (const char *) src0->data + (0 + i02*nb02 + i03*nb03);
  9231. // desc: when src1 is not a contiguous memory block we have to calculate the offset using the strides
  9232. // if it is, then we have either copied the data to params->wdata and made it contiguous or we are using
  9233. // the original src1 data pointer, so we should index using the indices directly
  9234. // TODO: this is a bit of a hack, we should probably have a better way to handle this
  9235. const char * src1_col = (const char *) wdata +
  9236. (src1_cont || src1->type != vec_dot_type
  9237. ? (i11 + i12*ne11 + i13*ne12*ne11)*row_size
  9238. : (i11*nb11 + i12*nb12 + i13*nb13));
  9239. float * dst_col = (float *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3));
  9240. //for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir011; ++ir0) {
  9241. // vec_dot(ne00, &dst_col[ir0], src0_row + ir0*nb01, src1_col);
  9242. //}
  9243. for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir011; ++ir0) {
  9244. vec_dot(ne00, &tmp[ir0 - iir0], src0_row + ir0*nb01, src1_col);
  9245. }
  9246. memcpy(&dst_col[iir0], tmp, (MIN(iir0 + blck_0, ir011) - iir0)*sizeof(float));
  9247. }
  9248. }
  9249. }
  9250. }
  9251. // ggml_compute_forward_out_prod
  9252. static void ggml_compute_forward_out_prod_f32(
  9253. const struct ggml_compute_params * params,
  9254. const struct ggml_tensor * src0,
  9255. const struct ggml_tensor * src1,
  9256. struct ggml_tensor * dst) {
  9257. int64_t t0 = ggml_perf_time_us();
  9258. UNUSED(t0);
  9259. GGML_TENSOR_BINARY_OP_LOCALS;
  9260. const int ith = params->ith;
  9261. const int nth = params->nth;
  9262. GGML_ASSERT(ne02 == ne12);
  9263. GGML_ASSERT(ne03 == ne13);
  9264. GGML_ASSERT(ne2 == ne12);
  9265. GGML_ASSERT(ne3 == ne13);
  9266. // we don't support permuted src0 or src1
  9267. GGML_ASSERT(nb00 == sizeof(float));
  9268. // dst cannot be transposed or permuted
  9269. GGML_ASSERT(nb0 == sizeof(float));
  9270. // GGML_ASSERT(nb0 <= nb1);
  9271. // GGML_ASSERT(nb1 <= nb2);
  9272. // GGML_ASSERT(nb2 <= nb3);
  9273. GGML_ASSERT(ne0 == ne00);
  9274. GGML_ASSERT(ne1 == ne10);
  9275. GGML_ASSERT(ne2 == ne02);
  9276. GGML_ASSERT(ne3 == ne03);
  9277. // nb01 >= nb00 - src0 is not transposed
  9278. // compute by src0 rows
  9279. // TODO: #if defined(GGML_USE_CUBLAS) ggml_cuda_out_prod
  9280. // TODO: #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) || defined(GGML_USE_CLBLAST)
  9281. if (params->type == GGML_TASK_INIT) {
  9282. ggml_vec_set_f32(ne0*ne1*ne2*ne3, dst->data, 0);
  9283. return;
  9284. }
  9285. if (params->type == GGML_TASK_FINALIZE) {
  9286. return;
  9287. }
  9288. // parallelize by last three dimensions
  9289. // total rows in dst
  9290. const int64_t nr = ne1*ne2*ne3;
  9291. // rows per thread
  9292. const int64_t dr = (nr + nth - 1)/nth;
  9293. // row range for this thread
  9294. const int64_t ir0 = dr*ith;
  9295. const int64_t ir1 = MIN(ir0 + dr, nr);
  9296. // dst[:,:,:,:] = 0
  9297. // for i2,i3:
  9298. // for i1:
  9299. // for i01:
  9300. // for i0:
  9301. // dst[i0,i1,i2,i3] += src0[i0,i01,i2,i3] * src1[i1,i01,i2,i3]
  9302. for (int64_t ir = ir0; ir < ir1; ++ir) {
  9303. // dst indices
  9304. const int64_t i3 = ir/(ne2*ne1);
  9305. const int64_t i2 = (ir - i3*ne2*ne1)/ne1;
  9306. const int64_t i1 = (ir - i3*ne2*ne1 - i2*ne1);
  9307. const int64_t i02 = i2;
  9308. const int64_t i03 = i3;
  9309. //const int64_t i10 = i1;
  9310. const int64_t i12 = i2;
  9311. const int64_t i13 = i3;
  9312. for (int64_t i01 = 0; i01 < ne01; ++i01) {
  9313. const int64_t i11 = i01;
  9314. float * s0 = (float *) ((char *) src0->data + ( i01*nb01 + i02*nb02 + i03*nb03));
  9315. float * s1 = (float *) ((char *) src1->data + (i1*nb10 + i11*nb11 + i12*nb12 + i13*nb13));
  9316. float * d = (float *) ((char *) dst->data + ( i1*nb1 + i2*nb2 + i3*nb3));
  9317. ggml_vec_mad_f32(ne0, d, s0, *s1);
  9318. // for (int64_t i0 = 0; i0 < ne0; ++i0) {
  9319. // d[i0] += s0[i0] * s1[i1];
  9320. // }
  9321. }
  9322. }
  9323. //int64_t t1 = ggml_perf_time_us();
  9324. //static int64_t acc = 0;
  9325. //acc += t1 - t0;
  9326. //if (t1 - t0 > 10) {
  9327. // printf("\n");
  9328. // printf("ne00 = %5d, ne01 = %5d, ne02 = %5d, ne03 = %5d\n", ne00, ne01, ne02, ne03);
  9329. // printf("nb00 = %5d, nb01 = %5d, nb02 = %5d, nb03 = %5d\n", nb00, nb01, nb02, nb03);
  9330. // printf("ne10 = %5d, ne11 = %5d, ne12 = %5d, ne13 = %5d\n", ne10, ne11, ne12, ne13);
  9331. // printf("nb10 = %5d, nb11 = %5d, nb12 = %5d, nb13 = %5d\n", nb10, nb11, nb12, nb13);
  9332. // printf("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX task %d/%d: %d us, acc = %d\n", ith, nth, (int) (t1 - t0), (int) acc);
  9333. //}
  9334. }
  9335. static void ggml_compute_forward_out_prod(
  9336. const struct ggml_compute_params * params,
  9337. const struct ggml_tensor * src0,
  9338. const struct ggml_tensor * src1,
  9339. struct ggml_tensor * dst) {
  9340. switch (src0->type) {
  9341. case GGML_TYPE_Q4_0:
  9342. case GGML_TYPE_Q4_1:
  9343. case GGML_TYPE_Q5_0:
  9344. case GGML_TYPE_Q5_1:
  9345. case GGML_TYPE_Q8_0:
  9346. case GGML_TYPE_Q8_1:
  9347. {
  9348. GGML_ASSERT(false); // todo
  9349. // ggml_compute_forward_out_prod_q_f32(params, src0, src1, dst);
  9350. } break;
  9351. case GGML_TYPE_F16:
  9352. {
  9353. GGML_ASSERT(false); // todo
  9354. // ggml_compute_forward_out_prod_f16_f32(params, src0, src1, dst);
  9355. } break;
  9356. case GGML_TYPE_F32:
  9357. {
  9358. ggml_compute_forward_out_prod_f32(params, src0, src1, dst);
  9359. } break;
  9360. default:
  9361. {
  9362. GGML_ASSERT(false);
  9363. } break;
  9364. }
  9365. }
  9366. // ggml_compute_forward_scale
  9367. static void ggml_compute_forward_scale_f32(
  9368. const struct ggml_compute_params * params,
  9369. const struct ggml_tensor * src0,
  9370. const struct ggml_tensor * src1,
  9371. struct ggml_tensor * dst) {
  9372. GGML_ASSERT(ggml_is_contiguous(src0));
  9373. GGML_ASSERT(ggml_is_contiguous(dst));
  9374. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  9375. GGML_ASSERT(ggml_is_scalar(src1));
  9376. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9377. return;
  9378. }
  9379. // scale factor
  9380. const float v = *(float *) src1->data;
  9381. const int ith = params->ith;
  9382. const int nth = params->nth;
  9383. const int nc = src0->ne[0];
  9384. const int nr = ggml_nrows(src0);
  9385. // rows per thread
  9386. const int dr = (nr + nth - 1)/nth;
  9387. // row range for this thread
  9388. const int ir0 = dr*ith;
  9389. const int ir1 = MIN(ir0 + dr, nr);
  9390. const size_t nb01 = src0->nb[1];
  9391. const size_t nb1 = dst->nb[1];
  9392. for (int i1 = ir0; i1 < ir1; i1++) {
  9393. if (dst->data != src0->data) {
  9394. // src0 is same shape as dst => same indices
  9395. memcpy((char *)dst->data + i1*nb1, (char *)src0->data + i1*nb01, nc * sizeof(float));
  9396. }
  9397. ggml_vec_scale_f32(nc, (float *) ((char *) dst->data + i1*nb1), v);
  9398. }
  9399. }
  9400. static void ggml_compute_forward_scale(
  9401. const struct ggml_compute_params * params,
  9402. const struct ggml_tensor * src0,
  9403. const struct ggml_tensor * src1,
  9404. struct ggml_tensor * dst) {
  9405. switch (src0->type) {
  9406. case GGML_TYPE_F32:
  9407. {
  9408. ggml_compute_forward_scale_f32(params, src0, src1, dst);
  9409. } break;
  9410. default:
  9411. {
  9412. GGML_ASSERT(false);
  9413. } break;
  9414. }
  9415. }
  9416. // ggml_compute_forward_set
  9417. static void ggml_compute_forward_set_f32(
  9418. const struct ggml_compute_params * params,
  9419. const struct ggml_tensor * src0,
  9420. const struct ggml_tensor * src1,
  9421. struct ggml_tensor * dst) {
  9422. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  9423. GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));
  9424. // view src0 and dst with these strides and data offset inbytes during set
  9425. // nb0 is implicitely element_size because src0 and dst are contiguous
  9426. size_t nb1 = ((int32_t *) dst->op_params)[0];
  9427. size_t nb2 = ((int32_t *) dst->op_params)[1];
  9428. size_t nb3 = ((int32_t *) dst->op_params)[2];
  9429. size_t offset = ((int32_t *) dst->op_params)[3];
  9430. bool inplace = (bool) ((int32_t *) dst->op_params)[4];
  9431. if (!inplace && (params->type == GGML_TASK_INIT)) {
  9432. // memcpy needs to be synchronized across threads to avoid race conditions.
  9433. // => do it in INIT phase
  9434. memcpy(
  9435. ((char *) dst->data),
  9436. ((char *) src0->data),
  9437. ggml_nbytes(dst));
  9438. }
  9439. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9440. return;
  9441. }
  9442. const int ith = params->ith;
  9443. const int nth = params->nth;
  9444. const int nr = ggml_nrows(src1);
  9445. const int nc = src1->ne[0];
  9446. GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne);
  9447. GGML_TENSOR_LOCALS(size_t, nb1, src1, nb);
  9448. // src0 and dst as viewed during set
  9449. const size_t nb0 = ggml_element_size(src0);
  9450. const int im0 = (ne10 == 0 ? 0 : ne10-1);
  9451. const int im1 = (ne11 == 0 ? 0 : ne11-1);
  9452. const int im2 = (ne12 == 0 ? 0 : ne12-1);
  9453. const int im3 = (ne13 == 0 ? 0 : ne13-1);
  9454. GGML_ASSERT(offset + im0*nb0 + im1*nb1 + im2*nb2 + im3*nb3 <= ggml_nbytes(dst));
  9455. GGML_ASSERT(nb10 == sizeof(float));
  9456. // rows per thread
  9457. const int dr = (nr + nth - 1)/nth;
  9458. // row range for this thread
  9459. const int ir0 = dr*ith;
  9460. const int ir1 = MIN(ir0 + dr, nr);
  9461. for (int ir = ir0; ir < ir1; ++ir) {
  9462. // src0 and dst are viewed with shape of src1 and offset
  9463. // => same indices
  9464. const int i3 = ir/(ne12*ne11);
  9465. const int i2 = (ir - i3*ne12*ne11)/ne11;
  9466. const int i1 = (ir - i3*ne12*ne11 - i2*ne11);
  9467. ggml_vec_cpy_f32(nc,
  9468. (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + offset),
  9469. (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11));
  9470. }
  9471. }
  9472. static void ggml_compute_forward_set(
  9473. const struct ggml_compute_params * params,
  9474. const struct ggml_tensor * src0,
  9475. const struct ggml_tensor * src1,
  9476. struct ggml_tensor * dst) {
  9477. switch (src0->type) {
  9478. case GGML_TYPE_F32:
  9479. {
  9480. ggml_compute_forward_set_f32(params, src0, src1, dst);
  9481. } break;
  9482. case GGML_TYPE_F16:
  9483. case GGML_TYPE_Q4_0:
  9484. case GGML_TYPE_Q4_1:
  9485. case GGML_TYPE_Q5_0:
  9486. case GGML_TYPE_Q5_1:
  9487. case GGML_TYPE_Q8_0:
  9488. case GGML_TYPE_Q8_1:
  9489. case GGML_TYPE_Q2_K:
  9490. case GGML_TYPE_Q3_K:
  9491. case GGML_TYPE_Q4_K:
  9492. case GGML_TYPE_Q5_K:
  9493. case GGML_TYPE_Q6_K:
  9494. default:
  9495. {
  9496. GGML_ASSERT(false);
  9497. } break;
  9498. }
  9499. }
  9500. // ggml_compute_forward_cpy
  9501. static void ggml_compute_forward_cpy(
  9502. const struct ggml_compute_params * params,
  9503. const struct ggml_tensor * src0,
  9504. struct ggml_tensor * dst) {
  9505. ggml_compute_forward_dup(params, src0, dst);
  9506. }
  9507. // ggml_compute_forward_cont
  9508. static void ggml_compute_forward_cont(
  9509. const struct ggml_compute_params * params,
  9510. const struct ggml_tensor * src0,
  9511. struct ggml_tensor * dst) {
  9512. ggml_compute_forward_dup(params, src0, dst);
  9513. }
  9514. // ggml_compute_forward_reshape
  9515. static void ggml_compute_forward_reshape(
  9516. const struct ggml_compute_params * params,
  9517. const struct ggml_tensor * src0,
  9518. struct ggml_tensor * dst) {
  9519. // NOP
  9520. UNUSED(params);
  9521. UNUSED(src0);
  9522. UNUSED(dst);
  9523. }
  9524. // ggml_compute_forward_view
  9525. static void ggml_compute_forward_view(
  9526. const struct ggml_compute_params * params,
  9527. const struct ggml_tensor * src0) {
  9528. // NOP
  9529. UNUSED(params);
  9530. UNUSED(src0);
  9531. }
  9532. // ggml_compute_forward_permute
  9533. static void ggml_compute_forward_permute(
  9534. const struct ggml_compute_params * params,
  9535. const struct ggml_tensor * src0) {
  9536. // NOP
  9537. UNUSED(params);
  9538. UNUSED(src0);
  9539. }
  9540. // ggml_compute_forward_transpose
  9541. static void ggml_compute_forward_transpose(
  9542. const struct ggml_compute_params * params,
  9543. const struct ggml_tensor * src0) {
  9544. // NOP
  9545. UNUSED(params);
  9546. UNUSED(src0);
  9547. }
  9548. // ggml_compute_forward_get_rows
  9549. static void ggml_compute_forward_get_rows_q(
  9550. const struct ggml_compute_params * params,
  9551. const struct ggml_tensor * src0,
  9552. const struct ggml_tensor * src1,
  9553. struct ggml_tensor * dst) {
  9554. assert(params->ith == 0);
  9555. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9556. return;
  9557. }
  9558. const int nc = src0->ne[0];
  9559. const int nr = ggml_nelements(src1);
  9560. const enum ggml_type type = src0->type;
  9561. ggml_to_float_t const dequantize_row_q = type_traits[type].to_float;
  9562. assert( dst->ne[0] == nc);
  9563. assert( dst->ne[1] == nr);
  9564. assert(src0->nb[0] == ggml_type_size(type));
  9565. for (int i = 0; i < nr; ++i) {
  9566. const int r = ((int32_t *) src1->data)[i];
  9567. dequantize_row_q(
  9568. (const void *) ((char *) src0->data + r*src0->nb[1]),
  9569. (float *) ((char *) dst->data + i*dst->nb[1]), nc);
  9570. }
  9571. }
  9572. static void ggml_compute_forward_get_rows_f16(
  9573. const struct ggml_compute_params * params,
  9574. const struct ggml_tensor * src0,
  9575. const struct ggml_tensor * src1,
  9576. struct ggml_tensor * dst) {
  9577. assert(params->ith == 0);
  9578. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9579. return;
  9580. }
  9581. const int nc = src0->ne[0];
  9582. const int nr = ggml_nelements(src1);
  9583. assert( dst->ne[0] == nc);
  9584. assert( dst->ne[1] == nr);
  9585. assert(src0->nb[0] == sizeof(ggml_fp16_t));
  9586. for (int i = 0; i < nr; ++i) {
  9587. const int r = ((int32_t *) src1->data)[i];
  9588. for (int j = 0; j < nc; ++j) {
  9589. ggml_fp16_t v = ((ggml_fp16_t *) ((char *) src0->data + r*src0->nb[1]))[j];
  9590. ((float *) ((char *) dst->data + i*dst->nb[1]))[j] = GGML_FP16_TO_FP32(v);
  9591. }
  9592. }
  9593. }
  9594. static void ggml_compute_forward_get_rows_f32(
  9595. const struct ggml_compute_params * params,
  9596. const struct ggml_tensor * src0,
  9597. const struct ggml_tensor * src1,
  9598. struct ggml_tensor * dst) {
  9599. assert(params->ith == 0);
  9600. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9601. return;
  9602. }
  9603. const int nc = src0->ne[0];
  9604. const int nr = ggml_nelements(src1);
  9605. assert( dst->ne[0] == nc);
  9606. assert( dst->ne[1] == nr);
  9607. assert(src0->nb[0] == sizeof(float));
  9608. for (int i = 0; i < nr; ++i) {
  9609. const int r = ((int32_t *) src1->data)[i];
  9610. ggml_vec_cpy_f32(nc,
  9611. (float *) ((char *) dst->data + i*dst->nb[1]),
  9612. (float *) ((char *) src0->data + r*src0->nb[1]));
  9613. }
  9614. }
  9615. static void ggml_compute_forward_get_rows(
  9616. const struct ggml_compute_params * params,
  9617. const struct ggml_tensor * src0,
  9618. const struct ggml_tensor * src1,
  9619. struct ggml_tensor * dst) {
  9620. switch (src0->type) {
  9621. case GGML_TYPE_Q4_0:
  9622. case GGML_TYPE_Q4_1:
  9623. case GGML_TYPE_Q5_0:
  9624. case GGML_TYPE_Q5_1:
  9625. case GGML_TYPE_Q8_0:
  9626. case GGML_TYPE_Q8_1:
  9627. case GGML_TYPE_Q2_K:
  9628. case GGML_TYPE_Q3_K:
  9629. case GGML_TYPE_Q4_K:
  9630. case GGML_TYPE_Q5_K:
  9631. case GGML_TYPE_Q6_K:
  9632. {
  9633. ggml_compute_forward_get_rows_q(params, src0, src1, dst);
  9634. } break;
  9635. case GGML_TYPE_F16:
  9636. {
  9637. ggml_compute_forward_get_rows_f16(params, src0, src1, dst);
  9638. } break;
  9639. case GGML_TYPE_F32:
  9640. {
  9641. ggml_compute_forward_get_rows_f32(params, src0, src1, dst);
  9642. } break;
  9643. default:
  9644. {
  9645. GGML_ASSERT(false);
  9646. } break;
  9647. }
  9648. //static bool first = true;
  9649. //printf("ne0 = %d, ne1 = %d, ne2 = %d\n", dst->ne[0], dst->ne[1], dst->ne[2]);
  9650. //if (first) {
  9651. // first = false;
  9652. //} else {
  9653. // for (int k = 0; k < dst->ne[1]; ++k) {
  9654. // for (int j = 0; j < dst->ne[0]/16; ++j) {
  9655. // for (int i = 0; i < 16; ++i) {
  9656. // printf("%8.4f ", ((float *) dst->data)[k*dst->ne[0] + j*16 + i]);
  9657. // }
  9658. // printf("\n");
  9659. // }
  9660. // printf("\n");
  9661. // }
  9662. // printf("\n");
  9663. // exit(0);
  9664. //}
  9665. }
  9666. // ggml_compute_forward_get_rows_back
  9667. static void ggml_compute_forward_get_rows_back_f32_f16(
  9668. const struct ggml_compute_params * params,
  9669. const struct ggml_tensor * src0,
  9670. const struct ggml_tensor * src1,
  9671. const struct ggml_tensor * opt0,
  9672. struct ggml_tensor * dst) {
  9673. GGML_ASSERT(params->ith == 0);
  9674. GGML_ASSERT(ggml_are_same_shape(opt0, dst));
  9675. GGML_ASSERT(ggml_is_contiguous(opt0));
  9676. GGML_ASSERT(ggml_is_contiguous(dst));
  9677. ggml_compute_forward_dup_same_cont(params, opt0, dst);
  9678. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9679. return;
  9680. }
  9681. const int nc = src0->ne[0];
  9682. const int nr = ggml_nelements(src1);
  9683. GGML_ASSERT( dst->ne[0] == nc);
  9684. GGML_ASSERT(src0->nb[0] == sizeof(ggml_fp16_t));
  9685. for (int i = 0; i < nr; ++i) {
  9686. const int r = ((int32_t *) src1->data)[i];
  9687. for (int j = 0; j < nc; ++j) {
  9688. ggml_fp16_t v = ((ggml_fp16_t *) ((char *) src0->data + i*src0->nb[1]))[j];
  9689. ((float *) ((char *) dst->data + r*dst->nb[1]))[j] += GGML_FP16_TO_FP32(v);
  9690. }
  9691. }
  9692. }
  9693. static void ggml_compute_forward_get_rows_back_f32(
  9694. const struct ggml_compute_params * params,
  9695. const struct ggml_tensor * src0,
  9696. const struct ggml_tensor * src1,
  9697. const struct ggml_tensor * opt0,
  9698. struct ggml_tensor * dst) {
  9699. GGML_ASSERT(params->ith == 0);
  9700. GGML_ASSERT(ggml_are_same_shape(opt0, dst));
  9701. GGML_ASSERT(ggml_is_contiguous(opt0));
  9702. GGML_ASSERT(ggml_is_contiguous(dst));
  9703. // ggml_compute_forward_dup_same_cont(params, opt0, dst);
  9704. if (params->type == GGML_TASK_INIT) {
  9705. memset(dst->data, 0, ggml_nbytes(dst));
  9706. }
  9707. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9708. return;
  9709. }
  9710. const int nc = src0->ne[0];
  9711. const int nr = ggml_nelements(src1);
  9712. GGML_ASSERT( dst->ne[0] == nc);
  9713. GGML_ASSERT(src0->nb[0] == sizeof(float));
  9714. for (int i = 0; i < nr; ++i) {
  9715. const int r = ((int32_t *) src1->data)[i];
  9716. ggml_vec_add_f32(nc,
  9717. (float *) ((char *) dst->data + r*dst->nb[1]),
  9718. (float *) ((char *) dst->data + r*dst->nb[1]),
  9719. (float *) ((char *) src0->data + i*src0->nb[1]));
  9720. }
  9721. }
  9722. static void ggml_compute_forward_get_rows_back(
  9723. const struct ggml_compute_params * params,
  9724. const struct ggml_tensor * src0,
  9725. const struct ggml_tensor * src1,
  9726. const struct ggml_tensor * opt0,
  9727. struct ggml_tensor * dst) {
  9728. switch (src0->type) {
  9729. case GGML_TYPE_F16:
  9730. {
  9731. ggml_compute_forward_get_rows_back_f32_f16(params, src0, src1, opt0, dst);
  9732. } break;
  9733. case GGML_TYPE_F32:
  9734. {
  9735. ggml_compute_forward_get_rows_back_f32(params, src0, src1, opt0, dst);
  9736. } break;
  9737. default:
  9738. {
  9739. GGML_ASSERT(false);
  9740. } break;
  9741. }
  9742. //static bool first = true;
  9743. //printf("ne0 = %d, ne1 = %d, ne2 = %d\n", dst->ne[0], dst->ne[1], dst->ne[2]);
  9744. //if (first) {
  9745. // first = false;
  9746. //} else {
  9747. // for (int k = 0; k < dst->ne[1]; ++k) {
  9748. // for (int j = 0; j < dst->ne[0]/16; ++j) {
  9749. // for (int i = 0; i < 16; ++i) {
  9750. // printf("%8.4f ", ((float *) dst->data)[k*dst->ne[0] + j*16 + i]);
  9751. // }
  9752. // printf("\n");
  9753. // }
  9754. // printf("\n");
  9755. // }
  9756. // printf("\n");
  9757. // exit(0);
  9758. //}
  9759. }
  9760. // ggml_compute_forward_diag
  9761. static void ggml_compute_forward_diag_f32(
  9762. const struct ggml_compute_params * params,
  9763. const struct ggml_tensor * src0,
  9764. struct ggml_tensor * dst) {
  9765. GGML_ASSERT(params->ith == 0);
  9766. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9767. return;
  9768. }
  9769. // TODO: handle transposed/permuted matrices
  9770. GGML_TENSOR_UNARY_OP_LOCALS;
  9771. GGML_ASSERT(ne00 == ne0);
  9772. GGML_ASSERT(ne00 == ne1);
  9773. GGML_ASSERT(ne01 == 1);
  9774. GGML_ASSERT(ne02 == ne2);
  9775. GGML_ASSERT(ne03 == ne3);
  9776. GGML_ASSERT(nb00 == sizeof(float));
  9777. GGML_ASSERT(nb0 == sizeof(float));
  9778. for (int i3 = 0; i3 < ne3; i3++) {
  9779. for (int i2 = 0; i2 < ne2; i2++) {
  9780. for (int i1 = 0; i1 < ne1; i1++) {
  9781. float * d = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1);
  9782. float * s = (float *)((char *) src0->data + i3*nb03 + i2*nb02);
  9783. for (int i0 = 0; i0 < i1; i0++) {
  9784. d[i0] = 0;
  9785. }
  9786. d[i1] = s[i1];
  9787. for (int i0 = i1+1; i0 < ne0; i0++) {
  9788. d[i0] = 0;
  9789. }
  9790. }
  9791. }
  9792. }
  9793. }
  9794. static void ggml_compute_forward_diag(
  9795. const struct ggml_compute_params * params,
  9796. const struct ggml_tensor * src0,
  9797. struct ggml_tensor * dst) {
  9798. switch (src0->type) {
  9799. case GGML_TYPE_F32:
  9800. {
  9801. ggml_compute_forward_diag_f32(params, src0, dst);
  9802. } break;
  9803. default:
  9804. {
  9805. GGML_ASSERT(false);
  9806. } break;
  9807. }
  9808. }
  9809. // ggml_compute_forward_diag_mask_inf
  9810. static void ggml_compute_forward_diag_mask_f32(
  9811. const struct ggml_compute_params * params,
  9812. const struct ggml_tensor * src0,
  9813. struct ggml_tensor * dst,
  9814. const float value) {
  9815. const int ith = params->ith;
  9816. const int nth = params->nth;
  9817. const int n_past = ((int32_t *) dst->op_params)[0];
  9818. const bool inplace = src0->data == dst->data;
  9819. GGML_ASSERT(n_past >= 0);
  9820. if (!inplace && (params->type == GGML_TASK_INIT)) {
  9821. // memcpy needs to be synchronized across threads to avoid race conditions.
  9822. // => do it in INIT phase
  9823. GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));
  9824. GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));
  9825. memcpy(
  9826. ((char *) dst->data),
  9827. ((char *) src0->data),
  9828. ggml_nbytes(dst));
  9829. }
  9830. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9831. return;
  9832. }
  9833. // TODO: handle transposed/permuted matrices
  9834. const int n = ggml_nrows(src0);
  9835. const int nc = src0->ne[0];
  9836. const int nr = src0->ne[1];
  9837. const int nz = n/nr;
  9838. GGML_ASSERT( dst->nb[0] == sizeof(float));
  9839. GGML_ASSERT(src0->nb[0] == sizeof(float));
  9840. for (int k = 0; k < nz; k++) {
  9841. for (int j = ith; j < nr; j += nth) {
  9842. for (int i = n_past; i < nc; i++) {
  9843. if (i > n_past + j) {
  9844. *(float *)((char *) dst->data + k*dst->nb[2] + j*dst->nb[1] + i*dst->nb[0]) = value;
  9845. }
  9846. }
  9847. }
  9848. }
  9849. }
  9850. static void ggml_compute_forward_diag_mask_inf(
  9851. const struct ggml_compute_params * params,
  9852. const struct ggml_tensor * src0,
  9853. struct ggml_tensor * dst) {
  9854. switch (src0->type) {
  9855. case GGML_TYPE_F32:
  9856. {
  9857. ggml_compute_forward_diag_mask_f32(params, src0, dst, -INFINITY);
  9858. } break;
  9859. default:
  9860. {
  9861. GGML_ASSERT(false);
  9862. } break;
  9863. }
  9864. }
  9865. static void ggml_compute_forward_diag_mask_zero(
  9866. const struct ggml_compute_params * params,
  9867. const struct ggml_tensor * src0,
  9868. struct ggml_tensor * dst) {
  9869. switch (src0->type) {
  9870. case GGML_TYPE_F32:
  9871. {
  9872. ggml_compute_forward_diag_mask_f32(params, src0, dst, 0);
  9873. } break;
  9874. default:
  9875. {
  9876. GGML_ASSERT(false);
  9877. } break;
  9878. }
  9879. }
  9880. // ggml_compute_forward_soft_max
  9881. static void ggml_compute_forward_soft_max_f32(
  9882. const struct ggml_compute_params * params,
  9883. const struct ggml_tensor * src0,
  9884. struct ggml_tensor * dst) {
  9885. GGML_ASSERT(ggml_is_contiguous(src0));
  9886. GGML_ASSERT(ggml_is_contiguous(dst));
  9887. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  9888. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9889. return;
  9890. }
  9891. // TODO: handle transposed/permuted matrices
  9892. const int ith = params->ith;
  9893. const int nth = params->nth;
  9894. const int nc = src0->ne[0];
  9895. const int nr = ggml_nrows(src0);
  9896. // rows per thread
  9897. const int dr = (nr + nth - 1)/nth;
  9898. // row range for this thread
  9899. const int ir0 = dr*ith;
  9900. const int ir1 = MIN(ir0 + dr, nr);
  9901. for (int i1 = ir0; i1 < ir1; i1++) {
  9902. float *sp = (float *)((char *) src0->data + i1*src0->nb[1]);
  9903. float *dp = (float *)((char *) dst->data + i1*dst->nb[1]);
  9904. #ifndef NDEBUG
  9905. for (int i = 0; i < nc; ++i) {
  9906. //printf("p[%d] = %f\n", i, p[i]);
  9907. assert(!isnan(sp[i]));
  9908. }
  9909. #endif
  9910. float max = -INFINITY;
  9911. ggml_vec_max_f32(nc, &max, sp);
  9912. ggml_float sum = 0.0;
  9913. uint16_t scvt;
  9914. for (int i = 0; i < nc; i++) {
  9915. if (sp[i] == -INFINITY) {
  9916. dp[i] = 0.0f;
  9917. } else {
  9918. // const float val = (sp[i] == -INFINITY) ? 0.0 : exp(sp[i] - max);
  9919. ggml_fp16_t s = GGML_FP32_TO_FP16(sp[i] - max);
  9920. memcpy(&scvt, &s, sizeof(scvt));
  9921. const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt]);
  9922. sum += (ggml_float)val;
  9923. dp[i] = val;
  9924. }
  9925. }
  9926. assert(sum > 0.0);
  9927. sum = 1.0/sum;
  9928. ggml_vec_scale_f32(nc, dp, sum);
  9929. #ifndef NDEBUG
  9930. for (int i = 0; i < nc; ++i) {
  9931. assert(!isnan(dp[i]));
  9932. assert(!isinf(dp[i]));
  9933. }
  9934. #endif
  9935. }
  9936. }
  9937. static void ggml_compute_forward_soft_max(
  9938. const struct ggml_compute_params * params,
  9939. const struct ggml_tensor * src0,
  9940. struct ggml_tensor * dst) {
  9941. switch (src0->type) {
  9942. case GGML_TYPE_F32:
  9943. {
  9944. ggml_compute_forward_soft_max_f32(params, src0, dst);
  9945. } break;
  9946. default:
  9947. {
  9948. GGML_ASSERT(false);
  9949. } break;
  9950. }
  9951. }
  9952. // ggml_compute_forward_soft_max_back
  9953. static void ggml_compute_forward_soft_max_back_f32(
  9954. const struct ggml_compute_params * params,
  9955. const struct ggml_tensor * src0,
  9956. const struct ggml_tensor * src1,
  9957. struct ggml_tensor * dst) {
  9958. GGML_ASSERT(ggml_is_contiguous(src0));
  9959. GGML_ASSERT(ggml_is_contiguous(src1));
  9960. GGML_ASSERT(ggml_is_contiguous(dst));
  9961. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  9962. GGML_ASSERT(ggml_are_same_shape(src1, dst));
  9963. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  9964. return;
  9965. }
  9966. // TODO: handle transposed/permuted matrices
  9967. const int ith = params->ith;
  9968. const int nth = params->nth;
  9969. const int nc = src0->ne[0];
  9970. const int nr = ggml_nrows(src0);
  9971. // rows per thread
  9972. const int dr = (nr + nth - 1)/nth;
  9973. // row range for this thread
  9974. const int ir0 = dr*ith;
  9975. const int ir1 = MIN(ir0 + dr, nr);
  9976. for (int i1 = ir0; i1 < ir1; i1++) {
  9977. float *dy = (float *)((char *) src0->data + i1*src0->nb[1]);
  9978. float *y = (float *)((char *) src1->data + i1*src1->nb[1]);
  9979. float *dx = (float *)((char *) dst->data + i1*dst->nb[1]);
  9980. #ifndef NDEBUG
  9981. for (int i = 0; i < nc; ++i) {
  9982. //printf("p[%d] = %f\n", i, p[i]);
  9983. assert(!isnan(dy[i]));
  9984. assert(!isnan(y[i]));
  9985. }
  9986. #endif
  9987. // Jii = yi - yi*yi
  9988. // Jij = -yi*yj
  9989. // J = diag(y)-y.T*y
  9990. // dx = J * dy
  9991. // dxk = sum_i(Jki * dyi)
  9992. // dxk = sum_i(-yk*yi * dyi) - (-yk*yk)*dyk + (yk - yk*yk)*dyk
  9993. // dxk = sum_i(-yk*yi * dyi) + yk*yk*dyk + yk*dyk - yk*yk*dyk
  9994. // dxk = sum_i(-yk*yi * dyi) + yk*dyk
  9995. // dxk = -yk * sum_i(yi * dyi) + yk*dyk
  9996. // dxk = -yk * dot(y, dy) + yk*dyk
  9997. // dxk = yk * (- dot(y, dy) + dyk)
  9998. // dxk = yk * (dyk - dot(y, dy))
  9999. //
  10000. // post-order:
  10001. // dot_y_dy := dot(y, dy)
  10002. // dx := dy
  10003. // dx := dx - dot_y_dy
  10004. // dx := dx * y
  10005. // linear runtime, no additional memory
  10006. float dot_y_dy = 0;
  10007. ggml_vec_dot_f32 (nc, &dot_y_dy, y, dy);
  10008. ggml_vec_cpy_f32 (nc, dx, dy);
  10009. ggml_vec_acc1_f32(nc, dx, -dot_y_dy);
  10010. ggml_vec_mul_f32 (nc, dx, dx, y);
  10011. #ifndef NDEBUG
  10012. for (int i = 0; i < nc; ++i) {
  10013. assert(!isnan(dx[i]));
  10014. assert(!isinf(dx[i]));
  10015. }
  10016. #endif
  10017. }
  10018. }
  10019. static void ggml_compute_forward_soft_max_back(
  10020. const struct ggml_compute_params * params,
  10021. const struct ggml_tensor * src0,
  10022. const struct ggml_tensor * src1,
  10023. struct ggml_tensor * dst) {
  10024. switch (src0->type) {
  10025. case GGML_TYPE_F32:
  10026. {
  10027. ggml_compute_forward_soft_max_back_f32(params, src0, src1, dst);
  10028. } break;
  10029. default:
  10030. {
  10031. GGML_ASSERT(false);
  10032. } break;
  10033. }
  10034. }
  10035. // ggml_compute_forward_alibi
  10036. static void ggml_compute_forward_alibi_f32(
  10037. const struct ggml_compute_params * params,
  10038. const struct ggml_tensor * src0,
  10039. struct ggml_tensor * dst) {
  10040. assert(params->ith == 0);
  10041. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  10042. return;
  10043. }
  10044. const int n_past = ((int32_t *) dst->op_params)[0];
  10045. const int n_head = ((int32_t *) dst->op_params)[1];
  10046. float max_bias;
  10047. memcpy(&max_bias, (int32_t *) dst->op_params + 2, sizeof(float));
  10048. assert(n_past >= 0);
  10049. const int ne0 = src0->ne[0]; // all_seq_len = n_past + ne1
  10050. const int ne1 = src0->ne[1]; // seq_len_without_past
  10051. const int ne2 = src0->ne[2]; // n_head -> this is k
  10052. //const int ne3 = src0->ne[3]; // 1 -> bsz
  10053. const int n = ggml_nrows(src0);
  10054. const int ne2_ne3 = n/ne1; // ne2*ne3
  10055. const int nb0 = src0->nb[0];
  10056. const int nb1 = src0->nb[1];
  10057. const int nb2 = src0->nb[2];
  10058. //const int nb3 = src0->nb[3];
  10059. GGML_ASSERT(nb0 == sizeof(float));
  10060. GGML_ASSERT(ne1 + n_past == ne0);
  10061. GGML_ASSERT(n_head == ne2);
  10062. // add alibi to src0 (KQ_scaled)
  10063. const int n_heads_log2_floor = 1 << (int) floor(log2(n_head));
  10064. const float m0 = powf(2.0f, -(max_bias) / n_heads_log2_floor);
  10065. const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_heads_log2_floor);
  10066. for (int i = 0; i < ne0; i++) {
  10067. for (int j = 0; j < ne1; j++) {
  10068. for (int k = 0; k < ne2_ne3; k++) {
  10069. float * const src = (float *)((char *) src0->data + i*nb0 + j*nb1 + k*nb2);
  10070. float * pdst = (float *)((char *) dst->data + i*nb0 + j*nb1 + k*nb2);
  10071. // TODO: k*nb2 or k*nb3
  10072. float m_k;
  10073. if (k < n_heads_log2_floor) {
  10074. m_k = powf(m0, k + 1);
  10075. } else {
  10076. m_k = powf(m1, 2 * (k - n_heads_log2_floor) + 1);
  10077. }
  10078. pdst[0] = i * m_k + src[0];
  10079. }
  10080. }
  10081. }
  10082. }
  10083. static void ggml_compute_forward_alibi_f16(
  10084. const struct ggml_compute_params * params,
  10085. const struct ggml_tensor * src0,
  10086. struct ggml_tensor * dst) {
  10087. assert(params->ith == 0);
  10088. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  10089. return;
  10090. }
  10091. const int n_past = ((int32_t *) dst->op_params)[0];
  10092. const int n_head = ((int32_t *) dst->op_params)[1];
  10093. float max_bias;
  10094. memcpy(&max_bias, (int32_t *) dst->op_params + 2, sizeof(float));
  10095. assert(n_past >= 0);
  10096. const int ne0 = src0->ne[0]; // all_seq_len = n_past + ne1
  10097. const int ne1 = src0->ne[1]; // seq_len_without_past
  10098. const int ne2 = src0->ne[2]; // n_head -> this is k
  10099. //const int ne3 = src0->ne[3]; // 1 -> bsz
  10100. const int n = ggml_nrows(src0);
  10101. const int ne2_ne3 = n/ne1; // ne2*ne3
  10102. const int nb0 = src0->nb[0];
  10103. const int nb1 = src0->nb[1];
  10104. const int nb2 = src0->nb[2];
  10105. //const int nb3 = src0->nb[3];
  10106. GGML_ASSERT(nb0 == sizeof(ggml_fp16_t));
  10107. GGML_ASSERT(ne1 + n_past == ne0); (void) n_past;
  10108. GGML_ASSERT(n_head == ne2);
  10109. // add alibi to src0 (KQ_scaled)
  10110. const int n_heads_log2_floor = 1 << (int) floor(log2(n_head));
  10111. const float m0 = powf(2.0f, -(max_bias) / n_heads_log2_floor);
  10112. const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_heads_log2_floor);
  10113. for (int i = 0; i < ne0; i++) {
  10114. for (int j = 0; j < ne1; j++) {
  10115. for (int k = 0; k < ne2_ne3; k++) {
  10116. ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i*nb0 + j*nb1 + k*nb2);
  10117. float * pdst = (float *)((char *) dst->data + i*nb0 + j*nb1 + k*nb2);
  10118. // TODO: k*nb2 or k*nb3
  10119. float m_k;
  10120. if (k < n_heads_log2_floor) {
  10121. m_k = powf(m0, k + 1);
  10122. } else {
  10123. m_k = powf(m1, 2 * (k - n_heads_log2_floor) + 1);
  10124. }
  10125. // we return F32
  10126. pdst[0] = i * m_k + GGML_FP16_TO_FP32(src[0]);
  10127. }
  10128. }
  10129. }
  10130. }
  10131. static void ggml_compute_forward_alibi(
  10132. const struct ggml_compute_params * params,
  10133. const struct ggml_tensor * src0,
  10134. struct ggml_tensor * dst) {
  10135. switch (src0->type) {
  10136. case GGML_TYPE_F16:
  10137. {
  10138. ggml_compute_forward_alibi_f16(params, src0, dst);
  10139. } break;
  10140. case GGML_TYPE_F32:
  10141. {
  10142. ggml_compute_forward_alibi_f32(params, src0, dst);
  10143. } break;
  10144. case GGML_TYPE_Q4_0:
  10145. case GGML_TYPE_Q4_1:
  10146. case GGML_TYPE_Q5_0:
  10147. case GGML_TYPE_Q5_1:
  10148. case GGML_TYPE_Q8_0:
  10149. case GGML_TYPE_Q8_1:
  10150. case GGML_TYPE_Q2_K:
  10151. case GGML_TYPE_Q3_K:
  10152. case GGML_TYPE_Q4_K:
  10153. case GGML_TYPE_Q5_K:
  10154. case GGML_TYPE_Q6_K:
  10155. case GGML_TYPE_Q8_K:
  10156. case GGML_TYPE_I8:
  10157. case GGML_TYPE_I16:
  10158. case GGML_TYPE_I32:
  10159. case GGML_TYPE_COUNT:
  10160. {
  10161. GGML_ASSERT(false);
  10162. } break;
  10163. }
  10164. }
  10165. // ggml_compute_forward_clamp
  10166. static void ggml_compute_forward_clamp_f32(
  10167. const struct ggml_compute_params * params,
  10168. const struct ggml_tensor * src0,
  10169. struct ggml_tensor * dst) {
  10170. assert(params->ith == 0);
  10171. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  10172. return;
  10173. }
  10174. float min;
  10175. float max;
  10176. memcpy(&min, (float *) dst->op_params + 0, sizeof(float));
  10177. memcpy(&max, (float *) dst->op_params + 1, sizeof(float));
  10178. const int ith = params->ith;
  10179. const int nth = params->nth;
  10180. const int n = ggml_nrows(src0);
  10181. const int nc = src0->ne[0];
  10182. const size_t nb00 = src0->nb[0];
  10183. const size_t nb01 = src0->nb[1];
  10184. const size_t nb0 = dst->nb[0];
  10185. const size_t nb1 = dst->nb[1];
  10186. GGML_ASSERT( nb0 == sizeof(float));
  10187. GGML_ASSERT(nb00 == sizeof(float));
  10188. for (int j = ith; j < n; j += nth) {
  10189. float * dst_ptr = (float *) ((char *) dst->data + j*nb1);
  10190. float * src0_ptr = (float *) ((char *) src0->data + j*nb01);
  10191. for (int i = 0; i < nc; i++) {
  10192. dst_ptr[i] = MAX(MIN(src0_ptr[i], max), min);
  10193. }
  10194. }
  10195. }
  10196. static void ggml_compute_forward_clamp(
  10197. const struct ggml_compute_params * params,
  10198. const struct ggml_tensor * src0,
  10199. struct ggml_tensor * dst) {
  10200. switch (src0->type) {
  10201. case GGML_TYPE_F32:
  10202. {
  10203. ggml_compute_forward_clamp_f32(params, src0, dst);
  10204. } break;
  10205. case GGML_TYPE_F16:
  10206. case GGML_TYPE_Q4_0:
  10207. case GGML_TYPE_Q4_1:
  10208. case GGML_TYPE_Q5_0:
  10209. case GGML_TYPE_Q5_1:
  10210. case GGML_TYPE_Q8_0:
  10211. case GGML_TYPE_Q8_1:
  10212. case GGML_TYPE_Q2_K:
  10213. case GGML_TYPE_Q3_K:
  10214. case GGML_TYPE_Q4_K:
  10215. case GGML_TYPE_Q5_K:
  10216. case GGML_TYPE_Q6_K:
  10217. case GGML_TYPE_Q8_K:
  10218. case GGML_TYPE_I8:
  10219. case GGML_TYPE_I16:
  10220. case GGML_TYPE_I32:
  10221. case GGML_TYPE_COUNT:
  10222. {
  10223. GGML_ASSERT(false);
  10224. } break;
  10225. }
  10226. }
  10227. // ggml_compute_forward_rope
  10228. static void ggml_compute_forward_rope_f32(
  10229. const struct ggml_compute_params * params,
  10230. const struct ggml_tensor * src0,
  10231. struct ggml_tensor * dst) {
  10232. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  10233. return;
  10234. }
  10235. float freq_base;
  10236. float freq_scale;
  10237. // these two only relevant for xPos RoPE:
  10238. float xpos_base;
  10239. bool xpos_down;
  10240. const int n_past = ((int32_t *) dst->op_params)[0];
  10241. const int n_dims = ((int32_t *) dst->op_params)[1];
  10242. const int mode = ((int32_t *) dst->op_params)[2];
  10243. const int n_ctx = ((int32_t *) dst->op_params)[3];
  10244. memcpy(&freq_base, (int32_t *) dst->op_params + 4, sizeof(float));
  10245. memcpy(&freq_scale, (int32_t *) dst->op_params + 5, sizeof(float));
  10246. memcpy(&xpos_base, (int32_t *) dst->op_params + 6, sizeof(float));
  10247. memcpy(&xpos_down, (int32_t *) dst->op_params + 7, sizeof(bool));
  10248. assert(n_past >= 0);
  10249. GGML_TENSOR_UNARY_OP_LOCALS;
  10250. //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
  10251. //printf("n_past = %d, ne2 = %d\n", n_past, ne2);
  10252. GGML_ASSERT(nb00 == sizeof(float));
  10253. const int ith = params->ith;
  10254. const int nth = params->nth;
  10255. const int nr = ggml_nrows(dst);
  10256. GGML_ASSERT(n_dims <= ne0);
  10257. GGML_ASSERT(n_dims % 2 == 0);
  10258. // rows per thread
  10259. const int dr = (nr + nth - 1)/nth;
  10260. // row range for this thread
  10261. const int ir0 = dr*ith;
  10262. const int ir1 = MIN(ir0 + dr, nr);
  10263. // row index used to determine which thread to use
  10264. int ir = 0;
  10265. const float theta_scale = powf(freq_base, -2.0f/n_dims);
  10266. const bool is_neox = mode & 2;
  10267. const bool is_glm = mode & 4;
  10268. for (int64_t i3 = 0; i3 < ne3; i3++) {
  10269. for (int64_t i2 = ((mode & 1) == 0 ? 0 : n_past); i2 < ne2; i2++) {
  10270. const int64_t p = ((mode & 1) == 0 ? n_past + i2 : i2);
  10271. for (int64_t i1 = 0; i1 < ne1; i1++) {
  10272. if (ir++ < ir0) continue;
  10273. if (ir > ir1) break;
  10274. float theta = freq_scale * (float)p;
  10275. if (is_glm) {
  10276. theta = MIN(p, n_ctx - 2);
  10277. float block_theta = MAX(p - (n_ctx - 2), 0);
  10278. for (int64_t i0 = 0; i0 < ne0 / 4; i0++) {
  10279. const float cos_theta = cosf(theta);
  10280. const float sin_theta = sinf(theta);
  10281. const float cos_block_theta = cosf(block_theta);
  10282. const float sin_block_theta = sinf(block_theta);
  10283. theta *= theta_scale;
  10284. block_theta *= theta_scale;
  10285. const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  10286. float * dst_data = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  10287. const float x0 = src[0];
  10288. const float x1 = src[n_dims/2];
  10289. const float x2 = src[n_dims];
  10290. const float x3 = src[n_dims/2*3];
  10291. dst_data[0] = x0*cos_theta - x1*sin_theta;
  10292. dst_data[n_dims/2] = x0*sin_theta + x1*cos_theta;
  10293. dst_data[n_dims] = x2*cos_block_theta - x3*sin_block_theta;
  10294. dst_data[n_dims/2*3] = x2*sin_block_theta + x3*cos_block_theta;
  10295. }
  10296. } else if (!is_neox) {
  10297. for (int64_t i0 = 0; i0 < ne0; i0 += 2) {
  10298. const float cos_theta = cosf(theta);
  10299. const float sin_theta = sinf(theta);
  10300. // zeta scaling for xPos only:
  10301. float zeta = xpos_base != 0.0f ? powf((i0 + 0.4f * ne0) / (1.4f * ne0), (n_past + i2) / xpos_base) : 1.0f;
  10302. if (xpos_down) zeta = 1.0f / zeta;
  10303. theta *= theta_scale;
  10304. const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  10305. float * dst_data = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  10306. const float x0 = src[0];
  10307. const float x1 = src[1];
  10308. dst_data[0] = x0*cos_theta*zeta - x1*sin_theta*zeta;
  10309. dst_data[1] = x0*sin_theta*zeta + x1*cos_theta*zeta;
  10310. }
  10311. } else {
  10312. // TODO: this might be wrong for ne0 != n_dims - need double check
  10313. // ref: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt_neox/modeling_gpt_neox.py#LL251C1-L294C28
  10314. for (int64_t ib = 0; ib < ne0/n_dims; ++ib) {
  10315. for (int64_t ic = 0; ic < n_dims; ic += 2) {
  10316. const float cos_theta = cosf(theta);
  10317. const float sin_theta = sinf(theta);
  10318. theta *= theta_scale;
  10319. const int64_t i0 = ib*n_dims + ic/2;
  10320. const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  10321. float * dst_data = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  10322. const float x0 = src[0];
  10323. const float x1 = src[n_dims/2];
  10324. dst_data[0] = x0*cos_theta - x1*sin_theta;
  10325. dst_data[n_dims/2] = x0*sin_theta + x1*cos_theta;
  10326. }
  10327. }
  10328. }
  10329. }
  10330. }
  10331. }
  10332. }
  10333. static void ggml_compute_forward_rope_f16(
  10334. const struct ggml_compute_params * params,
  10335. const struct ggml_tensor * src0,
  10336. struct ggml_tensor * dst) {
  10337. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  10338. return;
  10339. }
  10340. float freq_base;
  10341. float freq_scale;
  10342. const int n_past = ((int32_t *) dst->op_params)[0];
  10343. const int n_dims = ((int32_t *) dst->op_params)[1];
  10344. const int mode = ((int32_t *) dst->op_params)[2];
  10345. const int n_ctx = ((int32_t *) dst->op_params)[3];
  10346. memcpy(&freq_base, (int32_t *) dst->op_params + 4, sizeof(float));
  10347. memcpy(&freq_scale, (int32_t *) dst->op_params + 5, sizeof(float));
  10348. assert(n_past >= 0);
  10349. GGML_TENSOR_UNARY_OP_LOCALS;
  10350. //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
  10351. //printf("n_past = %d, ne2 = %d\n", n_past, ne2);
  10352. GGML_ASSERT(nb0 == sizeof(ggml_fp16_t));
  10353. const int ith = params->ith;
  10354. const int nth = params->nth;
  10355. const int nr = ggml_nrows(dst);
  10356. GGML_ASSERT(n_dims <= ne0);
  10357. GGML_ASSERT(n_dims % 2 == 0);
  10358. // rows per thread
  10359. const int dr = (nr + nth - 1)/nth;
  10360. // row range for this thread
  10361. const int ir0 = dr*ith;
  10362. const int ir1 = MIN(ir0 + dr, nr);
  10363. // row index used to determine which thread to use
  10364. int ir = 0;
  10365. const float theta_scale = powf(freq_base, -2.0f/n_dims);
  10366. const bool is_neox = mode & 2;
  10367. const bool is_glm = mode & 4;
  10368. for (int64_t i3 = 0; i3 < ne3; i3++) {
  10369. for (int64_t i2 = ((mode & 1) == 0 ? 0 : n_past); i2 < ne2; i2++) {
  10370. const int64_t p = ((mode & 1) == 0 ? n_past + i2 : i2);
  10371. for (int64_t i1 = 0; i1 < ne1; i1++) {
  10372. if (ir++ < ir0) continue;
  10373. if (ir > ir1) break;
  10374. float theta = freq_scale * (float)p;
  10375. if (is_glm) {
  10376. theta = MIN(p, n_ctx - 2);
  10377. float block_theta = MAX(p - (n_ctx - 2), 0);
  10378. for (int64_t i0 = 0; i0 < ne0 / 4; i0++) {
  10379. const float cos_theta = cosf(theta);
  10380. const float sin_theta = sinf(theta);
  10381. const float cos_block_theta = cosf(block_theta);
  10382. const float sin_block_theta = sinf(block_theta);
  10383. theta *= theta_scale;
  10384. block_theta *= theta_scale;
  10385. const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  10386. ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  10387. const float x0 = GGML_FP16_TO_FP32(src[0]);
  10388. const float x1 = GGML_FP16_TO_FP32(src[n_dims/2]);
  10389. const float x2 = GGML_FP16_TO_FP32(src[n_dims]);
  10390. const float x3 = GGML_FP16_TO_FP32(src[n_dims/2*3]);
  10391. dst_data[0] = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta);
  10392. dst_data[n_dims/2] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta);
  10393. dst_data[n_dims] = GGML_FP32_TO_FP16(x2*cos_block_theta - x3*sin_block_theta);
  10394. dst_data[n_dims/2*3] = GGML_FP32_TO_FP16(x2*sin_block_theta + x3*cos_block_theta);
  10395. }
  10396. } if (!is_neox) {
  10397. for (int64_t i0 = 0; i0 < ne0; i0 += 2) {
  10398. const float cos_theta = cosf(theta);
  10399. const float sin_theta = sinf(theta);
  10400. theta *= theta_scale;
  10401. const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  10402. ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  10403. const float x0 = GGML_FP16_TO_FP32(src[0]);
  10404. const float x1 = GGML_FP16_TO_FP32(src[1]);
  10405. dst_data[0] = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta);
  10406. dst_data[1] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta);
  10407. }
  10408. } else {
  10409. // TODO: this might be wrong for ne0 != n_dims - need double check
  10410. // ref: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt_neox/modeling_gpt_neox.py#LL251C1-L294C28
  10411. for (int64_t ib = 0; ib < ne0/n_dims; ++ib) {
  10412. for (int64_t ic = 0; ic < n_dims; ic += 2) {
  10413. const float cos_theta = cosf(theta);
  10414. const float sin_theta = sinf(theta);
  10415. theta *= theta_scale;
  10416. const int64_t i0 = ib*n_dims + ic/2;
  10417. const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  10418. ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  10419. const float x0 = GGML_FP16_TO_FP32(src[0]);
  10420. const float x1 = GGML_FP16_TO_FP32(src[n_dims/2]);
  10421. dst_data[0] = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta);
  10422. dst_data[n_dims/2] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta);
  10423. }
  10424. }
  10425. }
  10426. }
  10427. }
  10428. }
  10429. }
  10430. static void ggml_compute_forward_rope(
  10431. const struct ggml_compute_params * params,
  10432. const struct ggml_tensor * src0,
  10433. struct ggml_tensor * dst) {
  10434. switch (src0->type) {
  10435. case GGML_TYPE_F16:
  10436. {
  10437. ggml_compute_forward_rope_f16(params, src0, dst);
  10438. } break;
  10439. case GGML_TYPE_F32:
  10440. {
  10441. ggml_compute_forward_rope_f32(params, src0, dst);
  10442. } break;
  10443. default:
  10444. {
  10445. GGML_ASSERT(false);
  10446. } break;
  10447. }
  10448. }
  10449. // ggml_compute_forward_rope_back
  10450. static void ggml_compute_forward_rope_back_f32(
  10451. const struct ggml_compute_params * params,
  10452. const struct ggml_tensor * src0,
  10453. struct ggml_tensor * dst) {
  10454. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  10455. return;
  10456. }
  10457. // y = rope(x, src1)
  10458. // dx = rope_back(dy, src1)
  10459. // src0 is dy, src1 contains options
  10460. float freq_base;
  10461. float freq_scale;
  10462. // these two only relevant for xPos RoPE:
  10463. float xpos_base;
  10464. bool xpos_down;
  10465. const int n_past = ((int32_t *) dst->op_params)[0];
  10466. const int n_dims = ((int32_t *) dst->op_params)[1];
  10467. const int mode = ((int32_t *) dst->op_params)[2];
  10468. const int n_ctx = ((int32_t *) dst->op_params)[3]; UNUSED(n_ctx);
  10469. memcpy(&freq_base, (int32_t *) dst->op_params + 4, sizeof(float));
  10470. memcpy(&freq_scale, (int32_t *) dst->op_params + 5, sizeof(float));
  10471. memcpy(&xpos_base, (int32_t *) dst->op_params + 6, sizeof(float));
  10472. memcpy(&xpos_down, (int32_t *) dst->op_params + 7, sizeof(bool));
  10473. assert(n_past >= 0);
  10474. GGML_TENSOR_UNARY_OP_LOCALS;
  10475. //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
  10476. //printf("n_past = %d, ne2 = %d\n", n_past, ne2);
  10477. assert(nb0 == sizeof(float));
  10478. const int ith = params->ith;
  10479. const int nth = params->nth;
  10480. const int nr = ggml_nrows(dst);
  10481. // rows per thread
  10482. const int dr = (nr + nth - 1)/nth;
  10483. // row range for this thread
  10484. const int ir0 = dr*ith;
  10485. const int ir1 = MIN(ir0 + dr, nr);
  10486. // row index used to determine which thread to use
  10487. int ir = 0;
  10488. const float theta_scale = powf(freq_base, -2.0f/n_dims);
  10489. const bool is_neox = mode & 2;
  10490. for (int64_t i3 = 0; i3 < ne3; i3++) {
  10491. for (int64_t i2 = ((mode & 1) == 0 ? 0 : n_past); i2 < ne2; i2++) {
  10492. const int64_t p = ((mode & 1) == 0 ? n_past + i2 : i2);
  10493. for (int64_t i1 = 0; i1 < ne1; i1++) {
  10494. if (ir++ < ir0) continue;
  10495. if (ir > ir1) break;
  10496. float theta = freq_scale * (float)p;
  10497. if (!is_neox) {
  10498. for (int64_t i0 = 0; i0 < ne0; i0 += 2) {
  10499. const float cos_theta = cosf(theta);
  10500. const float sin_theta = sinf(theta);
  10501. // zeta scaling for xPos only:
  10502. float zeta = xpos_base != 0.0f ? powf((i0 + 0.4f * ne0) / (1.4f * ne0), (n_past + i2) / xpos_base) : 1.0f;
  10503. if (xpos_down) zeta = 1.0f / zeta;
  10504. theta *= theta_scale;
  10505. const float * const dy = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  10506. float * dx = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  10507. const float dy0 = dy[0];
  10508. const float dy1 = dy[1];
  10509. dx[0] = dy0*cos_theta*zeta + dy1*sin_theta*zeta;
  10510. dx[1] = - dy0*sin_theta*zeta + dy1*cos_theta*zeta;
  10511. }
  10512. } else {
  10513. for (int64_t ib = 0; ib < ne0/n_dims; ++ib) {
  10514. for (int64_t ic = 0; ic < n_dims; ic += 2) {
  10515. const float cos_theta = cosf(theta);
  10516. const float sin_theta = sinf(theta);
  10517. theta *= theta_scale;
  10518. const int64_t i0 = ib*n_dims + ic/2;
  10519. const float * const dy = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  10520. float * dx = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  10521. const float dy0 = dy[0];
  10522. const float dy1 = dy[n_dims/2];
  10523. dx[0] = dy0*cos_theta + dy1*sin_theta;
  10524. dx[n_dims/2] = - dy0*sin_theta + dy1*cos_theta;
  10525. }
  10526. }
  10527. }
  10528. }
  10529. }
  10530. }
  10531. }
  10532. static void ggml_compute_forward_rope_back_f16(
  10533. const struct ggml_compute_params * params,
  10534. const struct ggml_tensor * src0,
  10535. struct ggml_tensor * dst) {
  10536. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  10537. return;
  10538. }
  10539. // y = rope(x, src1)
  10540. // dx = rope_back(dy, src1)
  10541. // src0 is dy, src1 contains options
  10542. const int n_past = ((int32_t *) dst->op_params)[0];
  10543. const int n_dims = ((int32_t *) dst->op_params)[1];
  10544. const int mode = ((int32_t *) dst->op_params)[2];
  10545. assert(n_past >= 0);
  10546. GGML_TENSOR_UNARY_OP_LOCALS;
  10547. //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
  10548. //printf("n_past = %d, ne2 = %d\n", n_past, ne2);
  10549. assert(nb0 == sizeof(ggml_fp16_t));
  10550. const int ith = params->ith;
  10551. const int nth = params->nth;
  10552. const int nr = ggml_nrows(dst);
  10553. // rows per thread
  10554. const int dr = (nr + nth - 1)/nth;
  10555. // row range for this thread
  10556. const int ir0 = dr*ith;
  10557. const int ir1 = MIN(ir0 + dr, nr);
  10558. // row index used to determine which thread to use
  10559. int ir = 0;
    // note: unlike the f32 path above, which uses the freq_base variable, the base is hard-coded to 10000 here
    const float theta_scale = powf(10000.0, -2.0f/n_dims);
  10561. const bool is_neox = mode & 2;
  10562. for (int64_t i3 = 0; i3 < ne3; i3++) {
  10563. for (int64_t i2 = ((mode & 1) == 0 ? 0 : n_past); i2 < ne2; i2++) {
  10564. const int64_t p = ((mode & 1) == 0 ? n_past + i2 : i2);
  10565. for (int64_t i1 = 0; i1 < ne1; i1++) {
  10566. if (ir++ < ir0) continue;
  10567. if (ir > ir1) break;
  10568. float theta = (float)p;
  10569. if (!is_neox) {
  10570. for (int64_t i0 = 0; i0 < ne0; i0 += 2) {
  10571. const float cos_theta = cosf(theta);
  10572. const float sin_theta = sinf(theta);
  10573. theta *= theta_scale;
  10574. const ggml_fp16_t * const dy = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  10575. ggml_fp16_t * dx = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  10576. const float dy0 = GGML_FP16_TO_FP32(dy[0]);
  10577. const float dy1 = GGML_FP16_TO_FP32(dy[1]);
  10578. dx[0] = GGML_FP32_TO_FP16( dy0*cos_theta + dy1*sin_theta);
  10579. dx[1] = GGML_FP32_TO_FP16(-dy0*sin_theta + dy1*cos_theta);
  10580. }
  10581. } else {
  10582. for (int64_t ib = 0; ib < ne0/n_dims; ++ib) {
  10583. for (int64_t ic = 0; ic < n_dims; ic += 2) {
  10584. const float cos_theta = cosf(theta);
  10585. const float sin_theta = sinf(theta);
  10586. theta *= theta_scale;
  10587. const int64_t i0 = ib*n_dims + ic/2;
  10588. const ggml_fp16_t * const dy = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
  10589. ggml_fp16_t * dx = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
  10590. const float dy0 = GGML_FP16_TO_FP32(dy[0]);
  10591. const float dy1 = GGML_FP16_TO_FP32(dy[n_dims/2]);
  10592. dx[0] = GGML_FP32_TO_FP16( dy0*cos_theta + dy1*sin_theta);
  10593. dx[n_dims/2] = GGML_FP32_TO_FP16(-dy0*sin_theta + dy1*cos_theta);
  10594. }
  10595. }
  10596. }
  10597. }
  10598. }
  10599. }
  10600. }
  10601. static void ggml_compute_forward_rope_back(
  10602. const struct ggml_compute_params * params,
  10603. const struct ggml_tensor * src0,
  10604. struct ggml_tensor * dst) {
  10605. switch (src0->type) {
  10606. case GGML_TYPE_F16:
  10607. {
  10608. ggml_compute_forward_rope_back_f16(params, src0, dst);
  10609. } break;
  10610. case GGML_TYPE_F32:
  10611. {
  10612. ggml_compute_forward_rope_back_f32(params, src0, dst);
  10613. } break;
  10614. default:
  10615. {
  10616. GGML_ASSERT(false);
  10617. } break;
  10618. }
  10619. }
  10620. // ggml_compute_forward_conv_1d
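// The conv_1d kernels below rely on a layout set up in the INIT pass: both the kernel
// (src0) and the signal (src1) are repacked into wdata so that the channel dimension is
// innermost and padded to a multiple of 32 (ew0 = ggml_up32(ne01)), with the signal also
// zero-padded by nh = nk/2 samples on each side ("half padding"). One output sample is
// then a sum of kernel-width dot products, each of length ew0:
//
//   dst[i1][i0] = sum_{k = -nh..nh} dot(kernel_row(i1, nh + k), signal_row(i0 + nh + k))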
  10621. static void ggml_compute_forward_conv_1d_s1_ph_f16_f32(
  10622. const struct ggml_compute_params * params,
  10623. const struct ggml_tensor * src0,
  10624. const struct ggml_tensor * src1,
  10625. struct ggml_tensor * dst) {
  10626. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  10627. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  10628. GGML_ASSERT( dst->type == GGML_TYPE_F32);
  10629. int64_t t0 = ggml_perf_time_us();
  10630. UNUSED(t0);
  10631. GGML_TENSOR_BINARY_OP_LOCALS;
  10632. const int ith = params->ith;
  10633. const int nth = params->nth;
  10634. const int nk = ne00;
  10635. const int nh = nk/2;
  10636. const int ew0 = ggml_up32(ne01);
  10637. GGML_ASSERT(ne00 % 2 == 1); // TODO: support even kernel sizes
  10638. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  10639. GGML_ASSERT(nb10 == sizeof(float));
  10640. if (params->type == GGML_TASK_INIT) {
  10641. // TODO: fix this memset (wsize is overestimated)
  10642. memset(params->wdata, 0, params->wsize);
  10643. // prepare kernel data (src0)
  10644. {
  10645. ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;
  10646. for (int64_t i02 = 0; i02 < ne02; i02++) {
  10647. for (int64_t i01 = 0; i01 < ne01; i01++) {
  10648. const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i02*nb02 + i01*nb01);
  10649. ggml_fp16_t * dst_data = wdata + i02*ew0*ne00;
  10650. for (int64_t i00 = 0; i00 < ne00; i00++) {
  10651. dst_data[i00*ew0 + i01] = src[i00];
  10652. }
  10653. }
  10654. }
  10655. }
  10656. // prepare source data (src1)
  10657. {
  10658. ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + ne02*ew0*ne00;
  10659. for (int64_t i11 = 0; i11 < ne11; i11++) {
  10660. const float * const src = (float *)((char *) src1->data + i11*nb11);
  10661. ggml_fp16_t * dst_data = wdata;
  10662. for (int64_t i10 = 0; i10 < ne10; i10++) {
  10663. dst_data[(i10 + nh)*ew0 + i11] = GGML_FP32_TO_FP16(src[i10]);
  10664. }
  10665. }
  10666. }
  10667. return;
  10668. }
  10669. if (params->type == GGML_TASK_FINALIZE) {
  10670. return;
  10671. }
  10672. // total rows in dst
  10673. const int nr = ne02;
  10674. // rows per thread
  10675. const int dr = (nr + nth - 1)/nth;
  10676. // row range for this thread
  10677. const int ir0 = dr*ith;
  10678. const int ir1 = MIN(ir0 + dr, nr);
  10679. for (int i1 = ir0; i1 < ir1; i1++) {
  10680. float * dst_data = (float *)((char *) dst->data + i1*nb1);
  10681. for (int64_t i0 = 0; i0 < ne10; ++i0) {
  10682. dst_data[i0] = 0;
  10683. for (int k = -nh; k <= nh; k++) {
  10684. float v = 0.0f;
  10685. ggml_vec_dot_f16(ew0, &v,
  10686. (ggml_fp16_t *) params->wdata + i1*ew0*ne00 + (nh + k)*ew0,
  10687. (ggml_fp16_t *) params->wdata + ne02*ew0*ne00 + (i0 + nh + k)*ew0);
  10688. dst_data[i0] += v;
  10689. }
  10690. }
  10691. }
  10692. }
  10693. static void ggml_compute_forward_conv_1d_s1_ph_f32(
  10694. const struct ggml_compute_params * params,
  10695. const struct ggml_tensor * src0,
  10696. const struct ggml_tensor * src1,
  10697. struct ggml_tensor * dst) {
  10698. GGML_ASSERT(src0->type == GGML_TYPE_F32);
  10699. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  10700. GGML_ASSERT( dst->type == GGML_TYPE_F32);
  10701. int64_t t0 = ggml_perf_time_us();
  10702. UNUSED(t0);
  10703. GGML_TENSOR_BINARY_OP_LOCALS;
  10704. const int ith = params->ith;
  10705. const int nth = params->nth;
  10706. const int nk = ne00;
  10707. const int nh = nk/2;
  10708. const int ew0 = ggml_up32(ne01);
  10709. GGML_ASSERT(ne00 % 2 == 1); // TODO: support even kernel sizes
  10710. GGML_ASSERT(nb00 == sizeof(float));
  10711. GGML_ASSERT(nb10 == sizeof(float));
  10712. if (params->type == GGML_TASK_INIT) {
  10713. // TODO: fix this memset (wsize is overestimated)
  10714. memset(params->wdata, 0, params->wsize);
  10715. // prepare kernel data (src0)
  10716. {
  10717. float * const wdata = (float *) params->wdata + 0;
  10718. for (int64_t i02 = 0; i02 < ne02; i02++) {
  10719. for (int64_t i01 = 0; i01 < ne01; i01++) {
  10720. const float * const src = (float *)((char *) src0->data + i02*nb02 + i01*nb01);
  10721. float * dst_data = wdata + i02*ew0*ne00;
  10722. for (int64_t i00 = 0; i00 < ne00; i00++) {
  10723. dst_data[i00*ew0 + i01] = src[i00];
  10724. }
  10725. }
  10726. }
  10727. }
  10728. // prepare source data (src1)
  10729. {
  10730. float * const wdata = (float *) params->wdata + ne02*ew0*ne00;
  10731. for (int64_t i11 = 0; i11 < ne11; i11++) {
  10732. const float * const src = (float *)((char *) src1->data + i11*nb11);
  10733. float * dst_data = wdata;
  10734. for (int64_t i10 = 0; i10 < ne10; i10++) {
  10735. dst_data[(i10 + nh)*ew0 + i11] = src[i10];
  10736. }
  10737. }
  10738. }
  10739. return;
  10740. }
  10741. if (params->type == GGML_TASK_FINALIZE) {
  10742. return;
  10743. }
  10744. // total rows in dst
  10745. const int nr = ne02;
  10746. // rows per thread
  10747. const int dr = (nr + nth - 1)/nth;
  10748. // row range for this thread
  10749. const int ir0 = dr*ith;
  10750. const int ir1 = MIN(ir0 + dr, nr);
  10751. for (int i1 = ir0; i1 < ir1; i1++) {
  10752. float * dst_data = (float *)((char *) dst->data + i1*nb1);
  10753. for (int64_t i0 = 0; i0 < ne10; ++i0) {
  10754. dst_data[i0] = 0;
  10755. for (int k = -nh; k <= nh; k++) {
  10756. float v = 0.0f;
  10757. ggml_vec_dot_f32(ew0, &v,
  10758. (float *) params->wdata + i1*ew0*ne00 + (nh + k)*ew0,
  10759. (float *) params->wdata + ne02*ew0*ne00 + (i0 + nh + k)*ew0);
  10760. dst_data[i0] += v;
  10761. }
  10762. }
  10763. }
  10764. }
  10765. static void ggml_compute_forward_conv_1d_s1_ph(
  10766. const struct ggml_compute_params * params,
  10767. const struct ggml_tensor * src0,
  10768. const struct ggml_tensor * src1,
  10769. struct ggml_tensor * dst) {
  10770. switch (src0->type) {
  10771. case GGML_TYPE_F16:
  10772. {
  10773. ggml_compute_forward_conv_1d_s1_ph_f16_f32(params, src0, src1, dst);
  10774. } break;
  10775. case GGML_TYPE_F32:
  10776. {
  10777. ggml_compute_forward_conv_1d_s1_ph_f32(params, src0, src1, dst);
  10778. } break;
  10779. default:
  10780. {
  10781. GGML_ASSERT(false);
  10782. } break;
  10783. }
  10784. }
  10785. static void ggml_compute_forward_conv_1d_s2_ph_f16_f32(
  10786. const struct ggml_compute_params * params,
  10787. const struct ggml_tensor * src0,
  10788. const struct ggml_tensor * src1,
  10789. struct ggml_tensor * dst) {
  10790. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  10791. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  10792. GGML_ASSERT( dst->type == GGML_TYPE_F32);
  10793. int64_t t0 = ggml_perf_time_us();
  10794. UNUSED(t0);
  10795. GGML_TENSOR_BINARY_OP_LOCALS;
  10796. const int ith = params->ith;
  10797. const int nth = params->nth;
  10798. const int nk = ne00;
  10799. const int nh = nk/2;
  10800. const int ew0 = ggml_up32(ne01);
  10801. GGML_ASSERT(ne00 % 2 == 1); // TODO: support even kernel sizes
  10802. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  10803. GGML_ASSERT(nb10 == sizeof(float));
  10804. if (params->type == GGML_TASK_INIT) {
  10805. // TODO: fix this memset (wsize is overestimated)
  10806. memset(params->wdata, 0, params->wsize);
  10807. // prepare kernel data (src0)
  10808. {
  10809. ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;
  10810. for (int64_t i02 = 0; i02 < ne02; i02++) {
  10811. for (int64_t i01 = 0; i01 < ne01; i01++) {
  10812. const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i02*nb02 + i01*nb01);
  10813. ggml_fp16_t * dst_data = wdata + i02*ew0*ne00;
  10814. for (int64_t i00 = 0; i00 < ne00; i00++) {
  10815. dst_data[i00*ew0 + i01] = src[i00];
  10816. }
  10817. }
  10818. }
  10819. }
  10820. // prepare source data (src1)
  10821. {
  10822. ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + ne02*ew0*ne00;
  10823. for (int64_t i11 = 0; i11 < ne11; i11++) {
  10824. const float * const src = (float *)((char *) src1->data + i11*nb11);
  10825. ggml_fp16_t * dst_data = wdata;
  10826. for (int64_t i10 = 0; i10 < ne10; i10++) {
  10827. dst_data[(i10 + nh)*ew0 + i11] = GGML_FP32_TO_FP16(src[i10]);
  10828. }
  10829. }
  10830. }
  10831. return;
  10832. }
  10833. if (params->type == GGML_TASK_FINALIZE) {
  10834. return;
  10835. }
  10836. // total rows in dst
  10837. const int nr = ne02;
  10838. // rows per thread
  10839. const int dr = (nr + nth - 1)/nth;
  10840. // row range for this thread
  10841. const int ir0 = dr*ith;
  10842. const int ir1 = MIN(ir0 + dr, nr);
  10843. for (int i1 = ir0; i1 < ir1; i1++) {
  10844. float * dst_data = (float *)((char *) dst->data + i1*nb1);
  10845. for (int64_t i0 = 0; i0 < ne10; i0 += 2) {
  10846. dst_data[i0/2] = 0;
  10847. for (int k = -nh; k <= nh; k++) {
  10848. float v = 0.0f;
  10849. ggml_vec_dot_f16(ew0, &v,
  10850. (ggml_fp16_t *) params->wdata + i1*ew0*ne00 + (nh + k)*ew0,
  10851. (ggml_fp16_t *) params->wdata + ne02*ew0*ne00 + (i0 + nh + k)*ew0);
  10852. dst_data[i0/2] += v;
  10853. }
  10854. }
  10855. }
  10856. }
  10857. static void ggml_compute_forward_conv_1d_s2_ph_f32(
  10858. const struct ggml_compute_params * params,
  10859. const struct ggml_tensor * src0,
  10860. const struct ggml_tensor * src1,
  10861. struct ggml_tensor * dst) {
  10862. GGML_ASSERT(src0->type == GGML_TYPE_F32);
  10863. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  10864. GGML_ASSERT( dst->type == GGML_TYPE_F32);
  10865. int64_t t0 = ggml_perf_time_us();
  10866. UNUSED(t0);
  10867. GGML_TENSOR_BINARY_OP_LOCALS;
  10868. const int ith = params->ith;
  10869. const int nth = params->nth;
  10870. const int nk = ne00;
  10871. const int nh = nk/2;
  10872. const int ew0 = ggml_up32(ne01);
  10873. GGML_ASSERT(ne00 % 2 == 1); // TODO: support even kernel sizes
  10874. GGML_ASSERT(nb00 == sizeof(float));
  10875. GGML_ASSERT(nb10 == sizeof(float));
  10876. if (params->type == GGML_TASK_INIT) {
  10877. // TODO: fix this memset (wsize is overestimated)
  10878. memset(params->wdata, 0, params->wsize);
  10879. // prepare kernel data (src0)
  10880. {
  10881. float * const wdata = (float *) params->wdata + 0;
  10882. for (int64_t i02 = 0; i02 < ne02; i02++) {
  10883. for (int64_t i01 = 0; i01 < ne01; i01++) {
  10884. const float * const src = (float *)((char *) src0->data + i02*nb02 + i01*nb01);
  10885. float * dst_data = wdata + i02*ew0*ne00;
  10886. for (int64_t i00 = 0; i00 < ne00; i00++) {
  10887. dst_data[i00*ew0 + i01] = src[i00];
  10888. }
  10889. }
  10890. }
  10891. }
  10892. // prepare source data (src1)
  10893. {
  10894. float * const wdata = (float *) params->wdata + ne02*ew0*ne00;
  10895. for (int64_t i11 = 0; i11 < ne11; i11++) {
  10896. const float * const src = (float *)((char *) src1->data + i11*nb11);
  10897. float * dst_data = wdata;
  10898. for (int64_t i10 = 0; i10 < ne10; i10++) {
  10899. dst_data[(i10 + nh)*ew0 + i11] = src[i10];
  10900. }
  10901. }
  10902. }
  10903. return;
  10904. }
  10905. if (params->type == GGML_TASK_FINALIZE) {
  10906. return;
  10907. }
  10908. // total rows in dst
  10909. const int nr = ne02;
  10910. // rows per thread
  10911. const int dr = (nr + nth - 1)/nth;
  10912. // row range for this thread
  10913. const int ir0 = dr*ith;
  10914. const int ir1 = MIN(ir0 + dr, nr);
  10915. for (int i1 = ir0; i1 < ir1; i1++) {
  10916. float * dst_data = (float *)((char *) dst->data + i1*nb1);
  10917. for (int64_t i0 = 0; i0 < ne10; i0 += 2) {
  10918. dst_data[i0/2] = 0;
  10919. for (int k = -nh; k <= nh; k++) {
  10920. float v = 0.0f;
  10921. ggml_vec_dot_f32(ew0, &v,
  10922. (float *) params->wdata + i1*ew0*ne00 + (nh + k)*ew0,
  10923. (float *) params->wdata + ne02*ew0*ne00 + (i0 + nh + k)*ew0);
  10924. dst_data[i0/2] += v;
  10925. }
  10926. }
  10927. }
  10928. }
  10929. static void ggml_compute_forward_conv_1d_s2_ph(
  10930. const struct ggml_compute_params * params,
  10931. const struct ggml_tensor * src0,
  10932. const struct ggml_tensor * src1,
  10933. struct ggml_tensor * dst) {
  10934. switch (src0->type) {
  10935. case GGML_TYPE_F16:
  10936. {
  10937. ggml_compute_forward_conv_1d_s2_ph_f16_f32(params, src0, src1, dst);
  10938. } break;
  10939. case GGML_TYPE_F32:
  10940. {
  10941. ggml_compute_forward_conv_1d_s2_ph_f32(params, src0, src1, dst);
  10942. } break;
  10943. default:
  10944. {
  10945. GGML_ASSERT(false);
  10946. } break;
  10947. }
  10948. }
  10949. // ggml_compute_forward_conv_1d
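// conv_1d dispatch: op_params holds [s0 (stride), p0 (padding), d0 (dilation)];
// only d0 == 1, half padding (p0 == kernel_size/2) and strides 1 or 2 are supported.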
  10950. static void ggml_compute_forward_conv_1d(
  10951. const struct ggml_compute_params * params,
  10952. const struct ggml_tensor * src0,
  10953. const struct ggml_tensor * src1,
  10954. struct ggml_tensor * dst) {
  10955. const int32_t s0 = ((const int32_t*)(dst->op_params))[0];
  10956. const int32_t p0 = ((const int32_t*)(dst->op_params))[1];
  10957. const int32_t d0 = ((const int32_t*)(dst->op_params))[2];
  10958. GGML_ASSERT(d0 == 1); // dilation not supported
  10959. GGML_ASSERT(p0 == src0->ne[0]/2); // only half padding supported
  10960. if (s0 == 1) {
  10961. ggml_compute_forward_conv_1d_s1_ph(params, src0, src1, dst);
  10962. } else if (s0 == 2) {
  10963. ggml_compute_forward_conv_1d_s2_ph(params, src0, src1, dst);
  10964. } else {
  10965. GGML_ASSERT(false); // only stride 1 and 2 supported
    }
  10967. }
  10968. // ggml_compute_forward_conv_2d
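// conv_2d uses an im2col-style approach: the INIT pass unrolls every output location
// (i1, i0) of src1 into a row of ew0 = nk0*nk1*ne02 f16 values in wdata (all kernel taps
// across all input channels, with out-of-bounds taps left at zero), so the compute pass
// can reduce each output element to a single dot product between the flattened kernel of
// that output channel and the corresponding unrolled row.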
  10969. static void ggml_compute_forward_conv_2d_f16_f32(
  10970. const struct ggml_compute_params * params,
  10971. const struct ggml_tensor * src0,
  10972. const struct ggml_tensor * src1,
  10973. struct ggml_tensor * dst) {
  10974. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  10975. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  10976. GGML_ASSERT( dst->type == GGML_TYPE_F32);
  10977. int64_t t0 = ggml_perf_time_us();
  10978. UNUSED(t0);
  10979. GGML_TENSOR_BINARY_OP_LOCALS;
  10980. const int ith = params->ith;
  10981. const int nth = params->nth;
  10982. const int nk0 = ne00;
  10983. const int nk1 = ne01;
  10984. // size of the convolution row - the kernel size unrolled across all channels
  10985. const int ew0 = nk0*nk1*ne02;
  10986. const int32_t s0 = ((const int32_t*)(dst->op_params))[0];
  10987. const int32_t s1 = ((const int32_t*)(dst->op_params))[1];
  10988. const int32_t p0 = ((const int32_t*)(dst->op_params))[2];
  10989. const int32_t p1 = ((const int32_t*)(dst->op_params))[3];
  10990. const int32_t d0 = ((const int32_t*)(dst->op_params))[4];
  10991. const int32_t d1 = ((const int32_t*)(dst->op_params))[5];
  10992. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  10993. GGML_ASSERT(nb10 == sizeof(float));
  10994. if (params->type == GGML_TASK_INIT) {
  10995. memset(params->wdata, 0, params->wsize);
  10996. // prepare source data (src1)
  10997. {
  10998. ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;
  10999. for (int i12 = 0; i12 < ne12; i12++) {
  11000. const float * const src = (float *)((char *) src1->data + i12*nb12);
  11001. ggml_fp16_t * dst_data = wdata;
  11002. for (int i1 = 0; i1 < ne1; i1++) {
  11003. for (int i0 = 0; i0 < ne0; i0++) {
  11004. for (int ik1 = 0; ik1 < nk1; ik1++) {
  11005. for (int ik0 = 0; ik0 < nk0; ik0++) {
  11006. const int idx0 = i0*s0 + ik0*d0 - p0;
  11007. const int idx1 = i1*s1 + ik1*d1 - p1;
  11008. if (!(idx1 < 0 || idx1 >= ne11 || idx0 < 0 || idx0 >= ne10)) {
  11009. dst_data[(i1*ne0 + i0)*ew0 + i12*(nk0*nk1) + ik1*nk0 + ik0] =
  11010. GGML_FP32_TO_FP16(src[idx1*ne10 + idx0]);
  11011. }
  11012. }
  11013. }
  11014. }
  11015. }
  11016. }
  11017. }
  11018. return;
  11019. }
  11020. if (params->type == GGML_TASK_FINALIZE) {
  11021. return;
  11022. }
  11023. // total patches in dst
  11024. const int np = ne2;
  11025. // patches per thread
  11026. const int dp = (np + nth - 1)/nth;
  11027. // patch range for this thread
  11028. const int ip0 = dp*ith;
  11029. const int ip1 = MIN(ip0 + dp, np);
  11030. ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;
  11031. for (int i3 = 0; i3 < ne3; i3++) {
  11032. for (int i2 = ip0; i2 < ip1; i2++) {
  11033. float * dst_data = (float *)((char *) dst->data + i3*nb3 + i2*nb2);
  11034. for (int i1 = 0; i1 < ne1; ++i1) {
  11035. for (int i0 = 0; i0 < ne0; ++i0) {
  11036. ggml_vec_dot_f16(ew0, dst_data + i1*ne0 + i0,
  11037. (ggml_fp16_t *) ((char *) src0->data + i2*nb03),
  11038. (ggml_fp16_t *) wdata + i3*nb3 + (i1*ne0 + i0)*ew0);
  11039. }
  11040. }
  11041. }
  11042. }
  11043. }
  11044. static void ggml_compute_forward_conv_2d(
  11045. const struct ggml_compute_params * params,
  11046. const struct ggml_tensor * src0,
  11047. const struct ggml_tensor * src1,
  11048. struct ggml_tensor * dst) {
  11049. switch (src0->type) {
  11050. case GGML_TYPE_F16:
  11051. {
  11052. ggml_compute_forward_conv_2d_f16_f32(params, src0, src1, dst);
  11053. } break;
  11054. case GGML_TYPE_F32:
  11055. {
  11056. //ggml_compute_forward_conv_2d_f32(params, src0, src1, dst);
  11057. GGML_ASSERT(false);
  11058. } break;
  11059. default:
  11060. {
  11061. GGML_ASSERT(false);
  11062. } break;
  11063. }
  11064. }
  11065. // ggml_compute_forward_conv_transpose_2d
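// conv_transpose_2d permutes both operands in the INIT pass so that the input-channel
// dimension (ne03 for the kernel, ne12 for the source) becomes innermost; the compute
// pass then accumulates every kernel tap into the output with a single ggml_vec_dot_f16
// of length ne03 (Cin), scattered at the stride offsets:
//
//   dst[(i11*stride + i01)*ne0 + i10*stride + i00] += dot_over_Cin(src_pixel, kernel_tap)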
  11066. static void ggml_compute_forward_conv_transpose_2d(
  11067. const struct ggml_compute_params * params,
  11068. const struct ggml_tensor * src0,
  11069. const struct ggml_tensor * src1,
  11070. struct ggml_tensor * dst) {
  11071. GGML_ASSERT(src0->type == GGML_TYPE_F16);
  11072. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  11073. GGML_ASSERT( dst->type == GGML_TYPE_F32);
  11074. int64_t t0 = ggml_perf_time_us();
  11075. UNUSED(t0);
  11076. GGML_TENSOR_BINARY_OP_LOCALS;
  11077. const int ith = params->ith;
  11078. const int nth = params->nth;
  11079. const int nk = ne00*ne01*ne02*ne03;
  11080. GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
  11081. GGML_ASSERT(nb10 == sizeof(float));
  11082. if (params->type == GGML_TASK_INIT) {
  11083. memset(params->wdata, 0, params->wsize);
  11084. // permute kernel data (src0) from (Kw x Kh x Cout x Cin) to (Cin x Kw x Kh x Cout)
  11085. {
  11086. ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;
  11087. for (int64_t i03 = 0; i03 < ne03; i03++) {
  11088. for (int64_t i02 = 0; i02 < ne02; i02++) {
  11089. const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i03*nb03 + i02*nb02);
  11090. ggml_fp16_t * dst_data = wdata + i02*ne01*ne00*ne03;
  11091. for (int64_t i01 = 0; i01 < ne01; i01++) {
  11092. for (int64_t i00 = 0; i00 < ne00; i00++) {
  11093. dst_data[i01*ne00*ne03 + i00*ne03 + i03] = src[i01 * ne00 + i00];
  11094. }
  11095. }
  11096. }
  11097. }
  11098. }
  11099. // permute source data (src1) from (Sw x Sh x Cin) to (Cin x Sw x Sh)
  11100. {
  11101. ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + nk;
  11102. for (int i12 = 0; i12 < ne12; i12++) {
  11103. for (int i11 = 0; i11 < ne11; i11++) {
  11104. const float * const src = (float *)((char *) src1->data + i12*nb12 + i11*nb11);
  11105. ggml_fp16_t * dst_data = wdata + i11*ne10*ne12;
  11106. for (int i10 = 0; i10 < ne10; i10++) {
  11107. dst_data[i10*ne12 + i12] = GGML_FP32_TO_FP16(src[i10]);
  11108. }
  11109. }
  11110. }
  11111. }
  11112. return;
  11113. }
  11114. if (params->type == GGML_TASK_FINALIZE) {
  11115. return;
  11116. }
  11117. const int32_t stride = ggml_get_op_params_i32(dst, 0);
  11118. // total patches in dst
  11119. const int np = ne2;
  11120. // patches per thread
  11121. const int dp = (np + nth - 1)/nth;
  11122. // patch range for this thread
  11123. const int ip0 = dp*ith;
  11124. const int ip1 = MIN(ip0 + dp, np);
  11125. ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;
  11126. ggml_fp16_t * const wdata_src = wdata + nk;
  11127. for (int i2 = ip0; i2 < ip1; i2++) { // Cout
  11128. float * dst_data = (float *)((char *) dst->data + i2*nb2);
  11129. ggml_fp16_t * wdata_kernel = wdata + i2*ne01*ne00*ne03;
  11130. for (int i11 = 0; i11 < ne11; i11++) {
  11131. for (int i10 = 0; i10 < ne10; i10++) {
  11132. const int i1n = i11*ne10*ne12 + i10*ne12;
  11133. for (int i01 = 0; i01 < ne01; i01++) {
  11134. for (int i00 = 0; i00 < ne00; i00++) {
  11135. float v = 0;
  11136. ggml_vec_dot_f16(ne03, &v,
  11137. wdata_src + i1n,
  11138. wdata_kernel + i01*ne00*ne03 + i00*ne03);
  11139. dst_data[(i11*stride + i01)*ne0 + i10*stride + i00] += v;
  11140. }
  11141. }
  11142. }
  11143. }
  11144. }
  11145. }
  11146. // ggml_compute_forward_pool_1d_sk_p0
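// pool_1d with stride == kernel size and no padding: each output element reduces k
// consecutive input samples, either as a running sum that is divided by k at the end
// (AVG) or as a running maximum (MAX). The op runs on a single thread (ith == 0).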
  11147. static void ggml_compute_forward_pool_1d_sk_p0(
  11148. const struct ggml_compute_params * params,
  11149. const enum ggml_op_pool op,
  11150. const struct ggml_tensor * src,
  11151. const int k,
  11152. struct ggml_tensor * dst) {
  11153. assert(src->type == GGML_TYPE_F32);
  11154. assert(params->ith == 0);
  11155. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  11156. return;
  11157. }
  11158. const char * cdata = (const char *)src->data;
  11159. const char * const data_end = cdata + ggml_nbytes(src);
  11160. float * drow = (float *)dst->data;
  11161. const int64_t rs = dst->ne[0];
  11162. while (cdata < data_end) {
  11163. const float * const srow = (const float *)cdata;
  11164. int j = 0;
  11165. for (int64_t i = 0; i < rs; ++i) {
  11166. switch (op) {
  11167. case GGML_OP_POOL_AVG: drow[i] = 0; break;
  11168. case GGML_OP_POOL_MAX: drow[i] = -FLT_MAX; break;
  11169. case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
  11170. }
  11171. for (int ki = 0; ki < k; ++ki) {
  11172. switch (op) {
  11173. case GGML_OP_POOL_AVG: drow[i] += srow[j]; break;
  11174. case GGML_OP_POOL_MAX: if (srow[j] > drow[i]) drow[i] = srow[j]; break;
  11175. case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
  11176. }
  11177. ++j;
  11178. }
  11179. switch (op) {
  11180. case GGML_OP_POOL_AVG: drow[i] /= k; break;
  11181. case GGML_OP_POOL_MAX: break;
  11182. case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
  11183. }
  11184. }
  11185. cdata += src->nb[1];
  11186. drow += rs;
  11187. }
  11188. }
  11189. // ggml_compute_forward_pool_1d
  11190. static void ggml_compute_forward_pool_1d(
  11191. const struct ggml_compute_params * params,
  11192. const struct ggml_tensor * src0,
  11193. struct ggml_tensor * dst) {
  11194. const int32_t * opts = (const int32_t *)dst->op_params;
  11195. enum ggml_op_pool op = opts[0];
  11196. const int k0 = opts[1];
  11197. const int s0 = opts[2];
  11198. const int p0 = opts[3];
  11199. GGML_ASSERT(p0 == 0); // padding not supported
  11200. GGML_ASSERT(k0 == s0); // only s = k supported
  11201. ggml_compute_forward_pool_1d_sk_p0(params, op, src0, k0, dst);
  11202. }
  11203. // ggml_compute_forward_pool_2d_sk_p0
  11204. static void ggml_compute_forward_pool_2d_sk_p0(
  11205. const struct ggml_compute_params * params,
  11206. const enum ggml_op_pool op,
  11207. const struct ggml_tensor * src,
  11208. const int k0,
  11209. const int k1,
  11210. struct ggml_tensor * dst) {
  11211. assert(src->type == GGML_TYPE_F32);
  11212. assert(params->ith == 0);
  11213. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  11214. return;
  11215. }
  11216. const char * cdata = (const char*)src->data;
  11217. const char * const data_end = cdata + ggml_nbytes(src);
  11218. const int64_t px = dst->ne[0];
  11219. const int64_t py = dst->ne[1];
  11220. const int64_t pa = px * py;
  11221. float * dplane = (float *)dst->data;
  11222. const int ka = k0 * k1;
  11223. while (cdata < data_end) {
  11224. for (int oy = 0; oy < py; ++oy) {
  11225. float * const drow = dplane + oy * px;
  11226. for (int ox = 0; ox < px; ++ox) {
  11227. float * const out = drow + ox;
  11228. switch (op) {
  11229. case GGML_OP_POOL_AVG: *out = 0; break;
  11230. case GGML_OP_POOL_MAX: *out = -FLT_MAX; break;
  11231. case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
  11232. }
  11233. const int ix = ox * k0;
  11234. const int iy = oy * k1;
  11235. for (int ky = 0; ky < k1; ++ky) {
  11236. const float * const srow = (const float *)(cdata + src->nb[1] * (iy + ky));
  11237. for (int kx = 0; kx < k0; ++kx) {
  11238. int j = ix + kx;
  11239. switch (op) {
  11240. case GGML_OP_POOL_AVG: *out += srow[j]; break;
  11241. case GGML_OP_POOL_MAX: if (srow[j] > *out) *out = srow[j]; break;
  11242. case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
  11243. }
  11244. }
  11245. }
  11246. switch (op) {
  11247. case GGML_OP_POOL_AVG: *out /= ka; break;
  11248. case GGML_OP_POOL_MAX: break;
  11249. case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
  11250. }
  11251. }
  11252. }
  11253. cdata += src->nb[2];
  11254. dplane += pa;
  11255. }
  11256. }
  11257. // ggml_compute_forward_pool_2d
  11258. static void ggml_compute_forward_pool_2d(
  11259. const struct ggml_compute_params * params,
  11260. const struct ggml_tensor * src0,
  11261. struct ggml_tensor * dst) {
  11262. const int32_t * opts = (const int32_t *)dst->op_params;
  11263. enum ggml_op_pool op = opts[0];
  11264. const int k0 = opts[1];
  11265. const int k1 = opts[2];
  11266. const int s0 = opts[3];
  11267. const int s1 = opts[4];
  11268. const int p0 = opts[5];
  11269. const int p1 = opts[6];
  11270. GGML_ASSERT(p0 == 0);
  11271. GGML_ASSERT(p1 == 0); // padding not supported
  11272. GGML_ASSERT(k0 == s0);
  11273. GGML_ASSERT(k1 == s1); // only s = k supported
  11274. ggml_compute_forward_pool_2d_sk_p0(params, op, src0, k0, k1, dst);
  11275. }
  11276. // ggml_compute_forward_upscale
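// upscale_f32 performs nearest-neighbour upscaling by an integer scale factor:
// output element (n, m) copies input element (n / scale_factor, m / scale_factor).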
  11277. static void ggml_compute_forward_upscale_f32(
  11278. const struct ggml_compute_params * params,
  11279. const struct ggml_tensor * src0,
  11280. struct ggml_tensor * dst) {
  11281. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  11282. return;
  11283. }
  11284. GGML_ASSERT(src0->nb[0] == sizeof(float));
    const int ith = params->ith;
    const int nth = params->nth;

    GGML_TENSOR_UNARY_OP_LOCALS;

    const int scale_factor = dst->op_params[0];

    // TODO: optimize

    for (int i03 = 0; i03 < ne03; i03++) {
        // stride by nth so each thread copies a disjoint set of channels instead of overlapping ranges
        for (int i02 = ith; i02 < ne02; i02 += nth) {
  11291. for (int m = 0; m < dst->ne[1]; m++) {
  11292. int i01 = m / scale_factor;
  11293. for (int n = 0; n < dst->ne[0]; n++) {
  11294. int i00 = n / scale_factor;
  11295. const float * x = (float *)((char *) src0->data + i00 * nb00 +i01 * nb01 + i02 * nb02 + i03 * nb03);
  11296. float * y = (float *)((char *) dst->data + n * dst->nb[0] + m * dst->nb[1] + i02 * dst->nb[2] + i03 * dst->nb[3]);
  11297. *y = *x;
  11298. }
  11299. }
  11300. }
  11301. }
  11302. }
  11303. static void ggml_compute_forward_upscale(
  11304. const struct ggml_compute_params * params,
  11305. const struct ggml_tensor * src0,
  11306. struct ggml_tensor * dst) {
  11307. switch (src0->type) {
  11308. case GGML_TYPE_F32:
  11309. {
  11310. ggml_compute_forward_upscale_f32(params, src0, dst);
  11311. } break;
  11312. default:
  11313. {
  11314. GGML_ASSERT(false);
  11315. } break;
  11316. }
  11317. }
  11318. // ggml_compute_forward_flash_attn
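// flash_attn processes one q row at a time, keeping only an M-element score vector S in
// per-thread scratch (params->wdata) instead of materializing the full attention matrix:
//
//   S[ic]   = dot(k_row(ic), q_row) * 1/sqrt(D)     for ic = 0..M-1  (M = P + N)
//   S       = causal_mask(S)                        (only if masked)
//   S       = softmax(S)                            (padded to Mup for the unrolled exp loop)
//   dst[ic] = dot(v_row(ic), S)                     for ic = 0..D-1
//
// Rows of q (across heads and batch) are distributed over the threads.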
  11319. static void ggml_compute_forward_flash_attn_f32(
  11320. const struct ggml_compute_params * params,
  11321. const struct ggml_tensor * q,
  11322. const struct ggml_tensor * k,
  11323. const struct ggml_tensor * v,
  11324. const bool masked,
  11325. struct ggml_tensor * dst) {
  11326. int64_t t0 = ggml_perf_time_us();
  11327. UNUSED(t0);
  11328. GGML_TENSOR_LOCALS(int64_t, neq, q, ne);
  11329. GGML_TENSOR_LOCALS(size_t, nbq, q, nb);
  11330. GGML_TENSOR_LOCALS(int64_t, nek, k, ne);
  11331. GGML_TENSOR_LOCALS(size_t, nbk, k, nb);
  11332. GGML_TENSOR_LOCALS(int64_t, nev, v, ne);
  11333. GGML_TENSOR_LOCALS(size_t, nbv, v, nb);
  11334. GGML_TENSOR_LOCALS(int64_t, ne, dst, ne);
  11335. GGML_TENSOR_LOCALS(size_t, nb, dst, nb);
  11336. const int ith = params->ith;
  11337. const int nth = params->nth;
  11338. const int64_t D = neq0;
  11339. const int64_t N = neq1;
  11340. const int64_t P = nek1 - N;
  11341. const int64_t M = P + N;
  11342. const int Mup = ggml_up(M, GGML_SOFT_MAX_UNROLL);
  11343. GGML_ASSERT(ne0 == D);
  11344. GGML_ASSERT(ne1 == N);
  11345. GGML_ASSERT(P >= 0);
  11346. GGML_ASSERT(nbq0 == sizeof(float));
  11347. GGML_ASSERT(nbk0 == sizeof(float));
  11348. GGML_ASSERT(nbv0 == sizeof(float));
  11349. GGML_ASSERT(neq0 == D);
  11350. GGML_ASSERT(nek0 == D);
  11351. GGML_ASSERT(nev1 == D);
  11352. GGML_ASSERT(neq1 == N);
  11353. GGML_ASSERT(nek1 == N + P);
  11354. GGML_ASSERT(nev1 == D);
  11355. // dst cannot be transposed or permuted
  11356. GGML_ASSERT(nb0 == sizeof(float));
  11357. GGML_ASSERT(nb0 <= nb1);
  11358. GGML_ASSERT(nb1 <= nb2);
  11359. GGML_ASSERT(nb2 <= nb3);
  11360. if (params->type == GGML_TASK_INIT) {
  11361. return;
  11362. }
  11363. if (params->type == GGML_TASK_FINALIZE) {
  11364. return;
  11365. }
  11366. // parallelize by q rows using ggml_vec_dot_f32
  11367. // total rows in q
  11368. const int nr = neq1*neq2*neq3;
  11369. // rows per thread
  11370. const int dr = (nr + nth - 1)/nth;
  11371. // row range for this thread
  11372. const int ir0 = dr*ith;
  11373. const int ir1 = MIN(ir0 + dr, nr);
  11374. const float scale = 1.0f/sqrtf(D);
  11375. //printf("P=%d N=%d D=%d ir0=%d ir1=%d scale = %f\n", P, N, D, ir0, ir1, scale);
  11376. for (int ir = ir0; ir < ir1; ++ir) {
  11377. // q indices
  11378. const int iq3 = ir/(neq2*neq1);
  11379. const int iq2 = (ir - iq3*neq2*neq1)/neq1;
  11380. const int iq1 = (ir - iq3*neq2*neq1 - iq2*neq1);
  11381. float * S = (float *) params->wdata + ith*(Mup + CACHE_LINE_SIZE_F32);
  11382. for (int i = M; i < Mup; ++i) {
  11383. S[i] = -INFINITY;
  11384. }
  11385. for (int64_t ic = 0; ic < nek1; ++ic) {
  11386. // k indices
  11387. const int ik3 = iq3;
  11388. const int ik2 = iq2;
  11389. const int ik1 = ic;
  11390. // S indices
  11391. const int i1 = ik1;
  11392. ggml_vec_dot_f32(neq0,
  11393. S + i1,
  11394. (float *) ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)),
  11395. (float *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)));
  11396. }
  11397. // scale
  11398. ggml_vec_scale_f32(nek1, S, scale);
  11399. if (masked) {
  11400. for (int64_t i = P; i < M; i++) {
  11401. if (i > P + iq1) {
  11402. S[i] = -INFINITY;
  11403. }
  11404. }
  11405. }
  11406. // softmax
  11407. {
  11408. float max = -INFINITY;
  11409. ggml_vec_max_f32(M, &max, S);
  11410. ggml_float sum = 0.0;
  11411. {
  11412. #ifdef GGML_SOFT_MAX_ACCELERATE
  11413. max = -max;
  11414. vDSP_vsadd(S, 1, &max, S, 1, Mup);
  11415. vvexpf(S, S, &Mup);
  11416. ggml_vec_sum_f32(Mup, &sum, S);
  11417. #else
  11418. uint16_t scvt[GGML_SOFT_MAX_UNROLL]; UNUSED(scvt);
  11419. ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 };
  11420. for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) {
  11421. float * SS = S + i;
  11422. for (int j = 0; j < GGML_SOFT_MAX_UNROLL; ++j) {
  11423. if (SS[j] == -INFINITY) {
  11424. SS[j] = 0.0f;
  11425. } else {
  11426. #ifndef GGML_FLASH_ATTN_EXP_FP16
  11427. const float val = expf(SS[j] - max);
  11428. #else
  11429. ggml_fp16_t s = GGML_FP32_TO_FP16(SS[j] - max);
  11430. memcpy(&scvt[j], &s, sizeof(uint16_t));
  11431. const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt[j]]);
  11432. #endif
  11433. sump[j] += (ggml_float)val;
  11434. SS[j] = val;
  11435. }
  11436. }
  11437. }
  11438. for (int i = 0; i < GGML_SOFT_MAX_UNROLL; i++) {
  11439. sum += sump[i];
  11440. }
  11441. #endif
  11442. }
  11443. assert(sum > 0.0);
  11444. sum = 1.0/sum;
  11445. ggml_vec_scale_f32(M, S, sum);
  11446. #ifndef NDEBUG
  11447. for (int i = 0; i < M; ++i) {
  11448. assert(!isnan(S[i]));
  11449. assert(!isinf(S[i]));
  11450. }
  11451. #endif
  11452. }
  11453. for (int64_t ic = 0; ic < nev1; ++ic) {
  11454. // dst indices
  11455. const int i1 = iq1;
  11456. const int i2 = iq2;
  11457. const int i3 = iq3;
  11458. ggml_vec_dot_f32(nek1,
  11459. (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)),
  11460. (float *) ((char *) v->data + ( ic*nbv1 + i2*nbv2 + i3*nbv3)),
  11461. S);
  11462. }
  11463. }
  11464. }
  11465. static void ggml_compute_forward_flash_attn_f16(
  11466. const struct ggml_compute_params * params,
  11467. const struct ggml_tensor * q,
  11468. const struct ggml_tensor * k,
  11469. const struct ggml_tensor * v,
  11470. const bool masked,
  11471. struct ggml_tensor * dst) {
  11472. int64_t t0 = ggml_perf_time_us();
  11473. UNUSED(t0);
  11474. GGML_TENSOR_LOCALS(int64_t, neq, q, ne);
  11475. GGML_TENSOR_LOCALS(size_t, nbq, q, nb);
  11476. GGML_TENSOR_LOCALS(int64_t, nek, k, ne);
  11477. GGML_TENSOR_LOCALS(size_t, nbk, k, nb);
  11478. GGML_TENSOR_LOCALS(int64_t, nev, v, ne);
  11479. GGML_TENSOR_LOCALS(size_t, nbv, v, nb);
  11480. GGML_TENSOR_LOCALS(int64_t, ne, dst, ne);
  11481. GGML_TENSOR_LOCALS(size_t, nb, dst, nb);
  11482. const int ith = params->ith;
  11483. const int nth = params->nth;
  11484. const int64_t D = neq0;
  11485. const int64_t N = neq1;
  11486. const int64_t P = nek1 - N;
  11487. const int64_t M = P + N;
  11488. const int Mup = ggml_up(M, GGML_SOFT_MAX_UNROLL);
  11489. GGML_ASSERT(ne0 == D);
  11490. GGML_ASSERT(ne1 == N);
  11491. GGML_ASSERT(P >= 0);
  11492. GGML_ASSERT(nbq0 == sizeof(ggml_fp16_t));
  11493. GGML_ASSERT(nbk0 == sizeof(ggml_fp16_t));
  11494. GGML_ASSERT(nbv0 == sizeof(ggml_fp16_t));
  11495. GGML_ASSERT(neq0 == D);
  11496. GGML_ASSERT(nek0 == D);
  11497. GGML_ASSERT(nev1 == D);
  11498. GGML_ASSERT(neq1 == N);
  11499. GGML_ASSERT(nek1 == N + P);
  11500. GGML_ASSERT(nev1 == D);
  11501. // dst cannot be transposed or permuted
  11502. GGML_ASSERT(nb0 == sizeof(float));
  11503. GGML_ASSERT(nb0 <= nb1);
  11504. GGML_ASSERT(nb1 <= nb2);
  11505. GGML_ASSERT(nb2 <= nb3);
  11506. if (params->type == GGML_TASK_INIT) {
  11507. return;
  11508. }
  11509. if (params->type == GGML_TASK_FINALIZE) {
  11510. return;
  11511. }
    // parallelize by q rows using ggml_vec_dot_f16
  11513. // total rows in q
  11514. const int nr = neq1*neq2*neq3;
  11515. // rows per thread
  11516. const int dr = (nr + nth - 1)/nth;
  11517. // row range for this thread
  11518. const int ir0 = dr*ith;
  11519. const int ir1 = MIN(ir0 + dr, nr);
  11520. const float scale = 1.0f/sqrtf(D);
  11521. //printf("P=%d N=%d D=%d ir0=%d ir1=%d scale = %f\n", P, N, D, ir0, ir1, scale);
  11522. for (int ir = ir0; ir < ir1; ++ir) {
  11523. // q indices
  11524. const int iq3 = ir/(neq2*neq1);
  11525. const int iq2 = (ir - iq3*neq2*neq1)/neq1;
  11526. const int iq1 = (ir - iq3*neq2*neq1 - iq2*neq1);
  11527. float * S = (float *) params->wdata + ith*(2*Mup + CACHE_LINE_SIZE_F32);
  11528. for (int i = M; i < Mup; ++i) {
  11529. S[i] = -INFINITY;
  11530. }
  11531. if (GGML_VEC_DOT_UNROLL > 2 || nek1 % GGML_VEC_DOT_UNROLL != 0) {
  11532. for (int64_t ic = 0; ic < nek1; ++ic) {
  11533. // k indices
  11534. const int ik3 = iq3;
  11535. const int ik2 = iq2;
  11536. const int ik1 = ic;
  11537. // S indices
  11538. const int i1 = ik1;
  11539. ggml_vec_dot_f16(neq0,
  11540. S + i1,
  11541. (ggml_fp16_t *) ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)),
  11542. (ggml_fp16_t *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)));
  11543. }
  11544. } else {
  11545. for (int64_t ic = 0; ic < nek1; ic += GGML_VEC_DOT_UNROLL) {
  11546. // k indices
  11547. const int ik3 = iq3;
  11548. const int ik2 = iq2;
  11549. const int ik1 = ic;
  11550. // S indices
  11551. const int i1 = ik1;
  11552. ggml_vec_dot_f16_unroll(neq0, nbk1,
  11553. S + i1,
  11554. ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)),
  11555. (ggml_fp16_t *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)));
  11556. }
  11557. }
  11558. // scale
  11559. ggml_vec_scale_f32(nek1, S, scale);
  11560. if (masked) {
  11561. for (int64_t i = P; i < M; i++) {
  11562. if (i > P + iq1) {
  11563. S[i] = -INFINITY;
  11564. }
  11565. }
  11566. }
  11567. // softmax
  11568. {
  11569. float max = -INFINITY;
  11570. ggml_vec_max_f32(M, &max, S);
  11571. ggml_float sum = 0.0;
  11572. {
  11573. #ifdef GGML_SOFT_MAX_ACCELERATE
  11574. max = -max;
  11575. vDSP_vsadd(S, 1, &max, S, 1, Mup);
  11576. vvexpf(S, S, &Mup);
  11577. ggml_vec_sum_f32(Mup, &sum, S);
  11578. #else
  11579. uint16_t scvt[GGML_SOFT_MAX_UNROLL];
  11580. ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 };
  11581. for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) {
  11582. float * SS = S + i;
  11583. for (int j = 0; j < GGML_SOFT_MAX_UNROLL; ++j) {
  11584. if (SS[j] == -INFINITY) {
  11585. SS[j] = 0.0f;
  11586. } else {
  11587. ggml_fp16_t s = GGML_FP32_TO_FP16(SS[j] - max);
  11588. memcpy(&scvt[j], &s, sizeof(uint16_t));
  11589. const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt[j]]);
  11590. sump[j] += (ggml_float)val;
  11591. SS[j] = val;
  11592. }
  11593. }
  11594. }
  11595. for (int i = 0; i < GGML_SOFT_MAX_UNROLL; i++) {
  11596. sum += sump[i];
  11597. }
  11598. #endif
  11599. }
  11600. assert(sum > 0.0);
  11601. sum = 1.0/sum;
  11602. ggml_vec_scale_f32(M, S, sum);
  11603. #ifndef NDEBUG
  11604. for (int i = 0; i < M; ++i) {
  11605. assert(!isnan(S[i]));
  11606. assert(!isinf(S[i]));
  11607. }
  11608. #endif
  11609. }
  11610. ggml_fp16_t * S16 = (ggml_fp16_t *) ((float *) params->wdata + ith*(2*Mup + CACHE_LINE_SIZE_F32) + Mup);
  11611. for (int64_t i = 0; i < M; i++) {
  11612. S16[i] = GGML_FP32_TO_FP16(S[i]);
  11613. }
  11614. if (GGML_VEC_DOT_UNROLL == 1 || (nev1 % GGML_VEC_DOT_UNROLL != 0)) {
  11615. for (int64_t ic = 0; ic < nev1; ++ic) {
  11616. // dst indices
  11617. const int i1 = iq1;
  11618. const int i2 = iq2;
  11619. const int i3 = iq3;
  11620. ggml_vec_dot_f16(nek1,
  11621. (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)),
  11622. (ggml_fp16_t *) ((char *) v->data + ( ic*nbv1 + i2*nbv2 + i3*nbv3)),
  11623. S16);
  11624. }
  11625. } else {
  11626. for (int64_t ic = 0; ic < nev1; ic += GGML_VEC_DOT_UNROLL) {
  11627. // dst indices
  11628. const int i1 = iq1;
  11629. const int i2 = iq2;
  11630. const int i3 = iq3;
  11631. ggml_vec_dot_f16_unroll(nek1, nbv1,
  11632. (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)),
  11633. ((char *) v->data + ( ic*nbv1 + i2*nbv2 + i3*nbv3)),
  11634. S16);
  11635. }
  11636. }
  11637. }
  11638. }
  11639. static void ggml_compute_forward_flash_attn(
  11640. const struct ggml_compute_params * params,
  11641. const struct ggml_tensor * q,
  11642. const struct ggml_tensor * k,
  11643. const struct ggml_tensor * v,
  11644. const bool masked,
  11645. struct ggml_tensor * dst) {
  11646. switch (q->type) {
  11647. case GGML_TYPE_F16:
  11648. {
  11649. ggml_compute_forward_flash_attn_f16(params, q, k, v, masked, dst);
  11650. } break;
  11651. case GGML_TYPE_F32:
  11652. {
  11653. ggml_compute_forward_flash_attn_f32(params, q, k, v, masked, dst);
  11654. } break;
  11655. default:
  11656. {
  11657. GGML_ASSERT(false);
  11658. } break;
  11659. }
  11660. }
  11661. // ggml_compute_forward_flash_ff
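// flash_ff fuses a two-layer feed-forward block, processing one row of a at a time in
// per-thread scratch:
//
//   S   = b0 · a + b1       (fc,   M = neb01 hidden units)
//   S   = gelu(S)           (applied in f16 after converting the scratch row)
//   dst = c0 · S + c1       (proj, back to D = nea0 outputs)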
  11662. static void ggml_compute_forward_flash_ff_f16(
  11663. const struct ggml_compute_params * params,
  11664. const struct ggml_tensor * a, // F16
  11665. const struct ggml_tensor * b0, // F16 fc_w
  11666. const struct ggml_tensor * b1, // F32 fc_b
  11667. const struct ggml_tensor * c0, // F16 proj_w
  11668. const struct ggml_tensor * c1, // F32 proj_b
  11669. struct ggml_tensor * dst) {
  11670. int64_t t0 = ggml_perf_time_us();
  11671. UNUSED(t0);
  11672. GGML_TENSOR_LOCALS(int64_t, nea, a, ne);
  11673. GGML_TENSOR_LOCALS(size_t, nba, a, nb);
  11674. GGML_TENSOR_LOCALS(int64_t, neb0, b0, ne);
  11675. GGML_TENSOR_LOCALS(size_t, nbb0, b0, nb);
  11676. GGML_TENSOR_LOCALS(int64_t, neb1, b1, ne);
  11677. GGML_TENSOR_LOCALS(size_t, nbb1, b1, nb);
  11678. GGML_TENSOR_LOCALS(int64_t, nec0, c0, ne);
  11679. GGML_TENSOR_LOCALS(size_t, nbc0, c0, nb);
  11680. GGML_TENSOR_LOCALS(int64_t, nec1, c1, ne);
  11681. GGML_TENSOR_LOCALS(size_t, nbc1, c1, nb);
  11682. GGML_TENSOR_LOCALS(int64_t, ne, dst, ne);
  11683. GGML_TENSOR_LOCALS(size_t, nb, dst, nb);
  11684. const int ith = params->ith;
  11685. const int nth = params->nth;
  11686. const int64_t D = nea0;
  11687. //const int64_t N = nea1;
  11688. const int64_t M = neb01;
  11689. GGML_ASSERT(ne0 == nea0);
  11690. GGML_ASSERT(ne1 == nea1);
  11691. GGML_ASSERT(ne2 == nea2);
  11692. GGML_ASSERT(nba0 == sizeof(ggml_fp16_t));
  11693. GGML_ASSERT(nbb00 == sizeof(ggml_fp16_t));
  11694. GGML_ASSERT(nbb10 == sizeof(float));
  11695. GGML_ASSERT(nbc00 == sizeof(ggml_fp16_t));
  11696. GGML_ASSERT(nbc10 == sizeof(float));
  11697. GGML_ASSERT(neb00 == D);
  11698. GGML_ASSERT(neb01 == M);
  11699. GGML_ASSERT(neb10 == M);
  11700. GGML_ASSERT(neb11 == 1);
  11701. GGML_ASSERT(nec00 == M);
  11702. GGML_ASSERT(nec01 == D);
  11703. GGML_ASSERT(nec10 == D);
  11704. GGML_ASSERT(nec11 == 1);
  11705. // dst cannot be transposed or permuted
  11706. GGML_ASSERT(nb0 == sizeof(float));
  11707. GGML_ASSERT(nb0 <= nb1);
  11708. GGML_ASSERT(nb1 <= nb2);
  11709. GGML_ASSERT(nb2 <= nb3);
  11710. if (params->type == GGML_TASK_INIT) {
  11711. return;
  11712. }
  11713. if (params->type == GGML_TASK_FINALIZE) {
  11714. return;
  11715. }
    // parallelize by a rows using ggml_vec_dot_f16
  11717. // total rows in a
  11718. const int nr = nea1*nea2*nea3;
  11719. // rows per thread
  11720. const int dr = (nr + nth - 1)/nth;
  11721. // row range for this thread
  11722. const int ir0 = dr*ith;
  11723. const int ir1 = MIN(ir0 + dr, nr);
  11724. for (int ir = ir0; ir < ir1; ++ir) {
  11725. // a indices
  11726. const int ia3 = ir/(nea2*nea1);
  11727. const int ia2 = (ir - ia3*nea2*nea1)/nea1;
  11728. const int ia1 = (ir - ia3*nea2*nea1 - ia2*nea1);
  11729. float * S = (float *) params->wdata + ith*(2*M + CACHE_LINE_SIZE_F32);
  11730. for (int64_t ic = 0; ic < neb01; ++ic) {
  11731. // b0 indices
  11732. const int ib03 = ia3;
  11733. const int ib02 = ia2;
  11734. const int ib01 = ic;
  11735. // S indices
  11736. const int i1 = ib01;
  11737. ggml_vec_dot_f16(nea0,
  11738. S + i1,
  11739. (ggml_fp16_t *) ((char *) b0->data + (ib01*nbb01 + ib02*nbb02 + ib03*nbb03)),
  11740. (ggml_fp16_t *) ((char *) a->data + ( ia1*nba1 + ia2*nba2 + ia3*nba3)));
  11741. }
  11742. ggml_vec_add_f32(neb01, S, S, (float *) b1->data);
  11743. //ggml_vec_gelu_f32(neb01, S, S);
  11744. ggml_fp16_t * S16 = (ggml_fp16_t *) ((float *) params->wdata + ith*(2*M + CACHE_LINE_SIZE_F32) + M);
  11745. for (int64_t i = 0; i < M; i++) {
  11746. S16[i] = GGML_FP32_TO_FP16(S[i]);
  11747. }
  11748. ggml_vec_gelu_f16(neb01, S16, S16);
  11749. {
  11750. // dst indices
  11751. const int i1 = ia1;
  11752. const int i2 = ia2;
  11753. const int i3 = ia3;
  11754. for (int64_t ic = 0; ic < nec01; ++ic) {
  11755. ggml_vec_dot_f16(neb01,
  11756. (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)),
  11757. (ggml_fp16_t *) ((char *) c0->data + ( ic*nbc01 + i2*nbc02 + i3*nbc03)),
  11758. S16);
  11759. }
  11760. ggml_vec_add_f32(nec01,
  11761. (float *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3)),
  11762. (float *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3)),
  11763. (float *) c1->data);
  11764. }
  11765. }
  11766. }
  11767. static void ggml_compute_forward_flash_ff(
  11768. const struct ggml_compute_params * params,
  11769. const struct ggml_tensor * a,
  11770. const struct ggml_tensor * b0,
  11771. const struct ggml_tensor * b1,
  11772. const struct ggml_tensor * c0,
  11773. const struct ggml_tensor * c1,
  11774. struct ggml_tensor * dst) {
  11775. switch (b0->type) {
  11776. case GGML_TYPE_F16:
  11777. {
  11778. ggml_compute_forward_flash_ff_f16(params, a, b0, b1, c0, c1, dst);
  11779. } break;
  11780. case GGML_TYPE_F32:
  11781. {
  11782. GGML_ASSERT(false); // TODO
  11783. } break;
  11784. default:
  11785. {
  11786. GGML_ASSERT(false);
  11787. } break;
  11788. }
  11789. }
  11790. // ggml_compute_forward_flash_attn_back
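// flash_attn_back recomputes the forward attention scores per q row and then produces the
// gradients for q, k and v. All three gradients are packed back to back into the single
// dst tensor (grad_q, then grad_k, then grad_v); the detailed derivation of the softmax
// and matmul backward steps is spelled out in the long comment inside the loop below.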
  11791. static void ggml_compute_forward_flash_attn_back_f32(
  11792. const struct ggml_compute_params * params,
  11793. const struct ggml_tensor * q,
  11794. const struct ggml_tensor * k,
  11795. const struct ggml_tensor * v,
  11796. const struct ggml_tensor * d,
  11797. const bool masked,
  11798. struct ggml_tensor * dst) {
  11799. int64_t t0 = ggml_perf_time_us();
  11800. UNUSED(t0);
  11801. GGML_TENSOR_LOCALS(int64_t, neq, q, ne);
  11802. GGML_TENSOR_LOCALS(size_t, nbq, q, nb);
  11803. GGML_TENSOR_LOCALS(int64_t, nek, k, ne);
  11804. GGML_TENSOR_LOCALS(size_t, nbk, k, nb);
  11805. GGML_TENSOR_LOCALS(int64_t, nev, v, ne);
  11806. GGML_TENSOR_LOCALS(size_t, nbv, v, nb);
  11807. GGML_TENSOR_LOCALS(int64_t, ned, d, ne);
  11808. GGML_TENSOR_LOCALS(size_t, nbd, d, nb);
  11809. GGML_TENSOR_LOCALS(int64_t, ne, dst, ne);
  11810. GGML_TENSOR_LOCALS(size_t, nb, dst, nb);
  11811. const int ith = params->ith;
  11812. const int nth = params->nth;
  11813. const int64_t D = neq0;
  11814. const int64_t N = neq1;
  11815. const int64_t P = nek1 - N;
  11816. const int64_t M = P + N;
  11817. const int Mup = ggml_up(M, GGML_SOFT_MAX_UNROLL);
  11818. const int mxDM = MAX(D, Mup);
  11819. // GGML_ASSERT(ne0 == D);
  11820. // GGML_ASSERT(ne1 == N);
  11821. GGML_ASSERT(P >= 0);
  11822. GGML_ASSERT(nbq0 == sizeof(float));
  11823. GGML_ASSERT(nbk0 == sizeof(float));
  11824. GGML_ASSERT(nbv0 == sizeof(float));
  11825. GGML_ASSERT(neq0 == D);
  11826. GGML_ASSERT(nek0 == D);
  11827. GGML_ASSERT(nev1 == D);
  11828. GGML_ASSERT(ned0 == D);
  11829. GGML_ASSERT(neq1 == N);
  11830. GGML_ASSERT(nek1 == N + P);
  11831. GGML_ASSERT(nev1 == D);
  11832. GGML_ASSERT(ned1 == N);
  11833. // dst cannot be transposed or permuted
  11834. GGML_ASSERT(nb0 == sizeof(float));
  11835. GGML_ASSERT(nb0 <= nb1);
  11836. GGML_ASSERT(nb1 <= nb2);
  11837. GGML_ASSERT(nb2 <= nb3);
  11838. if (params->type == GGML_TASK_INIT) {
  11839. if (ith == 0) {
  11840. memset(dst->data, 0, nb0*ne0*ne1*ne2*ne3);
  11841. }
  11842. return;
  11843. }
  11844. if (params->type == GGML_TASK_FINALIZE) {
  11845. return;
  11846. }
  11847. // parallelize by q rows using ggml_vec_dot_f32
  11848. // total rows in q
  11849. const int nr = neq2*neq3;
  11850. // rows per thread
  11851. const int dr = (nr + nth - 1)/nth;
  11852. // row range for this thread
  11853. const int ir0 = dr*ith;
  11854. const int ir1 = MIN(ir0 + dr, nr);
  11855. const float scale = 1.0f/sqrtf(D);
  11856. //printf("P=%d N=%d D=%d ir0=%d ir1=%d scale = %f\n", P, N, D, ir0, ir1, scale);
  11857. for (int ir = ir0; ir < ir1; ++ir) {
  11858. // q indices
  11859. const int iq3 = ir/(neq2);
  11860. const int iq2 = ir - iq3*neq2;
  11861. for ( int iq1 = 0; iq1 < neq1; ++iq1) {
  11862. // not sure about CACHE_LINE_SIZE_F32..
  11863. // - maybe it must not be multiplied by 2 and excluded from .. in SM 1*(..) offset?
  11864. float * S = (float *) params->wdata + ith*2*(mxDM + CACHE_LINE_SIZE_F32) + 0*(mxDM+CACHE_LINE_SIZE_F32);
  11865. float * SM = (float *) params->wdata + ith*2*(mxDM + CACHE_LINE_SIZE_F32) + 1*(mxDM+CACHE_LINE_SIZE_F32);
  11866. for (int i = M; i < Mup; ++i) {
  11867. S[i] = -INFINITY;
  11868. }
  11869. for (int64_t ic = 0; ic < nek1; ++ic) {
  11870. // k indices
  11871. const int ik3 = iq3;
  11872. const int ik2 = iq2;
  11873. const int ik1 = ic;
  11874. // S indices
  11875. const int i1 = ik1;
  11876. ggml_vec_dot_f32(neq0,
  11877. S + i1,
  11878. (float *) ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)),
  11879. (float *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)));
  11880. }
  11881. // scale
  11882. ggml_vec_scale_f32(nek1, S, scale);
  11883. if (masked) {
  11884. for (int64_t i = P; i < M; i++) {
  11885. if (i > P + iq1) {
  11886. S[i] = -INFINITY;
  11887. }
  11888. }
  11889. }
  11890. // softmax
  11891. {
  11892. float max = -INFINITY;
  11893. ggml_vec_max_f32(M, &max, S);
  11894. ggml_float sum = 0.0;
  11895. {
  11896. #ifdef GGML_SOFT_MAX_ACCELERATE
  11897. max = -max;
                    // read from S and write into SM, matching the scalar path below (which reads S and writes SM)
                    vDSP_vsadd(S, 1, &max, SM, 1, Mup);
  11899. vvexpf(SM, SM, &Mup);
  11900. ggml_vec_sum_f32(Mup, &sum, SM);
  11901. #else
  11902. uint16_t scvt[GGML_SOFT_MAX_UNROLL]; UNUSED(scvt);
  11903. ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 };
  11904. for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) {
  11905. float * SR = S + i;
  11906. float * SW = SM + i;
  11907. for (int j = 0; j < GGML_SOFT_MAX_UNROLL; ++j) {
  11908. if (SR[j] == -INFINITY) {
  11909. SW[j] = 0.0f;
  11910. } else {
  11911. #ifndef GGML_FLASH_ATTN_EXP_FP16
  11912. const float val = expf(SR[j] - max);
  11913. #else
  11914. ggml_fp16_t s = GGML_FP32_TO_FP16(SR[j] - max);
  11915. memcpy(&scvt[j], &s, sizeof(uint16_t));
  11916. const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt[j]]);
  11917. #endif
  11918. sump[j] += (ggml_float)val;
  11919. SW[j] = val;
  11920. }
  11921. }
  11922. }
  11923. for (int i = 0; i < GGML_SOFT_MAX_UNROLL; i++) {
  11924. sum += sump[i];
  11925. }
  11926. #endif
  11927. }
  11928. assert(sum > 0.0);
  11929. sum = 1.0/sum;
  11930. ggml_vec_scale_f32(M, SM, sum);
  11931. }
  11932. // step-by-step explanation
  11933. {
  11934. // forward-process shape grads from backward process
  11935. // parallel_for iq2,iq3:
  11936. // k[:D,:M,:,:] [D,M,:,:] grad[k][:D,:M,iq2,iq3] += grad[kcur]
  11937. // q[:D,:N,:,:] [D,N,:,:] grad[q][:D,iq1,iq2,iq3] += grad[qcur]
  11938. // v[:M,:D,:,:] [M,D,:,:] grad[v][:M,:D,iq2,iq3] += grad[vcur]
  11939. // for iq1:
  11940. // kcur = k[:D,:M,iq2,iq3] [D,M,1,1] grad[kcur] = grad[S1].T @ qcur
  11941. // qcur = q[:D,iq1,iq2,iq3] [D,1,1,1] grad[qcur] = grad[S1] @ kcur
  11942. // vcur = v[:M,:D,iq2,iq3] [M,D,1,1] grad[vcur] = grad[S5].T @ S4
  11943. // S0 = -Inf [D,1,1,1]
  11944. // ~S1[i] = dot(kcur[:D,i], qcur)
  11945. // S1 = qcur @ kcur.T [M,1,1,1] grad[S1] = grad[S2] * scale
  11946. // S2 = S1 * scale [M,1,1,1] grad[S2] = diag_mask_zero(grad[S3], P)
  11947. // S3 = diag_mask_inf(S2, P) [M,1,1,1] grad[S3] = S4 * (grad[S4] - dot(S4, grad[S4]))
  11948. // S4 = softmax(S3) [M,1,1,1] grad[S4] = grad[S5] @ vcur
  11949. // ~S5[i] = dot(vcur[:,i], S4)
  11950. // S5 = S4 @ vcur.T [D,1,1,1] grad[S5] = d[:D,iq1,iq2,iq3]
  11951. // ~dst[i,iq1,iq2,iq3] = S5[i] ^
  11952. // dst[:D,iq1,iq2,iq3] = S5 | grad[dst[:D,iq1,iq2,iq3]] = d[:D,iq1,iq2,iq3]
  11953. // dst backward-/ grad[dst] = d
  11954. //
  11955. // output gradients with their dependencies:
  11956. //
  11957. // grad[kcur] = grad[S1].T @ qcur
  11958. // grad[S1] = diag_mask_zero(grad[S3], P) * scale
  11959. // grad[S3] = S4 * (grad[S4] - dot(S4, grad[S4]))
  11960. // grad[S4] = grad[S5] @ vcur
  11961. // grad[S4] = d[:D,iq1,iq2,iq3] @ vcur
  11962. // grad[qcur] = grad[S1] @ kcur
  11963. // grad[vcur] = grad[S5].T @ S4
  11964. // grad[vcur] = d[:D,iq1,iq2,iq3].T @ S4
  11965. //
  11966. // in post-order:
  11967. //
  11968. // S1 = qcur @ kcur.T
  11969. // S2 = S1 * scale
  11970. // S3 = diag_mask_inf(S2, P)
  11971. // S4 = softmax(S3)
  11972. // grad[S4] = d[:D,iq1,iq2,iq3] @ vcur
  11973. // grad[S3] = S4 * (grad[S4] - dot(S4, grad[S4]))
  11974. // grad[S1] = diag_mask_zero(grad[S3], P) * scale
  11975. // grad[qcur] = grad[S1] @ kcur
  11976. // grad[kcur] = grad[S1].T @ qcur
  11977. // grad[vcur] = d[:D,iq1,iq2,iq3].T @ S4
  11978. //
  11979. // using less variables (SM=S4):
  11980. //
  11981. // S = diag_mask_inf(qcur @ kcur.T * scale, P)
  11982. // SM = softmax(S)
  11983. // S = d[:D,iq1,iq2,iq3] @ vcur
  11984. // dot_SM_gradSM = dot(SM, S)
  11985. // S = SM * (S - dot(SM, S))
  11986. // S = diag_mask_zero(S, P) * scale
  11987. //
  11988. // grad[q][:D,iq1,iq2,iq3] += S @ kcur
  11989. // grad[k][:D,:M,iq2,iq3] += S.T @ qcur
  11990. // grad[v][:M,:D,iq2,iq3] += d[:D,iq1,iq2,iq3].T @ SM
  11991. }
  11992. // S = gradSM = d[:D,iq1,iq2,iq3] @ vcur
  11993. // S = d[:D,iq1,iq2,iq3] @ vcur
  11994. // S[:M] += vcur[:M,ic] * d[ic,iq1,iq2,iq3]
  11995. ggml_vec_set_f32(M, S, 0);
  11996. for (int64_t ic = 0; ic < D; ++ic) {
  11997. // dst indices
  11998. const int i1 = iq1;
  11999. const int i2 = iq2;
  12000. const int i3 = iq3;
  12001. ggml_vec_mad_f32(M,
  12002. S,
  12003. (float *) ((char *) v->data + ( ic*nbv1 + i2*nbv2 + i3*nbv3)),
  12004. *(float *) ((char *) d->data + (ic*nbd0 + i1*nbd1 + i2*nbd2 + i3*nbd3)));
  12005. }
  12006. // S = SM * (S - dot(SM, S))
  12007. float dot_SM_gradSM = 0;
  12008. ggml_vec_dot_f32 (M, &dot_SM_gradSM, SM, S);
  12009. ggml_vec_acc1_f32(M, S, -dot_SM_gradSM);
  12010. ggml_vec_mul_f32 (M, S, S, SM);
  12011. // S = diag_mask_zero(S, P) * scale
  12012. if (masked) {
  12013. // for (int64_t i = P + iq1 + 1; i < M; i++) {
  12014. // S[i] = 0;
  12015. // }
  12016. for (int64_t i = P; i < M; i++) {
  12017. if (i > P + iq1) {
  12018. S[i] = 0;
  12019. }
  12020. }
  12021. }
  12022. ggml_vec_scale_f32(M, S, scale);
  12023. void * grad_q = (char *) dst->data;
  12024. void * grad_k = (char *) dst->data + nb0*D*N*neq2*neq3;
  12025. void * grad_v = (char *) dst->data + nb0*D*N*neq2*neq3 + nb0*D*M*neq2*neq3;
  12026. const size_t nbgq1 = nb0*neq0;
  12027. const size_t nbgq2 = nb0*neq0*neq1;
  12028. const size_t nbgq3 = nb0*neq0*neq1*neq2;
  12029. const size_t nbgk1 = nb0*nek0;
  12030. const size_t nbgk2 = nb0*nek0*nek1;
  12031. const size_t nbgk3 = nb0*nek0*nek1*neq2;
  12032. const size_t nbgv1 = nb0*nev0;
  12033. const size_t nbgv2 = nb0*nev0*nev1;
  12034. const size_t nbgv3 = nb0*nev0*nev1*neq2;
  12035. // S shape [M,1]
  12036. // SM shape [M,1]
  12037. // kcur shape [D,M]
  12038. // qcur shape [D,1]
  12039. // vcur shape [M,D]
  12040. //
  12041. // grad[q][:D,iq1,iq2,iq3] += S @ kcur
  12042. // grad[q][:D,iq1,iq2,iq3] += shape[M,1] @ shape[D,M]
  12043. // grad[q][:D,iq1,iq2,iq3] += S[ic] * kcur[:D,ic]
  12044. //
  12045. //// grad[q][ic,iq1,iq2,iq3] += dot(kcur[:,ic],S.T)
  12046. //// grad[q][ic,iq1,iq2,iq3] += dot(k[:D,ic,iq2,iq3],S.T)
  12047. for (int64_t ic = 0; ic < M; ++ic) {
  12048. // dst indices
  12049. const int i1 = iq1;
  12050. const int i2 = iq2;
  12051. const int i3 = iq3;
  12052. ggml_vec_mad_f32(D,
  12053. (float *) ((char *) grad_q + (i1*nbgq1 + i2*nbgq2 + i3*nbgq3)),
  12054. (float *) ((char *) k->data + (ic*nbk1 + i2*nbk2 + i3*nbk3)),
  12055. S[ic]);
  12056. }
  12057. // grad[k][:D,:M,iq2,iq3] += S.T @ qcur
  12058. // grad[k][:D,ic,iq2,iq3] += S.T[0,ic] * qcur[:D,0]
  12059. // grad[k][:D,ic,iq2,iq3] += S[ic] * qcur[:D,0]
  12060. for (int64_t ic = 0; ic < M; ++ic) {
  12061. // dst indices
  12062. const int i1 = iq1;
  12063. const int i2 = iq2;
  12064. const int i3 = iq3;
  12065. // ggml_vec_set_f32(D,
  12066. // (float *) ((char *) grad_k + (ic*nbgk1 + i2*nbgk2 + i3*nbgk3)),
  12067. // 0);
  12068. ggml_vec_mad_f32(D,
  12069. (float *) ((char *) grad_k + (ic*nbgk1 + i2*nbgk2 + i3*nbgk3)),
  12070. (float *) ((char *) q->data + (i1*nbq1 + i2*nbq2 + i3*nbq3)),
  12071. S[ic]);
  12072. }
  12073. // grad[v][:M,:D,iq2,iq3] += d[:D,iq1,iq2,iq3].T @ SM
  12074. // grad[v][:M,ic,iq2,iq3] += d[:D,iq1,iq2,iq3].T[0,ic] * SM[:M]
  12075. // grad[v][:M,ic,iq2,iq3] += d[ic,iq1,iq2,iq3] * SM[:M]
  12076. for (int64_t ic = 0; ic < D; ++ic) {
  12077. // dst indices
  12078. const int i1 = iq1;
  12079. const int i2 = iq2;
  12080. const int i3 = iq3;
  12081. // ggml_vec_set_f32(M,
  12082. // (float *) ((char *) grad_v + ( ic*nbgv1 + i2*nbgv2 + i3*nbgv3)),
  12083. // 0);
  12084. ggml_vec_mad_f32(M,
  12085. (float *) ((char *) grad_v + ( ic*nbgv1 + i2*nbgv2 + i3*nbgv3)),
  12086. SM,
  12087. *(float *) ((char *) d->data + (ic*nbd0 + i1*nbd1 + i2*nbd2 + i3*nbd3)));
  12088. }
  12089. }
  12090. }
  12091. }
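// Illustrative sketch only (not used anywhere in ggml): the per-row softmax backward identity
// applied above with ggml_vec_dot/acc1/mul, i.e. grad[S3] = S4 * (grad[S4] - dot(S4, grad[S4])).
// The function and parameter names below are made up for this example.
static void softmax_backward_row_sketch(int n, float * dx, const float * y, const float * dy) {
    // y  : softmax output of one row (S4/SM above)
    // dy : gradient flowing into the softmax output (grad[S4])
    // dx : resulting gradient w.r.t. the softmax input (grad[S3])
    float dot = 0.0f;
    for (int i = 0; i < n; ++i) {
        dot += y[i]*dy[i];
    }
    for (int i = 0; i < n; ++i) {
        dx[i] = y[i]*(dy[i] - dot);
    }
}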
  12092. static void ggml_compute_forward_flash_attn_back(
  12093. const struct ggml_compute_params * params,
  12094. const struct ggml_tensor * q,
  12095. const struct ggml_tensor * k,
  12096. const struct ggml_tensor * v,
  12097. const struct ggml_tensor * d,
  12098. const bool masked,
  12099. struct ggml_tensor * dst) {
  12100. switch (q->type) {
  12101. case GGML_TYPE_F32:
  12102. {
  12103. ggml_compute_forward_flash_attn_back_f32(params, q, k, v, d, masked, dst);
  12104. } break;
  12105. default:
  12106. {
  12107. GGML_ASSERT(false);
  12108. } break;
  12109. }
  12110. }
  12111. // ggml_compute_forward_win_part
  12112. static void ggml_compute_forward_win_part_f32(
  12113. const struct ggml_compute_params * params,
  12114. const struct ggml_tensor * src0,
  12115. struct ggml_tensor * dst) {
  12116. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  12117. return;
  12118. }
  12119. GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne);
  12120. GGML_TENSOR_LOCALS(int64_t, ne, dst, ne);
  12121. const int32_t nep0 = ((const int32_t *)(dst->op_params))[0];
  12122. const int32_t nep1 = ((const int32_t *)(dst->op_params))[1];
  12123. const int32_t w = ((const int32_t *)(dst->op_params))[2];
  12124. assert(ne00 == ne0);
  12125. assert(ne3 == nep0*nep1);
  12126. // TODO: optimize / multi-thread
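// for example: with ne01 == ne02 == 14 and w == 8 (so nep0 == nep1 == 2 and ne1 == ne2 == 8),
// dst holds 4 windows of 8x8; window (px,py) reads src0 at i01 == px*w + i1, i02 == py*w + i2,
// and positions falling outside the 14x14 source are zero-padded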
  12127. for (int py = 0; py < nep1; ++py) {
  12128. for (int px = 0; px < nep0; ++px) {
  12129. const int64_t i3 = py*nep0 + px;
  12130. for (int64_t i2 = 0; i2 < ne2; ++i2) {
  12131. for (int64_t i1 = 0; i1 < ne1; ++i1) {
  12132. for (int64_t i0 = 0; i0 < ne0; ++i0) {
  12133. const int64_t i02 = py*w + i2;
  12134. const int64_t i01 = px*w + i1;
  12135. const int64_t i00 = i0;
  12136. const int64_t i = i3*ne2*ne1*ne0 + i2*ne1*ne0 + i1*ne0 + i0;
  12137. const int64_t j = i02*ne01*ne00 + i01*ne00 + i00;
  12138. if (py*w + i2 >= ne02 || px*w + i1 >= ne01) {
  12139. ((float *) dst->data)[i] = 0.0f;
  12140. } else {
  12141. ((float *) dst->data)[i] = ((float *) src0->data)[j];
  12142. }
  12143. }
  12144. }
  12145. }
  12146. }
  12147. }
  12148. }
  12149. static void ggml_compute_forward_win_part(
  12150. const struct ggml_compute_params * params,
  12151. const struct ggml_tensor * src0,
  12152. struct ggml_tensor * dst) {
  12153. switch (src0->type) {
  12154. case GGML_TYPE_F32:
  12155. {
  12156. ggml_compute_forward_win_part_f32(params, src0, dst);
  12157. } break;
  12158. default:
  12159. {
  12160. GGML_ASSERT(false);
  12161. } break;
  12162. }
  12163. }
  12164. // ggml_compute_forward_win_unpart
  12165. static void ggml_compute_forward_win_unpart_f32(
  12166. const struct ggml_compute_params * params,
  12167. const struct ggml_tensor * src0,
  12168. struct ggml_tensor * dst) {
  12169. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  12170. return;
  12171. }
  12172. GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne);
  12173. GGML_TENSOR_LOCALS(int64_t, ne, dst, ne);
  12174. const int32_t w = ((const int32_t *)(dst->op_params))[0];
  12175. // padding
  12176. const int px = (w - ne1%w)%w;
  12177. //const int py = (w - ne2%w)%w;
  12178. const int npx = (px + ne1)/w;
  12179. //const int npy = (py + ne2)/w;
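// e.g. ne1 == 14 with w == 8: px == (8 - 14%8)%8 == 2 and npx == (2 + 14)/8 == 2,
// i.e. the partitioned source had been padded from 14 up to 16 columns (2 windows of 8)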
  12180. assert(ne0 == ne00);
  12181. // TODO: optimize / multi-thread
  12182. for (int64_t i2 = 0; i2 < ne2; ++i2) {
  12183. for (int64_t i1 = 0; i1 < ne1; ++i1) {
  12184. for (int64_t i0 = 0; i0 < ne0; ++i0) {
  12185. const int ip2 = i2/w;
  12186. const int ip1 = i1/w;
  12187. const int64_t i02 = i2%w;
  12188. const int64_t i01 = i1%w;
  12189. const int64_t i00 = i0;
  12190. const int64_t i = (ip2*npx + ip1)*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00 + i00;
  12191. const int64_t j = i2*ne1*ne0 + i1*ne0 + i0;
  12192. ((float *) dst->data)[j] = ((float *) src0->data)[i];
  12193. }
  12194. }
  12195. }
  12196. }
  12197. static void ggml_compute_forward_win_unpart(
  12198. const struct ggml_compute_params * params,
  12199. const struct ggml_tensor * src0,
  12200. struct ggml_tensor * dst) {
  12201. switch (src0->type) {
  12202. case GGML_TYPE_F32:
  12203. {
  12204. ggml_compute_forward_win_unpart_f32(params, src0, dst);
  12205. } break;
  12206. default:
  12207. {
  12208. GGML_ASSERT(false);
  12209. } break;
  12210. }
  12211. }
12212. // ggml_compute_forward_unary
  12213. static void ggml_compute_forward_unary(
  12214. const struct ggml_compute_params * params,
  12215. const struct ggml_tensor * src0,
  12216. struct ggml_tensor * dst) {
  12217. const enum ggml_unary_op op = ggml_get_unary_op(dst);
  12218. switch (op) {
  12219. case GGML_UNARY_OP_ABS:
  12220. {
  12221. ggml_compute_forward_abs(params, src0, dst);
  12222. } break;
  12223. case GGML_UNARY_OP_SGN:
  12224. {
  12225. ggml_compute_forward_sgn(params, src0, dst);
  12226. } break;
  12227. case GGML_UNARY_OP_NEG:
  12228. {
  12229. ggml_compute_forward_neg(params, src0, dst);
  12230. } break;
  12231. case GGML_UNARY_OP_STEP:
  12232. {
  12233. ggml_compute_forward_step(params, src0, dst);
  12234. } break;
  12235. case GGML_UNARY_OP_TANH:
  12236. {
  12237. ggml_compute_forward_tanh(params, src0, dst);
  12238. } break;
  12239. case GGML_UNARY_OP_ELU:
  12240. {
  12241. ggml_compute_forward_elu(params, src0, dst);
  12242. } break;
  12243. case GGML_UNARY_OP_RELU:
  12244. {
  12245. ggml_compute_forward_relu(params, src0, dst);
  12246. } break;
  12247. case GGML_UNARY_OP_GELU:
  12248. {
  12249. ggml_compute_forward_gelu(params, src0, dst);
  12250. } break;
  12251. case GGML_UNARY_OP_GELU_QUICK:
  12252. {
  12253. ggml_compute_forward_gelu_quick(params, src0, dst);
  12254. } break;
  12255. case GGML_UNARY_OP_SILU:
  12256. {
  12257. ggml_compute_forward_silu(params, src0, dst);
  12258. } break;
  12259. default:
  12260. {
  12261. GGML_ASSERT(false);
  12262. } break;
  12263. }
  12264. }
  12265. // ggml_compute_forward_get_rel_pos
  12266. static void ggml_compute_forward_get_rel_pos_f16(
  12267. const struct ggml_compute_params * params,
  12268. const struct ggml_tensor * src0,
  12269. struct ggml_tensor * dst) {
  12270. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  12271. return;
  12272. }
  12273. // ref: https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/modeling/image_encoder.py#L292-L322
  12274. GGML_TENSOR_UNARY_OP_LOCALS;
  12275. const int64_t w = ne1;
  12276. ggml_fp16_t * src0_data = (ggml_fp16_t *) src0->data;
  12277. ggml_fp16_t * dst_data = (ggml_fp16_t *) dst->data;
  12278. for (int64_t i2 = 0; i2 < ne2; ++i2) {
  12279. for (int64_t i1 = 0; i1 < ne1; ++i1) {
  12280. const int64_t pos = (w - i1 - 1) + i2;
  12281. for (int64_t i0 = 0; i0 < ne0; ++i0) {
  12282. dst_data[i2*ne1*ne0 + i1*ne0 + i0] = src0_data[pos*ne00 + i0];
  12283. }
  12284. }
  12285. }
  12286. }
  12287. static void ggml_compute_forward_get_rel_pos(
  12288. const struct ggml_compute_params * params,
  12289. const struct ggml_tensor * src0,
  12290. struct ggml_tensor * dst) {
  12291. switch (src0->type) {
  12292. case GGML_TYPE_F16:
  12293. {
  12294. ggml_compute_forward_get_rel_pos_f16(params, src0, dst);
  12295. } break;
  12296. default:
  12297. {
  12298. GGML_ASSERT(false);
  12299. } break;
  12300. }
  12301. }
  12302. // ggml_compute_forward_add_rel_pos
  12303. static void ggml_compute_forward_add_rel_pos_f32(
  12304. const struct ggml_compute_params * params,
  12305. const struct ggml_tensor * src0,
  12306. const struct ggml_tensor * src1,
  12307. const struct ggml_tensor * src2,
  12308. struct ggml_tensor * dst) {
  12309. const bool inplace = (bool) ((int32_t *) dst->op_params)[0];
  12310. if (!inplace && params->type == GGML_TASK_INIT) {
  12311. memcpy((char *) dst->data, (char *) src0->data, ggml_nbytes(dst));
  12312. return;
  12313. }
  12314. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  12315. return;
  12316. }
  12317. int64_t t0 = ggml_perf_time_us();
  12318. UNUSED(t0);
  12319. // ref: https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/modeling/image_encoder.py#L357-L359
  12320. float * src1_data = (float *) src1->data;
  12321. float * src2_data = (float *) src2->data;
  12322. float * dst_data = (float *) dst->data;
  12323. const int64_t ne10 = src1->ne[0];
  12324. const int64_t ne11 = src1->ne[1];
  12325. const int64_t ne12 = src1->ne[2];
  12326. const int64_t ne13 = src1->ne[3];
  12327. const int ith = params->ith;
  12328. const int nth = params->nth;
  12329. // total patches in dst
  12330. const int np = ne13;
  12331. // patches per thread
  12332. const int dp = (np + nth - 1)/nth;
  12333. // patch range for this thread
  12334. const int ip0 = dp*ith;
  12335. const int ip1 = MIN(ip0 + dp, np);
  12336. for (int64_t i13 = ip0; i13 < ip1; ++i13) {
  12337. for (int64_t i12 = 0; i12 < ne12; ++i12) {
  12338. for (int64_t i11 = 0; i11 < ne11; ++i11) {
  12339. const int64_t jp1 = i13*ne12*ne11*ne10 + i12*ne11*ne10 + i11*ne10;
  12340. for (int64_t i10 = 0; i10 < ne10; ++i10) {
  12341. const int64_t jp0 = jp1 + i10;
  12342. const float src1_e = src1_data[jp0];
  12343. const float src2_e = src2_data[jp0];
  12344. const int64_t jdh = jp0 * ne10;
  12345. const int64_t jdw = jdh - (ne10 - 1) * i10;
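// each (src1, src2) element is broadcast over an ne10-sized slice of dst:
// jdh + j walks a contiguous run (adding the "height" term src2_e) while
// jdw + j*ne10 walks the stride-ne10 run through the same block (adding the
// "width" term src1_e), mirroring the rel_h[..., None] + rel_w[..., None, :]
// broadcast in the referenced implementation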
  12346. for (int64_t j = 0; j < ne10; ++j) {
  12347. dst_data[jdh + j ] += src2_e;
  12348. dst_data[jdw + j*ne10] += src1_e;
  12349. }
  12350. }
  12351. }
  12352. }
  12353. }
  12354. }
  12355. static void ggml_compute_forward_add_rel_pos(
  12356. const struct ggml_compute_params * params,
  12357. const struct ggml_tensor * src0,
  12358. const struct ggml_tensor * src1,
  12359. const struct ggml_tensor * src2,
  12360. struct ggml_tensor * dst) {
  12361. switch (src0->type) {
  12362. case GGML_TYPE_F32:
  12363. {
  12364. ggml_compute_forward_add_rel_pos_f32(params, src0, src1, src2, dst);
  12365. } break;
  12366. default:
  12367. {
  12368. GGML_ASSERT(false);
  12369. } break;
  12370. }
  12371. }
  12372. // ggml_compute_forward_map_unary
  12373. static void ggml_compute_forward_map_unary_f32(
  12374. const struct ggml_compute_params * params,
  12375. const struct ggml_tensor * src0,
  12376. struct ggml_tensor * dst,
  12377. const ggml_unary_op_f32_t fun) {
  12378. GGML_ASSERT(ggml_are_same_shape(src0, dst));
  12379. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  12380. return;
  12381. }
  12382. const int n = ggml_nrows(src0);
  12383. const int nc = src0->ne[0];
  12384. assert( dst->nb[0] == sizeof(float));
  12385. assert(src0->nb[0] == sizeof(float));
  12386. for (int i = 0; i < n; i++) {
  12387. fun(nc,
  12388. (float *) ((char *) dst->data + i*( dst->nb[1])),
  12389. (float *) ((char *) src0->data + i*(src0->nb[1])));
  12390. }
  12391. }
  12392. static void ggml_compute_forward_map_unary(
  12393. const struct ggml_compute_params * params,
  12394. const struct ggml_tensor * src0,
  12395. struct ggml_tensor * dst,
  12396. const ggml_unary_op_f32_t fun) {
  12397. switch (src0->type) {
  12398. case GGML_TYPE_F32:
  12399. {
  12400. ggml_compute_forward_map_unary_f32(params, src0, dst, fun);
  12401. } break;
  12402. default:
  12403. {
  12404. GGML_ASSERT(false);
  12405. } break;
  12406. }
  12407. }
  12408. // ggml_compute_forward_map_binary
  12409. static void ggml_compute_forward_map_binary_f32(
  12410. const struct ggml_compute_params * params,
  12411. const struct ggml_tensor * src0,
  12412. const struct ggml_tensor * src1,
  12413. struct ggml_tensor * dst,
  12414. const ggml_binary_op_f32_t fun) {
  12415. assert(params->ith == 0);
  12416. assert(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
  12417. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  12418. return;
  12419. }
  12420. const int n = ggml_nrows(src0);
  12421. const int nc = src0->ne[0];
  12422. assert( dst->nb[0] == sizeof(float));
  12423. assert(src0->nb[0] == sizeof(float));
  12424. assert(src1->nb[0] == sizeof(float));
  12425. for (int i = 0; i < n; i++) {
  12426. fun(nc,
  12427. (float *) ((char *) dst->data + i*( dst->nb[1])),
  12428. (float *) ((char *) src0->data + i*(src0->nb[1])),
  12429. (float *) ((char *) src1->data + i*(src1->nb[1])));
  12430. }
  12431. }
  12432. static void ggml_compute_forward_map_binary(
  12433. const struct ggml_compute_params * params,
  12434. const struct ggml_tensor * src0,
  12435. const struct ggml_tensor * src1,
  12436. struct ggml_tensor * dst,
  12437. const ggml_binary_op_f32_t fun) {
  12438. switch (src0->type) {
  12439. case GGML_TYPE_F32:
  12440. {
  12441. ggml_compute_forward_map_binary_f32(params, src0, src1, dst, fun);
  12442. } break;
  12443. default:
  12444. {
  12445. GGML_ASSERT(false);
  12446. } break;
  12447. }
  12448. }
  12449. // ggml_compute_forward_map_custom1
  12450. static void ggml_compute_forward_map_custom1_f32(
  12451. const struct ggml_compute_params * params,
  12452. const struct ggml_tensor * a,
  12453. struct ggml_tensor * dst,
  12454. const ggml_custom1_op_f32_t fun) {
  12455. assert(params->ith == 0);
  12456. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  12457. return;
  12458. }
  12459. fun(dst, a);
  12460. }
  12461. // ggml_compute_forward_map_custom2
  12462. static void ggml_compute_forward_map_custom2_f32(
  12463. const struct ggml_compute_params * params,
  12464. const struct ggml_tensor * a,
  12465. const struct ggml_tensor * b,
  12466. struct ggml_tensor * dst,
  12467. const ggml_custom2_op_f32_t fun) {
  12468. assert(params->ith == 0);
  12469. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  12470. return;
  12471. }
  12472. fun(dst, a, b);
  12473. }
  12474. // ggml_compute_forward_map_custom3
  12475. static void ggml_compute_forward_map_custom3_f32(
  12476. const struct ggml_compute_params * params,
  12477. const struct ggml_tensor * a,
  12478. const struct ggml_tensor * b,
  12479. const struct ggml_tensor * c,
  12480. struct ggml_tensor * dst,
  12481. const ggml_custom3_op_f32_t fun) {
  12482. assert(params->ith == 0);
  12483. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  12484. return;
  12485. }
  12486. fun(dst, a, b, c);
  12487. }
  12488. // ggml_compute_forward_map_custom1
  12489. static void ggml_compute_forward_map_custom1(
  12490. const struct ggml_compute_params * params,
  12491. const struct ggml_tensor * a,
  12492. struct ggml_tensor * dst) {
  12493. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  12494. return;
  12495. }
  12496. struct ggml_map_custom1_op_params * p = (struct ggml_map_custom1_op_params *) dst->op_params;
  12497. p->fun(dst, a, params->ith, params->nth, p->userdata);
  12498. }
  12499. // ggml_compute_forward_map_custom2
  12500. static void ggml_compute_forward_map_custom2(
  12501. const struct ggml_compute_params * params,
  12502. const struct ggml_tensor * a,
  12503. const struct ggml_tensor * b,
  12504. struct ggml_tensor * dst) {
  12505. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  12506. return;
  12507. }
  12508. struct ggml_map_custom2_op_params * p = (struct ggml_map_custom2_op_params *) dst->op_params;
  12509. p->fun(dst, a, b, params->ith, params->nth, p->userdata);
  12510. }
  12511. // ggml_compute_forward_map_custom3
  12512. static void ggml_compute_forward_map_custom3(
  12513. const struct ggml_compute_params * params,
  12514. const struct ggml_tensor * a,
  12515. const struct ggml_tensor * b,
  12516. const struct ggml_tensor * c,
  12517. struct ggml_tensor * dst) {
  12518. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  12519. return;
  12520. }
  12521. struct ggml_map_custom3_op_params * p = (struct ggml_map_custom3_op_params *) dst->op_params;
  12522. p->fun(dst, a, b, c, params->ith, params->nth, p->userdata);
  12523. }
  12524. // ggml_compute_forward_cross_entropy_loss
  12525. static void ggml_compute_forward_cross_entropy_loss_f32(
  12526. const struct ggml_compute_params * params,
  12527. const struct ggml_tensor * src0,
  12528. const struct ggml_tensor * src1,
  12529. struct ggml_tensor * dst) {
  12530. GGML_ASSERT(ggml_is_contiguous(src0));
  12531. GGML_ASSERT(ggml_is_contiguous(src1));
  12532. GGML_ASSERT(ggml_is_scalar(dst));
  12533. GGML_ASSERT(ggml_are_same_shape(src0, src1));
  12534. const int ith = params->ith;
  12535. const int nth = params->nth;
  12536. float * sums = (float *) params->wdata;
  12537. // TODO: handle transposed/permuted matrices
  12538. const int nc = src0->ne[0];
  12539. const int nr = ggml_nrows(src0);
  12540. GGML_ASSERT(params->wsize >= sizeof(float) * (nth + nth * nc));
  12541. if (params->type == GGML_TASK_INIT) {
  12542. if (ith == 0) {
  12543. memset(sums, 0, sizeof(float) * (nth + nth * nc));
  12544. }
  12545. return;
  12546. }
  12547. if (params->type == GGML_TASK_FINALIZE) {
  12548. if (ith == 0) {
  12549. float * dp = (float *) dst->data;
  12550. ggml_vec_sum_f32(nth, dp, sums);
  12551. dp[0] *= -1.0f / (float) nr;
  12552. }
  12553. return;
  12554. }
  12555. const double eps = 1e-9;
  12556. // rows per thread
  12557. const int dr = (nr + nth - 1)/nth;
  12558. // row range for this thread
  12559. const int ir0 = dr*ith;
  12560. const int ir1 = MIN(ir0 + dr, nr);
  12561. for (int i1 = ir0; i1 < ir1; i1++) {
  12562. float * s0 = (float *)((char *) src0->data + i1*src0->nb[1]);
  12563. float * s1 = (float *)((char *) src1->data + i1*src1->nb[1]);
  12564. float * st = ((float *) params->wdata) + nth + ith*nc;
  12565. #ifndef NDEBUG
  12566. for (int i = 0; i < nc; ++i) {
  12567. //printf("p[%d] = %f\n", i, p[i]);
  12568. assert(!isnan(s0[i]));
  12569. assert(!isnan(s1[i]));
  12570. }
  12571. #endif
  12572. // soft_max
  12573. ggml_float sum = 0.0;
  12574. {
  12575. float max = -INFINITY;
  12576. ggml_vec_max_f32(nc, &max, s0);
  12577. uint16_t scvt; UNUSED(scvt);
  12578. for (int i = 0; i < nc; i++) {
  12579. if (s0[i] == -INFINITY) {
  12580. st[i] = 0.0f;
  12581. } else {
  12582. #ifndef GGML_CROSS_ENTROPY_EXP_FP16
  12583. const float s = s0[i] - max;
  12584. const float val = expf(s);
  12585. #else
  12586. ggml_fp16_t s = GGML_FP32_TO_FP16(s0[i] - max);
  12587. memcpy(&scvt, &s, sizeof(scvt));
  12588. const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt]);
  12589. #endif
  12590. sum += (ggml_float)val;
  12591. st[i] = val;
  12592. }
  12593. }
  12594. assert(sum > 0.0);
  12595. // sum = 1.0/sum;
  12596. }
  12597. // avoid log(0) by rescaling from [0..1] to [eps..1]
  12598. sum = (1.0 - eps) / sum;
  12599. ggml_vec_scale_f32(nc, st, sum);
  12600. ggml_vec_add1_f32(nc, st, st, eps);
  12601. ggml_vec_log_f32(nc, st, st);
  12602. ggml_vec_mul_f32(nc, st, st, s1);
  12603. float st_sum = 0;
  12604. ggml_vec_sum_f32(nc, &st_sum, st);
  12605. sums[ith] += st_sum;
  12606. #ifndef NDEBUG
  12607. for (int i = 0; i < nc; ++i) {
  12608. assert(!isnan(st[i]));
  12609. assert(!isinf(st[i]));
  12610. }
  12611. #endif
  12612. }
  12613. }
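// Illustrative sketch only (not used anywhere in ggml): the per-row term accumulated above is
// sum_i s1[i] * log(softmax(s0)[i] rescaled into [eps..1]); GGML_TASK_FINALIZE then sums the
// per-thread partial sums and multiplies by -1/nr. The helper below returns the already-negated
// per-row contribution; its name and the local eps are assumptions of this example, and the
// special case s0[i] == -INFINITY handled by the real kernel is omitted.
static float cross_entropy_row_sketch(int nc, const float * s0, const float * s1) {
    const double eps = 1e-9;
    float max = -INFINITY;
    for (int i = 0; i < nc; ++i) { max = MAX(max, s0[i]); }
    double sum = 0.0;
    for (int i = 0; i < nc; ++i) { sum += exp((double)(s0[i] - max)); }
    float loss = 0.0f;
    for (int i = 0; i < nc; ++i) {
        // softmax probability rescaled from [0..1] into [eps..1] to avoid log(0)
        const double p = exp((double)(s0[i] - max))/sum*(1.0 - eps) + eps;
        loss -= s1[i]*(float)log(p);
    }
    return loss;
}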
  12614. static void ggml_compute_forward_cross_entropy_loss(
  12615. const struct ggml_compute_params * params,
  12616. const struct ggml_tensor * src0,
  12617. const struct ggml_tensor * src1,
  12618. struct ggml_tensor * dst) {
  12619. switch (src0->type) {
  12620. case GGML_TYPE_F32:
  12621. {
  12622. ggml_compute_forward_cross_entropy_loss_f32(params, src0, src1, dst);
  12623. } break;
  12624. default:
  12625. {
  12626. GGML_ASSERT(false);
  12627. } break;
  12628. }
  12629. }
  12630. // ggml_compute_forward_cross_entropy_loss_back
  12631. static void ggml_compute_forward_cross_entropy_loss_back_f32(
  12632. const struct ggml_compute_params * params,
  12633. const struct ggml_tensor * src0,
  12634. const struct ggml_tensor * src1,
  12635. const struct ggml_tensor * opt0,
  12636. struct ggml_tensor * dst) {
  12637. GGML_ASSERT(ggml_is_contiguous(dst));
  12638. GGML_ASSERT(ggml_is_contiguous(src0));
  12639. GGML_ASSERT(ggml_is_contiguous(src1));
  12640. GGML_ASSERT(ggml_is_contiguous(opt0));
  12641. GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
  12642. const int64_t ith = params->ith;
  12643. const int64_t nth = params->nth;
  12644. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  12645. return;
  12646. }
  12647. const double eps = 1e-9;
  12648. // TODO: handle transposed/permuted matrices
  12649. const int64_t nc = src0->ne[0];
  12650. const int64_t nr = ggml_nrows(src0);
  12651. // rows per thread
  12652. const int64_t dr = (nr + nth - 1)/nth;
  12653. // row range for this thread
  12654. const int64_t ir0 = dr*ith;
  12655. const int64_t ir1 = MIN(ir0 + dr, nr);
  12656. float * d = (float *) opt0->data;
  12657. for (int64_t i1 = ir0; i1 < ir1; i1++) {
  12658. float * ds0 = (float *)((char *) dst->data + i1*dst->nb[1]);
  12659. float * s0 = (float *)((char *) src0->data + i1*src0->nb[1]);
  12660. float * s1 = (float *)((char *) src1->data + i1*src1->nb[1]);
  12661. #ifndef NDEBUG
  12662. for (int i = 0; i < nc; ++i) {
  12663. //printf("p[%d] = %f\n", i, p[i]);
  12664. assert(!isnan(s0[i]));
  12665. assert(!isnan(s1[i]));
  12666. }
  12667. #endif
  12668. // soft_max
  12669. ggml_float sum = 0.0;
  12670. {
  12671. float max = -INFINITY;
  12672. ggml_vec_max_f32(nc, &max, s0);
  12673. uint16_t scvt; UNUSED(scvt);
  12674. for (int i = 0; i < nc; i++) {
  12675. if (s0[i] == -INFINITY) {
  12676. ds0[i] = 0.0f;
  12677. } else {
  12678. #ifndef GGML_CROSS_ENTROPY_EXP_FP16
  12679. const float s = s0[i] - max;
  12680. const float val = expf(s);
  12681. #else
  12682. ggml_fp16_t s = GGML_FP32_TO_FP16(s0[i] - max);
  12683. memcpy(&scvt, &s, sizeof(scvt));
  12684. const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt]);
  12685. #endif
  12686. sum += (ggml_float)val;
  12687. ds0[i] = val;
  12688. }
  12689. }
  12690. assert(sum > 0.0);
  12691. sum = (1.0 - eps)/sum;
  12692. }
  12693. // grad(src0) = (softmax(src0) - src1) * grad(cross_entropy_loss(src0, src1)) / nr
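// at this point ds0 still holds exp(s0 - max); the same [eps..1] rescaling as in the
// forward pass is applied below before subtracting s1, so the gradient uses the same
// rescaled softmax probabilities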
  12694. ggml_vec_scale_f32(nc, ds0, sum);
  12695. ggml_vec_add1_f32(nc, ds0, ds0, eps);
  12696. ggml_vec_sub_f32(nc, ds0, ds0, s1);
  12697. ggml_vec_scale_f32(nc, ds0, d[0] / (float) nr);
  12698. #ifndef NDEBUG
  12699. for (int i = 0; i < nc; ++i) {
  12700. assert(!isnan(ds0[i]));
  12701. assert(!isinf(ds0[i]));
  12702. }
  12703. #endif
  12704. }
  12705. }
  12706. static void ggml_compute_forward_cross_entropy_loss_back(
  12707. const struct ggml_compute_params * params,
  12708. const struct ggml_tensor * src0,
  12709. const struct ggml_tensor * src1,
  12710. const struct ggml_tensor * opt0,
  12711. struct ggml_tensor * dst) {
  12712. switch (src0->type) {
  12713. case GGML_TYPE_F32:
  12714. {
  12715. ggml_compute_forward_cross_entropy_loss_back_f32(params, src0, src1, opt0, dst);
  12716. } break;
  12717. default:
  12718. {
  12719. GGML_ASSERT(false);
  12720. } break;
  12721. }
  12722. }
  12723. /////////////////////////////////
  12724. static void ggml_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor) {
  12725. GGML_ASSERT(params);
  12726. #ifdef GGML_USE_CUBLAS
  12727. bool skip_cpu = ggml_cuda_compute_forward(params, tensor);
  12728. if (skip_cpu) {
  12729. return;
  12730. }
  12731. GGML_ASSERT(tensor->src[0] == NULL || tensor->src[0]->backend == GGML_BACKEND_CPU);
  12732. GGML_ASSERT(tensor->src[1] == NULL || tensor->src[1]->backend == GGML_BACKEND_CPU);
  12733. #endif // GGML_USE_CUBLAS
  12734. switch (tensor->op) {
  12735. case GGML_OP_DUP:
  12736. {
  12737. ggml_compute_forward_dup(params, tensor->src[0], tensor);
  12738. } break;
  12739. case GGML_OP_ADD:
  12740. {
  12741. ggml_compute_forward_add(params, tensor->src[0], tensor->src[1], tensor);
  12742. } break;
  12743. case GGML_OP_ADD1:
  12744. {
  12745. ggml_compute_forward_add1(params, tensor->src[0], tensor->src[1], tensor);
  12746. } break;
  12747. case GGML_OP_ACC:
  12748. {
  12749. ggml_compute_forward_acc(params, tensor->src[0], tensor->src[1], tensor);
  12750. } break;
  12751. case GGML_OP_SUB:
  12752. {
  12753. ggml_compute_forward_sub(params, tensor->src[0], tensor->src[1], tensor);
  12754. } break;
  12755. case GGML_OP_MUL:
  12756. {
  12757. ggml_compute_forward_mul(params, tensor->src[0], tensor->src[1], tensor);
  12758. } break;
  12759. case GGML_OP_DIV:
  12760. {
  12761. ggml_compute_forward_div(params, tensor->src[0], tensor->src[1], tensor);
  12762. } break;
  12763. case GGML_OP_SQR:
  12764. {
  12765. ggml_compute_forward_sqr(params, tensor->src[0], tensor);
  12766. } break;
  12767. case GGML_OP_SQRT:
  12768. {
  12769. ggml_compute_forward_sqrt(params, tensor->src[0], tensor);
  12770. } break;
  12771. case GGML_OP_LOG:
  12772. {
  12773. ggml_compute_forward_log(params, tensor->src[0], tensor);
  12774. } break;
  12775. case GGML_OP_SUM:
  12776. {
  12777. ggml_compute_forward_sum(params, tensor->src[0], tensor);
  12778. } break;
  12779. case GGML_OP_SUM_ROWS:
  12780. {
  12781. ggml_compute_forward_sum_rows(params, tensor->src[0], tensor);
  12782. } break;
  12783. case GGML_OP_MEAN:
  12784. {
  12785. ggml_compute_forward_mean(params, tensor->src[0], tensor);
  12786. } break;
  12787. case GGML_OP_ARGMAX:
  12788. {
  12789. ggml_compute_forward_argmax(params, tensor->src[0], tensor);
  12790. } break;
  12791. case GGML_OP_REPEAT:
  12792. {
  12793. ggml_compute_forward_repeat(params, tensor->src[0], tensor);
  12794. } break;
  12795. case GGML_OP_REPEAT_BACK:
  12796. {
  12797. ggml_compute_forward_repeat_back(params, tensor->src[0], tensor);
  12798. } break;
  12799. case GGML_OP_CONCAT:
  12800. {
  12801. ggml_compute_forward_concat(params, tensor->src[0], tensor->src[1], tensor);
  12802. } break;
  12803. case GGML_OP_SILU_BACK:
  12804. {
  12805. ggml_compute_forward_silu_back(params, tensor->src[0], tensor->src[1], tensor);
  12806. } break;
  12807. case GGML_OP_NORM:
  12808. {
  12809. ggml_compute_forward_norm(params, tensor->src[0], tensor);
  12810. } break;
  12811. case GGML_OP_RMS_NORM:
  12812. {
  12813. ggml_compute_forward_rms_norm(params, tensor->src[0], tensor);
  12814. } break;
  12815. case GGML_OP_RMS_NORM_BACK:
  12816. {
  12817. ggml_compute_forward_rms_norm_back(params, tensor->src[0], tensor->src[1], tensor);
  12818. } break;
  12819. case GGML_OP_GROUP_NORM:
  12820. {
  12821. ggml_compute_forward_group_norm(params, tensor->src[0], tensor);
  12822. } break;
  12823. case GGML_OP_MUL_MAT:
  12824. {
  12825. ggml_compute_forward_mul_mat(params, tensor->src[0], tensor->src[1], tensor);
  12826. } break;
  12827. case GGML_OP_OUT_PROD:
  12828. {
  12829. ggml_compute_forward_out_prod(params, tensor->src[0], tensor->src[1], tensor);
  12830. } break;
  12831. case GGML_OP_SCALE:
  12832. {
  12833. ggml_compute_forward_scale(params, tensor->src[0], tensor->src[1], tensor);
  12834. } break;
  12835. case GGML_OP_SET:
  12836. {
  12837. ggml_compute_forward_set(params, tensor->src[0], tensor->src[1], tensor);
  12838. } break;
  12839. case GGML_OP_CPY:
  12840. {
  12841. ggml_compute_forward_cpy(params, tensor->src[0], tensor);
  12842. } break;
  12843. case GGML_OP_CONT:
  12844. {
  12845. ggml_compute_forward_cont(params, tensor->src[0], tensor);
  12846. } break;
  12847. case GGML_OP_RESHAPE:
  12848. {
  12849. ggml_compute_forward_reshape(params, tensor->src[0], tensor);
  12850. } break;
  12851. case GGML_OP_VIEW:
  12852. {
  12853. ggml_compute_forward_view(params, tensor->src[0]);
  12854. } break;
  12855. case GGML_OP_PERMUTE:
  12856. {
  12857. ggml_compute_forward_permute(params, tensor->src[0]);
  12858. } break;
  12859. case GGML_OP_TRANSPOSE:
  12860. {
  12861. ggml_compute_forward_transpose(params, tensor->src[0]);
  12862. } break;
  12863. case GGML_OP_GET_ROWS:
  12864. {
  12865. ggml_compute_forward_get_rows(params, tensor->src[0], tensor->src[1], tensor);
  12866. } break;
  12867. case GGML_OP_GET_ROWS_BACK:
  12868. {
  12869. ggml_compute_forward_get_rows_back(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor);
  12870. } break;
  12871. case GGML_OP_DIAG:
  12872. {
  12873. ggml_compute_forward_diag(params, tensor->src[0], tensor);
  12874. } break;
  12875. case GGML_OP_DIAG_MASK_INF:
  12876. {
  12877. ggml_compute_forward_diag_mask_inf(params, tensor->src[0], tensor);
  12878. } break;
  12879. case GGML_OP_DIAG_MASK_ZERO:
  12880. {
  12881. ggml_compute_forward_diag_mask_zero(params, tensor->src[0], tensor);
  12882. } break;
  12883. case GGML_OP_SOFT_MAX:
  12884. {
  12885. ggml_compute_forward_soft_max(params, tensor->src[0], tensor);
  12886. } break;
  12887. case GGML_OP_SOFT_MAX_BACK:
  12888. {
  12889. ggml_compute_forward_soft_max_back(params, tensor->src[0], tensor->src[1], tensor);
  12890. } break;
  12891. case GGML_OP_ROPE:
  12892. {
  12893. ggml_compute_forward_rope(params, tensor->src[0], tensor);
  12894. } break;
  12895. case GGML_OP_ROPE_BACK:
  12896. {
  12897. ggml_compute_forward_rope_back(params, tensor->src[0], tensor);
  12898. } break;
  12899. case GGML_OP_ALIBI:
  12900. {
  12901. ggml_compute_forward_alibi(params, tensor->src[0], tensor);
  12902. } break;
  12903. case GGML_OP_CLAMP:
  12904. {
  12905. ggml_compute_forward_clamp(params, tensor->src[0], tensor);
  12906. } break;
  12907. case GGML_OP_CONV_1D:
  12908. {
  12909. ggml_compute_forward_conv_1d(params, tensor->src[0], tensor->src[1], tensor);
  12910. } break;
  12911. case GGML_OP_CONV_2D:
  12912. {
  12913. ggml_compute_forward_conv_2d(params, tensor->src[0], tensor->src[1], tensor);
  12914. } break;
  12915. case GGML_OP_CONV_TRANSPOSE_2D:
  12916. {
  12917. ggml_compute_forward_conv_transpose_2d(params, tensor->src[0], tensor->src[1], tensor);
  12918. } break;
  12919. case GGML_OP_POOL_1D:
  12920. {
  12921. ggml_compute_forward_pool_1d(params, tensor->src[0], tensor);
  12922. } break;
  12923. case GGML_OP_POOL_2D:
  12924. {
  12925. ggml_compute_forward_pool_2d(params, tensor->src[0], tensor);
  12926. } break;
  12927. case GGML_OP_UPSCALE:
  12928. {
  12929. ggml_compute_forward_upscale(params, tensor->src[0], tensor);
  12930. } break;
  12931. case GGML_OP_FLASH_ATTN:
  12932. {
  12933. const int32_t t = ggml_get_op_params_i32(tensor, 0);
  12934. GGML_ASSERT(t == 0 || t == 1);
  12935. const bool masked = t != 0;
  12936. ggml_compute_forward_flash_attn(params, tensor->src[0], tensor->src[1], tensor->src[2], masked, tensor);
  12937. } break;
  12938. case GGML_OP_FLASH_FF:
  12939. {
  12940. ggml_compute_forward_flash_ff(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor->src[3], tensor->src[4], tensor);
  12941. } break;
  12942. case GGML_OP_FLASH_ATTN_BACK:
  12943. {
  12944. int32_t t = ggml_get_op_params_i32(tensor, 0);
  12945. GGML_ASSERT(t == 0 || t == 1);
  12946. bool masked = t != 0;
  12947. ggml_compute_forward_flash_attn_back(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor->src[3], masked, tensor);
  12948. } break;
  12949. case GGML_OP_WIN_PART:
  12950. {
  12951. ggml_compute_forward_win_part(params, tensor->src[0], tensor);
  12952. } break;
  12953. case GGML_OP_WIN_UNPART:
  12954. {
  12955. ggml_compute_forward_win_unpart(params, tensor->src[0], tensor);
  12956. } break;
  12957. case GGML_OP_UNARY:
  12958. {
  12959. ggml_compute_forward_unary(params, tensor->src[0], tensor);
  12960. } break;
  12961. case GGML_OP_GET_REL_POS:
  12962. {
  12963. ggml_compute_forward_get_rel_pos(params, tensor->src[0], tensor);
  12964. } break;
  12965. case GGML_OP_ADD_REL_POS:
  12966. {
  12967. ggml_compute_forward_add_rel_pos(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor);
  12968. } break;
  12969. case GGML_OP_MAP_UNARY:
  12970. {
  12971. ggml_unary_op_f32_t fun;
  12972. memcpy(&fun, tensor->op_params, sizeof(fun));
  12973. ggml_compute_forward_map_unary(params, tensor->src[0], tensor, fun);
  12974. }
  12975. break;
  12976. case GGML_OP_MAP_BINARY:
  12977. {
  12978. ggml_binary_op_f32_t fun;
  12979. memcpy(&fun, tensor->op_params, sizeof(fun));
  12980. ggml_compute_forward_map_binary(params, tensor->src[0], tensor->src[1], tensor, fun);
  12981. }
  12982. break;
  12983. case GGML_OP_MAP_CUSTOM1_F32:
  12984. {
  12985. ggml_custom1_op_f32_t fun;
  12986. memcpy(&fun, tensor->op_params, sizeof(fun));
  12987. ggml_compute_forward_map_custom1_f32(params, tensor->src[0], tensor, fun);
  12988. }
  12989. break;
  12990. case GGML_OP_MAP_CUSTOM2_F32:
  12991. {
  12992. ggml_custom2_op_f32_t fun;
  12993. memcpy(&fun, tensor->op_params, sizeof(fun));
  12994. ggml_compute_forward_map_custom2_f32(params, tensor->src[0], tensor->src[1], tensor, fun);
  12995. }
  12996. break;
  12997. case GGML_OP_MAP_CUSTOM3_F32:
  12998. {
  12999. ggml_custom3_op_f32_t fun;
  13000. memcpy(&fun, tensor->op_params, sizeof(fun));
  13001. ggml_compute_forward_map_custom3_f32(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor, fun);
  13002. }
  13003. break;
  13004. case GGML_OP_MAP_CUSTOM1:
  13005. {
  13006. ggml_compute_forward_map_custom1(params, tensor->src[0], tensor);
  13007. }
  13008. break;
  13009. case GGML_OP_MAP_CUSTOM2:
  13010. {
  13011. ggml_compute_forward_map_custom2(params, tensor->src[0], tensor->src[1], tensor);
  13012. }
  13013. break;
  13014. case GGML_OP_MAP_CUSTOM3:
  13015. {
  13016. ggml_compute_forward_map_custom3(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor);
  13017. }
  13018. break;
  13019. case GGML_OP_CROSS_ENTROPY_LOSS:
  13020. {
  13021. ggml_compute_forward_cross_entropy_loss(params, tensor->src[0], tensor->src[1], tensor);
  13022. }
  13023. break;
  13024. case GGML_OP_CROSS_ENTROPY_LOSS_BACK:
  13025. {
  13026. ggml_compute_forward_cross_entropy_loss_back(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor);
  13027. }
  13028. break;
  13029. case GGML_OP_NONE:
  13030. {
  13031. // nop
  13032. } break;
  13033. case GGML_OP_COUNT:
  13034. {
  13035. GGML_ASSERT(false);
  13036. } break;
  13037. }
  13038. }
  13039. ////////////////////////////////////////////////////////////////////////////////
  13040. static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor * tensor, bool inplace) {
  13041. struct ggml_tensor * src0 = tensor->src[0];
  13042. struct ggml_tensor * src1 = tensor->src[1];
  13043. switch (tensor->op) {
  13044. case GGML_OP_DUP:
  13045. {
  13046. if (src0->grad) {
  13047. src0->grad = ggml_add_impl(ctx, src0->grad, tensor->grad, inplace);
  13048. }
  13049. } break;
  13050. case GGML_OP_ADD:
  13051. {
  13052. if (src0->grad) {
  13053. src0->grad = ggml_add_impl(ctx, src0->grad, tensor->grad, inplace);
  13054. }
  13055. if (src1->grad) {
  13056. src1->grad = ggml_add_impl(ctx, src1->grad, tensor->grad, inplace);
  13057. }
  13058. } break;
  13059. case GGML_OP_ADD1:
  13060. {
  13061. if (src0->grad) {
  13062. src0->grad = ggml_add_impl(ctx, src0->grad, tensor->grad, inplace);
  13063. }
  13064. if (src1->grad) {
  13065. src1->grad = ggml_add_impl(ctx,
  13066. src1->grad,
  13067. ggml_mean(ctx, tensor->grad), // TODO: should probably be sum instead of mean
  13068. inplace);
  13069. }
  13070. } break;
  13071. case GGML_OP_ACC:
  13072. {
  13073. if (src0->grad) {
  13074. src0->grad = ggml_add_impl(ctx, src0->grad, tensor->grad, inplace);
  13075. }
  13076. if (src1->grad) {
  13077. const size_t nb1 = ((int32_t *) tensor->op_params)[0];
  13078. const size_t nb2 = ((int32_t *) tensor->op_params)[1];
  13079. const size_t nb3 = ((int32_t *) tensor->op_params)[2];
  13080. const size_t offset = ((int32_t *) tensor->op_params)[3];
  13081. struct ggml_tensor * tensor_grad_view = ggml_view_4d(ctx,
  13082. tensor->grad,
  13083. src1->grad->ne[0],
  13084. src1->grad->ne[1],
  13085. src1->grad->ne[2],
  13086. src1->grad->ne[3],
  13087. nb1, nb2, nb3, offset);
  13088. src1->grad =
  13089. ggml_add_impl(ctx,
  13090. src1->grad,
  13091. ggml_reshape(ctx,
  13092. ggml_cont(ctx, tensor_grad_view),
  13093. src1->grad),
  13094. inplace);
  13095. }
  13096. } break;
  13097. case GGML_OP_SUB:
  13098. {
  13099. if (src0->grad) {
  13100. src0->grad = ggml_add_impl(ctx, src0->grad, tensor->grad, inplace);
  13101. }
  13102. if (src1->grad) {
  13103. src1->grad = ggml_sub_impl(ctx, src1->grad, tensor->grad, inplace);
  13104. }
  13105. } break;
  13106. case GGML_OP_MUL:
  13107. {
  13108. if (src0->grad) {
  13109. src0->grad =
  13110. ggml_add_impl(ctx,
  13111. src0->grad,
  13112. ggml_mul(ctx, src1, tensor->grad),
  13113. inplace);
  13114. }
  13115. if (src1->grad) {
  13116. src1->grad =
  13117. ggml_add_impl(ctx,
  13118. src1->grad,
  13119. ggml_mul(ctx, src0, tensor->grad),
  13120. inplace);
  13121. }
  13122. } break;
  13123. case GGML_OP_DIV:
  13124. {
  13125. if (src0->grad) {
  13126. src0->grad =
  13127. ggml_add_impl(ctx,
  13128. src0->grad,
  13129. ggml_div(ctx, tensor->grad, src1),
  13130. inplace);
  13131. }
  13132. if (src1->grad) {
  13133. src1->grad =
  13134. ggml_sub_impl(ctx,
  13135. src1->grad,
  13136. ggml_mul(ctx,
  13137. tensor->grad,
  13138. ggml_div(ctx, tensor, src1)),
  13139. inplace);
  13140. }
  13141. } break;
  13142. case GGML_OP_SQR:
  13143. {
  13144. if (src0->grad) {
  13145. src0->grad =
  13146. ggml_add_impl(ctx,
  13147. src0->grad,
  13148. ggml_scale(ctx,
  13149. ggml_mul(ctx, src0, tensor->grad),
  13150. ggml_new_f32(ctx, 2.0f)),
  13151. inplace);
  13152. }
  13153. } break;
  13154. case GGML_OP_SQRT:
  13155. {
  13156. if (src0->grad) {
  13157. src0->grad =
  13158. ggml_add_impl(ctx,
  13159. src0->grad,
  13160. ggml_scale(ctx,
  13161. ggml_div(ctx,
  13162. tensor->grad,
  13163. tensor),
  13164. ggml_new_f32(ctx, 0.5f)),
  13165. inplace);
  13166. }
  13167. } break;
  13168. case GGML_OP_LOG:
  13169. {
  13170. if (src0->grad) {
  13171. src0->grad =
  13172. ggml_add_impl(ctx,
  13173. src0->grad,
  13174. ggml_div(ctx,
  13175. tensor->grad,
  13176. src0),
  13177. inplace);
  13178. }
  13179. } break;
  13180. case GGML_OP_SUM:
  13181. {
  13182. if (src0->grad) {
  13183. src0->grad =
  13184. ggml_add1_impl(ctx,
  13185. src0->grad,
  13186. tensor->grad,
  13187. inplace);
  13188. }
  13189. } break;
  13190. case GGML_OP_SUM_ROWS:
  13191. {
  13192. if (src0->grad) {
  13193. src0->grad =
  13194. ggml_add_impl(ctx,
  13195. src0->grad,
  13196. ggml_repeat(ctx,
  13197. tensor->grad,
  13198. src0->grad),
  13199. inplace);
  13200. }
  13201. } break;
  13202. case GGML_OP_MEAN:
  13203. case GGML_OP_ARGMAX:
  13204. {
  13205. GGML_ASSERT(false); // TODO: implement
  13206. } break;
  13207. case GGML_OP_REPEAT:
  13208. {
  13209. // necessary for llama
  13210. if (src0->grad) {
  13211. src0->grad = ggml_add_impl(ctx,
  13212. src0->grad,
  13213. ggml_repeat_back(ctx, tensor->grad, src0->grad),
  13214. inplace);
  13215. }
  13216. } break;
  13217. case GGML_OP_REPEAT_BACK:
  13218. {
  13219. if (src0->grad) {
  13220. // TODO: test this
  13221. src0->grad = ggml_add_impl(ctx,
  13222. src0->grad,
  13223. ggml_repeat(ctx, tensor->grad, src0->grad),
  13224. inplace);
  13225. }
  13226. } break;
  13227. case GGML_OP_CONCAT:
  13228. {
  13229. GGML_ASSERT(false); // TODO: implement
  13230. } break;
  13231. case GGML_OP_SILU_BACK:
  13232. {
  13233. GGML_ASSERT(false); // TODO: not implemented
  13234. } break;
  13235. case GGML_OP_NORM:
  13236. {
  13237. GGML_ASSERT(false); // TODO: not implemented
  13238. } break;
  13239. case GGML_OP_RMS_NORM:
  13240. {
  13241. // necessary for llama
  13242. if (src0->grad) {
  13243. float eps;
  13244. memcpy(&eps, tensor->op_params, sizeof(float));
  13245. src0->grad = ggml_add_impl(ctx,
  13246. src0->grad,
  13247. ggml_rms_norm_back(ctx, src0, tensor->grad, eps),
  13248. inplace);
  13249. }
  13250. } break;
  13251. case GGML_OP_RMS_NORM_BACK:
  13252. {
  13253. GGML_ASSERT(false); // TODO: not implemented
  13254. } break;
  13255. case GGML_OP_GROUP_NORM:
  13256. {
  13257. GGML_ASSERT(false); // TODO: not implemented
  13258. } break;
  13259. case GGML_OP_MUL_MAT:
  13260. {
  13261. // https://cs231n.github.io/optimization-2/#staged
  13262. // # forward pass
  13263. // s0 = np.random.randn(5, 10)
  13264. // s1 = np.random.randn(10, 3)
  13265. // t = s0.dot(s1)
  13266. // # now suppose we had the gradient on t from above in the circuit
  13267. // dt = np.random.randn(*t.shape) # same shape as t
  13268. // ds0 = dt.dot(s1.T) #.T gives the transpose of the matrix
13269. // ds1 = s0.T.dot(dt) # shape check: s0.T is (10,5), dt is (5,3) -> ds1 is (10,3)
  13270. // tensor.shape [m,p]
  13271. // src0.shape [n,m]
  13272. // src1.shape [n,p]
  13273. // necessary for llama
  13274. if (src0->grad) {
  13275. src0->grad =
  13276. ggml_add_impl(ctx,
  13277. src0->grad,
  13278. ggml_out_prod(ctx, // [n,m]
  13279. src1, // [n,p]
  13280. tensor->grad), // [m,p]
  13281. inplace);
  13282. }
  13283. if (src1->grad) {
  13284. src1->grad =
  13285. ggml_add_impl(ctx,
  13286. src1->grad,
  13287. // ggml_mul_mat(ctx, // [n,p]
  13288. // ggml_cont(ctx, // [m,n]
  13289. // ggml_transpose(ctx, src0)), // [m,n]
  13290. // tensor->grad), // [m,p]
  13291. // // when src0 is bigger than tensor->grad (this is mostly the case in llama),
  13292. // // avoid transpose of src0, rather transpose smaller tensor->grad
  13293. // // and then use ggml_out_prod
  13294. ggml_out_prod(ctx, // [n,p]
  13295. src0, // [n,m]
  13296. ggml_transpose(ctx, // [p,m]
  13297. tensor->grad)), // [m,p]
  13298. inplace);
  13299. }
  13300. } break;
  13301. case GGML_OP_OUT_PROD:
  13302. {
  13303. GGML_ASSERT(false); // TODO: not implemented
  13304. } break;
  13305. case GGML_OP_SCALE:
  13306. {
  13307. // necessary for llama
  13308. if (src0->grad) {
  13309. src0->grad =
  13310. ggml_add_impl(ctx,
  13311. src0->grad,
  13312. ggml_scale_impl(ctx, tensor->grad, src1, false),
  13313. inplace);
  13314. }
  13315. if (src1->grad) {
  13316. src1->grad =
  13317. ggml_add_impl(ctx,
  13318. src1->grad,
  13319. ggml_sum(ctx, ggml_mul_impl(ctx, tensor->grad, src0, false)),
  13320. inplace);
  13321. }
  13322. } break;
  13323. case GGML_OP_SET:
  13324. {
  13325. const size_t nb1 = ((int32_t *) tensor->op_params)[0];
  13326. const size_t nb2 = ((int32_t *) tensor->op_params)[1];
  13327. const size_t nb3 = ((int32_t *) tensor->op_params)[2];
  13328. const size_t offset = ((int32_t *) tensor->op_params)[3];
  13329. struct ggml_tensor * tensor_grad_view = NULL;
  13330. if (src0->grad || src1->grad) {
  13331. GGML_ASSERT(src0->type == tensor->type);
  13332. GGML_ASSERT(tensor->grad->type == tensor->type);
  13333. GGML_ASSERT(tensor->grad->type == src1->grad->type);
  13334. tensor_grad_view = ggml_view_4d(ctx,
  13335. tensor->grad,
  13336. src1->grad->ne[0],
  13337. src1->grad->ne[1],
  13338. src1->grad->ne[2],
  13339. src1->grad->ne[3],
  13340. nb1, nb2, nb3, offset);
  13341. }
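// grad w.r.t. src0 is tensor->grad with the overwritten region zeroed out
// (done by accumulating the negated view back onto it), while grad w.r.t. src1
// is that same view of tensor->grad, made contiguous and reshaped to src1's shape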
  13342. if (src0->grad) {
  13343. src0->grad = ggml_add_impl(ctx,
  13344. src0->grad,
  13345. ggml_acc_impl(ctx,
  13346. tensor->grad,
  13347. ggml_neg(ctx, tensor_grad_view),
  13348. nb1, nb2, nb3, offset, false),
  13349. inplace);
  13350. }
  13351. if (src1->grad) {
  13352. src1->grad =
  13353. ggml_add_impl(ctx,
  13354. src1->grad,
  13355. ggml_reshape(ctx,
  13356. ggml_cont(ctx, tensor_grad_view),
  13357. src1->grad),
  13358. inplace);
  13359. }
  13360. } break;
  13361. case GGML_OP_CPY:
  13362. {
  13363. // necessary for llama
  13364. // cpy overwrites value of src1 by src0 and returns view(src1)
  13365. // the overwriting is mathematically equivalent to:
  13366. // tensor = src0 * 1 + src1 * 0
  13367. if (src0->grad) {
  13368. // dsrc0 = dtensor * 1
  13369. src0->grad = ggml_add_impl(ctx, src0->grad, tensor->grad, inplace);
  13370. }
  13371. if (src1->grad) {
  13372. // dsrc1 = dtensor * 0 -> noop
  13373. }
  13374. } break;
  13375. case GGML_OP_CONT:
  13376. {
  13377. // same as cpy
  13378. if (src0->grad) {
  13379. GGML_ASSERT(ggml_is_contiguous(src0->grad));
  13380. GGML_ASSERT(ggml_is_contiguous(tensor->grad));
  13381. src0->grad = ggml_add_impl(ctx, src0->grad, tensor->grad, inplace);
  13382. }
  13383. } break;
  13384. case GGML_OP_RESHAPE:
  13385. {
  13386. // necessary for llama
  13387. if (src0->grad) {
  13388. src0->grad =
  13389. ggml_add_impl(ctx, src0->grad,
  13390. ggml_reshape(ctx, tensor->grad, src0->grad),
  13391. inplace);
  13392. }
  13393. } break;
  13394. case GGML_OP_VIEW:
  13395. {
  13396. // necessary for llama
  13397. if (src0->grad) {
  13398. size_t offset;
  13399. memcpy(&offset, tensor->op_params, sizeof(offset));
  13400. size_t nb1 = tensor->nb[1];
  13401. size_t nb2 = tensor->nb[2];
  13402. size_t nb3 = tensor->nb[3];
  13403. if (src0->type != src0->grad->type) {
  13404. // gradient is typically F32, but src0 could be other type
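// e.g. F16 src0 (2 bytes per element) with an F32 grad (4 bytes per element):
// a byte offset of 128 into src0 is element 64, which is byte offset 256 into the grad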
  13405. size_t ng = ggml_element_size(src0->grad);
  13406. size_t n0 = ggml_element_size(src0);
  13407. GGML_ASSERT(offset % n0 == 0);
  13408. GGML_ASSERT(nb1 % n0 == 0);
  13409. GGML_ASSERT(nb2 % n0 == 0);
  13410. GGML_ASSERT(nb3 % n0 == 0);
  13411. offset = (offset / n0) * ng;
  13412. nb1 = (nb1 / n0) * ng;
  13413. nb2 = (nb2 / n0) * ng;
  13414. nb3 = (nb3 / n0) * ng;
  13415. }
  13416. src0->grad = ggml_acc_impl(ctx, src0->grad, tensor->grad, nb1, nb2, nb3, offset, inplace);
  13417. }
  13418. } break;
  13419. case GGML_OP_PERMUTE:
  13420. {
  13421. // necessary for llama
  13422. if (src0->grad) {
  13423. int32_t * axes = (int32_t *) tensor->op_params;
  13424. int axis0 = axes[0] & 0x3;
  13425. int axis1 = axes[1] & 0x3;
  13426. int axis2 = axes[2] & 0x3;
  13427. int axis3 = axes[3] & 0x3;
  13428. int axes_backward[4] = {0,0,0,0};
  13429. axes_backward[axis0] = 0;
  13430. axes_backward[axis1] = 1;
  13431. axes_backward[axis2] = 2;
  13432. axes_backward[axis3] = 3;
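// axes_backward is the inverse permutation: e.g. forward axes (2,0,1,3) yield
// axes_backward (1,2,0,3), which permutes the gradient back into src0's original layout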
  13433. src0->grad =
  13434. ggml_add_impl(ctx, src0->grad,
  13435. ggml_permute(ctx,
  13436. tensor->grad,
  13437. axes_backward[0],
  13438. axes_backward[1],
  13439. axes_backward[2],
  13440. axes_backward[3]),
  13441. inplace);
  13442. }
  13443. } break;
  13444. case GGML_OP_TRANSPOSE:
  13445. {
  13446. // necessary for llama
  13447. if (src0->grad) {
  13448. src0->grad =
  13449. ggml_add_impl(ctx, src0->grad,
  13450. ggml_transpose(ctx, tensor->grad),
  13451. inplace);
  13452. }
  13453. } break;
  13454. case GGML_OP_GET_ROWS:
  13455. {
  13456. // necessary for llama (only for tokenizer)
  13457. if (src0->grad) {
  13458. src0->grad =
  13459. ggml_add_impl(ctx, src0->grad,
  13460. ggml_get_rows_back(ctx, tensor->grad, src1, src0->grad),
  13461. inplace);
  13462. }
  13463. if (src1->grad) {
  13464. // noop
  13465. }
  13466. } break;
  13467. case GGML_OP_GET_ROWS_BACK:
  13468. {
  13469. GGML_ASSERT(false); // TODO: not implemented
  13470. } break;
  13471. case GGML_OP_DIAG:
  13472. {
  13473. GGML_ASSERT(false); // TODO: not implemented
  13474. } break;
  13475. case GGML_OP_DIAG_MASK_INF:
  13476. {
  13477. // necessary for llama
  13478. if (src0->grad) {
  13479. const int n_past = ((int32_t *) tensor->op_params)[0];
  13480. src0->grad =
  13481. ggml_add_impl(ctx, src0->grad,
  13482. ggml_diag_mask_zero_impl(ctx, tensor->grad, n_past, false),
  13483. inplace);
  13484. }
  13485. } break;
  13486. case GGML_OP_DIAG_MASK_ZERO:
  13487. {
  13488. // necessary for llama
  13489. if (src0->grad) {
  13490. const int n_past = ((int32_t *) tensor->op_params)[0];
  13491. src0->grad =
  13492. ggml_add_impl(ctx, src0->grad,
  13493. ggml_diag_mask_zero_impl(ctx, tensor->grad, n_past, false),
  13494. inplace);
  13495. }
  13496. } break;
  13497. case GGML_OP_SOFT_MAX:
  13498. {
  13499. // necessary for llama
  13500. if (src0->grad) {
  13501. src0->grad =
  13502. ggml_add_impl(ctx, src0->grad,
  13503. ggml_soft_max_back(ctx, tensor->grad, tensor),
  13504. inplace);
  13505. }
  13506. } break;
  13507. case GGML_OP_SOFT_MAX_BACK:
  13508. {
  13509. GGML_ASSERT(false); // TODO: not implemented
  13510. } break;
  13511. case GGML_OP_ROPE:
  13512. {
  13513. // necessary for llama
  13514. if (src0->grad) {
  13515. const int n_past = ((int32_t *) tensor->op_params)[0];
  13516. const int n_dims = ((int32_t *) tensor->op_params)[1];
  13517. const int mode = ((int32_t *) tensor->op_params)[2];
  13518. const int n_ctx = ((int32_t *) tensor->op_params)[3];
  13519. float freq_base;
  13520. float freq_scale;
  13521. float xpos_base;
  13522. bool xpos_down;
  13523. memcpy(&freq_base, (int32_t *) tensor->op_params + 4, sizeof(float));
  13524. memcpy(&freq_scale, (int32_t *) tensor->op_params + 5, sizeof(float));
  13525. memcpy(&xpos_base, (int32_t *) tensor->op_params + 6, sizeof(float));
  13526. memcpy(&xpos_down, (int32_t *) tensor->op_params + 7, sizeof(bool));
  13527. src0->grad = ggml_add_impl(ctx,
  13528. src0->grad,
  13529. ggml_rope_back(ctx,
  13530. tensor->grad,
  13531. n_past,
  13532. n_dims,
  13533. mode,
  13534. n_ctx,
  13535. freq_base,
  13536. freq_scale,
  13537. xpos_base,
  13538. xpos_down),
  13539. inplace);
  13540. }
  13541. } break;
  13542. case GGML_OP_ROPE_BACK:
  13543. {
  13544. if (src0->grad) {
  13545. const int n_past = ((int32_t *) tensor->op_params)[0];
  13546. const int n_dims = ((int32_t *) tensor->op_params)[1];
  13547. const int mode = ((int32_t *) tensor->op_params)[2];
  13548. const int n_ctx = ((int32_t *) tensor->op_params)[3];
  13549. float freq_base;
  13550. float freq_scale;
  13551. float xpos_base;
  13552. bool xpos_down;
  13553. memcpy(&freq_base, (int32_t *) tensor->op_params + 4, sizeof(float));
  13554. memcpy(&freq_scale, (int32_t *) tensor->op_params + 5, sizeof(float));
  13555. memcpy(&xpos_base, (int32_t *) tensor->op_params + 6, sizeof(float));
  13556. memcpy(&xpos_down, (int32_t *) tensor->op_params + 7, sizeof(bool));
  13557. src0->grad = ggml_add_impl(ctx,
  13558. src0->grad,
  13559. ggml_rope_impl(ctx,
  13560. tensor->grad,
  13561. n_past,
  13562. n_dims,
  13563. mode,
  13564. n_ctx,
  13565. freq_base,
  13566. freq_scale,
  13567. xpos_base,
  13568. xpos_down,
  13569. false),
  13570. inplace);
  13571. }
  13572. } break;
  13573. case GGML_OP_ALIBI:
  13574. {
  13575. GGML_ASSERT(false); // TODO: not implemented
  13576. } break;
  13577. case GGML_OP_CLAMP:
  13578. {
  13579. GGML_ASSERT(false); // TODO: not implemented
  13580. } break;
  13581. case GGML_OP_CONV_1D:
  13582. {
  13583. GGML_ASSERT(false); // TODO: not implemented
  13584. } break;
  13585. case GGML_OP_CONV_2D:
  13586. {
  13587. GGML_ASSERT(false); // TODO: not implemented
  13588. } break;
  13589. case GGML_OP_CONV_TRANSPOSE_2D:
  13590. {
  13591. GGML_ASSERT(false); // TODO: not implemented
  13592. } break;
  13593. case GGML_OP_POOL_1D:
  13594. {
  13595. GGML_ASSERT(false); // TODO: not implemented
  13596. } break;
  13597. case GGML_OP_POOL_2D:
  13598. {
  13599. GGML_ASSERT(false); // TODO: not implemented
  13600. } break;
  13601. case GGML_OP_UPSCALE:
  13602. {
  13603. GGML_ASSERT(false); // TODO: not implemented
  13604. } break;
  13605. case GGML_OP_FLASH_ATTN:
  13606. {
  13607. struct ggml_tensor * flash_grad = NULL;
  13608. if (src0->grad || src1->grad || tensor->src[2]->grad) {
  13609. int32_t t = ggml_get_op_params_i32(tensor, 0);
  13610. GGML_ASSERT(t == 0 || t == 1);
  13611. bool masked = t != 0;
  13612. flash_grad =
  13613. ggml_flash_attn_back(ctx,
  13614. src0,
  13615. src1,
  13616. tensor->src[2],
  13617. tensor->grad,
  13618. masked);
  13619. }
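// flash_grad packs grad_q, grad_k and grad_v contiguously into a single tensor;
// the views below slice them back out, using the q (and then k) element counts as byte offsets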
  13620. if (src0->grad) {
  13621. struct ggml_tensor * grad_q = NULL;
  13622. const size_t nb0 = flash_grad->nb[0];
  13623. const size_t offset = 0;
  13624. switch(src0->n_dims) {
  13625. case 2:
  13626. {
  13627. grad_q = ggml_view_2d(ctx,
  13628. flash_grad,
  13629. src0->ne[0],
  13630. src0->ne[1],
  13631. nb0*src0->ne[0],
  13632. offset);
  13633. } break;
  13634. case 3:
  13635. {
  13636. grad_q = ggml_view_3d(ctx,
  13637. flash_grad,
  13638. src0->ne[0],
  13639. src0->ne[1],
  13640. src0->ne[2],
  13641. nb0*src0->ne[0],
  13642. nb0*src0->ne[0]*src0->ne[1],
  13643. offset);
  13644. } break;
  13645. case 4:
  13646. {
  13647. grad_q = ggml_view_4d(ctx,
  13648. flash_grad,
  13649. src0->ne[0],
  13650. src0->ne[1],
  13651. src0->ne[2],
  13652. src0->ne[3],
  13653. nb0*src0->ne[0],
  13654. nb0*src0->ne[0]*src0->ne[1],
  13655. nb0*src0->ne[0]*src0->ne[1]*src0->ne[2],
  13656. offset);
  13657. } break;
  13658. }
  13659. src0->grad = ggml_add_impl(ctx,
  13660. src0->grad,
  13661. grad_q,
  13662. inplace);
  13663. }
  13664. if (src1->grad) {
  13665. struct ggml_tensor * grad_k = NULL;
  13666. const size_t nb0 = flash_grad->nb[0];
  13667. const size_t offset = nb0*src0->ne[0]*src0->ne[1]*src0->ne[2]*src0->ne[3];
  13668. switch(src1->n_dims) {
  13669. case 2:
  13670. {
  13671. grad_k = ggml_view_2d(ctx,
  13672. flash_grad,
  13673. src1->ne[0],
  13674. src1->ne[1],
  13675. nb0*src1->ne[0],
  13676. offset);
  13677. } break;
  13678. case 3:
  13679. {
  13680. grad_k = ggml_view_3d(ctx,
  13681. flash_grad,
  13682. src1->ne[0],
  13683. src1->ne[1],
  13684. src1->ne[2],
  13685. nb0*src1->ne[0],
  13686. nb0*src1->ne[0]*src1->ne[1],
  13687. offset);
  13688. } break;
  13689. case 4:
  13690. {
  13691. grad_k = ggml_view_4d(ctx,
  13692. flash_grad,
  13693. src1->ne[0],
  13694. src1->ne[1],
  13695. src1->ne[2],
  13696. src1->ne[3],
  13697. nb0*src1->ne[0],
  13698. nb0*src1->ne[0]*src1->ne[1],
  13699. nb0*src1->ne[0]*src1->ne[1]*src1->ne[2],
  13700. offset);
  13701. } break;
  13702. }
  13703. src1->grad = ggml_add_impl(ctx,
  13704. src1->grad,
  13705. grad_k,
  13706. inplace);
  13707. }
  13708. struct ggml_tensor * opt0 = tensor->src[2];
  13709. if (opt0->grad) {
  13710. struct ggml_tensor * grad_v = NULL;
  13711. const size_t nb0 = flash_grad->nb[0];
  13712. const size_t offset = nb0*src0->ne[0]*src0->ne[1]*src0->ne[2]*src0->ne[3]
  13713. + nb0*src1->ne[0]*src1->ne[1]*src1->ne[2]*src1->ne[3];
  13714. switch(opt0->n_dims) {
  13715. case 2:
  13716. {
  13717. grad_v = ggml_view_2d(ctx,
  13718. flash_grad,
  13719. opt0->ne[0],
  13720. opt0->ne[1],
  13721. nb0*opt0->ne[0],
  13722. offset);
  13723. } break;
  13724. case 3:
  13725. {
  13726. grad_v = ggml_view_3d(ctx,
  13727. flash_grad,
  13728. opt0->ne[0],
  13729. opt0->ne[1],
  13730. opt0->ne[2],
  13731. nb0*opt0->ne[0],
  13732. nb0*opt0->ne[0]*opt0->ne[1],
  13733. offset);
  13734. } break;
  13735. case 4:
  13736. {
  13737. grad_v = ggml_view_4d(ctx,
  13738. flash_grad,
  13739. opt0->ne[0],
  13740. opt0->ne[1],
  13741. opt0->ne[2],
  13742. opt0->ne[3],
  13743. nb0*opt0->ne[0],
  13744. nb0*opt0->ne[0]*opt0->ne[1],
  13745. nb0*opt0->ne[0]*opt0->ne[1]*opt0->ne[2],
  13746. offset);
  13747. } break;
  13748. }
  13749. opt0->grad = ggml_add_impl(ctx,
  13750. opt0->grad,
  13751. grad_v,
  13752. inplace);
  13753. }
  13754. } break;
  13755. case GGML_OP_FLASH_FF:
  13756. {
  13757. GGML_ASSERT(false); // not supported
  13758. } break;
  13759. case GGML_OP_FLASH_ATTN_BACK:
  13760. {
  13761. GGML_ASSERT(false); // not supported
  13762. } break;
  13763. case GGML_OP_WIN_PART:
  13764. case GGML_OP_WIN_UNPART:
  13765. case GGML_OP_UNARY:
  13766. {
  13767. switch (ggml_get_unary_op(tensor)) {
  13768. case GGML_UNARY_OP_ABS:
  13769. {
  13770. if (src0->grad) {
  13771. src0->grad =
  13772. ggml_add_impl(ctx,
  13773. src0->grad,
  13774. ggml_mul(ctx,
  13775. ggml_sgn(ctx, src0),
  13776. tensor->grad),
  13777. inplace);
  13778. }
  13779. } break;
  13780. case GGML_UNARY_OP_SGN:
  13781. {
  13782. if (src0->grad) {
  13783. // noop
  13784. }
  13785. } break;
  13786. case GGML_UNARY_OP_NEG:
  13787. {
  13788. if (src0->grad) {
  13789. src0->grad = ggml_sub_impl(ctx, src0->grad, tensor->grad, inplace);
  13790. }
  13791. } break;
  13792. case GGML_UNARY_OP_STEP:
  13793. {
  13794. if (src0->grad) {
  13795. // noop
  13796. }
  13797. } break;
  13798. case GGML_UNARY_OP_TANH:
  13799. {
  13800. GGML_ASSERT(false); // TODO: not implemented
  13801. } break;
  13802. case GGML_UNARY_OP_ELU:
  13803. {
  13804. GGML_ASSERT(false); // TODO: not implemented
  13805. } break;
  13806. case GGML_UNARY_OP_RELU:
  13807. {
  13808. if (src0->grad) {
  13809. src0->grad = ggml_add_impl(ctx,
  13810. src0->grad,
  13811. ggml_mul(ctx,
  13812. ggml_step(ctx, src0),
  13813. tensor->grad),
  13814. inplace);
  13815. }
  13816. } break;
  13817. case GGML_UNARY_OP_GELU:
  13818. {
  13819. GGML_ASSERT(false); // TODO: not implemented
  13820. } break;
  13821. case GGML_UNARY_OP_GELU_QUICK:
  13822. {
  13823. GGML_ASSERT(false); // TODO: not implemented
  13824. } break;
  13825. case GGML_UNARY_OP_SILU:
  13826. {
  13827. // necessary for llama
  13828. if (src0->grad) {
  13829. src0->grad = ggml_add_impl(ctx,
  13830. src0->grad,
  13831. ggml_silu_back(ctx, src0, tensor->grad),
  13832. inplace);
  13833. }
  13834. } break;
  13835. default:
  13836. GGML_ASSERT(false);
  13837. }
  13838. } break;
  13839. case GGML_OP_GET_REL_POS:
  13840. case GGML_OP_ADD_REL_POS:
  13841. case GGML_OP_MAP_UNARY:
  13842. case GGML_OP_MAP_BINARY:
  13843. case GGML_OP_MAP_CUSTOM1_F32:
  13844. case GGML_OP_MAP_CUSTOM2_F32:
  13845. case GGML_OP_MAP_CUSTOM3_F32:
  13846. case GGML_OP_MAP_CUSTOM1:
  13847. case GGML_OP_MAP_CUSTOM2:
  13848. case GGML_OP_MAP_CUSTOM3:
  13849. {
  13850. GGML_ASSERT(false); // not supported
  13851. } break;
  13852. case GGML_OP_CROSS_ENTROPY_LOSS:
  13853. {
  13854. if (src0->grad) {
  13855. src0->grad = ggml_add_impl(ctx,
  13856. src0->grad,
  13857. ggml_cross_entropy_loss_back(ctx,
  13858. src0,
  13859. src1,
  13860. tensor->grad),
  13861. inplace);
  13862. }
  13863. } break;
  13864. case GGML_OP_CROSS_ENTROPY_LOSS_BACK:
  13865. {
  13866. GGML_ASSERT(false); // not supported
  13867. } break;
  13868. case GGML_OP_NONE:
  13869. {
  13870. // nop
  13871. } break;
  13872. case GGML_OP_COUNT:
  13873. {
  13874. GGML_ASSERT(false);
  13875. } break;
  13876. }
  13877. }
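// Note on the pattern above (descriptive only): nearly every case accumulates into
// src->grad through ggml_add_impl(ctx, src->grad, <local gradient>, inplace), so when a
// tensor feeds several consumers its gradient contributions are summed rather than
// overwritten.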
13878. static_assert(GGML_GRAPH_HASHTABLE_SIZE > GGML_MAX_NODES * 2, "GGML_GRAPH_HASHTABLE_SIZE is too small");
  13879. static size_t hash(void * p) {
  13880. return (size_t)p % GGML_GRAPH_HASHTABLE_SIZE;
  13881. }
  13882. static bool hash_insert(void * hash_table[], void * p) {
  13883. size_t h = hash(p);
  13884. // linear probing
  13885. size_t i = h;
  13886. while (hash_table[i] != NULL && hash_table[i] != p) {
  13887. i = (i + 1) % GGML_GRAPH_HASHTABLE_SIZE;
  13888. if (i == h) {
  13889. // hash table is full
  13890. GGML_ASSERT(false);
  13891. }
  13892. }
  13893. if (hash_table[i] == p) {
  13894. return true;
  13895. }
  13896. // insert
  13897. hash_table[i] = p;
  13898. return false;
  13899. }
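// hash_insert() implements open addressing with linear probing over raw tensor pointers:
// it returns true when the pointer was already in the table and false when it was just
// inserted. A minimal usage sketch (illustrative only; "node" is a placeholder tensor):
//
//   void * visited[GGML_GRAPH_HASHTABLE_SIZE] = { NULL };
//   if (!hash_insert(visited, (void *) node)) {
//       // first visit - process the node
//   }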
  13900. static void ggml_visit_parents(struct ggml_cgraph * cgraph, struct ggml_tensor * node) {
  13901. if (node->grad == NULL) {
  13902. // this usually happens when we generate intermediate nodes from constants in the backward pass
13903. // it can also happen during the forward pass, if the user performs computations with constants
  13904. if (node->op != GGML_OP_NONE) {
  13905. //GGML_PRINT_DEBUG("%s: warning: node %p has no grad, but op %d\n", __func__, (void *) node, node->op);
  13906. }
  13907. }
  13908. // check if already visited
  13909. if (hash_insert(cgraph->visited_hash_table, node)) {
  13910. return;
  13911. }
  13912. for (int i = 0; i < GGML_MAX_SRC; ++i) {
  13913. if (node->src[i]) {
  13914. ggml_visit_parents(cgraph, node->src[i]);
  13915. }
  13916. }
  13917. if (node->op == GGML_OP_NONE && node->grad == NULL) {
  13918. // reached a leaf node, not part of the gradient graph (e.g. a constant)
  13919. GGML_ASSERT(cgraph->n_leafs < GGML_MAX_NODES);
  13920. if (strlen(node->name) == 0) {
  13921. ggml_format_name(node, "leaf_%d", cgraph->n_leafs);
  13922. }
  13923. cgraph->leafs[cgraph->n_leafs] = node;
  13924. cgraph->n_leafs++;
  13925. } else {
  13926. GGML_ASSERT(cgraph->n_nodes < GGML_MAX_NODES);
  13927. if (strlen(node->name) == 0) {
  13928. ggml_format_name(node, "node_%d", cgraph->n_nodes);
  13929. }
  13930. cgraph->nodes[cgraph->n_nodes] = node;
  13931. cgraph->grads[cgraph->n_nodes] = node->grad;
  13932. cgraph->n_nodes++;
  13933. }
  13934. }
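// ggml_visit_parents() is a depth-first post-order traversal: the sources of a tensor are
// appended to cgraph->nodes (or to cgraph->leafs for constants) before the tensor itself,
// so the resulting node list is already a valid evaluation order for the forward pass.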
  13935. static void ggml_build_forward_impl(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor, bool expand) {
  13936. if (!expand) {
  13937. cgraph->n_nodes = 0;
  13938. cgraph->n_leafs = 0;
  13939. }
  13940. const int n0 = cgraph->n_nodes;
  13941. UNUSED(n0);
  13942. ggml_visit_parents(cgraph, tensor);
  13943. const int n_new = cgraph->n_nodes - n0;
  13944. GGML_PRINT_DEBUG("%s: visited %d new nodes\n", __func__, n_new);
  13945. if (n_new > 0) {
13946. // the last added node should always be the starting point
  13947. GGML_ASSERT(cgraph->nodes[cgraph->n_nodes - 1] == tensor);
  13948. }
  13949. }
  13950. void ggml_build_forward_expand(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor) {
  13951. ggml_build_forward_impl(cgraph, tensor, true);
  13952. }
  13953. struct ggml_cgraph ggml_build_forward(struct ggml_tensor * tensor) {
  13954. struct ggml_cgraph result = {
  13955. /*.n_nodes =*/ 0,
  13956. /*.n_leafs =*/ 0,
  13957. /*.nodes =*/ { NULL },
  13958. /*.grads =*/ { NULL },
  13959. /*.leafs =*/ { NULL },
13960. /*.visited_hash_table =*/ { NULL },
  13961. /*.perf_runs =*/ 0,
  13962. /*.perf_cycles =*/ 0,
  13963. /*.perf_time_us =*/ 0,
  13964. };
  13965. ggml_build_forward_impl(&result, tensor, false);
  13966. return result;
  13967. }
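// A hedged usage sketch ("ctx", "a" and "b" are placeholder variables, not part of this file):
//
//   struct ggml_tensor * c  = ggml_mul(ctx, a, b);
//   struct ggml_cgraph   gf = ggml_build_forward(c);
//   // gf.nodes now lists every op needed to compute c, in evaluation order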
  13968. void ggml_build_backward_expand(struct ggml_context * ctx, struct ggml_cgraph * gf, struct ggml_cgraph * gb, bool keep) {
  13969. GGML_ASSERT(gf->n_nodes > 0);
  13970. // if we are keeping the gradient graph, we have to detach the gradient nodes from the original graph
  13971. if (keep) {
  13972. for (int i = 0; i < gf->n_nodes; i++) {
  13973. struct ggml_tensor * node = gf->nodes[i];
  13974. if (node->grad) {
  13975. node->grad = ggml_dup_tensor(ctx, node);
  13976. gf->grads[i] = node->grad;
  13977. }
  13978. }
  13979. }
  13980. for (int i = gf->n_nodes - 1; i >= 0; i--) {
  13981. struct ggml_tensor * node = gf->nodes[i];
  13982. // because we detached the grad nodes from the original graph, we can afford inplace operations
  13983. if (node->grad) {
  13984. ggml_compute_backward(ctx, node, keep);
  13985. }
  13986. }
  13987. for (int i = 0; i < gf->n_nodes; i++) {
  13988. struct ggml_tensor * node = gf->nodes[i];
  13989. if (node->is_param) {
  13990. GGML_PRINT_DEBUG("%s: found root node %p\n", __func__, (void *) node);
  13991. ggml_build_forward_expand(gb, node->grad);
  13992. }
  13993. }
  13994. }
  13995. struct ggml_cgraph ggml_build_backward(struct ggml_context * ctx, struct ggml_cgraph * gf, bool keep) {
  13996. struct ggml_cgraph result = *gf;
  13997. ggml_build_backward_expand(ctx, gf, &result, keep);
  13998. return result;
  13999. }
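// Typical forward/backward usage, sketched from the optimizer code further below
// ("loss" is a placeholder scalar tensor; this is an illustration, not a definitive recipe):
//
//   struct ggml_cgraph gf = ggml_build_forward(loss);
//   struct ggml_cgraph gb = ggml_build_backward(ctx, &gf, /*keep =*/ true);
//   ggml_graph_reset(&gf);              // zero all gradients
//   ggml_set_f32(loss->grad, 1.0f);     // seed d(loss)/d(loss) = 1
//   // computing gb then fills in the gradients of every parameter tensor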
  14000. struct ggml_cgraph * ggml_new_graph(struct ggml_context * ctx) {
  14001. struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_GRAPH, GGML_GRAPH_SIZE);
  14002. struct ggml_cgraph * cgraph = (struct ggml_cgraph *) ((char *) ctx->mem_buffer + obj->offs);
  14003. *cgraph = (struct ggml_cgraph) {
  14004. /*.n_nodes =*/ 0,
  14005. /*.n_leafs =*/ 0,
  14006. /*.nodes =*/ { NULL },
  14007. /*.grads =*/ { NULL },
  14008. /*.leafs =*/ { NULL },
14009. /*.visited_hash_table =*/ { NULL },
  14010. /*.perf_runs =*/ 0,
  14011. /*.perf_cycles =*/ 0,
  14012. /*.perf_time_us =*/ 0,
  14013. };
  14014. return cgraph;
  14015. }
  14016. struct ggml_cgraph * ggml_build_forward_ctx(struct ggml_context * ctx, struct ggml_tensor * tensor) {
  14017. struct ggml_cgraph * cgraph = ggml_new_graph(ctx);
  14018. ggml_build_forward_impl(cgraph, tensor, false);
  14019. return cgraph;
  14020. }
  14021. size_t ggml_graph_overhead(void) {
  14022. return GGML_OBJECT_SIZE + GGML_PAD(GGML_GRAPH_SIZE, GGML_MEM_ALIGN);
  14023. }
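// ggml_graph_overhead() reports how many bytes ggml_new_graph() consumes from a context,
// which callers can add to their ggml_init_params.mem_size budget. Sketch (the 16 MiB for
// tensor data is purely illustrative):
//
//   struct ggml_init_params params = {
//       /*.mem_size   =*/ ggml_graph_overhead() + 16*1024*1024,
//       /*.mem_buffer =*/ NULL,
//       /*.no_alloc   =*/ false,
//   };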
  14024. //
  14025. // thread data
  14026. //
  14027. // synchronization is done via busy loops
14028. // I tried using spin locks, but I am not sure how to use them correctly - the variants I tried were slower than busy loops
  14029. //
  14030. #ifdef __APPLE__
  14031. //#include <os/lock.h>
  14032. //
  14033. //typedef os_unfair_lock ggml_lock_t;
  14034. //
  14035. //#define ggml_lock_init(x) UNUSED(x)
  14036. //#define ggml_lock_destroy(x) UNUSED(x)
  14037. //#define ggml_lock_lock os_unfair_lock_lock
  14038. //#define ggml_lock_unlock os_unfair_lock_unlock
  14039. //
  14040. //#define GGML_LOCK_INITIALIZER OS_UNFAIR_LOCK_INIT
  14041. typedef int ggml_lock_t;
  14042. #define ggml_lock_init(x) UNUSED(x)
  14043. #define ggml_lock_destroy(x) UNUSED(x)
  14044. #define ggml_lock_lock(x) UNUSED(x)
  14045. #define ggml_lock_unlock(x) UNUSED(x)
  14046. #define GGML_LOCK_INITIALIZER 0
  14047. typedef pthread_t ggml_thread_t;
  14048. #define ggml_thread_create pthread_create
  14049. #define ggml_thread_join pthread_join
  14050. #else
  14051. //typedef pthread_spinlock_t ggml_lock_t;
  14052. //#define ggml_lock_init(x) pthread_spin_init(x, PTHREAD_PROCESS_PRIVATE)
  14053. //#define ggml_lock_destroy pthread_spin_destroy
  14054. //#define ggml_lock_lock pthread_spin_lock
  14055. //#define ggml_lock_unlock pthread_spin_unlock
  14056. typedef int ggml_lock_t;
  14057. #define ggml_lock_init(x) UNUSED(x)
  14058. #define ggml_lock_destroy(x) UNUSED(x)
  14059. #if defined(__x86_64__) || (defined(_MSC_VER) && defined(_M_AMD64))
  14060. #define ggml_lock_lock(x) _mm_pause()
  14061. #else
  14062. #define ggml_lock_lock(x) UNUSED(x)
  14063. #endif
  14064. #define ggml_lock_unlock(x) UNUSED(x)
  14065. #define GGML_LOCK_INITIALIZER 0
  14066. typedef pthread_t ggml_thread_t;
  14067. #define ggml_thread_create pthread_create
  14068. #define ggml_thread_join pthread_join
  14069. #endif
  14070. // Android's libc implementation "bionic" does not support setting affinity
  14071. #if defined(__linux__) && !defined(__BIONIC__)
  14072. static void set_numa_thread_affinity(int thread_n, int n_threads) {
  14073. if (!ggml_is_numa()) {
  14074. return;
  14075. }
14076. // run this thread on NUMA node number: node_num = thread_n / (threads per node)
  14077. const int node_num = thread_n / ((n_threads + g_state.numa.n_nodes - 1) / g_state.numa.n_nodes);
  14078. struct ggml_numa_node * node = &g_state.numa.nodes[node_num];
  14079. size_t setsize = CPU_ALLOC_SIZE(g_state.numa.total_cpus);
  14080. cpu_set_t * cpus = CPU_ALLOC(g_state.numa.total_cpus);
  14081. CPU_ZERO_S(setsize, cpus);
  14082. for (size_t i = 0; i < node->n_cpus; ++i) {
  14083. CPU_SET_S(node->cpus[i], setsize, cpus);
  14084. }
  14085. int rv = pthread_setaffinity_np(pthread_self(), setsize, cpus);
  14086. if (rv) {
  14087. fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n",
  14088. strerror(rv));
  14089. }
  14090. CPU_FREE(cpus);
  14091. }
  14092. static void clear_numa_thread_affinity(void) {
  14093. if (!ggml_is_numa()) {
  14094. return;
  14095. }
  14096. size_t setsize = CPU_ALLOC_SIZE(g_state.numa.total_cpus);
  14097. cpu_set_t * cpus = CPU_ALLOC(g_state.numa.total_cpus);
  14098. CPU_ZERO_S(setsize, cpus);
  14099. for (unsigned i = 0; i < g_state.numa.total_cpus; ++i) {
  14100. CPU_SET_S(i, setsize, cpus);
  14101. }
  14102. int rv = pthread_setaffinity_np(pthread_self(), setsize, cpus);
  14103. if (rv) {
  14104. fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n",
  14105. strerror(rv));
  14106. }
  14107. CPU_FREE(cpus);
  14108. }
  14109. #else
  14110. // TODO: Windows etc.
  14111. // (the linux implementation may also work on BSD, someone should test)
  14112. static void set_numa_thread_affinity(int thread_n, int n_threads) { UNUSED(thread_n); UNUSED(n_threads); }
  14113. static void clear_numa_thread_affinity(void) {}
  14114. #endif
  14115. struct ggml_compute_state_shared {
  14116. const struct ggml_cgraph * cgraph;
  14117. const struct ggml_cplan * cplan;
  14118. int64_t perf_node_start_cycles;
  14119. int64_t perf_node_start_time_us;
  14120. const int n_threads;
  14121. // synchronization primitives
  14122. atomic_int n_active; // num active threads
  14123. atomic_int node_n; // active graph node
  14124. bool (*abort_callback)(void * data); // abort ggml_graph_compute when true
  14125. void * abort_callback_data;
  14126. };
  14127. struct ggml_compute_state {
  14128. ggml_thread_t thrd;
  14129. int ith;
  14130. struct ggml_compute_state_shared * shared;
  14131. };
  14132. static void ggml_graph_compute_perf_stats_node(struct ggml_tensor * node, const struct ggml_compute_state_shared * st) {
  14133. int64_t cycles_cur = ggml_perf_cycles() - st->perf_node_start_cycles;
  14134. int64_t time_us_cur = ggml_perf_time_us() - st->perf_node_start_time_us;
  14135. node->perf_runs++;
  14136. node->perf_cycles += cycles_cur;
  14137. node->perf_time_us += time_us_cur;
  14138. }
  14139. static thread_ret_t ggml_graph_compute_thread(void * data) {
  14140. struct ggml_compute_state * state = (struct ggml_compute_state *) data;
  14141. const struct ggml_cgraph * cgraph = state->shared->cgraph;
  14142. const struct ggml_cplan * cplan = state->shared->cplan;
  14143. const int * n_tasks_arr = cplan->n_tasks;
  14144. const int n_threads = state->shared->n_threads;
  14145. set_numa_thread_affinity(state->ith, n_threads);
  14146. int node_n = -1;
  14147. while (true) {
  14148. if (cplan->abort_callback && cplan->abort_callback(cplan->abort_callback_data)) {
  14149. state->shared->node_n += 1;
  14150. return (thread_ret_t) GGML_EXIT_ABORTED;
  14151. }
  14152. if (atomic_fetch_sub(&state->shared->n_active, 1) == 1) {
  14153. // all other threads are finished and spinning
14155. // do finalize and init here so we don't have to synchronize again
  14155. struct ggml_compute_params params = {
  14156. /*.type =*/ GGML_TASK_FINALIZE,
  14157. /*.ith =*/ 0,
  14158. /*.nth =*/ 0,
  14159. /*.wsize =*/ cplan->work_size,
  14160. /*.wdata =*/ cplan->work_data,
  14161. };
  14162. if (node_n != -1) {
  14163. /* FINALIZE */
  14164. struct ggml_tensor * node = state->shared->cgraph->nodes[node_n];
  14165. if (GGML_OP_HAS_FINALIZE[node->op]) {
  14166. params.nth = n_tasks_arr[node_n];
  14167. ggml_compute_forward(&params, node);
  14168. }
  14169. ggml_graph_compute_perf_stats_node(node, state->shared);
  14170. }
14171. // distribute new work, or execute it directly if it only needs a single task (1T)
  14172. while (++node_n < cgraph->n_nodes) {
  14173. GGML_PRINT_DEBUG_5("%s: %d/%d\n", __func__, node_n, cgraph->n_nodes);
  14174. struct ggml_tensor * node = cgraph->nodes[node_n];
  14175. const int n_tasks = n_tasks_arr[node_n];
  14176. state->shared->perf_node_start_cycles = ggml_perf_cycles();
  14177. state->shared->perf_node_start_time_us = ggml_perf_time_us();
  14178. params.nth = n_tasks;
  14179. /* INIT */
  14180. if (GGML_OP_HAS_INIT[node->op]) {
  14181. params.type = GGML_TASK_INIT;
  14182. ggml_compute_forward(&params, node);
  14183. }
  14184. if (n_tasks == 1) {
14185. // TODO: maybe push node_n to the atomic so that, when other threads see n_tasks == 1,
14186. // they can do something more efficient than spinning (?)
  14187. params.type = GGML_TASK_COMPUTE;
  14188. ggml_compute_forward(&params, node);
  14189. if (GGML_OP_HAS_FINALIZE[node->op]) {
  14190. params.type = GGML_TASK_FINALIZE;
  14191. ggml_compute_forward(&params, node);
  14192. }
  14193. ggml_graph_compute_perf_stats_node(node, state->shared);
  14194. } else {
  14195. break;
  14196. }
  14197. if (cplan->abort_callback && cplan->abort_callback(cplan->abort_callback_data)) {
  14198. break;
  14199. }
  14200. }
  14201. atomic_store(&state->shared->n_active, n_threads);
  14202. atomic_store(&state->shared->node_n, node_n);
  14203. } else {
  14204. // wait for other threads to finish
  14205. const int last = node_n;
  14206. do {
  14207. //sched_yield();
  14208. node_n = atomic_load(&state->shared->node_n);
  14209. } while (node_n == last);
  14210. }
  14211. // check if we should stop
  14212. if (node_n >= cgraph->n_nodes) break;
  14213. /* COMPUTE */
  14214. struct ggml_tensor * node = cgraph->nodes[node_n];
  14215. const int n_tasks = n_tasks_arr[node_n];
  14216. struct ggml_compute_params params = {
  14217. /*.type =*/ GGML_TASK_COMPUTE,
  14218. /*.ith =*/ state->ith,
  14219. /*.nth =*/ n_tasks,
  14220. /*.wsize =*/ cplan->work_size,
  14221. /*.wdata =*/ cplan->work_data,
  14222. };
  14223. if (state->ith < n_tasks) {
  14224. ggml_compute_forward(&params, node);
  14225. }
  14226. }
  14227. return GGML_EXIT_SUCCESS;
  14228. }
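// Scheduling summary (descriptive only): the thread that brings n_active down to zero becomes
// the coordinator - it runs FINALIZE for the node that just completed, then INIT for the next
// node, executes single-task nodes immediately, and finally publishes the new node_n.
// The other threads busy-wait on node_n and join in for the COMPUTE phase of any node
// whose n_tasks is greater than 1.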
  14229. struct ggml_cplan ggml_graph_plan(struct ggml_cgraph * cgraph, int n_threads) {
  14230. if (n_threads <= 0) {
  14231. n_threads = GGML_DEFAULT_N_THREADS;
  14232. }
  14233. size_t work_size = 0;
  14234. struct ggml_cplan cplan;
  14235. memset(&cplan, 0, sizeof(struct ggml_cplan));
  14236. // thread scheduling for the different operations + work buffer size estimation
  14237. for (int i = 0; i < cgraph->n_nodes; i++) {
  14238. int n_tasks = 1;
  14239. struct ggml_tensor * node = cgraph->nodes[i];
  14240. switch (node->op) {
  14241. case GGML_OP_CPY:
  14242. case GGML_OP_DUP:
  14243. {
  14244. n_tasks = n_threads;
  14245. size_t cur = 0;
  14246. if (ggml_is_quantized(node->type)) {
  14247. cur = ggml_type_size(GGML_TYPE_F32) * node->ne[0] * n_tasks;
  14248. }
  14249. work_size = MAX(work_size, cur);
  14250. } break;
  14251. case GGML_OP_ADD:
  14252. case GGML_OP_ADD1:
  14253. {
  14254. n_tasks = n_threads;
  14255. size_t cur = 0;
  14256. if (ggml_is_quantized(node->src[0]->type)) {
  14257. cur = ggml_type_size(GGML_TYPE_F32) * node->src[0]->ne[0] * n_tasks;
  14258. }
  14259. work_size = MAX(work_size, cur);
  14260. } break;
  14261. case GGML_OP_ACC:
  14262. {
  14263. n_tasks = n_threads;
  14264. size_t cur = 0;
  14265. if (ggml_is_quantized(node->src[0]->type)) {
  14266. cur = ggml_type_size(GGML_TYPE_F32) * node->src[1]->ne[0] * n_tasks;
  14267. }
  14268. work_size = MAX(work_size, cur);
  14269. } break;
  14270. case GGML_OP_SUB:
  14271. case GGML_OP_DIV:
  14272. case GGML_OP_SQR:
  14273. case GGML_OP_SQRT:
  14274. case GGML_OP_LOG:
  14275. case GGML_OP_SUM:
  14276. case GGML_OP_SUM_ROWS:
  14277. case GGML_OP_MEAN:
  14278. case GGML_OP_ARGMAX:
  14279. case GGML_OP_REPEAT:
  14280. case GGML_OP_REPEAT_BACK:
  14281. {
  14282. n_tasks = 1;
  14283. } break;
  14284. case GGML_OP_UNARY:
  14285. {
  14286. switch (ggml_get_unary_op(node)) {
  14287. case GGML_UNARY_OP_ABS:
  14288. case GGML_UNARY_OP_SGN:
  14289. case GGML_UNARY_OP_NEG:
  14290. case GGML_UNARY_OP_STEP:
  14291. case GGML_UNARY_OP_TANH:
  14292. case GGML_UNARY_OP_ELU:
  14293. case GGML_UNARY_OP_RELU:
  14294. {
  14295. n_tasks = 1;
  14296. } break;
  14297. case GGML_UNARY_OP_GELU:
  14298. case GGML_UNARY_OP_GELU_QUICK:
  14299. case GGML_UNARY_OP_SILU:
  14300. {
  14301. n_tasks = n_threads;
  14302. } break;
  14303. }
  14304. } break;
  14305. case GGML_OP_SILU_BACK:
  14306. case GGML_OP_MUL:
  14307. case GGML_OP_NORM:
  14308. case GGML_OP_RMS_NORM:
  14309. case GGML_OP_RMS_NORM_BACK:
  14310. case GGML_OP_GROUP_NORM:
  14311. {
  14312. n_tasks = n_threads;
  14313. } break;
  14314. case GGML_OP_CONCAT:
  14315. case GGML_OP_MUL_MAT:
  14316. case GGML_OP_OUT_PROD:
  14317. {
  14318. n_tasks = n_threads;
  14319. // TODO: use different scheduling for different matrix sizes
  14320. //const int nr0 = ggml_nrows(node->src[0]);
  14321. //const int nr1 = ggml_nrows(node->src[1]);
  14322. //n_tasks = MIN(n_threads, MAX(1, nr0/128));
  14323. //printf("nr0 = %8d, nr1 = %8d, nr0*nr1 = %8d, n_tasks%d\n", nr0, nr1, nr0*nr1, n_tasks);
  14324. size_t cur = 0;
  14325. const enum ggml_type vec_dot_type = type_traits[node->src[0]->type].vec_dot_type;
  14326. #if defined(GGML_USE_CUBLAS)
  14327. if (ggml_cuda_can_mul_mat(node->src[0], node->src[1], node)) {
  14328. n_tasks = 1; // TODO: this actually is doing nothing
  14329. // the threads are still spinning
  14330. } else
  14331. #elif defined(GGML_USE_CLBLAST)
  14332. if (ggml_cl_can_mul_mat(node->src[0], node->src[1], node)) {
  14333. n_tasks = 1; // TODO: this actually is doing nothing
  14334. // the threads are still spinning
  14335. cur = ggml_cl_mul_mat_get_wsize(node->src[0], node->src[1], node);
  14336. } else
  14337. #endif
  14338. #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
  14339. if (ggml_compute_forward_mul_mat_use_blas(node->src[0], node->src[1], node)) {
  14340. n_tasks = 1; // TODO: this actually is doing nothing
  14341. // the threads are still spinning
  14342. if (node->src[0]->type != GGML_TYPE_F32) {
  14343. // here we need memory just for single 2D matrix from src0
  14344. cur = ggml_type_size(GGML_TYPE_F32)*(node->src[0]->ne[0]*node->src[0]->ne[1]);
  14345. }
  14346. } else
  14347. #endif
  14348. if (node->src[1]->type != vec_dot_type) {
  14349. cur = ggml_type_size(vec_dot_type)*ggml_nelements(node->src[1])/ggml_blck_size(vec_dot_type);
  14350. } else {
  14351. cur = 0;
  14352. }
  14353. work_size = MAX(work_size, cur);
  14354. } break;
  14355. case GGML_OP_SCALE:
  14356. {
  14357. n_tasks = 1;
  14358. } break;
  14359. case GGML_OP_SET:
  14360. case GGML_OP_CONT:
  14361. case GGML_OP_RESHAPE:
  14362. case GGML_OP_VIEW:
  14363. case GGML_OP_PERMUTE:
  14364. case GGML_OP_TRANSPOSE:
  14365. case GGML_OP_GET_ROWS:
  14366. case GGML_OP_GET_ROWS_BACK:
  14367. case GGML_OP_DIAG:
  14368. {
  14369. n_tasks = 1;
  14370. } break;
  14371. case GGML_OP_DIAG_MASK_ZERO:
  14372. case GGML_OP_DIAG_MASK_INF:
  14373. case GGML_OP_SOFT_MAX:
  14374. case GGML_OP_SOFT_MAX_BACK:
  14375. case GGML_OP_ROPE:
  14376. case GGML_OP_ROPE_BACK:
  14377. case GGML_OP_ADD_REL_POS:
  14378. {
  14379. n_tasks = n_threads;
  14380. } break;
  14381. case GGML_OP_ALIBI:
  14382. {
  14383. n_tasks = 1; //TODO
  14384. } break;
  14385. case GGML_OP_CLAMP:
  14386. {
  14387. n_tasks = 1; //TODO
  14388. } break;
  14389. case GGML_OP_CONV_1D:
  14390. {
  14391. n_tasks = n_threads;
  14392. GGML_ASSERT(node->src[0]->ne[3] == 1);
  14393. GGML_ASSERT(node->src[1]->ne[2] == 1);
  14394. GGML_ASSERT(node->src[1]->ne[3] == 1);
  14395. size_t cur = 0;
  14396. const int nk = node->src[0]->ne[0];
  14397. if (node->src[0]->type == GGML_TYPE_F16 &&
  14398. node->src[1]->type == GGML_TYPE_F32) {
  14399. cur = sizeof(ggml_fp16_t)*(
  14400. nk*ggml_up32(node->src[0]->ne[1])*node->src[0]->ne[2] +
  14401. ( 2*(nk/2) + node->src[1]->ne[0])*node->src[1]->ne[1]
  14402. );
  14403. } else if (node->src[0]->type == GGML_TYPE_F32 &&
  14404. node->src[1]->type == GGML_TYPE_F32) {
  14405. cur = sizeof(float)*(
  14406. nk*ggml_up32(node->src[0]->ne[1])*node->src[0]->ne[2] +
  14407. ( 2*(nk/2) + node->src[1]->ne[0])*node->src[1]->ne[1]
  14408. );
  14409. } else {
  14410. GGML_ASSERT(false);
  14411. }
  14412. work_size = MAX(work_size, cur);
  14413. } break;
  14414. case GGML_OP_CONV_2D:
  14415. {
  14416. n_tasks = n_threads;
  14417. const int64_t ne00 = node->src[0]->ne[0]; // W
  14418. const int64_t ne01 = node->src[0]->ne[1]; // H
  14419. const int64_t ne02 = node->src[0]->ne[2]; // C
  14420. const int64_t ne03 = node->src[0]->ne[3]; // N
  14421. const int64_t ne10 = node->src[1]->ne[0]; // W
  14422. const int64_t ne11 = node->src[1]->ne[1]; // H
  14423. const int64_t ne12 = node->src[1]->ne[2]; // C
  14424. const int64_t ne0 = node->ne[0];
  14425. const int64_t ne1 = node->ne[1];
  14426. const int64_t ne2 = node->ne[2];
  14427. const int64_t nk = ne00*ne01;
  14428. const int64_t ew0 = nk * ne02;
  14429. UNUSED(ne03);
  14430. UNUSED(ne2);
  14431. size_t cur = 0;
  14432. if (node->src[0]->type == GGML_TYPE_F16 &&
  14433. node->src[1]->type == GGML_TYPE_F32) {
  14434. cur = sizeof(ggml_fp16_t)*(ne0*ne1*ew0);
  14435. } else if (node->src[0]->type == GGML_TYPE_F32 &&
  14436. node->src[1]->type == GGML_TYPE_F32) {
  14437. cur = sizeof(float)* (ne10*ne11*ne12);
  14438. } else {
  14439. GGML_ASSERT(false);
  14440. }
  14441. work_size = MAX(work_size, cur);
  14442. } break;
  14443. case GGML_OP_CONV_TRANSPOSE_2D:
  14444. {
  14445. n_tasks = n_threads;
  14446. const int64_t ne00 = node->src[0]->ne[0]; // W
  14447. const int64_t ne01 = node->src[0]->ne[1]; // H
  14448. const int64_t ne02 = node->src[0]->ne[2]; // Channels Out
  14449. const int64_t ne03 = node->src[0]->ne[3]; // Channels In
  14450. const int64_t ne10 = node->src[1]->ne[0]; // W
  14451. const int64_t ne11 = node->src[1]->ne[1]; // H
  14452. const int64_t ne12 = node->src[1]->ne[2]; // Channels In
  14453. size_t cur = 0;
  14454. cur += sizeof(ggml_fp16_t)*ne00*ne01*ne02*ne03;
  14455. cur += sizeof(ggml_fp16_t)*ne10*ne11*ne12;
  14456. work_size = MAX(work_size, cur);
  14457. } break;
  14458. case GGML_OP_POOL_1D:
  14459. case GGML_OP_POOL_2D:
  14460. {
  14461. n_tasks = 1;
  14462. } break;
  14463. case GGML_OP_UPSCALE:
  14464. {
  14465. n_tasks = n_threads;
  14466. } break;
  14467. case GGML_OP_FLASH_ATTN:
  14468. {
  14469. n_tasks = n_threads;
  14470. size_t cur = 0;
  14471. const int64_t ne11 = ggml_up(node->src[1]->ne[1], GGML_SOFT_MAX_UNROLL);
  14472. if (node->src[1]->type == GGML_TYPE_F32) {
  14473. cur = sizeof(float)*ne11*n_tasks; // TODO: this can become (n_tasks-1)
  14474. cur += sizeof(float)*ne11*n_tasks; // this is overestimated by x2
  14475. }
  14476. if (node->src[1]->type == GGML_TYPE_F16) {
  14477. cur = sizeof(float)*ne11*n_tasks; // TODO: this can become (n_tasks-1)
  14478. cur += sizeof(float)*ne11*n_tasks; // this is overestimated by x2
  14479. }
  14480. work_size = MAX(work_size, cur);
  14481. } break;
  14482. case GGML_OP_FLASH_FF:
  14483. {
  14484. n_tasks = n_threads;
  14485. size_t cur = 0;
  14486. if (node->src[1]->type == GGML_TYPE_F32) {
  14487. cur = sizeof(float)*node->src[1]->ne[1]*n_tasks; // TODO: this can become (n_tasks-1)
  14488. cur += sizeof(float)*node->src[1]->ne[1]*n_tasks; // this is overestimated by x2
  14489. }
  14490. if (node->src[1]->type == GGML_TYPE_F16) {
  14491. cur = sizeof(float)*node->src[1]->ne[1]*n_tasks; // TODO: this can become (n_tasks-1)
  14492. cur += sizeof(float)*node->src[1]->ne[1]*n_tasks; // this is overestimated by x2
  14493. }
  14494. work_size = MAX(work_size, cur);
  14495. } break;
  14496. case GGML_OP_FLASH_ATTN_BACK:
  14497. {
  14498. n_tasks = n_threads;
  14499. size_t cur = 0;
  14500. const int64_t D = node->src[0]->ne[0];
  14501. const int64_t ne11 = ggml_up(node->src[1]->ne[1], GGML_SOFT_MAX_UNROLL);
  14502. const int64_t mxDn = MAX(D, ne11) * 2; // *2 because of S and SM in ggml_compute_forward_flash_attn_back
  14503. if (node->src[1]->type == GGML_TYPE_F32) {
  14504. cur = sizeof(float)*mxDn*n_tasks; // TODO: this can become (n_tasks-1)
  14505. cur += sizeof(float)*mxDn*n_tasks; // this is overestimated by x2
  14506. }
  14507. if (node->src[1]->type == GGML_TYPE_F16) {
  14508. cur = sizeof(float)*mxDn*n_tasks; // TODO: this can become (n_tasks-1)
  14509. cur += sizeof(float)*mxDn*n_tasks; // this is overestimated by x2
  14510. }
  14511. work_size = MAX(work_size, cur);
  14512. } break;
  14513. case GGML_OP_WIN_PART:
  14514. case GGML_OP_WIN_UNPART:
  14515. case GGML_OP_GET_REL_POS:
  14516. case GGML_OP_MAP_UNARY:
  14517. case GGML_OP_MAP_BINARY:
  14518. case GGML_OP_MAP_CUSTOM1_F32:
  14519. case GGML_OP_MAP_CUSTOM2_F32:
  14520. case GGML_OP_MAP_CUSTOM3_F32:
  14521. {
  14522. n_tasks = 1;
  14523. } break;
  14524. case GGML_OP_MAP_CUSTOM1:
  14525. {
  14526. struct ggml_map_custom1_op_params * p = (struct ggml_map_custom1_op_params *) node->op_params;
  14527. if (p->n_tasks == GGML_N_TASKS_MAX) {
  14528. n_tasks = n_threads;
  14529. } else {
  14530. n_tasks = MIN(p->n_tasks, n_threads);
  14531. }
  14532. } break;
  14533. case GGML_OP_MAP_CUSTOM2:
  14534. {
  14535. struct ggml_map_custom2_op_params * p = (struct ggml_map_custom2_op_params *) node->op_params;
  14536. if (p->n_tasks == GGML_N_TASKS_MAX) {
  14537. n_tasks = n_threads;
  14538. } else {
  14539. n_tasks = MIN(p->n_tasks, n_threads);
  14540. }
  14541. } break;
  14542. case GGML_OP_MAP_CUSTOM3:
  14543. {
  14544. struct ggml_map_custom3_op_params * p = (struct ggml_map_custom3_op_params *) node->op_params;
  14545. if (p->n_tasks == GGML_N_TASKS_MAX) {
  14546. n_tasks = n_threads;
  14547. } else {
  14548. n_tasks = MIN(p->n_tasks, n_threads);
  14549. }
  14550. } break;
  14551. case GGML_OP_CROSS_ENTROPY_LOSS:
  14552. {
  14553. n_tasks = n_threads;
  14554. size_t cur = ggml_type_size(node->type)*(n_tasks + node->src[0]->ne[0]*n_tasks);
  14555. work_size = MAX(work_size, cur);
  14556. } break;
  14557. case GGML_OP_CROSS_ENTROPY_LOSS_BACK:
  14558. {
  14559. n_tasks = n_threads;
  14560. } break;
  14561. case GGML_OP_NONE:
  14562. {
  14563. n_tasks = 1;
  14564. } break;
  14565. case GGML_OP_COUNT:
  14566. {
  14567. GGML_ASSERT(false);
  14568. } break;
  14569. }
  14570. cplan.n_tasks[i] = n_tasks;
  14571. }
  14572. if (work_size > 0) {
  14573. work_size += CACHE_LINE_SIZE*(n_threads - 1);
  14574. }
  14575. cplan.n_threads = n_threads;
  14576. cplan.work_size = work_size;
  14577. cplan.work_data = NULL;
  14578. return cplan;
  14579. }
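// A usage sketch mirroring ggml_graph_compute_with_ctx() below (the work buffer may come
// from anywhere, as long as it is at least work_size bytes):
//
//   struct ggml_cplan cplan = ggml_graph_plan(&gf, /*n_threads =*/ 4);
//   cplan.work_data = malloc(cplan.work_size);
//   ggml_graph_compute(&gf, &cplan);
//   free(cplan.work_data);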
  14580. int ggml_graph_compute(struct ggml_cgraph * cgraph, struct ggml_cplan * cplan) {
  14581. {
  14582. GGML_ASSERT(cplan);
  14583. GGML_ASSERT(cplan->n_threads > 0);
  14584. if (cplan->work_size > 0) {
  14585. GGML_ASSERT(cplan->work_data);
  14586. }
  14587. for (int i = 0; i < cgraph->n_nodes; ++i) {
  14588. if (cgraph->nodes[i]->op != GGML_OP_NONE) {
  14589. GGML_ASSERT(cplan->n_tasks[i] > 0);
  14590. }
  14591. }
  14592. }
  14593. const int n_threads = cplan->n_threads;
  14594. struct ggml_compute_state_shared state_shared = {
  14595. /*.cgraph =*/ cgraph,
14596. /*.cplan =*/ cplan,
  14597. /*.perf_node_start_cycles =*/ 0,
  14598. /*.perf_node_start_time_us =*/ 0,
  14599. /*.n_threads =*/ n_threads,
  14600. /*.n_active =*/ n_threads,
  14601. /*.node_n =*/ -1,
  14602. /*.abort_callback =*/ NULL,
  14603. /*.abort_callback_data =*/ NULL,
  14604. };
  14605. struct ggml_compute_state * workers = alloca(sizeof(struct ggml_compute_state)*n_threads);
  14606. // create thread pool
  14607. if (n_threads > 1) {
  14608. for (int j = 1; j < n_threads; ++j) {
  14609. workers[j] = (struct ggml_compute_state) {
  14610. .thrd = 0,
  14611. .ith = j,
  14612. .shared = &state_shared,
  14613. };
  14614. const int rc = ggml_thread_create(&workers[j].thrd, NULL, ggml_graph_compute_thread, &workers[j]);
  14615. GGML_ASSERT(rc == 0);
  14616. UNUSED(rc);
  14617. }
  14618. }
  14619. workers[0].ith = 0;
  14620. workers[0].shared = &state_shared;
  14621. const int64_t perf_start_cycles = ggml_perf_cycles();
  14622. const int64_t perf_start_time_us = ggml_perf_time_us();
  14623. // this is a work thread too
  14624. int compute_status = (size_t) ggml_graph_compute_thread(&workers[0]);
  14625. // don't leave affinity set on the main thread
  14626. clear_numa_thread_affinity();
  14627. // join or kill thread pool
  14628. if (n_threads > 1) {
  14629. for (int j = 1; j < n_threads; j++) {
  14630. const int rc = ggml_thread_join(workers[j].thrd, NULL);
  14631. GGML_ASSERT(rc == 0);
  14632. }
  14633. }
  14634. // performance stats (graph)
  14635. {
  14636. int64_t perf_cycles_cur = ggml_perf_cycles() - perf_start_cycles;
  14637. int64_t perf_time_us_cur = ggml_perf_time_us() - perf_start_time_us;
  14638. cgraph->perf_runs++;
  14639. cgraph->perf_cycles += perf_cycles_cur;
  14640. cgraph->perf_time_us += perf_time_us_cur;
  14641. GGML_PRINT_DEBUG("%s: perf (%d) - cpu = %.3f / %.3f ms, wall = %.3f / %.3f ms\n",
  14642. __func__, cgraph->perf_runs,
  14643. (double) perf_cycles_cur / (double) ggml_cycles_per_ms(),
  14644. (double) cgraph->perf_cycles / (double) ggml_cycles_per_ms() / (double) cgraph->perf_runs,
  14645. (double) perf_time_us_cur / 1000.0,
  14646. (double) cgraph->perf_time_us / 1000.0 / cgraph->perf_runs);
  14647. }
  14648. return compute_status;
  14649. }
  14650. void ggml_graph_reset(struct ggml_cgraph * cgraph) {
  14651. for (int i = 0; i < cgraph->n_nodes; i++) {
  14652. struct ggml_tensor * grad = cgraph->grads[i];
  14653. if (grad) {
  14654. ggml_set_zero(grad);
  14655. }
  14656. }
  14657. }
  14658. void ggml_graph_compute_with_ctx(struct ggml_context * ctx, struct ggml_cgraph * cgraph, int n_threads) {
  14659. struct ggml_cplan cplan = ggml_graph_plan(cgraph, n_threads);
  14660. struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_WORK_BUFFER, cplan.work_size);
  14661. cplan.work_data = (uint8_t *)ctx->mem_buffer + obj->offs;
  14662. ggml_graph_compute(cgraph, &cplan);
  14663. }
  14664. struct ggml_tensor * ggml_graph_get_tensor(struct ggml_cgraph * cgraph, const char * name) {
  14665. for (int i = 0; i < cgraph->n_leafs; i++) {
  14666. struct ggml_tensor * leaf = cgraph->leafs[i];
  14667. if (strcmp(leaf->name, name) == 0) {
  14668. return leaf;
  14669. }
  14670. }
  14671. for (int i = 0; i < cgraph->n_nodes; i++) {
  14672. struct ggml_tensor * node = cgraph->nodes[i];
  14673. if (strcmp(node->name, name) == 0) {
  14674. return node;
  14675. }
  14676. }
  14677. return NULL;
  14678. }
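// Lookup is by exact name, checking leafs first and then nodes. Sketch (ggml_set_name() is
// assumed from the public header; "logits" and the name string are placeholders):
//
//   ggml_set_name(logits, "output_logits");
//   ...
//   struct ggml_tensor * t = ggml_graph_get_tensor(&gf, "output_logits");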
  14679. static void ggml_graph_export_leaf(const struct ggml_tensor * tensor, FILE * fout) {
  14680. const int64_t * ne = tensor->ne;
  14681. const size_t * nb = tensor->nb;
  14682. fprintf(fout, "%-6s %-12s %8d %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %16zu %16zu %16zu %16zu %16p %32s\n",
  14683. ggml_type_name(tensor->type),
  14684. ggml_op_name (tensor->op),
  14685. tensor->n_dims,
  14686. ne[0], ne[1], ne[2], ne[3],
  14687. nb[0], nb[1], nb[2], nb[3],
  14688. tensor->data,
  14689. tensor->name);
  14690. }
  14691. static void ggml_graph_export_node(const struct ggml_tensor * tensor, const char * arg, FILE * fout) {
  14692. const int64_t * ne = tensor->ne;
  14693. const size_t * nb = tensor->nb;
  14694. fprintf(fout, "%-6s %-6s %-12s %8d %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %16zu %16zu %16zu %16zu %16p %32s\n",
  14695. arg,
  14696. ggml_type_name(tensor->type),
  14697. ggml_op_name (tensor->op),
  14698. tensor->n_dims,
  14699. ne[0], ne[1], ne[2], ne[3],
  14700. nb[0], nb[1], nb[2], nb[3],
  14701. tensor->data,
  14702. tensor->name);
  14703. }
  14704. void ggml_graph_export(const struct ggml_cgraph * cgraph, const char * fname) {
  14705. uint64_t size_eval = 0;
  14706. // compute size of intermediate results
  14707. // TODO: does not take into account scratch buffers !!!!
  14708. for (int i = 0; i < cgraph->n_nodes; ++i) {
  14709. size_eval += ggml_nbytes_pad(cgraph->nodes[i]);
  14710. }
  14711. // print
  14712. {
  14713. FILE * fout = stdout;
  14714. fprintf(fout, "\n");
  14715. fprintf(fout, "%-16s %8x\n", "magic", GGML_FILE_MAGIC);
  14716. fprintf(fout, "%-16s %8d\n", "version", GGML_FILE_VERSION);
  14717. fprintf(fout, "%-16s %8d\n", "leafs", cgraph->n_leafs);
  14718. fprintf(fout, "%-16s %8d\n", "nodes", cgraph->n_nodes);
  14719. fprintf(fout, "%-16s %" PRIu64 "\n", "eval", size_eval);
  14720. // header
  14721. fprintf(fout, "\n");
  14722. fprintf(fout, "%-6s %-12s %8s %8s %8s %8s %8s %16s %16s %16s %16s %16s %16s\n",
  14723. "TYPE", "OP", "NDIMS", "NE0", "NE1", "NE2", "NE3", "NB0", "NB1", "NB2", "NB3", "DATA", "NAME");
  14724. for (int i = 0; i < cgraph->n_leafs; ++i) {
  14725. ggml_graph_export_leaf(cgraph->leafs[i], fout);
  14726. GGML_ASSERT(cgraph->leafs[i]->op == GGML_OP_NONE);
  14727. GGML_ASSERT(cgraph->leafs[i]->src[0] == NULL);
  14728. GGML_ASSERT(cgraph->leafs[i]->src[1] == NULL);
  14729. }
  14730. // header
  14731. fprintf(fout, "\n");
  14732. fprintf(fout, "%-6s %-6s %-12s %8s %8s %8s %8s %8s %16s %16s %16s %16s %8s %16s %16s\n",
  14733. "ARG", "TYPE", "OP", "NDIMS", "NE0", "NE1", "NE2", "NE3", "NB0", "NB1", "NB2", "NB3", "NTASKS", "DATA", "NAME");
  14734. for (int i = 0; i < cgraph->n_nodes; ++i) {
  14735. ggml_graph_export_node(cgraph->nodes[i], "DST", fout);
  14736. for (int j = 0; j < GGML_MAX_SRC; ++j) {
  14737. if (cgraph->nodes[i]->src[j]) {
  14738. ggml_graph_export_node(cgraph->nodes[i]->src[j], "SRC", fout);
  14739. }
  14740. }
  14741. fprintf(fout, "\n");
  14742. }
  14743. fprintf(fout, "\n");
  14744. }
  14745. // write binary data
  14746. {
  14747. FILE * fout = fopen(fname, "wb");
  14748. if (!fout) {
  14749. fprintf(stderr, "%s: failed to open %s\n", __func__, fname);
  14750. return;
  14751. }
  14752. // header
  14753. {
  14754. const uint32_t magic = GGML_FILE_MAGIC;
  14755. const uint32_t version = GGML_FILE_VERSION;
  14756. const uint32_t n_leafs = cgraph->n_leafs;
  14757. const uint32_t nodes = cgraph->n_nodes;
  14758. fwrite(&magic, sizeof(uint32_t), 1, fout);
  14759. fwrite(&version, sizeof(uint32_t), 1, fout);
  14760. fwrite(&n_leafs, sizeof(uint32_t), 1, fout);
  14761. fwrite(&nodes, sizeof(uint32_t), 1, fout);
  14762. fwrite(&size_eval, sizeof(uint64_t), 1, fout);
  14763. }
  14764. // leafs
  14765. {
  14766. for (int i = 0; i < cgraph->n_leafs; ++i) {
  14767. const struct ggml_tensor * tensor = cgraph->leafs[i];
  14768. const uint32_t type = tensor->type;
  14769. const uint32_t op = tensor->op;
  14770. const uint32_t n_dims = tensor->n_dims;
  14771. fwrite(&type, sizeof(uint32_t), 1, fout);
  14772. fwrite(&op, sizeof(uint32_t), 1, fout);
  14773. fwrite(&n_dims, sizeof(uint32_t), 1, fout);
  14774. for (int j = 0; j < GGML_MAX_DIMS; ++j) {
  14775. const uint64_t ne = tensor->ne[j];
  14776. const uint64_t nb = tensor->nb[j];
  14777. fwrite(&ne, sizeof(uint64_t), 1, fout);
  14778. fwrite(&nb, sizeof(uint64_t), 1, fout);
  14779. }
  14780. fwrite(tensor->name, sizeof(char), GGML_MAX_NAME, fout);
  14781. fwrite(tensor->op_params, sizeof(char), GGML_MAX_OP_PARAMS, fout);
  14782. // dump the data
  14783. // TODO: pad this to 32 byte boundary
  14784. {
  14785. const size_t size = ggml_nbytes(tensor);
  14786. fwrite(tensor->data, sizeof(char), size, fout);
  14787. }
  14788. }
  14789. }
  14790. // nodes
  14791. {
  14792. for (int i = 0; i < cgraph->n_nodes; ++i) {
  14793. const struct ggml_tensor * tensor = cgraph->nodes[i];
  14794. const uint32_t type = tensor->type;
  14795. const uint32_t op = tensor->op;
  14796. const uint32_t n_dims = tensor->n_dims;
  14797. fwrite(&type, sizeof(uint32_t), 1, fout);
  14798. fwrite(&op, sizeof(uint32_t), 1, fout);
  14799. fwrite(&n_dims, sizeof(uint32_t), 1, fout);
  14800. for (int j = 0; j < GGML_MAX_DIMS; ++j) {
  14801. const uint64_t ne = tensor->ne[j];
  14802. const uint64_t nb = tensor->nb[j];
  14803. fwrite(&ne, sizeof(uint64_t), 1, fout);
  14804. fwrite(&nb, sizeof(uint64_t), 1, fout);
  14805. }
  14806. fwrite(tensor->name, sizeof(char), GGML_MAX_NAME, fout);
  14807. fwrite(tensor->op_params, sizeof(char), GGML_MAX_OP_PARAMS, fout);
  14808. // output the op arguments
  14809. {
  14810. struct ggml_tensor * args[GGML_MAX_SRC] = { NULL };
  14811. for (int j = 0; j < GGML_MAX_SRC; ++j) {
  14812. args[j] = tensor->src[j];
  14813. }
  14814. for (int j = 0; j < GGML_MAX_SRC; ++j) {
  14815. if (args[j]) {
  14816. int32_t idx = -1;
  14817. // check if leaf
  14818. {
  14819. for (int k = 0; k < cgraph->n_leafs; ++k) {
  14820. if (args[j] == cgraph->leafs[k]) {
  14821. idx = k;
  14822. break;
  14823. }
  14824. }
  14825. }
  14826. // check if node
  14827. if (idx == -1) {
  14828. for (int k = 0; k < cgraph->n_nodes; ++k) {
  14829. if (args[j] == cgraph->nodes[k]) {
  14830. idx = GGML_MAX_NODES + k;
  14831. break;
  14832. }
  14833. }
  14834. }
  14835. if (idx == -1) {
  14836. fprintf(stderr, "%s: failed to find tensor, arg = %d, node = %d\n", __func__, j, i);
  14837. return;
  14838. }
  14839. fwrite(&idx, sizeof(int32_t), 1, fout);
  14840. } else {
  14841. const int32_t nul = -1;
  14842. fwrite(&nul, sizeof(int32_t), 1, fout);
  14843. }
  14844. }
  14845. }
  14846. }
  14847. }
  14848. fclose(fout);
  14849. }
  14850. }
  14851. struct ggml_cgraph ggml_graph_import(const char * fname, struct ggml_context ** ctx_data, struct ggml_context ** ctx_eval) {
  14852. assert(*ctx_data == NULL);
  14853. assert(*ctx_eval == NULL);
  14854. struct ggml_cgraph result = { 0 };
  14855. struct ggml_tensor * data = NULL;
  14856. // read file into data
  14857. {
  14858. FILE * fin = fopen(fname, "rb");
  14859. if (!fin) {
  14860. fprintf(stderr, "%s: failed to open %s\n", __func__, fname);
  14861. return result;
  14862. }
  14863. size_t fsize = 0;
  14864. fseek(fin, 0, SEEK_END);
  14865. fsize = ftell(fin);
  14866. fseek(fin, 0, SEEK_SET);
  14867. // create the data context
  14868. {
  14869. const size_t overhead = 1*ggml_tensor_overhead();
  14870. struct ggml_init_params params = {
  14871. .mem_size = fsize + overhead,
  14872. .mem_buffer = NULL,
  14873. .no_alloc = false,
  14874. };
  14875. *ctx_data = ggml_init(params);
  14876. if (!*ctx_data) {
  14877. fprintf(stderr, "%s: failed to create ggml context\n", __func__);
  14878. fclose(fin);
  14879. return result;
  14880. }
  14881. }
  14882. data = ggml_new_tensor_1d(*ctx_data, GGML_TYPE_I8, fsize);
  14883. {
  14884. const size_t ret = fread(data->data, sizeof(char), fsize, fin);
  14885. if (ret != fsize) {
  14886. fprintf(stderr, "%s: failed to read %s\n", __func__, fname);
  14887. fclose(fin);
  14888. return result;
  14889. }
  14890. }
  14891. fclose(fin);
  14892. }
  14893. // populate result
  14894. {
  14895. char * ptr = (char *) data->data;
  14896. const uint32_t magic = *(const uint32_t *) ptr; ptr += sizeof(magic);
  14897. if (magic != GGML_FILE_MAGIC) {
  14898. fprintf(stderr, "%s: invalid magic number, got %08x\n", __func__, magic);
  14899. return result;
  14900. }
  14901. const uint32_t version = *(const uint32_t *) ptr; ptr += sizeof(version);
  14902. if (version != GGML_FILE_VERSION) {
  14903. fprintf(stderr, "%s: invalid version number\n", __func__);
  14904. return result;
  14905. }
  14906. const uint32_t n_leafs = *(const uint32_t *) ptr; ptr += sizeof(n_leafs);
  14907. const uint32_t n_nodes = *(const uint32_t *) ptr; ptr += sizeof(n_nodes);
  14908. const uint64_t size_eval = *(const uint64_t *) ptr; ptr += sizeof(size_eval);
  14909. result.n_leafs = n_leafs;
  14910. result.n_nodes = n_nodes;
  14911. // create the data context
  14912. {
  14913. const size_t overhead = (n_leafs + n_nodes)*ggml_tensor_overhead();
  14914. struct ggml_init_params params = {
  14915. .mem_size = size_eval + overhead,
  14916. .mem_buffer = NULL,
  14917. .no_alloc = true,
  14918. };
  14919. *ctx_eval = ggml_init(params);
  14920. if (!*ctx_eval) {
  14921. fprintf(stderr, "%s: failed to create ggml context\n", __func__);
  14922. return result;
  14923. }
  14924. }
  14925. // leafs
  14926. {
  14927. uint32_t type;
  14928. uint32_t op;
  14929. uint32_t n_dims;
  14930. for (uint32_t i = 0; i < n_leafs; ++i) {
  14931. type = *(const uint32_t *) ptr; ptr += sizeof(type);
  14932. op = *(const uint32_t *) ptr; ptr += sizeof(op);
  14933. n_dims = *(const uint32_t *) ptr; ptr += sizeof(n_dims);
  14934. int64_t ne[GGML_MAX_DIMS];
  14935. size_t nb[GGML_MAX_DIMS];
  14936. for (int j = 0; j < GGML_MAX_DIMS; ++j) {
  14937. uint64_t ne_cur;
  14938. uint64_t nb_cur;
  14939. ne_cur = *(const uint64_t *) ptr; ptr += sizeof(ne_cur);
  14940. nb_cur = *(const uint64_t *) ptr; ptr += sizeof(nb_cur);
  14941. ne[j] = ne_cur;
  14942. nb[j] = nb_cur;
  14943. }
  14944. struct ggml_tensor * tensor = ggml_new_tensor(*ctx_eval, (enum ggml_type) type, n_dims, ne);
  14945. tensor->op = (enum ggml_op) op;
  14946. memcpy(tensor->name, ptr, GGML_MAX_NAME); ptr += GGML_MAX_NAME;
  14947. memcpy(tensor->op_params, ptr, GGML_MAX_OP_PARAMS); ptr += GGML_MAX_OP_PARAMS;
  14948. tensor->data = (void *) ptr;
  14949. for (int j = 0; j < GGML_MAX_DIMS; ++j) {
  14950. tensor->nb[j] = nb[j];
  14951. }
  14952. result.leafs[i] = tensor;
  14953. ptr += ggml_nbytes(tensor);
  14954. fprintf(stderr, "%s: loaded leaf %d: '%16s', %3d dims, %9zu bytes\n", __func__, i, tensor->name, n_dims, ggml_nbytes(tensor));
  14955. }
  14956. }
  14957. ggml_set_no_alloc(*ctx_eval, false);
  14958. // nodes
  14959. {
  14960. uint32_t type;
  14961. uint32_t op;
  14962. uint32_t n_dims;
  14963. for (uint32_t i = 0; i < n_nodes; ++i) {
  14964. type = *(const uint32_t *) ptr; ptr += sizeof(type);
  14965. op = *(const uint32_t *) ptr; ptr += sizeof(op);
  14966. n_dims = *(const uint32_t *) ptr; ptr += sizeof(n_dims);
  14967. enum ggml_op eop = (enum ggml_op) op;
  14968. int64_t ne[GGML_MAX_DIMS];
  14969. size_t nb[GGML_MAX_DIMS];
  14970. for (int j = 0; j < GGML_MAX_DIMS; ++j) {
  14971. uint64_t ne_cur;
  14972. uint64_t nb_cur;
  14973. ne_cur = *(const uint64_t *) ptr; ptr += sizeof(ne_cur);
  14974. nb_cur = *(const uint64_t *) ptr; ptr += sizeof(nb_cur);
  14975. ne[j] = ne_cur;
  14976. nb[j] = nb_cur;
  14977. }
  14978. const char * ptr_name = ptr; ptr += GGML_MAX_NAME;
  14979. const char * ptr_op_params = ptr; ptr += GGML_MAX_OP_PARAMS;
  14980. const int32_t * ptr_arg_idx = (const int32_t *) ptr; ptr += GGML_MAX_SRC*sizeof(int32_t);
  14981. struct ggml_tensor * args[GGML_MAX_SRC] = { NULL };
  14982. // parse args
  14983. for (int j = 0; j < GGML_MAX_SRC; ++j) {
  14984. const int32_t arg_idx = ptr_arg_idx[j];
  14985. if (arg_idx == -1) {
  14986. continue;
  14987. }
  14988. if (arg_idx < GGML_MAX_NODES) {
  14989. args[j] = result.leafs[arg_idx];
  14990. } else {
  14991. args[j] = result.nodes[arg_idx - GGML_MAX_NODES];
  14992. }
  14993. }
  14994. // create the tensor
  14995. // "view" operations are handled differently
  14996. // TODO: handle inplace ops - currently a copy is always made
  14997. struct ggml_tensor * tensor = NULL;
  14998. switch (eop) {
  14999. // TODO: implement other view ops
  15000. case GGML_OP_RESHAPE:
  15001. {
  15002. tensor = ggml_reshape_4d(*ctx_eval, args[0], ne[0], ne[1], ne[2], ne[3]);
  15003. } break;
  15004. case GGML_OP_VIEW:
  15005. {
  15006. tensor = ggml_view_4d(*ctx_eval, args[0], ne[0], ne[1], ne[2], ne[3], 0, 0, 0, 0);
  15007. size_t offs;
  15008. memcpy(&offs, ptr_op_params, sizeof(offs));
  15009. tensor->data = ((char *) tensor->data) + offs;
  15010. } break;
  15011. case GGML_OP_TRANSPOSE:
  15012. {
  15013. tensor = ggml_transpose(*ctx_eval, args[0]);
  15014. } break;
  15015. case GGML_OP_PERMUTE:
  15016. {
  15017. tensor = ggml_view_4d(*ctx_eval, args[0], ne[0], ne[1], ne[2], ne[3], 0, 0, 0, 0);
  15018. } break;
  15019. default:
  15020. {
  15021. tensor = ggml_new_tensor(*ctx_eval, (enum ggml_type) type, n_dims, ne);
  15022. tensor->op = eop;
  15023. } break;
  15024. }
  15025. memcpy(tensor->name, ptr_name, GGML_MAX_NAME);
  15026. memcpy(tensor->op_params, ptr_op_params, GGML_MAX_OP_PARAMS);
  15027. for (int j = 0; j < GGML_MAX_DIMS; ++j) {
  15028. tensor->nb[j] = nb[j];
  15029. }
  15030. for (int j = 0; j < GGML_MAX_SRC; ++j) {
  15031. tensor->src[j] = args[j];
  15032. }
  15033. result.nodes[i] = tensor;
  15034. fprintf(stderr, "%s: loaded node %d: '%16s', %3d dims, %9zu bytes\n", __func__, i, tensor->name, n_dims, ggml_nbytes(tensor));
  15035. }
  15036. }
  15037. }
  15038. return result;
  15039. }
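// Export/import round-trip sketch (the file name is a placeholder):
//
//   ggml_graph_export(&gf, "graph.ggml");
//
//   struct ggml_context * ctx_data = NULL;
//   struct ggml_context * ctx_eval = NULL;
//   struct ggml_cgraph gf2 = ggml_graph_import("graph.ggml", &ctx_data, &ctx_eval);
//   // ctx_data owns the raw file contents, ctx_eval owns the reconstructed tensors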
  15040. void ggml_graph_print(const struct ggml_cgraph * cgraph) {
  15041. int64_t perf_total_per_op_us[GGML_OP_COUNT] = {0};
  15042. GGML_PRINT("=== GRAPH ===\n");
  15043. GGML_PRINT("n_nodes = %d\n", cgraph->n_nodes);
  15044. for (int i = 0; i < cgraph->n_nodes; i++) {
  15045. struct ggml_tensor * node = cgraph->nodes[i];
  15046. perf_total_per_op_us[node->op] += MAX(1, node->perf_time_us);
  15047. GGML_PRINT(" - %3d: [ %5" PRId64 ", %5" PRId64 ", %5" PRId64 "] %16s %s (%3d) cpu = %7.3f / %7.3f ms, wall = %7.3f / %7.3f ms\n",
  15048. i,
  15049. node->ne[0], node->ne[1], node->ne[2],
  15050. ggml_op_name(node->op), node->is_param ? "x" : node->grad ? "g" : " ", node->perf_runs,
  15051. (double) node->perf_cycles / (double) ggml_cycles_per_ms(),
  15052. (double) node->perf_cycles / (double) ggml_cycles_per_ms() / (double) node->perf_runs,
  15053. (double) node->perf_time_us / 1000.0,
  15054. (double) node->perf_time_us / 1000.0 / node->perf_runs);
  15055. }
  15056. GGML_PRINT("n_leafs = %d\n", cgraph->n_leafs);
  15057. for (int i = 0; i < cgraph->n_leafs; i++) {
  15058. struct ggml_tensor * node = cgraph->leafs[i];
  15059. GGML_PRINT(" - %3d: [ %5" PRId64 ", %5" PRId64 "] %8s\n",
  15060. i,
  15061. node->ne[0], node->ne[1],
  15062. ggml_op_name(node->op),
  15063. ggml_get_name(node));
  15064. }
  15065. for (int i = 0; i < GGML_OP_COUNT; i++) {
  15066. if (perf_total_per_op_us[i] == 0) {
  15067. continue;
  15068. }
  15069. GGML_PRINT("perf_total_per_op_us[%16s] = %7.3f ms\n", ggml_op_name(i), (double) perf_total_per_op_us[i] / 1000.0);
  15070. }
  15071. GGML_PRINT("========================================\n");
  15072. }
  15073. // check if node is part of the graph
  15074. static bool ggml_graph_find(const struct ggml_cgraph * cgraph, const struct ggml_tensor * node) {
  15075. if (cgraph == NULL) {
  15076. return true;
  15077. }
  15078. for (int i = 0; i < cgraph->n_nodes; i++) {
  15079. if (cgraph->nodes[i] == node) {
  15080. return true;
  15081. }
  15082. }
  15083. return false;
  15084. }
  15085. static struct ggml_tensor * ggml_graph_get_parent(const struct ggml_cgraph * cgraph, const struct ggml_tensor * node) {
  15086. for (int i = 0; i < cgraph->n_nodes; i++) {
  15087. struct ggml_tensor * parent = cgraph->nodes[i];
  15088. if (parent->grad == node) {
  15089. return parent;
  15090. }
  15091. }
  15092. return NULL;
  15093. }
  15094. static void ggml_graph_dump_dot_node_edge(FILE * fp, const struct ggml_cgraph * gb, struct ggml_tensor * node, struct ggml_tensor * parent, const char * label) {
  15095. struct ggml_tensor * gparent = ggml_graph_get_parent(gb, node);
  15096. struct ggml_tensor * gparent0 = ggml_graph_get_parent(gb, parent);
  15097. fprintf(fp, " \"%p\":%s -> \"%p\":%s [ arrowhead = %s; style = %s; label = \"%s\"; ]\n",
  15098. gparent0 ? (void *) gparent0 : (void *) parent,
  15099. gparent0 ? "g" : "x",
  15100. gparent ? (void *) gparent : (void *) node,
  15101. gparent ? "g" : "x",
  15102. gparent ? "empty" : "vee",
  15103. gparent ? "dashed" : "solid",
  15104. label);
  15105. }
  15106. static void ggml_graph_dump_dot_leaf_edge(FILE * fp, struct ggml_tensor * node, struct ggml_tensor * parent, const char * label) {
  15107. fprintf(fp, " \"%p\":%s -> \"%p\":%s [ label = \"%s\"; ]\n",
  15108. (void *) parent, "x",
  15109. (void *) node, "x",
  15110. label);
  15111. }
  15112. void ggml_graph_dump_dot(const struct ggml_cgraph * gb, const struct ggml_cgraph * gf, const char * filename) {
  15113. char color[16];
  15114. FILE * fp = fopen(filename, "w");
  15115. GGML_ASSERT(fp);
  15116. fprintf(fp, "digraph G {\n");
  15117. fprintf(fp, " newrank = true;\n");
  15118. fprintf(fp, " rankdir = LR;\n");
  15119. for (int i = 0; i < gb->n_nodes; i++) {
  15120. struct ggml_tensor * node = gb->nodes[i];
  15121. if (ggml_graph_get_parent(gb, node) != NULL) {
  15122. continue;
  15123. }
  15124. if (node->is_param) {
  15125. snprintf(color, sizeof(color), "yellow");
  15126. } else if (node->grad) {
  15127. if (ggml_graph_find(gf, node)) {
  15128. snprintf(color, sizeof(color), "green");
  15129. } else {
  15130. snprintf(color, sizeof(color), "lightblue");
  15131. }
  15132. } else {
  15133. snprintf(color, sizeof(color), "white");
  15134. }
  15135. fprintf(fp, " \"%p\" [ "
  15136. "style = filled; fillcolor = %s; shape = record; "
  15137. "label=\"",
  15138. (void *) node, color);
  15139. if (strlen(node->name) > 0) {
  15140. fprintf(fp, "%s (%s)|", node->name, ggml_type_name(node->type));
  15141. } else {
  15142. fprintf(fp, "(%s)|", ggml_type_name(node->type));
  15143. }
  15144. if (node->n_dims == 2) {
  15145. fprintf(fp, "%d [%" PRId64 ", %" PRId64 "] | <x>%s", i, node->ne[0], node->ne[1], ggml_op_symbol(node->op));
  15146. } else {
  15147. fprintf(fp, "%d [%" PRId64 ", %" PRId64 ", %" PRId64 "] | <x>%s", i, node->ne[0], node->ne[1], node->ne[2], ggml_op_symbol(node->op));
  15148. }
  15149. if (node->grad) {
  15150. fprintf(fp, " | <g>%s\"; ]\n", ggml_op_symbol(node->grad->op));
  15151. } else {
  15152. fprintf(fp, "\"; ]\n");
  15153. }
  15154. }
  15155. for (int i = 0; i < gb->n_leafs; i++) {
  15156. struct ggml_tensor * node = gb->leafs[i];
  15157. snprintf(color, sizeof(color), "pink");
  15158. fprintf(fp, " \"%p\" [ "
  15159. "style = filled; fillcolor = %s; shape = record; "
  15160. "label=\"<x>",
  15161. (void *) node, color);
  15162. if (strlen(node->name) > 0) {
  15163. fprintf(fp, "%s (%s)|", node->name, ggml_type_name(node->type));
  15164. } else {
  15165. fprintf(fp, "(%s)|", ggml_type_name(node->type));
  15166. }
  15167. fprintf(fp, "CONST %d [%" PRId64 ", %" PRId64 "]", i, node->ne[0], node->ne[1]);
  15168. if (ggml_nelements(node) < 5) {
  15169. fprintf(fp, " | (");
  15170. for (int j = 0; j < ggml_nelements(node); j++) {
  15171. if (node->type == GGML_TYPE_I8 || node->type == GGML_TYPE_I16 || node->type == GGML_TYPE_I32) {
  15172. fprintf(fp, "%d", ggml_get_i32_1d(node, j));
  15173. }
  15174. else if (node->type == GGML_TYPE_F32 || node->type == GGML_TYPE_F16) {
  15175. fprintf(fp, "%.1e", (double)ggml_get_f32_1d(node, j));
  15176. }
  15177. else {
  15178. fprintf(fp, "#");
  15179. }
  15180. if (j < ggml_nelements(node) - 1) {
  15181. fprintf(fp, ", ");
  15182. }
  15183. }
  15184. fprintf(fp, ")");
  15185. }
  15186. fprintf(fp, "\"; ]\n");
  15187. }
  15188. for (int i = 0; i < gb->n_nodes; i++) {
  15189. struct ggml_tensor * node = gb->nodes[i];
  15190. for (int j = 0; j < GGML_MAX_SRC; j++) {
  15191. if (node->src[j]) {
  15192. char label[16];
  15193. snprintf(label, sizeof(label), "src %d", j);
  15194. ggml_graph_dump_dot_node_edge(fp, gb, node, node->src[j], label);
  15195. }
  15196. }
  15197. }
  15198. for (int i = 0; i < gb->n_leafs; i++) {
  15199. struct ggml_tensor * node = gb->leafs[i];
  15200. for (int j = 0; j < GGML_MAX_SRC; j++) {
  15201. if (node->src[j]) {
  15202. char label[16];
  15203. snprintf(label, sizeof(label), "src %d", j);
  15204. ggml_graph_dump_dot_leaf_edge(fp, node, node->src[j], label);
  15205. }
  15206. }
  15207. }
  15208. fprintf(fp, "}\n");
  15209. fclose(fp);
  15210. GGML_PRINT("%s: dot -Tpng %s -o %s.png && open %s.png\n", __func__, filename, filename, filename);
  15211. }
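// Sketch: dump the backward graph gb (gf is only used to color nodes that also appear in the
// forward graph) and render it with graphviz:
//
//   ggml_graph_dump_dot(&gb, &gf, "graph.dot");
//   // then run: dot -Tpng graph.dot -o graph.png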
  15212. ////////////////////////////////////////////////////////////////////////////////
  15213. static void ggml_opt_set_params(int np, struct ggml_tensor * const ps[], const float * x) {
  15214. int i = 0;
  15215. for (int p = 0; p < np; ++p) {
15216. const int64_t ne = ggml_nelements(ps[p]);
  15217. // TODO: add function to set tensor from array
  15218. for (int64_t j = 0; j < ne; ++j) {
  15219. ggml_set_f32_1d(ps[p], j, x[i++]);
  15220. }
  15221. }
  15222. }
  15223. static void ggml_opt_get_params(int np, struct ggml_tensor * const ps[], float * x) {
  15224. int i = 0;
  15225. for (int p = 0; p < np; ++p) {
15226. const int64_t ne = ggml_nelements(ps[p]);
  15227. // TODO: add function to get all elements at once
  15228. for (int64_t j = 0; j < ne; ++j) {
  15229. x[i++] = ggml_get_f32_1d(ps[p], j);
  15230. }
  15231. }
  15232. }
  15233. static void ggml_opt_get_grad(int np, struct ggml_tensor * const ps[], float * g) {
  15234. int i = 0;
  15235. for (int p = 0; p < np; ++p) {
15236. const int64_t ne = ggml_nelements(ps[p]);
  15237. // TODO: add function to get all elements at once
  15238. for (int64_t j = 0; j < ne; ++j) {
  15239. g[i++] = ggml_get_f32_1d(ps[p]->grad, j);
  15240. }
  15241. }
  15242. }
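// note: the three helpers above copy between the optimized tensors and one flat float
// vector: parameters are visited in the order in which they appear in the forward graph,
// and each tensor contributes ggml_nelements(t) consecutive values. the same flat layout
// is used for the Adam moments (m, v) and the L-BFGS vectors (x, xp, g, gp, d) allocated
// in ggml_opt_init() below.
//
// illustrative sketch (assumed shapes, not part of the library):
//
//   ps[0] : 2x3 tensor -> x[0..5]
//   ps[1] : 4   tensor -> x[6..9]   (nx = 10 in total)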
  15243. //
  15244. // ADAM
  15245. //
  15246. // ref: https://arxiv.org/pdf/1412.6980.pdf
  15247. //
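// the inner loop below is the standard Adam update, with the bias corrections folded
// into the per-iteration step size (beta1h, beta2h) and an optional AdamW-style
// decoupled weight decay (p_decay) applied only to tensors with at least
// decay_min_ndim dimensions:
//
//   m = beta1*m + (1 - beta1)*g
//   v = beta2*v + (1 - beta2)*g*g
//   x = x*(1 - p_decay) - sched*alpha * (m/(1 - beta1^t)) / (sqrt(v/(1 - beta2^t)) + eps)
//
// if gclip > 0, the gradients are first rescaled so that their global norm does not
// exceed gclip (see gnorm).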
  15248. static enum ggml_opt_result ggml_opt_adam(
  15249. struct ggml_context * ctx,
  15250. struct ggml_opt_context * opt,
  15251. struct ggml_opt_params params,
  15252. struct ggml_tensor * f,
  15253. struct ggml_cgraph * gf,
  15254. struct ggml_cgraph * gb,
  15255. ggml_opt_callback callback,
  15256. void * callback_data) {
  15257. GGML_ASSERT(ggml_is_scalar(f));
  15258. // these will store the parameters we want to optimize
  15259. struct ggml_tensor * ps[GGML_MAX_PARAMS];
  15260. int np = 0;
  15261. int64_t nx = 0;
  15262. for (int i = 0; i < gf->n_nodes; ++i) {
  15263. if (gf->nodes[i]->is_param) {
  15264. GGML_PRINT_DEBUG("found param %d: grad->op = %d\n", np, gf->nodes[i]->grad->op);
  15265. GGML_ASSERT(np < GGML_MAX_PARAMS);
  15266. ps[np++] = gf->nodes[i];
  15267. nx += ggml_nelements(gf->nodes[i]);
  15268. }
  15269. }
  15270. if ((opt->params.type != params.type) || (opt->nx != nx) || (opt->params.past != params.past)) {
  15271. int iter = opt->iter;
  15272. ggml_opt_init(opt->ctx, opt, params, nx);
  15273. opt->iter = iter;
  15274. }
  15275. // constants
  15276. float sched = params.adam.sched;
  15277. const float alpha = params.adam.alpha;
  15278. const float decay = params.adam.decay * alpha;
  15279. const float beta1 = params.adam.beta1;
  15280. const float beta2 = params.adam.beta2;
  15281. const float eps = params.adam.eps;
  15282. const float gclip = params.adam.gclip;
  15283. const int decay_min_ndim = params.adam.decay_min_ndim;
  15284. float * m = opt->adam.m->data; // first moment
  15285. float * v = opt->adam.v->data; // second moment
  15286. float * pf = params.past > 0 ? opt->adam.pf->data : NULL; // past function values
  15287. if (callback) {
  15288. callback(callback_data, &sched);
  15289. }
  15290. // compute the function value
  15291. ggml_graph_reset (gf);
  15292. ggml_set_f32 (f->grad, 1.0f);
  15293. struct ggml_cplan cplan = ggml_graph_plan(gb, params.n_threads);
  15294. struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_WORK_BUFFER, cplan.work_size);
  15295. cplan.work_data = (uint8_t *)ctx->mem_buffer + obj->offs;
  15296. ggml_graph_compute(gb, &cplan);
  15297. opt->adam.fx_prev = ggml_get_f32_1d(f, 0);
  15298. opt->adam.fx_best = opt->adam.fx_prev;
  15299. if (pf) {
  15300. pf[opt->iter % params.past] = opt->adam.fx_prev;
  15301. }
  15302. opt->loss_before = opt->adam.fx_prev;
  15303. opt->loss_after = opt->adam.fx_prev;
  15304. // initialize
  15305. if (opt->just_initialized) {
  15306. opt->adam.n_no_improvement = 0;
  15307. opt->just_initialized = false;
  15308. }
  15309. float * fx_best = &opt->adam.fx_best;
  15310. float * fx_prev = &opt->adam.fx_prev;
  15311. int * n_no_improvement = &opt->adam.n_no_improvement;
  15312. int iter0 = opt->iter;
  15313. // run the optimizer
  15314. for (int t = 0; t < params.adam.n_iter; ++t) {
  15315. opt->iter = iter0 + t + 1;
  15316. GGML_PRINT_DEBUG ("=== iter %d ===\n", t);
  15317. GGML_PRINT_DEBUG ("f = %10.6f\n", ggml_get_f32_1d(f, 0));
  15318. GGML_PRINT_DEBUG_5("df/dx0 = %10.6f\n", ggml_get_f32_1d(ps[0]->grad, 0));
  15319. GGML_PRINT_DEBUG_5("df/dx1 = %10.6f\n", ggml_get_f32_1d(ps[1]->grad, 0));
  15320. for (int i = 0; i < np; ++i) {
  15321. GGML_PRINT_DEBUG("param %d: %10.6f, g = %10.6f\n", i,
  15322. ggml_get_f32_1d(ps[i], 0), ggml_get_f32_1d(ps[i]->grad, 0));
  15323. }
  15324. const int64_t t_start_wall = ggml_time_us();
  15325. const int64_t t_start_cpu = ggml_cycles();
  15326. UNUSED(t_start_wall);
  15327. UNUSED(t_start_cpu);
  15328. {
  15329. float gnorm = 1.0f;
  15330. if (gclip > 0.0f) {
  15331. // gradient clipping
  15332. ggml_float sum = 0.0;
  15333. for (int p = 0; p < np; ++p) {
  15334. const int64_t ne = ggml_nelements(ps[p]);
  15335. for (int64_t j = 0; j < ne; ++j) {
  15336. float g = ggml_get_f32_1d(ps[p]->grad, j);
  15337. sum += (ggml_float)(g*g);
  15338. }
  15339. }
  15340. ggml_float norm = sqrt(sum);
  15341. if (norm > (ggml_float) gclip) {
  15342. gnorm = (float) ((ggml_float) gclip / norm);
  15343. }
  15344. }
  15345. const float beta1h = alpha*sched/(1.0f - powf(beta1, opt->iter));
  15346. const float beta2h = 1.0f/(1.0f - powf(beta2, opt->iter));
  15347. int64_t i = 0;
  15348. for (int p = 0; p < np; ++p) {
  15349. const int64_t ne = ggml_nelements(ps[p]);
  15350. const float p_decay = ((ps[p]->n_dims >= decay_min_ndim) ? decay : 0.0f) * sched;
  15351. for (int64_t j = 0; j < ne; ++j) {
  15352. float x = ggml_get_f32_1d(ps[p], j);
  15353. float g = ggml_get_f32_1d(ps[p]->grad, j)*gnorm;
  15354. m[i] = m[i]*beta1 + g*(1.0f - beta1);
  15355. v[i] = v[i]*beta2 + g*g*(1.0f - beta2);
  15356. float mh = m[i]*beta1h;
  15357. float vh = v[i]*beta2h;
  15358. vh = sqrtf(vh) + eps;
  15359. x = x*(1.0f - p_decay) - mh/vh;
  15360. ggml_set_f32_1d(ps[p], j, x);
  15361. ++i;
  15362. }
  15363. }
  15364. }
  15365. if (callback) {
  15366. callback(callback_data, &sched);
  15367. }
  15368. ggml_graph_reset (gf);
  15369. ggml_set_f32 (f->grad, 1.0f);
  15370. ggml_graph_compute(gb, &cplan);
  15371. const float fx = ggml_get_f32_1d(f, 0);
  15372. opt->loss_after = fx;
  15373. // check convergence
  15374. if (fabsf(fx - fx_prev[0])/fx < params.adam.eps_f) {
  15375. GGML_PRINT_DEBUG("converged\n");
  15376. return GGML_OPT_OK;
  15377. }
  15378. // delta-based convergence test
  15379. if (pf != NULL) {
  15380. // need at least params.past iterations to start checking for convergence
  15381. if (params.past <= iter0 + t) {
  15382. const float rate = (pf[(iter0 + t)%params.past] - fx)/fx;
  15383. if (fabsf(rate) < params.delta) {
  15384. return GGML_OPT_OK;
  15385. }
  15386. }
  15387. pf[(iter0 + t)%params.past] = fx;
  15388. }
  15389. // check for improvement
  15390. if (params.max_no_improvement > 0) {
  15391. if (fx_best[0] > fx) {
  15392. fx_best[0] = fx;
  15393. n_no_improvement[0] = 0;
  15394. } else {
  15395. ++n_no_improvement[0];
  15396. if (n_no_improvement[0] >= params.max_no_improvement) {
  15397. return GGML_OPT_OK;
  15398. }
  15399. }
  15400. }
  15401. fx_prev[0] = fx;
  15402. {
  15403. const int64_t t_end_cpu = ggml_cycles();
  15404. GGML_PRINT_DEBUG("time iter: %5.3f s\n", ((float)(t_end_cpu - t_start_cpu))/CLOCKS_PER_SEC);
  15405. UNUSED(t_end_cpu);
  15406. const int64_t t_end_wall = ggml_time_us();
  15407. GGML_PRINT_DEBUG("wall time iter: %5.3f s\n", (t_end_wall - t_start_wall)/1e6);
  15408. UNUSED(t_end_wall);
  15409. }
  15410. }
  15411. return GGML_OPT_DID_NOT_CONVERGE;
  15412. }
  15413. //
  15414. // L-BFGS
  15415. //
  15416. // the L-BFGS implementation below is based on the following implementation:
  15417. //
  15418. // https://github.com/chokkan/liblbfgs
  15419. //
  15420. struct ggml_lbfgs_iteration_data {
  15421. float alpha;
  15422. float ys;
  15423. float * s;
  15424. float * y;
  15425. };
  15426. static enum ggml_opt_result linesearch_backtracking(
  15427. const struct ggml_opt_params * params,
  15428. int nx,
  15429. float * x,
  15430. float * fx,
  15431. float * g,
  15432. float * d,
  15433. float * step,
  15434. const float * xp,
  15435. struct ggml_tensor * f,
  15436. struct ggml_cgraph * gf,
  15437. struct ggml_cgraph * gb,
  15438. struct ggml_cplan * cplan,
  15439. const int np,
  15440. struct ggml_tensor * ps[],
  15441. ggml_opt_callback callback,
  15442. void * callback_data) {
  15443. int count = 0;
  15444. float width = 0.0f;
  15445. float dg = 0.0f;
  15446. float finit = 0.0f;
  15447. float dginit = 0.0f;
  15448. float dgtest = 0.0f;
  15449. const float dec = 0.5f;
  15450. const float inc = 2.1f;
  15451. if (*step <= 0.f) {
  15452. return GGML_LINESEARCH_INVALID_PARAMETERS;
  15453. }
  15454. // compute the initial gradient in the search direction
  15455. ggml_vec_dot_f32(nx, &dginit, g, d);
  15456. // make sure that d points to a descent direction
  15457. if (0 < dginit) {
  15458. return GGML_LINESEARCH_FAIL;
  15459. }
  15460. // initialize local variables
  15461. finit = *fx;
  15462. dgtest = params->lbfgs.ftol*dginit;
  15463. while (true) {
  15464. if (callback) {
15465. // L-BFGS does not support learning rate -> ignore learning schedule
  15466. float sched = 0;
  15467. callback(callback_data, &sched);
  15468. }
  15469. ggml_vec_cpy_f32(nx, x, xp);
  15470. ggml_vec_mad_f32(nx, x, d, *step);
  15471. // evaluate the function and gradient values
  15472. {
  15473. ggml_opt_set_params(np, ps, x);
  15474. ggml_graph_reset (gf);
  15475. ggml_set_f32 (f->grad, 1.0f);
  15476. ggml_graph_compute(gb, cplan);
  15477. ggml_opt_get_grad(np, ps, g);
  15478. *fx = ggml_get_f32_1d(f, 0);
  15479. }
  15480. ++count;
  15481. if (*fx > finit + (*step)*dgtest) {
  15482. width = dec;
  15483. } else {
  15484. // Armijo condition is satisfied
  15485. if (params->lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_ARMIJO) {
  15486. return count;
  15487. }
  15488. ggml_vec_dot_f32(nx, &dg, g, d);
  15489. // check the Wolfe condition
  15490. if (dg < params->lbfgs.wolfe * dginit) {
  15491. width = inc;
  15492. } else {
  15493. if(params->lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_WOLFE) {
  15494. // regular Wolfe conditions
  15495. return count;
  15496. }
  15497. if(dg > -params->lbfgs.wolfe*dginit) {
  15498. width = dec;
  15499. } else {
  15500. // strong Wolfe condition (GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE)
  15501. return count;
  15502. }
  15503. }
  15504. }
  15505. if (*step < params->lbfgs.min_step) {
  15506. return GGML_LINESEARCH_MINIMUM_STEP;
  15507. }
  15508. if (*step > params->lbfgs.max_step) {
  15509. return GGML_LINESEARCH_MAXIMUM_STEP;
  15510. }
  15511. if (params->lbfgs.max_linesearch <= count) {
  15512. return GGML_LINESEARCH_MAXIMUM_ITERATIONS;
  15513. }
  15514. (*step) *= width;
  15515. }
  15516. return GGML_LINESEARCH_FAIL;
  15517. }
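// summary of the backtracking exit conditions above (dginit < 0 on entry):
//
//   - sufficient decrease (Armijo): fx <= finit + step*ftol*dginit
//   - curvature (Wolfe):            dg >= wolfe*dginit
//   - strong Wolfe:                 |dg| <= wolfe*|dginit|
//
// the step is multiplied by dec (0.5) while the sufficient-decrease test fails and by
// inc (2.1) while the curvature test fails; the search aborts with a GGML_LINESEARCH_*
// error when the step leaves [min_step, max_step] or max_linesearch is reached.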
  15518. static enum ggml_opt_result ggml_opt_lbfgs(
  15519. struct ggml_context * ctx,
  15520. struct ggml_opt_context * opt,
  15521. struct ggml_opt_params params,
  15522. struct ggml_tensor * f,
  15523. struct ggml_cgraph * gf,
  15524. struct ggml_cgraph * gb,
  15525. ggml_opt_callback callback,
  15526. void * callback_data) {
  15527. if (params.lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_WOLFE ||
  15528. params.lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE) {
  15529. if (params.lbfgs.wolfe <= params.lbfgs.ftol || 1.f <= params.lbfgs.wolfe) {
  15530. return GGML_OPT_INVALID_WOLFE;
  15531. }
  15532. }
  15533. const int m = params.lbfgs.m;
  15534. // these will store the parameters we want to optimize
  15535. struct ggml_tensor * ps[GGML_MAX_PARAMS];
  15536. int np = 0;
  15537. int nx = 0;
  15538. for (int i = 0; i < gf->n_nodes; ++i) {
  15539. if (gf->nodes[i]->is_param) {
  15540. GGML_PRINT_DEBUG("found param %d: grad->op = %d\n", np, gf->nodes[i]->grad->op);
  15541. GGML_ASSERT(np < GGML_MAX_PARAMS);
  15542. ps[np++] = gf->nodes[i];
  15543. nx += ggml_nelements(gf->nodes[i]);
  15544. }
  15545. }
  15546. if ((opt->params.type != params.type) || (opt->nx != nx) || (opt->params.past != params.past) || (opt->params.lbfgs.m != params.lbfgs.m)) {
  15547. int iter = opt->iter;
  15548. ggml_opt_init(ctx, opt, params, nx);
  15549. opt->iter = iter;
  15550. }
  15551. struct ggml_cplan cplan = ggml_graph_plan(gb, params.n_threads);
  15552. struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_WORK_BUFFER, cplan.work_size);
  15553. cplan.work_data = (uint8_t *)ctx->mem_buffer + obj->offs;
  15554. float * x = opt->lbfgs.x->data; // current parameters
  15555. float * xp = opt->lbfgs.xp->data; // previous parameters
  15556. float * g = opt->lbfgs.g->data; // current gradient
  15557. float * gp = opt->lbfgs.gp->data; // previous gradient
  15558. float * d = opt->lbfgs.d->data; // search direction
  15559. float * pf = params.past > 0 ? opt->lbfgs.pf->data : NULL; // past function values
  15560. float fx = 0.0f; // cost function value
  15561. float xnorm = 0.0f; // ||x||
  15562. float gnorm = 0.0f; // ||g||
  15563. // initialize x from the graph nodes
  15564. ggml_opt_get_params(np, ps, x);
  15565. // the L-BFGS memory
  15566. float * lm_alpha = opt->lbfgs.lmal->data;
  15567. float * lm_ys = opt->lbfgs.lmys->data;
  15568. float * lm_s = opt->lbfgs.lms->data;
  15569. float * lm_y = opt->lbfgs.lmy->data;
  15570. if (callback) {
15571. // L-BFGS does not support learning rate -> ignore learning schedule
  15572. float sched = 0;
  15573. callback(callback_data, &sched);
  15574. }
  15575. // evaluate the function value and its gradient
  15576. {
  15577. ggml_opt_set_params(np, ps, x);
  15578. ggml_graph_reset (gf);
  15579. ggml_set_f32 (f->grad, 1.0f);
  15580. ggml_graph_compute(gb, &cplan);
  15581. ggml_opt_get_grad(np, ps, g);
  15582. fx = ggml_get_f32_1d(f, 0);
  15583. opt->loss_before = fx;
  15584. opt->loss_after = fx;
  15585. }
  15586. // search direction = -gradient
  15587. ggml_vec_neg_f32(nx, d, g);
  15588. // ||x||, ||g||
  15589. ggml_vec_norm_f32(nx, &xnorm, x);
  15590. ggml_vec_norm_f32(nx, &gnorm, g);
  15591. if (xnorm < 1.0f) {
  15592. xnorm = 1.0f;
  15593. }
  15594. // already optimized
  15595. if (gnorm/xnorm <= params.lbfgs.eps) {
  15596. return GGML_OPT_OK;
  15597. }
  15598. if (opt->just_initialized) {
  15599. if (pf) {
  15600. pf[0] = fx;
  15601. }
  15602. opt->lbfgs.fx_best = fx;
  15603. // initial step
  15604. ggml_vec_norm_inv_f32(nx, &opt->lbfgs.step, d);
  15605. opt->lbfgs.j = 0;
  15606. opt->lbfgs.k = 1;
  15607. opt->lbfgs.end = 0;
  15608. opt->lbfgs.n_no_improvement = 0;
  15609. opt->just_initialized = false;
  15610. }
  15611. float * fx_best = &opt->lbfgs.fx_best;
  15612. float * step = &opt->lbfgs.step;
  15613. int * j = &opt->lbfgs.j;
  15614. int * k = &opt->lbfgs.k;
  15615. int * end = &opt->lbfgs.end;
  15616. int * n_no_improvement = &opt->lbfgs.n_no_improvement;
  15617. int ls = 0;
  15618. int bound = 0;
  15619. float ys = 0.0f;
  15620. float yy = 0.0f;
  15621. float beta = 0.0f;
  15622. int it = 0;
  15623. while (true) {
  15624. // store the current position and gradient vectors
  15625. ggml_vec_cpy_f32(nx, xp, x);
  15626. ggml_vec_cpy_f32(nx, gp, g);
  15627. ls = linesearch_backtracking(&params, nx, x, &fx, g, d, step, xp, f, gf, gb, &cplan, np, ps, callback, callback_data);
  15628. if (ls < 0) {
  15629. // linesearch failed - go back to the previous point and return
  15630. ggml_vec_cpy_f32(nx, x, xp);
  15631. ggml_vec_cpy_f32(nx, g, gp);
  15632. return ls;
  15633. }
  15634. opt->loss_after = fx;
  15635. ggml_vec_norm_f32(nx, &xnorm, x);
  15636. ggml_vec_norm_f32(nx, &gnorm, g);
  15637. GGML_PRINT_DEBUG("f = %10.6f\n", ggml_get_f32_1d(f, 0));
  15638. if (xnorm < 1.0f) {
  15639. xnorm = 1.0f;
  15640. }
  15641. if (gnorm/xnorm <= params.lbfgs.eps) {
  15642. // converged
  15643. return GGML_OPT_OK;
  15644. }
  15645. // delta-based convergence test
  15646. if (pf != NULL) {
  15647. // need at least params.past iterations to start checking for convergence
  15648. if (params.past <= k[0]) {
  15649. const float rate = (pf[k[0]%params.past] - fx)/fx;
  15650. if (fabsf(rate) < params.delta) {
  15651. return GGML_OPT_OK;
  15652. }
  15653. }
  15654. pf[k[0]%params.past] = fx;
  15655. }
  15656. // check for improvement
  15657. if (params.max_no_improvement > 0) {
  15658. if (fx < fx_best[0]) {
  15659. fx_best[0] = fx;
  15660. n_no_improvement[0] = 0;
  15661. } else {
  15662. n_no_improvement[0]++;
  15663. if (n_no_improvement[0] >= params.max_no_improvement) {
  15664. return GGML_OPT_OK;
  15665. }
  15666. }
  15667. }
  15668. if (params.lbfgs.n_iter != 0 && params.lbfgs.n_iter < it + 1) {
  15669. // reached the maximum number of iterations
  15670. return GGML_OPT_DID_NOT_CONVERGE;
  15671. }
  15672. // update vectors s and y:
  15673. // s_{k+1} = x_{k+1} - x_{k} = \step * d_{k}.
  15674. // y_{k+1} = g_{k+1} - g_{k}.
  15675. //
  15676. ggml_vec_sub_f32(nx, &lm_s[end[0]*nx], x, xp);
  15677. ggml_vec_sub_f32(nx, &lm_y[end[0]*nx], g, gp);
  15678. // compute scalars ys and yy:
  15679. // ys = y^t \cdot s -> 1 / \rho.
  15680. // yy = y^t \cdot y.
  15681. //
  15682. ggml_vec_dot_f32(nx, &ys, &lm_y[end[0]*nx], &lm_s[end[0]*nx]);
  15683. ggml_vec_dot_f32(nx, &yy, &lm_y[end[0]*nx], &lm_y[end[0]*nx]);
  15684. lm_ys[end[0]] = ys;
  15685. // find new search direction
  15686. // ref: https://en.wikipedia.org/wiki/Limited-memory_BFGS
  15687. bound = (m <= k[0]) ? m : k[0];
  15688. k[0]++;
  15689. it++;
  15690. end[0] = (end[0] + 1)%m;
  15691. // initialize search direction with -g
  15692. ggml_vec_neg_f32(nx, d, g);
  15693. j[0] = end[0];
  15694. for (int i = 0; i < bound; ++i) {
  15695. j[0] = (j[0] + m - 1) % m;
  15696. // \alpha_{j} = \rho_{j} s^{t}_{j} \cdot q_{k+1}
  15697. ggml_vec_dot_f32(nx, &lm_alpha[j[0]], &lm_s[j[0]*nx], d);
  15698. lm_alpha[j[0]] /= lm_ys[j[0]];
  15699. // q_{i} = q_{i+1} - \alpha_{i} y_{i}
  15700. ggml_vec_mad_f32(nx, d, &lm_y[j[0]*nx], -lm_alpha[j[0]]);
  15701. }
  15702. ggml_vec_scale_f32(nx, d, ys/yy);
  15703. for (int i = 0; i < bound; ++i) {
  15704. // \beta_{j} = \rho_{j} y^t_{j} \cdot \gamma_{i}
  15705. ggml_vec_dot_f32(nx, &beta, &lm_y[j[0]*nx], d);
  15706. beta /= lm_ys[j[0]];
  15707. // \gamma_{i+1} = \gamma_{i} + (\alpha_{j} - \beta_{j}) s_{j}
  15708. ggml_vec_mad_f32(nx, d, &lm_s[j[0]*nx], lm_alpha[j[0]] - beta);
  15709. j[0] = (j[0] + 1)%m;
  15710. }
  15711. step[0] = 1.0;
  15712. }
  15713. return GGML_OPT_DID_NOT_CONVERGE;
  15714. }
  15715. struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type) {
  15716. struct ggml_opt_params result;
  15717. switch (type) {
  15718. case GGML_OPT_ADAM:
  15719. {
  15720. result = (struct ggml_opt_params) {
  15721. .type = GGML_OPT_ADAM,
  15722. .n_threads = 1,
  15723. .past = 0,
  15724. .delta = 1e-5f,
  15725. .max_no_improvement = 100,
  15726. .print_forward_graph = true,
  15727. .print_backward_graph = true,
  15728. .adam = {
  15729. .n_iter = 10000,
  15730. .sched = 1.000f,
  15731. .decay = 0.0f,
  15732. .decay_min_ndim = 2,
  15733. .alpha = 0.001f,
  15734. .beta1 = 0.9f,
  15735. .beta2 = 0.999f,
  15736. .eps = 1e-8f,
  15737. .eps_f = 1e-5f,
  15738. .eps_g = 1e-3f,
  15739. .gclip = 0.0f,
  15740. },
  15741. };
  15742. } break;
  15743. case GGML_OPT_LBFGS:
  15744. {
  15745. result = (struct ggml_opt_params) {
  15746. .type = GGML_OPT_LBFGS,
  15747. .n_threads = 1,
  15748. .past = 0,
  15749. .delta = 1e-5f,
  15750. .max_no_improvement = 0,
  15751. .print_forward_graph = true,
  15752. .print_backward_graph = true,
  15753. .lbfgs = {
  15754. .m = 6,
  15755. .n_iter = 100,
  15756. .max_linesearch = 20,
  15757. .eps = 1e-5f,
  15758. .ftol = 1e-4f,
  15759. .wolfe = 0.9f,
  15760. .min_step = 1e-20f,
  15761. .max_step = 1e+20f,
  15762. .linesearch = GGML_LINESEARCH_DEFAULT,
  15763. },
  15764. };
  15765. } break;
  15766. }
  15767. return result;
  15768. }
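// example (illustrative sketch, not part of the library): minimizing the scalar
// objective f(x) = x*x with the default Adam settings. it assumes the usual ggml
// public API declared in ggml.h (ggml_init, ggml_new_tensor_1d, ggml_set_param,
// ggml_mul, ggml_set_f32, ggml_opt, ggml_free):
//
//   struct ggml_init_params ip = {
//       .mem_size   = 16*1024*1024,
//       .mem_buffer = NULL,
//       .no_alloc   = false,
//   };
//   struct ggml_context * ctx0 = ggml_init(ip);
//
//   struct ggml_tensor * x = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
//   ggml_set_param(ctx0, x);                        // mark x as an optimizable parameter
//   ggml_set_f32(x, 2.0f);                          // initial guess
//
//   struct ggml_tensor * f = ggml_mul(ctx0, x, x);  // scalar objective f = x^2
//
//   struct ggml_opt_params opt_params = ggml_opt_default_params(GGML_OPT_ADAM);
//   enum ggml_opt_result res = ggml_opt(ctx0, opt_params, f);
//   // on GGML_OPT_OK, ggml_get_f32_1d(x, 0) should be close to 0
//
//   ggml_free(ctx0);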
  15769. GGML_API void ggml_opt_init(
  15770. struct ggml_context * ctx,
  15771. struct ggml_opt_context * opt,
  15772. struct ggml_opt_params params,
  15773. int64_t nx) {
  15774. opt->ctx = ctx;
  15775. opt->params = params;
  15776. opt->iter = 0;
  15777. opt->nx = nx;
  15778. opt->just_initialized = true;
  15779. switch (opt->params.type) {
  15780. case GGML_OPT_ADAM:
  15781. {
  15782. opt->adam.m = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx);
  15783. opt->adam.v = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx);
  15784. opt->adam.pf = params.past > 0
  15785. ? ggml_new_tensor_1d(ctx, GGML_TYPE_F32, params.past)
  15786. : NULL;
  15787. ggml_set_zero(opt->adam.m);
  15788. ggml_set_zero(opt->adam.v);
  15789. if (opt->adam.pf) {
  15790. ggml_set_zero(opt->adam.pf);
  15791. }
  15792. } break;
  15793. case GGML_OPT_LBFGS:
  15794. {
  15795. opt->lbfgs.x = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx);
  15796. opt->lbfgs.xp = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx);
  15797. opt->lbfgs.g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx);
  15798. opt->lbfgs.gp = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx);
  15799. opt->lbfgs.d = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx);
  15800. opt->lbfgs.pf = params.past > 0
  15801. ? ggml_new_tensor_1d(ctx, GGML_TYPE_F32, params.past)
  15802. : NULL;
  15803. opt->lbfgs.lmal = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, params.lbfgs.m);
  15804. opt->lbfgs.lmys = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, params.lbfgs.m);
  15805. opt->lbfgs.lms = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, nx, params.lbfgs.m);
  15806. opt->lbfgs.lmy = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, nx, params.lbfgs.m);
  15807. ggml_set_zero(opt->lbfgs.x);
  15808. ggml_set_zero(opt->lbfgs.xp);
  15809. ggml_set_zero(opt->lbfgs.g);
  15810. ggml_set_zero(opt->lbfgs.gp);
  15811. ggml_set_zero(opt->lbfgs.d);
  15812. if (opt->lbfgs.pf) {
  15813. ggml_set_zero(opt->lbfgs.pf);
  15814. }
  15815. ggml_set_zero(opt->lbfgs.lmal);
  15816. ggml_set_zero(opt->lbfgs.lmys);
  15817. ggml_set_zero(opt->lbfgs.lms);
  15818. ggml_set_zero(opt->lbfgs.lmy);
  15819. } break;
  15820. }
  15821. }
  15822. enum ggml_opt_result ggml_opt(
  15823. struct ggml_context * ctx,
  15824. struct ggml_opt_params params,
  15825. struct ggml_tensor * f) {
  15826. bool free_ctx = false;
  15827. if (ctx == NULL) {
  15828. struct ggml_init_params params_ctx = {
  15829. .mem_size = 16*1024*1024,
  15830. .mem_buffer = NULL,
  15831. .no_alloc = false,
  15832. };
  15833. ctx = ggml_init(params_ctx);
  15834. if (ctx == NULL) {
  15835. return GGML_OPT_NO_CONTEXT;
  15836. }
  15837. free_ctx = true;
  15838. }
  15839. enum ggml_opt_result result = GGML_OPT_OK;
  15840. struct ggml_opt_context * opt = (struct ggml_opt_context *) alloca(sizeof(struct ggml_opt_context));
  15841. ggml_opt_init(ctx, opt, params, 0);
  15842. result = ggml_opt_resume(ctx, opt, f);
  15843. if (free_ctx) {
  15844. ggml_free(ctx);
  15845. }
  15846. return result;
  15847. }
  15848. enum ggml_opt_result ggml_opt_resume(
  15849. struct ggml_context * ctx,
  15850. struct ggml_opt_context * opt,
  15851. struct ggml_tensor * f) {
  15852. // build forward + backward compute graphs
  15853. struct ggml_tensor * gfbuf = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, sizeof(struct ggml_cgraph) / ggml_type_size(GGML_TYPE_I32)+ (sizeof(struct ggml_cgraph) % ggml_type_size(GGML_TYPE_I32) ? 1 : 0));
  15854. struct ggml_tensor * gbbuf = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, sizeof(struct ggml_cgraph) / ggml_type_size(GGML_TYPE_I32)+ (sizeof(struct ggml_cgraph) % ggml_type_size(GGML_TYPE_I32) ? 1 : 0));
  15855. struct ggml_cgraph * gf = (struct ggml_cgraph *) gfbuf->data;
  15856. struct ggml_cgraph * gb = (struct ggml_cgraph *) gbbuf->data;
  15857. *gf = ggml_build_forward (f);
  15858. *gb = ggml_build_backward(ctx, gf, true);
  15859. return ggml_opt_resume_g(ctx, opt, f, gf, gb, NULL, NULL);
  15860. }
  15861. enum ggml_opt_result ggml_opt_resume_g(
  15862. struct ggml_context * ctx,
  15863. struct ggml_opt_context * opt,
  15864. struct ggml_tensor * f,
  15865. struct ggml_cgraph * gf,
  15866. struct ggml_cgraph * gb,
  15867. ggml_opt_callback callback,
  15868. void * callback_data) {
  15869. // build forward + backward compute graphs
  15870. enum ggml_opt_result result = GGML_OPT_OK;
  15871. switch (opt->params.type) {
  15872. case GGML_OPT_ADAM:
  15873. {
  15874. result = ggml_opt_adam(ctx, opt, opt->params, f, gf, gb, callback, callback_data);
  15875. } break;
  15876. case GGML_OPT_LBFGS:
  15877. {
  15878. result = ggml_opt_lbfgs(ctx, opt, opt->params, f, gf, gb, callback, callback_data);
  15879. } break;
  15880. }
  15881. if (opt->params.print_forward_graph) {
  15882. ggml_graph_print (gf);
  15883. ggml_graph_dump_dot(gf, NULL, "opt-forward.dot");
  15884. }
  15885. if (opt->params.print_backward_graph) {
  15886. ggml_graph_print (gb);
  15887. ggml_graph_dump_dot(gb, gf, "opt-backward.dot");
  15888. }
  15889. return result;
  15890. }
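// note: ggml_opt() above is the one-shot entry point - it allocates a fresh
// ggml_opt_context on the stack, so no optimizer state survives the call. to keep the
// Adam moments / L-BFGS memory and the iteration counter across repeated calls (e.g.
// when training over several batches), allocate the context yourself and use
// ggml_opt_resume() / ggml_opt_resume_g(). sketch (assumes a ggml_context * ctx and a
// scalar objective tensor f whose parameters were marked with ggml_set_param; the
// n_epochs variable is hypothetical):
//
//   struct ggml_opt_context opt;
//   ggml_opt_init(ctx, &opt, ggml_opt_default_params(GGML_OPT_ADAM), 0);
//   for (int epoch = 0; epoch < n_epochs; ++epoch) {
//       // ... update the input tensors for this batch ...
//       ggml_opt_resume(ctx, &opt, f);              // reuses opt.adam.m / opt.adam.v / opt.iter
//   }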
  15891. ////////////////////////////////////////////////////////////////////////////////
  15892. size_t ggml_quantize_q4_0(const float * src, void * dst, int n, int k, int64_t * hist) {
  15893. assert(k % QK4_0 == 0);
  15894. const int nb = k / QK4_0;
  15895. for (int b = 0; b < n; b += k) {
  15896. block_q4_0 * restrict y = (block_q4_0 *) dst + b/QK4_0;
  15897. quantize_row_q4_0_reference(src + b, y, k);
  15898. for (int i = 0; i < nb; i++) {
  15899. for (int j = 0; j < QK4_0; j += 2) {
  15900. const uint8_t vi0 = y[i].qs[j/2] & 0x0F;
  15901. const uint8_t vi1 = y[i].qs[j/2] >> 4;
  15902. hist[vi0]++;
  15903. hist[vi1]++;
  15904. }
  15905. }
  15906. }
  15907. return (n/QK4_0*sizeof(block_q4_0));
  15908. }
  15909. size_t ggml_quantize_q4_1(const float * src, void * dst, int n, int k, int64_t * hist) {
  15910. assert(k % QK4_1 == 0);
  15911. const int nb = k / QK4_1;
  15912. for (int b = 0; b < n; b += k) {
  15913. block_q4_1 * restrict y = (block_q4_1 *) dst + b/QK4_1;
  15914. quantize_row_q4_1_reference(src + b, y, k);
  15915. for (int i = 0; i < nb; i++) {
  15916. for (int j = 0; j < QK4_1; j += 2) {
  15917. const uint8_t vi0 = y[i].qs[j/2] & 0x0F;
  15918. const uint8_t vi1 = y[i].qs[j/2] >> 4;
  15919. hist[vi0]++;
  15920. hist[vi1]++;
  15921. }
  15922. }
  15923. }
  15924. return (n/QK4_1*sizeof(block_q4_1));
  15925. }
  15926. size_t ggml_quantize_q5_0(const float * src, void * dst, int n, int k, int64_t * hist) {
  15927. assert(k % QK5_0 == 0);
  15928. const int nb = k / QK5_0;
  15929. for (int b = 0; b < n; b += k) {
  15930. block_q5_0 * restrict y = (block_q5_0 *)dst + b/QK5_0;
  15931. quantize_row_q5_0_reference(src + b, y, k);
  15932. for (int i = 0; i < nb; i++) {
  15933. uint32_t qh;
  15934. memcpy(&qh, &y[i].qh, sizeof(qh));
  15935. for (int j = 0; j < QK5_0; j += 2) {
  15936. const uint8_t vh0 = ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4;
  15937. const uint8_t vh1 = ((qh & (1u << (j + 16))) >> (j + 12));
15938. // collapse the 5-bit quants into 16 histogram bins
  15939. const uint8_t vi0 = ((y[i].qs[j/2] & 0x0F) | vh0) / 2;
  15940. const uint8_t vi1 = ((y[i].qs[j/2] >> 4) | vh1) / 2;
  15941. hist[vi0]++;
  15942. hist[vi1]++;
  15943. }
  15944. }
  15945. }
  15946. return (n/QK5_0*sizeof(block_q5_0));
  15947. }
  15948. size_t ggml_quantize_q5_1(const float * src, void * dst, int n, int k, int64_t * hist) {
  15949. assert(k % QK5_1 == 0);
  15950. const int nb = k / QK5_1;
  15951. for (int b = 0; b < n; b += k) {
  15952. block_q5_1 * restrict y = (block_q5_1 *)dst + b/QK5_1;
  15953. quantize_row_q5_1_reference(src + b, y, k);
  15954. for (int i = 0; i < nb; i++) {
  15955. uint32_t qh;
  15956. memcpy(&qh, &y[i].qh, sizeof(qh));
  15957. for (int j = 0; j < QK5_1; j += 2) {
  15958. const uint8_t vh0 = ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4;
  15959. const uint8_t vh1 = ((qh & (1u << (j + 16))) >> (j + 12));
15960. // collapse the 5-bit quants into 16 histogram bins
  15961. const uint8_t vi0 = ((y[i].qs[j/2] & 0x0F) | vh0) / 2;
  15962. const uint8_t vi1 = ((y[i].qs[j/2] >> 4) | vh1) / 2;
  15963. hist[vi0]++;
  15964. hist[vi1]++;
  15965. }
  15966. }
  15967. }
  15968. return (n/QK5_1*sizeof(block_q5_1));
  15969. }
  15970. size_t ggml_quantize_q8_0(const float * src, void * dst, int n, int k, int64_t * hist) {
  15971. assert(k % QK8_0 == 0);
  15972. const int nb = k / QK8_0;
  15973. for (int b = 0; b < n; b += k) {
  15974. block_q8_0 * restrict y = (block_q8_0 *)dst + b/QK8_0;
  15975. quantize_row_q8_0_reference(src + b, y, k);
  15976. for (int i = 0; i < nb; i++) {
  15977. for (int j = 0; j < QK8_0; ++j) {
  15978. const int8_t vi = y[i].qs[j];
  15979. hist[vi/16 + 8]++;
  15980. }
  15981. }
  15982. }
  15983. return (n/QK8_0*sizeof(block_q8_0));
  15984. }
  15985. size_t ggml_quantize_chunk(enum ggml_type type, const float * src, void * dst, int start, int n, int64_t * hist) {
  15986. size_t result = 0;
  15987. switch (type) {
  15988. case GGML_TYPE_Q4_0:
  15989. {
  15990. GGML_ASSERT(start % QK4_0 == 0);
  15991. block_q4_0 * block = (block_q4_0*)dst + start / QK4_0;
  15992. result = ggml_quantize_q4_0(src + start, block, n, n, hist);
  15993. } break;
  15994. case GGML_TYPE_Q4_1:
  15995. {
  15996. GGML_ASSERT(start % QK4_1 == 0);
  15997. block_q4_1 * block = (block_q4_1*)dst + start / QK4_1;
  15998. result = ggml_quantize_q4_1(src + start, block, n, n, hist);
  15999. } break;
  16000. case GGML_TYPE_Q5_0:
  16001. {
  16002. GGML_ASSERT(start % QK5_0 == 0);
  16003. block_q5_0 * block = (block_q5_0*)dst + start / QK5_0;
  16004. result = ggml_quantize_q5_0(src + start, block, n, n, hist);
  16005. } break;
  16006. case GGML_TYPE_Q5_1:
  16007. {
  16008. GGML_ASSERT(start % QK5_1 == 0);
  16009. block_q5_1 * block = (block_q5_1*)dst + start / QK5_1;
  16010. result = ggml_quantize_q5_1(src + start, block, n, n, hist);
  16011. } break;
  16012. case GGML_TYPE_Q8_0:
  16013. {
  16014. GGML_ASSERT(start % QK8_0 == 0);
  16015. block_q8_0 * block = (block_q8_0*)dst + start / QK8_0;
  16016. result = ggml_quantize_q8_0(src + start, block, n, n, hist);
  16017. } break;
  16018. #ifdef GGML_USE_K_QUANTS
  16019. case GGML_TYPE_Q2_K:
  16020. {
  16021. GGML_ASSERT(start % QK_K == 0);
  16022. block_q2_K * block = (block_q2_K*)dst + start / QK_K;
  16023. result = ggml_quantize_q2_K(src + start, block, n, n, hist);
  16024. } break;
  16025. case GGML_TYPE_Q3_K:
  16026. {
  16027. GGML_ASSERT(start % QK_K == 0);
  16028. block_q3_K * block = (block_q3_K*)dst + start / QK_K;
  16029. result = ggml_quantize_q3_K(src + start, block, n, n, hist);
  16030. } break;
  16031. case GGML_TYPE_Q4_K:
  16032. {
  16033. GGML_ASSERT(start % QK_K == 0);
  16034. block_q4_K * block = (block_q4_K*)dst + start / QK_K;
  16035. result = ggml_quantize_q4_K(src + start, block, n, n, hist);
  16036. } break;
  16037. case GGML_TYPE_Q5_K:
  16038. {
  16039. GGML_ASSERT(start % QK_K == 0);
  16040. block_q5_K * block = (block_q5_K*)dst + start / QK_K;
  16041. result = ggml_quantize_q5_K(src + start, block, n, n, hist);
  16042. } break;
  16043. case GGML_TYPE_Q6_K:
  16044. {
  16045. GGML_ASSERT(start % QK_K == 0);
  16046. block_q6_K * block = (block_q6_K*)dst + start / QK_K;
  16047. result = ggml_quantize_q6_K(src + start, block, n, n, hist);
  16048. } break;
  16049. #endif
  16050. case GGML_TYPE_F16:
  16051. {
  16052. int elemsize = sizeof(ggml_fp16_t);
  16053. ggml_fp32_to_fp16_row(src + start, (ggml_fp16_t *)dst + start, n);
  16054. result = n * elemsize;
  16055. } break;
  16056. case GGML_TYPE_F32:
  16057. {
  16058. int elemsize = sizeof(float);
  16059. result = n * elemsize;
  16060. memcpy((uint8_t *)dst + start * elemsize, src + start, result);
  16061. } break;
  16062. default:
  16063. assert(false);
  16064. }
  16065. return result;
  16066. }
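// example (illustrative sketch, not part of the library): quantizing one row of f32
// weights to Q4_0 via ggml_quantize_chunk(); hist accumulates a 16-bin histogram of the
// produced quants. the buffer size N is hypothetical, but must be a multiple of QK4_0 (32):
//
//   enum { N = 4096 };
//   float      src[N];                       // ... filled with the weights ...
//   block_q4_0 dst[N/QK4_0];
//   int64_t    hist[16] = {0};
//   const size_t n_bytes = ggml_quantize_chunk(GGML_TYPE_Q4_0, src, dst, /*start=*/0, /*n=*/N, hist);
//   // n_bytes == sizeof(dst); start must also be a multiple of QK4_0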
  16067. ////////////////////////////////////////////////////////////////////////////////
  16068. struct gguf_str {
  16069. uint64_t n; // GGUFv2
  16070. char * data;
  16071. };
  16072. static const size_t GGUF_TYPE_SIZE[GGUF_TYPE_COUNT] = {
  16073. [GGUF_TYPE_UINT8] = sizeof(uint8_t),
  16074. [GGUF_TYPE_INT8] = sizeof(int8_t),
  16075. [GGUF_TYPE_UINT16] = sizeof(uint16_t),
  16076. [GGUF_TYPE_INT16] = sizeof(int16_t),
  16077. [GGUF_TYPE_UINT32] = sizeof(uint32_t),
  16078. [GGUF_TYPE_INT32] = sizeof(int32_t),
  16079. [GGUF_TYPE_FLOAT32] = sizeof(float),
  16080. [GGUF_TYPE_BOOL] = sizeof(bool),
  16081. [GGUF_TYPE_STRING] = sizeof(struct gguf_str),
  16082. [GGUF_TYPE_UINT64] = sizeof(uint64_t),
  16083. [GGUF_TYPE_INT64] = sizeof(int64_t),
  16084. [GGUF_TYPE_FLOAT64] = sizeof(double),
  16085. [GGUF_TYPE_ARRAY] = 0, // undefined
  16086. };
  16087. static_assert(GGUF_TYPE_COUNT == 13, "GGUF_TYPE_COUNT != 13");
  16088. static const char * GGUF_TYPE_NAME[GGUF_TYPE_COUNT] = {
  16089. [GGUF_TYPE_UINT8] = "u8",
  16090. [GGUF_TYPE_INT8] = "i8",
  16091. [GGUF_TYPE_UINT16] = "u16",
  16092. [GGUF_TYPE_INT16] = "i16",
  16093. [GGUF_TYPE_UINT32] = "u32",
  16094. [GGUF_TYPE_INT32] = "i32",
  16095. [GGUF_TYPE_FLOAT32] = "f32",
  16096. [GGUF_TYPE_BOOL] = "bool",
  16097. [GGUF_TYPE_STRING] = "str",
  16098. [GGUF_TYPE_ARRAY] = "arr",
  16099. [GGUF_TYPE_UINT64] = "u64",
  16100. [GGUF_TYPE_INT64] = "i64",
  16101. [GGUF_TYPE_FLOAT64] = "f64",
  16102. };
  16103. static_assert(GGUF_TYPE_COUNT == 13, "GGUF_TYPE_COUNT != 13");
  16104. union gguf_value {
  16105. uint8_t uint8;
  16106. int8_t int8;
  16107. uint16_t uint16;
  16108. int16_t int16;
  16109. uint32_t uint32;
  16110. int32_t int32;
  16111. float float32;
  16112. uint64_t uint64;
  16113. int64_t int64;
  16114. double float64;
  16115. bool bool_;
  16116. struct gguf_str str;
  16117. struct {
  16118. enum gguf_type type;
  16119. uint64_t n; // GGUFv2
  16120. void * data;
  16121. } arr;
  16122. };
  16123. struct gguf_kv {
  16124. struct gguf_str key;
  16125. enum gguf_type type;
  16126. union gguf_value value;
  16127. };
  16128. struct gguf_header {
  16129. uint32_t magic;
  16130. uint32_t version;
  16131. uint64_t n_tensors; // GGUFv2
  16132. uint64_t n_kv; // GGUFv2
  16133. };
  16134. struct gguf_tensor_info {
  16135. struct gguf_str name;
  16136. uint32_t n_dims;
  16137. uint64_t ne[GGML_MAX_DIMS];
  16138. enum ggml_type type;
  16139. uint64_t offset; // offset from start of `data`, must be a multiple of `ALIGNMENT`
  16140. // for writing API
  16141. const void * data;
  16142. size_t size;
  16143. };
  16144. struct gguf_context {
  16145. struct gguf_header header;
  16146. struct gguf_kv * kv;
  16147. struct gguf_tensor_info * infos;
  16148. size_t alignment;
  16149. size_t offset; // offset of `data` from beginning of file
  16150. size_t size; // size of `data` in bytes
  16151. //uint8_t * padding;
  16152. void * data;
  16153. };
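// rough on-disk layout as parsed by gguf_init_from_file() below (GGUFv2; GGUFv1 uses
// 32-bit counts and string lengths instead of 64-bit):
//
//   uint32 magic (GGUF_MAGIC), uint32 version
//   uint64 n_tensors, uint64 n_kv
//   n_kv      x { gguf_str key, gguf_type type, value (scalar, gguf_str, or typed array) }
//   n_tensors x { gguf_str name, uint32 n_dims, uint64 ne[n_dims], ggml_type type, uint64 offset }
//   padding so that the data section starts at a multiple of `alignment`
//   tensor data blob; each tensor starts `offset` bytes into the data section,
//   with every offset a multiple of `alignment`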
  16154. static bool gguf_fread_el(FILE * file, void * dst, size_t size, size_t * offset) {
  16155. const size_t n = fread(dst, 1, size, file);
  16156. *offset += n;
  16157. return n == size;
  16158. }
  16159. // NOTE: temporary handling of GGUFv1 >> remove after Oct 2023
  16160. static bool gguf_fread_str_cur(FILE * file, struct gguf_str * p, size_t * offset) {
  16161. p->n = 0;
  16162. p->data = NULL;
  16163. bool ok = true;
  16164. ok = ok && gguf_fread_el(file, &p->n, sizeof(p->n), offset); p->data = calloc(p->n + 1, 1);
  16165. ok = ok && gguf_fread_el(file, p->data, p->n, offset);
  16166. return ok;
  16167. }
  16168. static bool gguf_fread_str_v1(FILE * file, struct gguf_str * p, size_t * offset) {
  16169. p->n = 0;
  16170. p->data = NULL;
  16171. bool ok = true;
  16172. uint32_t n = 0;
  16173. ok = ok && gguf_fread_el(file, &n, sizeof(n), offset); p->data = calloc(n + 1, 1); p->n = n;
  16174. ok = ok && gguf_fread_el(file, p->data, p->n, offset);
  16175. return ok;
  16176. }
  16177. struct gguf_context * gguf_init_empty(void) {
  16178. struct gguf_context * ctx = GGML_ALIGNED_MALLOC(sizeof(struct gguf_context));
  16179. ctx->header.magic = GGUF_MAGIC;
  16180. ctx->header.version = GGUF_VERSION;
  16181. ctx->header.n_tensors = 0;
  16182. ctx->header.n_kv = 0;
  16183. ctx->kv = NULL;
  16184. ctx->infos = NULL;
  16185. ctx->alignment = GGUF_DEFAULT_ALIGNMENT;
  16186. ctx->offset = 0;
  16187. ctx->size = 0;
  16188. ctx->data = NULL;
  16189. return ctx;
  16190. }
  16191. struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_params params) {
  16192. FILE * file = fopen(fname, "rb");
  16193. if (!file) {
  16194. return NULL;
  16195. }
  16196. // offset from start of file
  16197. size_t offset = 0;
  16198. uint32_t magic = 0;
  16199. // check the magic before making allocations
  16200. {
  16201. gguf_fread_el(file, &magic, sizeof(magic), &offset);
  16202. if (magic != GGUF_MAGIC) {
  16203. fprintf(stderr, "%s: invalid magic number %08x\n", __func__, magic);
  16204. fclose(file);
  16205. return NULL;
  16206. }
  16207. }
  16208. bool ok = true;
  16209. struct gguf_context * ctx = GGML_ALIGNED_MALLOC(sizeof(struct gguf_context));
  16210. // read the header
  16211. {
  16212. ctx->header.magic = magic;
  16213. ctx->kv = NULL;
  16214. ctx->infos = NULL;
  16215. ctx->data = NULL;
  16216. ok = ok && gguf_fread_el(file, &ctx->header.version, sizeof(ctx->header.version), &offset);
  16217. if (ctx->header.version == 1) {
  16218. // NOTE: temporary handling of GGUFv1 >> remove after Oct 2023
  16219. uint32_t n_tensors = 0;
  16220. uint32_t n_kv = 0;
  16221. ok = ok && gguf_fread_el(file, &n_tensors, sizeof(n_tensors), &offset);
  16222. ok = ok && gguf_fread_el(file, &n_kv, sizeof(n_kv), &offset);
  16223. ctx->header.n_tensors = n_tensors;
  16224. ctx->header.n_kv = n_kv;
  16225. } else {
  16226. ok = ok && gguf_fread_el(file, &ctx->header.n_tensors, sizeof(ctx->header.n_tensors), &offset);
  16227. ok = ok && gguf_fread_el(file, &ctx->header.n_kv, sizeof(ctx->header.n_kv), &offset);
  16228. }
  16229. if (!ok) {
  16230. fprintf(stderr, "%s: failed to read header\n", __func__);
  16231. fclose(file);
  16232. gguf_free(ctx);
  16233. return NULL;
  16234. }
  16235. }
  16236. // NOTE: temporary handling of GGUFv1 >> remove after Oct 2023
  16237. bool (* gguf_fread_str)(FILE *, struct gguf_str *, size_t *) = gguf_fread_str_cur;
  16238. if (ctx->header.version == 1) {
  16239. gguf_fread_str = gguf_fread_str_v1;
  16240. }
  16241. // read the kv pairs
  16242. {
  16243. ctx->kv = malloc(ctx->header.n_kv * sizeof(struct gguf_kv));
  16244. for (uint32_t i = 0; i < ctx->header.n_kv; ++i) {
  16245. struct gguf_kv * kv = &ctx->kv[i];
  16246. //fprintf(stderr, "%s: reading kv %d\n", __func__, i);
  16247. ok = ok && gguf_fread_str(file, &kv->key, &offset);
  16248. ok = ok && gguf_fread_el (file, &kv->type, sizeof(kv->type), &offset);
  16249. //fprintf(stderr, "%s: reading kv with key %s\n", __func__, kv->key.data);
  16250. switch (kv->type) {
  16251. case GGUF_TYPE_UINT8: ok = ok && gguf_fread_el (file, &kv->value.uint8, sizeof(kv->value.uint8), &offset); break;
  16252. case GGUF_TYPE_INT8: ok = ok && gguf_fread_el (file, &kv->value.int8, sizeof(kv->value.int8), &offset); break;
  16253. case GGUF_TYPE_UINT16: ok = ok && gguf_fread_el (file, &kv->value.uint16, sizeof(kv->value.uint16), &offset); break;
  16254. case GGUF_TYPE_INT16: ok = ok && gguf_fread_el (file, &kv->value.int16, sizeof(kv->value.int16), &offset); break;
  16255. case GGUF_TYPE_UINT32: ok = ok && gguf_fread_el (file, &kv->value.uint32, sizeof(kv->value.uint32), &offset); break;
  16256. case GGUF_TYPE_INT32: ok = ok && gguf_fread_el (file, &kv->value.int32, sizeof(kv->value.int32), &offset); break;
  16257. case GGUF_TYPE_FLOAT32: ok = ok && gguf_fread_el (file, &kv->value.float32, sizeof(kv->value.float32), &offset); break;
  16258. case GGUF_TYPE_UINT64: ok = ok && gguf_fread_el (file, &kv->value.uint64, sizeof(kv->value.uint64), &offset); break;
  16259. case GGUF_TYPE_INT64: ok = ok && gguf_fread_el (file, &kv->value.int64, sizeof(kv->value.int64), &offset); break;
  16260. case GGUF_TYPE_FLOAT64: ok = ok && gguf_fread_el (file, &kv->value.float64, sizeof(kv->value.float64), &offset); break;
  16261. case GGUF_TYPE_BOOL: ok = ok && gguf_fread_el (file, &kv->value.bool_, sizeof(kv->value.bool_), &offset); break;
  16262. case GGUF_TYPE_STRING: ok = ok && gguf_fread_str(file, &kv->value.str, &offset); break;
  16263. case GGUF_TYPE_ARRAY:
  16264. {
  16265. ok = ok && gguf_fread_el(file, &kv->value.arr.type, sizeof(kv->value.arr.type), &offset);
  16266. if (ctx->header.version == 1) {
  16267. // NOTE: temporary handling of GGUFv1 >> remove after Oct 2023
  16268. uint32_t n = 0;
  16269. ok = ok && gguf_fread_el(file, &n, sizeof(n), &offset);
  16270. kv->value.arr.n = n;
  16271. } else {
  16272. ok = ok && gguf_fread_el(file, &kv->value.arr.n, sizeof(kv->value.arr.n), &offset);
  16273. }
  16274. switch (kv->value.arr.type) {
  16275. case GGUF_TYPE_UINT8:
  16276. case GGUF_TYPE_INT8:
  16277. case GGUF_TYPE_UINT16:
  16278. case GGUF_TYPE_INT16:
  16279. case GGUF_TYPE_UINT32:
  16280. case GGUF_TYPE_INT32:
  16281. case GGUF_TYPE_FLOAT32:
  16282. case GGUF_TYPE_UINT64:
  16283. case GGUF_TYPE_INT64:
  16284. case GGUF_TYPE_FLOAT64:
  16285. case GGUF_TYPE_BOOL:
  16286. {
  16287. kv->value.arr.data = malloc(kv->value.arr.n * GGUF_TYPE_SIZE[kv->value.arr.type]);
  16288. ok = ok && gguf_fread_el(file, kv->value.arr.data, kv->value.arr.n * GGUF_TYPE_SIZE[kv->value.arr.type], &offset);
  16289. } break;
  16290. case GGUF_TYPE_STRING:
  16291. {
  16292. kv->value.arr.data = malloc(kv->value.arr.n * sizeof(struct gguf_str));
  16293. for (uint32_t j = 0; j < kv->value.arr.n; ++j) {
  16294. ok = ok && gguf_fread_str(file, &((struct gguf_str *) kv->value.arr.data)[j], &offset);
  16295. }
  16296. } break;
  16297. case GGUF_TYPE_ARRAY:
  16298. case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type"); break;
  16299. };
  16300. } break;
  16301. case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type");
  16302. };
  16303. if (!ok) {
  16304. break;
  16305. }
  16306. }
  16307. if (!ok) {
  16308. fprintf(stderr, "%s: failed to read key-value pairs\n", __func__);
  16309. fclose(file);
  16310. gguf_free(ctx);
  16311. return NULL;
  16312. }
  16313. }
  16314. // read the tensor infos
  16315. {
  16316. ctx->infos = malloc(ctx->header.n_tensors * sizeof(struct gguf_tensor_info));
  16317. for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) {
  16318. struct gguf_tensor_info * info = &ctx->infos[i];
  16319. for (int j = 0; j < GGML_MAX_DIMS; ++j) {
  16320. info->ne[j] = 1;
  16321. }
  16322. ok = ok && gguf_fread_str(file, &info->name, &offset);
  16323. ok = ok && gguf_fread_el (file, &info->n_dims, sizeof(info->n_dims), &offset);
  16324. for (uint32_t j = 0; j < info->n_dims; ++j) {
  16325. if (ctx->header.version == 1) {
  16326. // NOTE: temporary handling of GGUFv1 >> remove after Oct 2023
  16327. uint32_t t = 0;
  16328. ok = ok && gguf_fread_el(file, &t, sizeof(t), &offset);
  16329. info->ne[j] = t;
  16330. } else {
  16331. ok = ok && gguf_fread_el(file, &info->ne[j], sizeof(info->ne[j]), &offset);
  16332. }
  16333. }
  16334. ok = ok && gguf_fread_el (file, &info->type, sizeof(info->type), &offset);
  16335. ok = ok && gguf_fread_el (file, &info->offset, sizeof(info->offset), &offset);
  16336. if (!ok) {
  16337. fprintf(stderr, "%s: failed to read tensor info\n", __func__);
  16338. fclose(file);
  16339. gguf_free(ctx);
  16340. return NULL;
  16341. }
  16342. }
  16343. }
  16344. ctx->alignment = GGUF_DEFAULT_ALIGNMENT;
  16345. int alignment_idx = gguf_find_key(ctx, "general.alignment");
  16346. if (alignment_idx != -1) {
  16347. ctx->alignment = gguf_get_val_u32(ctx, alignment_idx);
  16348. }
  16349. // we require the data section to be aligned, so take into account any padding
  16350. {
  16351. const size_t offset_pad = offset % ctx->alignment;
  16352. if (offset_pad != 0) {
  16353. offset += ctx->alignment - offset_pad;
  16354. fseek(file, offset, SEEK_SET);
  16355. }
  16356. }
  16357. // store the current file offset - this is where the data section starts
  16358. ctx->offset = offset;
  16359. // compute the total size of the data section, taking into account the alignment
  16360. {
  16361. ctx->size = 0;
  16362. for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) {
  16363. struct gguf_tensor_info * info = &ctx->infos[i];
  16364. const int64_t ne =
  16365. (int64_t) info->ne[0] *
  16366. (int64_t) info->ne[1] *
  16367. (int64_t) info->ne[2] *
  16368. (int64_t) info->ne[3];
  16369. if (ne % ggml_blck_size(info->type) != 0) {
  16370. fprintf(stderr, "%s: tensor '%s' number of elements (%" PRId64 ") is not a multiple of block size (%d)\n",
  16371. __func__, info->name.data, ne, ggml_blck_size(info->type));
  16372. fclose(file);
  16373. gguf_free(ctx);
  16374. return NULL;
  16375. }
  16376. const size_t size_cur = (ne*ggml_type_size(info->type))/ggml_blck_size(info->type);
  16377. ctx->size += GGML_PAD(size_cur, ctx->alignment);
  16378. }
  16379. }
  16380. // load the tensor data only if requested
  16381. if (params.ctx != NULL) {
16382. // if no_alloc is set in the provided gguf_init_params, then we create "empty" tensors and do not read the binary blob

  16383. // otherwise, we load the binary blob into the created ggml_context as well, and point the "data" members of
  16384. // the ggml_tensor structs to the appropriate locations in the binary blob
  16385. // compute the exact size needed for the new ggml_context
  16386. const size_t mem_size =
  16387. params.no_alloc ?
  16388. (ctx->header.n_tensors )*ggml_tensor_overhead() :
  16389. (ctx->header.n_tensors + 1)*ggml_tensor_overhead() + ctx->size;
  16390. struct ggml_init_params pdata = {
  16391. .mem_size = mem_size,
  16392. .mem_buffer = NULL,
  16393. .no_alloc = params.no_alloc,
  16394. };
  16395. *params.ctx = ggml_init(pdata);
  16396. struct ggml_context * ctx_data = *params.ctx;
  16397. struct ggml_tensor * data = NULL;
  16398. if (!params.no_alloc) {
  16399. data = ggml_new_tensor_1d(ctx_data, GGML_TYPE_I8, ctx->size);
  16400. ok = ok && data != NULL;
  16401. // read the binary blob with the tensor data
  16402. ok = ok && gguf_fread_el(file, data->data, ctx->size, &offset);
  16403. if (!ok) {
  16404. fprintf(stderr, "%s: failed to read tensor data\n", __func__);
  16405. fclose(file);
  16406. ggml_free(ctx_data);
  16407. gguf_free(ctx);
  16408. return NULL;
  16409. }
  16410. ctx->data = data->data;
  16411. }
  16412. ggml_set_no_alloc(ctx_data, true);
  16413. // create the tensors
  16414. for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) {
  16415. const int64_t ne[GGML_MAX_DIMS] = {
  16416. ctx->infos[i].ne[0],
  16417. ctx->infos[i].ne[1],
  16418. ctx->infos[i].ne[2],
  16419. ctx->infos[i].ne[3],
  16420. };
  16421. struct ggml_tensor * cur = ggml_new_tensor(ctx_data, ctx->infos[i].type, ctx->infos[i].n_dims, ne);
  16422. ok = ok && cur != NULL;
  16423. ggml_set_name(cur, ctx->infos[i].name.data);
  16424. if (!ok) {
  16425. break;
  16426. }
  16427. // point the data member to the appropriate location in the binary blob using the tensor infos
  16428. if (!params.no_alloc) {
  16429. //cur->data = (char *) data->data + ctx->infos[i].offset - ctx->offset; // offset from start of file
  16430. cur->data = (char *) data->data + ctx->infos[i].offset; // offset from data
  16431. }
  16432. }
  16433. if (!ok) {
  16434. fprintf(stderr, "%s: failed to read the tensor data\n", __func__);
  16435. fclose(file);
  16436. ggml_free(ctx_data);
  16437. gguf_free(ctx);
  16438. return NULL;
  16439. }
  16440. ggml_set_no_alloc(ctx_data, params.no_alloc);
  16441. }
  16442. fclose(file);
  16443. return ctx;
  16444. }
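// example (illustrative sketch; the file name is hypothetical): loading a GGUF file
// together with its tensor data into a fresh ggml_context, then listing the tensors:
//
//   struct ggml_context * ctx_data = NULL;
//   struct gguf_init_params iparams = {
//       .no_alloc = false,            // also read the tensor data blob
//       .ctx      = &ctx_data,
//   };
//   struct gguf_context * gctx = gguf_init_from_file("model.gguf", iparams);
//   if (gctx) {
//       for (int i = 0; i < gguf_get_n_tensors(gctx); ++i) {
//           printf("%-32s offset %10zu\n",
//                  gguf_get_tensor_name(gctx, i), gguf_get_tensor_offset(gctx, i));
//       }
//       gguf_free(gctx);
//       ggml_free(ctx_data);
//   }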
  16445. void gguf_free(struct gguf_context * ctx) {
  16446. if (ctx == NULL) {
  16447. return;
  16448. }
  16449. if (ctx->kv) {
  16450. // free string memory - not great..
  16451. for (uint32_t i = 0; i < ctx->header.n_kv; ++i) {
  16452. struct gguf_kv * kv = &ctx->kv[i];
  16453. if (kv->key.data) {
  16454. free(kv->key.data);
  16455. }
  16456. if (kv->type == GGUF_TYPE_STRING) {
  16457. if (kv->value.str.data) {
  16458. free(kv->value.str.data);
  16459. }
  16460. }
  16461. if (kv->type == GGUF_TYPE_ARRAY) {
  16462. if (kv->value.arr.data) {
  16463. if (kv->value.arr.type == GGUF_TYPE_STRING) {
  16464. for (uint32_t j = 0; j < kv->value.arr.n; ++j) {
  16465. struct gguf_str * str = &((struct gguf_str *) kv->value.arr.data)[j];
  16466. if (str->data) {
  16467. free(str->data);
  16468. }
  16469. }
  16470. }
  16471. free(kv->value.arr.data);
  16472. }
  16473. }
  16474. }
  16475. free(ctx->kv);
  16476. }
  16477. if (ctx->infos) {
  16478. for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) {
  16479. struct gguf_tensor_info * info = &ctx->infos[i];
  16480. if (info->name.data) {
  16481. free(info->name.data);
  16482. }
  16483. }
  16484. free(ctx->infos);
  16485. }
  16486. GGML_ALIGNED_FREE(ctx);
  16487. }
  16488. const char * gguf_type_name(enum gguf_type type) {
  16489. return GGUF_TYPE_NAME[type];
  16490. }
  16491. int gguf_get_version(struct gguf_context * ctx) {
  16492. return ctx->header.version;
  16493. }
  16494. size_t gguf_get_alignment(struct gguf_context * ctx) {
  16495. return ctx->alignment;
  16496. }
  16497. size_t gguf_get_data_offset(struct gguf_context * ctx) {
  16498. return ctx->offset;
  16499. }
  16500. void * gguf_get_data(struct gguf_context * ctx) {
  16501. return ctx->data;
  16502. }
  16503. int gguf_get_n_kv(struct gguf_context * ctx) {
  16504. return ctx->header.n_kv;
  16505. }
  16506. int gguf_find_key(struct gguf_context * ctx, const char * key) {
  16507. // return -1 if key not found
  16508. int keyfound = -1;
  16509. const int n_kv = gguf_get_n_kv(ctx);
  16510. for (int i = 0; i < n_kv; ++i) {
  16511. if (strcmp(key, gguf_get_key(ctx, i)) == 0) {
  16512. keyfound = i;
  16513. break;
  16514. }
  16515. }
  16516. return keyfound;
  16517. }
  16518. const char * gguf_get_key(struct gguf_context * ctx, int i) {
  16519. return ctx->kv[i].key.data;
  16520. }
  16521. enum gguf_type gguf_get_kv_type(struct gguf_context * ctx, int i) {
  16522. return ctx->kv[i].type;
  16523. }
  16524. enum gguf_type gguf_get_arr_type(struct gguf_context * ctx, int i) {
  16525. return ctx->kv[i].value.arr.type;
  16526. }
  16527. const void * gguf_get_arr_data(struct gguf_context * ctx, int i) {
  16528. return ctx->kv[i].value.arr.data;
  16529. }
  16530. const char * gguf_get_arr_str(struct gguf_context * ctx, int key_id, int i) {
  16531. struct gguf_kv * kv = &ctx->kv[key_id];
  16532. struct gguf_str * str = &((struct gguf_str *) kv->value.arr.data)[i];
  16533. return str->data;
  16534. }
  16535. int gguf_get_arr_n(struct gguf_context * ctx, int i) {
  16536. return ctx->kv[i].value.arr.n;
  16537. }
  16538. uint8_t gguf_get_val_u8(struct gguf_context * ctx, int i) {
  16539. return ctx->kv[i].value.uint8;
  16540. }
  16541. int8_t gguf_get_val_i8(struct gguf_context * ctx, int i) {
  16542. return ctx->kv[i].value.int8;
  16543. }
  16544. uint16_t gguf_get_val_u16(struct gguf_context * ctx, int i) {
  16545. return ctx->kv[i].value.uint16;
  16546. }
  16547. int16_t gguf_get_val_i16(struct gguf_context * ctx, int i) {
  16548. return ctx->kv[i].value.int16;
  16549. }
  16550. uint32_t gguf_get_val_u32(struct gguf_context * ctx, int i) {
  16551. return ctx->kv[i].value.uint32;
  16552. }
  16553. int32_t gguf_get_val_i32(struct gguf_context * ctx, int i) {
  16554. return ctx->kv[i].value.int32;
  16555. }
  16556. float gguf_get_val_f32(struct gguf_context * ctx, int i) {
  16557. return ctx->kv[i].value.float32;
  16558. }
  16559. uint64_t gguf_get_val_u64(struct gguf_context * ctx, int i) {
  16560. return ctx->kv[i].value.uint64;
  16561. }
  16562. int64_t gguf_get_val_i64(struct gguf_context * ctx, int i) {
  16563. return ctx->kv[i].value.int64;
  16564. }
  16565. double gguf_get_val_f64(struct gguf_context * ctx, int i) {
  16566. return ctx->kv[i].value.float64;
  16567. }
  16568. bool gguf_get_val_bool(struct gguf_context * ctx, int i) {
  16569. return ctx->kv[i].value.bool_;
  16570. }
  16571. const char * gguf_get_val_str (struct gguf_context * ctx, int i) {
  16572. return ctx->kv[i].value.str.data;
  16573. }
  16574. int gguf_get_n_tensors(struct gguf_context * ctx) {
  16575. return ctx->header.n_tensors;
  16576. }
  16577. int gguf_find_tensor(struct gguf_context * ctx, const char * name) {
  16578. // return -1 if tensor not found
  16579. int tensorfound = -1;
  16580. const int n_tensors = gguf_get_n_tensors(ctx);
  16581. for (int i = 0; i < n_tensors; ++i) {
  16582. if (strcmp(name, gguf_get_tensor_name(ctx, i)) == 0) {
  16583. tensorfound = i;
  16584. break;
  16585. }
  16586. }
  16587. return tensorfound;
  16588. }
  16589. size_t gguf_get_tensor_offset(struct gguf_context * ctx, int i) {
  16590. return ctx->infos[i].offset;
  16591. }
  16592. char * gguf_get_tensor_name(struct gguf_context * ctx, int i) {
  16593. return ctx->infos[i].name.data;
  16594. }
  16595. // returns the index
  16596. static int gguf_get_or_add_key(struct gguf_context * ctx, const char * key) {
  16597. const int idx = gguf_find_key(ctx, key);
  16598. if (idx >= 0) {
  16599. return idx;
  16600. }
  16601. const int n_kv = gguf_get_n_kv(ctx);
  16602. ctx->kv = realloc(ctx->kv, (n_kv + 1) * sizeof(struct gguf_kv));
  16603. ctx->kv[n_kv].key.n = strlen(key);
  16604. ctx->kv[n_kv].key.data = strdup(key);
  16605. ctx->header.n_kv++;
  16606. return n_kv;
  16607. }
  16608. void gguf_set_val_u8(struct gguf_context * ctx, const char * key, uint8_t val) {
  16609. const int idx = gguf_get_or_add_key(ctx, key);
  16610. ctx->kv[idx].type = GGUF_TYPE_UINT8;
  16611. ctx->kv[idx].value.uint8 = val;
  16612. }
  16613. void gguf_set_val_i8(struct gguf_context * ctx, const char * key, int8_t val) {
  16614. const int idx = gguf_get_or_add_key(ctx, key);
  16615. ctx->kv[idx].type = GGUF_TYPE_INT8;
  16616. ctx->kv[idx].value.int8 = val;
  16617. }
  16618. void gguf_set_val_u16(struct gguf_context * ctx, const char * key, uint16_t val) {
  16619. const int idx = gguf_get_or_add_key(ctx, key);
  16620. ctx->kv[idx].type = GGUF_TYPE_UINT16;
  16621. ctx->kv[idx].value.uint16 = val;
  16622. }
  16623. void gguf_set_val_i16(struct gguf_context * ctx, const char * key, int16_t val) {
  16624. const int idx = gguf_get_or_add_key(ctx, key);
  16625. ctx->kv[idx].type = GGUF_TYPE_INT16;
  16626. ctx->kv[idx].value.int16 = val;
  16627. }
  16628. void gguf_set_val_u32(struct gguf_context * ctx, const char * key, uint32_t val) {
  16629. const int idx = gguf_get_or_add_key(ctx, key);
  16630. ctx->kv[idx].type = GGUF_TYPE_UINT32;
  16631. ctx->kv[idx].value.uint32 = val;
  16632. }
  16633. void gguf_set_val_i32(struct gguf_context * ctx, const char * key, int32_t val) {
  16634. const int idx = gguf_get_or_add_key(ctx, key);
  16635. ctx->kv[idx].type = GGUF_TYPE_INT32;
  16636. ctx->kv[idx].value.int32 = val;
  16637. }
  16638. void gguf_set_val_f32(struct gguf_context * ctx, const char * key, float val) {
  16639. const int idx = gguf_get_or_add_key(ctx, key);
  16640. ctx->kv[idx].type = GGUF_TYPE_FLOAT32;
  16641. ctx->kv[idx].value.float32 = val;
  16642. }
  16643. void gguf_set_val_u64(struct gguf_context * ctx, const char * key, uint64_t val) {
  16644. const int idx = gguf_get_or_add_key(ctx, key);
  16645. ctx->kv[idx].type = GGUF_TYPE_UINT64;
  16646. ctx->kv[idx].value.uint64 = val;
  16647. }
  16648. void gguf_set_val_i64(struct gguf_context * ctx, const char * key, int64_t val) {
  16649. const int idx = gguf_get_or_add_key(ctx, key);
  16650. ctx->kv[idx].type = GGUF_TYPE_INT64;
  16651. ctx->kv[idx].value.int64 = val;
  16652. }
  16653. void gguf_set_val_f64(struct gguf_context * ctx, const char * key, double val) {
  16654. const int idx = gguf_get_or_add_key(ctx, key);
  16655. ctx->kv[idx].type = GGUF_TYPE_FLOAT64;
  16656. ctx->kv[idx].value.float64 = val;
  16657. }
  16658. void gguf_set_val_bool(struct gguf_context * ctx, const char * key, bool val) {
  16659. const int idx = gguf_get_or_add_key(ctx, key);
  16660. ctx->kv[idx].type = GGUF_TYPE_BOOL;
  16661. ctx->kv[idx].value.bool_ = val;
  16662. }
  16663. void gguf_set_val_str(struct gguf_context * ctx, const char * key, const char * val) {
  16664. const int idx = gguf_get_or_add_key(ctx, key);
  16665. ctx->kv[idx].type = GGUF_TYPE_STRING;
  16666. ctx->kv[idx].value.str.n = strlen(val);
  16667. ctx->kv[idx].value.str.data = strdup(val);
  16668. }

void gguf_set_arr_data(struct gguf_context * ctx, const char * key, enum gguf_type type, const void * data, int n) {
    const int idx = gguf_get_or_add_key(ctx, key);
    ctx->kv[idx].type = GGUF_TYPE_ARRAY;
    ctx->kv[idx].value.arr.type = type;
    ctx->kv[idx].value.arr.n = n;
    ctx->kv[idx].value.arr.data = malloc(n*GGUF_TYPE_SIZE[type]);
    memcpy(ctx->kv[idx].value.arr.data, data, n*GGUF_TYPE_SIZE[type]);
}

void gguf_set_arr_str(struct gguf_context * ctx, const char * key, const char ** data, int n) {
    const int idx = gguf_get_or_add_key(ctx, key);
    ctx->kv[idx].type = GGUF_TYPE_ARRAY;
    ctx->kv[idx].value.arr.type = GGUF_TYPE_STRING;
    ctx->kv[idx].value.arr.n = n;
    ctx->kv[idx].value.arr.data = malloc(n*sizeof(struct gguf_str));
    for (int i = 0; i < n; i++) {
        struct gguf_str * str = &((struct gguf_str *)ctx->kv[idx].value.arr.data)[i];
        str->n = strlen(data[i]);
        str->data = strdup(data[i]);
    }
}
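
// Example (illustrative only, not part of ggml.c; keys and data are made up):
// writing array-valued metadata. gguf_set_arr_data copies n*GGUF_TYPE_SIZE[type]
// bytes from the caller's buffer; gguf_set_arr_str strdup's each string element.
//
//   const float scores[3] = { 0.1f, 0.2f, 0.3f };
//   gguf_set_arr_data(ctx, "example.scores", GGUF_TYPE_FLOAT32, scores, 3);
//
//   const char * tokens[2] = { "<s>", "</s>" };
//   gguf_set_arr_str(ctx, "example.tokens", tokens, 2);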

// set or add KV pairs from another context
void gguf_set_kv(struct gguf_context * ctx, struct gguf_context * src) {
    for (uint32_t i = 0; i < src->header.n_kv; i++) {
        switch (src->kv[i].type) {
            case GGUF_TYPE_UINT8:   gguf_set_val_u8  (ctx, src->kv[i].key.data, src->kv[i].value.uint8);    break;
            case GGUF_TYPE_INT8:    gguf_set_val_i8  (ctx, src->kv[i].key.data, src->kv[i].value.int8);     break;
            case GGUF_TYPE_UINT16:  gguf_set_val_u16 (ctx, src->kv[i].key.data, src->kv[i].value.uint16);   break;
            case GGUF_TYPE_INT16:   gguf_set_val_i16 (ctx, src->kv[i].key.data, src->kv[i].value.int16);    break;
            case GGUF_TYPE_UINT32:  gguf_set_val_u32 (ctx, src->kv[i].key.data, src->kv[i].value.uint32);   break;
            case GGUF_TYPE_INT32:   gguf_set_val_i32 (ctx, src->kv[i].key.data, src->kv[i].value.int32);    break;
            case GGUF_TYPE_FLOAT32: gguf_set_val_f32 (ctx, src->kv[i].key.data, src->kv[i].value.float32);  break;
            case GGUF_TYPE_UINT64:  gguf_set_val_u64 (ctx, src->kv[i].key.data, src->kv[i].value.uint64);   break;
            case GGUF_TYPE_INT64:   gguf_set_val_i64 (ctx, src->kv[i].key.data, src->kv[i].value.int64);    break;
            case GGUF_TYPE_FLOAT64: gguf_set_val_f64 (ctx, src->kv[i].key.data, src->kv[i].value.float64);  break;
            case GGUF_TYPE_BOOL:    gguf_set_val_bool(ctx, src->kv[i].key.data, src->kv[i].value.bool_);    break;
            case GGUF_TYPE_STRING:  gguf_set_val_str (ctx, src->kv[i].key.data, src->kv[i].value.str.data); break;
            case GGUF_TYPE_ARRAY:
                {
                    if (src->kv[i].value.arr.type == GGUF_TYPE_STRING) {
                        const char ** data = malloc(src->kv[i].value.arr.n*sizeof(char *));
                        for (uint32_t j = 0; j < src->kv[i].value.arr.n; j++) {
                            data[j] = ((struct gguf_str *)src->kv[i].value.arr.data)[j].data;
                        }
                        gguf_set_arr_str(ctx, src->kv[i].key.data, data, src->kv[i].value.arr.n);
                        free(data);
                    } else if (src->kv[i].value.arr.type == GGUF_TYPE_ARRAY) {
                        GGML_ASSERT(false && "nested arrays not supported");
                    } else {
                        gguf_set_arr_data(ctx, src->kv[i].key.data, src->kv[i].value.arr.type, src->kv[i].value.arr.data, src->kv[i].value.arr.n);
                    }
                } break;
            case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type"); break;
        }
    }
}
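
// Example (illustrative only, not part of ggml.c): copying all KV pairs from a
// context loaded from an existing file into a context being authored - a common
// pattern when rewriting a file while preserving its metadata. Error handling and
// the gguf_init_params argument (`params`) are omitted for brevity.
//
//   struct gguf_context * src = gguf_init_from_file("input.gguf", params);
//   struct gguf_context * dst = gguf_init_empty();
//   gguf_set_kv(dst, src);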

void gguf_add_tensor(
             struct gguf_context * ctx,
        const struct ggml_tensor * tensor) {
    const int idx = ctx->header.n_tensors;
    ctx->infos = realloc(ctx->infos, (idx + 1)*sizeof(struct gguf_tensor_info));

    ctx->infos[idx].name.n = strlen(tensor->name);
    ctx->infos[idx].name.data = strdup(tensor->name);

    for (int i = 0; i < GGML_MAX_DIMS; ++i) {
        ctx->infos[idx].ne[i] = 1;
    }

    ctx->infos[idx].n_dims = tensor->n_dims;
    for (int i = 0; i < tensor->n_dims; i++) {
        ctx->infos[idx].ne[i] = tensor->ne[i];
    }

    ctx->infos[idx].type = tensor->type;
    ctx->infos[idx].offset = 0;
    ctx->infos[idx].data = tensor->data;
    ctx->infos[idx].size = ggml_nbytes(tensor);

    if (ctx->header.n_tensors > 0) {
        ctx->infos[idx].offset = ctx->infos[idx - 1].offset + GGML_PAD(ctx->infos[idx - 1].size, ctx->alignment);
    }

    ctx->header.n_tensors++;
}
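
// Example (illustrative only, not part of ggml.c; `ggml_ctx` and `gguf_ctx` are
// assumed to exist): registering a ggml tensor for output. The tensor's name becomes
// its lookup key in the file, and its data offset is assigned sequentially with
// alignment padding, as computed above.
//
//   struct ggml_tensor * t = ggml_new_tensor_2d(ggml_ctx, GGML_TYPE_F32, 4096, 4096);
//   ggml_set_name(t, "example.weight");
//   gguf_add_tensor(gguf_ctx, t);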

void gguf_set_tensor_type(struct gguf_context * ctx, const char * name, enum ggml_type type) {
    const int idx = gguf_find_tensor(ctx, name);
    if (idx < 0) {
        GGML_ASSERT(false && "tensor not found");
    }

    ctx->infos[idx].type = type;
}

void gguf_set_tensor_data(struct gguf_context * ctx, const char * name, const void * data, size_t size) {
    const int idx = gguf_find_tensor(ctx, name);
    if (idx < 0) {
        GGML_ASSERT(false && "tensor not found");
    }

    ctx->infos[idx].data = data;
    ctx->infos[idx].size = size;

    // update offsets
    for (uint32_t i = idx + 1; i < ctx->header.n_tensors; ++i) {
        ctx->infos[i].offset = ctx->infos[i - 1].offset + GGML_PAD(ctx->infos[i - 1].size, ctx->alignment);
    }
}
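
// Example (illustrative only, not part of ggml.c; `quant_buf` and `quant_size` are
// made up): replacing a tensor's payload after it was added, e.g. with a quantized
// copy of a different size; the offsets of all subsequent tensors are recomputed
// by the loop above.
//
//   gguf_set_tensor_type(ctx, "example.weight", GGML_TYPE_Q4_0);
//   gguf_set_tensor_data(ctx, "example.weight", quant_buf, quant_size);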

//static void gguf_fwrite_str(FILE * file, const struct gguf_str * val) {
//    fwrite(&val->n, sizeof(val->n), 1, file);
//    fwrite(val->data, sizeof(char), val->n, file);
//}
//
//static void gguf_fwrite_el(FILE * file, const void * val, size_t size) {
//    fwrite(val, sizeof(char), size, file);
//}

struct gguf_buf {
    void * data;
    size_t size;
    size_t offset;
};

static struct gguf_buf gguf_buf_init(size_t size) {
    struct gguf_buf buf = {
        /*buf.data   =*/ size == 0 ? NULL : malloc(size),
        /*buf.size   =*/ size,
        /*buf.offset =*/ 0,
    };

    return buf;
}

static void gguf_buf_free(struct gguf_buf buf) {
    if (buf.data) {
        free(buf.data);
    }
}

static void gguf_buf_grow(struct gguf_buf * buf, size_t size) {
    if (buf->offset + size > buf->size) {
        buf->size = 1.5*(buf->offset + size);
        if (buf->data) {
            buf->data = realloc(buf->data, buf->size);
        }
    }
}

static void gguf_bwrite_str(struct gguf_buf * buf, const struct gguf_str * val) {
    gguf_buf_grow(buf, sizeof(val->n) + val->n);

    if (buf->data) {
        memcpy((char *) buf->data + buf->offset, &val->n, sizeof(val->n));
    }
    buf->offset += sizeof(val->n);

    if (buf->data) {
        memcpy((char *) buf->data + buf->offset, val->data, val->n);
    }
    buf->offset += val->n;
}

static void gguf_bwrite_el(struct gguf_buf * buf, const void * val, size_t el_size) {
    gguf_buf_grow(buf, el_size);

    if (buf->data) {
        memcpy((char *) buf->data + buf->offset, val, el_size);
    }
    buf->offset += el_size;
}
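
// note: when buf->data is NULL (i.e. the buffer was initialized with size 0), the
// gguf_bwrite_* helpers above only advance buf->offset without copying any bytes;
// gguf_write_to_buf below relies on this to compute the serialized size without
// allocating (see gguf_get_meta_size)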

static void gguf_write_to_buf(struct gguf_context * ctx, struct gguf_buf * buf, bool only_meta) {
    // write header
    gguf_bwrite_el(buf, &ctx->header.magic,     sizeof(ctx->header.magic));
    gguf_bwrite_el(buf, &ctx->header.version,   sizeof(ctx->header.version));
    gguf_bwrite_el(buf, &ctx->header.n_tensors, sizeof(ctx->header.n_tensors));
    gguf_bwrite_el(buf, &ctx->header.n_kv,      sizeof(ctx->header.n_kv));

    // write key-value pairs
    for (uint32_t i = 0; i < ctx->header.n_kv; ++i) {
        struct gguf_kv * kv = &ctx->kv[i];

        gguf_bwrite_str(buf, &kv->key);
        gguf_bwrite_el (buf, &kv->type, sizeof(kv->type));

        switch (kv->type) {
            case GGUF_TYPE_UINT8:   gguf_bwrite_el (buf, &kv->value.uint8,   sizeof(kv->value.uint8)  ); break;
            case GGUF_TYPE_INT8:    gguf_bwrite_el (buf, &kv->value.int8,    sizeof(kv->value.int8)   ); break;
            case GGUF_TYPE_UINT16:  gguf_bwrite_el (buf, &kv->value.uint16,  sizeof(kv->value.uint16) ); break;
            case GGUF_TYPE_INT16:   gguf_bwrite_el (buf, &kv->value.int16,   sizeof(kv->value.int16)  ); break;
            case GGUF_TYPE_UINT32:  gguf_bwrite_el (buf, &kv->value.uint32,  sizeof(kv->value.uint32) ); break;
            case GGUF_TYPE_INT32:   gguf_bwrite_el (buf, &kv->value.int32,   sizeof(kv->value.int32)  ); break;
            case GGUF_TYPE_FLOAT32: gguf_bwrite_el (buf, &kv->value.float32, sizeof(kv->value.float32)); break;
            case GGUF_TYPE_UINT64:  gguf_bwrite_el (buf, &kv->value.uint64,  sizeof(kv->value.uint64) ); break;
            case GGUF_TYPE_INT64:   gguf_bwrite_el (buf, &kv->value.int64,   sizeof(kv->value.int64)  ); break;
            case GGUF_TYPE_FLOAT64: gguf_bwrite_el (buf, &kv->value.float64, sizeof(kv->value.float64)); break;
            case GGUF_TYPE_BOOL:    gguf_bwrite_el (buf, &kv->value.bool_,   sizeof(kv->value.bool_)  ); break;
            case GGUF_TYPE_STRING:  gguf_bwrite_str(buf, &kv->value.str                               ); break;
            case GGUF_TYPE_ARRAY:
                {
                    gguf_bwrite_el(buf, &kv->value.arr.type, sizeof(kv->value.arr.type));
                    gguf_bwrite_el(buf, &kv->value.arr.n,    sizeof(kv->value.arr.n)   );

                    switch (kv->value.arr.type) {
                        case GGUF_TYPE_UINT8:
                        case GGUF_TYPE_INT8:
                        case GGUF_TYPE_UINT16:
                        case GGUF_TYPE_INT16:
                        case GGUF_TYPE_UINT32:
                        case GGUF_TYPE_INT32:
                        case GGUF_TYPE_FLOAT32:
                        case GGUF_TYPE_UINT64:
                        case GGUF_TYPE_INT64:
                        case GGUF_TYPE_FLOAT64:
                        case GGUF_TYPE_BOOL:
                            {
                                gguf_bwrite_el(buf, kv->value.arr.data, kv->value.arr.n * GGUF_TYPE_SIZE[kv->value.arr.type]);
                            } break;
                        case GGUF_TYPE_STRING:
                            {
                                for (uint32_t j = 0; j < kv->value.arr.n; ++j) {
                                    gguf_bwrite_str(buf, &((struct gguf_str *) kv->value.arr.data)[j]);
                                }
                            } break;
                        case GGUF_TYPE_ARRAY:
                        case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type"); break;
                    };
                } break;
            case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type");
        };
    }

    // write tensor infos
    for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) {
        struct gguf_tensor_info * info = &ctx->infos[i];

        gguf_bwrite_str(buf, &info->name);
        gguf_bwrite_el (buf, &info->n_dims, sizeof(info->n_dims));
        for (uint32_t j = 0; j < info->n_dims; ++j) {
            gguf_bwrite_el(buf, &info->ne[j], sizeof(info->ne[j]));
        }
        gguf_bwrite_el(buf, &info->type,   sizeof(info->type));
        gguf_bwrite_el(buf, &info->offset, sizeof(info->offset));
    }

    // we require the data section to be aligned, so take into account any padding
    {
        const size_t offset     = buf->offset;
        const size_t offset_pad = GGML_PAD(offset, ctx->alignment);

        if (offset_pad != offset) {
            uint8_t pad = 0;
            for (size_t i = 0; i < offset_pad - offset; ++i) {
                gguf_bwrite_el(buf, &pad, sizeof(pad));
            }
        }
    }

    if (only_meta) {
        return;
    }

    size_t offset = 0;

    // write tensor data
    for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) {
        struct gguf_tensor_info * info = &ctx->infos[i];

        const size_t size     = info->size;
        const size_t size_pad = GGML_PAD(size, ctx->alignment);

        gguf_bwrite_el(buf, info->data, size);

        if (size_pad != size) {
            uint8_t pad = 0;
            for (size_t j = 0; j < size_pad - size; ++j) {
                gguf_bwrite_el(buf, &pad, sizeof(pad));
            }
        }

        GGML_ASSERT(offset == info->offset);

        offset += size_pad;
    }
}
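
// note: the resulting layout is: header, KV pairs, tensor infos, padding up to
// ctx->alignment, then (unless only_meta) each tensor's data padded to
// ctx->alignment, in the order the tensors were added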

void gguf_write_to_file(struct gguf_context * ctx, const char * fname, bool only_meta) {
    FILE * file = fopen(fname, "wb");
    if (!file) {
        GGML_ASSERT(false && "failed to open file for writing");
    }

    struct gguf_buf buf = gguf_buf_init(16*1024);

    gguf_write_to_buf(ctx, &buf, only_meta);

    fwrite(buf.data, 1, buf.offset, file);

    gguf_buf_free(buf);

    fclose(file);
}
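
// Example (illustrative only, not part of ggml.c; names are made up): minimal
// end-to-end authoring of a file with the API above, error handling omitted.
//
//   struct gguf_context * gctx = gguf_init_empty();
//   gguf_set_val_str(gctx, "example.name", "my-model");
//   gguf_add_tensor(gctx, some_tensor);             // tensor data is taken from tensor->data
//   gguf_write_to_file(gctx, "out.gguf", false);    // false = also write tensor data
//   gguf_free(gctx);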

size_t gguf_get_meta_size(struct gguf_context * ctx) {
    // no allocs - only compute size
    struct gguf_buf buf = gguf_buf_init(0);

    gguf_write_to_buf(ctx, &buf, true);

    return buf.offset;
}

void gguf_get_meta_data(struct gguf_context * ctx, void * data) {
    struct gguf_buf buf = gguf_buf_init(16*1024);

    gguf_write_to_buf(ctx, &buf, true);

    memcpy(data, buf.data, buf.offset);

    gguf_buf_free(buf);
}
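
// Example (illustrative only, not part of ggml.c): the intended two-step use of the
// meta helpers above - query the size, allocate, then copy the serialized metadata.
//
//   const size_t meta_size = gguf_get_meta_size(gctx);
//   void * meta = malloc(meta_size);
//   gguf_get_meta_data(gctx, meta);
//   // ... use `meta` (the serialized header/KV/tensor-info section) ...
//   free(meta);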

////////////////////////////////////////////////////////////////////////////////

int ggml_cpu_has_avx(void) {
#if defined(__AVX__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_avx2(void) {
#if defined(__AVX2__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_avx512(void) {
#if defined(__AVX512F__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_avx512_vbmi(void) {
#if defined(__AVX512VBMI__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_avx512_vnni(void) {
#if defined(__AVX512VNNI__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_fma(void) {
#if defined(__FMA__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_neon(void) {
#if defined(__ARM_NEON)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_arm_fma(void) {
#if defined(__ARM_FEATURE_FMA)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_f16c(void) {
#if defined(__F16C__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_fp16_va(void) {
#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_wasm_simd(void) {
#if defined(__wasm_simd128__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_blas(void) {
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) || defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_cublas(void) {
#if defined(GGML_USE_CUBLAS)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_clblast(void) {
#if defined(GGML_USE_CLBLAST)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_gpublas(void) {
    return ggml_cpu_has_cublas() || ggml_cpu_has_clblast();
}

int ggml_cpu_has_sse3(void) {
#if defined(__SSE3__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_ssse3(void) {
#if defined(__SSSE3__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_vsx(void) {
#if defined(__POWER9_VECTOR__)
    return 1;
#else
    return 0;
#endif
}
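
// Example (illustrative only, not part of ggml.c): these probes reflect compile-time
// build flags, so a caller can report the build configuration like this.
//
//   printf("AVX = %d | AVX2 = %d | NEON = %d | BLAS = %d\n",
//          ggml_cpu_has_avx(), ggml_cpu_has_avx2(), ggml_cpu_has_neon(), ggml_cpu_has_blas());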

////////////////////////////////////////////////////////////////////////////////