ggml.c (610 KB, ~14,308 lines)

430914310143111431214313143141431514316143171431814319143201432114322143231432414325143261432714328143291433014331143321433314334143351433614337143381433914340143411434214343143441434514346143471434814349143501435114352143531435414355143561435714358143591436014361143621436314364143651436614367143681436914370143711437214373143741437514376143771437814379143801438114382143831438414385143861438714388143891439014391143921439314394143951439614397143981439914400144011440214403144041440514406144071440814409144101441114412144131441414415144161441714418144191442014421144221442314424144251442614427144281442914430144311443214433144341443514436144371443814439144401444114442144431444414445144461444714448144491445014451144521445314454144551445614457144581445914460144611446214463144641446514466144671446814469144701447114472144731447414475144761447714478144791448014481144821448314484144851448614487144881448914490144911449214493144941449514496144971449814499145001450114502145031450414505145061450714508145091451014511145121451314514145151451614517145181451914520145211452214523145241452514526145271452814529145301453114532145331453414535145361453714538145391454014541145421454314544145451454614547145481454914550145511455214553145541455514556145571455814559145601456114562145631456414565145661456714568145691457014571145721457314574145751457614577145781457914580145811458214583145841458514586145871458814589145901459114592145931459414595145961459714598145991460014601146021460314604146051460614607146081460914610146111461214613146141461514616146171461814619146201462114622146231462414625146261462714628146291463014631146321463314634146351463614637146381463914640146411464214643146441464514646146471464814649146501465114652146531465414655146561465714658146591466014661146621466314664146651466614667146681466914670146711467214673146741467514676146771467814679146801468114682146831468414685146861468714688146891469014691146921469314694146951469614697146981469914700147011470214703147041470514706147071470814709147101471114712147131471414715147161471714718147191472014721147221472314724147251472614727147281472914730147311473214733147341473514736147371473814739147401474114742147431474414745147461474714748147491475014751147521475314754147551475614757147581475914760147611476214763147641476514766147671476814769147701477114772147731477414775147761477714778147791478014781147821478314784147851478614787147881478914790147911479214793147941479514796147971479814799148001480114802148031480414805148061480714808148091481014811148121481314814148151481614817148181481914820148211482214823148241482514826148271482814829148301483114832148331483414835148361483714838148391484014841148421484314844148451484614847148481484914850148511485214853148541485514856148571485814859148601486114862148631486414865148661486714868148691487014871148721487314874148751487614877148781487914880148811488214883148841488514886148871488814889148901489114892148931489414895148961489714898148991490014901149021490314904149051490614907149081490914910149111491214913149141491514916149171491814919149201492114922149231492414925149261492714928149291493014931149321493314934149351493614937149381493914940149411494214943149441494514946149471494814949149501495114952149531495414955149561495714958149591496014961149621496314964149651496614967149681496914970149711497214973149741497514976149771497814979149801498114982149831498414985149861498714988149891499014991149921499314994149951499614997149981499915000150011500215003150041500515006150071500815009150101501115012150131501415015150161501715018150191
502015021150221502315024150251502615027150281502915030150311503215033150341503515036150371503815039150401504115042150431504415045150461504715048150491505015051150521505315054150551505615057150581505915060150611506215063150641506515066150671506815069150701507115072150731507415075150761507715078150791508015081150821508315084150851508615087150881508915090150911509215093150941509515096150971509815099151001510115102151031510415105151061510715108151091511015111151121511315114151151511615117151181511915120151211512215123151241512515126151271512815129151301513115132151331513415135151361513715138151391514015141151421514315144151451514615147151481514915150151511515215153151541515515156151571515815159151601516115162151631516415165151661516715168151691517015171151721517315174151751517615177151781517915180151811518215183151841518515186151871518815189151901519115192151931519415195151961519715198151991520015201152021520315204152051520615207152081520915210152111521215213152141521515216152171521815219152201522115222152231522415225152261522715228152291523015231152321523315234152351523615237152381523915240152411524215243152441524515246152471524815249152501525115252152531525415255152561525715258152591526015261152621526315264152651526615267152681526915270152711527215273152741527515276152771527815279152801528115282152831528415285152861528715288152891529015291152921529315294152951529615297152981529915300153011530215303153041530515306153071530815309153101531115312153131531415315153161531715318153191532015321153221532315324153251532615327153281532915330153311533215333153341533515336153371533815339153401534115342153431534415345153461534715348153491535015351153521535315354153551535615357153581535915360153611536215363153641536515366153671536815369153701537115372153731537415375153761537715378153791538015381153821538315384153851538615387153881538915390153911539215393153941539515396153971539815399154001540115402154031540415405154061540715408154091541015411154121541315414154151541615417154181541915420154211542215423154241542515426154271542815429154301543115432154331543415435154361543715438154391544015441154421544315444154451544615447154481544915450154511545215453154541545515456154571545815459154601546115462154631546415465154661546715468154691547015471154721547315474154751547615477154781547915480154811548215483154841548515486154871548815489154901549115492154931549415495154961549715498154991550015501155021550315504155051550615507155081550915510155111551215513155141551515516155171551815519155201552115522155231552415525155261552715528155291553015531155321553315534155351553615537155381553915540155411554215543155441554515546155471554815549155501555115552155531555415555155561555715558155591556015561155621556315564155651556615567155681556915570155711557215573155741557515576155771557815579155801558115582155831558415585155861558715588155891559015591155921559315594155951559615597155981559915600156011560215603156041560515606156071560815609156101561115612156131561415615156161561715618156191562015621156221562315624156251562615627156281562915630156311563215633156341563515636156371563815639156401564115642156431564415645156461564715648156491565015651156521565315654156551565615657156581565915660156611566215663156641566515666156671566815669156701567115672156731567415675156761567715678156791568015681156821568315684156851568615687156881568915690156911569215693156941569515696156971569815699157001570115702157031570415705157061570715708157091571015711157121571315714157151571615717157181571915720157211572215723157241572515726157271572815729157301
573115732157331573415735157361573715738157391574015741157421574315744157451574615747157481574915750157511575215753157541575515756157571575815759157601576115762157631576415765157661576715768157691577015771157721577315774157751577615777157781577915780157811578215783157841578515786157871578815789157901579115792157931579415795157961579715798157991580015801158021580315804158051580615807158081580915810158111581215813158141581515816158171581815819158201582115822158231582415825158261582715828158291583015831158321583315834158351583615837158381583915840158411584215843158441584515846158471584815849158501585115852158531585415855158561585715858158591586015861158621586315864158651586615867158681586915870158711587215873158741587515876158771587815879158801588115882158831588415885158861588715888158891589015891158921589315894158951589615897158981589915900159011590215903159041590515906159071590815909159101591115912159131591415915159161591715918159191592015921159221592315924159251592615927159281592915930159311593215933159341593515936159371593815939159401594115942159431594415945159461594715948159491595015951159521595315954159551595615957159581595915960159611596215963159641596515966159671596815969159701597115972159731597415975159761597715978159791598015981159821598315984159851598615987159881598915990159911599215993159941599515996159971599815999160001600116002160031600416005160061600716008160091601016011160121601316014160151601616017160181601916020160211602216023160241602516026160271602816029160301603116032160331603416035160361603716038160391604016041160421604316044160451604616047160481604916050160511605216053160541605516056160571605816059160601606116062160631606416065160661606716068160691607016071160721607316074160751607616077160781607916080160811608216083160841608516086160871608816089160901609116092160931609416095160961609716098160991610016101161021610316104161051610616107161081610916110161111611216113161141611516116161171611816119161201612116122161231612416125161261612716128161291613016131161321613316134161351613616137161381613916140161411614216143161441614516146161471614816149161501615116152161531615416155161561615716158161591616016161161621616316164161651616616167161681616916170161711617216173161741617516176161771617816179161801618116182161831618416185161861618716188161891619016191161921619316194161951619616197161981619916200162011620216203162041620516206162071620816209162101621116212162131621416215162161621716218162191622016221162221622316224162251622616227162281622916230162311623216233162341623516236162371623816239162401624116242162431624416245162461624716248162491625016251162521625316254162551625616257162581625916260162611626216263162641626516266162671626816269162701627116272162731627416275162761627716278162791628016281162821628316284162851628616287162881628916290162911629216293162941629516296162971629816299163001630116302163031630416305163061630716308163091631016311163121631316314163151631616317163181631916320163211632216323163241632516326163271632816329163301633116332163331633416335163361633716338163391634016341163421634316344163451634616347163481634916350163511635216353163541635516356163571635816359163601636116362163631636416365163661636716368163691637016371163721637316374163751637616377163781637916380163811638216383163841638516386163871638816389163901639116392163931639416395163961639716398163991640016401164021640316404164051640616407164081640916410164111641216413164141641516416164171641816419164201642116422164231642416425164261642716428164291643016431164321643316434164351643616437164381643916440164411
644216443164441644516446164471644816449164501645116452164531645416455164561645716458164591646016461164621646316464164651646616467164681646916470164711647216473164741647516476164771647816479164801648116482164831648416485164861648716488164891649016491164921649316494164951649616497164981649916500165011650216503165041650516506165071650816509165101651116512165131651416515165161651716518165191652016521165221652316524165251652616527165281652916530165311653216533165341653516536165371653816539165401654116542165431654416545165461654716548165491655016551165521655316554165551655616557165581655916560165611656216563165641656516566165671656816569165701657116572165731657416575165761657716578165791658016581165821658316584165851658616587165881658916590165911659216593165941659516596165971659816599166001660116602166031660416605166061660716608166091661016611166121661316614166151661616617166181661916620166211662216623166241662516626166271662816629166301663116632166331663416635166361663716638166391664016641166421664316644166451664616647166481664916650166511665216653166541665516656166571665816659166601666116662166631666416665166661666716668166691667016671166721667316674166751667616677166781667916680166811668216683166841668516686166871668816689166901669116692166931669416695166961669716698166991670016701167021670316704167051670616707167081670916710167111671216713167141671516716167171671816719167201672116722167231672416725167261672716728167291673016731167321673316734167351673616737167381673916740167411674216743167441674516746167471674816749167501675116752167531675416755167561675716758167591676016761167621676316764167651676616767167681676916770167711677216773167741677516776167771677816779167801678116782167831678416785167861678716788167891679016791167921679316794167951679616797167981679916800168011680216803168041680516806168071680816809168101681116812168131681416815168161681716818168191682016821168221682316824168251682616827168281682916830168311683216833168341683516836168371683816839168401684116842168431684416845168461684716848168491685016851168521685316854168551685616857168581685916860168611686216863168641686516866168671686816869168701687116872168731687416875168761687716878168791688016881168821688316884168851688616887168881688916890168911689216893168941689516896168971689816899169001690116902169031690416905169061690716908169091691016911169121691316914169151691616917169181691916920169211692216923169241692516926169271692816929169301693116932169331693416935169361693716938169391694016941169421694316944169451694616947169481694916950169511695216953169541695516956169571695816959169601696116962169631696416965169661696716968169691697016971169721697316974169751697616977169781697916980169811698216983169841698516986169871698816989169901699116992169931699416995169961699716998169991700017001170021700317004170051700617007170081700917010170111701217013170141701517016170171701817019170201702117022170231702417025170261702717028170291703017031170321703317034170351703617037170381703917040170411704217043170441704517046170471704817049170501705117052170531705417055170561705717058170591706017061170621706317064170651706617067170681706917070170711707217073170741707517076170771707817079170801708117082170831708417085170861708717088170891709017091170921709317094170951709617097170981709917100171011710217103171041710517106171071710817109171101711117112171131711417115171161711717118171191712017121171221712317124171251712617127171281712917130171311713217133171341713517136171371713817139171401714117142171431714417145171461714717148171491715017151171521
715317154171551715617157171581715917160171611716217163171641716517166171671716817169171701717117172171731717417175171761717717178171791718017181171821718317184171851718617187171881718917190171911719217193171941719517196171971719817199172001720117202172031720417205172061720717208172091721017211172121721317214172151721617217172181721917220172211722217223172241722517226172271722817229172301723117232172331723417235172361723717238172391724017241172421724317244172451724617247172481724917250172511725217253172541725517256172571725817259172601726117262172631726417265172661726717268172691727017271172721727317274172751727617277172781727917280172811728217283172841728517286172871728817289172901729117292172931729417295172961729717298172991730017301173021730317304173051730617307173081730917310173111731217313173141731517316173171731817319173201732117322173231732417325173261732717328173291733017331173321733317334173351733617337173381733917340173411734217343173441734517346173471734817349173501735117352173531735417355173561735717358173591736017361173621736317364173651736617367173681736917370173711737217373173741737517376173771737817379173801738117382173831738417385173861738717388173891739017391173921739317394173951739617397173981739917400174011740217403174041740517406174071740817409174101741117412174131741417415174161741717418174191742017421174221742317424174251742617427174281742917430174311743217433174341743517436174371743817439174401744117442174431744417445174461744717448174491745017451174521745317454174551745617457174581745917460174611746217463174641746517466174671746817469174701747117472174731747417475174761747717478174791748017481174821748317484174851748617487174881748917490174911749217493174941749517496174971749817499175001750117502175031750417505175061750717508175091751017511175121751317514175151751617517175181751917520175211752217523175241752517526175271752817529175301753117532175331753417535175361753717538175391754017541175421754317544175451754617547175481754917550175511755217553175541755517556175571755817559175601756117562175631756417565175661756717568175691757017571175721757317574175751757617577175781757917580175811758217583175841758517586175871758817589175901759117592175931759417595175961759717598175991760017601176021760317604176051760617607176081760917610176111761217613176141761517616176171761817619176201762117622176231762417625176261762717628176291763017631176321763317634176351763617637176381763917640176411764217643176441764517646176471764817649176501765117652176531765417655176561765717658176591766017661176621766317664176651766617667176681766917670176711767217673176741767517676176771767817679176801768117682176831768417685176861768717688176891769017691176921769317694176951769617697176981769917700177011770217703177041770517706177071770817709177101771117712177131771417715177161771717718177191772017721177221772317724177251772617727177281772917730177311773217733177341773517736177371773817739177401774117742177431774417745177461774717748177491775017751177521775317754177551775617757177581775917760177611776217763177641776517766177671776817769177701777117772177731777417775177761777717778177791778017781177821778317784177851778617787177881778917790177911779217793177941779517796177971779817799178001780117802178031780417805178061780717808178091781017811178121781317814178151781617817178181781917820178211782217823178241782517826178271782817829178301783117832178331783417835178361783717838178391784017841178421784317844178451784617847178481784917850178511785217853178541785517856178571785817859178601786117862178631
786417865178661786717868178691787017871178721787317874178751787617877178781787917880178811788217883178841788517886178871788817889178901789117892178931789417895178961789717898178991790017901179021790317904179051790617907179081790917910179111791217913179141791517916179171791817919179201792117922179231792417925179261792717928179291793017931179321793317934179351793617937179381793917940179411794217943179441794517946179471794817949179501795117952179531795417955179561795717958179591796017961179621796317964179651796617967179681796917970179711797217973179741797517976179771797817979179801798117982179831798417985179861798717988179891799017991179921799317994179951799617997179981799918000180011800218003180041800518006180071800818009180101801118012180131801418015180161801718018180191802018021180221802318024180251802618027180281802918030180311803218033180341803518036180371803818039180401804118042180431804418045180461804718048180491805018051180521805318054180551805618057180581805918060180611806218063180641806518066180671806818069180701807118072180731807418075180761807718078180791808018081180821808318084180851808618087180881808918090180911809218093180941809518096180971809818099181001810118102181031810418105181061810718108181091811018111181121811318114181151811618117181181811918120181211812218123181241812518126181271812818129181301813118132181331813418135181361813718138181391814018141181421814318144181451814618147181481814918150181511815218153181541815518156181571815818159181601816118162181631816418165181661816718168181691817018171181721817318174181751817618177181781817918180181811818218183181841818518186181871818818189181901819118192181931819418195181961819718198181991820018201182021820318204182051820618207182081820918210182111821218213182141821518216182171821818219182201822118222182231822418225182261822718228182291823018231182321823318234182351823618237182381823918240182411824218243182441824518246182471824818249182501825118252182531825418255182561825718258182591826018261182621826318264182651826618267182681826918270182711827218273182741827518276182771827818279182801828118282182831828418285182861828718288182891829018291182921829318294182951829618297182981829918300183011830218303183041830518306183071830818309183101831118312183131831418315183161831718318183191832018321183221832318324183251832618327183281832918330183311833218333183341833518336183371833818339183401834118342183431834418345183461834718348183491835018351183521835318354183551835618357183581835918360183611836218363183641836518366183671836818369183701837118372183731837418375183761837718378183791838018381183821838318384183851838618387183881838918390183911839218393183941839518396183971839818399184001840118402184031840418405184061840718408184091841018411184121841318414184151841618417184181841918420184211842218423184241842518426184271842818429184301843118432184331843418435184361843718438184391844018441184421844318444184451844618447184481844918450184511845218453184541845518456184571845818459184601846118462184631846418465184661846718468184691847018471184721847318474184751847618477184781847918480184811848218483184841848518486184871848818489184901849118492184931849418495184961849718498184991850018501185021850318504185051850618507185081850918510185111851218513185141851518516185171851818519185201852118522185231852418525185261852718528185291853018531185321853318534185351853618537185381853918540185411854218543185441854518546185471854818549185501855118552185531855418555185561855718558185591856018561185621856318564185651856618567185681856918570185711857218573185741
857518576185771857818579185801858118582185831858418585185861858718588185891859018591185921859318594185951859618597185981859918600186011860218603186041860518606186071860818609186101861118612186131861418615186161861718618186191862018621186221862318624186251862618627186281862918630186311863218633186341863518636186371863818639186401864118642186431864418645186461864718648186491865018651186521865318654186551865618657186581865918660186611866218663186641866518666186671866818669186701867118672186731867418675186761867718678186791868018681186821868318684186851868618687186881868918690186911869218693186941869518696186971869818699187001870118702187031870418705187061870718708187091871018711187121871318714187151871618717187181871918720187211872218723187241872518726187271872818729187301873118732187331873418735187361873718738187391874018741187421874318744187451874618747187481874918750187511875218753187541875518756187571875818759187601876118762187631876418765187661876718768187691877018771187721877318774187751877618777187781877918780187811878218783187841878518786187871878818789187901879118792187931879418795187961879718798187991880018801188021880318804188051880618807188081880918810188111881218813188141881518816188171881818819188201882118822188231882418825188261882718828188291883018831188321883318834188351883618837188381883918840188411884218843188441884518846188471884818849188501885118852188531885418855188561885718858188591886018861188621886318864188651886618867188681886918870188711887218873188741887518876188771887818879188801888118882188831888418885188861888718888188891889018891188921889318894188951889618897188981889918900189011890218903189041890518906189071890818909189101891118912189131891418915189161891718918189191892018921189221892318924189251892618927189281892918930189311893218933189341893518936189371893818939189401894118942189431894418945189461894718948189491895018951189521895318954189551895618957189581895918960189611896218963189641896518966189671896818969189701897118972189731897418975189761897718978189791898018981189821898318984189851898618987189881898918990189911899218993189941899518996189971899818999190001900119002190031900419005190061900719008190091901019011190121901319014190151901619017190181901919020190211902219023190241902519026190271902819029190301903119032190331903419035190361903719038190391904019041190421904319044190451904619047190481904919050190511905219053190541905519056190571905819059190601906119062190631906419065190661906719068190691907019071190721907319074190751907619077190781907919080190811908219083190841908519086190871908819089190901909119092190931909419095190961909719098190991910019101191021910319104191051910619107191081910919110191111911219113191141911519116191171911819119191201912119122191231912419125191261912719128191291913019131191321913319134191351913619137191381913919140191411914219143191441914519146191471914819149191501915119152191531915419155191561915719158191591916019161191621916319164191651916619167191681916919170191711917219173191741917519176191771917819179
#define _GNU_SOURCE // Defines CLOCK_MONOTONIC on Linux
#define _CRT_SECURE_NO_DEPRECATE // Disables ridiculous "unsafe" warnings on Windows
#include "ggml.h"

#ifdef GGML_USE_K_QUANTS
#include "k_quants.h"
#endif

#if defined(_MSC_VER) || defined(__MINGW32__)
#include <malloc.h> // using malloc.h with MSC/MINGW
#elif !defined(__FreeBSD__) && !defined(__NetBSD__) && !defined(__OpenBSD__)
#include <alloca.h>
#endif

#include <assert.h>
#include <errno.h>
#include <time.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>
#include <float.h>
#include <limits.h>
#include <stdarg.h>

#ifdef GGML_USE_METAL
#include <unistd.h>
#endif
// if C99 - static_assert is a no-op
// ref: https://stackoverflow.com/a/53923785/4039976
#ifndef static_assert
#define static_assert(cond, msg) struct global_scope_noop_trick
#endif

#if defined(_MSC_VER)
// disable "possible loss of data" to avoid hundreds of casts
// we should just be careful :)
#pragma warning(disable: 4244 4267)
#endif

#if defined(_WIN32)

#include <windows.h>

typedef volatile LONG atomic_int;
typedef atomic_int atomic_bool;

static void atomic_store(atomic_int * ptr, LONG val) {
    InterlockedExchange(ptr, val);
}
static LONG atomic_load(atomic_int * ptr) {
    return InterlockedCompareExchange(ptr, 0, 0);
}
static LONG atomic_fetch_add(atomic_int * ptr, LONG inc) {
    return InterlockedExchangeAdd(ptr, inc);
}
static LONG atomic_fetch_sub(atomic_int * ptr, LONG dec) {
    return atomic_fetch_add(ptr, -(dec));
}

typedef HANDLE pthread_t;
typedef DWORD thread_ret_t;

static int pthread_create(pthread_t * out, void * unused, thread_ret_t (*func)(void *), void * arg) {
    (void) unused;
    HANDLE handle = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE) func, arg, 0, NULL);
    if (handle == NULL) {
        return EAGAIN;
    }

    *out = handle;
    return 0;
}

static int pthread_join(pthread_t thread, void * unused) {
    (void) unused;
    return (int) WaitForSingleObject(thread, INFINITE);
}

static int sched_yield(void) {
    Sleep(0);
    return 0;
}
#else
#include <pthread.h>
#include <stdatomic.h>

typedef void * thread_ret_t;

#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#endif
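
// Illustrative sketch (not part of ggml): with the shim above, the same
// worker code compiles unchanged on Windows and POSIX. The example_* names
// are hypothetical.
static atomic_int example_counter;

static thread_ret_t example_worker(void * arg) {
    (void) arg;
    atomic_fetch_add(&example_counter, 1);
    return 0;
}

static inline void example_spawn_and_join(void) {
    pthread_t t;
    pthread_create(&t, NULL, example_worker, NULL);
    pthread_join(t, NULL);
}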
// __FMA__ and __F16C__ are not defined in MSVC, however they are implied with AVX2/AVX512
#if defined(_MSC_VER) && (defined(__AVX2__) || defined(__AVX512F__))
#ifndef __FMA__
#define __FMA__
#endif
#ifndef __F16C__
#define __F16C__
#endif
#ifndef __SSE3__
#define __SSE3__
#endif
#endif

#ifdef __HAIKU__
#define static_assert(cond, msg) _Static_assert(cond, msg)
#endif

/*#define GGML_PERF*/
#define GGML_DEBUG 0
#define GGML_GELU_FP16
#define GGML_GELU_QUICK_FP16
#define GGML_SILU_FP16

#define GGML_SOFT_MAX_UNROLL 4
#define GGML_VEC_DOT_UNROLL  2

//
// logging
//

#if (GGML_DEBUG >= 1)
#define GGML_PRINT_DEBUG(...) printf(__VA_ARGS__)
#else
#define GGML_PRINT_DEBUG(...)
#endif

#if (GGML_DEBUG >= 5)
#define GGML_PRINT_DEBUG_5(...) printf(__VA_ARGS__)
#else
#define GGML_PRINT_DEBUG_5(...)
#endif

#if (GGML_DEBUG >= 10)
#define GGML_PRINT_DEBUG_10(...) printf(__VA_ARGS__)
#else
#define GGML_PRINT_DEBUG_10(...)
#endif

#define GGML_PRINT(...) printf(__VA_ARGS__)

#ifdef GGML_USE_ACCELERATE
// uncomment to use vDSP for soft max computation
// note: not sure if it is actually faster
//#define GGML_SOFT_MAX_ACCELERATE
#endif

#if UINTPTR_MAX == 0xFFFFFFFF
#define GGML_MEM_ALIGN 4
#else
#define GGML_MEM_ALIGN 16
#endif
#if defined(_MSC_VER) || defined(__MINGW32__)
#define GGML_ALIGNED_MALLOC(size) _aligned_malloc(size, GGML_MEM_ALIGN)
#define GGML_ALIGNED_FREE(ptr)    _aligned_free(ptr)
#else
inline static void * ggml_aligned_malloc(size_t size) {
    void * aligned_memory = NULL;
#ifdef GGML_USE_METAL
    int result = posix_memalign(&aligned_memory, getpagesize(), size);
#else
    int result = posix_memalign(&aligned_memory, GGML_MEM_ALIGN, size);
#endif
    if (result != 0) {
        // Handle allocation failure
        const char * error_desc = "unknown allocation error";
        switch (result) {
            case EINVAL:
                error_desc = "invalid alignment value";
                break;
            case ENOMEM:
                error_desc = "insufficient memory";
                break;
        }
        GGML_PRINT("%s: %s (attempted to allocate %6.2f MB)\n",
            __func__, error_desc, size/(1024.0*1024.0));
        return NULL;
    }
    return aligned_memory;
}
#define GGML_ALIGNED_MALLOC(size) ggml_aligned_malloc(size)
#define GGML_ALIGNED_FREE(ptr)    free(ptr)
#endif
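
// Illustrative usage sketch (not part of ggml): both branches above expose
// the same two macros, so callers stay platform-agnostic. The example_*
// name is hypothetical.
static inline void example_aligned_buffer(void) {
    // 16-byte aligned on most builds; page-aligned when GGML_USE_METAL is set
    float * buf = (float *) GGML_ALIGNED_MALLOC(1024*sizeof(float));
    if (buf != NULL) {
        buf[0] = 1.0f;
        GGML_ALIGNED_FREE(buf);
    }
}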
#define UNUSED(x) (void)(x)
#define SWAP(x, y, T) do { T SWAP = x; x = y; y = SWAP; } while (0)

#if defined(GGML_USE_ACCELERATE)
#include <Accelerate/Accelerate.h>
#if defined(GGML_USE_CLBLAST) // allow usage of CLBlast alongside Accelerate functions
#include "ggml-opencl.h"
#endif
#elif defined(GGML_USE_OPENBLAS)
#include <cblas.h>
#elif defined(GGML_USE_CUBLAS)
#include "ggml-cuda.h"
#elif defined(GGML_USE_CLBLAST)
#include "ggml-opencl.h"
#endif

#undef MIN
#undef MAX
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

// floating point type used to accumulate sums
typedef double ggml_float;

// 16-bit float
// on Arm, we use __fp16
// on x86, we use uint16_t
#ifdef __ARM_NEON

// if YCM cannot find <arm_neon.h>, make a symbolic link to it, for example:
//
//   $ ln -sfn /Library/Developer/CommandLineTools/usr/lib/clang/13.1.6/include/arm_neon.h ./src/
//
#include <arm_neon.h>

#define GGML_COMPUTE_FP16_TO_FP32(x) ((float) (x))
#define GGML_COMPUTE_FP32_TO_FP16(x) (x)

#define GGML_FP16_TO_FP32(x) ((float) (x))
#define GGML_FP32_TO_FP16(x) (x)

#else

#ifdef __wasm_simd128__
#include <wasm_simd128.h>
#else
#ifdef __POWER9_VECTOR__
#include <altivec.h>
#undef bool
#define bool _Bool
#else
#if defined(_MSC_VER) || defined(__MINGW32__)
#include <intrin.h>
#else
#if !defined(__riscv)
#include <immintrin.h>
#endif
#endif
#endif
#endif

#ifdef __F16C__

#ifdef _MSC_VER
#define GGML_COMPUTE_FP16_TO_FP32(x) _mm_cvtss_f32(_mm_cvtph_ps(_mm_cvtsi32_si128(x)))
#define GGML_COMPUTE_FP32_TO_FP16(x) _mm_extract_epi16(_mm_cvtps_ph(_mm_set_ss(x), 0), 0)
#else
#define GGML_COMPUTE_FP16_TO_FP32(x) _cvtsh_ss(x)
#define GGML_COMPUTE_FP32_TO_FP16(x) _cvtss_sh(x, 0)
#endif

#elif defined(__POWER9_VECTOR__)

#define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
#define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x)
/* the inline asm below is about 12% faster than the lookup method */
#define GGML_FP16_TO_FP32(x) GGML_COMPUTE_FP16_TO_FP32(x)
#define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x)

static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) {
    register float f;
    register double d;
    __asm__(
        "mtfprd %0,%2\n"
        "xscvhpdp %0,%0\n"
        "frsp %1,%0\n" :
        /* temp */ "=d"(d),
        /* out */  "=f"(f):
        /* in */   "r"(h));
    return f;
}

static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) {
    register double d;
    register ggml_fp16_t r;
    __asm__( /* xscvdphp can work on double or single precision */
        "xscvdphp %0,%2\n"
        "mffprd %1,%0\n" :
        /* temp */ "=d"(d),
        /* out */  "=r"(r):
        /* in */   "f"(f));
    return r;
}

#else

// FP16 <-> FP32
// ref: https://github.com/Maratyszcza/FP16

static inline float fp32_from_bits(uint32_t w) {
    union {
        uint32_t as_bits;
        float    as_value;
    } fp32;
    fp32.as_bits = w;
    return fp32.as_value;
}

static inline uint32_t fp32_to_bits(float f) {
    union {
        float    as_value;
        uint32_t as_bits;
    } fp32;
    fp32.as_value = f;
    return fp32.as_bits;
}

static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) {
    const uint32_t w = (uint32_t) h << 16;
    const uint32_t sign = w & UINT32_C(0x80000000);
    const uint32_t two_w = w + w;

    const uint32_t exp_offset = UINT32_C(0xE0) << 23;
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)
    const float exp_scale = 0x1.0p-112f;
#else
    const float exp_scale = fp32_from_bits(UINT32_C(0x7800000));
#endif
    const float normalized_value = fp32_from_bits((two_w >> 4) + exp_offset) * exp_scale;

    const uint32_t magic_mask = UINT32_C(126) << 23;
    const float magic_bias = 0.5f;
    const float denormalized_value = fp32_from_bits((two_w >> 17) | magic_mask) - magic_bias;

    const uint32_t denormalized_cutoff = UINT32_C(1) << 27;
    const uint32_t result = sign |
        (two_w < denormalized_cutoff ? fp32_to_bits(denormalized_value) : fp32_to_bits(normalized_value));
    return fp32_from_bits(result);
}

static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) {
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)
    const float scale_to_inf  = 0x1.0p+112f;
    const float scale_to_zero = 0x1.0p-110f;
#else
    const float scale_to_inf  = fp32_from_bits(UINT32_C(0x77800000));
    const float scale_to_zero = fp32_from_bits(UINT32_C(0x08800000));
#endif
    float base = (fabsf(f) * scale_to_inf) * scale_to_zero;

    const uint32_t w = fp32_to_bits(f);
    const uint32_t shl1_w = w + w;
    const uint32_t sign = w & UINT32_C(0x80000000);
    uint32_t bias = shl1_w & UINT32_C(0xFF000000);
    if (bias < UINT32_C(0x71000000)) {
        bias = UINT32_C(0x71000000);
    }

    base = fp32_from_bits((bias >> 1) + UINT32_C(0x07800000)) + base;
    const uint32_t bits = fp32_to_bits(base);
    const uint32_t exp_bits = (bits >> 13) & UINT32_C(0x00007C00);
    const uint32_t mantissa_bits = bits & UINT32_C(0x00000FFF);
    const uint32_t nonsign = exp_bits + mantissa_bits;
    return (sign >> 16) | (shl1_w > UINT32_C(0xFF000000) ? UINT16_C(0x7E00) : nonsign);
}

#define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
#define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x)

#endif // __F16C__

#endif // __ARM_NEON
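
// Illustrative sketch (not part of ggml): whichever branch above was taken,
// the two GGML_COMPUTE_* macros are now defined, and a round trip shows the
// roughly 3 decimal digits of fp16 precision. The example_* name is
// hypothetical.
static inline void example_fp16_round_trip(void) {
    const ggml_fp16_t h = GGML_COMPUTE_FP32_TO_FP16(0.1f);
    const float back   = GGML_COMPUTE_FP16_TO_FP32(h);
    printf("0.1f -> fp16 -> %.7f\n", back); // prints ~0.0999756
}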
//
// global data
//

// precomputed gelu table for f16 (128 KB)
static ggml_fp16_t table_gelu_f16[1 << 16];

// precomputed quick gelu table for f16 (128 KB)
static ggml_fp16_t table_gelu_quick_f16[1 << 16];

// precomputed silu table for f16 (128 KB)
static ggml_fp16_t table_silu_f16[1 << 16];

// precomputed exp table for f16 (128 KB)
static ggml_fp16_t table_exp_f16[1 << 16];

// precomputed f32 table for f16 (256 KB)
static float table_f32_f16[1 << 16];

#if defined(__ARM_NEON) || defined(__wasm_simd128__)
#define B1(c,s,n)  0x ## n ## c ,  0x ## n ## s
#define B2(c,s,n) B1(c,s,n ## c), B1(c,s,n ## s)
#define B3(c,s,n) B2(c,s,n ## c), B2(c,s,n ## s)
#define B4(c,s,n) B3(c,s,n ## c), B3(c,s,n ## s)
#define B5(c,s,n) B4(c,s,n ## c), B4(c,s,n ## s)
#define B6(c,s,n) B5(c,s,n ## c), B5(c,s,n ## s)
#define B7(c,s,n) B6(c,s,n ## c), B6(c,s,n ## s)
#define B8(c,s  ) B7(c,s,     c), B7(c,s,     s)

// precomputed tables for expanding 8bits to 8 bytes:
static const uint64_t table_b2b_0[1 << 8] = { B8(00, 10) }; // ( b) << 4
static const uint64_t table_b2b_1[1 << 8] = { B8(10, 00) }; // (!b) << 4
#endif

// On ARM NEON, it's quicker to directly convert x -> x instead of calling into ggml_lookup_fp16_to_fp32,
// so we define GGML_FP16_TO_FP32 and GGML_FP32_TO_FP16 elsewhere for NEON.
// This is also true for POWER9.
#if !defined(GGML_FP16_TO_FP32) || !defined(GGML_FP32_TO_FP16)

inline static float ggml_lookup_fp16_to_fp32(ggml_fp16_t f) {
    uint16_t s;
    memcpy(&s, &f, sizeof(uint16_t));
    return table_f32_f16[s];
}

#define GGML_FP16_TO_FP32(x) ggml_lookup_fp16_to_fp32(x)
#define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x)

#endif

// note: do not use these inside ggml.c
// these are meant to be used via the ggml.h API
float ggml_fp16_to_fp32(ggml_fp16_t x) {
    return (float) GGML_FP16_TO_FP32(x);
}

ggml_fp16_t ggml_fp32_to_fp16(float x) {
    return GGML_FP32_TO_FP16(x);
}

void ggml_fp16_to_fp32_row(const ggml_fp16_t * x, float * y, size_t n) {
    for (size_t i = 0; i < n; i++) {
        y[i] = GGML_FP16_TO_FP32(x[i]);
    }
}

void ggml_fp32_to_fp16_row(const float * x, ggml_fp16_t * y, size_t n) {
    size_t i = 0;
#if defined(__F16C__)
    for (; i + 7 < n; i += 8) {
        __m256 x_vec = _mm256_loadu_ps(x + i);
        __m128i y_vec = _mm256_cvtps_ph(x_vec, _MM_FROUND_TO_NEAREST_INT);
        _mm_storeu_si128((__m128i *)(y + i), y_vec);
    }
    for (; i + 3 < n; i += 4) {
        __m128 x_vec = _mm_loadu_ps(x + i);
        __m128i y_vec = _mm_cvtps_ph(x_vec, _MM_FROUND_TO_NEAREST_INT);
        _mm_storel_epi64((__m128i *)(y + i), y_vec);
    }
#endif
    for (; i < n; i++) {
        y[i] = GGML_FP32_TO_FP16(x[i]);
    }
}
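
// Illustrative usage sketch (not part of ggml): convert a short row there and
// back. The F16C path above handles 8, then 4, elements per iteration and the
// scalar loop picks up the tail. Note that on x86 the f16 -> f32 direction
// goes through table_f32_f16, which ggml fills in at init time. The example_*
// name is hypothetical.
static inline void example_row_round_trip(void) {
    const float src[5] = { 0.0f, 0.5f, 1.0f, 2.0f, 4.0f };
    ggml_fp16_t tmp[5];
    float       dst[5];
    ggml_fp32_to_fp16_row(src, tmp, 5);
    ggml_fp16_to_fp32_row(tmp, dst, 5); // all five values are exact in fp16
}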
//
// timing
//

#if defined(_MSC_VER) || defined(__MINGW32__)
static int64_t timer_freq, timer_start;
void ggml_time_init(void) {
    LARGE_INTEGER t;
    QueryPerformanceFrequency(&t);
    timer_freq = t.QuadPart;
    // The multiplication by 1000 or 1000000 below can overflow if timer_freq
    // and the uptime are high enough.
    // We subtract the program start time to reduce the likelihood of that happening.
    QueryPerformanceCounter(&t);
    timer_start = t.QuadPart;
}
int64_t ggml_time_ms(void) {
    LARGE_INTEGER t;
    QueryPerformanceCounter(&t);
    return ((t.QuadPart-timer_start) * 1000) / timer_freq;
}
int64_t ggml_time_us(void) {
    LARGE_INTEGER t;
    QueryPerformanceCounter(&t);
    return ((t.QuadPart-timer_start) * 1000000) / timer_freq;
}
#else
void ggml_time_init(void) {}
int64_t ggml_time_ms(void) {
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (int64_t)ts.tv_sec*1000 + (int64_t)ts.tv_nsec/1000000;
}

int64_t ggml_time_us(void) {
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (int64_t)ts.tv_sec*1000000 + (int64_t)ts.tv_nsec/1000;
}
#endif

int64_t ggml_cycles(void) {
    return clock();
}

int64_t ggml_cycles_per_ms(void) {
    return CLOCKS_PER_SEC/1000;
}

#ifdef GGML_PERF
#define ggml_perf_time_ms()       ggml_time_ms()
#define ggml_perf_time_us()       ggml_time_us()
#define ggml_perf_cycles()        ggml_cycles()
#define ggml_perf_cycles_per_ms() ggml_cycles_per_ms()
#else
#define ggml_perf_time_ms()       0
#define ggml_perf_time_us()       0
#define ggml_perf_cycles()        0
#define ggml_perf_cycles_per_ms() 0
#endif
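
// Illustrative usage sketch (not part of ggml): measure a region with the
// portable timers above. ggml_time_init() must run once first so the Windows
// branch can capture timer_freq/timer_start; on POSIX it is a no-op. The
// example_* name is hypothetical.
static inline void example_time_region(void) {
    ggml_time_init();
    const int64_t t0 = ggml_time_us();
    // ... work to be measured ...
    const int64_t t1 = ggml_time_us();
    printf("elapsed: %" PRId64 " us\n", t1 - t0);
}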
//
// cache line
//

#if defined(__cpp_lib_hardware_interference_size)
#define CACHE_LINE_SIZE hardware_destructive_interference_size
#else
#if defined(__POWER9_VECTOR__)
#define CACHE_LINE_SIZE 128
#else
#define CACHE_LINE_SIZE 64
#endif
#endif

static const size_t CACHE_LINE_SIZE_F32 = CACHE_LINE_SIZE/sizeof(float);

//
// quantization
//

#define MM256_SET_M128I(a, b) _mm256_insertf128_si256(_mm256_castsi128_si256(b), (a), 1)

#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__)
// multiply int8_t, add results pairwise twice
static inline __m128i mul_sum_i8_pairs(const __m128i x, const __m128i y) {
    // Get absolute values of x vectors
    const __m128i ax = _mm_sign_epi8(x, x);
    // Sign the values of the y vectors
    const __m128i sy = _mm_sign_epi8(y, x);
    // Perform multiplication and create 16-bit values
    const __m128i dot = _mm_maddubs_epi16(ax, sy);
    const __m128i ones = _mm_set1_epi16(1);
    return _mm_madd_epi16(ones, dot);
}
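
// Scalar reference for the helper above (illustrative, not part of ggml):
// each of the 4 output int32 lanes is the dot product of 4 adjacent int8
// pairs. The sign-transfer trick works because _mm_maddubs_epi16 needs an
// unsigned first operand, and |x| * sign(x)*y == x * y. The example_* name
// is hypothetical.
static inline void example_mul_sum_i8_pairs_scalar(const int8_t * x, const int8_t * y, int32_t * out) {
    for (int lane = 0; lane < 4; ++lane) {
        int32_t acc = 0;
        for (int i = 0; i < 4; ++i) {
            acc += (int32_t) x[4*lane + i] * (int32_t) y[4*lane + i];
        }
        out[lane] = acc;
    }
}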
#if __AVX__ || __AVX2__ || __AVX512F__
// horizontally add 8 floats
static inline float hsum_float_8(const __m256 x) {
    __m128 res = _mm256_extractf128_ps(x, 1);
    res = _mm_add_ps(res, _mm256_castps256_ps128(x));
    res = _mm_add_ps(res, _mm_movehl_ps(res, res));
    res = _mm_add_ss(res, _mm_movehdup_ps(res));
    return _mm_cvtss_f32(res);
}

// horizontally add 8 int32_t
static inline int hsum_i32_8(const __m256i a) {
    const __m128i sum128 = _mm_add_epi32(_mm256_castsi256_si128(a), _mm256_extractf128_si256(a, 1));
    const __m128i hi64 = _mm_unpackhi_epi64(sum128, sum128);
    const __m128i sum64 = _mm_add_epi32(hi64, sum128);
    const __m128i hi32  = _mm_shuffle_epi32(sum64, _MM_SHUFFLE(2, 3, 0, 1));
    return _mm_cvtsi128_si32(_mm_add_epi32(sum64, hi32));
}

// horizontally add 4 int32_t
static inline int hsum_i32_4(const __m128i a) {
    const __m128i hi64 = _mm_unpackhi_epi64(a, a);
    const __m128i sum64 = _mm_add_epi32(hi64, a);
    const __m128i hi32  = _mm_shuffle_epi32(sum64, _MM_SHUFFLE(2, 3, 0, 1));
    return _mm_cvtsi128_si32(_mm_add_epi32(sum64, hi32));
}

#if defined(__AVX2__) || defined(__AVX512F__)
// spread 32 bits to 32 bytes { 0x00, 0xFF }
static inline __m256i bytes_from_bits_32(const uint8_t * x) {
    uint32_t x32;
    memcpy(&x32, x, sizeof(uint32_t));
    const __m256i shuf_mask = _mm256_set_epi64x(
            0x0303030303030303, 0x0202020202020202,
            0x0101010101010101, 0x0000000000000000);
    __m256i bytes = _mm256_shuffle_epi8(_mm256_set1_epi32(x32), shuf_mask);
    const __m256i bit_mask = _mm256_set1_epi64x(0x7fbfdfeff7fbfdfe);
    bytes = _mm256_or_si256(bytes, bit_mask);
    return _mm256_cmpeq_epi8(bytes, _mm256_set1_epi64x(-1));
}
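
// Scalar reference (illustrative, not part of ggml): output byte i is 0xFF
// when bit i of the 32-bit input is set, else 0x00. The shuffle replicates
// each input byte 8 times and the OR/cmpeq pair isolates one bit per lane.
// The example_* name is hypothetical.
static inline void example_bytes_from_bits_32_scalar(const uint8_t * x, uint8_t * out) {
    uint32_t x32;
    memcpy(&x32, x, sizeof(uint32_t));
    for (int i = 0; i < 32; ++i) {
        out[i] = (x32 >> i) & 1 ? 0xFF : 0x00;
    }
}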
// Unpack 32 4-bit fields into 32 bytes
// The output vector contains 32 bytes, each one in [ 0 .. 15 ] interval
static inline __m256i bytes_from_nibbles_32(const uint8_t * rsi)
{
    const __m128i tmp = _mm_loadu_si128((const __m128i *)rsi);
    const __m256i bytes = MM256_SET_M128I(_mm_srli_epi16(tmp, 4), tmp);
    const __m256i lowMask = _mm256_set1_epi8( 0xF );
    return _mm256_and_si256(lowMask, bytes);
}
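
// Scalar reference (illustrative, not part of ggml): the 16 low nibbles land
// in the low half of the output and the 16 high nibbles in the high half.
// The example_* name is hypothetical.
static inline void example_bytes_from_nibbles_32_scalar(const uint8_t * rsi, uint8_t * out) {
    for (int i = 0; i < 16; ++i) {
        out[i]      = rsi[i] & 0x0F;
        out[16 + i] = rsi[i] >> 4;
    }
}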
// add int16_t pairwise and return as float vector
static inline __m256 sum_i16_pairs_float(const __m256i x) {
    const __m256i ones = _mm256_set1_epi16(1);
    const __m256i summed_pairs = _mm256_madd_epi16(ones, x);
    return _mm256_cvtepi32_ps(summed_pairs);
}

static inline __m256 mul_sum_us8_pairs_float(const __m256i ax, const __m256i sy) {
#if __AVXVNNI__
    const __m256i zero = _mm256_setzero_si256();
    const __m256i summed_pairs = _mm256_dpbusd_epi32(zero, ax, sy);
    return _mm256_cvtepi32_ps(summed_pairs);
#else
    // Perform multiplication and create 16-bit values
    const __m256i dot = _mm256_maddubs_epi16(ax, sy);
    return sum_i16_pairs_float(dot);
#endif
}

// multiply int8_t, add results pairwise twice and return as float vector
static inline __m256 mul_sum_i8_pairs_float(const __m256i x, const __m256i y) {
#if __AVXVNNIINT8__
    const __m256i zero = _mm256_setzero_si256();
    const __m256i summed_pairs = _mm256_dpbssd_epi32(zero, x, y);
    return _mm256_cvtepi32_ps(summed_pairs);
#else
    // Get absolute values of x vectors
    const __m256i ax = _mm256_sign_epi8(x, x);
    // Sign the values of the y vectors
    const __m256i sy = _mm256_sign_epi8(y, x);
    return mul_sum_us8_pairs_float(ax, sy);
#endif
}

static inline __m128i packNibbles( __m256i bytes )
{
    // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh
#if __AVX512F__
    const __m256i bytes_srli_4 = _mm256_srli_epi16(bytes, 4);   // 0000_0000_abcd_0000
    bytes = _mm256_or_si256(bytes, bytes_srli_4);               // 0000_abcd_abcd_efgh
    return _mm256_cvtepi16_epi8(bytes);                         // abcd_efgh
#else
    const __m256i lowByte = _mm256_set1_epi16( 0xFF );
    __m256i high = _mm256_andnot_si256( lowByte, bytes );
    __m256i low = _mm256_and_si256( lowByte, bytes );
    high = _mm256_srli_epi16( high, 4 );
    bytes = _mm256_or_si256( low, high );

    // Compress uint16_t lanes into bytes
    __m128i r0 = _mm256_castsi256_si128( bytes );
    __m128i r1 = _mm256_extracti128_si256( bytes, 1 );
    return _mm_packus_epi16( r0, r1 );
#endif
}
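
// Scalar reference (illustrative, not part of ggml): each 16-bit lane holds
// two 4-bit values as 0000_abcd_0000_efgh and collapses to the single byte
// abcd_efgh. The example_* name is hypothetical.
static inline void example_pack_nibbles_scalar(const uint16_t * in, uint8_t * out, int n) {
    for (int i = 0; i < n; ++i) {
        out[i] = (uint8_t) ((in[i] & 0x0F) | ((in[i] >> 4) & 0xF0));
    }
}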
#elif defined(__AVX__)
// spread 32 bits to 32 bytes { 0x00, 0xFF }
static inline __m256i bytes_from_bits_32(const uint8_t * x) {
    uint32_t x32;
    memcpy(&x32, x, sizeof(uint32_t));
    const __m128i shuf_maskl = _mm_set_epi64x(0x0101010101010101, 0x0000000000000000);
    const __m128i shuf_maskh = _mm_set_epi64x(0x0303030303030303, 0x0202020202020202);
    __m128i bytesl = _mm_shuffle_epi8(_mm_set1_epi32(x32), shuf_maskl);
    __m128i bytesh = _mm_shuffle_epi8(_mm_set1_epi32(x32), shuf_maskh);
    const __m128i bit_mask = _mm_set1_epi64x(0x7fbfdfeff7fbfdfe);
    bytesl = _mm_or_si128(bytesl, bit_mask);
    bytesh = _mm_or_si128(bytesh, bit_mask);
    bytesl = _mm_cmpeq_epi8(bytesl, _mm_set1_epi64x(-1));
    bytesh = _mm_cmpeq_epi8(bytesh, _mm_set1_epi64x(-1));
    return MM256_SET_M128I(bytesh, bytesl);
}

// Unpack 32 4-bit fields into 32 bytes
// The output vector contains 32 bytes, each one in [ 0 .. 15 ] interval
static inline __m256i bytes_from_nibbles_32(const uint8_t * rsi)
{
    // Load 16 bytes from memory
    __m128i tmpl = _mm_loadu_si128((const __m128i *)rsi);
    __m128i tmph = _mm_srli_epi16(tmpl, 4);
    const __m128i lowMask = _mm_set1_epi8(0xF);
    tmpl = _mm_and_si128(lowMask, tmpl);
    tmph = _mm_and_si128(lowMask, tmph);
    return MM256_SET_M128I(tmph, tmpl);
}

// add int16_t pairwise and return as float vector
static inline __m256 sum_i16_pairs_float(const __m128i xh, const __m128i xl) {
    const __m128i ones = _mm_set1_epi16(1);
    const __m128i summed_pairsl = _mm_madd_epi16(ones, xl);
    const __m128i summed_pairsh = _mm_madd_epi16(ones, xh);
    const __m256i summed_pairs = MM256_SET_M128I(summed_pairsh, summed_pairsl);
    return _mm256_cvtepi32_ps(summed_pairs);
}

static inline __m256 mul_sum_us8_pairs_float(const __m256i ax, const __m256i sy) {
    const __m128i axl = _mm256_castsi256_si128(ax);
    const __m128i axh = _mm256_extractf128_si256(ax, 1);
    const __m128i syl = _mm256_castsi256_si128(sy);
    const __m128i syh = _mm256_extractf128_si256(sy, 1);
    // Perform multiplication and create 16-bit values
    const __m128i dotl = _mm_maddubs_epi16(axl, syl);
    const __m128i doth = _mm_maddubs_epi16(axh, syh);
    return sum_i16_pairs_float(doth, dotl);
}

// multiply int8_t, add results pairwise twice and return as float vector
static inline __m256 mul_sum_i8_pairs_float(const __m256i x, const __m256i y) {
    const __m128i xl = _mm256_castsi256_si128(x);
    const __m128i xh = _mm256_extractf128_si256(x, 1);
    const __m128i yl = _mm256_castsi256_si128(y);
    const __m128i yh = _mm256_extractf128_si256(y, 1);
    // Get absolute values of x vectors
    const __m128i axl = _mm_sign_epi8(xl, xl);
    const __m128i axh = _mm_sign_epi8(xh, xh);
    // Sign the values of the y vectors
    const __m128i syl = _mm_sign_epi8(yl, xl);
    const __m128i syh = _mm_sign_epi8(yh, xh);
    // Perform multiplication and create 16-bit values
    const __m128i dotl = _mm_maddubs_epi16(axl, syl);
    const __m128i doth = _mm_maddubs_epi16(axh, syh);
    return sum_i16_pairs_float(doth, dotl);
}

static inline __m128i packNibbles( __m128i bytes1, __m128i bytes2 )
{
    // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh
    const __m128i lowByte = _mm_set1_epi16( 0xFF );
    __m128i high = _mm_andnot_si128( lowByte, bytes1 );
    __m128i low = _mm_and_si128( lowByte, bytes1 );
    high = _mm_srli_epi16( high, 4 );
    bytes1 = _mm_or_si128( low, high );
    high = _mm_andnot_si128( lowByte, bytes2 );
    low = _mm_and_si128( lowByte, bytes2 );
    high = _mm_srli_epi16( high, 4 );
    bytes2 = _mm_or_si128( low, high );

    return _mm_packus_epi16( bytes1, bytes2);
}
#endif
#elif defined(__SSSE3__)
// horizontally add 4x4 floats
static inline float hsum_float_4x4(const __m128 a, const __m128 b, const __m128 c, const __m128 d) {
    __m128 res_0 =_mm_hadd_ps(a, b);
    __m128 res_1 =_mm_hadd_ps(c, d);
    __m128 res =_mm_hadd_ps(res_0, res_1);
    res =_mm_hadd_ps(res, res);
    res =_mm_hadd_ps(res, res);
    return _mm_cvtss_f32(res);
}
#endif // __AVX__ || __AVX2__ || __AVX512F__
#endif // defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__)

#if defined(__ARM_NEON)

#if !defined(__aarch64__)

inline static uint16_t vaddvq_u8(uint8x16_t v) {
    return
        (uint16_t)vgetq_lane_u8(v, 0)  + (uint16_t)vgetq_lane_u8(v, 1)  +
        (uint16_t)vgetq_lane_u8(v, 2)  + (uint16_t)vgetq_lane_u8(v, 3)  +
        (uint16_t)vgetq_lane_u8(v, 4)  + (uint16_t)vgetq_lane_u8(v, 5)  +
        (uint16_t)vgetq_lane_u8(v, 6)  + (uint16_t)vgetq_lane_u8(v, 7)  +
        (uint16_t)vgetq_lane_u8(v, 8)  + (uint16_t)vgetq_lane_u8(v, 9)  +
        (uint16_t)vgetq_lane_u8(v, 10) + (uint16_t)vgetq_lane_u8(v, 11) +
        (uint16_t)vgetq_lane_u8(v, 12) + (uint16_t)vgetq_lane_u8(v, 13) +
        (uint16_t)vgetq_lane_u8(v, 14) + (uint16_t)vgetq_lane_u8(v, 15);
}

inline static int16_t vaddvq_s8(int8x16_t v) {
    return
        (int16_t)vgetq_lane_s8(v, 0)  + (int16_t)vgetq_lane_s8(v, 1)  +
        (int16_t)vgetq_lane_s8(v, 2)  + (int16_t)vgetq_lane_s8(v, 3)  +
        (int16_t)vgetq_lane_s8(v, 4)  + (int16_t)vgetq_lane_s8(v, 5)  +
        (int16_t)vgetq_lane_s8(v, 6)  + (int16_t)vgetq_lane_s8(v, 7)  +
        (int16_t)vgetq_lane_s8(v, 8)  + (int16_t)vgetq_lane_s8(v, 9)  +
        (int16_t)vgetq_lane_s8(v, 10) + (int16_t)vgetq_lane_s8(v, 11) +
        (int16_t)vgetq_lane_s8(v, 12) + (int16_t)vgetq_lane_s8(v, 13) +
        (int16_t)vgetq_lane_s8(v, 14) + (int16_t)vgetq_lane_s8(v, 15);
}

inline static int32_t vaddvq_s16(int16x8_t v) {
    return
        (int32_t)vgetq_lane_s16(v, 0) + (int32_t)vgetq_lane_s16(v, 1) +
        (int32_t)vgetq_lane_s16(v, 2) + (int32_t)vgetq_lane_s16(v, 3) +
        (int32_t)vgetq_lane_s16(v, 4) + (int32_t)vgetq_lane_s16(v, 5) +
        (int32_t)vgetq_lane_s16(v, 6) + (int32_t)vgetq_lane_s16(v, 7);
}

inline static uint32_t vaddvq_u16(uint16x8_t v) {
    return
        (uint32_t)vgetq_lane_u16(v, 0) + (uint32_t)vgetq_lane_u16(v, 1) +
        (uint32_t)vgetq_lane_u16(v, 2) + (uint32_t)vgetq_lane_u16(v, 3) +
        (uint32_t)vgetq_lane_u16(v, 4) + (uint32_t)vgetq_lane_u16(v, 5) +
        (uint32_t)vgetq_lane_u16(v, 6) + (uint32_t)vgetq_lane_u16(v, 7);
}

inline static int32_t vaddvq_s32(int32x4_t v) {
    return vgetq_lane_s32(v, 0) + vgetq_lane_s32(v, 1) + vgetq_lane_s32(v, 2) + vgetq_lane_s32(v, 3);
}

inline static float vaddvq_f32(float32x4_t v) {
    return vgetq_lane_f32(v, 0) + vgetq_lane_f32(v, 1) + vgetq_lane_f32(v, 2) + vgetq_lane_f32(v, 3);
}

inline static float vminvq_f32(float32x4_t v) {
    return
        MIN(MIN(vgetq_lane_f32(v, 0), vgetq_lane_f32(v, 1)),
            MIN(vgetq_lane_f32(v, 2), vgetq_lane_f32(v, 3)));
}

inline static float vmaxvq_f32(float32x4_t v) {
    return
        MAX(MAX(vgetq_lane_f32(v, 0), vgetq_lane_f32(v, 1)),
            MAX(vgetq_lane_f32(v, 2), vgetq_lane_f32(v, 3)));
}

inline static int32x4_t vcvtnq_s32_f32(float32x4_t v) {
    int32x4_t res;

    res[0] = roundf(vgetq_lane_f32(v, 0));
    res[1] = roundf(vgetq_lane_f32(v, 1));
    res[2] = roundf(vgetq_lane_f32(v, 2));
    res[3] = roundf(vgetq_lane_f32(v, 3));

    return res;
}

#endif
#endif
#define QK4_0 32
typedef struct {
    ggml_fp16_t d;          // delta
    uint8_t qs[QK4_0 / 2];  // nibbles / quants
} block_q4_0;
static_assert(sizeof(block_q4_0) == sizeof(ggml_fp16_t) + QK4_0 / 2, "wrong q4_0 block size/padding");
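// Layout note: within a block, qs[j] packs element j in its low nibble and
// element j + QK4_0/2 in its high nibble (see the quantize/dequantize loops
// below); each stored quant q in [0, 15] reconstructs as d * (q - 8)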
#define QK4_1 32
typedef struct {
    ggml_fp16_t d;          // delta
    ggml_fp16_t m;          // min
    uint8_t qs[QK4_1 / 2];  // nibbles / quants
} block_q4_1;
static_assert(sizeof(block_q4_1) == 2 * sizeof(ggml_fp16_t) + QK4_1 / 2, "wrong q4_1 block size/padding");
#define QK5_0 32
typedef struct {
    ggml_fp16_t d;          // delta
    uint8_t qh[4];          // 5th bit of quants
    uint8_t qs[QK5_0 / 2];  // nibbles / quants
} block_q5_0;
static_assert(sizeof(block_q5_0) == sizeof(ggml_fp16_t) + sizeof(uint32_t) + QK5_0 / 2, "wrong q5_0 block size/padding");
#define QK5_1 32
typedef struct {
    ggml_fp16_t d;          // delta
    ggml_fp16_t m;          // min
    uint8_t qh[4];          // 5th bit of quants
    uint8_t qs[QK5_1 / 2];  // nibbles / quants
} block_q5_1;
static_assert(sizeof(block_q5_1) == 2 * sizeof(ggml_fp16_t) + sizeof(uint32_t) + QK5_1 / 2, "wrong q5_1 block size/padding");
#define QK8_0 32
typedef struct {
    ggml_fp16_t d;          // delta
    int8_t qs[QK8_0];       // quants
} block_q8_0;
static_assert(sizeof(block_q8_0) == sizeof(ggml_fp16_t) + QK8_0, "wrong q8_0 block size/padding");
#define QK8_1 32
typedef struct {
    float d;                // delta
    float s;                // d * sum(qs[i])
    int8_t qs[QK8_1];       // quants
} block_q8_1;
static_assert(sizeof(block_q8_1) == 2*sizeof(float) + QK8_1, "wrong q8_1 block size/padding");
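// Caching s = d * sum(qs[i]) at quantization time lets dot products against
// q4_1/q5_1 blocks fold the block minimum in with a single multiply-add
// (the min term of the product expands to m * sum over the activation quants)
// instead of re-summing the quants at dot-product time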
// reference implementation for deterministic creation of model files
static void quantize_row_q4_0_reference(const float * restrict x, block_q4_0 * restrict y, int k) {
    static const int qk = QK4_0;
    assert(k % qk == 0);
    const int nb = k / qk;
    for (int i = 0; i < nb; i++) {
        float amax = 0.0f; // absolute max
        float max  = 0.0f;
        for (int j = 0; j < qk; j++) {
            const float v = x[i*qk + j];
            if (amax < fabsf(v)) {
                amax = fabsf(v);
                max  = v;
            }
        }
        const float d  = max / -8;
        const float id = d ? 1.0f/d : 0.0f;
        y[i].d = GGML_FP32_TO_FP16(d);
        for (int j = 0; j < qk/2; ++j) {
            const float x0 = x[i*qk + 0    + j]*id;
            const float x1 = x[i*qk + qk/2 + j]*id;
            const uint8_t xi0 = MIN(15, (int8_t)(x0 + 8.5f));
            const uint8_t xi1 = MIN(15, (int8_t)(x1 + 8.5f));
            y[i].qs[j]  = xi0;
            y[i].qs[j] |= xi1 << 4;
        }
    }
}
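// Worked example: if the signed extremum of a block is -4.0f, then d = -4.0f/-8
// = 0.5f and id = 2.0f; the value -4.0f maps to (int8_t)(-8.0f + 8.5f) = 0,
// which dequantizes back to (0 - 8)*0.5f = -4.0f. Deriving d from the signed
// extremum guarantees the largest-magnitude value lands exactly on a quant level.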
static void quantize_row_q4_0(const float * restrict x, void * restrict y, int k) {
    quantize_row_q4_0_reference(x, y, k);
}
static void quantize_row_q4_1_reference(const float * restrict x, block_q4_1 * restrict y, int k) {
    const int qk = QK4_1;
    assert(k % qk == 0);
    const int nb = k / qk;
    for (int i = 0; i < nb; i++) {
        float min = FLT_MAX;
        float max = -FLT_MAX;
        for (int j = 0; j < qk; j++) {
            const float v = x[i*qk + j];
            if (v < min) min = v;
            if (v > max) max = v;
        }
        const float d  = (max - min) / ((1 << 4) - 1);
        const float id = d ? 1.0f/d : 0.0f;
        y[i].d = GGML_FP32_TO_FP16(d);
        y[i].m = GGML_FP32_TO_FP16(min);
        for (int j = 0; j < qk/2; ++j) {
            const float x0 = (x[i*qk + 0    + j] - min)*id;
            const float x1 = (x[i*qk + qk/2 + j] - min)*id;
            const uint8_t xi0 = MIN(15, (int8_t)(x0 + 0.5f));
            const uint8_t xi1 = MIN(15, (int8_t)(x1 + 0.5f));
            y[i].qs[j]  = xi0;
            y[i].qs[j] |= xi1 << 4;
        }
    }
}
static void quantize_row_q4_1(const float * restrict x, void * restrict y, int k) {
    quantize_row_q4_1_reference(x, y, k);
}
static void quantize_row_q5_0_reference(const float * restrict x, block_q5_0 * restrict y, int k) {
    static const int qk = QK5_0;
    assert(k % qk == 0);
    const int nb = k / qk;
    for (int i = 0; i < nb; i++) {
        float amax = 0.0f; // absolute max
        float max  = 0.0f;
        for (int j = 0; j < qk; j++) {
            const float v = x[i*qk + j];
            if (amax < fabsf(v)) {
                amax = fabsf(v);
                max  = v;
            }
        }
        const float d  = max / -16;
        const float id = d ? 1.0f/d : 0.0f;
        y[i].d = GGML_FP32_TO_FP16(d);
        uint32_t qh = 0;
        for (int j = 0; j < qk/2; ++j) {
            const float x0 = x[i*qk + 0    + j]*id;
            const float x1 = x[i*qk + qk/2 + j]*id;
            const uint8_t xi0 = MIN(31, (int8_t)(x0 + 16.5f));
            const uint8_t xi1 = MIN(31, (int8_t)(x1 + 16.5f));
            y[i].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4);
            // get the 5th bit and store it in qh at the right position
            qh |= ((xi0 & 0x10) >> 4) << (j + 0);
            qh |= ((xi1 & 0x10) >> 4) << (j + qk/2);
        }
        memcpy(&y[i].qh, &qh, sizeof(qh));
    }
}
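// qh layout: bit j of the packed uint32_t holds the 5th bit of low-half
// element j, and bit j + qk/2 holds the 5th bit of high-half element j,
// mirroring the nibble split used in qs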
static void quantize_row_q5_0(const float * restrict x, void * restrict y, int k) {
    quantize_row_q5_0_reference(x, y, k);
}
static void quantize_row_q5_1_reference(const float * restrict x, block_q5_1 * restrict y, int k) {
    const int qk = QK5_1;
    assert(k % qk == 0);
    const int nb = k / qk;
    for (int i = 0; i < nb; i++) {
        float min = FLT_MAX;
        float max = -FLT_MAX;
        for (int j = 0; j < qk; j++) {
            const float v = x[i*qk + j];
            if (v < min) min = v;
            if (v > max) max = v;
        }
        const float d  = (max - min) / ((1 << 5) - 1);
        const float id = d ? 1.0f/d : 0.0f;
        y[i].d = GGML_FP32_TO_FP16(d);
        y[i].m = GGML_FP32_TO_FP16(min);
        uint32_t qh = 0;
        for (int j = 0; j < qk/2; ++j) {
            const float x0 = (x[i*qk + 0    + j] - min)*id;
            const float x1 = (x[i*qk + qk/2 + j] - min)*id;
            const uint8_t xi0 = (uint8_t)(x0 + 0.5f);
            const uint8_t xi1 = (uint8_t)(x1 + 0.5f);
            y[i].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4);
            // get the 5th bit and store it in qh at the right position
            qh |= ((xi0 & 0x10) >> 4) << (j + 0);
            qh |= ((xi1 & 0x10) >> 4) << (j + qk/2);
        }
        memcpy(&y[i].qh, &qh, sizeof(y[i].qh));
    }
}
static void quantize_row_q5_1(const float * restrict x, void * restrict y, int k) {
    quantize_row_q5_1_reference(x, y, k);
}
// reference implementation for deterministic creation of model files
static void quantize_row_q8_0_reference(const float * restrict x, block_q8_0 * restrict y, int k) {
    assert(k % QK8_0 == 0);
    const int nb = k / QK8_0;
    for (int i = 0; i < nb; i++) {
        float amax = 0.0f; // absolute max
        for (int j = 0; j < QK8_0; j++) {
            const float v = x[i*QK8_0 + j];
            amax = MAX(amax, fabsf(v));
        }
        const float d  = amax / ((1 << 7) - 1);
        const float id = d ? 1.0f/d : 0.0f;
        y[i].d = GGML_FP32_TO_FP16(d);
        for (int j = 0; j < QK8_0; ++j) {
            const float x0 = x[i*QK8_0 + j]*id;
            y[i].qs[j] = roundf(x0);
        }
    }
}
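// Worked example: for amax = 2.54f, d = 2.54f/127 = 0.02f and id = 50.0f, so
// each value is quantized as q = roundf(x * 50.0f) and reconstructed as
// q * 0.02f; the maximum magnitude maps to +/-127, the full range this format uses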
static void quantize_row_q8_0(const float * restrict x, void * restrict vy, int k) {
    assert(QK8_0 == 32);
    assert(k % QK8_0 == 0);
    const int nb = k / QK8_0;
    block_q8_0 * restrict y = vy;
#if defined(__ARM_NEON)
    for (int i = 0; i < nb; i++) {
        float32x4_t srcv [8];
        float32x4_t asrcv[8];
        float32x4_t amaxv[8];
        for (int j = 0; j < 8; j++) srcv[j]  = vld1q_f32(x + i*32 + 4*j);
        for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[j]);
        for (int j = 0; j < 4; j++) amaxv[2*j] = vmaxq_f32(asrcv[2*j], asrcv[2*j+1]);
        for (int j = 0; j < 2; j++) amaxv[4*j] = vmaxq_f32(amaxv[4*j], amaxv[4*j+2]);
        for (int j = 0; j < 1; j++) amaxv[8*j] = vmaxq_f32(amaxv[8*j], amaxv[8*j+4]);
        const float amax = vmaxvq_f32(amaxv[0]);
        const float d  = amax / ((1 << 7) - 1);
        const float id = d ? 1.0f/d : 0.0f;
        y[i].d = GGML_FP32_TO_FP16(d);
        for (int j = 0; j < 8; j++) {
            const float32x4_t v  = vmulq_n_f32(srcv[j], id);
            const int32x4_t   vi = vcvtnq_s32_f32(v);
            y[i].qs[4*j + 0] = vgetq_lane_s32(vi, 0);
            y[i].qs[4*j + 1] = vgetq_lane_s32(vi, 1);
            y[i].qs[4*j + 2] = vgetq_lane_s32(vi, 2);
            y[i].qs[4*j + 3] = vgetq_lane_s32(vi, 3);
        }
    }
#elif defined(__wasm_simd128__)
    for (int i = 0; i < nb; i++) {
        v128_t srcv [8];
        v128_t asrcv[8];
        v128_t amaxv[8];
        for (int j = 0; j < 8; j++) srcv[j]  = wasm_v128_load(x + i*32 + 4*j);
        for (int j = 0; j < 8; j++) asrcv[j] = wasm_f32x4_abs(srcv[j]);
        for (int j = 0; j < 4; j++) amaxv[2*j] = wasm_f32x4_max(asrcv[2*j], asrcv[2*j+1]);
        for (int j = 0; j < 2; j++) amaxv[4*j] = wasm_f32x4_max(amaxv[4*j], amaxv[4*j+2]);
        for (int j = 0; j < 1; j++) amaxv[8*j] = wasm_f32x4_max(amaxv[8*j], amaxv[8*j+4]);
        const float amax = MAX(MAX(wasm_f32x4_extract_lane(amaxv[0], 0),
                                   wasm_f32x4_extract_lane(amaxv[0], 1)),
                               MAX(wasm_f32x4_extract_lane(amaxv[0], 2),
                                   wasm_f32x4_extract_lane(amaxv[0], 3)));
        const float d  = amax / ((1 << 7) - 1);
        const float id = d ? 1.0f/d : 0.0f;
        y[i].d = GGML_FP32_TO_FP16(d);
        for (int j = 0; j < 8; j++) {
            const v128_t v  = wasm_f32x4_mul(srcv[j], wasm_f32x4_splat(id));
            const v128_t vi = wasm_i32x4_trunc_sat_f32x4(v);
            y[i].qs[4*j + 0] = wasm_i32x4_extract_lane(vi, 0);
            y[i].qs[4*j + 1] = wasm_i32x4_extract_lane(vi, 1);
            y[i].qs[4*j + 2] = wasm_i32x4_extract_lane(vi, 2);
            y[i].qs[4*j + 3] = wasm_i32x4_extract_lane(vi, 3);
        }
    }
#elif defined(__AVX2__) || defined(__AVX__)
    for (int i = 0; i < nb; i++) {
        // Load elements into 4 AVX vectors
        __m256 v0 = _mm256_loadu_ps( x );
        __m256 v1 = _mm256_loadu_ps( x + 8 );
        __m256 v2 = _mm256_loadu_ps( x + 16 );
        __m256 v3 = _mm256_loadu_ps( x + 24 );
        x += 32;
        // Compute max(abs(e)) for the block
        const __m256 signBit = _mm256_set1_ps( -0.0f );
        __m256 maxAbs = _mm256_andnot_ps( signBit, v0 );
        maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) );
        maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) );
        maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) );
        __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) );
        max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) );
        max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) );
        const float maxScalar = _mm_cvtss_f32( max4 );
        // Quantize these floats
        const float d = maxScalar / 127.f;
        y[i].d = GGML_FP32_TO_FP16(d);
        const float id = ( maxScalar != 0.0f ) ? 127.f / maxScalar : 0.0f;
        const __m256 mul = _mm256_set1_ps( id );
        // Apply the multiplier
        v0 = _mm256_mul_ps( v0, mul );
        v1 = _mm256_mul_ps( v1, mul );
        v2 = _mm256_mul_ps( v2, mul );
        v3 = _mm256_mul_ps( v3, mul );
        // Round to nearest integer
        v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST );
        v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST );
        v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST );
        v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST );
        // Convert floats to integers
        __m256i i0 = _mm256_cvtps_epi32( v0 );
        __m256i i1 = _mm256_cvtps_epi32( v1 );
        __m256i i2 = _mm256_cvtps_epi32( v2 );
        __m256i i3 = _mm256_cvtps_epi32( v3 );
#if defined(__AVX2__)
        // Convert int32 to int16
        i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15
        i2 = _mm256_packs_epi32( i2, i3 ); // 16, 17, 18, 19, 24, 25, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31
        // Convert int16 to int8
        i0 = _mm256_packs_epi16( i0, i2 ); // 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27, 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31
        // We got our precious signed bytes, but the order is now wrong
        // These AVX2 pack instructions process 16-byte pieces independently
        // The following instruction fixes the order
        const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 );
        i0 = _mm256_permutevar8x32_epi32( i0, perm );
        _mm256_storeu_si256((__m256i *)y[i].qs, i0);
#else
        // AVX lacks some of the needed integer instructions,
        // so we split the registers in half and use the SSE analogs of the AVX2 ops
        __m128i ni0 = _mm256_castsi256_si128( i0 );
        __m128i ni1 = _mm256_extractf128_si256( i0, 1);
        __m128i ni2 = _mm256_castsi256_si128( i1 );
        __m128i ni3 = _mm256_extractf128_si256( i1, 1);
        __m128i ni4 = _mm256_castsi256_si128( i2 );
        __m128i ni5 = _mm256_extractf128_si256( i2, 1);
        __m128i ni6 = _mm256_castsi256_si128( i3 );
        __m128i ni7 = _mm256_extractf128_si256( i3, 1);
        // Convert int32 to int16
        ni0 = _mm_packs_epi32( ni0, ni1 );
        ni2 = _mm_packs_epi32( ni2, ni3 );
        ni4 = _mm_packs_epi32( ni4, ni5 );
        ni6 = _mm_packs_epi32( ni6, ni7 );
        // Convert int16 to int8
        ni0 = _mm_packs_epi16( ni0, ni2 );
        ni4 = _mm_packs_epi16( ni4, ni6 );
        _mm_storeu_si128((__m128i *)(y[i].qs +  0), ni0);
        _mm_storeu_si128((__m128i *)(y[i].qs + 16), ni4);
#endif
    }
#else
    // scalar
    quantize_row_q8_0_reference(x, y, k);
#endif
}
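// Note on rounding: the scalar reference uses roundf() (ties away from zero),
// the NEON and AVX paths round ties to even (vcvtnq / _MM_ROUND_NEAREST), and
// the WASM path truncates toward zero (trunc_sat), so results can differ
// slightly between architectures; this is why model files are created with the
// deterministic reference implementation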
// reference implementation for deterministic creation of model files
static void quantize_row_q8_1_reference(const float * restrict x, block_q8_1 * restrict y, int k) {
    assert(QK8_1 == 32);
    assert(k % QK8_1 == 0);
    const int nb = k / QK8_1;
    for (int i = 0; i < nb; i++) {
        float amax = 0.0f; // absolute max
        for (int j = 0; j < QK8_1; j++) {
            const float v = x[i*QK8_1 + j];
            amax = MAX(amax, fabsf(v));
        }
        const float d  = amax / ((1 << 7) - 1);
        const float id = d ? 1.0f/d : 0.0f;
        y[i].d = d;
        int sum = 0;
        for (int j = 0; j < QK8_1/2; ++j) {
            const float v0 = x[i*QK8_1           + j]*id;
            const float v1 = x[i*QK8_1 + QK8_1/2 + j]*id;
            y[i].qs[          j] = roundf(v0);
            y[i].qs[QK8_1/2 + j] = roundf(v1);
            sum += y[i].qs[          j];
            sum += y[i].qs[QK8_1/2 + j];
        }
        y[i].s = sum*d;
    }
}
static void quantize_row_q8_1(const float * restrict x, void * restrict vy, int k) {
    assert(k % QK8_1 == 0);
    const int nb = k / QK8_1;
    block_q8_1 * restrict y = vy;
#if defined(__ARM_NEON)
    for (int i = 0; i < nb; i++) {
        float32x4_t srcv [8];
        float32x4_t asrcv[8];
        float32x4_t amaxv[8];
        for (int j = 0; j < 8; j++) srcv[j]  = vld1q_f32(x + i*32 + 4*j);
        for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[j]);
        for (int j = 0; j < 4; j++) amaxv[2*j] = vmaxq_f32(asrcv[2*j], asrcv[2*j+1]);
        for (int j = 0; j < 2; j++) amaxv[4*j] = vmaxq_f32(amaxv[4*j], amaxv[4*j+2]);
        for (int j = 0; j < 1; j++) amaxv[8*j] = vmaxq_f32(amaxv[8*j], amaxv[8*j+4]);
        const float amax = vmaxvq_f32(amaxv[0]);
        const float d  = amax / ((1 << 7) - 1);
        const float id = d ? 1.0f/d : 0.0f;
        y[i].d = d;
        int32x4_t accv = vdupq_n_s32(0);
        for (int j = 0; j < 8; j++) {
            const float32x4_t v  = vmulq_n_f32(srcv[j], id);
            const int32x4_t   vi = vcvtnq_s32_f32(v);
            y[i].qs[4*j + 0] = vgetq_lane_s32(vi, 0);
            y[i].qs[4*j + 1] = vgetq_lane_s32(vi, 1);
            y[i].qs[4*j + 2] = vgetq_lane_s32(vi, 2);
            y[i].qs[4*j + 3] = vgetq_lane_s32(vi, 3);
            accv = vaddq_s32(accv, vi);
        }
        y[i].s = d * vaddvq_s32(accv);
    }
#elif defined(__wasm_simd128__)
    for (int i = 0; i < nb; i++) {
        v128_t srcv [8];
        v128_t asrcv[8];
        v128_t amaxv[8];
        for (int j = 0; j < 8; j++) srcv[j]  = wasm_v128_load(x + i*32 + 4*j);
        for (int j = 0; j < 8; j++) asrcv[j] = wasm_f32x4_abs(srcv[j]);
        for (int j = 0; j < 4; j++) amaxv[2*j] = wasm_f32x4_max(asrcv[2*j], asrcv[2*j+1]);
        for (int j = 0; j < 2; j++) amaxv[4*j] = wasm_f32x4_max(amaxv[4*j], amaxv[4*j+2]);
        for (int j = 0; j < 1; j++) amaxv[8*j] = wasm_f32x4_max(amaxv[8*j], amaxv[8*j+4]);
        const float amax = MAX(MAX(wasm_f32x4_extract_lane(amaxv[0], 0),
                                   wasm_f32x4_extract_lane(amaxv[0], 1)),
                               MAX(wasm_f32x4_extract_lane(amaxv[0], 2),
                                   wasm_f32x4_extract_lane(amaxv[0], 3)));
        const float d  = amax / ((1 << 7) - 1);
        const float id = d ? 1.0f/d : 0.0f;
        y[i].d = d;
        v128_t accv = wasm_i32x4_splat(0);
        for (int j = 0; j < 8; j++) {
            const v128_t v  = wasm_f32x4_mul(srcv[j], wasm_f32x4_splat(id));
            const v128_t vi = wasm_i32x4_trunc_sat_f32x4(v);
            y[i].qs[4*j + 0] = wasm_i32x4_extract_lane(vi, 0);
            y[i].qs[4*j + 1] = wasm_i32x4_extract_lane(vi, 1);
            y[i].qs[4*j + 2] = wasm_i32x4_extract_lane(vi, 2);
            y[i].qs[4*j + 3] = wasm_i32x4_extract_lane(vi, 3);
            accv = wasm_i32x4_add(accv, vi);
        }
        y[i].s = d * (wasm_i32x4_extract_lane(accv, 0) +
                      wasm_i32x4_extract_lane(accv, 1) +
                      wasm_i32x4_extract_lane(accv, 2) +
                      wasm_i32x4_extract_lane(accv, 3));
    }
#elif defined(__AVX2__) || defined(__AVX__)
    for (int i = 0; i < nb; i++) {
        // Load elements into 4 AVX vectors
        __m256 v0 = _mm256_loadu_ps( x );
        __m256 v1 = _mm256_loadu_ps( x + 8 );
        __m256 v2 = _mm256_loadu_ps( x + 16 );
        __m256 v3 = _mm256_loadu_ps( x + 24 );
        x += 32;
        // Compute max(abs(e)) for the block
        const __m256 signBit = _mm256_set1_ps( -0.0f );
        __m256 maxAbs = _mm256_andnot_ps( signBit, v0 );
        maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) );
        maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) );
        maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) );
        __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) );
        max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) );
        max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) );
        const float maxScalar = _mm_cvtss_f32( max4 );
        // Quantize these floats
        const float d = maxScalar / 127.f;
        y[i].d = d;
        const float id = ( maxScalar != 0.0f ) ? 127.f / maxScalar : 0.0f;
        const __m256 mul = _mm256_set1_ps( id );
        // Apply the multiplier
        v0 = _mm256_mul_ps( v0, mul );
        v1 = _mm256_mul_ps( v1, mul );
        v2 = _mm256_mul_ps( v2, mul );
        v3 = _mm256_mul_ps( v3, mul );
        // Round to nearest integer
        v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST );
        v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST );
        v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST );
        v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST );
        // Convert floats to integers
        __m256i i0 = _mm256_cvtps_epi32( v0 );
        __m256i i1 = _mm256_cvtps_epi32( v1 );
        __m256i i2 = _mm256_cvtps_epi32( v2 );
        __m256i i3 = _mm256_cvtps_epi32( v3 );
#if defined(__AVX2__)
        // Compute the sum of the quants and set y[i].s
        y[i].s = d * hsum_i32_8(_mm256_add_epi32(_mm256_add_epi32(i0, i1), _mm256_add_epi32(i2, i3)));
        // Convert int32 to int16
        i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15
        i2 = _mm256_packs_epi32( i2, i3 ); // 16, 17, 18, 19, 24, 25, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31
        // Convert int16 to int8
        i0 = _mm256_packs_epi16( i0, i2 ); // 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27, 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31
        // We got our precious signed bytes, but the order is now wrong
        // These AVX2 pack instructions process 16-byte pieces independently
        // The following instruction fixes the order
        const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 );
        i0 = _mm256_permutevar8x32_epi32( i0, perm );
        _mm256_storeu_si256((__m256i *)y[i].qs, i0);
#else
        // AVX lacks some of the needed integer instructions,
        // so we split the registers in half and use the SSE analogs of the AVX2 ops
        __m128i ni0 = _mm256_castsi256_si128( i0 );
        __m128i ni1 = _mm256_extractf128_si256( i0, 1);
        __m128i ni2 = _mm256_castsi256_si128( i1 );
        __m128i ni3 = _mm256_extractf128_si256( i1, 1);
        __m128i ni4 = _mm256_castsi256_si128( i2 );
        __m128i ni5 = _mm256_extractf128_si256( i2, 1);
        __m128i ni6 = _mm256_castsi256_si128( i3 );
        __m128i ni7 = _mm256_extractf128_si256( i3, 1);
        // Compute the sum of the quants and set y[i].s
        const __m128i s0 = _mm_add_epi32(_mm_add_epi32(ni0, ni1), _mm_add_epi32(ni2, ni3));
        const __m128i s1 = _mm_add_epi32(_mm_add_epi32(ni4, ni5), _mm_add_epi32(ni6, ni7));
        y[i].s = d * hsum_i32_4(_mm_add_epi32(s0, s1));
        // Convert int32 to int16
        ni0 = _mm_packs_epi32( ni0, ni1 );
        ni2 = _mm_packs_epi32( ni2, ni3 );
        ni4 = _mm_packs_epi32( ni4, ni5 );
        ni6 = _mm_packs_epi32( ni6, ni7 );
        // Convert int16 to int8
        ni0 = _mm_packs_epi16( ni0, ni2 );
        ni4 = _mm_packs_epi16( ni4, ni6 );
        _mm_storeu_si128((__m128i *)(y[i].qs +  0), ni0);
        _mm_storeu_si128((__m128i *)(y[i].qs + 16), ni4);
#endif
    }
#else
    // scalar
    quantize_row_q8_1_reference(x, y, k);
#endif
}
static void dequantize_row_q4_0(const block_q4_0 * restrict x, float * restrict y, int k) {
    static const int qk = QK4_0;
    assert(k % qk == 0);
    const int nb = k / qk;
    for (int i = 0; i < nb; i++) {
        const float d = GGML_FP16_TO_FP32(x[i].d);
        for (int j = 0; j < qk/2; ++j) {
            const int x0 = (x[i].qs[j] & 0x0F) - 8;
            const int x1 = (x[i].qs[j] >>   4) - 8;
            y[i*qk + j + 0   ] = x0*d;
            y[i*qk + j + qk/2] = x1*d;
        }
    }
}
static void dequantize_row_q4_1(const block_q4_1 * restrict x, float * restrict y, int k) {
    static const int qk = QK4_1;
    assert(k % qk == 0);
    const int nb = k / qk;
    for (int i = 0; i < nb; i++) {
        const float d = GGML_FP16_TO_FP32(x[i].d);
        const float m = GGML_FP16_TO_FP32(x[i].m);
        for (int j = 0; j < qk/2; ++j) {
            const int x0 = (x[i].qs[j] & 0x0F);
            const int x1 = (x[i].qs[j] >>   4);
            y[i*qk + j + 0   ] = x0*d + m;
            y[i*qk + j + qk/2] = x1*d + m;
        }
    }
}
static void dequantize_row_q5_0(const block_q5_0 * restrict x, float * restrict y, int k) {
    static const int qk = QK5_0;
    assert(k % qk == 0);
    const int nb = k / qk;
    for (int i = 0; i < nb; i++) {
        const float d = GGML_FP16_TO_FP32(x[i].d);
        uint32_t qh;
        memcpy(&qh, x[i].qh, sizeof(qh));
        for (int j = 0; j < qk/2; ++j) {
            const uint8_t xh_0 = ((qh >> (j +  0)) << 4) & 0x10;
            const uint8_t xh_1 = ((qh >> (j + 12))     ) & 0x10;
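            // the shifts above place bit j of qh into bit 4 of xh_0, and bit
            // j + 16 (right-shifted by j + 12) into bit 4 of xh_1, restoring
            // the 5th bit of each nibble pair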
            const int32_t x0 = ((x[i].qs[j] & 0x0F) | xh_0) - 16;
            const int32_t x1 = ((x[i].qs[j] >>   4) | xh_1) - 16;
            y[i*qk + j + 0   ] = x0*d;
            y[i*qk + j + qk/2] = x1*d;
        }
    }
}
static void dequantize_row_q5_1(const block_q5_1 * restrict x, float * restrict y, int k) {
    static const int qk = QK5_1;
    assert(k % qk == 0);
    const int nb = k / qk;
    for (int i = 0; i < nb; i++) {
        const float d = GGML_FP16_TO_FP32(x[i].d);
        const float m = GGML_FP16_TO_FP32(x[i].m);
        uint32_t qh;
        memcpy(&qh, x[i].qh, sizeof(qh));
        for (int j = 0; j < qk/2; ++j) {
            const uint8_t xh_0 = ((qh >> (j +  0)) << 4) & 0x10;
            const uint8_t xh_1 = ((qh >> (j + 12))     ) & 0x10;
            const int x0 = (x[i].qs[j] & 0x0F) | xh_0;
            const int x1 = (x[i].qs[j] >>   4) | xh_1;
            y[i*qk + j + 0   ] = x0*d + m;
            y[i*qk + j + qk/2] = x1*d + m;
        }
    }
}
static void dequantize_row_q8_0(const void * restrict vx, float * restrict y, int k) {
    static const int qk = QK8_0;
    assert(k % qk == 0);
    const int nb = k / qk;
    const block_q8_0 * restrict x = vx;
    for (int i = 0; i < nb; i++) {
        const float d = GGML_FP16_TO_FP32(x[i].d);
        for (int j = 0; j < qk; ++j) {
            y[i*qk + j] = x[i].qs[j]*d;
        }
    }
}
static void ggml_vec_dot_q4_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy);
static void ggml_vec_dot_q4_1_q8_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy);
static void ggml_vec_dot_q5_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy);
static void ggml_vec_dot_q5_1_q8_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy);
static void ggml_vec_dot_q8_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy);
static const quantize_fns_t quantize_fns[GGML_TYPE_COUNT] = {
    [GGML_TYPE_Q4_0] = {
        .dequantize_row_q         = (dequantize_row_q_t) dequantize_row_q4_0,
        .quantize_row_q           = quantize_row_q4_0,
        .quantize_row_q_reference = (quantize_row_q_t) quantize_row_q4_0_reference,
        .quantize_row_q_dot       = quantize_row_q8_0,
        .vec_dot_q                = ggml_vec_dot_q4_0_q8_0,
        .vec_dot_type             = GGML_TYPE_Q8_0,
    },
    [GGML_TYPE_Q4_1] = {
        .dequantize_row_q         = (dequantize_row_q_t) dequantize_row_q4_1,
        .quantize_row_q           = quantize_row_q4_1,
        .quantize_row_q_reference = (quantize_row_q_t) quantize_row_q4_1_reference,
        .quantize_row_q_dot       = quantize_row_q8_1,
        .vec_dot_q                = ggml_vec_dot_q4_1_q8_1,
        .vec_dot_type             = GGML_TYPE_Q8_1,
    },
    [GGML_TYPE_Q5_0] = {
        .dequantize_row_q         = (dequantize_row_q_t) dequantize_row_q5_0,
        .quantize_row_q           = quantize_row_q5_0,
        .quantize_row_q_reference = (quantize_row_q_t) quantize_row_q5_0_reference,
        .quantize_row_q_dot       = quantize_row_q8_0,
        .vec_dot_q                = ggml_vec_dot_q5_0_q8_0,
        .vec_dot_type             = GGML_TYPE_Q8_0,
    },
    [GGML_TYPE_Q5_1] = {
        .dequantize_row_q         = (dequantize_row_q_t) dequantize_row_q5_1,
        .quantize_row_q           = quantize_row_q5_1,
        .quantize_row_q_reference = (quantize_row_q_t) quantize_row_q5_1_reference,
        .quantize_row_q_dot       = quantize_row_q8_1,
        .vec_dot_q                = ggml_vec_dot_q5_1_q8_1,
        .vec_dot_type             = GGML_TYPE_Q8_1,
    },
    [GGML_TYPE_Q8_0] = {
        .dequantize_row_q         = dequantize_row_q8_0,
        .quantize_row_q           = quantize_row_q8_0,
        .quantize_row_q_reference = (quantize_row_q_t) quantize_row_q8_0_reference,
        .quantize_row_q_dot       = quantize_row_q8_0,
        .vec_dot_q                = ggml_vec_dot_q8_0_q8_0,
        .vec_dot_type             = GGML_TYPE_Q8_0,
    },
    [GGML_TYPE_Q8_1] = {
        .dequantize_row_q         = NULL, // TODO
        .quantize_row_q           = quantize_row_q8_1,
        .quantize_row_q_reference = (quantize_row_q_t) quantize_row_q8_1_reference,
        .quantize_row_q_dot       = quantize_row_q8_1,
        .vec_dot_q                = NULL, // TODO
        .vec_dot_type             = GGML_TYPE_Q8_1,
    },
#ifdef GGML_USE_K_QUANTS
    [GGML_TYPE_Q2_K] = {
        .dequantize_row_q         = (dequantize_row_q_t) dequantize_row_q2_K,
        .quantize_row_q           = quantize_row_q2_K,
        .quantize_row_q_reference = (quantize_row_q_t) quantize_row_q2_K_reference,
        .quantize_row_q_dot       = quantize_row_q8_K,
        .vec_dot_q                = ggml_vec_dot_q2_K_q8_K,
        .vec_dot_type             = GGML_TYPE_Q8_K,
    },
    [GGML_TYPE_Q3_K] = {
        .dequantize_row_q         = (dequantize_row_q_t) dequantize_row_q3_K,
        .quantize_row_q           = quantize_row_q3_K,
        .quantize_row_q_reference = (quantize_row_q_t) quantize_row_q3_K_reference,
        .quantize_row_q_dot       = quantize_row_q8_K,
        .vec_dot_q                = ggml_vec_dot_q3_K_q8_K,
        .vec_dot_type             = GGML_TYPE_Q8_K,
    },
    [GGML_TYPE_Q4_K] = {
        .dequantize_row_q         = (dequantize_row_q_t) dequantize_row_q4_K,
        .quantize_row_q           = quantize_row_q4_K,
        .quantize_row_q_reference = (quantize_row_q_t) quantize_row_q4_K_reference,
        .quantize_row_q_dot       = quantize_row_q8_K,
        .vec_dot_q                = ggml_vec_dot_q4_K_q8_K,
        .vec_dot_type             = GGML_TYPE_Q8_K,
    },
    [GGML_TYPE_Q5_K] = {
        .dequantize_row_q         = (dequantize_row_q_t) dequantize_row_q5_K,
        .quantize_row_q           = quantize_row_q5_K,
        .quantize_row_q_reference = (quantize_row_q_t) quantize_row_q5_K_reference,
        .quantize_row_q_dot       = quantize_row_q8_K,
        .vec_dot_q                = ggml_vec_dot_q5_K_q8_K,
        .vec_dot_type             = GGML_TYPE_Q8_K,
    },
    [GGML_TYPE_Q6_K] = {
        .dequantize_row_q         = (dequantize_row_q_t) dequantize_row_q6_K,
        .quantize_row_q           = quantize_row_q6_K,
        .quantize_row_q_reference = (quantize_row_q_t) quantize_row_q6_K_reference,
        .quantize_row_q_dot       = quantize_row_q8_K,
        .vec_dot_q                = ggml_vec_dot_q6_K_q8_K,
        .vec_dot_type             = GGML_TYPE_Q8_K,
    },
#endif
};
// For internal test use
quantize_fns_t ggml_internal_get_quantize_fn(size_t i) {
    GGML_ASSERT(i < GGML_TYPE_COUNT);
    return quantize_fns[i];
}
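// Example use (illustrative sketch, not part of this file): fetch the table
// entry for a type and round-trip a row through quantize/dequantize; n must be
// a multiple of the type's block size (QK4_0 here):
//
//     const quantize_fns_t fns = ggml_internal_get_quantize_fn(GGML_TYPE_Q4_0);
//     fns.quantize_row_q(src, blocks, n);
//     fns.dequantize_row_q(blocks, dst, n);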
//
// simd mappings
//
// we define a common set of C macros which map to specific intrinsics based on the current architecture
// we then implement the fundamental computation operations below using only these macros
// adding support for a new architecture requires defining the corresponding SIMD macros
//
// GGML_F32_STEP / GGML_F16_STEP
//   number of elements to process in a single step
//
// GGML_F32_EPR / GGML_F16_EPR
//   number of elements that fit in a single register
//
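// GGML_F32_ARR / GGML_F16_ARR (defined after the per-architecture blocks) is
// STEP / EPR, i.e. the number of registers kept in flight per step; e.g. for
// F32 NEON below, 16 / 4 = 4 accumulator registers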
#if defined(__ARM_NEON) && defined(__ARM_FEATURE_FMA)
#define GGML_SIMD
// F32 NEON
#define GGML_F32_STEP 16
#define GGML_F32_EPR  4
#define GGML_F32x4              float32x4_t
#define GGML_F32x4_ZERO         vdupq_n_f32(0.0f)
#define GGML_F32x4_SET1(x)      vdupq_n_f32(x)
#define GGML_F32x4_LOAD         vld1q_f32
#define GGML_F32x4_STORE        vst1q_f32
#define GGML_F32x4_FMA(a, b, c) vfmaq_f32(a, b, c)
#define GGML_F32x4_ADD          vaddq_f32
#define GGML_F32x4_MUL          vmulq_f32
#define GGML_F32x4_REDUCE_ONE(x) vaddvq_f32(x)
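// the REDUCE macros below sum the GGML_F32_ARR partial accumulators pairwise
// down to x[0] (the three fixed halving steps cover any ARR up to 8), then
// reduce x[0] horizontally to a scalar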
#define GGML_F32x4_REDUCE(res, x)              \
{                                              \
    int offset = GGML_F32_ARR >> 1;            \
    for (int i = 0; i < offset; ++i) {         \
        x[i] = vaddq_f32(x[i], x[offset+i]);   \
    }                                          \
    offset >>= 1;                              \
    for (int i = 0; i < offset; ++i) {         \
        x[i] = vaddq_f32(x[i], x[offset+i]);   \
    }                                          \
    offset >>= 1;                              \
    for (int i = 0; i < offset; ++i) {         \
        x[i] = vaddq_f32(x[i], x[offset+i]);   \
    }                                          \
    res = GGML_F32x4_REDUCE_ONE(x[0]);         \
}
#define GGML_F32_VEC        GGML_F32x4
#define GGML_F32_VEC_ZERO   GGML_F32x4_ZERO
#define GGML_F32_VEC_SET1   GGML_F32x4_SET1
#define GGML_F32_VEC_LOAD   GGML_F32x4_LOAD
#define GGML_F32_VEC_STORE  GGML_F32x4_STORE
#define GGML_F32_VEC_FMA    GGML_F32x4_FMA
#define GGML_F32_VEC_ADD    GGML_F32x4_ADD
#define GGML_F32_VEC_MUL    GGML_F32x4_MUL
#define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE
// F16 NEON
#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
#define GGML_F16_STEP 32
#define GGML_F16_EPR  8
#define GGML_F16x8              float16x8_t
#define GGML_F16x8_ZERO         vdupq_n_f16(0.0f)
#define GGML_F16x8_SET1(x)      vdupq_n_f16(x)
#define GGML_F16x8_LOAD         vld1q_f16
#define GGML_F16x8_STORE        vst1q_f16
#define GGML_F16x8_FMA(a, b, c) vfmaq_f16(a, b, c)
#define GGML_F16x8_ADD          vaddq_f16
#define GGML_F16x8_MUL          vmulq_f16
#define GGML_F16x8_REDUCE(res, x)                                 \
{                                                                 \
    int offset = GGML_F16_ARR >> 1;                               \
    for (int i = 0; i < offset; ++i) {                            \
        x[i] = vaddq_f16(x[i], x[offset+i]);                      \
    }                                                             \
    offset >>= 1;                                                 \
    for (int i = 0; i < offset; ++i) {                            \
        x[i] = vaddq_f16(x[i], x[offset+i]);                      \
    }                                                             \
    offset >>= 1;                                                 \
    for (int i = 0; i < offset; ++i) {                            \
        x[i] = vaddq_f16(x[i], x[offset+i]);                      \
    }                                                             \
    const float32x4_t t0 = vcvt_f32_f16(vget_low_f16 (x[0]));     \
    const float32x4_t t1 = vcvt_f32_f16(vget_high_f16(x[0]));     \
    res = (ggml_float) vaddvq_f32(vaddq_f32(t0, t1));             \
}
#define GGML_F16_VEC                GGML_F16x8
#define GGML_F16_VEC_ZERO           GGML_F16x8_ZERO
#define GGML_F16_VEC_SET1           GGML_F16x8_SET1
#define GGML_F16_VEC_LOAD(p, i)     GGML_F16x8_LOAD(p)
#define GGML_F16_VEC_STORE(p, r, i) GGML_F16x8_STORE(p, r[i])
#define GGML_F16_VEC_FMA            GGML_F16x8_FMA
#define GGML_F16_VEC_ADD            GGML_F16x8_ADD
#define GGML_F16_VEC_MUL            GGML_F16x8_MUL
#define GGML_F16_VEC_REDUCE         GGML_F16x8_REDUCE
#else
// if FP16 vector arithmetic is not supported, we use FP32 instead
// and take advantage of the vcvt_ functions to convert to/from FP16
#define GGML_F16_STEP 16
#define GGML_F16_EPR  4
#define GGML_F32Cx4              float32x4_t
#define GGML_F32Cx4_ZERO         vdupq_n_f32(0.0f)
#define GGML_F32Cx4_SET1(x)      vdupq_n_f32(x)
#define GGML_F32Cx4_LOAD(x)      vcvt_f32_f16(vld1_f16(x))
#define GGML_F32Cx4_STORE(x, y)  vst1_f16(x, vcvt_f16_f32(y))
#define GGML_F32Cx4_FMA(a, b, c) vfmaq_f32(a, b, c)
#define GGML_F32Cx4_ADD          vaddq_f32
#define GGML_F32Cx4_MUL          vmulq_f32
#define GGML_F32Cx4_REDUCE       GGML_F32x4_REDUCE
#define GGML_F16_VEC                GGML_F32Cx4
#define GGML_F16_VEC_ZERO           GGML_F32Cx4_ZERO
#define GGML_F16_VEC_SET1           GGML_F32Cx4_SET1
#define GGML_F16_VEC_LOAD(p, i)     GGML_F32Cx4_LOAD(p)
#define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx4_STORE(p, r[i])
#define GGML_F16_VEC_FMA            GGML_F32Cx4_FMA
#define GGML_F16_VEC_ADD            GGML_F32Cx4_ADD
#define GGML_F16_VEC_MUL            GGML_F32Cx4_MUL
#define GGML_F16_VEC_REDUCE         GGML_F32Cx4_REDUCE
#endif
#elif defined(__AVX__)
#define GGML_SIMD
// F32 AVX
#define GGML_F32_STEP 32
#define GGML_F32_EPR  8
#define GGML_F32x8         __m256
#define GGML_F32x8_ZERO    _mm256_setzero_ps()
#define GGML_F32x8_SET1(x) _mm256_set1_ps(x)
#define GGML_F32x8_LOAD    _mm256_loadu_ps
#define GGML_F32x8_STORE   _mm256_storeu_ps
#if defined(__FMA__)
#define GGML_F32x8_FMA(a, b, c) _mm256_fmadd_ps(b, c, a)
#else
#define GGML_F32x8_FMA(a, b, c) _mm256_add_ps(_mm256_mul_ps(b, c), a)
#endif
#define GGML_F32x8_ADD _mm256_add_ps
#define GGML_F32x8_MUL _mm256_mul_ps
#define GGML_F32x8_REDUCE(res, x)                                 \
{                                                                 \
    int offset = GGML_F32_ARR >> 1;                               \
    for (int i = 0; i < offset; ++i) {                            \
        x[i] = _mm256_add_ps(x[i], x[offset+i]);                  \
    }                                                             \
    offset >>= 1;                                                 \
    for (int i = 0; i < offset; ++i) {                            \
        x[i] = _mm256_add_ps(x[i], x[offset+i]);                  \
    }                                                             \
    offset >>= 1;                                                 \
    for (int i = 0; i < offset; ++i) {                            \
        x[i] = _mm256_add_ps(x[i], x[offset+i]);                  \
    }                                                             \
    const __m128 t0 = _mm_add_ps(_mm256_castps256_ps128(x[0]),    \
                                 _mm256_extractf128_ps(x[0], 1)); \
    const __m128 t1 = _mm_hadd_ps(t0, t0);                        \
    res = _mm_cvtss_f32(_mm_hadd_ps(t1, t1));                     \
}
// TODO: is this optimal?
#define GGML_F32_VEC        GGML_F32x8
#define GGML_F32_VEC_ZERO   GGML_F32x8_ZERO
#define GGML_F32_VEC_SET1   GGML_F32x8_SET1
#define GGML_F32_VEC_LOAD   GGML_F32x8_LOAD
#define GGML_F32_VEC_STORE  GGML_F32x8_STORE
#define GGML_F32_VEC_FMA    GGML_F32x8_FMA
#define GGML_F32_VEC_ADD    GGML_F32x8_ADD
#define GGML_F32_VEC_MUL    GGML_F32x8_MUL
#define GGML_F32_VEC_REDUCE GGML_F32x8_REDUCE
// F16 AVX
#define GGML_F16_STEP 32
#define GGML_F16_EPR  8
// F16 arithmetic is not supported by AVX, so we use F32 instead
#define GGML_F32Cx8         __m256
#define GGML_F32Cx8_ZERO    _mm256_setzero_ps()
#define GGML_F32Cx8_SET1(x) _mm256_set1_ps(x)
#if defined(__F16C__)
// the _mm256_cvt intrinsics require F16C
#define GGML_F32Cx8_LOAD(x)     _mm256_cvtph_ps(_mm_loadu_si128((__m128i *)(x)))
#define GGML_F32Cx8_STORE(x, y) _mm_storeu_si128((__m128i *)(x), _mm256_cvtps_ph(y, 0))
#else
static inline __m256 __avx_f32cx8_load(ggml_fp16_t *x) {
    float tmp[8];
    for (int i = 0; i < 8; i++) {
        tmp[i] = GGML_FP16_TO_FP32(x[i]);
    }
    return _mm256_loadu_ps(tmp);
}
static inline void __avx_f32cx8_store(ggml_fp16_t *x, __m256 y) {
    float arr[8];
    _mm256_storeu_ps(arr, y);
    for (int i = 0; i < 8; i++)
        x[i] = GGML_FP32_TO_FP16(arr[i]);
}
#define GGML_F32Cx8_LOAD(x)     __avx_f32cx8_load(x)
#define GGML_F32Cx8_STORE(x, y) __avx_f32cx8_store(x, y)
#endif
#define GGML_F32Cx8_FMA    GGML_F32x8_FMA
#define GGML_F32Cx8_ADD    _mm256_add_ps
#define GGML_F32Cx8_MUL    _mm256_mul_ps
#define GGML_F32Cx8_REDUCE GGML_F32x8_REDUCE
#define GGML_F16_VEC                GGML_F32Cx8
#define GGML_F16_VEC_ZERO           GGML_F32Cx8_ZERO
#define GGML_F16_VEC_SET1           GGML_F32Cx8_SET1
#define GGML_F16_VEC_LOAD(p, i)     GGML_F32Cx8_LOAD(p)
#define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx8_STORE(p, r[i])
#define GGML_F16_VEC_FMA            GGML_F32Cx8_FMA
#define GGML_F16_VEC_ADD            GGML_F32Cx8_ADD
#define GGML_F16_VEC_MUL            GGML_F32Cx8_MUL
#define GGML_F16_VEC_REDUCE         GGML_F32Cx8_REDUCE
#elif defined(__POWER9_VECTOR__)
#define GGML_SIMD
// F32 POWER9
#define GGML_F32_STEP 32
#define GGML_F32_EPR  4
#define GGML_F32x4              vector float
#define GGML_F32x4_ZERO         0.0f
#define GGML_F32x4_SET1         vec_splats
#define GGML_F32x4_LOAD(p)      vec_xl(0, p)
#define GGML_F32x4_STORE(p, r)  vec_xst(r, 0, p)
#define GGML_F32x4_FMA(a, b, c) vec_madd(b, c, a)
#define GGML_F32x4_ADD          vec_add
#define GGML_F32x4_MUL          vec_mul
#define GGML_F32x4_REDUCE(res, x)            \
{                                            \
    int offset = GGML_F32_ARR >> 1;          \
    for (int i = 0; i < offset; ++i) {       \
        x[i] = vec_add(x[i], x[offset+i]);   \
    }                                        \
    offset >>= 1;                            \
    for (int i = 0; i < offset; ++i) {       \
        x[i] = vec_add(x[i], x[offset+i]);   \
    }                                        \
    offset >>= 1;                            \
    for (int i = 0; i < offset; ++i) {       \
        x[i] = vec_add(x[i], x[offset+i]);   \
    }                                        \
    res = vec_extract(x[0], 0) +             \
          vec_extract(x[0], 1) +             \
          vec_extract(x[0], 2) +             \
          vec_extract(x[0], 3);              \
}
#define GGML_F32_VEC        GGML_F32x4
#define GGML_F32_VEC_ZERO   GGML_F32x4_ZERO
#define GGML_F32_VEC_SET1   GGML_F32x4_SET1
#define GGML_F32_VEC_LOAD   GGML_F32x4_LOAD
#define GGML_F32_VEC_STORE  GGML_F32x4_STORE
#define GGML_F32_VEC_FMA    GGML_F32x4_FMA
#define GGML_F32_VEC_ADD    GGML_F32x4_ADD
#define GGML_F32_VEC_MUL    GGML_F32x4_MUL
#define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE
// F16 POWER9
#define GGML_F16_STEP       GGML_F32_STEP
#define GGML_F16_EPR        GGML_F32_EPR
#define GGML_F16_VEC        GGML_F32x4
#define GGML_F16_VEC_ZERO   GGML_F32x4_ZERO
#define GGML_F16_VEC_SET1   GGML_F32x4_SET1
#define GGML_F16_VEC_FMA    GGML_F32x4_FMA
#define GGML_F16_VEC_REDUCE GGML_F32x4_REDUCE
// Use vec_xl, not vec_ld, in case the load address is not aligned.
#define GGML_F16_VEC_LOAD(p, i) (i & 0x1) ?                       \
    vec_extract_fp32_from_shorth(vec_xl(0, p - GGML_F16_EPR)) :   \
    vec_extract_fp32_from_shortl(vec_xl(0, p))
#define GGML_ENDIAN_BYTE(i) ((unsigned char *)&(uint16_t){1})[i]
#define GGML_F16_VEC_STORE(p, r, i)                               \
    if (i & 0x1)                                                  \
        vec_xst(vec_pack_to_short_fp32(r[i - GGML_ENDIAN_BYTE(1)],  \
                                       r[i - GGML_ENDIAN_BYTE(0)]), \
                0, p - GGML_F16_EPR)
#elif defined(__wasm_simd128__)
#define GGML_SIMD
// F32 WASM
#define GGML_F32_STEP 16
#define GGML_F32_EPR  4
#define GGML_F32x4              v128_t
#define GGML_F32x4_ZERO         wasm_f32x4_splat(0.0f)
#define GGML_F32x4_SET1(x)      wasm_f32x4_splat(x)
#define GGML_F32x4_LOAD         wasm_v128_load
#define GGML_F32x4_STORE        wasm_v128_store
#define GGML_F32x4_FMA(a, b, c) wasm_f32x4_add(wasm_f32x4_mul(b, c), a)
#define GGML_F32x4_ADD          wasm_f32x4_add
#define GGML_F32x4_MUL          wasm_f32x4_mul
#define GGML_F32x4_REDUCE(res, x)                     \
{                                                     \
    int offset = GGML_F32_ARR >> 1;                   \
    for (int i = 0; i < offset; ++i) {                \
        x[i] = wasm_f32x4_add(x[i], x[offset+i]);     \
    }                                                 \
    offset >>= 1;                                     \
    for (int i = 0; i < offset; ++i) {                \
        x[i] = wasm_f32x4_add(x[i], x[offset+i]);     \
    }                                                 \
    offset >>= 1;                                     \
    for (int i = 0; i < offset; ++i) {                \
        x[i] = wasm_f32x4_add(x[i], x[offset+i]);     \
    }                                                 \
    res = wasm_f32x4_extract_lane(x[0], 0) +          \
          wasm_f32x4_extract_lane(x[0], 1) +          \
          wasm_f32x4_extract_lane(x[0], 2) +          \
          wasm_f32x4_extract_lane(x[0], 3);           \
}
#define GGML_F32_VEC        GGML_F32x4
#define GGML_F32_VEC_ZERO   GGML_F32x4_ZERO
#define GGML_F32_VEC_SET1   GGML_F32x4_SET1
#define GGML_F32_VEC_LOAD   GGML_F32x4_LOAD
#define GGML_F32_VEC_STORE  GGML_F32x4_STORE
#define GGML_F32_VEC_FMA    GGML_F32x4_FMA
#define GGML_F32_VEC_ADD    GGML_F32x4_ADD
#define GGML_F32_VEC_MUL    GGML_F32x4_MUL
#define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE
// F16 WASM
#define GGML_F16_STEP 16
#define GGML_F16_EPR  4
inline static v128_t __wasm_f16x4_load(const ggml_fp16_t * p) {
    float tmp[4];
    tmp[0] = GGML_FP16_TO_FP32(p[0]);
    tmp[1] = GGML_FP16_TO_FP32(p[1]);
    tmp[2] = GGML_FP16_TO_FP32(p[2]);
    tmp[3] = GGML_FP16_TO_FP32(p[3]);
    return wasm_v128_load(tmp);
}
inline static void __wasm_f16x4_store(ggml_fp16_t * p, v128_t x) {
    float tmp[4];
    wasm_v128_store(tmp, x);
    p[0] = GGML_FP32_TO_FP16(tmp[0]);
    p[1] = GGML_FP32_TO_FP16(tmp[1]);
    p[2] = GGML_FP32_TO_FP16(tmp[2]);
    p[3] = GGML_FP32_TO_FP16(tmp[3]);
}
#define GGML_F16x4             v128_t
#define GGML_F16x4_ZERO        wasm_f32x4_splat(0.0f)
#define GGML_F16x4_SET1(x)     wasm_f32x4_splat(x)
#define GGML_F16x4_LOAD(x)     __wasm_f16x4_load(x)
#define GGML_F16x4_STORE(x, y) __wasm_f16x4_store(x, y)
#define GGML_F16x4_FMA         GGML_F32x4_FMA
#define GGML_F16x4_ADD         wasm_f32x4_add
#define GGML_F16x4_MUL         wasm_f32x4_mul
#define GGML_F16x4_REDUCE(res, x)                     \
{                                                     \
    int offset = GGML_F16_ARR >> 1;                   \
    for (int i = 0; i < offset; ++i) {                \
        x[i] = wasm_f32x4_add(x[i], x[offset+i]);     \
    }                                                 \
    offset >>= 1;                                     \
    for (int i = 0; i < offset; ++i) {                \
        x[i] = wasm_f32x4_add(x[i], x[offset+i]);     \
    }                                                 \
    offset >>= 1;                                     \
    for (int i = 0; i < offset; ++i) {                \
        x[i] = wasm_f32x4_add(x[i], x[offset+i]);     \
    }                                                 \
    res = wasm_f32x4_extract_lane(x[0], 0) +          \
          wasm_f32x4_extract_lane(x[0], 1) +          \
          wasm_f32x4_extract_lane(x[0], 2) +          \
          wasm_f32x4_extract_lane(x[0], 3);           \
}
#define GGML_F16_VEC                GGML_F16x4
#define GGML_F16_VEC_ZERO           GGML_F16x4_ZERO
#define GGML_F16_VEC_SET1           GGML_F16x4_SET1
#define GGML_F16_VEC_LOAD(p, i)     GGML_F16x4_LOAD(p)
#define GGML_F16_VEC_STORE(p, r, i) GGML_F16x4_STORE(p, r[i])
#define GGML_F16_VEC_FMA            GGML_F16x4_FMA
#define GGML_F16_VEC_ADD            GGML_F16x4_ADD
#define GGML_F16_VEC_MUL            GGML_F16x4_MUL
#define GGML_F16_VEC_REDUCE         GGML_F16x4_REDUCE
#elif defined(__SSE3__)
#define GGML_SIMD
// F32 SSE
#define GGML_F32_STEP 32
#define GGML_F32_EPR  4
#define GGML_F32x4         __m128
#define GGML_F32x4_ZERO    _mm_setzero_ps()
#define GGML_F32x4_SET1(x) _mm_set1_ps(x)
#define GGML_F32x4_LOAD    _mm_loadu_ps
#define GGML_F32x4_STORE   _mm_storeu_ps
#if defined(__FMA__)
// TODO: Does this work?
#define GGML_F32x4_FMA(a, b, c) _mm_fmadd_ps(b, c, a)
#else
#define GGML_F32x4_FMA(a, b, c) _mm_add_ps(_mm_mul_ps(b, c), a)
#endif
#define GGML_F32x4_ADD _mm_add_ps
#define GGML_F32x4_MUL _mm_mul_ps
#define GGML_F32x4_REDUCE(res, x)                  \
{                                                  \
    int offset = GGML_F32_ARR >> 1;                \
    for (int i = 0; i < offset; ++i) {             \
        x[i] = _mm_add_ps(x[i], x[offset+i]);      \
    }                                              \
    offset >>= 1;                                  \
    for (int i = 0; i < offset; ++i) {             \
        x[i] = _mm_add_ps(x[i], x[offset+i]);      \
    }                                              \
    offset >>= 1;                                  \
    for (int i = 0; i < offset; ++i) {             \
        x[i] = _mm_add_ps(x[i], x[offset+i]);      \
    }                                              \
    const __m128 t0 = _mm_hadd_ps(x[0], x[0]);     \
    res = _mm_cvtss_f32(_mm_hadd_ps(t0, t0));      \
}
// TODO: is this optimal?
#define GGML_F32_VEC        GGML_F32x4
#define GGML_F32_VEC_ZERO   GGML_F32x4_ZERO
#define GGML_F32_VEC_SET1   GGML_F32x4_SET1
#define GGML_F32_VEC_LOAD   GGML_F32x4_LOAD
#define GGML_F32_VEC_STORE  GGML_F32x4_STORE
#define GGML_F32_VEC_FMA    GGML_F32x4_FMA
#define GGML_F32_VEC_ADD    GGML_F32x4_ADD
#define GGML_F32_VEC_MUL    GGML_F32x4_MUL
#define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE
// F16 SSE
#define GGML_F16_STEP 32
#define GGML_F16_EPR  4
static inline __m128 __sse_f16x4_load(ggml_fp16_t *x) {
    float tmp[4];
    tmp[0] = GGML_FP16_TO_FP32(x[0]);
    tmp[1] = GGML_FP16_TO_FP32(x[1]);
    tmp[2] = GGML_FP16_TO_FP32(x[2]);
    tmp[3] = GGML_FP16_TO_FP32(x[3]);
    return _mm_loadu_ps(tmp);
}
static inline void __sse_f16x4_store(ggml_fp16_t *x, __m128 y) {
    float arr[4];
    _mm_storeu_ps(arr, y);
    x[0] = GGML_FP32_TO_FP16(arr[0]);
    x[1] = GGML_FP32_TO_FP16(arr[1]);
    x[2] = GGML_FP32_TO_FP16(arr[2]);
    x[3] = GGML_FP32_TO_FP16(arr[3]);
}
#define GGML_F32Cx4             __m128
#define GGML_F32Cx4_ZERO        _mm_setzero_ps()
#define GGML_F32Cx4_SET1(x)     _mm_set1_ps(x)
#define GGML_F32Cx4_LOAD(x)     __sse_f16x4_load(x)
#define GGML_F32Cx4_STORE(x, y) __sse_f16x4_store(x, y)
#define GGML_F32Cx4_FMA         GGML_F32x4_FMA
#define GGML_F32Cx4_ADD         _mm_add_ps
#define GGML_F32Cx4_MUL         _mm_mul_ps
#define GGML_F32Cx4_REDUCE      GGML_F32x4_REDUCE
#define GGML_F16_VEC                GGML_F32Cx4
#define GGML_F16_VEC_ZERO           GGML_F32Cx4_ZERO
#define GGML_F16_VEC_SET1           GGML_F32Cx4_SET1
#define GGML_F16_VEC_LOAD(p, i)     GGML_F32Cx4_LOAD(p)
#define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx4_STORE(p, r[i])
#define GGML_F16_VEC_FMA            GGML_F32Cx4_FMA
#define GGML_F16_VEC_ADD            GGML_F32Cx4_ADD
#define GGML_F16_VEC_MUL            GGML_F32Cx4_MUL
#define GGML_F16_VEC_REDUCE         GGML_F32Cx4_REDUCE
#endif
// GGML_F32_ARR / GGML_F16_ARR
//   number of registers to use per step
#ifdef GGML_SIMD
#define GGML_F32_ARR (GGML_F32_STEP/GGML_F32_EPR)
#define GGML_F16_ARR (GGML_F16_STEP/GGML_F16_EPR)
#endif
//
// fundamental operations
//
inline static void ggml_vec_set_i8(const int n, int8_t * x, const int8_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
inline static void ggml_vec_set_i16(const int n, int16_t * x, const int16_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
inline static void ggml_vec_set_i32(const int n, int32_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
inline static void ggml_vec_set_f16(const int n, ggml_fp16_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
inline static void ggml_vec_add_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i] + y[i]; }
inline static void ggml_vec_add1_f32(const int n, float * z, const float * x, const float   v) { for (int i = 0; i < n; ++i) z[i] = x[i] + v;    }
inline static void ggml_vec_acc_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] += x[i]; }
inline static void ggml_vec_acc1_f32(const int n, float * y, const float   v) { for (int i = 0; i < n; ++i) y[i] += v;    }
inline static void ggml_vec_sub_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i] - y[i]; }
inline static void ggml_vec_set_f32 (const int n, float * x, const float   v) { for (int i = 0; i < n; ++i) x[i] = v;    }
inline static void ggml_vec_cpy_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i]; }
inline static void ggml_vec_neg_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = -x[i]; }
inline static void ggml_vec_mul_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]*y[i]; }
inline static void ggml_vec_div_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]/y[i]; }
inline static void ggml_vec_dot_f32(const int n, float * restrict s, const float * restrict x, const float * restrict y) {
#ifdef GGML_SIMD
    float sumf = 0.0f;
    const int np = (n & ~(GGML_F32_STEP - 1));
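    // np is the largest multiple of GGML_F32_STEP that fits in n (STEP is a
    // power of two); the remaining n - np elements are handled by the scalar
    // leftover loop below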
    GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO };
    GGML_F32_VEC ax[GGML_F32_ARR];
    GGML_F32_VEC ay[GGML_F32_ARR];
    for (int i = 0; i < np; i += GGML_F32_STEP) {
        for (int j = 0; j < GGML_F32_ARR; j++) {
            ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR);
            ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
            sum[j] = GGML_F32_VEC_FMA(sum[j], ax[j], ay[j]);
        }
    }
    // reduce sum0..sum3 to sum0
    GGML_F32_VEC_REDUCE(sumf, sum);
    // leftovers
    for (int i = np; i < n; ++i) {
        sumf += x[i]*y[i];
    }
#else
    // scalar
    ggml_float sumf = 0.0;
    for (int i = 0; i < n; ++i) {
        sumf += (ggml_float)(x[i]*y[i]);
    }
#endif
    *s = sumf;
}
inline static void ggml_vec_dot_f16(const int n, float * restrict s, ggml_fp16_t * restrict x, ggml_fp16_t * restrict y) {
    ggml_float sumf = 0.0;
#if defined(GGML_SIMD)
    const int np = (n & ~(GGML_F16_STEP - 1));
    GGML_F16_VEC sum[GGML_F16_ARR] = { GGML_F16_VEC_ZERO };
    GGML_F16_VEC ax[GGML_F16_ARR];
    GGML_F16_VEC ay[GGML_F16_ARR];
    for (int i = 0; i < np; i += GGML_F16_STEP) {
        for (int j = 0; j < GGML_F16_ARR; j++) {
            ax[j] = GGML_F16_VEC_LOAD(x + i + j*GGML_F16_EPR, j);
            ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j);
            sum[j] = GGML_F16_VEC_FMA(sum[j], ax[j], ay[j]);
        }
    }
    // reduce sum0..sum3 to sum0
    GGML_F16_VEC_REDUCE(sumf, sum);
    // leftovers
    for (int i = np; i < n; ++i) {
        sumf += (ggml_float)(GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i]));
    }
#else
    for (int i = 0; i < n; ++i) {
        sumf += (ggml_float)(GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i]));
    }
#endif
    *s = sumf;
}
static void ggml_vec_dot_q4_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
    const int qk = QK8_0;
    const int nb = n / qk;
    assert(n % qk == 0);
    assert(nb % 2 == 0);
    const block_q4_0 * restrict x = vx;
    const block_q8_0 * restrict y = vy;
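    // both operands dequantize linearly (x = d_x*(qx - 8), y = d_y*qy), so the
    // dot product reduces to a sum over blocks of d_x*d_y * sum_j (qx_j - 8)*qy_j;
    // the NEON path subtracts 8 before the multiplies, the AVX paths apply the
    // same offset to the unpacked nibbles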
#if defined(__ARM_NEON)
    float32x4_t sumv0 = vdupq_n_f32(0.0f);
    float32x4_t sumv1 = vdupq_n_f32(0.0f);
    for (int i = 0; i < nb; i += 2) {
        const block_q4_0 * restrict x0 = &x[i + 0];
        const block_q4_0 * restrict x1 = &x[i + 1];
        const block_q8_0 * restrict y0 = &y[i + 0];
        const block_q8_0 * restrict y1 = &y[i + 1];
        const uint8x16_t m4b = vdupq_n_u8(0x0F);
        const int8x16_t  s8b = vdupq_n_s8(0x8);
        const uint8x16_t v0_0 = vld1q_u8(x0->qs);
        const uint8x16_t v0_1 = vld1q_u8(x1->qs);
        // 4-bit -> 8-bit
        const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8  (v0_0, m4b));
        const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
        const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8  (v0_1, m4b));
        const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
        // sub 8
        const int8x16_t v0_0ls = vsubq_s8(v0_0l, s8b);
        const int8x16_t v0_0hs = vsubq_s8(v0_0h, s8b);
        const int8x16_t v0_1ls = vsubq_s8(v0_1l, s8b);
        const int8x16_t v0_1hs = vsubq_s8(v0_1h, s8b);
        // load y
        const int8x16_t v1_0l = vld1q_s8(y0->qs);
        const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
        const int8x16_t v1_1l = vld1q_s8(y1->qs);
        const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);
#if defined(__ARM_FEATURE_DOTPROD)
        // dot product into int32x4_t
        const int32x4_t p_0 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_0ls, v1_0l), v0_0hs, v1_0h);
        const int32x4_t p_1 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_1ls, v1_1l), v0_1hs, v1_1h);
        sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
        sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
#else
        const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0ls), vget_low_s8 (v1_0l));
        const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0ls), vget_high_s8(v1_0l));
        const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hs), vget_low_s8 (v1_0h));
        const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hs), vget_high_s8(v1_0h));
        const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1ls), vget_low_s8 (v1_1l));
        const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1ls), vget_high_s8(v1_1l));
        const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1hs), vget_low_s8 (v1_1h));
        const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1hs), vget_high_s8(v1_1h));
        const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h));
        const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h));
        const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h));
        const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h));
        sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
        sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
#endif
    }
    *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
  1989. #elif defined(__AVX2__)
  1990. // Initialize accumulator with zeros
  1991. __m256 acc = _mm256_setzero_ps();
  1992. // Main loop
  1993. for (int i = 0; i < nb; ++i) {
  1994. /* Compute combined scale for the block */
  1995. const __m256 d = _mm256_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) );
  1996. __m256i bx = bytes_from_nibbles_32(x[i].qs);
  1997. // Now we have a vector with bytes in [ 0 .. 15 ] interval. Offset them into [ -8 .. +7 ] interval.
  1998. const __m256i off = _mm256_set1_epi8( 8 );
  1999. bx = _mm256_sub_epi8( bx, off );
  2000. __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);
  2001. const __m256 q = mul_sum_i8_pairs_float(bx, by);
  2002. /* Multiply q with scale and accumulate */
  2003. acc = _mm256_fmadd_ps( d, q, acc );
  2004. }
  2005. *s = hsum_float_8(acc);
  2006. #elif defined(__AVX__)
  2007. // Initialize accumulator with zeros
  2008. __m256 acc = _mm256_setzero_ps();
  2009. // Main loop
  2010. for (int i = 0; i < nb; ++i) {
  2011. // Compute combined scale for the block
  2012. const __m256 d = _mm256_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) );
  2013. const __m128i lowMask = _mm_set1_epi8(0xF);
  2014. const __m128i off = _mm_set1_epi8(8);
  2015. const __m128i tmp = _mm_loadu_si128((const __m128i *)x[i].qs);
  2016. __m128i bx = _mm_and_si128(lowMask, tmp);
  2017. __m128i by = _mm_loadu_si128((const __m128i *)y[i].qs);
  2018. bx = _mm_sub_epi8(bx, off);
  2019. const __m128i i32_0 = mul_sum_i8_pairs(bx, by);
  2020. bx = _mm_and_si128(lowMask, _mm_srli_epi64(tmp, 4));
  2021. by = _mm_loadu_si128((const __m128i *)(y[i].qs + 16));
  2022. bx = _mm_sub_epi8(bx, off);
  2023. const __m128i i32_1 = mul_sum_i8_pairs(bx, by);
  2024. // Convert int32_t to float
  2025. __m256 p = _mm256_cvtepi32_ps(MM256_SET_M128I(i32_0, i32_1));
  2026. // Apply the scale, and accumulate
  2027. acc = _mm256_add_ps(_mm256_mul_ps( d, p ), acc);
  2028. }
  2029. *s = hsum_float_8(acc);
  2030. #elif defined(__SSSE3__)
  2031. // set constants
  2032. const __m128i lowMask = _mm_set1_epi8(0xF);
  2033. const __m128i off = _mm_set1_epi8(8);
  2034. // Initialize accumulator with zeros
  2035. __m128 acc_0 = _mm_setzero_ps();
  2036. __m128 acc_1 = _mm_setzero_ps();
  2037. __m128 acc_2 = _mm_setzero_ps();
  2038. __m128 acc_3 = _mm_setzero_ps();
  2039. // First round without accumulation
  2040. {
  2041. _mm_prefetch(&x[0] + sizeof(block_q4_0), _MM_HINT_T0);
  2042. _mm_prefetch(&y[0] + sizeof(block_q8_0), _MM_HINT_T0);
  2043. // Compute combined scale for the block 0 and 1
  2044. const __m128 d_0_1 = _mm_set1_ps( GGML_FP16_TO_FP32(x[0].d) * GGML_FP16_TO_FP32(y[0].d) );
  2045. const __m128i tmp_0_1 = _mm_loadu_si128((const __m128i *)x[0].qs);
  2046. __m128i bx_0 = _mm_and_si128(lowMask, tmp_0_1);
  2047. __m128i by_0 = _mm_loadu_si128((const __m128i *)y[0].qs);
  2048. bx_0 = _mm_sub_epi8(bx_0, off);
  2049. const __m128i i32_0 = mul_sum_i8_pairs(bx_0, by_0);
  2050. __m128i bx_1 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_0_1, 4));
  2051. __m128i by_1 = _mm_loadu_si128((const __m128i *)(y[0].qs + 16));
  2052. bx_1 = _mm_sub_epi8(bx_1, off);
  2053. const __m128i i32_1 = mul_sum_i8_pairs(bx_1, by_1);
  2054. _mm_prefetch(&x[1] + sizeof(block_q4_0), _MM_HINT_T0);
  2055. _mm_prefetch(&y[1] + sizeof(block_q8_0), _MM_HINT_T0);
  2056. // Compute combined scale for the block 2 and 3
  2057. const __m128 d_2_3 = _mm_set1_ps( GGML_FP16_TO_FP32(x[1].d) * GGML_FP16_TO_FP32(y[1].d) );
  2058. const __m128i tmp_2_3 = _mm_loadu_si128((const __m128i *)x[1].qs);
  2059. __m128i bx_2 = _mm_and_si128(lowMask, tmp_2_3);
  2060. __m128i by_2 = _mm_loadu_si128((const __m128i *)y[1].qs);
  2061. bx_2 = _mm_sub_epi8(bx_2, off);
  2062. const __m128i i32_2 = mul_sum_i8_pairs(bx_2, by_2);
  2063. __m128i bx_3 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_2_3, 4));
  2064. __m128i by_3 = _mm_loadu_si128((const __m128i *)(y[1].qs + 16));
  2065. bx_3 = _mm_sub_epi8(bx_3, off);
  2066. const __m128i i32_3 = mul_sum_i8_pairs(bx_3, by_3);
  2067. // Convert int32_t to float
  2068. __m128 p0 = _mm_cvtepi32_ps(i32_0);
  2069. __m128 p1 = _mm_cvtepi32_ps(i32_1);
  2070. __m128 p2 = _mm_cvtepi32_ps(i32_2);
  2071. __m128 p3 = _mm_cvtepi32_ps(i32_3);
  2072. // Apply the scale
  2073. acc_0 = _mm_mul_ps( d_0_1, p0 );
  2074. acc_1 = _mm_mul_ps( d_0_1, p1 );
  2075. acc_2 = _mm_mul_ps( d_2_3, p2 );
  2076. acc_3 = _mm_mul_ps( d_2_3, p3 );
  2077. }
  2078. // Main loop
  2079. for (int i = 2; i < nb; i+=2) {
  2080. _mm_prefetch(&x[i] + sizeof(block_q4_0), _MM_HINT_T0);
  2081. _mm_prefetch(&y[i] + sizeof(block_q8_0), _MM_HINT_T0);
  2082. // Compute combined scale for the block 0 and 1
  2083. const __m128 d_0_1 = _mm_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) );
  2084. const __m128i tmp_0_1 = _mm_loadu_si128((const __m128i *)x[i].qs);
  2085. __m128i bx_0 = _mm_and_si128(lowMask, tmp_0_1);
  2086. __m128i by_0 = _mm_loadu_si128((const __m128i *)y[i].qs);
  2087. bx_0 = _mm_sub_epi8(bx_0, off);
  2088. const __m128i i32_0 = mul_sum_i8_pairs(bx_0, by_0);
  2089. __m128i bx_1 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_0_1, 4));
  2090. __m128i by_1 = _mm_loadu_si128((const __m128i *)(y[i].qs + 16));
  2091. bx_1 = _mm_sub_epi8(bx_1, off);
  2092. const __m128i i32_1 = mul_sum_i8_pairs(bx_1, by_1);
  2093. _mm_prefetch(&x[i] + 2 * sizeof(block_q4_0), _MM_HINT_T0);
  2094. _mm_prefetch(&y[i] + 2 * sizeof(block_q8_0), _MM_HINT_T0);
  2095. // Compute combined scale for the block 2 and 3
  2096. const __m128 d_2_3 = _mm_set1_ps( GGML_FP16_TO_FP32(x[i + 1].d) * GGML_FP16_TO_FP32(y[i + 1].d) );
  2097. const __m128i tmp_2_3 = _mm_loadu_si128((const __m128i *)x[i + 1].qs);
  2098. __m128i bx_2 = _mm_and_si128(lowMask, tmp_2_3);
  2099. __m128i by_2 = _mm_loadu_si128((const __m128i *)y[i + 1].qs);
  2100. bx_2 = _mm_sub_epi8(bx_2, off);
  2101. const __m128i i32_2 = mul_sum_i8_pairs(bx_2, by_2);
  2102. __m128i bx_3 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_2_3, 4));
  2103. __m128i by_3 = _mm_loadu_si128((const __m128i *)(y[i + 1].qs + 16));
  2104. bx_3 = _mm_sub_epi8(bx_3, off);
  2105. const __m128i i32_3 = mul_sum_i8_pairs(bx_3, by_3);
  2106. // Convert int32_t to float
  2107. __m128 p0 = _mm_cvtepi32_ps(i32_0);
  2108. __m128 p1 = _mm_cvtepi32_ps(i32_1);
  2109. __m128 p2 = _mm_cvtepi32_ps(i32_2);
  2110. __m128 p3 = _mm_cvtepi32_ps(i32_3);
  2111. // Apply the scale
  2112. __m128 p0_d = _mm_mul_ps( d_0_1, p0 );
  2113. __m128 p1_d = _mm_mul_ps( d_0_1, p1 );
  2114. __m128 p2_d = _mm_mul_ps( d_2_3, p2 );
  2115. __m128 p3_d = _mm_mul_ps( d_2_3, p3 );
  2116. // Acummulate
  2117. acc_0 = _mm_add_ps(p0_d, acc_0);
  2118. acc_1 = _mm_add_ps(p1_d, acc_1);
  2119. acc_2 = _mm_add_ps(p2_d, acc_2);
  2120. acc_3 = _mm_add_ps(p3_d, acc_3);
  2121. }
  2122. *s = hsum_float_4x4(acc_0, acc_1, acc_2, acc_3);
  2123. #else
  2124. // scalar
  2125. float sumf = 0.0;
  2126. for (int i = 0; i < nb; i++) {
  2127. int sumi = 0;
  2128. for (int j = 0; j < qk/2; ++j) {
  2129. const int v0 = (x[i].qs[j] & 0x0F) - 8;
  2130. const int v1 = (x[i].qs[j] >> 4) - 8;
  2131. sumi += (v0 * y[i].qs[j]) + (v1 * y[i].qs[j + qk/2]);
  2132. }
  2133. sumf += sumi*GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d);
  2134. }
  2135. *s = sumf;
  2136. #endif
  2137. }
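// Q4_1 is the asymmetric 4-bit variant: values dequantize to d*q + m with q in
// [0, 15]. Because each Q8_1 block carries the precomputed sum of its quants in
// y->s, the min contribution reduces to a scalar term (compare the scalar
// fallback below):
//
//     *s = sum_i ( d_x[i]*d_y[i] * sum_j qx[i][j]*qy[i][j] + m_x[i]*y[i].s )
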
static void ggml_vec_dot_q4_1_q8_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
    const int qk = QK8_1;
    const int nb = n / qk;

    assert(n % qk == 0);
    assert(nb % 2 == 0);

    const block_q4_1 * restrict x = vx;
    const block_q8_1 * restrict y = vy;

    // TODO: add WASM SIMD
#if defined(__ARM_NEON)
    float32x4_t sumv0 = vdupq_n_f32(0.0f);
    float32x4_t sumv1 = vdupq_n_f32(0.0f);

    float summs = 0;

    for (int i = 0; i < nb; i += 2) {
        const block_q4_1 * restrict x0 = &x[i + 0];
        const block_q4_1 * restrict x1 = &x[i + 1];
        const block_q8_1 * restrict y0 = &y[i + 0];
        const block_q8_1 * restrict y1 = &y[i + 1];

        summs += GGML_FP16_TO_FP32(x0->m) * y0->s + GGML_FP16_TO_FP32(x1->m) * y1->s;

        const uint8x16_t m4b = vdupq_n_u8(0x0F);

        const uint8x16_t v0_0 = vld1q_u8(x0->qs);
        const uint8x16_t v0_1 = vld1q_u8(x1->qs);

        // 4-bit -> 8-bit
        const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
        const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
        const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
        const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));

        // load y
        const int8x16_t v1_0l = vld1q_s8(y0->qs);
        const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
        const int8x16_t v1_1l = vld1q_s8(y1->qs);
        const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);

#if defined(__ARM_FEATURE_DOTPROD)
        // dot product into int32x4_t
        const int32x4_t p_0 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_0l, v1_0l), v0_0h, v1_0h);
        const int32x4_t p_1 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_1l, v1_1l), v0_1h, v1_1h);

        sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), GGML_FP16_TO_FP32(x0->d)*y0->d);
        sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_FP16_TO_FP32(x1->d)*y1->d);
#else
        const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0l), vget_low_s8 (v1_0l));
        const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0l), vget_high_s8(v1_0l));
        const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0h), vget_low_s8 (v1_0h));
        const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0h), vget_high_s8(v1_0h));
        const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1l), vget_low_s8 (v1_1l));
        const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1l), vget_high_s8(v1_1l));
        const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1h), vget_low_s8 (v1_1h));
        const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1h), vget_high_s8(v1_1h));

        const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h));
        const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h));
        const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h));
        const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h));

        sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), GGML_FP16_TO_FP32(x0->d)*y0->d);
        sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), GGML_FP16_TO_FP32(x1->d)*y1->d);
#endif
    }

    *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs;
#elif defined(__AVX2__) || defined(__AVX__)
    // Initialize accumulator with zeros
    __m256 acc = _mm256_setzero_ps();

    float summs = 0;

    // Main loop
    for (int i = 0; i < nb; ++i) {
        const float d0 = GGML_FP16_TO_FP32(x[i].d);
        const float d1 = y[i].d;

        summs += GGML_FP16_TO_FP32(x[i].m) * y[i].s;

        const __m256 d0v = _mm256_set1_ps( d0 );
        const __m256 d1v = _mm256_set1_ps( d1 );

        // Compute combined scales
        const __m256 d0d1 = _mm256_mul_ps( d0v, d1v );

        // Load 16 bytes, and unpack 4 bit fields into bytes, making 32 bytes
        const __m256i bx = bytes_from_nibbles_32(x[i].qs);
        const __m256i by = _mm256_loadu_si256( (const __m256i *)y[i].qs );

        const __m256 xy = mul_sum_us8_pairs_float(bx, by);

        // Accumulate d0*d1*x*y
#if defined(__AVX2__)
        acc = _mm256_fmadd_ps( d0d1, xy, acc );
#else
        acc = _mm256_add_ps( _mm256_mul_ps( d0d1, xy ), acc );
#endif
    }

    *s = hsum_float_8(acc) + summs;
#else
    // scalar
    float sumf = 0.0;

    for (int i = 0; i < nb; i++) {
        int sumi = 0;

        for (int j = 0; j < qk/2; ++j) {
            const int v0 = (x[i].qs[j] & 0x0F);
            const int v1 = (x[i].qs[j] >> 4);

            sumi += (v0 * y[i].qs[j]) + (v1 * y[i].qs[j + qk/2]);
        }

        sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s;
    }

    *s = sumf;
#endif
}

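// Q5_0 extends Q4_0 with a fifth bit per weight, packed separately in the
// 32-bit qh field; the full quant is ((nibble) | (5th bit << 4)) - 16. The
// table_b2b_1 lookups below expand 8 packed bits into 8 bytes of ((!b) << 4),
// so a single vector subtract both injects the high bit and applies the -16
// offset: nibble - (1 - b)*16 == (nibble | (b << 4)) - 16.
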
static void ggml_vec_dot_q5_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
    const int qk = QK8_0;
    const int nb = n / qk;

    assert(n % qk == 0);
    assert(nb % 2 == 0);
    assert(qk == QK5_0);

    const block_q5_0 * restrict x = vx;
    const block_q8_0 * restrict y = vy;

#if defined(__ARM_NEON)
    float32x4_t sumv0 = vdupq_n_f32(0.0f);
    float32x4_t sumv1 = vdupq_n_f32(0.0f);

    uint32_t qh0;
    uint32_t qh1;

    uint64_t tmp0[4];
    uint64_t tmp1[4];

    for (int i = 0; i < nb; i += 2) {
        const block_q5_0 * restrict x0 = &x[i];
        const block_q5_0 * restrict x1 = &x[i + 1];
        const block_q8_0 * restrict y0 = &y[i];
        const block_q8_0 * restrict y1 = &y[i + 1];

        const uint8x16_t m4b = vdupq_n_u8(0x0F);

        // extract the 5th bit via lookup table ((!b) << 4)
        memcpy(&qh0, x0->qh, sizeof(qh0));
        memcpy(&qh1, x1->qh, sizeof(qh1));

        tmp0[0] = table_b2b_1[(qh0 >>  0) & 0xFF];
        tmp0[1] = table_b2b_1[(qh0 >>  8) & 0xFF];
        tmp0[2] = table_b2b_1[(qh0 >> 16) & 0xFF];
        tmp0[3] = table_b2b_1[(qh0 >> 24)       ];

        tmp1[0] = table_b2b_1[(qh1 >>  0) & 0xFF];
        tmp1[1] = table_b2b_1[(qh1 >>  8) & 0xFF];
        tmp1[2] = table_b2b_1[(qh1 >> 16) & 0xFF];
        tmp1[3] = table_b2b_1[(qh1 >> 24)       ];

        const int8x16_t qhl0 = vld1q_s8((const int8_t *)(tmp0 + 0));
        const int8x16_t qhh0 = vld1q_s8((const int8_t *)(tmp0 + 2));
        const int8x16_t qhl1 = vld1q_s8((const int8_t *)(tmp1 + 0));
        const int8x16_t qhh1 = vld1q_s8((const int8_t *)(tmp1 + 2));

        const uint8x16_t v0_0 = vld1q_u8(x0->qs);
        const uint8x16_t v0_1 = vld1q_u8(x1->qs);

        // 4-bit -> 8-bit
        int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
        int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
        int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
        int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));

        // add high bit and sub 16 (equivalent to sub 0x10 when bit is zero)
        const int8x16_t v0_0lf = vsubq_s8(v0_0l, qhl0);
        const int8x16_t v0_0hf = vsubq_s8(v0_0h, qhh0);
        const int8x16_t v0_1lf = vsubq_s8(v0_1l, qhl1);
        const int8x16_t v0_1hf = vsubq_s8(v0_1h, qhh1);

        // load y
        const int8x16_t v1_0l = vld1q_s8(y0->qs);
        const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
        const int8x16_t v1_1l = vld1q_s8(y1->qs);
        const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);

#if defined(__ARM_FEATURE_DOTPROD)
        sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(
                        vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l),
                        vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
        sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(
                        vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l),
                        vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
#else
        const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0lf), vget_low_s8 (v1_0l));
        const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0lf), vget_high_s8(v1_0l));
        const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hf), vget_low_s8 (v1_0h));
        const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hf), vget_high_s8(v1_0h));
        const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1lf), vget_low_s8 (v1_1l));
        const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1lf), vget_high_s8(v1_1l));
        const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1hf), vget_low_s8 (v1_1h));
        const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1hf), vget_high_s8(v1_1h));

        const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h));
        const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h));
        const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h));
        const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h));

        sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
        sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
#endif
    }

    *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
#elif defined(__wasm_simd128__)
    v128_t sumv = wasm_f32x4_splat(0.0f);

    uint32_t qh;
    uint64_t tmp[4];

    // TODO: check if unrolling this is better
    for (int i = 0; i < nb; ++i) {
        const block_q5_0 * restrict x0 = &x[i];
        const block_q8_0 * restrict y0 = &y[i];

        const v128_t m4b = wasm_i8x16_splat(0x0F);

        // extract the 5th bit
        memcpy(&qh, x0->qh, sizeof(qh));

        tmp[0] = table_b2b_1[(qh >>  0) & 0xFF];
        tmp[1] = table_b2b_1[(qh >>  8) & 0xFF];
        tmp[2] = table_b2b_1[(qh >> 16) & 0xFF];
        tmp[3] = table_b2b_1[(qh >> 24)       ];

        const v128_t qhl = wasm_v128_load(tmp + 0);
        const v128_t qhh = wasm_v128_load(tmp + 2);

        const v128_t v0 = wasm_v128_load(x0->qs);

        // 4-bit -> 8-bit
        const v128_t v0l = wasm_v128_and (v0, m4b);
        const v128_t v0h = wasm_u8x16_shr(v0, 4);

        // add high bit and sub 16 (equivalent to sub 0x10 when bit is zero)
        const v128_t v0lf = wasm_i8x16_sub(v0l, qhl);
        const v128_t v0hf = wasm_i8x16_sub(v0h, qhh);

        // load y
        const v128_t v1l = wasm_v128_load(y0->qs);
        const v128_t v1h = wasm_v128_load(y0->qs + 16);

        // int8x16 -> int16x8
        const v128_t v0lfl = wasm_i16x8_extend_low_i8x16 (v0lf);
        const v128_t v0lfh = wasm_i16x8_extend_high_i8x16(v0lf);
        const v128_t v0hfl = wasm_i16x8_extend_low_i8x16 (v0hf);
        const v128_t v0hfh = wasm_i16x8_extend_high_i8x16(v0hf);

        const v128_t v1ll = wasm_i16x8_extend_low_i8x16 (v1l);
        const v128_t v1lh = wasm_i16x8_extend_high_i8x16(v1l);
        const v128_t v1hl = wasm_i16x8_extend_low_i8x16 (v1h);
        const v128_t v1hh = wasm_i16x8_extend_high_i8x16(v1h);

        // dot product
        sumv = wasm_f32x4_add(sumv, wasm_f32x4_mul(wasm_f32x4_convert_i32x4(
                        wasm_i32x4_add(
                            wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0lfl, v1ll),
                                           wasm_i32x4_dot_i16x8(v0lfh, v1lh)),
                            wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0hfl, v1hl),
                                           wasm_i32x4_dot_i16x8(v0hfh, v1hh)))),
                    wasm_f32x4_splat(GGML_FP16_TO_FP32(x0->d) * GGML_FP16_TO_FP32(y0->d))));
    }

    *s = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) +
         wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3);
#elif defined(__AVX2__)
    // Initialize accumulator with zeros
    __m256 acc = _mm256_setzero_ps();

    // Main loop
    for (int i = 0; i < nb; i++) {
        /* Compute combined scale for the block */
        const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d));

        __m256i bx = bytes_from_nibbles_32(x[i].qs);
        __m256i bxhi = bytes_from_bits_32(x[i].qh);
        bxhi = _mm256_andnot_si256(bxhi, _mm256_set1_epi8((char)0xF0));
        bx = _mm256_or_si256(bx, bxhi);

        __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);

        const __m256 q = mul_sum_i8_pairs_float(bx, by);

        /* Multiply q with scale and accumulate */
        acc = _mm256_fmadd_ps(d, q, acc);
    }

    *s = hsum_float_8(acc);
#elif defined(__AVX__)
    // Initialize accumulator with zeros
    __m256 acc = _mm256_setzero_ps();
    __m128i mask = _mm_set1_epi8((char)0xF0);

    // Main loop
    for (int i = 0; i < nb; i++) {
        /* Compute combined scale for the block */
        const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d));

        __m256i bx = bytes_from_nibbles_32(x[i].qs);
        const __m256i bxhi = bytes_from_bits_32(x[i].qh);
        __m128i bxhil = _mm256_castsi256_si128(bxhi);
        __m128i bxhih = _mm256_extractf128_si256(bxhi, 1);
        bxhil = _mm_andnot_si128(bxhil, mask);
        bxhih = _mm_andnot_si128(bxhih, mask);
        __m128i bxl = _mm256_castsi256_si128(bx);
        __m128i bxh = _mm256_extractf128_si256(bx, 1);
        bxl = _mm_or_si128(bxl, bxhil);
        bxh = _mm_or_si128(bxh, bxhih);
        bx = MM256_SET_M128I(bxh, bxl);

        const __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);

        const __m256 q = mul_sum_i8_pairs_float(bx, by);

        /* Multiply q with scale and accumulate */
        acc = _mm256_add_ps(_mm256_mul_ps(d, q), acc);
    }

    *s = hsum_float_8(acc);
#else
    // scalar
    float sumf = 0.0;

    for (int i = 0; i < nb; i++) {
        uint32_t qh;
        memcpy(&qh, x[i].qh, sizeof(qh));

        int sumi = 0;

        for (int j = 0; j < qk/2; ++j) {
            const uint8_t xh_0 = ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4;
            const uint8_t xh_1 = ((qh & (1u << (j + 16))) >> (j + 12));

            const int32_t x0 = ((x[i].qs[j] & 0x0F) | xh_0) - 16;
            const int32_t x1 = ((x[i].qs[j] >> 4) | xh_1) - 16;

            sumi += (x0 * y[i].qs[j]) + (x1 * y[i].qs[j + qk/2]);
        }

        sumf += (GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d)) * sumi;
    }

    *s = sumf;
#endif
}

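// Q5_1 is the asymmetric 5-bit variant: quants lie in [0, 31] with scale d and
// min m, so the high bit is simply OR-ed in (table_b2b_0 expands each bit b to
// (b << 4)) and the m*sum(y) term folds into summs via the precomputed y->s,
// exactly as in Q4_1.
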
static void ggml_vec_dot_q5_1_q8_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
    const int qk = QK8_1;
    const int nb = n / qk;

    assert(n % qk == 0);
    assert(nb % 2 == 0);
    assert(qk == QK5_1);

    const block_q5_1 * restrict x = vx;
    const block_q8_1 * restrict y = vy;

#if defined(__ARM_NEON)
    float32x4_t sumv0 = vdupq_n_f32(0.0f);
    float32x4_t sumv1 = vdupq_n_f32(0.0f);

    float summs0 = 0.0f;
    float summs1 = 0.0f;

    uint32_t qh0;
    uint32_t qh1;

    uint64_t tmp0[4];
    uint64_t tmp1[4];

    for (int i = 0; i < nb; i += 2) {
        const block_q5_1 * restrict x0 = &x[i];
        const block_q5_1 * restrict x1 = &x[i + 1];
        const block_q8_1 * restrict y0 = &y[i];
        const block_q8_1 * restrict y1 = &y[i + 1];

        const uint8x16_t m4b = vdupq_n_u8(0x0F);

        summs0 += GGML_FP16_TO_FP32(x0->m) * y0->s;
        summs1 += GGML_FP16_TO_FP32(x1->m) * y1->s;

        // extract the 5th bit via lookup table ((b) << 4)
        memcpy(&qh0, x0->qh, sizeof(qh0));
        memcpy(&qh1, x1->qh, sizeof(qh1));

        tmp0[0] = table_b2b_0[(qh0 >>  0) & 0xFF];
        tmp0[1] = table_b2b_0[(qh0 >>  8) & 0xFF];
        tmp0[2] = table_b2b_0[(qh0 >> 16) & 0xFF];
        tmp0[3] = table_b2b_0[(qh0 >> 24)       ];

        tmp1[0] = table_b2b_0[(qh1 >>  0) & 0xFF];
        tmp1[1] = table_b2b_0[(qh1 >>  8) & 0xFF];
        tmp1[2] = table_b2b_0[(qh1 >> 16) & 0xFF];
        tmp1[3] = table_b2b_0[(qh1 >> 24)       ];

        const int8x16_t qhl0 = vld1q_s8((const int8_t *)(tmp0 + 0));
        const int8x16_t qhh0 = vld1q_s8((const int8_t *)(tmp0 + 2));
        const int8x16_t qhl1 = vld1q_s8((const int8_t *)(tmp1 + 0));
        const int8x16_t qhh1 = vld1q_s8((const int8_t *)(tmp1 + 2));

        const uint8x16_t v0_0 = vld1q_u8(x0->qs);
        const uint8x16_t v0_1 = vld1q_u8(x1->qs);

        // 4-bit -> 8-bit
        const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
        const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
        const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
        const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));

        // add high bit
        const int8x16_t v0_0lf = vorrq_s8(v0_0l, qhl0);
        const int8x16_t v0_0hf = vorrq_s8(v0_0h, qhh0);
        const int8x16_t v0_1lf = vorrq_s8(v0_1l, qhl1);
        const int8x16_t v0_1hf = vorrq_s8(v0_1h, qhh1);

        // load y
        const int8x16_t v1_0l = vld1q_s8(y0->qs);
        const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
        const int8x16_t v1_1l = vld1q_s8(y1->qs);
        const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);

#if defined(__ARM_FEATURE_DOTPROD)
        sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(
                        vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l),
                        vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), GGML_FP16_TO_FP32(x0->d)*y0->d);
        sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(
                        vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l),
                        vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_FP16_TO_FP32(x1->d)*y1->d);
#else
        const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0lf), vget_low_s8 (v1_0l));
        const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0lf), vget_high_s8(v1_0l));
        const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hf), vget_low_s8 (v1_0h));
        const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hf), vget_high_s8(v1_0h));
        const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1lf), vget_low_s8 (v1_1l));
        const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1lf), vget_high_s8(v1_1l));
        const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1hf), vget_low_s8 (v1_1h));
        const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1hf), vget_high_s8(v1_1h));

        const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h));
        const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h));
        const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h));
        const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h));

        sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), GGML_FP16_TO_FP32(x0->d)*y0->d);
        sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), GGML_FP16_TO_FP32(x1->d)*y1->d);
#endif
    }

    *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs0 + summs1;
#elif defined(__wasm_simd128__)
    v128_t sumv = wasm_f32x4_splat(0.0f);

    float summs = 0.0f;

    uint32_t qh;
    uint64_t tmp[4];

    // TODO: check if unrolling this is better
    for (int i = 0; i < nb; ++i) {
        const block_q5_1 * restrict x0 = &x[i];
        const block_q8_1 * restrict y0 = &y[i];

        summs += GGML_FP16_TO_FP32(x0->m) * y0->s;

        const v128_t m4b = wasm_i8x16_splat(0x0F);

        // extract the 5th bit
        memcpy(&qh, x0->qh, sizeof(qh));

        tmp[0] = table_b2b_0[(qh >>  0) & 0xFF];
        tmp[1] = table_b2b_0[(qh >>  8) & 0xFF];
        tmp[2] = table_b2b_0[(qh >> 16) & 0xFF];
        tmp[3] = table_b2b_0[(qh >> 24)       ];

        const v128_t qhl = wasm_v128_load(tmp + 0);
        const v128_t qhh = wasm_v128_load(tmp + 2);

        const v128_t v0 = wasm_v128_load(x0->qs);

        // 4-bit -> 8-bit
        const v128_t v0l = wasm_v128_and (v0, m4b);
        const v128_t v0h = wasm_u8x16_shr(v0, 4);

        // add high bit
        const v128_t v0lf = wasm_v128_or(v0l, qhl);
        const v128_t v0hf = wasm_v128_or(v0h, qhh);

        // load y
        const v128_t v1l = wasm_v128_load(y0->qs);
        const v128_t v1h = wasm_v128_load(y0->qs + 16);

        // int8x16 -> int16x8
        const v128_t v0lfl = wasm_i16x8_extend_low_i8x16 (v0lf);
        const v128_t v0lfh = wasm_i16x8_extend_high_i8x16(v0lf);
        const v128_t v0hfl = wasm_i16x8_extend_low_i8x16 (v0hf);
        const v128_t v0hfh = wasm_i16x8_extend_high_i8x16(v0hf);

        const v128_t v1ll = wasm_i16x8_extend_low_i8x16 (v1l);
        const v128_t v1lh = wasm_i16x8_extend_high_i8x16(v1l);
        const v128_t v1hl = wasm_i16x8_extend_low_i8x16 (v1h);
        const v128_t v1hh = wasm_i16x8_extend_high_i8x16(v1h);

        // dot product
        sumv = wasm_f32x4_add(sumv,
                wasm_f32x4_mul(wasm_f32x4_convert_i32x4(wasm_i32x4_add(
                        wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0lfl, v1ll),
                                       wasm_i32x4_dot_i16x8(v0lfh, v1lh)),
                        wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0hfl, v1hl),
                                       wasm_i32x4_dot_i16x8(v0hfh, v1hh)))),
                    wasm_f32x4_splat(GGML_FP16_TO_FP32(x0->d) * y0->d)));
    }

    *s = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) +
         wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3) + summs;
#elif defined(__AVX2__)
    // Initialize accumulator with zeros
    __m256 acc = _mm256_setzero_ps();

    float summs = 0.0f;

    // Main loop
    for (int i = 0; i < nb; i++) {
        const __m256 dx = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d));

        summs += GGML_FP16_TO_FP32(x[i].m) * y[i].s;

        __m256i bx = bytes_from_nibbles_32(x[i].qs);
        __m256i bxhi = bytes_from_bits_32(x[i].qh);
        bxhi = _mm256_and_si256(bxhi, _mm256_set1_epi8(0x10));
        bx = _mm256_or_si256(bx, bxhi);

        const __m256 dy = _mm256_set1_ps(y[i].d);
        const __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);

        const __m256 q = mul_sum_us8_pairs_float(bx, by);

        acc = _mm256_fmadd_ps(q, _mm256_mul_ps(dx, dy), acc);
    }

    *s = hsum_float_8(acc) + summs;
#elif defined(__AVX__)
    // Initialize accumulator with zeros
    __m256 acc = _mm256_setzero_ps();
    __m128i mask = _mm_set1_epi8(0x10);

    float summs = 0.0f;

    // Main loop
    for (int i = 0; i < nb; i++) {
        const __m256 dx = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d));

        summs += GGML_FP16_TO_FP32(x[i].m) * y[i].s;

        __m256i bx = bytes_from_nibbles_32(x[i].qs);
        const __m256i bxhi = bytes_from_bits_32(x[i].qh);
        __m128i bxhil = _mm256_castsi256_si128(bxhi);
        __m128i bxhih = _mm256_extractf128_si256(bxhi, 1);
        bxhil = _mm_and_si128(bxhil, mask);
        bxhih = _mm_and_si128(bxhih, mask);
        __m128i bxl = _mm256_castsi256_si128(bx);
        __m128i bxh = _mm256_extractf128_si256(bx, 1);
        bxl = _mm_or_si128(bxl, bxhil);
        bxh = _mm_or_si128(bxh, bxhih);
        bx = MM256_SET_M128I(bxh, bxl);

        const __m256 dy = _mm256_set1_ps(y[i].d);
        const __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);

        const __m256 q = mul_sum_us8_pairs_float(bx, by);

        acc = _mm256_add_ps(_mm256_mul_ps(q, _mm256_mul_ps(dx, dy)), acc);
    }

    *s = hsum_float_8(acc) + summs;
#else
    // scalar
    float sumf = 0.0;

    for (int i = 0; i < nb; i++) {
        uint32_t qh;
        memcpy(&qh, x[i].qh, sizeof(qh));

        int sumi = 0;

        for (int j = 0; j < qk/2; ++j) {
            const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10;
            const uint8_t xh_1 = ((qh >> (j + 12))    ) & 0x10;

            const int32_t x0 = (x[i].qs[j] & 0xF) | xh_0;
            const int32_t x1 = (x[i].qs[j] >> 4) | xh_1;

            sumi += (x0 * y[i].qs[j]) + (x1 * y[i].qs[j + qk/2]);
        }

        sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s;
    }

    *s = sumf;
#endif
}

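// Q8_0 x Q8_0 is the simplest kernel: both operands are already int8, so each
// block contributes a plain integer dot product scaled by d_x*d_y (see the
// scalar fallback at the end of the function).
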
static void ggml_vec_dot_q8_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
    const int qk = QK8_0;
    const int nb = n / qk;

    assert(n % qk == 0);
    assert(nb % 2 == 0);

    const block_q8_0 * restrict x = vx;
    const block_q8_0 * restrict y = vy;

#if defined(__ARM_NEON)
    float32x4_t sumv0 = vdupq_n_f32(0.0f);
    float32x4_t sumv1 = vdupq_n_f32(0.0f);

    for (int i = 0; i < nb; i += 2) {
        const block_q8_0 * restrict x0 = &x[i + 0];
        const block_q8_0 * restrict x1 = &x[i + 1];
        const block_q8_0 * restrict y0 = &y[i + 0];
        const block_q8_0 * restrict y1 = &y[i + 1];

        const int8x16_t x0_0 = vld1q_s8(x0->qs);
        const int8x16_t x0_1 = vld1q_s8(x0->qs + 16);
        const int8x16_t x1_0 = vld1q_s8(x1->qs);
        const int8x16_t x1_1 = vld1q_s8(x1->qs + 16);

        // load y
        const int8x16_t y0_0 = vld1q_s8(y0->qs);
        const int8x16_t y0_1 = vld1q_s8(y0->qs + 16);
        const int8x16_t y1_0 = vld1q_s8(y1->qs);
        const int8x16_t y1_1 = vld1q_s8(y1->qs + 16);

#if defined(__ARM_FEATURE_DOTPROD)
        sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(
                        vdotq_s32(vdupq_n_s32(0), x0_0, y0_0),
                        vdotq_s32(vdupq_n_s32(0), x0_1, y0_1))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
        sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(
                        vdotq_s32(vdupq_n_s32(0), x1_0, y1_0),
                        vdotq_s32(vdupq_n_s32(0), x1_1, y1_1))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
#else
        const int16x8_t p0_0 = vmull_s8(vget_low_s8 (x0_0), vget_low_s8 (y0_0));
        const int16x8_t p0_1 = vmull_s8(vget_high_s8(x0_0), vget_high_s8(y0_0));
        const int16x8_t p0_2 = vmull_s8(vget_low_s8 (x0_1), vget_low_s8 (y0_1));
        const int16x8_t p0_3 = vmull_s8(vget_high_s8(x0_1), vget_high_s8(y0_1));
        const int16x8_t p1_0 = vmull_s8(vget_low_s8 (x1_0), vget_low_s8 (y1_0));
        const int16x8_t p1_1 = vmull_s8(vget_high_s8(x1_0), vget_high_s8(y1_0));
        const int16x8_t p1_2 = vmull_s8(vget_low_s8 (x1_1), vget_low_s8 (y1_1));
        const int16x8_t p1_3 = vmull_s8(vget_high_s8(x1_1), vget_high_s8(y1_1));

        const int32x4_t p0 = vaddq_s32(vpaddlq_s16(p0_0), vpaddlq_s16(p0_1));
        const int32x4_t p1 = vaddq_s32(vpaddlq_s16(p0_2), vpaddlq_s16(p0_3));
        const int32x4_t p2 = vaddq_s32(vpaddlq_s16(p1_0), vpaddlq_s16(p1_1));
        const int32x4_t p3 = vaddq_s32(vpaddlq_s16(p1_2), vpaddlq_s16(p1_3));

        sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(p0, p1)), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
        sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(p2, p3)), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
#endif
    }

    *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
#elif defined(__AVX2__) || defined(__AVX__)
    // Initialize accumulator with zeros
    __m256 acc = _mm256_setzero_ps();

    // Main loop
    for (int i = 0; i < nb; ++i) {
        // Compute combined scale for the block
        const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d));
        __m256i bx = _mm256_loadu_si256((const __m256i *)x[i].qs);
        __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);

        const __m256 q = mul_sum_i8_pairs_float(bx, by);

        // Multiply q with scale and accumulate
#if defined(__AVX2__)
        acc = _mm256_fmadd_ps( d, q, acc );
#else
        acc = _mm256_add_ps( _mm256_mul_ps( d, q ), acc );
#endif
    }

    *s = hsum_float_8(acc);
#else
    // scalar
    float sumf = 0.0;

    for (int i = 0; i < nb; i++) {
        int sumi = 0;

        for (int j = 0; j < qk; j++) {
            sumi += x[i].qs[j]*y[i].qs[j];
        }

        sumf += sumi*(GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d));
    }

    *s = sumf;
#endif
}

// compute GGML_VEC_DOT_UNROLL dot products at once
// xs - x row stride in bytes
inline static void ggml_vec_dot_f16_unroll(const int n, const int xs, float * restrict s, void * restrict xv, ggml_fp16_t * restrict y) {
    ggml_float sumf[GGML_VEC_DOT_UNROLL] = { 0.0 };

    ggml_fp16_t * restrict x[GGML_VEC_DOT_UNROLL];

    for (int i = 0; i < GGML_VEC_DOT_UNROLL; ++i) {
        x[i] = (ggml_fp16_t *) ((char *) xv + i*xs);
    }

#if defined(GGML_SIMD)
    const int np = (n & ~(GGML_F16_STEP - 1));

    GGML_F16_VEC sum[GGML_VEC_DOT_UNROLL][GGML_F16_ARR] = { { GGML_F16_VEC_ZERO } };

    GGML_F16_VEC ax[GGML_F16_ARR];
    GGML_F16_VEC ay[GGML_F16_ARR];

    for (int i = 0; i < np; i += GGML_F16_STEP) {
        for (int j = 0; j < GGML_F16_ARR; j++) {
            ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j);

            for (int k = 0; k < GGML_VEC_DOT_UNROLL; ++k) {
                ax[j] = GGML_F16_VEC_LOAD(x[k] + i + j*GGML_F16_EPR, j);

                sum[k][j] = GGML_F16_VEC_FMA(sum[k][j], ax[j], ay[j]);
            }
        }
    }

    // reduce sum0..sum3 to sum0
    for (int k = 0; k < GGML_VEC_DOT_UNROLL; ++k) {
        GGML_F16_VEC_REDUCE(sumf[k], sum[k]);
    }

    // leftovers
    for (int i = np; i < n; ++i) {
        for (int j = 0; j < GGML_VEC_DOT_UNROLL; ++j) {
            sumf[j] += (ggml_float)(GGML_FP16_TO_FP32(x[j][i])*GGML_FP16_TO_FP32(y[i]));
        }
    }
#else
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < GGML_VEC_DOT_UNROLL; ++j) {
            sumf[j] += (ggml_float)(GGML_FP16_TO_FP32(x[j][i])*GGML_FP16_TO_FP32(y[i]));
        }
    }
#endif

    for (int i = 0; i < GGML_VEC_DOT_UNROLL; ++i) {
        s[i] = sumf[i];
    }
}

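// y[i] += x[i]*v -- an axpy-style fused multiply-add over a row.
// Hypothetical usage sketch:
//
//     ggml_vec_mad_f32(n, y, x, 0.5f); // y <- y + 0.5*x
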
inline static void ggml_vec_mad_f32(const int n, float * restrict y, const float * restrict x, const float v) {
#if defined(GGML_SIMD)
    const int np = (n & ~(GGML_F32_STEP - 1));

    GGML_F32_VEC vx = GGML_F32_VEC_SET1(v);

    GGML_F32_VEC ax[GGML_F32_ARR];
    GGML_F32_VEC ay[GGML_F32_ARR];

    for (int i = 0; i < np; i += GGML_F32_STEP) {
        for (int j = 0; j < GGML_F32_ARR; j++) {
            ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR);
            ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
            ay[j] = GGML_F32_VEC_FMA(ay[j], ax[j], vx);

            GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]);
        }
    }

    // leftovers
    for (int i = np; i < n; ++i) {
        y[i] += x[i]*v;
    }
#else
    // scalar
    for (int i = 0; i < n; ++i) {
        y[i] += x[i]*v;
    }
#endif
}

//inline static void ggml_vec_scale_f32(const int n, float * y, const float   v) { for (int i = 0; i < n; ++i) y[i] *= v;          }
inline static void ggml_vec_scale_f32(const int n, float * y, const float v) {
#if defined(GGML_SIMD)
    const int np = (n & ~(GGML_F32_STEP - 1));

    GGML_F32_VEC vx = GGML_F32_VEC_SET1(v);

    GGML_F32_VEC ay[GGML_F32_ARR];

    for (int i = 0; i < np; i += GGML_F32_STEP) {
        for (int j = 0; j < GGML_F32_ARR; j++) {
            ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
            ay[j] = GGML_F32_VEC_MUL(ay[j], vx);

            GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]);
        }
    }

    // leftovers
    for (int i = np; i < n; ++i) {
        y[i] *= v;
    }
#else
    // scalar
    for (int i = 0; i < n; ++i) {
        y[i] *= v;
    }
#endif
}

inline static void ggml_vec_norm_f32 (const int n, float * s, const float * x) { ggml_vec_dot_f32(n, s, x, x); *s = sqrtf(*s); }
inline static void ggml_vec_sqr_f32  (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i]*x[i]; }
inline static void ggml_vec_sqrt_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = sqrtf(x[i]); }
inline static void ggml_vec_log_f32  (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = logf(x[i]); }
inline static void ggml_vec_abs_f32  (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = fabsf(x[i]); }
inline static void ggml_vec_sgn_f32  (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 1.f : ((x[i] < 0.f) ? -1.f : 0.f); }
inline static void ggml_vec_step_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 1.f : 0.f; }
inline static void ggml_vec_relu_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? x[i] : 0.f; }

static const float GELU_COEF_A     = 0.044715f;
static const float GELU_QUICK_COEF = -1.702f;
static const float SQRT_2_OVER_PI  = 0.79788456080286535587989211986876f;

inline static float ggml_gelu_f32(float x) {
    return 0.5f*x*(1.0f + tanhf(SQRT_2_OVER_PI*x*(1.0f + GELU_COEF_A*x*x)));
}

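// note: ggml_gelu_f32 uses the common tanh approximation of GELU,
//   x*Phi(x) ~= 0.5*x*(1 + tanh(sqrt(2/pi)*(x + 0.044715*x^3))),
// which is where SQRT_2_OVER_PI and GELU_COEF_A above come from
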
inline static void ggml_vec_gelu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
    const uint16_t * i16 = (const uint16_t *) x;
    for (int i = 0; i < n; ++i) {
        y[i] = table_gelu_f16[i16[i]];
    }
}

#ifdef GGML_GELU_FP16
inline static void ggml_vec_gelu_f32(const int n, float * y, const float * x) {
    uint16_t t;
    for (int i = 0; i < n; ++i) {
        ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
        memcpy(&t, &fp16, sizeof(uint16_t));
        y[i] = GGML_FP16_TO_FP32(table_gelu_f16[t]);
    }
}
#else
inline static void ggml_vec_gelu_f32(const int n, float * y, const float * x) {
    for (int i = 0; i < n; ++i) {
        y[i] = ggml_gelu_f32(x[i]);
    }
}
#endif

inline static float ggml_gelu_quick_f32(float x) {
    return x*(1.0f/(1.0f+expf(GELU_QUICK_COEF*x)));
}

//inline static void ggml_vec_gelu_quick_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
//    const uint16_t * i16 = (const uint16_t *) x;
//    for (int i = 0; i < n; ++i) {
//        y[i] = table_gelu_quick_f16[i16[i]];
//    }
//}

#ifdef GGML_GELU_QUICK_FP16
inline static void ggml_vec_gelu_quick_f32(const int n, float * y, const float * x) {
    uint16_t t;
    for (int i = 0; i < n; ++i) {
        ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
        memcpy(&t, &fp16, sizeof(uint16_t));
        y[i] = GGML_FP16_TO_FP32(table_gelu_quick_f16[t]);
    }
}
#else
inline static void ggml_vec_gelu_quick_f32(const int n, float * y, const float * x) {
    for (int i = 0; i < n; ++i) {
        y[i] = ggml_gelu_quick_f32(x[i]);
    }
}
#endif

// Sigmoid Linear Unit (SiLU) function
inline static float ggml_silu_f32(float x) {
    return x/(1.0f + expf(-x));
}

//inline static void ggml_vec_silu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
//    const uint16_t * i16 = (const uint16_t *) x;
//    for (int i = 0; i < n; ++i) {
//        y[i] = table_silu_f16[i16[i]];
//    }
//}

#ifdef GGML_SILU_FP16
inline static void ggml_vec_silu_f32(const int n, float * y, const float * x) {
    uint16_t t;
    for (int i = 0; i < n; ++i) {
        ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
        memcpy(&t, &fp16, sizeof(uint16_t));
        y[i] = GGML_FP16_TO_FP32(table_silu_f16[t]);
    }
}
#else
inline static void ggml_vec_silu_f32(const int n, float * y, const float * x) {
    for (int i = 0; i < n; ++i) {
        y[i] = ggml_silu_f32(x[i]);
    }
}
#endif

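// derivative used below: with s = sigmoid(x) = 1/(1 + exp(-x)),
//   d/dx silu(x) = d/dx (x*s) = s + x*s*(1 - s) = s*(1 + x*(1 - s))
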
inline static float ggml_silu_backward_f32(float x, float dy) {
    const float s = 1.0f/(1.0f + expf(-x));
    return dy*s*(1.0f + x*(1.0f - s));
}

#ifdef GGML_SILU_FP16
inline static void ggml_vec_silu_backward_f32(const int n, float * dx, const float * x, const float * dy) {
    for (int i = 0; i < n; ++i) {
        // the forward pass computed silu from the f16-rounded value of x[i], not x[i] itself,
        // so take the derivative at the f16 value of x[i]:
        ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
        float usedx = GGML_FP16_TO_FP32(fp16);
        dx[i] = ggml_silu_backward_f32(usedx, dy[i]);
    }
}
#else
inline static void ggml_vec_silu_backward_f32(const int n, float * dx, const float * x, const float * dy) {
    for (int i = 0; i < n; ++i) {
        dx[i] = ggml_silu_backward_f32(x[i], dy[i]);
    }
}
#endif

inline static void ggml_vec_sum_f32(const int n, float * s, const float * x) {
#ifndef GGML_USE_ACCELERATE
    ggml_float sum = 0.0;
    for (int i = 0; i < n; ++i) {
        sum += (ggml_float)x[i];
    }
    *s = sum;
#else
    vDSP_sve(x, 1, s, n);
#endif
}

inline static void ggml_vec_sum_ggf(const int n, ggml_float * s, const float * x) {
    ggml_float sum = 0.0;
    for (int i = 0; i < n; ++i) {
        sum += (ggml_float)x[i];
    }
    *s = sum;
}

inline static void ggml_vec_max_f32(const int n, float * s, const float * x) {
#ifndef GGML_USE_ACCELERATE
    float max = -INFINITY;
    for (int i = 0; i < n; ++i) {
        max = MAX(max, x[i]);
    }
    *s = max;
#else
    vDSP_maxv(x, 1, s, n);
#endif
}

inline static void ggml_vec_norm_inv_f32(const int n, float * s, const float * x) {
    ggml_vec_norm_f32(n, s, x);
    *s = 1.f/(*s);
}

//
// data types
//

static const int GGML_BLCK_SIZE[GGML_TYPE_COUNT] = {
    [GGML_TYPE_F32]  = 1,
    [GGML_TYPE_F16]  = 1,
    [GGML_TYPE_Q4_0] = QK4_0,
    [GGML_TYPE_Q4_1] = QK4_1,
    [GGML_TYPE_Q5_0] = QK5_0,
    [GGML_TYPE_Q5_1] = QK5_1,
    [GGML_TYPE_Q8_0] = QK8_0,
    [GGML_TYPE_Q8_1] = QK8_1,
#ifdef GGML_USE_K_QUANTS
    [GGML_TYPE_Q2_K] = QK_K,
    [GGML_TYPE_Q3_K] = QK_K,
    [GGML_TYPE_Q4_K] = QK_K,
    [GGML_TYPE_Q5_K] = QK_K,
    [GGML_TYPE_Q6_K] = QK_K,
    [GGML_TYPE_Q8_K] = QK_K,
#endif
    [GGML_TYPE_I8]   = 1,
    [GGML_TYPE_I16]  = 1,
    [GGML_TYPE_I32]  = 1,
};
static_assert(GGML_TYPE_COUNT == 19, "GGML_BLCK_SIZE is outdated");

static const size_t GGML_TYPE_SIZE[GGML_TYPE_COUNT] = {
    [GGML_TYPE_F32]  = sizeof(float),
    [GGML_TYPE_F16]  = sizeof(ggml_fp16_t),
    [GGML_TYPE_Q4_0] = sizeof(block_q4_0),
    [GGML_TYPE_Q4_1] = sizeof(block_q4_1),
    [GGML_TYPE_Q5_0] = sizeof(block_q5_0),
    [GGML_TYPE_Q5_1] = sizeof(block_q5_1),
    [GGML_TYPE_Q8_0] = sizeof(block_q8_0),
    [GGML_TYPE_Q8_1] = sizeof(block_q8_1),
#ifdef GGML_USE_K_QUANTS
    [GGML_TYPE_Q2_K] = sizeof(block_q2_K),
    [GGML_TYPE_Q3_K] = sizeof(block_q3_K),
    [GGML_TYPE_Q4_K] = sizeof(block_q4_K),
    [GGML_TYPE_Q5_K] = sizeof(block_q5_K),
    [GGML_TYPE_Q6_K] = sizeof(block_q6_K),
    [GGML_TYPE_Q8_K] = sizeof(block_q8_K),
#endif
    [GGML_TYPE_I8]   = sizeof(int8_t),
    [GGML_TYPE_I16]  = sizeof(int16_t),
    [GGML_TYPE_I32]  = sizeof(int32_t),
};
static_assert(GGML_TYPE_COUNT == 19, "GGML_TYPE_SIZE is outdated");

static const char * GGML_TYPE_NAME[GGML_TYPE_COUNT] = {
    [GGML_TYPE_F32]  = "f32",
    [GGML_TYPE_F16]  = "f16",
    [GGML_TYPE_Q4_0] = "q4_0",
    [GGML_TYPE_Q4_1] = "q4_1",
    [GGML_TYPE_Q5_0] = "q5_0",
    [GGML_TYPE_Q5_1] = "q5_1",
    [GGML_TYPE_Q8_0] = "q8_0",
    [GGML_TYPE_Q8_1] = "q8_1",
    [GGML_TYPE_Q2_K] = "q2_K",
    [GGML_TYPE_Q3_K] = "q3_K",
    [GGML_TYPE_Q4_K] = "q4_K",
    [GGML_TYPE_Q5_K] = "q5_K",
    [GGML_TYPE_Q6_K] = "q6_K",
    [GGML_TYPE_Q8_K] = "q8_K",
    [GGML_TYPE_I8]   = "i8",
    [GGML_TYPE_I16]  = "i16",
    [GGML_TYPE_I32]  = "i32",
};
static_assert(GGML_TYPE_COUNT == 19, "GGML_TYPE_NAME is outdated");

static bool GGML_IS_QUANTIZED[GGML_TYPE_COUNT] = {
    [GGML_TYPE_F32]  = false,
    [GGML_TYPE_F16]  = false,
    [GGML_TYPE_Q4_0] = true,
    [GGML_TYPE_Q4_1] = true,
    [GGML_TYPE_Q5_0] = true,
    [GGML_TYPE_Q5_1] = true,
    [GGML_TYPE_Q8_0] = true,
    [GGML_TYPE_Q8_1] = true,
    [GGML_TYPE_Q2_K] = true,
    [GGML_TYPE_Q3_K] = true,
    [GGML_TYPE_Q4_K] = true,
    [GGML_TYPE_Q5_K] = true,
    [GGML_TYPE_Q6_K] = true,
    [GGML_TYPE_Q8_K] = true,
    [GGML_TYPE_I8]   = false,
    [GGML_TYPE_I16]  = false,
    [GGML_TYPE_I32]  = false,
};
static_assert(GGML_TYPE_COUNT == 19, "GGML_IS_QUANTIZED is outdated");

static const char * GGML_OP_NAME[GGML_OP_COUNT] = {
    "NONE",
    "DUP",
    "ADD",
    "ADD1",
    "ACC",
    "SUB",
    "MUL",
    "DIV",
    "SQR",
    "SQRT",
    "LOG",
    "SUM",
    "SUM_ROWS",
    "MEAN",
    "REPEAT",
    "REPEAT_BACK",
    "ABS",
    "SGN",
    "NEG",
    "STEP",
    "RELU",
    "GELU",
    "GELU_QUICK",
    "SILU",
    "SILU_BACK",
    "NORM",
    "RMS_NORM",
    "RMS_NORM_BACK",
    "MUL_MAT",
    "OUT_PROD",
    "SCALE",
    "SET",
    "CPY",
    "CONT",
    "RESHAPE",
    "VIEW",
    "PERMUTE",
    "TRANSPOSE",
    "GET_ROWS",
    "GET_ROWS_BACK",
    "DIAG",
    "DIAG_MASK_INF",
    "DIAG_MASK_ZERO",
    "SOFT_MAX",
    "SOFT_MAX_BACK",
    "ROPE",
    "ROPE_BACK",
    "ALIBI",
    "CLAMP",
    "CONV_1D_S1_PH",
    "CONV_1D_S2_PH",
    "CONV_2D_SK_P0",
    "FLASH_ATTN",
    "FLASH_FF",
    "FLASH_ATTN_BACK",
    "WIN_PART",
    "WIN_UNPART",
    "MAP_UNARY",
    "MAP_BINARY",
    "MAP_CUSTOM1",
    "MAP_CUSTOM2",
    "MAP_CUSTOM3",
    "CROSS_ENTROPY_LOSS",
    "CROSS_ENTROPY_LOSS_BACK",
};
static_assert(GGML_OP_COUNT == 64, "GGML_OP_COUNT != 64");

static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = {
    "none",
    "x",
    "x+y",
    "x+y",
    "view(x,nb,offset)+=y->x",
    "x-y",
    "x*y",
    "x/y",
    "x^2",
    "√x",
    "log(x)",
    "Σx",
    "Σx_k",
    "Σx/n",
    "repeat(x)",
    "repeat_back(x)",
    "abs(x)",
    "sgn(x)",
    "-x",
    "step(x)",
    "relu(x)",
    "gelu(x)",
    "gelu_quick(x)",
    "silu(x)",
    "silu_back(x)",
    "norm(x)",
    "rms_norm(x)",
    "rms_norm_back(x)",
    "X*Y",
    "X*Y",
    "x*v",
    "y-\\>view(x)",
    "x-\\>y",
    "cont(x)",
    "reshape(x)",
    "view(x)",
    "permute(x)",
    "transpose(x)",
    "get_rows(x)",
    "get_rows_back(x)",
    "diag(x)",
    "diag_mask_inf(x)",
    "diag_mask_zero(x)",
    "soft_max(x)",
    "soft_max_back(x)",
    "rope(x)",
    "rope_back(x)",
    "alibi(x)",
    "clamp(x)",
    "conv_1d_s1_ph(x)",
    "conv_1d_s2_ph(x)",
    "conv_2d_sk_p0(x)",
    "flash_attn(x)",
    "flash_ff(x)",
    "flash_attn_back(x)",
    "win_part(x)",
    "win_unpart(x)",
    "f(x)",
    "f(x,y)",
    "custom(x)",
    "custom(x,y)",
    "custom(x,y,z)",
    "cross_entropy_loss(x,y)",
    "cross_entropy_loss_back(x,y)",
};
static_assert(GGML_OP_COUNT == 64, "GGML_OP_COUNT != 64");

static_assert(sizeof(struct ggml_object)%GGML_MEM_ALIGN == 0, "ggml_object size must be a multiple of GGML_MEM_ALIGN");
static_assert(sizeof(struct ggml_tensor)%GGML_MEM_ALIGN == 0, "ggml_tensor size must be a multiple of GGML_MEM_ALIGN");

//
// ggml context
//

struct ggml_context {
    size_t mem_size;
    void * mem_buffer;
    bool   mem_buffer_owned;
    bool   no_alloc;
    bool   no_alloc_save; // this is used to save the no_alloc state when using scratch buffers

    int    n_objects;

    struct ggml_object * objects_begin;
    struct ggml_object * objects_end;

    struct ggml_scratch scratch;
    struct ggml_scratch scratch_save;
};

struct ggml_context_container {
    bool used;

    struct ggml_context context;
};

//
// NUMA support
//

#define GGML_NUMA_MAX_NODES 8
#define GGML_NUMA_MAX_CPUS 512

struct ggml_numa_node {
    uint32_t cpus[GGML_NUMA_MAX_CPUS]; // hardware threads on this node
    uint32_t n_cpus;
};

struct ggml_numa_nodes {
    struct ggml_numa_node nodes[GGML_NUMA_MAX_NODES];
    uint32_t n_nodes;
    uint32_t total_cpus; // hardware threads on system
};

//
// ggml state
//

struct ggml_state {
    struct ggml_context_container contexts[GGML_MAX_CONTEXTS];
    struct ggml_numa_nodes numa;
};

// global state
static struct ggml_state g_state;
static atomic_int g_state_barrier = 0;

// barrier via spin lock
inline static void ggml_critical_section_start(void) {
    int processing = atomic_fetch_add(&g_state_barrier, 1);

    while (processing > 0) {
        // wait for other threads to finish
        atomic_fetch_sub(&g_state_barrier, 1);
        sched_yield(); // TODO: reconsider this
        processing = atomic_fetch_add(&g_state_barrier, 1);
    }
}

// TODO: make this somehow automatically executed
//       some sort of "sentry" mechanism
inline static void ggml_critical_section_end(void) {
    atomic_fetch_sub(&g_state_barrier, 1);
}

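// hypothetical usage sketch -- the two calls above are expected to be paired
// around any access to the shared g_state:
//
//     ggml_critical_section_start();
//     // ... read/modify g_state ...
//     ggml_critical_section_end();
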
void ggml_numa_init(void) {
    if (g_state.numa.n_nodes > 0) {
        fprintf(stderr, "ggml_numa_init: NUMA already initialized\n");

        return;
    }

#ifdef __linux__
    struct stat st;
    char path[256];
    int rv;

    // enumerate nodes
    while (g_state.numa.n_nodes < GGML_NUMA_MAX_NODES) {
        rv = snprintf(path, sizeof(path), "/sys/devices/system/node/node%u", g_state.numa.n_nodes);
        GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path));
        if (stat(path, &st) != 0) { break; }
        ++g_state.numa.n_nodes;
    }

    // enumerate CPUs
    while (g_state.numa.total_cpus < GGML_NUMA_MAX_CPUS) {
        rv = snprintf(path, sizeof(path), "/sys/devices/system/cpu/cpu%u", g_state.numa.total_cpus);
        GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path));
        if (stat(path, &st) != 0) { break; }
        ++g_state.numa.total_cpus;
    }

    GGML_PRINT_DEBUG("found %u numa nodes, %u CPUs\n", g_state.numa.n_nodes, g_state.numa.total_cpus);

    if (g_state.numa.n_nodes < 1 || g_state.numa.total_cpus < 1) {
        g_state.numa.n_nodes = 0;
        return;
    }

    for (uint32_t n = 0; n < g_state.numa.n_nodes; ++n) {
        struct ggml_numa_node * node = &g_state.numa.nodes[n];
        GGML_PRINT_DEBUG("CPUs on node %u:", n);
        node->n_cpus = 0;
        for (uint32_t c = 0; c < g_state.numa.total_cpus; ++c) {
            rv = snprintf(path, sizeof(path), "/sys/devices/system/node/node%u/cpu%u", n, c);
            GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path));
            if (stat(path, &st) == 0) {
                node->cpus[node->n_cpus++] = c;
                GGML_PRINT_DEBUG(" %u", c);
            }
        }
        GGML_PRINT_DEBUG("\n");
    }

    if (ggml_is_numa()) {
        FILE *fptr = fopen("/proc/sys/kernel/numa_balancing", "r");
        if (fptr != NULL) {
            char buf[42];
            if (fgets(buf, sizeof(buf), fptr) && strncmp(buf, "0\n", sizeof(buf)) != 0) {
                GGML_PRINT("WARNING: /proc/sys/kernel/numa_balancing is enabled, this has been observed to impair performance\n");
            }
            fclose(fptr);
        }
    }
#else
    // TODO
#endif
}

bool ggml_is_numa(void) {
    return g_state.numa.n_nodes > 1;
}

////////////////////////////////////////////////////////////////////////////////

void ggml_print_object(const struct ggml_object * obj) {
    GGML_PRINT(" - ggml_object: offset = %zu, size = %zu, next = %p\n",
            obj->offs, obj->size, (const void *) obj->next);
}

void ggml_print_objects(const struct ggml_context * ctx) {
    struct ggml_object * obj = ctx->objects_begin;

    GGML_PRINT("%s: objects in context %p:\n", __func__, (const void *) ctx);

    while (obj != NULL) {
        ggml_print_object(obj);
        obj = obj->next;
    }

    GGML_PRINT("%s: --- end ---\n", __func__);
}

int64_t ggml_nelements(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return tensor->ne[0]*tensor->ne[1]*tensor->ne[2]*tensor->ne[3];
}

int64_t ggml_nrows(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return tensor->ne[1]*tensor->ne[2]*tensor->ne[3];
}

size_t ggml_nbytes(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    // this should handle cases where the tensor is not contiguous in memory
    // probably just:
    //
    //     return tensor->ne[3]*tensor->nb[3]
    //
    // is enough, but just in case, adding the second part
    return MAX(tensor->ne[3]*tensor->nb[3], (ggml_nelements(tensor)*GGML_TYPE_SIZE[tensor->type])/GGML_BLCK_SIZE[tensor->type]);
}
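
// Worked example (illustrative): for a contiguous 2D F32 tensor with
// ne = {4096, 32, 1, 1}, the strides are nb = {4, 16384, 524288, 524288}, so
// ne[3]*nb[3] == 524288 == 4096*32*sizeof(float). For quantized types the
// second operand divides by GGML_BLCK_SIZE because elements are stored in
// blocks rather than individually.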
size_t ggml_nbytes_split(const struct ggml_tensor * tensor, int nrows_split) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return (nrows_split*tensor->ne[0]*GGML_TYPE_SIZE[tensor->type])/GGML_BLCK_SIZE[tensor->type];
}
int ggml_blck_size(enum ggml_type type) {
    return GGML_BLCK_SIZE[type];
}

size_t ggml_type_size(enum ggml_type type) {
    return GGML_TYPE_SIZE[type];
}

float ggml_type_sizef(enum ggml_type type) {
    return ((float)(GGML_TYPE_SIZE[type]))/GGML_BLCK_SIZE[type];
}

const char * ggml_type_name(enum ggml_type type) {
    return GGML_TYPE_NAME[type];
}

const char * ggml_op_name(enum ggml_op op) {
    return GGML_OP_NAME[op];
}

size_t ggml_element_size(const struct ggml_tensor * tensor) {
    return GGML_TYPE_SIZE[tensor->type];
}
static inline bool ggml_is_scalar(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return tensor->ne[0] == 1 && tensor->ne[1] == 1 && tensor->ne[2] == 1 && tensor->ne[3] == 1;
}

static inline bool ggml_is_vector(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return tensor->ne[1] == 1 && tensor->ne[2] == 1 && tensor->ne[3] == 1;
}

static inline bool ggml_is_matrix(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return tensor->ne[2] == 1 && tensor->ne[3] == 1;
}

static inline bool ggml_can_mul_mat(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return
        (t0->ne[0] == t1->ne[0]) &&
        (t0->ne[2] == t1->ne[2]) &&
        (t0->ne[3] == t1->ne[3]);
}

static inline bool ggml_can_out_prod(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return
        (t0->ne[1] == t1->ne[1]) &&
        (t0->ne[2] == t1->ne[2]) &&
        (t0->ne[3] == t1->ne[3]);
}

bool ggml_is_quantized(enum ggml_type type) {
    return GGML_IS_QUANTIZED[type];
}
enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype) {
    enum ggml_type wtype = GGML_TYPE_COUNT;

    switch (ftype) {
        case GGML_FTYPE_ALL_F32:              wtype = GGML_TYPE_F32;   break;
        case GGML_FTYPE_MOSTLY_F16:           wtype = GGML_TYPE_F16;   break;
        case GGML_FTYPE_MOSTLY_Q4_0:          wtype = GGML_TYPE_Q4_0;  break;
        case GGML_FTYPE_MOSTLY_Q4_1:          wtype = GGML_TYPE_Q4_1;  break;
        case GGML_FTYPE_MOSTLY_Q5_0:          wtype = GGML_TYPE_Q5_0;  break;
        case GGML_FTYPE_MOSTLY_Q5_1:          wtype = GGML_TYPE_Q5_1;  break;
        case GGML_FTYPE_MOSTLY_Q8_0:          wtype = GGML_TYPE_Q8_0;  break;
        case GGML_FTYPE_MOSTLY_Q2_K:          wtype = GGML_TYPE_Q2_K;  break;
        case GGML_FTYPE_MOSTLY_Q3_K:          wtype = GGML_TYPE_Q3_K;  break;
        case GGML_FTYPE_MOSTLY_Q4_K:          wtype = GGML_TYPE_Q4_K;  break;
        case GGML_FTYPE_MOSTLY_Q5_K:          wtype = GGML_TYPE_Q5_K;  break;
        case GGML_FTYPE_MOSTLY_Q6_K:          wtype = GGML_TYPE_Q6_K;  break;
        case GGML_FTYPE_UNKNOWN:              wtype = GGML_TYPE_COUNT; break;
        case GGML_FTYPE_MOSTLY_Q4_1_SOME_F16: wtype = GGML_TYPE_COUNT; break;
    }

    GGML_ASSERT(wtype != GGML_TYPE_COUNT);

    return wtype;
}
size_t ggml_tensor_overhead(void) {
    return GGML_OBJECT_SIZE + GGML_TENSOR_SIZE + 16;
}

bool ggml_is_transposed(const struct ggml_tensor * tensor) {
    return tensor->nb[0] > tensor->nb[1];
}

bool ggml_is_contiguous(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return
        tensor->nb[0] == GGML_TYPE_SIZE[tensor->type] &&
        tensor->nb[1] == (tensor->nb[0]*tensor->ne[0])/GGML_BLCK_SIZE[tensor->type] &&
        tensor->nb[2] == tensor->nb[1]*tensor->ne[1] &&
        tensor->nb[3] == tensor->nb[2]*tensor->ne[2];
}
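
// Example (illustrative): a freshly created F32 tensor with ne = {3, 2, 1, 1}
// gets nb = {4, 12, 24, 24}, which satisfies all four equalities above. After
// a permute or transpose the strides no longer line up and the check fails,
// even though the data itself is untouched.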
bool ggml_is_permuted(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return tensor->nb[0] > tensor->nb[1] || tensor->nb[1] > tensor->nb[2] || tensor->nb[2] > tensor->nb[3];
}

static inline bool ggml_is_padded_1d(const struct ggml_tensor * tensor) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return
        tensor->nb[0] == GGML_TYPE_SIZE[tensor->type] &&
        tensor->nb[2] == tensor->nb[1]*tensor->ne[1] &&
        tensor->nb[3] == tensor->nb[2]*tensor->ne[2];
}
static inline bool ggml_are_same_shape(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return
        (t0->ne[0] == t1->ne[0]) &&
        (t0->ne[1] == t1->ne[1]) &&
        (t0->ne[2] == t1->ne[2]) &&
        (t0->ne[3] == t1->ne[3]);
}

// check if t1 can be represented as a repetition of t0
static inline bool ggml_can_repeat(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return
        (t1->ne[0]%t0->ne[0] == 0) &&
        (t1->ne[1]%t0->ne[1] == 0) &&
        (t1->ne[2]%t0->ne[2] == 0) &&
        (t1->ne[3]%t0->ne[3] == 0);
}

static inline bool ggml_can_repeat_rows(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
    static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");

    return (t0->ne[0] == t1->ne[0]) && ggml_can_repeat(t0, t1);
}
static inline int ggml_up32(int n) {
    return (n + 31) & ~31;
}

//static inline int ggml_up64(int n) {
//    return (n + 63) & ~63;
//}

static inline int ggml_up(int n, int m) {
    // assert m is a power of 2
    GGML_ASSERT((m & (m - 1)) == 0);
    return (n + m - 1) & ~(m - 1);
}
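
// Example (illustrative): ggml_up(100, 32) == 128. The bit trick works because
// for a power of two m, ~(m - 1) clears the low log2(m) bits, so adding m - 1
// first rounds any non-multiple up to the next multiple of m.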
// assert that pointer is aligned to GGML_MEM_ALIGN
#define ggml_assert_aligned(ptr) \
    GGML_ASSERT(((uintptr_t) (ptr))%GGML_MEM_ALIGN == 0)
////////////////////////////////////////////////////////////////////////////////

struct ggml_context * ggml_init(struct ggml_init_params params) {
    // make this function thread safe
    ggml_critical_section_start();

    static bool is_first_call = true;

    if (is_first_call) {
        // initialize time system (required on Windows)
        ggml_time_init();

        // initialize GELU, Quick GELU, SILU and EXP F32 tables
        {
            const uint64_t t_start = ggml_time_us(); UNUSED(t_start);

            ggml_fp16_t ii;
            for (int i = 0; i < (1 << 16); ++i) {
                uint16_t ui = i;
                memcpy(&ii, &ui, sizeof(ii));
                const float f = table_f32_f16[i] = GGML_COMPUTE_FP16_TO_FP32(ii);
                table_gelu_f16[i]       = GGML_FP32_TO_FP16(ggml_gelu_f32(f));
                table_gelu_quick_f16[i] = GGML_FP32_TO_FP16(ggml_gelu_quick_f32(f));
                table_silu_f16[i]       = GGML_FP32_TO_FP16(ggml_silu_f32(f));
                table_exp_f16[i]        = GGML_FP32_TO_FP16(expf(f));
            }

            const uint64_t t_end = ggml_time_us(); UNUSED(t_end);

            GGML_PRINT_DEBUG("%s: GELU, Quick GELU, SILU and EXP tables initialized in %f ms\n", __func__, (t_end - t_start)/1000.0f);
        }

        // initialize g_state
        {
            const uint64_t t_start = ggml_time_us(); UNUSED(t_start);

            g_state = (struct ggml_state) {
                /*.contexts =*/ { { 0 } },
                /*.numa =*/ {
                    .n_nodes = 0,
                    .total_cpus = 0,
                },
            };

            for (int i = 0; i < GGML_MAX_CONTEXTS; ++i) {
                g_state.contexts[i].used = false;
            }

            const uint64_t t_end = ggml_time_us(); UNUSED(t_end);

            GGML_PRINT_DEBUG("%s: g_state initialized in %f ms\n", __func__, (t_end - t_start)/1000.0f);
        }

#if defined(GGML_USE_CUBLAS)
        ggml_init_cublas();
#elif defined(GGML_USE_CLBLAST)
        ggml_cl_init();
#endif

        is_first_call = false;
    }

    // find non-used context in g_state
    struct ggml_context * ctx = NULL;

    for (int i = 0; i < GGML_MAX_CONTEXTS; i++) {
        if (!g_state.contexts[i].used) {
            g_state.contexts[i].used = true;
            ctx = &g_state.contexts[i].context;

            GGML_PRINT_DEBUG("%s: found unused context %d\n", __func__, i);
            break;
        }
    }

    if (ctx == NULL) {
        GGML_PRINT_DEBUG("%s: no unused context found\n", __func__);

        ggml_critical_section_end();

        return NULL;
    }

    const size_t mem_size = (params.mem_size + GGML_MEM_ALIGN - 1) & ~(GGML_MEM_ALIGN - 1);

    *ctx = (struct ggml_context) {
        /*.mem_size         =*/ mem_size,
        /*.mem_buffer       =*/ params.mem_buffer ? params.mem_buffer : GGML_ALIGNED_MALLOC(mem_size),
        /*.mem_buffer_owned =*/ params.mem_buffer ? false : true,
        /*.no_alloc         =*/ params.no_alloc,
        /*.no_alloc_save    =*/ params.no_alloc,
        /*.n_objects        =*/ 0,
        /*.objects_begin    =*/ NULL,
        /*.objects_end      =*/ NULL,
        /*.scratch          =*/ { 0, 0, NULL, },
        /*.scratch_save     =*/ { 0, 0, NULL, },
    };

    GGML_ASSERT(ctx->mem_buffer != NULL);

    ggml_assert_aligned(ctx->mem_buffer);

    GGML_PRINT_DEBUG("%s: context initialized\n", __func__);

    ggml_critical_section_end();

    return ctx;
}
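
// Usage sketch (illustrative only; sizes are arbitrary): the intended
// lifecycle of a context is init -> build tensors/graphs -> free:
//
//   struct ggml_init_params params = {
//       /*.mem_size   =*/ 16*1024*1024, // arena for objects + tensor data
//       /*.mem_buffer =*/ NULL,         // let ggml allocate (and own) it
//       /*.no_alloc   =*/ false,        // tensors get data inside the arena
//   };
//
//   struct ggml_context * ctx = ggml_init(params);
//   GGML_ASSERT(ctx != NULL); // NULL means no free context slot was found
//   // ... build tensors and graphs ...
//   ggml_free(ctx);           // releases the slot and the owned buffer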
void ggml_free(struct ggml_context * ctx) {
    // make this function thread safe
    ggml_critical_section_start();

    bool found = false;

    for (int i = 0; i < GGML_MAX_CONTEXTS; i++) {
        if (&g_state.contexts[i].context == ctx) {
            g_state.contexts[i].used = false;

            GGML_PRINT_DEBUG("%s: context %d with %d objects has been freed. memory used = %zu\n",
                    __func__, i, ctx->n_objects, ctx->objects_end->offs + ctx->objects_end->size);

            if (ctx->mem_buffer_owned) {
                GGML_ALIGNED_FREE(ctx->mem_buffer);
            }

            found = true;
            break;
        }
    }

    if (!found) {
        GGML_PRINT_DEBUG("%s: context not found\n", __func__);
    }

    ggml_critical_section_end();
}
size_t ggml_used_mem(const struct ggml_context * ctx) {
    return ctx->objects_end == NULL ? 0 : ctx->objects_end->offs + ctx->objects_end->size;
}

size_t ggml_set_scratch(struct ggml_context * ctx, struct ggml_scratch scratch) {
    const size_t result = ctx->scratch.data ? ctx->scratch.offs : 0;

    ctx->scratch = scratch;

    return result;
}

void ggml_set_no_alloc(struct ggml_context * ctx, bool no_alloc) {
    ctx->no_alloc = no_alloc;
}

void * ggml_get_mem_buffer(const struct ggml_context * ctx) {
    return ctx->mem_buffer;
}

size_t ggml_get_mem_size(const struct ggml_context * ctx) {
    return ctx->mem_size;
}

size_t ggml_get_max_tensor_size(const struct ggml_context * ctx) {
    size_t max_size = 0;

    struct ggml_object * obj = ctx->objects_begin;

    while (obj != NULL) {
        struct ggml_tensor * tensor = (struct ggml_tensor *) ((char *) ctx->mem_buffer + obj->offs);

        const size_t size = ggml_nbytes(tensor);

        if (max_size < size) {
            max_size = size;
        }

        obj = obj->next;
    }

    return max_size;
}
// IMPORTANT:
// when creating "opt" tensors, always save and load the scratch buffer
// this is an error prone process, but it is necessary to support inplace
// operators when using scratch buffers
// TODO: implement a better way
void ggml_scratch_save(struct ggml_context * ctx) {
    // this is needed to allow opt tensors to store their data
    // TODO: again, need to find a better way
    ctx->no_alloc_save = ctx->no_alloc;
    ctx->no_alloc      = false;

    ctx->scratch_save = ctx->scratch;
    ctx->scratch.data = NULL;
}

void ggml_scratch_load(struct ggml_context * ctx) {
    ctx->no_alloc = ctx->no_alloc_save;
    ctx->scratch  = ctx->scratch_save;
}
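
// Usage sketch (illustrative): the save/load pair brackets allocations that
// must not land in the scratch buffer, e.g. the small "opt" parameter tensors
// created by ggml_acc_impl() and ggml_set_impl() further below:
//
//   ggml_scratch_save(ctx);   // disable scratch, force real allocation
//   struct ggml_tensor * c = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 5);
//   ggml_scratch_load(ctx);   // restore the previous scratch state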
////////////////////////////////////////////////////////////////////////////////

struct ggml_tensor * ggml_new_tensor_impl(
        struct ggml_context * ctx,
        enum   ggml_type      type,
        int                   n_dims,
        const int64_t       * ne,
        void                * data) {
    // always insert objects at the end of the context's memory pool
    struct ggml_object * obj_cur = ctx->objects_end;

    const size_t cur_offs = obj_cur == NULL ? 0 : obj_cur->offs;
    const size_t cur_size = obj_cur == NULL ? 0 : obj_cur->size;
    const size_t cur_end  = cur_offs + cur_size;

    size_t size_needed = 0;

    if (data == NULL && !ctx->no_alloc) {
        size_needed += GGML_TYPE_SIZE[type]*(ne[0]/GGML_BLCK_SIZE[type]);
        for (int i = 1; i < n_dims; i++) {
            size_needed *= ne[i];
        }
        // align to GGML_MEM_ALIGN
        size_needed = ((size_needed + GGML_MEM_ALIGN - 1)/GGML_MEM_ALIGN)*GGML_MEM_ALIGN;
    }

    char * const mem_buffer = ctx->mem_buffer;
    struct ggml_object * const obj_new = (struct ggml_object *)(mem_buffer + cur_end);

    if (ctx->scratch.data == NULL || data != NULL) {
        size_needed += GGML_TENSOR_SIZE;

        if (cur_end + size_needed + GGML_OBJECT_SIZE > ctx->mem_size) {
            GGML_PRINT("%s: not enough space in the context's memory pool (needed %zu, available %zu)\n",
                    __func__, cur_end + size_needed + GGML_OBJECT_SIZE, ctx->mem_size);
            assert(false);
            return NULL;
        }

        *obj_new = (struct ggml_object) {
            .offs = cur_end + GGML_OBJECT_SIZE,
            .size = size_needed,
            .next = NULL,
        };
    } else {
        if (ctx->scratch.offs + size_needed > ctx->scratch.size) {
            GGML_PRINT("%s: not enough space in the scratch memory pool (needed %zu, available %zu)\n",
                    __func__, ctx->scratch.offs + size_needed, ctx->scratch.size);
            assert(false);
            return NULL;
        }

        if (cur_end + GGML_TENSOR_SIZE + GGML_OBJECT_SIZE > ctx->mem_size) {
            GGML_PRINT("%s: not enough space in the context's memory pool (needed %zu, available %zu)\n",
                    __func__, cur_end + GGML_TENSOR_SIZE + GGML_OBJECT_SIZE, ctx->mem_size);
            assert(false);
            return NULL;
        }

        data = (char * const) ctx->scratch.data + ctx->scratch.offs;

        *obj_new = (struct ggml_object) {
            .offs = cur_end + GGML_OBJECT_SIZE,
            .size = GGML_TENSOR_SIZE,
            .next = NULL,
        };

        //printf("scratch offs = %zu, size_needed = %zu\n", ctx->scratch.offs, size_needed);

        ctx->scratch.offs += size_needed;
    }

    if (obj_cur != NULL) {
        obj_cur->next = obj_new;
    } else {
        // this is the first object in this context
        ctx->objects_begin = obj_new;
    }

    ctx->objects_end = obj_new;

    //printf("%s: inserted new object at %zu, size = %zu\n", __func__, cur_end, obj_new->size);

    struct ggml_tensor * const result = (struct ggml_tensor *)(mem_buffer + obj_new->offs);

    ggml_assert_aligned(result);

    *result = (struct ggml_tensor) {
        /*.type         =*/ type,
        /*.backend      =*/ GGML_BACKEND_CPU,
        /*.n_dims       =*/ n_dims,
        /*.ne           =*/ { 1, 1, 1, 1 },
        /*.nb           =*/ { 0, 0, 0, 0 },
        /*.op           =*/ GGML_OP_NONE,
        /*.is_param     =*/ false,
        /*.grad         =*/ NULL,
        /*.src0         =*/ NULL,
        /*.src1         =*/ NULL,
        /*.opt          =*/ { NULL },
        /*.n_tasks      =*/ 0,
        /*.perf_runs    =*/ 0,
        /*.perf_cycles  =*/ 0,
        /*.perf_time_us =*/ 0,
        /*.data         =*/ (data == NULL && !ctx->no_alloc) ? (void *)(result + 1) : data,
        /*.name         =*/ { 0 },
        /*.extra        =*/ NULL,
        /*.pad          =*/ { 0 },
    };

    // TODO: this should not be needed as long as we don't rely on aligned SIMD loads
    //ggml_assert_aligned(result->data);

    for (int i = 0; i < n_dims; i++) {
        result->ne[i] = ne[i];
    }

    result->nb[0] = GGML_TYPE_SIZE[type];
    result->nb[1] = result->nb[0]*(result->ne[0]/GGML_BLCK_SIZE[type]);
    for (int i = 2; i < GGML_MAX_DIMS; i++) {
        result->nb[i] = result->nb[i - 1]*result->ne[i - 1];
    }

    ctx->n_objects++;

    return result;
}
struct ggml_tensor * ggml_new_tensor(
        struct ggml_context * ctx,
        enum   ggml_type      type,
        int                   n_dims,
        const int64_t       * ne) {
    return ggml_new_tensor_impl(ctx, type, n_dims, ne, NULL);
}

struct ggml_tensor * ggml_new_tensor_1d(
        struct ggml_context * ctx,
        enum   ggml_type      type,
        int64_t               ne0) {
    return ggml_new_tensor(ctx, type, 1, &ne0);
}

struct ggml_tensor * ggml_new_tensor_2d(
        struct ggml_context * ctx,
        enum   ggml_type      type,
        int64_t               ne0,
        int64_t               ne1) {
    const int64_t ne[2] = { ne0, ne1 };
    return ggml_new_tensor(ctx, type, 2, ne);
}

struct ggml_tensor * ggml_new_tensor_3d(
        struct ggml_context * ctx,
        enum   ggml_type      type,
        int64_t               ne0,
        int64_t               ne1,
        int64_t               ne2) {
    const int64_t ne[3] = { ne0, ne1, ne2 };
    return ggml_new_tensor(ctx, type, 3, ne);
}

struct ggml_tensor * ggml_new_tensor_4d(
        struct ggml_context * ctx,
        enum   ggml_type      type,
        int64_t               ne0,
        int64_t               ne1,
        int64_t               ne2,
        int64_t               ne3) {
    const int64_t ne[4] = { ne0, ne1, ne2, ne3 };
    return ggml_new_tensor(ctx, type, 4, ne);
}
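
// Usage sketch (illustrative): the _1d/_2d/_3d/_4d helpers only differ in how
// they pack the shape array before calling ggml_new_tensor():
//
//   struct ggml_tensor * v = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 8);    // 8 floats
//   struct ggml_tensor * m = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 8, 4); // 4 rows of 8
//
//   const int64_t ne[3] = { 8, 4, 2 };
//   struct ggml_tensor * t = ggml_new_tensor(ctx, GGML_TYPE_F32, 3, ne);   // same as _3d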
struct ggml_tensor * ggml_new_i32(struct ggml_context * ctx, int32_t value) {
    ggml_scratch_save(ctx);

    struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 1);

    ggml_scratch_load(ctx);

    ggml_set_i32(result, value);

    return result;
}

struct ggml_tensor * ggml_new_f32(struct ggml_context * ctx, float value) {
    ggml_scratch_save(ctx);

    struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);

    ggml_scratch_load(ctx);

    ggml_set_f32(result, value);

    return result;
}

struct ggml_tensor * ggml_dup_tensor(struct ggml_context * ctx, const struct ggml_tensor * src) {
    return ggml_new_tensor_impl(ctx, src->type, src->n_dims, src->ne, NULL);
}

struct ggml_tensor * ggml_set_zero(struct ggml_tensor * tensor) {
    memset(tensor->data, 0, ggml_nbytes(tensor));
    return tensor;
}
struct ggml_tensor * ggml_set_i32 (struct ggml_tensor * tensor, int32_t value) {
    const int n     = ggml_nrows(tensor);
    const int nc    = tensor->ne[0];
    const size_t n1 = tensor->nb[1];

    char * const data = tensor->data;

    switch (tensor->type) {
        case GGML_TYPE_I8:
            {
                assert(tensor->nb[0] == sizeof(int8_t));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_i8(nc, (int8_t *)(data + i*n1), value);
                }
            } break;
        case GGML_TYPE_I16:
            {
                assert(tensor->nb[0] == sizeof(int16_t));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_i16(nc, (int16_t *)(data + i*n1), value);
                }
            } break;
        case GGML_TYPE_I32:
            {
                assert(tensor->nb[0] == sizeof(int32_t));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_i32(nc, (int32_t *)(data + i*n1), value);
                }
            } break;
        case GGML_TYPE_F16:
            {
                assert(tensor->nb[0] == sizeof(ggml_fp16_t));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), value);
                }
            } break;
        case GGML_TYPE_F32:
            {
                assert(tensor->nb[0] == sizeof(float));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_f32(nc, (float *)(data + i*n1), value);
                }
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }

    return tensor;
}

struct ggml_tensor * ggml_set_f32(struct ggml_tensor * tensor, float value) {
    const int n     = ggml_nrows(tensor);
    const int nc    = tensor->ne[0];
    const size_t n1 = tensor->nb[1];

    char * const data = tensor->data;

    switch (tensor->type) {
        case GGML_TYPE_I8:
            {
                assert(tensor->nb[0] == sizeof(int8_t));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_i8(nc, (int8_t *)(data + i*n1), value);
                }
            } break;
        case GGML_TYPE_I16:
            {
                assert(tensor->nb[0] == sizeof(int16_t));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_i16(nc, (int16_t *)(data + i*n1), value);
                }
            } break;
        case GGML_TYPE_I32:
            {
                assert(tensor->nb[0] == sizeof(int32_t));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_i32(nc, (int32_t *)(data + i*n1), value);
                }
            } break;
        case GGML_TYPE_F16:
            {
                assert(tensor->nb[0] == sizeof(ggml_fp16_t));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), value);
                }
            } break;
        case GGML_TYPE_F32:
            {
                assert(tensor->nb[0] == sizeof(float));
                for (int i = 0; i < n; i++) {
                    ggml_vec_set_f32(nc, (float *)(data + i*n1), value);
                }
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }

    return tensor;
}
int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i) {
    switch (tensor->type) {
        case GGML_TYPE_I8:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
                return ((int8_t *)(tensor->data))[i];
            } break;
        case GGML_TYPE_I16:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
                return ((int16_t *)(tensor->data))[i];
            } break;
        case GGML_TYPE_I32:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
                return ((int32_t *)(tensor->data))[i];
            } break;
        case GGML_TYPE_F16:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
                return GGML_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]);
            } break;
        case GGML_TYPE_F32:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(float));
                return ((float *)(tensor->data))[i];
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }

    return 0;
}

void ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value) {
    switch (tensor->type) {
        case GGML_TYPE_I8:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
                ((int8_t *)(tensor->data))[i] = value;
            } break;
        case GGML_TYPE_I16:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
                ((int16_t *)(tensor->data))[i] = value;
            } break;
        case GGML_TYPE_I32:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
                ((int32_t *)(tensor->data))[i] = value;
            } break;
        case GGML_TYPE_F16:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
                ((ggml_fp16_t *)(tensor->data))[i] = GGML_FP32_TO_FP16(value);
            } break;
        case GGML_TYPE_F32:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(float));
                ((float *)(tensor->data))[i] = value;
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

float ggml_get_f32_1d(const struct ggml_tensor * tensor, int i) {
    switch (tensor->type) {
        case GGML_TYPE_I8:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
                return ((int8_t *)(tensor->data))[i];
            } break;
        case GGML_TYPE_I16:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
                return ((int16_t *)(tensor->data))[i];
            } break;
        case GGML_TYPE_I32:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
                return ((int32_t *)(tensor->data))[i];
            } break;
        case GGML_TYPE_F16:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
                return GGML_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]);
            } break;
        case GGML_TYPE_F32:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(float));
                return ((float *)(tensor->data))[i];
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }

    return 0.0f;
}

void ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value) {
    switch (tensor->type) {
        case GGML_TYPE_I8:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
                ((int8_t *)(tensor->data))[i] = value;
            } break;
        case GGML_TYPE_I16:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
                ((int16_t *)(tensor->data))[i] = value;
            } break;
        case GGML_TYPE_I32:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
                ((int32_t *)(tensor->data))[i] = value;
            } break;
        case GGML_TYPE_F16:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
                ((ggml_fp16_t *)(tensor->data))[i] = GGML_FP32_TO_FP16(value);
            } break;
        case GGML_TYPE_F32:
            {
                GGML_ASSERT(tensor->nb[0] == sizeof(float));
                ((float *)(tensor->data))[i] = value;
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
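
// Usage sketch (illustrative): the 1d accessors index the tensor as a flat
// array and convert to/from the element type as needed. They assume a packed
// first dimension (see the nb[0] asserts above):
//
//   struct ggml_tensor * t = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4);
//   ggml_set_f32(t, 0.0f);            // fill all elements
//   ggml_set_f32_1d(t, 2, 3.5f);      // set one element
//   float x = ggml_get_f32_1d(t, 2);  // x == 3.5f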
void * ggml_get_data(const struct ggml_tensor * tensor) {
    return tensor->data;
}

float * ggml_get_data_f32(const struct ggml_tensor * tensor) {
    assert(tensor->type == GGML_TYPE_F32);
    return (float *)(tensor->data);
}

const char * ggml_get_name(const struct ggml_tensor * tensor) {
    return tensor->name;
}

struct ggml_tensor * ggml_set_name(struct ggml_tensor * tensor, const char * name) {
    strncpy(tensor->name, name, sizeof(tensor->name));
    tensor->name[sizeof(tensor->name) - 1] = '\0';
    return tensor;
}

struct ggml_tensor * ggml_format_name(struct ggml_tensor * tensor, const char * fmt, ...) {
    va_list args;
    va_start(args, fmt);
    vsnprintf(tensor->name, sizeof(tensor->name), fmt, args);
    va_end(args);
    return tensor;
}
struct ggml_tensor * ggml_view_tensor(
        struct ggml_context * ctx,
        const struct ggml_tensor * src) {
    struct ggml_tensor * result = ggml_new_tensor_impl(ctx, src->type, src->n_dims, src->ne, src->data);
    ggml_format_name(result, "%s (view)", src->name);

    result->nb[0] = src->nb[0];
    result->nb[1] = src->nb[1];
    result->nb[2] = src->nb[2];
    result->nb[3] = src->nb[3];

    return result;
}

struct ggml_tensor * ggml_get_tensor(struct ggml_context * ctx, const char * name) {
    struct ggml_object * obj = ctx->objects_begin;

    char * const mem_buffer = ctx->mem_buffer;

    while (obj != NULL) {
        struct ggml_tensor * cur = (struct ggml_tensor *)(mem_buffer + obj->offs);
        if (strcmp(cur->name, name) == 0) {
            return cur;
        }

        obj = obj->next;
    }

    return NULL;
}
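
// Usage sketch (illustrative; the tensor `w` and its name are hypothetical):
// names set via ggml_set_name()/ggml_format_name() can be used to look a
// tensor up again without keeping its pointer around. Note the lookup is a
// linear scan over all objects in the context:
//
//   ggml_set_name(w, "layer0/weight");
//   // ...
//   struct ggml_tensor * w2 = ggml_get_tensor(ctx, "layer0/weight");
//   GGML_ASSERT(w2 == w);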
////////////////////////////////////////////////////////////////////////////////

// ggml_dup

struct ggml_tensor * ggml_dup_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        bool inplace) {
    bool is_node = false;

    if (!inplace && (a->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_DUP;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = NULL;

    return result;
}

struct ggml_tensor * ggml_dup(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_dup_impl(ctx, a, false);
}

struct ggml_tensor * ggml_dup_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_dup_impl(ctx, a, true);
}
// ggml_add

struct ggml_tensor * ggml_add_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        bool inplace) {
    GGML_ASSERT(ggml_are_same_shape(a, b));

    bool is_node = false;

    if (a->grad || b->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_ADD;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = b;

    return result;
}

struct ggml_tensor * ggml_add(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_add_impl(ctx, a, b, false);
}

struct ggml_tensor * ggml_add_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_add_impl(ctx, a, b, true);
}
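
// Usage sketch (illustrative): the _impl/_inplace pattern above is shared by
// most operators in this file. The inplace variant builds the result as a
// view of the first operand instead of allocating new storage:
//
//   struct ggml_tensor * c = ggml_add(ctx, a, b);  // c gets its own storage
//   a = ggml_add_inplace(ctx, a, b);               // result reuses a's storage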
// ggml_add1

struct ggml_tensor * ggml_add1_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        bool inplace) {
    GGML_ASSERT(ggml_is_scalar(b));
    GGML_ASSERT(ggml_is_padded_1d(a));

    bool is_node = false;

    if (a->grad || b->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_ADD1;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = b;

    return result;
}

struct ggml_tensor * ggml_add1(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_add1_impl(ctx, a, b, false);
}

struct ggml_tensor * ggml_add1_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_add1_impl(ctx, a, b, true);
}
// ggml_acc

struct ggml_tensor * ggml_acc_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        size_t nb1,
        size_t nb2,
        size_t nb3,
        size_t offset,
        bool inplace) {
    GGML_ASSERT(ggml_nelements(b) <= ggml_nelements(a));
    GGML_ASSERT(ggml_is_contiguous(a));
    GGML_ASSERT(a->type == GGML_TYPE_F32);
    GGML_ASSERT(b->type == GGML_TYPE_F32);

    bool is_node = false;

    if (!inplace && (a->grad || b->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    ggml_scratch_save(ctx);

    struct ggml_tensor * c = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 5);

    ((int32_t *) c->data)[0] = nb1;
    ((int32_t *) c->data)[1] = nb2;
    ((int32_t *) c->data)[2] = nb3;
    ((int32_t *) c->data)[3] = offset;
    ((int32_t *) c->data)[4] = inplace ? 1 : 0;

    ggml_scratch_load(ctx);

    result->op   = GGML_OP_ACC;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = b;
    result->opt[0] = c;

    return result;
}

struct ggml_tensor * ggml_acc(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        size_t nb1,
        size_t nb2,
        size_t nb3,
        size_t offset) {
    return ggml_acc_impl(ctx, a, b, nb1, nb2, nb3, offset, false);
}

struct ggml_tensor * ggml_acc_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        size_t nb1,
        size_t nb2,
        size_t nb3,
        size_t offset) {
    return ggml_acc_impl(ctx, a, b, nb1, nb2, nb3, offset, true);
}
// ggml_sub

struct ggml_tensor * ggml_sub_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        bool inplace) {
    GGML_ASSERT(ggml_are_same_shape(a, b));

    bool is_node = false;

    if (!inplace && (a->grad || b->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_SUB;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = b;

    return result;
}

struct ggml_tensor * ggml_sub(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_sub_impl(ctx, a, b, false);
}

struct ggml_tensor * ggml_sub_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_sub_impl(ctx, a, b, true);
}

// ggml_mul

struct ggml_tensor * ggml_mul_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        bool inplace) {
    // TODO: support less-strict constraint
    //       GGML_ASSERT(ggml_can_repeat(b, a));
    GGML_ASSERT(ggml_can_repeat_rows(b, a));

    bool is_node = false;

    if (!inplace && (a->grad || b->grad)) {
        // TODO: support backward pass for broadcasting
        GGML_ASSERT(ggml_are_same_shape(a, b));
        is_node = true;
    }

    if (inplace) {
        GGML_ASSERT(is_node == false);
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_MUL;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = b;

    return result;
}

struct ggml_tensor * ggml_mul(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_mul_impl(ctx, a, b, false);
}

struct ggml_tensor * ggml_mul_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_mul_impl(ctx, a, b, true);
}
// ggml_div

struct ggml_tensor * ggml_div_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        bool inplace) {
    GGML_ASSERT(ggml_are_same_shape(a, b));

    bool is_node = false;

    if (!inplace && (a->grad || b->grad)) {
        is_node = true;
    }

    if (inplace) {
        GGML_ASSERT(is_node == false);
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_DIV;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = b;

    return result;
}

struct ggml_tensor * ggml_div(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_div_impl(ctx, a, b, false);
}

struct ggml_tensor * ggml_div_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_div_impl(ctx, a, b, true);
}
// ggml_sqr

struct ggml_tensor * ggml_sqr_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        bool inplace) {
    bool is_node = false;

    if (!inplace && (a->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_SQR;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = NULL;

    return result;
}

struct ggml_tensor * ggml_sqr(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_sqr_impl(ctx, a, false);
}

struct ggml_tensor * ggml_sqr_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_sqr_impl(ctx, a, true);
}

// ggml_sqrt

struct ggml_tensor * ggml_sqrt_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        bool inplace) {
    bool is_node = false;

    if (!inplace && (a->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_SQRT;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = NULL;

    return result;
}

struct ggml_tensor * ggml_sqrt(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_sqrt_impl(ctx, a, false);
}

struct ggml_tensor * ggml_sqrt_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_sqrt_impl(ctx, a, true);
}

// ggml_log

struct ggml_tensor * ggml_log_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        bool inplace) {
    bool is_node = false;

    if (!inplace && (a->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_LOG;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = NULL;

    return result;
}

struct ggml_tensor * ggml_log(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_log_impl(ctx, a, false);
}

struct ggml_tensor * ggml_log_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_log_impl(ctx, a, true);
}
// ggml_sum

struct ggml_tensor * ggml_sum(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = ggml_new_tensor_1d(ctx, a->type, 1);

    result->op   = GGML_OP_SUM;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = NULL;

    return result;
}

// ggml_sum_rows

struct ggml_tensor * ggml_sum_rows(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    int64_t ne[4] = { 1, 1, 1, 1 };
    for (int i = 1; i < a->n_dims; ++i) {
        ne[i] = a->ne[i];
    }

    struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, a->n_dims, ne);

    result->op   = GGML_OP_SUM_ROWS;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = NULL;

    return result;
}

// ggml_mean

struct ggml_tensor * ggml_mean(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement
        is_node = true;
    }

    int64_t ne[GGML_MAX_DIMS] = { 1, a->ne[1], a->ne[2], a->ne[3] };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, a->n_dims, ne);

    result->op   = GGML_OP_MEAN;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = NULL;

    return result;
}
// ggml_repeat

struct ggml_tensor * ggml_repeat(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    GGML_ASSERT(ggml_can_repeat(a, b));

    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    if (ggml_are_same_shape(a, b) && !is_node) {
        return a;
    }

    struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, b->n_dims, b->ne);

    result->op   = GGML_OP_REPEAT;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = b;

    return result;
}
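
// Usage sketch (illustrative; shapes are hypothetical): ggml_repeat()
// broadcasts a to the shape of b, subject to the divisibility check in
// ggml_can_repeat(), e.g. tiling a bias row across a matrix:
//
//   struct ggml_tensor * bias = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n);
//   struct ggml_tensor * x    = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n, rows);
//   struct ggml_tensor * y    = ggml_add(ctx, x, ggml_repeat(ctx, bias, x));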
// ggml_repeat_back

struct ggml_tensor * ggml_repeat_back(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    GGML_ASSERT(ggml_can_repeat(b, a));

    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    if (ggml_are_same_shape(a, b) && !is_node) {
        return a;
    }

    struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, b->n_dims, b->ne);

    result->op   = GGML_OP_REPEAT_BACK;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = b;

    return result;
}
// ggml_abs

struct ggml_tensor * ggml_abs_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        bool inplace) {
    bool is_node = false;

    if (!inplace && (a->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_ABS;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = NULL;

    return result;
}

struct ggml_tensor * ggml_abs(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_abs_impl(ctx, a, false);
}

struct ggml_tensor * ggml_abs_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_abs_impl(ctx, a, true);
}

// ggml_sgn

struct ggml_tensor * ggml_sgn_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        bool inplace) {
    bool is_node = false;

    if (!inplace && (a->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_SGN;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = NULL;

    return result;
}

struct ggml_tensor * ggml_sgn(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_sgn_impl(ctx, a, false);
}

struct ggml_tensor * ggml_sgn_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_sgn_impl(ctx, a, true);
}

// ggml_neg

struct ggml_tensor * ggml_neg_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        bool inplace) {
    bool is_node = false;

    if (!inplace && (a->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_NEG;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = NULL;

    return result;
}

struct ggml_tensor * ggml_neg(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_neg_impl(ctx, a, false);
}

struct ggml_tensor * ggml_neg_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_neg_impl(ctx, a, true);
}

// ggml_step

struct ggml_tensor * ggml_step_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        bool inplace) {
    bool is_node = false;

    if (!inplace && (a->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_STEP;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = NULL;

    return result;
}

struct ggml_tensor * ggml_step(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_step_impl(ctx, a, false);
}

struct ggml_tensor * ggml_step_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_step_impl(ctx, a, true);
}

// ggml_relu

struct ggml_tensor * ggml_relu_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        bool inplace) {
    bool is_node = false;

    if (!inplace && (a->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_RELU;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = NULL;

    return result;
}

struct ggml_tensor * ggml_relu(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_relu_impl(ctx, a, false);
}

struct ggml_tensor * ggml_relu_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_relu_impl(ctx, a, true);
}

// ggml_gelu

struct ggml_tensor * ggml_gelu_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        bool inplace) {
    bool is_node = false;

    if (!inplace && (a->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_GELU;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = NULL;

    return result;
}

struct ggml_tensor * ggml_gelu(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_gelu_impl(ctx, a, false);
}

struct ggml_tensor * ggml_gelu_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_gelu_impl(ctx, a, true);
}

// ggml_gelu_quick

struct ggml_tensor * ggml_gelu_quick_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        bool inplace) {
    bool is_node = false;

    if (!inplace && (a->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_GELU_QUICK;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = NULL;

    return result;
}

struct ggml_tensor * ggml_gelu_quick(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_gelu_quick_impl(ctx, a, false);
}

struct ggml_tensor * ggml_gelu_quick_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_gelu_quick_impl(ctx, a, true);
}

// ggml_silu

struct ggml_tensor * ggml_silu_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        bool inplace) {
    bool is_node = false;

    if (!inplace && (a->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_SILU;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = NULL;

    return result;
}

struct ggml_tensor * ggml_silu(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_silu_impl(ctx, a, false);
}

struct ggml_tensor * ggml_silu_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_silu_impl(ctx, a, true);
}
// ggml_silu_back

struct ggml_tensor * ggml_silu_back(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    bool is_node = false;

    if (a->grad || b->grad) {
        // TODO: implement backward
        is_node = true;
    }

    struct ggml_tensor * result = ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_SILU_BACK;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = b;

    return result;
}

// ggml_norm

struct ggml_tensor * ggml_norm_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        bool inplace) {
    bool is_node = false;

    if (!inplace && (a->grad)) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_NORM;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = NULL; // TODO: maybe store epsilon here?

    return result;
}

struct ggml_tensor * ggml_norm(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_norm_impl(ctx, a, false);
}

struct ggml_tensor * ggml_norm_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_norm_impl(ctx, a, true);
}

struct ggml_tensor * ggml_rms_norm_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        bool inplace) {
    bool is_node = false;

    if (!inplace && (a->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_RMS_NORM;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = NULL; // TODO: maybe store epsilon here?

    return result;
}

struct ggml_tensor * ggml_rms_norm(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_rms_norm_impl(ctx, a, false);
}

struct ggml_tensor * ggml_rms_norm_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_rms_norm_impl(ctx, a, true);
}

struct ggml_tensor * ggml_rms_norm_back(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    bool is_node = false;

    if (a->grad) {
        // TODO: implement backward
        is_node = true;
    }

    struct ggml_tensor * result = ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_RMS_NORM_BACK;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = b;

    return result;
}
// ggml_mul_mat

struct ggml_tensor * ggml_mul_mat(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    GGML_ASSERT(ggml_can_mul_mat(a, b));
    GGML_ASSERT(!ggml_is_transposed(a));

    bool is_node = false;

    if (a->grad || b->grad) {
        is_node = true;
    }

    const int64_t ne[4] = { a->ne[1], b->ne[1], a->ne[2], b->ne[3] };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, MIN(a->n_dims, b->n_dims), ne);

    result->op   = GGML_OP_MUL_MAT;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = b;

    return result;
}
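
// Shape sketch (illustrative): with a->ne = {k, m, ...} and b->ne = {k, n, ...}
// (the shared dim k is what ggml_can_mul_mat checks), the result has
// ne = {m, n, ...}: each result element is the dot product of one row of a
// with one row of b over the shared k dimension, batched over dims 2 and 3.
// a must also not be transposed in memory (asserted above).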
// ggml_out_prod

struct ggml_tensor * ggml_out_prod(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    GGML_ASSERT(ggml_can_out_prod(a, b));
    GGML_ASSERT(!ggml_is_transposed(a));

    bool is_node = false;

    if (a->grad || b->grad) {
        is_node = true;
    }

    const int64_t ne[4] = { a->ne[0], b->ne[0], a->ne[2], b->ne[3] };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, MIN(a->n_dims, b->n_dims), ne);

    result->op   = GGML_OP_OUT_PROD;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = b;

    return result;
}
// ggml_scale

struct ggml_tensor * ggml_scale_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        bool inplace) {
    GGML_ASSERT(ggml_is_scalar(b));
    GGML_ASSERT(ggml_is_padded_1d(a));

    bool is_node = false;

    if (a->grad || b->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op   = GGML_OP_SCALE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = b;

    return result;
}

struct ggml_tensor * ggml_scale(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_scale_impl(ctx, a, b, false);
}

struct ggml_tensor * ggml_scale_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_scale_impl(ctx, a, b, true);
}
// ggml_set

struct ggml_tensor * ggml_set_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        size_t nb1,
        size_t nb2,
        size_t nb3,
        size_t offset,
        bool inplace) {
    GGML_ASSERT(ggml_nelements(a) >= ggml_nelements(b));

    bool is_node = false;

    if (a->grad || b->grad) {
        is_node = true;
    }

    // make a view of the destination
    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    ggml_scratch_save(ctx);

    struct ggml_tensor * c = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 5);

    ((int32_t *) c->data)[0] = nb1;
    ((int32_t *) c->data)[1] = nb2;
    ((int32_t *) c->data)[2] = nb3;
    ((int32_t *) c->data)[3] = offset;
    ((int32_t *) c->data)[4] = inplace ? 1 : 0;

    ggml_scratch_load(ctx);

    result->op   = GGML_OP_SET;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = b;
    result->opt[0] = c;

    return result;
}

struct ggml_tensor * ggml_set(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        size_t nb1,
        size_t nb2,
        size_t nb3,
        size_t offset) {
    return ggml_set_impl(ctx, a, b, nb1, nb2, nb3, offset, false);
}

struct ggml_tensor * ggml_set_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        size_t nb1,
        size_t nb2,
        size_t nb3,
        size_t offset) {
    return ggml_set_impl(ctx, a, b, nb1, nb2, nb3, offset, true);
}

struct ggml_tensor * ggml_set_1d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        size_t offset) {
    return ggml_set_impl(ctx, a, b, a->nb[1], a->nb[2], a->nb[3], offset, false);
}

struct ggml_tensor * ggml_set_1d_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        size_t offset) {
    return ggml_set_impl(ctx, a, b, a->nb[1], a->nb[2], a->nb[3], offset, true);
}

struct ggml_tensor * ggml_set_2d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        size_t nb1,
        size_t offset) {
    return ggml_set_impl(ctx, a, b, nb1, a->nb[2], a->nb[3], offset, false);
}
struct ggml_tensor * ggml_set_2d_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        size_t nb1,
        size_t offset) {
    return ggml_set_impl(ctx, a, b, nb1, a->nb[2], a->nb[3], offset, true);
}
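// Usage sketch (illustrative): the offset is in bytes, so to overwrite the
// elements of a starting at element index i0 with the contents of b:
//
//     struct ggml_tensor * r = ggml_set_1d(ctx, a, b, i0*ggml_element_size(a));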
// ggml_cpy

struct ggml_tensor * ggml_cpy_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        bool inplace) {
    GGML_ASSERT(ggml_nelements(a) == ggml_nelements(b));

    bool is_node = false;

    if (!inplace && (a->grad || b->grad)) {
        is_node = true;
    }

    // make a view of the destination
    struct ggml_tensor * result = ggml_view_tensor(ctx, b);
    if (strlen(b->name) > 0) {
        ggml_format_name(result, "%s (copy of %s)", b->name, a->name);
    } else {
        ggml_format_name(result, "%s (copy)", a->name);
    }

    result->op = GGML_OP_CPY;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = b;

    return result;
}

struct ggml_tensor * ggml_cpy(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_cpy_impl(ctx, a, b, false);
}

struct ggml_tensor * ggml_cpy_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_cpy_impl(ctx, a, b, true);
}
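// Usage sketch (illustrative): ggml_cpy writes a into b (converting between
// types as needed) and returns a view of b, so it doubles as a cast - for
// example, downcasting an F32 matrix to F16:
//
//     struct ggml_tensor * x16 =
//         ggml_cpy(ctx, x, ggml_new_tensor_2d(ctx, GGML_TYPE_F16, x->ne[0], x->ne[1]));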
// ggml_cont

struct ggml_tensor * ggml_cont_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        bool inplace) {
    bool is_node = false;

    if (!inplace && a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
    ggml_format_name(result, "%s (cont)", a->name);

    result->op = GGML_OP_CONT;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = NULL;

    return result;
}

struct ggml_tensor * ggml_cont(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_cont_impl(ctx, a, false);
}

struct ggml_tensor * ggml_cont_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_cont_impl(ctx, a, true);
}
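// Usage sketch (illustrative): permute/transpose only produce strided views,
// while some ops expect contiguous input; ggml_cont materializes such a view
// into freshly laid-out memory:
//
//     struct ggml_tensor * vt = ggml_cont(ctx, ggml_transpose(ctx, v));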
// ggml_reshape

struct ggml_tensor * ggml_reshape(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    GGML_ASSERT(ggml_is_contiguous(a));
    GGML_ASSERT(ggml_is_contiguous(b));
    GGML_ASSERT(ggml_nelements(a) == ggml_nelements(b));

    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    if (b->grad) {
        // gradient propagation is not supported
        //GGML_ASSERT(false);
    }

    struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, b->n_dims, b->ne, a->data);
    ggml_format_name(result, "%s (reshaped)", a->name);

    result->op = GGML_OP_RESHAPE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = NULL;

    return result;
}

struct ggml_tensor * ggml_reshape_1d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0) {
    GGML_ASSERT(ggml_is_contiguous(a));
    GGML_ASSERT(ggml_nelements(a) == ne0);

    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    const int64_t ne[1] = { ne0 };
    struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 1, ne, a->data);
    ggml_format_name(result, "%s (reshaped)", a->name);

    result->op = GGML_OP_RESHAPE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = NULL;

    return result;
}

struct ggml_tensor * ggml_reshape_2d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        int64_t ne1) {
    GGML_ASSERT(ggml_is_contiguous(a));
    GGML_ASSERT(ggml_nelements(a) == ne0*ne1);

    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    const int64_t ne[2] = { ne0, ne1 };
    struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 2, ne, a->data);
    ggml_format_name(result, "%s (reshaped)", a->name);

    result->op = GGML_OP_RESHAPE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = NULL;

    return result;
}

struct ggml_tensor * ggml_reshape_3d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        int64_t ne1,
        int64_t ne2) {
    GGML_ASSERT(ggml_is_contiguous(a));
    GGML_ASSERT(ggml_nelements(a) == ne0*ne1*ne2);

    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    const int64_t ne[3] = { ne0, ne1, ne2 };
    struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 3, ne, a->data);
    ggml_format_name(result, "%s (reshaped)", a->name);

    result->op = GGML_OP_RESHAPE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = NULL;

    return result;
}

struct ggml_tensor * ggml_reshape_4d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        int64_t ne1,
        int64_t ne2,
        int64_t ne3) {
    GGML_ASSERT(ggml_is_contiguous(a));
    GGML_ASSERT(ggml_nelements(a) == ne0*ne1*ne2*ne3);

    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    const int64_t ne[4] = { ne0, ne1, ne2, ne3 };
    struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 4, ne, a->data);
    ggml_format_name(result, "%s (reshaped)", a->name);

    result->op = GGML_OP_RESHAPE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = NULL;

    return result;
}
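// Usage sketch (illustrative): reshape reinterprets a contiguous buffer
// without moving data, so the element counts must match. For example,
// splitting a [n_embd, n_tokens] activation into per-head slices, with
// n_embd == head_dim*n_head assumed:
//
//     struct ggml_tensor * xh = ggml_reshape_3d(ctx, x, head_dim, n_head, n_tokens);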
// ggml_view_1d

struct ggml_tensor * ggml_view_1d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        size_t offset) {
    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 1, &ne0, (char *) a->data + offset);
    ggml_format_name(result, "%s (view)", a->name);

    ggml_scratch_save(ctx);

    struct ggml_tensor * offs = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 2);
    ggml_set_name(offs, "offset");
    memcpy(offs->data, &offset, 2*sizeof(int32_t));

    ggml_scratch_load(ctx);

    result->op = GGML_OP_VIEW;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = NULL;
    result->opt[0] = offs;

    return result;
}

// ggml_view_2d

struct ggml_tensor * ggml_view_2d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        int64_t ne1,
        size_t nb1,
        size_t offset) {
    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    const int64_t ne[GGML_MAX_DIMS] = { ne0, ne1, 1, 1 };
    struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 2, ne, (char *) a->data + offset);
    ggml_format_name(result, "%s (view)", a->name);

    ggml_scratch_save(ctx);

    struct ggml_tensor * offs = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 2);
    ggml_set_name(offs, "offset");
    memcpy(offs->data, &offset, 2*sizeof(int32_t));

    ggml_scratch_load(ctx);

    result->nb[1] = nb1;
    result->nb[2] = result->nb[1]*ne1;
    result->nb[3] = result->nb[2];

    result->op = GGML_OP_VIEW;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = NULL;
    result->opt[0] = offs;

    return result;
}

// ggml_view_3d

struct ggml_tensor * ggml_view_3d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        int64_t ne1,
        int64_t ne2,
        size_t nb1,
        size_t nb2,
        size_t offset) {
    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    const int64_t ne[GGML_MAX_DIMS] = { ne0, ne1, ne2, 1 };
    struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 3, ne, (char *) a->data + offset);
    ggml_format_name(result, "%s (view)", a->name);

    ggml_scratch_save(ctx);

    struct ggml_tensor * offs = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 2);
    ggml_set_name(offs, "offset");
    memcpy(offs->data, &offset, 2*sizeof(int32_t));

    ggml_scratch_load(ctx);

    result->nb[1] = nb1;
    result->nb[2] = nb2;
    result->nb[3] = result->nb[2]*ne2;

    result->op = GGML_OP_VIEW;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = NULL;
    result->opt[0] = offs;

    return result;
}

// ggml_view_4d

struct ggml_tensor * ggml_view_4d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int64_t ne0,
        int64_t ne1,
        int64_t ne2,
        int64_t ne3,
        size_t nb1,
        size_t nb2,
        size_t nb3,
        size_t offset) {
    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    const int64_t ne[GGML_MAX_DIMS] = { ne0, ne1, ne2, ne3 };
    struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 4, ne, (char *) a->data + offset);
    ggml_format_name(result, "%s (view)", a->name);

    ggml_scratch_save(ctx);

    struct ggml_tensor * offs = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 2);
    ggml_set_name(offs, "offset");
    memcpy(offs->data, &offset, 2*sizeof(int32_t));

    ggml_scratch_load(ctx);

    result->nb[1] = nb1;
    result->nb[2] = nb2;
    result->nb[3] = nb3;

    result->op = GGML_OP_VIEW;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = NULL;
    result->opt[0] = offs;

    return result;
}
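// Usage sketch (illustrative): views take element counts plus explicit byte
// strides and a byte offset into a->data. Selecting rows r0..r0+nr-1 of a 2-D
// tensor keeps the row stride nb[1] and offsets by whole rows:
//
//     struct ggml_tensor * rows =
//         ggml_view_2d(ctx, a, a->ne[0], nr, a->nb[1], r0*a->nb[1]);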
// ggml_permute

struct ggml_tensor * ggml_permute(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int axis0,
        int axis1,
        int axis2,
        int axis3) {
    GGML_ASSERT(axis0 >= 0 && axis0 < GGML_MAX_DIMS);
    GGML_ASSERT(axis1 >= 0 && axis1 < GGML_MAX_DIMS);
    GGML_ASSERT(axis2 >= 0 && axis2 < GGML_MAX_DIMS);
    GGML_ASSERT(axis3 >= 0 && axis3 < GGML_MAX_DIMS);
    GGML_ASSERT(axis0 != axis1);
    GGML_ASSERT(axis0 != axis2);
    GGML_ASSERT(axis0 != axis3);
    GGML_ASSERT(axis1 != axis2);
    GGML_ASSERT(axis1 != axis3);
    GGML_ASSERT(axis2 != axis3);

    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = ggml_view_tensor(ctx, a);
    ggml_format_name(result, "%s (permuted)", a->name);

    int64_t ne[GGML_MAX_DIMS];
    size_t nb[GGML_MAX_DIMS];

    ne[axis0] = a->ne[0];
    ne[axis1] = a->ne[1];
    ne[axis2] = a->ne[2];
    ne[axis3] = a->ne[3];

    nb[axis0] = a->nb[0];
    nb[axis1] = a->nb[1];
    nb[axis2] = a->nb[2];
    nb[axis3] = a->nb[3];

    result->ne[0] = ne[0];
    result->ne[1] = ne[1];
    result->ne[2] = ne[2];
    result->ne[3] = ne[3];

    result->nb[0] = nb[0];
    result->nb[1] = nb[1];
    result->nb[2] = nb[2];
    result->nb[3] = nb[3];

    result->op = GGML_OP_PERMUTE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = NULL;

    if (is_node) {
        ggml_scratch_save(ctx);

        struct ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 4);
        ((int32_t *) b->data)[0] = axis0;
        ((int32_t *) b->data)[1] = axis1;
        ((int32_t *) b->data)[2] = axis2;
        ((int32_t *) b->data)[3] = axis3;

        ggml_scratch_load(ctx);

        result->opt[0] = b;
    }

    return result;
}
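// Usage sketch (illustrative): each argument names the destination axis of the
// corresponding source dimension. The common "swap axes 1 and 2" pattern for
// attention heads is:
//
//     // q: [head_dim, n_head, n_tokens, 1] -> [head_dim, n_tokens, n_head, 1]
//     struct ggml_tensor * qp = ggml_permute(ctx, q, 0, 2, 1, 3);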
// ggml_transpose

struct ggml_tensor * ggml_transpose(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = ggml_view_tensor(ctx, a);
    ggml_format_name(result, "%s (transposed)", a->name);

    result->ne[0] = a->ne[1];
    result->ne[1] = a->ne[0];

    result->nb[0] = a->nb[1];
    result->nb[1] = a->nb[0];

    result->op = GGML_OP_TRANSPOSE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = NULL;

    return result;
}

// ggml_get_rows

struct ggml_tensor * ggml_get_rows(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    GGML_ASSERT(ggml_is_matrix(a) && ggml_is_vector(b) && b->type == GGML_TYPE_I32);

    bool is_node = false;

    if (a->grad || b->grad) {
        is_node = true;
    }

    // TODO: implement non F32 return
    //struct ggml_tensor * result = ggml_new_tensor_2d(ctx, a->type, a->ne[0], b->ne[0]);
    struct ggml_tensor * result = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, a->ne[0], b->ne[0]);

    result->op = GGML_OP_GET_ROWS;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = b;

    return result;
}
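// Usage sketch (illustrative): an embedding lookup, where the I32 vector holds
// token ids and each id selects one row of the embedding matrix:
//
//     struct ggml_tensor * ids = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, n_tokens);
//     // ... fill ids->data with token ids ...
//     struct ggml_tensor * emb = ggml_get_rows(ctx, tok_embeddings, ids);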
// ggml_get_rows_back

struct ggml_tensor * ggml_get_rows_back(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        struct ggml_tensor * c) {
    GGML_ASSERT(ggml_is_matrix(a) && ggml_is_vector(b) && b->type == GGML_TYPE_I32);
    GGML_ASSERT(ggml_is_matrix(c) && (a->ne[0] == c->ne[0]));

    bool is_node = false;

    if (a->grad || b->grad) {
        is_node = true;
    }

    // TODO: implement non F32 return
    //struct ggml_tensor * result = ggml_new_tensor_2d(ctx, a->type, a->ne[0], b->ne[0]);
    struct ggml_tensor * result = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, c->ne[0], c->ne[1]);

    result->op = GGML_OP_GET_ROWS_BACK;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = b;
    result->opt[0] = c;

    return result;
}

// ggml_diag

struct ggml_tensor * ggml_diag(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    GGML_ASSERT(a->ne[1] == 1);

    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    const int64_t ne[4] = { a->ne[0], a->ne[0], a->ne[2], a->ne[3] };
    struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, MAX(a->n_dims, 2), ne);

    result->op = GGML_OP_DIAG;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = NULL;

    return result;
}

// ggml_diag_mask_inf

struct ggml_tensor * ggml_diag_mask_inf_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_past,
        bool inplace) {
    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    ggml_scratch_save(ctx);

    struct ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 2);
    ((int32_t *) b->data)[0] = n_past;
    ((int32_t *) b->data)[1] = inplace ? 1 : 0;

    ggml_scratch_load(ctx);

    result->op = GGML_OP_DIAG_MASK_INF;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = b;

    return result;
}

struct ggml_tensor * ggml_diag_mask_inf(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_past) {
    return ggml_diag_mask_inf_impl(ctx, a, n_past, false);
}

struct ggml_tensor * ggml_diag_mask_inf_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_past) {
    return ggml_diag_mask_inf_impl(ctx, a, n_past, true);
}
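// Usage sketch (illustrative): the canonical causal-attention pattern masks
// future positions with -inf so the following softmax assigns them zero weight:
//
//     struct ggml_tensor * kq_masked = ggml_diag_mask_inf_inplace(ctx, kq, n_past);
//     struct ggml_tensor * kq_soft   = ggml_soft_max_inplace(ctx, kq_masked);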
// ggml_diag_mask_zero

struct ggml_tensor * ggml_diag_mask_zero_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_past,
        bool inplace) {
    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    ggml_scratch_save(ctx);

    struct ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 2);
    ggml_set_name(b, "n_past, inplace");
    ((int32_t *) b->data)[0] = n_past;
    ((int32_t *) b->data)[1] = inplace ? 1 : 0;

    ggml_scratch_load(ctx);

    result->op = GGML_OP_DIAG_MASK_ZERO;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = b;

    return result;
}

struct ggml_tensor * ggml_diag_mask_zero(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_past) {
    return ggml_diag_mask_zero_impl(ctx, a, n_past, false);
}

struct ggml_tensor * ggml_diag_mask_zero_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_past) {
    return ggml_diag_mask_zero_impl(ctx, a, n_past, true);
}

// ggml_soft_max

struct ggml_tensor * ggml_soft_max_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        bool inplace) {
    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op = GGML_OP_SOFT_MAX;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = NULL;

    return result;
}

struct ggml_tensor * ggml_soft_max(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_soft_max_impl(ctx, a, false);
}

struct ggml_tensor * ggml_soft_max_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a) {
    return ggml_soft_max_impl(ctx, a, true);
}

// ggml_soft_max_back

struct ggml_tensor * ggml_soft_max_back_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        bool inplace) {
    bool is_node = false;

    if (a->grad || b->grad) {
        is_node = true; // TODO: implement backward pass
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    result->op = GGML_OP_SOFT_MAX_BACK;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = b;

    return result;
}

struct ggml_tensor * ggml_soft_max_back(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_soft_max_back_impl(ctx, a, b, false);
}

struct ggml_tensor * ggml_soft_max_back_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    return ggml_soft_max_back_impl(ctx, a, b, true);
}

// ggml_rope

struct ggml_tensor * ggml_rope_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_past,
        int n_dims,
        int mode,
        int n_ctx,
        bool inplace) {
    GGML_ASSERT(n_past >= 0);

    bool is_node = false;

    if (a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    ggml_scratch_save(ctx);

    struct ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 4);
    ((int32_t *) b->data)[0] = n_past;
    ((int32_t *) b->data)[1] = n_dims;
    ((int32_t *) b->data)[2] = mode;
    ((int32_t *) b->data)[3] = n_ctx;

    ggml_scratch_load(ctx);

    result->op = GGML_OP_ROPE;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = b;

    return result;
}

struct ggml_tensor * ggml_rope(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_past,
        int n_dims,
        int mode,
        int n_ctx) {
    return ggml_rope_impl(ctx, a, n_past, n_dims, mode, n_ctx, false);
}

struct ggml_tensor * ggml_rope_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_past,
        int n_dims,
        int mode,
        int n_ctx) {
    return ggml_rope_impl(ctx, a, n_past, n_dims, mode, n_ctx, true);
}
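// Usage sketch (illustrative): applying rotary position embeddings to the
// first n_rot components of each query head; mode 0 selects the default
// (non-NeoX) variant:
//
//     struct ggml_tensor * q_roped = ggml_rope_inplace(ctx, q, n_past, n_rot, 0, n_ctx);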
// ggml_rope_back

struct ggml_tensor * ggml_rope_back(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_past,
        int n_dims,
        int mode) {
    GGML_ASSERT(n_past >= 0);

    bool is_node = false;

    if (a->grad) {
        is_node = false; // TODO: implement backward
    }

    struct ggml_tensor * result = ggml_dup_tensor(ctx, a);

    ggml_scratch_save(ctx);

    struct ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 3);
    ggml_set_name(b, "n_past, n_dims, mode");
    ((int32_t *) b->data)[0] = n_past;
    ((int32_t *) b->data)[1] = n_dims;
    ((int32_t *) b->data)[2] = mode;

    ggml_scratch_load(ctx);

    result->op = GGML_OP_ROPE_BACK;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = b;

    return result;
}

// ggml_alibi

struct ggml_tensor * ggml_alibi(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int n_past,
        int n_head,
        float bias_max) {
    GGML_ASSERT(n_past >= 0);

    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    // TODO: when implementing backward, fix this:
    //struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
    struct ggml_tensor * result = ggml_view_tensor(ctx, a);

    ggml_scratch_save(ctx);

    struct ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 3);
    ((int32_t *) b->data)[0] = n_past;
    ((int32_t *) b->data)[1] = n_head;
    GGML_ASSERT(sizeof(float) == sizeof(int32_t));
    (((float *) b->data)[2]) = bias_max;

    ggml_scratch_load(ctx);

    result->op = GGML_OP_ALIBI;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = b;

    return result;
}

// ggml_clamp

struct ggml_tensor * ggml_clamp(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        float min,
        float max) {
    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    // TODO: when implementing backward, fix this:
    struct ggml_tensor * result = ggml_view_tensor(ctx, a);

    ggml_scratch_save(ctx);

    struct ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 2);
    ((float *) b->data)[0] = min;
    ((float *) b->data)[1] = max;

    ggml_scratch_load(ctx);

    result->op = GGML_OP_CLAMP;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = b;

    return result;
}
// ggml_conv_1d_s1_ph

struct ggml_tensor * ggml_conv_1d_s1_ph(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    GGML_ASSERT(ggml_is_matrix(b));
    GGML_ASSERT(a->ne[1] == b->ne[1]);
    GGML_ASSERT(a->ne[3] == 1);

    bool is_node = false;

    if (a->grad || b->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    const int64_t ne[4] = { b->ne[0], a->ne[2], 1, 1, };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 2, ne);

    result->op = GGML_OP_CONV_1D_S1_PH;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = b;

    return result;
}

// ggml_conv_1d_s2_ph

struct ggml_tensor * ggml_conv_1d_s2_ph(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    GGML_ASSERT(ggml_is_matrix(b));
    GGML_ASSERT(a->ne[1] == b->ne[1]);
    GGML_ASSERT(a->ne[3] == 1);

    bool is_node = false;

    if (a->grad || b->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    const int64_t ne[4] = { b->ne[0]/2, a->ne[2], 1, 1, };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 2, ne);

    result->op = GGML_OP_CONV_1D_S2_PH;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = b;

    return result;
}

// ggml_conv_2d_sk_p0

struct ggml_tensor * ggml_conv_2d_sk_p0(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    GGML_ASSERT(b->ne[3] == 1);
    GGML_ASSERT(a->ne[2] == b->ne[2]);
    GGML_ASSERT(b->ne[0] % a->ne[0] == 0);
    GGML_ASSERT(b->ne[1] % a->ne[1] == 0);

    bool is_node = false;

    if (a->grad || b->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    const int64_t ne[4] = { b->ne[0]/a->ne[0], b->ne[1]/a->ne[1], a->ne[3], 1, };
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);

    result->op = GGML_OP_CONV_2D_SK_P0;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = b;

    return result;
}

// ggml_flash_attn

struct ggml_tensor * ggml_flash_attn(
        struct ggml_context * ctx,
        struct ggml_tensor * q,
        struct ggml_tensor * k,
        struct ggml_tensor * v,
        bool masked) {
    GGML_ASSERT(ggml_can_mul_mat(k, q));
    // TODO: check if vT can be multiplied by (k*qT)

    bool is_node = false;

    if (q->grad || k->grad || v->grad) {
        is_node = true;
    }

    //struct ggml_tensor * result = ggml_dup_tensor(ctx, q);
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, q->ne);

    result->op = GGML_OP_FLASH_ATTN;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = q;
    result->src1 = k;
    result->opt[0] = v;
    result->opt[1] = ggml_new_i32(ctx, masked ? 1 : 0);

    return result;
}
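// Usage sketch (illustrative): a fused alternative to the usual
// mul_mat -> scale -> diag_mask_inf -> soft_max -> mul_mat attention chain,
// with q/k laid out as [head_dim, n, ...] and v transposed relative to k:
//
//     struct ggml_tensor * kqv = ggml_flash_attn(ctx, q, k, v, /*masked=*/true);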
// ggml_flash_ff

struct ggml_tensor * ggml_flash_ff(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b0,
        struct ggml_tensor * b1,
        struct ggml_tensor * c0,
        struct ggml_tensor * c1) {
    GGML_ASSERT(ggml_can_mul_mat(b0, a));
    // TODO: more checks

    bool is_node = false;

    if (a->grad || b0->grad || b1->grad || c0->grad || c1->grad) {
        is_node = true;
    }

    //struct ggml_tensor * result = ggml_dup_tensor(ctx, a);
    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, a->ne);

    result->op = GGML_OP_FLASH_FF;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = b0;
    result->opt[0] = b1;
    result->opt[1] = c0;
    result->opt[2] = c1;

    return result;
}

// ggml_flash_attn_back

struct ggml_tensor * ggml_flash_attn_back(
        struct ggml_context * ctx,
        struct ggml_tensor * q,
        struct ggml_tensor * k,
        struct ggml_tensor * v,
        struct ggml_tensor * d,
        bool masked) {
    GGML_ASSERT(ggml_can_mul_mat(k, q));
    // TODO: check if vT can be multiplied by (k*qT)

    // d shape [D,N,ne2,ne3]
    // q shape [D,N,ne2,ne3]
    // k shape [D,M,ne2,ne3]
    // v shape [M,D,ne2,ne3]
    const int64_t D = q->ne[0];
    const int64_t N = q->ne[1];
    const int64_t M = k->ne[1];
    const int64_t ne2 = q->ne[2];
    const int64_t ne3 = q->ne[3];

    GGML_ASSERT(k->ne[0] == D);
    GGML_ASSERT(v->ne[0] == M);
    GGML_ASSERT(v->ne[1] == D);
    GGML_ASSERT(d->ne[0] == D);
    GGML_ASSERT(d->ne[1] == N);
    GGML_ASSERT(k->ne[2] == ne2);
    GGML_ASSERT(k->ne[3] == ne3);
    GGML_ASSERT(v->ne[2] == ne2);
    GGML_ASSERT(v->ne[3] == ne3);
    GGML_ASSERT(d->ne[2] == ne2);
    GGML_ASSERT(d->ne[3] == ne3);

    bool is_node = false;

    if (q->grad || k->grad || v->grad) {
        // when using this operation (in the backward pass) these grads are already set.
        // we don't want to create a (big) grad of our result, so is_node is false.
        is_node = false;
    }

    // store gradients of q, k and v as contiguous tensors concatenated in result.
    // q shape [D,N,ne2,ne3]; k shape [D,M,ne2,ne3]; v shape [M,D,ne2,ne3]
    // gradq->data = result->data
    // gradk->data = result->data + nb0*D*N*ne2*ne3
    // gradv->data = result->data + nb0*D*N*ne2*ne3 + nb0*D*M*ne2*ne3
    // note: v and gradv are actually transposed, i.e. v->ne[0] != D.
    int64_t ne[4] = { D, M+N+M, ne2, ne3 };

    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);

    result->op = GGML_OP_FLASH_ATTN_BACK;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = q;
    result->src1 = k;
    result->opt[0] = v;
    result->opt[1] = d;
    result->opt[2] = ggml_new_i32(ctx, masked ? 1 : 0);

    return result;
}
// ggml_win_part

struct ggml_tensor * ggml_win_part(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int w) {
    GGML_ASSERT(a->ne[3] == 1);
    GGML_ASSERT(a->type == GGML_TYPE_F32);

    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    // padding
    const int px = (w - a->ne[1]%w)%w;
    const int py = (w - a->ne[2]%w)%w;

    const int npx = (px + a->ne[1])/w;
    const int npy = (py + a->ne[2])/w;
    const int np = npx*npy;

    const int64_t ne[4] = { a->ne[0], w, w, np, };

    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);

    ggml_scratch_save(ctx);

    struct ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 3);
    ((int32_t *) b->data)[0] = npx;
    ((int32_t *) b->data)[1] = npy;
    ((int32_t *) b->data)[2] = w;

    ggml_scratch_load(ctx);

    result->op = GGML_OP_WIN_PART;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = NULL;
    result->opt[0] = b;

    return result;
}

// ggml_win_unpart

struct ggml_tensor * ggml_win_unpart(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        int w0,
        int h0,
        int w) {
    GGML_ASSERT(a->type == GGML_TYPE_F32);

    bool is_node = false;

    if (a->grad) {
        GGML_ASSERT(false); // TODO: implement backward
        is_node = true;
    }

    const int64_t ne[4] = { a->ne[0], w0, h0, 1, };

    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 3, ne);

    ggml_scratch_save(ctx);

    struct ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 1);
    ((int32_t *) b->data)[0] = w;

    ggml_scratch_load(ctx);

    result->op = GGML_OP_WIN_UNPART;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = NULL;
    result->opt[0] = b;

    return result;
}
// ggml_map_unary

struct ggml_tensor * ggml_map_unary_impl_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        const ggml_unary_op_f32_t fun,
        bool inplace) {
    bool is_node = false;

    if (!inplace && a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    ggml_scratch_save(ctx);

    struct ggml_tensor * addr_tensor = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, sizeof(void *) / sizeof(int32_t));
    *((void (**)(void)) addr_tensor->data) = (void (*)(void)) fun;

    ggml_scratch_load(ctx);

    result->op = GGML_OP_MAP_UNARY;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->opt[0] = addr_tensor;

    return result;
}

struct ggml_tensor * ggml_map_unary_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        const ggml_unary_op_f32_t fun) {
    return ggml_map_unary_impl_f32(ctx, a, fun, false);
}

struct ggml_tensor * ggml_map_unary_inplace_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        const ggml_unary_op_f32_t fun) {
    return ggml_map_unary_impl_f32(ctx, a, fun, true);
}
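// Usage sketch (illustrative): fun must match ggml_unary_op_f32_t, i.e.
// void (*)(const int n, float * dst, const float * src). A hypothetical
// element-wise op plugged into the graph:
//
//     static void my_square_f32(const int n, float * dst, const float * src) {
//         for (int i = 0; i < n; i++) { dst[i] = src[i]*src[i]; }
//     }
//     struct ggml_tensor * y = ggml_map_unary_f32(ctx, x, my_square_f32);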
// ggml_map_binary

struct ggml_tensor * ggml_map_binary_impl_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        const ggml_binary_op_f32_t fun,
        bool inplace) {
    GGML_ASSERT(ggml_are_same_shape(a, b));

    bool is_node = false;

    if (!inplace && (a->grad || b->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    ggml_scratch_save(ctx);

    struct ggml_tensor * addr_tensor = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, sizeof(void *) / sizeof(int32_t));
    *((void (**)(void)) addr_tensor->data) = (void (*)(void)) fun;

    ggml_scratch_load(ctx);

    result->op = GGML_OP_MAP_BINARY;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = b;
    result->opt[0] = addr_tensor;

    return result;
}

struct ggml_tensor * ggml_map_binary_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        const ggml_binary_op_f32_t fun) {
    return ggml_map_binary_impl_f32(ctx, a, b, fun, false);
}

struct ggml_tensor * ggml_map_binary_inplace_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        const ggml_binary_op_f32_t fun) {
    return ggml_map_binary_impl_f32(ctx, a, b, fun, true);
}

// ggml_map_custom1

struct ggml_tensor * ggml_map_custom1_impl_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        const ggml_custom1_op_f32_t fun,
        bool inplace) {
    bool is_node = false;

    if (!inplace && a->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    ggml_scratch_save(ctx);

    struct ggml_tensor * addr_tensor = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, sizeof(void *) / sizeof(int32_t));
    *((void (**)(void)) addr_tensor->data) = (void (*)(void)) fun;

    ggml_scratch_load(ctx);

    result->op = GGML_OP_MAP_CUSTOM1;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->opt[0] = addr_tensor;

    return result;
}

struct ggml_tensor * ggml_map_custom1_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        const ggml_custom1_op_f32_t fun) {
    return ggml_map_custom1_impl_f32(ctx, a, fun, false);
}

struct ggml_tensor * ggml_map_custom1_inplace_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        const ggml_custom1_op_f32_t fun) {
    return ggml_map_custom1_impl_f32(ctx, a, fun, true);
}

// ggml_map_custom2

struct ggml_tensor * ggml_map_custom2_impl_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        const ggml_custom2_op_f32_t fun,
        bool inplace) {
    bool is_node = false;

    if (!inplace && (a->grad || b->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    ggml_scratch_save(ctx);

    struct ggml_tensor * addr_tensor = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, sizeof(void *) / sizeof(int32_t));
    *((void (**)(void)) addr_tensor->data) = (void (*)(void)) fun;

    ggml_scratch_load(ctx);

    result->op = GGML_OP_MAP_CUSTOM2;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = b;
    result->opt[0] = addr_tensor;

    return result;
}

struct ggml_tensor * ggml_map_custom2_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        const ggml_custom2_op_f32_t fun) {
    return ggml_map_custom2_impl_f32(ctx, a, b, fun, false);
}

struct ggml_tensor * ggml_map_custom2_inplace_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        const ggml_custom2_op_f32_t fun) {
    return ggml_map_custom2_impl_f32(ctx, a, b, fun, true);
}

// ggml_map_custom3

struct ggml_tensor * ggml_map_custom3_impl_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        struct ggml_tensor * c,
        const ggml_custom3_op_f32_t fun,
        bool inplace) {
    bool is_node = false;

    if (!inplace && (a->grad || b->grad || c->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    ggml_scratch_save(ctx);

    struct ggml_tensor * addr_tensor = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, sizeof(void *) / sizeof(int32_t));
    *((void (**)(void)) addr_tensor->data) = (void (*)(void)) fun;

    ggml_scratch_load(ctx);

    result->op = GGML_OP_MAP_CUSTOM3;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = b;
    result->opt[0] = addr_tensor;
    result->opt[1] = c;

    return result;
}

struct ggml_tensor * ggml_map_custom3_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        struct ggml_tensor * c,
        const ggml_custom3_op_f32_t fun) {
    return ggml_map_custom3_impl_f32(ctx, a, b, c, fun, false);
}

struct ggml_tensor * ggml_map_custom3_inplace_f32(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        struct ggml_tensor * c,
        const ggml_custom3_op_f32_t fun) {
    return ggml_map_custom3_impl_f32(ctx, a, b, c, fun, true);
}
// ggml_cross_entropy_loss

struct ggml_tensor * ggml_cross_entropy_loss(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b) {
    GGML_ASSERT(ggml_are_same_shape(a, b));

    bool is_node = false;

    if (a->grad || b->grad) {
        is_node = true;
    }

    struct ggml_tensor * result = ggml_new_tensor_1d(ctx, a->type, 1);

    result->op = GGML_OP_CROSS_ENTROPY_LOSS;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src0 = a;
    result->src1 = b;

    return result;
}

// ggml_cross_entropy_loss_back

struct ggml_tensor * ggml_cross_entropy_loss_back(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        struct ggml_tensor * c) {
    GGML_ASSERT(ggml_are_same_shape(a, b));
    GGML_ASSERT(ggml_is_scalar(c));

    struct ggml_tensor * result = ggml_dup_tensor(ctx, a);

    result->op = GGML_OP_CROSS_ENTROPY_LOSS_BACK;
    result->grad = NULL;
    result->src0 = a;
    result->src1 = b;
    result->opt[0] = c;

    return result;
}
////////////////////////////////////////////////////////////////////////////////

void ggml_set_param(
        struct ggml_context * ctx,
        struct ggml_tensor * tensor) {
    tensor->is_param = true;

    GGML_ASSERT(tensor->grad == NULL);
    tensor->grad = ggml_dup_tensor(ctx, tensor);
}
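// Usage sketch (illustrative): tensors are marked as trainable parameters
// before the graph is built, so that the backward pass produces gradients for
// them:
//
//     struct ggml_tensor * w = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_in, n_out);
//     ggml_set_param(ctx, w); // allocates w->grad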
// ggml_compute_forward_dup

static void ggml_compute_forward_dup_same_cont(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));
    GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));
    GGML_ASSERT(src0->type == dst->type);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const size_t nb00 = src0->nb[0];
    const size_t nb0 = dst->nb[0];

    const int ith = params->ith; // thread index
    const int nth = params->nth; // number of threads

    // parallelize by elements
    const int ne = ggml_nelements(dst);
    const int dr = (ne + nth - 1) / nth;
    const int ie0 = dr * ith;
    const int ie1 = MIN(ie0 + dr, ne);

    if (ie0 < ie1) {
        memcpy(
            ((char *) dst->data + ie0*nb0),
            ((char *) src0->data + ie0*nb00),
            (ie1 - ie0) * GGML_TYPE_SIZE[src0->type]);
    }
}
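// Worked example of the split above (illustrative): with ne = 10 elements and
// nth = 4 threads, dr = (10 + 4 - 1)/4 = 3, so the threads copy the half-open
// ranges [0,3), [3,6), [6,9) and [9,10) - the last thread gets the remainder.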
static void ggml_compute_forward_dup_f16(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int64_t ne00 = src0->ne[0];
    const int64_t ne01 = src0->ne[1];
    const int64_t ne02 = src0->ne[2];
    const int64_t ne03 = src0->ne[3];

    const int64_t ne0 = dst->ne[0];
    const int64_t ne1 = dst->ne[1];
    const int64_t ne2 = dst->ne[2];
    const int64_t ne3 = dst->ne[3];

    const size_t nb00 = src0->nb[0];
    const size_t nb01 = src0->nb[1];
    const size_t nb02 = src0->nb[2];
    const size_t nb03 = src0->nb[3];

    const size_t nb0 = dst->nb[0];
    const size_t nb1 = dst->nb[1];
    const size_t nb2 = dst->nb[2];
    const size_t nb3 = dst->nb[3];

    const int ith = params->ith; // thread index
    const int nth = params->nth; // number of threads

    if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst) && src0->type == dst->type) {
        ggml_compute_forward_dup_same_cont(params, src0, dst);
        return;
    }

    // parallelize by rows
    const int nr = ne01;
    // number of rows per thread
    const int dr = (nr + nth - 1) / nth;
    // row range for this thread
    const int ir0 = dr * ith;
    const int ir1 = MIN(ir0 + dr, nr);

    if (src0->type == dst->type &&
        ne00 == ne0 &&
        nb00 == GGML_TYPE_SIZE[src0->type] && nb0 == GGML_TYPE_SIZE[dst->type]) {
        // copy by rows
        const size_t rs = ne00*nb00;
        for (int64_t i03 = 0; i03 < ne03; i03++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
                for (int64_t i01 = ir0; i01 < ir1; i01++) {
                    memcpy(
                        ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3),
                        ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03),
                        rs);
                }
            }
        }
        return;
    }

    // TODO: add more special-case implementations for tensor shapes/strides that can benefit from memcpy

    if (ggml_is_contiguous(dst)) {
        if (nb00 == sizeof(ggml_fp16_t)) {
            if (dst->type == GGML_TYPE_F16) {
                size_t id = 0;
                const size_t rs = ne00 * nb00;
                char * dst_ptr = (char *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += rs * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            const char * src0_ptr = (char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03;
                            memcpy(dst_ptr + id, src0_ptr, rs);
                            id += rs;
                        }
                        id += rs * (ne01 - ir1);
                    }
                }
            } else if (dst->type == GGML_TYPE_F32) {
                size_t id = 0;
                float * dst_ptr = (float *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += ne00 * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
                            for (int i00 = 0; i00 < ne00; i00++) {
                                dst_ptr[id] = GGML_FP16_TO_FP32(src0_ptr[i00]);
                                id++;
                            }
                        }
                        id += ne00 * (ne01 - ir1);
                    }
                }
            } else if (ggml_is_quantized(dst->type)) {
                quantize_row_q_t const quantize_row_q = quantize_fns[dst->type].quantize_row_q;
                float * src0_f32 = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith;

                size_t id = 0;
                size_t rs = nb0 * (ne00 / GGML_BLCK_SIZE[dst->type]);
                char * dst_ptr = (char *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += rs * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
                            for (int i00 = 0; i00 < ne00; i00++) {
                                src0_f32[i00] = GGML_FP16_TO_FP32(src0_ptr[i00]);
                            }
                            quantize_row_q(src0_f32, dst_ptr + id, ne00);
                            id += rs;
                        }
                        id += rs * (ne01 - ir1);
                    }
                }
            } else {
                GGML_ASSERT(false); // TODO: implement
            }
        } else {
            //printf("%s: this is not optimal - fix me\n", __func__);

            if (dst->type == GGML_TYPE_F32) {
                size_t id = 0;
                float * dst_ptr = (float *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += ne00 * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            for (int i00 = 0; i00 < ne00; i00++) {
                                const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
                                dst_ptr[id] = GGML_FP16_TO_FP32(*src0_ptr);
                                id++;
                            }
                        }
                        id += ne00 * (ne01 - ir1);
                    }
                }
            } else if (dst->type == GGML_TYPE_F16) {
                size_t id = 0;
                ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += ne00 * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            for (int i00 = 0; i00 < ne00; i00++) {
                                const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
                                dst_ptr[id] = *src0_ptr;
                                id++;
                            }
                        }
                        id += ne00 * (ne01 - ir1);
                    }
                }
            } else {
                GGML_ASSERT(false); // TODO: implement
            }
        }
        return;
    }
    // dst counters
    int64_t i10 = 0;
    int64_t i11 = 0;
    int64_t i12 = 0;
    int64_t i13 = 0;

    if (dst->type == GGML_TYPE_F16) {
        for (int64_t i03 = 0; i03 < ne03; i03++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
                i10 += ne00 * ir0;
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
                for (int64_t i01 = ir0; i01 < ir1; i01++) {
                    for (int64_t i00 = 0; i00 < ne00; i00++) {
                        const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
                        char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3);

                        memcpy(dst_ptr, src0_ptr, sizeof(ggml_fp16_t));

                        // wrap the dst counters at the dst dims - they can differ from src0's
                        if (++i10 == ne0) {
                            i10 = 0;
                            if (++i11 == ne1) {
                                i11 = 0;
                                if (++i12 == ne2) {
                                    i12 = 0;
                                    if (++i13 == ne3) {
                                        i13 = 0;
                                    }
                                }
                            }
                        }
                    }
                }
                i10 += ne00 * (ne01 - ir1);
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
            }
        }
    } else if (dst->type == GGML_TYPE_F32) {
        for (int64_t i03 = 0; i03 < ne03; i03++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
                i10 += ne00 * ir0;
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
                for (int64_t i01 = ir0; i01 < ir1; i01++) {
                    for (int64_t i00 = 0; i00 < ne00; i00++) {
                        const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
                        char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3);

                        *(float *) dst_ptr = GGML_FP16_TO_FP32(*(const ggml_fp16_t *) src0_ptr);

                        if (++i10 == ne0) {
                            i10 = 0;
                            if (++i11 == ne1) {
                                i11 = 0;
                                if (++i12 == ne2) {
                                    i12 = 0;
                                    if (++i13 == ne3) {
                                        i13 = 0;
                                    }
                                }
                            }
                        }
                    }
                }
                i10 += ne00 * (ne01 - ir1);
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
            }
        }
    } else {
        GGML_ASSERT(false); // TODO: implement
    }
}
  6280. static void ggml_compute_forward_dup_f32(
  6281. const struct ggml_compute_params * params,
  6282. const struct ggml_tensor * src0,
  6283. struct ggml_tensor * dst) {
  6284. GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));
  6285. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  6286. return;
  6287. }
  6288. const int64_t ne00 = src0->ne[0];
  6289. const int64_t ne01 = src0->ne[1];
  6290. const int64_t ne02 = src0->ne[2];
  6291. const int64_t ne03 = src0->ne[3];
  6292. const int64_t ne0 = dst->ne[0];
  6293. const int64_t ne1 = dst->ne[1];
  6294. const int64_t ne2 = dst->ne[2];
  6295. const int64_t ne3 = dst->ne[3];
  6296. const size_t nb00 = src0->nb[0];
  6297. const size_t nb01 = src0->nb[1];
  6298. const size_t nb02 = src0->nb[2];
  6299. const size_t nb03 = src0->nb[3];
  6300. const size_t nb0 = dst->nb[0];
  6301. const size_t nb1 = dst->nb[1];
  6302. const size_t nb2 = dst->nb[2];
  6303. const size_t nb3 = dst->nb[3];
  6304. const int ith = params->ith; // thread index
  6305. const int nth = params->nth; // number of threads
  6306. if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst) && src0->type == dst->type) {
  6307. ggml_compute_forward_dup_same_cont(params, src0, dst);
  6308. return;
  6309. }
  6310. // parallelize by rows
  6311. const int nr = ne01;
  6312. // number of rows per thread
  6313. const int dr = (nr + nth - 1) / nth;
  6314. // row range for this thread
  6315. const int ir0 = dr * ith;
  6316. const int ir1 = MIN(ir0 + dr, nr);
    if (src0->type == dst->type &&
        ne00 == ne0 &&
        nb00 == GGML_TYPE_SIZE[src0->type] && nb0 == GGML_TYPE_SIZE[dst->type]) {
        // copy by rows
        const size_t rs = ne00*nb00;
        for (int64_t i03 = 0; i03 < ne03; i03++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
                for (int64_t i01 = ir0; i01 < ir1; i01++) {
                    memcpy(
                        ((char *)  dst->data + i01*nb1  + i02*nb2  + i03*nb3),
                        ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03),
                        rs);
                }
            }
        }
        return;
    }

    if (ggml_is_contiguous(dst)) {
        // TODO: simplify
        if (nb00 == sizeof(float)) {
            if (dst->type == GGML_TYPE_F32) {
                size_t id = 0;
                const size_t rs = ne00 * nb00;
                char * dst_ptr = (char *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += rs * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            const char * src0_ptr = (char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03;
                            memcpy(dst_ptr + id, src0_ptr, rs);
                            id += rs;
                        }
                        id += rs * (ne01 - ir1);
                    }
                }
            } else if (dst->type == GGML_TYPE_F16) {
                size_t id = 0;
                ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += ne00 * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            for (int i00 = 0; i00 < ne00; i00++) {
                                const float * src0_ptr = (float *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
                                dst_ptr[id] = GGML_FP32_TO_FP16(*src0_ptr);
                                id++;
                            }
                        }
                        id += ne00 * (ne01 - ir1);
                    }
                }
            } else if (ggml_is_quantized(dst->type)) {
                quantize_row_q_t const quantize_row_q = quantize_fns[dst->type].quantize_row_q;

                size_t id = 0;
                size_t rs = nb0 * (ne00 / GGML_BLCK_SIZE[dst->type]);
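                // rs is the byte size of one quantized row: ne00 elements
                // pack into ne00/GGML_BLCK_SIZE blocks of nb0 bytes each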
                char * dst_ptr = (char *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += rs * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            const float * src0_ptr = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
                            quantize_row_q(src0_ptr, dst_ptr + id, ne00);
                            id += rs;
                        }
                        id += rs * (ne01 - ir1);
                    }
                }
            } else {
                GGML_ASSERT(false); // TODO: implement
            }
        } else {
            //printf("%s: this is not optimal - fix me\n", __func__);

            if (dst->type == GGML_TYPE_F32) {
                size_t id = 0;
                float * dst_ptr = (float *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += ne00 * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            for (int i00 = 0; i00 < ne00; i00++) {
                                const float * src0_ptr = (float *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
                                dst_ptr[id] = *src0_ptr;
                                id++;
                            }
                        }
                        id += ne00 * (ne01 - ir1);
                    }
                }
            } else if (dst->type == GGML_TYPE_F16) {
                size_t id = 0;
                ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data;

                for (int i03 = 0; i03 < ne03; i03++) {
                    for (int i02 = 0; i02 < ne02; i02++) {
                        id += ne00 * ir0;
                        for (int i01 = ir0; i01 < ir1; i01++) {
                            for (int i00 = 0; i00 < ne00; i00++) {
                                const float * src0_ptr = (float *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
                                dst_ptr[id] = GGML_FP32_TO_FP16(*src0_ptr);
                                id++;
                            }
                        }
                        id += ne00 * (ne01 - ir1);
                    }
                }
            } else {
                GGML_ASSERT(false); // TODO: implement
            }
        }
        return;
    }
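    // general case: dst may have a different shape than src0 (only the total
    // element count has to match), so the dst position is tracked with rolling
    // counters i10..i13 that carry over across rows instead of being recomputed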
    // dst counters
    int64_t i10 = 0;
    int64_t i11 = 0;
    int64_t i12 = 0;
    int64_t i13 = 0;

    if (dst->type == GGML_TYPE_F32) {
        for (int64_t i03 = 0; i03 < ne03; i03++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
                i10 += ne00 * ir0;
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
                for (int64_t i01 = ir0; i01 < ir1; i01++) {
                    for (int64_t i00 = 0; i00 < ne00; i00++) {
                        const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
                        char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3);

                        memcpy(dst_ptr, src0_ptr, sizeof(float));

                        if (++i10 == ne0) {
                            i10 = 0;
                            if (++i11 == ne1) {
                                i11 = 0;
                                if (++i12 == ne2) {
                                    i12 = 0;
                                    if (++i13 == ne3) {
                                        i13 = 0;
                                    }
                                }
                            }
                        }
                    }
                }
                i10 += ne00 * (ne01 - ir1);
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
            }
        }
    } else if (dst->type == GGML_TYPE_F16) {
        for (int64_t i03 = 0; i03 < ne03; i03++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
                i10 += ne00 * ir0;
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
                for (int64_t i01 = ir0; i01 < ir1; i01++) {
                    for (int64_t i00 = 0; i00 < ne00; i00++) {
                        const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
                        char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3);

                        *(ggml_fp16_t *) dst_ptr = GGML_FP32_TO_FP16(*(const float *) src0_ptr);

                        if (++i10 == ne0) {
                            i10 = 0;
                            if (++i11 == ne1) {
                                i11 = 0;
                                if (++i12 == ne2) {
                                    i12 = 0;
                                    if (++i13 == ne3) {
                                        i13 = 0;
                                    }
                                }
                            }
                        }
                    }
                }
                i10 += ne00 * (ne01 - ir1);
                while (i10 >= ne0) {
                    i10 -= ne0;
                    if (++i11 == ne1) {
                        i11 = 0;
                        if (++i12 == ne2) {
                            i12 = 0;
                            if (++i13 == ne3) {
                                i13 = 0;
                            }
                        }
                    }
                }
            }
        }
    } else {
        GGML_ASSERT(false); // TODO: implement
    }
}

static void ggml_compute_forward_dup(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst) && src0->type == dst->type) {
        ggml_compute_forward_dup_same_cont(params, src0, dst);
        return;
    }
    switch (src0->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_dup_f16(params, src0, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_dup_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_add

static void ggml_compute_forward_add_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(src0);
    const int64_t ne0 = src0->ne[0];
    const int64_t ne1 = src0->ne[1];
    const int64_t ne2 = src0->ne[2];

    const size_t nb00 = src0->nb[0];
    const size_t nb01 = src0->nb[1];
    const size_t nb02 = src0->nb[2];
    const size_t nb03 = src0->nb[3];

    const size_t nb10 = src1->nb[0];
    const size_t nb11 = src1->nb[1];
    const size_t nb12 = src1->nb[2];
    const size_t nb13 = src1->nb[3];

    const size_t nb0 = dst->nb[0];
    const size_t nb1 = dst->nb[1];
    const size_t nb2 = dst->nb[2];
    const size_t nb3 = dst->nb[3];

    GGML_ASSERT( nb0 == sizeof(float));
    GGML_ASSERT(nb00 == sizeof(float));

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    if (nb10 == sizeof(float)) {
        for (int ir = ir0; ir < ir1; ++ir) {
            // src0, src1 and dst are same shape => same indices
            const int i3 = ir/(ne2*ne1);
            const int i2 = (ir - i3*ne2*ne1)/ne1;
            const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

#ifdef GGML_USE_ACCELERATE
            vDSP_vadd(
                    (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01), 1,
                    (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11), 1,
                    (float *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1 ), 1,
                    ne0);
#else
            ggml_vec_add_f32(ne0,
                    (float *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1 ),
                    (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01),
                    (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11));
#endif
        }
    } else {
        // src1 is not contiguous
        for (int ir = ir0; ir < ir1; ++ir) {
            // src0, src1 and dst are same shape => same indices
            const int i3 = ir/(ne2*ne1);
            const int i2 = (ir - i3*ne2*ne1)/ne1;
            const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

            float * dst_ptr  = (float *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1 );
            float * src0_ptr = (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
            for (int i0 = 0; i0 < ne0; i0++) {
                float * src1_ptr = (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11 + i0*nb10);

                dst_ptr[i0] = src0_ptr[i0] + *src1_ptr;
            }
        }
    }
}
static void ggml_compute_forward_add_f16_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(src0);
    const int64_t ne0 = src0->ne[0];
    const int64_t ne1 = src0->ne[1];
    const int64_t ne2 = src0->ne[2];

    const size_t nb00 = src0->nb[0];
    const size_t nb01 = src0->nb[1];
    const size_t nb02 = src0->nb[2];
    const size_t nb03 = src0->nb[3];

    const size_t nb10 = src1->nb[0];
    const size_t nb11 = src1->nb[1];
    const size_t nb12 = src1->nb[2];
    const size_t nb13 = src1->nb[3];

    const size_t nb0 = dst->nb[0];
    const size_t nb1 = dst->nb[1];
    const size_t nb2 = dst->nb[2];
    const size_t nb3 = dst->nb[3];

    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT(dst->type  == GGML_TYPE_F16);

    GGML_ASSERT( nb0 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    if (nb10 == sizeof(float)) {
        for (int ir = ir0; ir < ir1; ++ir) {
            // src0, src1 and dst are same shape => same indices
            const int i3 = ir/(ne2*ne1);
            const int i2 = (ir - i3*ne2*ne1)/ne1;
            const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

            ggml_fp16_t * dst_ptr  = (ggml_fp16_t *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1);
            ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
            float       * src1_ptr = (float *)       ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11);

            for (int i = 0; i < ne0; i++) {
                dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + src1_ptr[i]);
            }
        }
    }
    else {
        // src1 is not contiguous
        GGML_ASSERT(false);
    }
}

static void ggml_compute_forward_add_f16_f16(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(src0);
    const int64_t ne0 = src0->ne[0];
    const int64_t ne1 = src0->ne[1];
    const int64_t ne2 = src0->ne[2];

    const size_t nb00 = src0->nb[0];
    const size_t nb01 = src0->nb[1];
    const size_t nb02 = src0->nb[2];
    const size_t nb03 = src0->nb[3];

    const size_t nb10 = src1->nb[0];
    const size_t nb11 = src1->nb[1];
    const size_t nb12 = src1->nb[2];
    const size_t nb13 = src1->nb[3];

    const size_t nb0 = dst->nb[0];
    const size_t nb1 = dst->nb[1];
    const size_t nb2 = dst->nb[2];
    const size_t nb3 = dst->nb[3];

    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F16);
    GGML_ASSERT(dst->type  == GGML_TYPE_F16);

    GGML_ASSERT( nb0 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    if (nb10 == sizeof(ggml_fp16_t)) {
        for (int ir = ir0; ir < ir1; ++ir) {
            // src0, src1 and dst are same shape => same indices
            const int i3 = ir/(ne2*ne1);
            const int i2 = (ir - i3*ne2*ne1)/ne1;
            const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

            ggml_fp16_t * dst_ptr  = (ggml_fp16_t *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1);
            ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
            ggml_fp16_t * src1_ptr = (ggml_fp16_t *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11);

            for (int i = 0; i < ne0; i++) {
                dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + GGML_FP16_TO_FP32(src1_ptr[i]));
            }
        }
    }
    else {
        // src1 is not contiguous
        GGML_ASSERT(false);
    }
}
static void ggml_compute_forward_add_q_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int nr = ggml_nrows(src0);
    const int64_t ne00 = src0->ne[0];
    const int64_t ne01 = src0->ne[1];
    const int64_t ne02 = src0->ne[2];
    //const int64_t ne03 = src0->ne[3];

    const size_t nb00 = src0->nb[0];
    const size_t nb01 = src0->nb[1];
    const size_t nb02 = src0->nb[2];
    const size_t nb03 = src0->nb[3];

    const size_t nb10 = src1->nb[0];
    const size_t nb11 = src1->nb[1];
    const size_t nb12 = src1->nb[2];
    const size_t nb13 = src1->nb[3];

    const size_t nb0 = dst->nb[0];
    const size_t nb1 = dst->nb[1];
    const size_t nb2 = dst->nb[2];
    const size_t nb3 = dst->nb[3];

    const int ith = params->ith;
    const int nth = params->nth;

    const enum ggml_type type = src0->type;
    dequantize_row_q_t const dequantize_row_q = quantize_fns[type].dequantize_row_q;
    quantize_row_q_t const quantize_row_q = quantize_fns[type].quantize_row_q;

    // we don't support permuted src0 or src1
    GGML_ASSERT(nb00 == GGML_TYPE_SIZE[type]);
    GGML_ASSERT(nb10 == sizeof(float));

    // dst cannot be transposed or permuted
    GGML_ASSERT(nb0 <= nb1);
    GGML_ASSERT(nb1 <= nb2);
    GGML_ASSERT(nb2 <= nb3);

    GGML_ASSERT(ggml_is_quantized(src0->type));
    GGML_ASSERT(dst->type == src0->type);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    float * wdata = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith;
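    // each thread gets its own scratch row in wdata; the extra
    // CACHE_LINE_SIZE_F32 floats of padding keep the per-thread buffers on
    // separate cache lines so threads do not contend for the same line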
    for (int ir = ir0; ir < ir1; ++ir) {
        // src0 indices
        const int i03 = ir/(ne02*ne01);
        const int i02 = (ir - i03*ne02*ne01)/ne01;
        const int i01 = (ir - i03*ne02*ne01 - i02*ne01);

        // src1 and dst are same shape as src0 => same indices
        const int i13 = i03;
        const int i12 = i02;
        const int i11 = i01;

        const int i3 = i03;
        const int i2 = i02;
        const int i1 = i01;

        void  * src0_row = (void *) ((char *) src0->data + (i01*nb01 + i02*nb02 + i03*nb03));
        float * src1_row = (float *)((char *) src1->data + (i11*nb11 + i12*nb12 + i13*nb13));
        void  * dst_row  = (void *) ((char *)  dst->data + ( i1*nb1  +  i2*nb2  +  i3*nb3));

        assert(ne00 % 32 == 0);

        // dequantize row from src0 to temp buffer
        dequantize_row_q(src0_row, wdata, ne00);
        // add src1
        ggml_vec_acc_f32(ne00, wdata, src1_row);
        // quantize row to dst
        quantize_row_q(wdata, dst_row, ne00);
    }
}
static void ggml_compute_forward_add(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_add_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F16:
            {
                if (src1->type == GGML_TYPE_F16) {
                    ggml_compute_forward_add_f16_f16(params, src0, src1, dst);
                }
                else if (src1->type == GGML_TYPE_F32) {
                    ggml_compute_forward_add_f16_f32(params, src0, src1, dst);
                }
                else {
                    GGML_ASSERT(false);
                }
            } break;
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
            {
                ggml_compute_forward_add_q_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_add1

static void ggml_compute_forward_add1_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, dst));
    GGML_ASSERT(ggml_is_scalar(src1));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(src0);
    const int64_t ne0 = src0->ne[0];
    const int64_t ne1 = src0->ne[1];
    const int64_t ne2 = src0->ne[2];

    const size_t nb00 = src0->nb[0];
    const size_t nb01 = src0->nb[1];
    const size_t nb02 = src0->nb[2];
    const size_t nb03 = src0->nb[3];

    const size_t nb0 = dst->nb[0];
    const size_t nb1 = dst->nb[1];
    const size_t nb2 = dst->nb[2];
    const size_t nb3 = dst->nb[3];

    GGML_ASSERT( nb0 == sizeof(float));
    GGML_ASSERT(nb00 == sizeof(float));

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int ir = ir0; ir < ir1; ++ir) {
        // src0 and dst are same shape => same indices
        const int i3 = ir/(ne2*ne1);
        const int i2 = (ir - i3*ne2*ne1)/ne1;
        const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

#ifdef GGML_USE_ACCELERATE
        UNUSED(ggml_vec_add1_f32);

        vDSP_vadd(
                (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01), 1,
                (float *) ((char *) src1->data), 0,
                (float *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1 ), 1,
                ne0);
#else
        ggml_vec_add1_f32(ne0,
                (float *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1 ),
                (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01),
               *(float *) src1->data);
#endif
    }
}
static void ggml_compute_forward_add1_f16_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, dst));
    GGML_ASSERT(ggml_is_scalar(src1));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // scalar to add
    const float v = *(float *) src1->data;

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(src0);
    const int64_t ne0 = src0->ne[0];
    const int64_t ne1 = src0->ne[1];
    const int64_t ne2 = src0->ne[2];

    const size_t nb00 = src0->nb[0];
    const size_t nb01 = src0->nb[1];
    const size_t nb02 = src0->nb[2];
    const size_t nb03 = src0->nb[3];

    const size_t nb0 = dst->nb[0];
    const size_t nb1 = dst->nb[1];
    const size_t nb2 = dst->nb[2];
    const size_t nb3 = dst->nb[3];

    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT(dst->type  == GGML_TYPE_F16);

    GGML_ASSERT( nb0 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int ir = ir0; ir < ir1; ++ir) {
        // src0 and dst are same shape => same indices
        const int i3 = ir/(ne2*ne1);
        const int i2 = (ir - i3*ne2*ne1)/ne1;
        const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

        ggml_fp16_t * dst_ptr  = (ggml_fp16_t *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1 );
        ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
        for (int i = 0; i < ne0; i++) {
            dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + v);
        }
    }
}

static void ggml_compute_forward_add1_f16_f16(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, dst));
    GGML_ASSERT(ggml_is_scalar(src1));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // scalar to add
    const float v = GGML_FP16_TO_FP32(*(ggml_fp16_t *) src1->data);

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(src0);
    const int64_t ne0 = src0->ne[0];
    const int64_t ne1 = src0->ne[1];
    const int64_t ne2 = src0->ne[2];

    const size_t nb00 = src0->nb[0];
    const size_t nb01 = src0->nb[1];
    const size_t nb02 = src0->nb[2];
    const size_t nb03 = src0->nb[3];

    const size_t nb0 = dst->nb[0];
    const size_t nb1 = dst->nb[1];
    const size_t nb2 = dst->nb[2];
    const size_t nb3 = dst->nb[3];

    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F16);
    GGML_ASSERT(dst->type  == GGML_TYPE_F16);

    GGML_ASSERT( nb0 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int ir = ir0; ir < ir1; ++ir) {
        // src0 and dst are same shape => same indices
        const int i3 = ir/(ne2*ne1);
        const int i2 = (ir - i3*ne2*ne1)/ne1;
        const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

        ggml_fp16_t * dst_ptr  = (ggml_fp16_t *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1 );
        ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
        for (int i = 0; i < ne0; i++) {
            dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + v);
        }
    }
}
static void ggml_compute_forward_add1_q_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, dst));
    GGML_ASSERT(ggml_is_scalar(src1));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // scalar to add
    const float v = *(float *) src1->data;

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(src0);
    const int64_t ne0 = src0->ne[0];
    const int64_t ne1 = src0->ne[1];
    const int64_t ne2 = src0->ne[2];

    const size_t nb00 = src0->nb[0];
    const size_t nb01 = src0->nb[1];
    const size_t nb02 = src0->nb[2];
    const size_t nb03 = src0->nb[3];

    const size_t nb0 = dst->nb[0];
    const size_t nb1 = dst->nb[1];
    const size_t nb2 = dst->nb[2];
    const size_t nb3 = dst->nb[3];

    const enum ggml_type type = src0->type;
    dequantize_row_q_t const dequantize_row_q = quantize_fns[type].dequantize_row_q;
    quantize_row_q_t const quantize_row_q = quantize_fns[type].quantize_row_q;

    // we don't support permuted src0
    GGML_ASSERT(nb00 == GGML_TYPE_SIZE[type]);

    // dst cannot be transposed or permuted
    GGML_ASSERT(nb0 <= nb1);
    GGML_ASSERT(nb1 <= nb2);
    GGML_ASSERT(nb2 <= nb3);

    GGML_ASSERT(ggml_is_quantized(src0->type));
    GGML_ASSERT(dst->type == src0->type);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    float * wdata = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32) * ith;
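    // per-thread scratch row, laid out the same way as in
    // ggml_compute_forward_add_q_f32 above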
    for (int ir = ir0; ir < ir1; ++ir) {
        // src0 and dst are same shape => same indices
        const int i3 = ir/(ne2*ne1);
        const int i2 = (ir - i3*ne2*ne1)/ne1;
        const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

        void * src0_row = (void *) ((char *) src0->data + (i1*nb01 + i2*nb02 + i3*nb03));
        void * dst_row  = (void *) ((char *)  dst->data + (i1*nb1  + i2*nb2  + i3*nb3 ));

        assert(ne0 % 32 == 0);

        // dequantize row from src0 to temp buffer
        dequantize_row_q(src0_row, wdata, ne0);
        // add src1
        ggml_vec_acc1_f32(ne0, wdata, v);
        // quantize row to dst
        quantize_row_q(wdata, dst_row, ne0);
    }
}
static void ggml_compute_forward_add1(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_add1_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F16:
            {
                if (src1->type == GGML_TYPE_F16) {
                    ggml_compute_forward_add1_f16_f16(params, src0, src1, dst);
                }
                else if (src1->type == GGML_TYPE_F32) {
                    ggml_compute_forward_add1_f16_f32(params, src0, src1, dst);
                }
                else {
                    GGML_ASSERT(false);
                }
            } break;
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q8_1:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
            {
                ggml_compute_forward_add1_q_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_acc

static void ggml_compute_forward_acc_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        const struct ggml_tensor * opt0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, dst));
    GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));

    GGML_ASSERT(opt0->type == GGML_TYPE_I32);
    GGML_ASSERT(ggml_nelements(opt0) == 5);

    // view src0 and dst with these strides and data offset in bytes during acc
    // nb0 is implicitly element_size because src0 and dst are contiguous
    size_t nb1     = ((int32_t *) opt0->data)[0];
    size_t nb2     = ((int32_t *) opt0->data)[1];
    size_t nb3     = ((int32_t *) opt0->data)[2];
    size_t offset  = ((int32_t *) opt0->data)[3];
    bool   inplace = (bool) ((int32_t *) opt0->data)[4];

    if (!inplace && (params->type == GGML_TASK_INIT)) {
        // memcpy needs to be synchronized across threads to avoid race conditions.
        // => do it in INIT phase
        memcpy(
            ((char *)  dst->data),
            ((char *) src0->data),
            ggml_nbytes(dst));
    }

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(src1);
    const int nc = src1->ne[0];

    const int64_t ne10 = src1->ne[0];
    const int64_t ne11 = src1->ne[1];
    const int64_t ne12 = src1->ne[2];
    const int64_t ne13 = src1->ne[3];

    const size_t nb10 = src1->nb[0];
    const size_t nb11 = src1->nb[1];
    const size_t nb12 = src1->nb[2];
    const size_t nb13 = src1->nb[3];

    // src0 and dst as viewed during acc
    const size_t nb0 = ggml_element_size(src0);

    const size_t nb00 = nb0;
    const size_t nb01 = nb1;
    const size_t nb02 = nb2;
    const size_t nb03 = nb3;
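    // offset and the nb1/nb2/nb3 strides come from opt0; the asserts below
    // make sure the viewed region stays within the bounds of dst and src0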
    GGML_ASSERT(offset + (ne10 == 0 ? 0 : ne10-1)*nb0  + (ne11 == 0 ? 0 : ne11-1)*nb1  + (ne12 == 0 ? 0 : ne12-1)*nb2  + (ne13 == 0 ? 0 : ne13-1)*nb3  < ggml_nbytes(dst));
    GGML_ASSERT(offset + (ne10 == 0 ? 0 : ne10-1)*nb00 + (ne11 == 0 ? 0 : ne11-1)*nb01 + (ne12 == 0 ? 0 : ne12-1)*nb02 + (ne13 == 0 ? 0 : ne13-1)*nb03 < ggml_nbytes(src0));

    GGML_ASSERT(nb10 == sizeof(float));

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int ir = ir0; ir < ir1; ++ir) {
        // src0 and dst are viewed with shape of src1 and offset
        // => same indices
        const int i3 = ir/(ne12*ne11);
        const int i2 = (ir - i3*ne12*ne11)/ne11;
        const int i1 = (ir - i3*ne12*ne11 - i2*ne11);

#ifdef GGML_USE_ACCELERATE
        vDSP_vadd(
                (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + offset), 1,
                (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11), 1,
                (float *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1  + offset), 1, nc);
#else
        ggml_vec_add_f32(nc,
                (float *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1  + offset),
                (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + offset),
                (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11));
#endif
    }
}
static void ggml_compute_forward_acc(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        const struct ggml_tensor * opt0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_acc_f32(params, src0, src1, opt0, dst);
            } break;
        case GGML_TYPE_F16:
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q8_1:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_sub

static void ggml_compute_forward_sub_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int nr = ggml_nrows(src0);
    const int64_t ne0 = src0->ne[0];
    const int64_t ne1 = src0->ne[1];
    const int64_t ne2 = src0->ne[2];

    const size_t nb00 = src0->nb[0];
    const size_t nb01 = src0->nb[1];
    const size_t nb02 = src0->nb[2];
    const size_t nb03 = src0->nb[3];

    const size_t nb10 = src1->nb[0];
    const size_t nb11 = src1->nb[1];
    const size_t nb12 = src1->nb[2];
    const size_t nb13 = src1->nb[3];

    const size_t nb0 = dst->nb[0];
    const size_t nb1 = dst->nb[1];
    const size_t nb2 = dst->nb[2];
    const size_t nb3 = dst->nb[3];

    GGML_ASSERT( nb0 == sizeof(float));
    GGML_ASSERT(nb00 == sizeof(float));

    if (nb10 == sizeof(float)) {
        for (int ir = 0; ir < nr; ++ir) {
            // src0, src1 and dst are same shape => same indices
            const int i3 = ir/(ne2*ne1);
            const int i2 = (ir - i3*ne2*ne1)/ne1;
            const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

#ifdef GGML_USE_ACCELERATE
            vDSP_vsub(
                    (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11), 1,
                    (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01), 1,
                    (float *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1 ), 1,
                    ne0);
#else
            ggml_vec_sub_f32(ne0,
                    (float *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1 ),
                    (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01),
                    (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11));
#endif
        }
    } else {
        // src1 is not contiguous
        for (int ir = 0; ir < nr; ++ir) {
            // src0, src1 and dst are same shape => same indices
            const int i3 = ir/(ne2*ne1);
            const int i2 = (ir - i3*ne2*ne1)/ne1;
            const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

            float * dst_ptr  = (float *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1 );
            float * src0_ptr = (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
            for (int i0 = 0; i0 < ne0; i0++) {
                float * src1_ptr = (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11 + i0*nb10);

                dst_ptr[i0] = src0_ptr[i0] - *src1_ptr;
            }
        }
    }
}
static void ggml_compute_forward_sub(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_sub_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_mul

static void ggml_compute_forward_mul_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_can_repeat_rows(src1, src0) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

#ifdef GGML_USE_CLBLAST
    if (src1->backend == GGML_BACKEND_GPU) {
        if (ith == 0) {
            ggml_cl_mul(src0, src1, dst);
        }
        return;
    }
#endif

    const int64_t nr = ggml_nrows(src0);
    const int64_t ne00 = src0->ne[0];
    const int64_t ne01 = src0->ne[1];
    const int64_t ne02 = src0->ne[2];

    const int64_t ne10 = src1->ne[0];
    const int64_t ne11 = src1->ne[1];
    const int64_t ne12 = src1->ne[2];
    const int64_t ne13 = src1->ne[3];

    const size_t nb00 = src0->nb[0];
    const size_t nb01 = src0->nb[1];
    const size_t nb02 = src0->nb[2];
    const size_t nb03 = src0->nb[3];

    const size_t nb10 = src1->nb[0];
    const size_t nb11 = src1->nb[1];
    const size_t nb12 = src1->nb[2];
    const size_t nb13 = src1->nb[3];

    const size_t nb0 = dst->nb[0];
    const size_t nb1 = dst->nb[1];
    const size_t nb2 = dst->nb[2];
    const size_t nb3 = dst->nb[3];

    GGML_ASSERT( nb0 == sizeof(float));
    GGML_ASSERT(nb00 == sizeof(float));
    GGML_ASSERT(ne00 == ne10);
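    // src1 is broadcast across src0 in dims 1-3: the i11/i12/i13 indices below
    // are taken modulo src1's sizes (ggml_can_repeat_rows guarantees they divide)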
    if (nb10 == sizeof(float)) {
        for (int64_t ir = ith; ir < nr; ir += nth) {
            // src0 and dst are same shape => same indices
            const int64_t i03 = ir/(ne02*ne01);
            const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
            const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);

            const int64_t i13 = i03 % ne13;
            const int64_t i12 = i02 % ne12;
            const int64_t i11 = i01 % ne11;

            float * dst_ptr  = (float *) ((char *) dst->data  + i03*nb3  + i02*nb2  + i01*nb1 );
            float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);
            float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11);

#ifdef GGML_USE_ACCELERATE
            UNUSED(ggml_vec_mul_f32);

            vDSP_vmul( src0_ptr, 1, src1_ptr, 1, dst_ptr, 1, ne00);
#else
            ggml_vec_mul_f32(ne00, dst_ptr, src0_ptr, src1_ptr);
#endif
        }
    } else {
        // src1 is not contiguous
        for (int64_t ir = ith; ir < nr; ir += nth) {
            // src0 and dst are same shape => same indices
            // src1 is broadcastable across src0 and dst in i1, i2, i3
            const int64_t i03 = ir/(ne02*ne01);
            const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
            const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);

            const int64_t i13 = i03 % ne13;
            const int64_t i12 = i02 % ne12;
            const int64_t i11 = i01 % ne11;

            float * dst_ptr  = (float *) ((char *) dst->data  + i03*nb3  + i02*nb2  + i01*nb1 );
            float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);

            for (int64_t i0 = 0; i0 < ne00; i0++) {
                float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + i0*nb10);

                dst_ptr[i0] = src0_ptr[i0] * (*src1_ptr);
            }
        }
    }
}

static void ggml_compute_forward_mul(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_mul_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_div

static void ggml_compute_forward_div_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int nr = ggml_nrows(src0);
    const int64_t ne0 = src0->ne[0];
    const int64_t ne1 = src0->ne[1];
    const int64_t ne2 = src0->ne[2];

    const size_t nb00 = src0->nb[0];
    const size_t nb01 = src0->nb[1];
    const size_t nb02 = src0->nb[2];
    const size_t nb03 = src0->nb[3];

    const size_t nb10 = src1->nb[0];
    const size_t nb11 = src1->nb[1];
    const size_t nb12 = src1->nb[2];
    const size_t nb13 = src1->nb[3];

    const size_t nb0 = dst->nb[0];
    const size_t nb1 = dst->nb[1];
    const size_t nb2 = dst->nb[2];
    const size_t nb3 = dst->nb[3];

    GGML_ASSERT( nb0 == sizeof(float));
    GGML_ASSERT(nb00 == sizeof(float));

    if (nb10 == sizeof(float)) {
        for (int ir = 0; ir < nr; ++ir) {
            // src0, src1 and dst are same shape => same indices
            const int i3 = ir/(ne2*ne1);
            const int i2 = (ir - i3*ne2*ne1)/ne1;
            const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

#ifdef GGML_USE_ACCELERATE
            vDSP_vdiv(
                    (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11), 1,
                    (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01), 1,
                    (float *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1 ), 1,
                    ne0);
#else
            ggml_vec_div_f32(ne0,
                    (float *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1 ),
                    (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01),
                    (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11));
#endif
        }
    } else {
        // src1 is not contiguous
        for (int ir = 0; ir < nr; ++ir) {
            // src0, src1 and dst are same shape => same indices
            const int i3 = ir/(ne2*ne1);
            const int i2 = (ir - i3*ne2*ne1)/ne1;
            const int i1 = (ir - i3*ne2*ne1 - i2*ne1);

            float * dst_ptr  = (float *) ((char *) dst->data  + i3*nb3  + i2*nb2  + i1*nb1 );
            float * src0_ptr = (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
            for (int i0 = 0; i0 < ne0; i0++) {
                float * src1_ptr = (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11 + i0*nb10);

                dst_ptr[i0] = src0_ptr[i0] / (*src1_ptr);
            }
        }
    }
}

static void ggml_compute_forward_div(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_div_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_sqr

static void ggml_compute_forward_sqr_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert( dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_sqr_f32(nc,
                (float *) ((char *) dst->data  + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

static void ggml_compute_forward_sqr(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_sqr_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_sqrt

static void ggml_compute_forward_sqrt_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert( dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_sqrt_f32(nc,
                (float *) ((char *) dst->data  + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

static void ggml_compute_forward_sqrt(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_sqrt_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_log

static void ggml_compute_forward_log_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(params->ith == 0);
    GGML_ASSERT(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    GGML_ASSERT( dst->nb[0] == sizeof(float));
    GGML_ASSERT(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_log_f32(nc,
                (float *) ((char *) dst->data  + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

static void ggml_compute_forward_log(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_log_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_sum

static void ggml_compute_forward_sum_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_is_scalar(dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    assert(src0->nb[0] == sizeof(float));

    const int64_t ne00 = src0->ne[0];
    const int64_t ne01 = src0->ne[1];
    const int64_t ne02 = src0->ne[2];
    const int64_t ne03 = src0->ne[3];

    const size_t nb01 = src0->nb[1];
    const size_t nb02 = src0->nb[2];
    const size_t nb03 = src0->nb[3];

    ggml_float sum     = 0;
    ggml_float row_sum = 0;
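    // the accumulators use ggml_float (typically defined as double) to reduce
    // rounding error when summing many float rows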
    for (int64_t i03 = 0; i03 < ne03; i03++) {
        for (int64_t i02 = 0; i02 < ne02; i02++) {
            for (int64_t i01 = 0; i01 < ne01; i01++) {
                ggml_vec_sum_ggf(ne00,
                        &row_sum,
                        (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03));
                sum += row_sum;
            }
        }
    }
    ((float *) dst->data)[0] = (float) sum;
}

static void ggml_compute_forward_sum(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_sum_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_sum_rows

static void ggml_compute_forward_sum_rows_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    GGML_ASSERT(src0->nb[0] == sizeof(float));
    GGML_ASSERT(dst->nb[0]  == sizeof(float));

    const int64_t ne00 = src0->ne[0];
    const int64_t ne01 = src0->ne[1];
    const int64_t ne02 = src0->ne[2];
    const int64_t ne03 = src0->ne[3];

    const int64_t ne0 = dst->ne[0];
    const int64_t ne1 = dst->ne[1];
    const int64_t ne2 = dst->ne[2];
    const int64_t ne3 = dst->ne[3];

    GGML_ASSERT(ne0 == 1);
    GGML_ASSERT(ne1 == ne01);
    GGML_ASSERT(ne2 == ne02);
    GGML_ASSERT(ne3 == ne03);

    const size_t nb01 = src0->nb[1];
    const size_t nb02 = src0->nb[2];
    const size_t nb03 = src0->nb[3];

    const size_t nb1 = dst->nb[1];
    const size_t nb2 = dst->nb[2];
    const size_t nb3 = dst->nb[3];

    for (int64_t i3 = 0; i3 < ne03; i3++) {
        for (int64_t i2 = 0; i2 < ne02; i2++) {
            for (int64_t i1 = 0; i1 < ne01; i1++) {
                float * src_row = (float *) ((char *) src0->data + i1*nb01 + i2*nb02 + i3*nb03);
                float * dst_row = (float *) ((char *)  dst->data + i1*nb1  + i2*nb2  + i3*nb3);
                float row_sum = 0;
                ggml_vec_sum_f32(ne00, &row_sum, src_row);
                dst_row[0] = row_sum;
            }
        }
    }
}

static void ggml_compute_forward_sum_rows(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_sum_rows_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_mean

static void ggml_compute_forward_mean_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    assert(src0->nb[0] == sizeof(float));

    const int64_t ne00 = src0->ne[0];
    const int64_t ne01 = src0->ne[1];
    const int64_t ne02 = src0->ne[2];
    const int64_t ne03 = src0->ne[3];

    const size_t nb01 = src0->nb[1];
    const size_t nb02 = src0->nb[2];
    const size_t nb03 = src0->nb[3];

    const int64_t ne0 = dst->ne[0];
    const int64_t ne1 = dst->ne[1];
    const int64_t ne2 = dst->ne[2];
    const int64_t ne3 = dst->ne[3];

    assert(ne0 == 1);
    assert(ne1 == ne01);
    assert(ne2 == ne02);
    assert(ne3 == ne03);

    UNUSED(ne0);
    UNUSED(ne1);
    UNUSED(ne2);
    UNUSED(ne3);

    const size_t nb1 = dst->nb[1];
    const size_t nb2 = dst->nb[2];
    const size_t nb3 = dst->nb[3];

    for (int64_t i03 = 0; i03 < ne03; i03++) {
        for (int64_t i02 = 0; i02 < ne02; i02++) {
            for (int64_t i01 = 0; i01 < ne01; i01++) {
                ggml_vec_sum_f32(ne00,
                        (float *) ((char *)  dst->data + i01*nb1  + i02*nb2  + i03*nb3),
                        (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03));

                *(float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3) /= (float) ne00;
            }
        }
    }
}

static void ggml_compute_forward_mean(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_mean_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_repeat

static void ggml_compute_forward_repeat_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(params->ith == 0);
    GGML_ASSERT(ggml_can_repeat(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int64_t ne0 = dst->ne[0];
    const int64_t ne1 = dst->ne[1];
    const int64_t ne2 = dst->ne[2];
    const int64_t ne3 = dst->ne[3];

    const int64_t ne00 = src0->ne[0];
    const int64_t ne01 = src0->ne[1];
    const int64_t ne02 = src0->ne[2];
    const int64_t ne03 = src0->ne[3];

    const size_t nb0 = dst->nb[0];
    const size_t nb1 = dst->nb[1];
    const size_t nb2 = dst->nb[2];
    const size_t nb3 = dst->nb[3];

    const size_t nb00 = src0->nb[0];
    const size_t nb01 = src0->nb[1];
    const size_t nb02 = src0->nb[2];
    const size_t nb03 = src0->nb[3];

    // guaranteed to be an integer due to the check in ggml_can_repeat
    const int nr0 = (int)(ne0/ne00);
    const int nr1 = (int)(ne1/ne01);
    const int nr2 = (int)(ne2/ne02);
    const int nr3 = (int)(ne3/ne03);

    // TODO: support for transposed / permuted tensors
    GGML_ASSERT(nb0  == sizeof(float));
    GGML_ASSERT(nb00 == sizeof(float));

    // TODO: maybe this is not optimal?
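    // the i-loops walk the repeat counts while the k-loops walk the original
    // tensor, so each src0 row (k1,k2,k3) is copied into every repeated tile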
    for (int i3 = 0; i3 < nr3; i3++) {
        for (int k3 = 0; k3 < ne03; k3++) {
            for (int i2 = 0; i2 < nr2; i2++) {
                for (int k2 = 0; k2 < ne02; k2++) {
                    for (int i1 = 0; i1 < nr1; i1++) {
                        for (int k1 = 0; k1 < ne01; k1++) {
                            for (int i0 = 0; i0 < nr0; i0++) {
                                ggml_vec_cpy_f32(ne00,
                                        (float *) ((char *)  dst->data + (i3*ne03 + k3)*nb3  + (i2*ne02 + k2)*nb2  + (i1*ne01 + k1)*nb1  + (i0*ne00)*nb0),
                                        (float *) ((char *) src0->data + (          k3)*nb03 + (          k2)*nb02 + (          k1)*nb01));
                            }
                        }
                    }
                }
            }
        }
    }
}

static void ggml_compute_forward_repeat(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_repeat_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_repeat_back

static void ggml_compute_forward_repeat_back_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(params->ith == 0);
    GGML_ASSERT(ggml_can_repeat(dst, src0));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int64_t ne0 = dst->ne[0];
    const int64_t ne1 = dst->ne[1];
    const int64_t ne2 = dst->ne[2];
    const int64_t ne3 = dst->ne[3];

    const int64_t ne00 = src0->ne[0];
    const int64_t ne01 = src0->ne[1];
    const int64_t ne02 = src0->ne[2];
    const int64_t ne03 = src0->ne[3];

    const size_t nb0 = dst->nb[0];
    const size_t nb1 = dst->nb[1];
    const size_t nb2 = dst->nb[2];
    const size_t nb3 = dst->nb[3];

    const size_t nb00 = src0->nb[0];
    const size_t nb01 = src0->nb[1];
    const size_t nb02 = src0->nb[2];
    const size_t nb03 = src0->nb[3];

    // guaranteed to be an integer due to the check in ggml_can_repeat
    const int nr0 = (int)(ne00/ne0);
    const int nr1 = (int)(ne01/ne1);
    const int nr2 = (int)(ne02/ne2);
    const int nr3 = (int)(ne03/ne3);

    // TODO: support for transposed / permuted tensors
    GGML_ASSERT(nb0  == sizeof(float));
    GGML_ASSERT(nb00 == sizeof(float));
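    // repeat_back reduces the repeated tiles of src0 back into the smaller dst
    // (the reverse of ggml_repeat, as used for its gradient), so dst has to be
    // zeroed before the accumulation loop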
    if (ggml_is_contiguous(dst)) {
        ggml_vec_set_f32(ne0*ne1*ne2*ne3, dst->data, 0);
    } else {
        for (int k3 = 0; k3 < ne3; k3++) {
            for (int k2 = 0; k2 < ne2; k2++) {
                for (int k1 = 0; k1 < ne1; k1++) {
                    ggml_vec_set_f32(ne0,
                            (float *) ((char *) dst->data + k1*nb1 + k2*nb2 + k3*nb3),
                            0);
                }
            }
        }
    }

    // TODO: maybe this is not optimal?
    for (int i3 = 0; i3 < nr3; i3++) {
        for (int k3 = 0; k3 < ne3; k3++) {
            for (int i2 = 0; i2 < nr2; i2++) {
                for (int k2 = 0; k2 < ne2; k2++) {
                    for (int i1 = 0; i1 < nr1; i1++) {
                        for (int k1 = 0; k1 < ne1; k1++) {
                            for (int i0 = 0; i0 < nr0; i0++) {
                                ggml_vec_acc_f32(ne0,
                                        (float *) ((char *)  dst->data + (         k3)*nb3  + (         k2)*nb2  + (         k1)*nb1),
                                        (float *) ((char *) src0->data + (i3*ne3 + k3)*nb03 + (i2*ne2 + k2)*nb02 + (i1*ne1 + k1)*nb01 + (i0*ne0)*nb00));
                            }
                        }
                    }
                }
            }
        }
    }
}

static void ggml_compute_forward_repeat_back(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_repeat_back_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_abs

static void ggml_compute_forward_abs_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert(dst->nb[0]  == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_abs_f32(nc,
                (float *) ((char *)  dst->data + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

static void ggml_compute_forward_abs(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_abs_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_sgn

static void ggml_compute_forward_sgn_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert(dst->nb[0]  == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_sgn_f32(nc,
                (float *) ((char *)  dst->data + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

static void ggml_compute_forward_sgn(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_sgn_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_neg

static void ggml_compute_forward_neg_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert(dst->nb[0]  == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_neg_f32(nc,
                (float *) ((char *)  dst->data + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

static void ggml_compute_forward_neg(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_neg_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_step

static void ggml_compute_forward_step_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert(dst->nb[0]  == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_step_f32(nc,
                (float *) ((char *)  dst->data + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

static void ggml_compute_forward_step(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_step_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_relu

static void ggml_compute_forward_relu_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert(dst->nb[0]  == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        ggml_vec_relu_f32(nc,
                (float *) ((char *)  dst->data + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

static void ggml_compute_forward_relu(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_relu_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_gelu

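// rows are split across threads with a ceiling division: each thread gets
// dr = ceil(nr/nth) rows and thread ith handles rows [ir0, ir1). e.g. for
// nr = 10 rows and nth = 4 threads, dr = 3 and the ranges are [0,3), [3,6),
// [6,9), [9,10). the same scheme is used by the other row-wise ops below.
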
static void ggml_compute_forward_gelu_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_is_contiguous(src0));
    GGML_ASSERT(ggml_is_contiguous(dst));
    GGML_ASSERT(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int nc = src0->ne[0];
    const int nr = ggml_nrows(src0);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        ggml_vec_gelu_f32(nc,
                (float *) ((char *)  dst->data + i1*( dst->nb[1])),
                (float *) ((char *) src0->data + i1*(src0->nb[1])));

#ifndef NDEBUG
        for (int k = 0; k < nc; k++) {
            const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k];
            UNUSED(x);
            assert(!isnan(x));
            assert(!isinf(x));
        }
#endif
    }
}

static void ggml_compute_forward_gelu(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_gelu_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_gelu_quick

static void ggml_compute_forward_gelu_quick_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_is_contiguous(src0));
    GGML_ASSERT(ggml_is_contiguous(dst));
    GGML_ASSERT(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int nc = src0->ne[0];
    const int nr = ggml_nrows(src0);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        ggml_vec_gelu_quick_f32(nc,
                (float *) ((char *)  dst->data + i1*( dst->nb[1])),
                (float *) ((char *) src0->data + i1*(src0->nb[1])));

#ifndef NDEBUG
        for (int k = 0; k < nc; k++) {
            const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k];
            UNUSED(x);
            assert(!isnan(x));
            assert(!isinf(x));
        }
#endif
    }
}

static void ggml_compute_forward_gelu_quick(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_gelu_quick_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_silu

static void ggml_compute_forward_silu_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_is_contiguous(src0));
    GGML_ASSERT(ggml_is_contiguous(dst));
    GGML_ASSERT(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int nc = src0->ne[0];
    const int nr = ggml_nrows(src0);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        ggml_vec_silu_f32(nc,
                (float *) ((char *)  dst->data + i1*( dst->nb[1])),
                (float *) ((char *) src0->data + i1*(src0->nb[1])));

#ifndef NDEBUG
        for (int k = 0; k < nc; k++) {
            const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k];
            UNUSED(x);
            assert(!isnan(x));
            assert(!isinf(x));
        }
#endif
    }
}

static void ggml_compute_forward_silu(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_silu_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_silu_back

static void ggml_compute_forward_silu_back_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * grad,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_is_contiguous(grad));
    GGML_ASSERT(ggml_is_contiguous(src0));
    GGML_ASSERT(ggml_is_contiguous(dst));
    GGML_ASSERT(ggml_are_same_shape(src0, dst));
    GGML_ASSERT(ggml_are_same_shape(src0, grad));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int nc = src0->ne[0];
    const int nr = ggml_nrows(src0);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        ggml_vec_silu_backward_f32(nc,
                (float *) ((char *)  dst->data + i1*( dst->nb[1])),
                (float *) ((char *) src0->data + i1*(src0->nb[1])),
                (float *) ((char *) grad->data + i1*(grad->nb[1])));

#ifndef NDEBUG
        for (int k = 0; k < nc; k++) {
            const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k];
            UNUSED(x);
            assert(!isnan(x));
            assert(!isinf(x));
        }
#endif
    }
}

static void ggml_compute_forward_silu_back(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * grad,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_silu_back_f32(params, src0, grad, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_norm

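// classic layer normalization over the innermost dimension:
//   y = (x - mean(x)) / sqrt(var(x) + eps)
// the mean is subtracted row by row while the centered values are written
// to dst, which is then scaled by the reciprocal standard deviation.
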
static void ggml_compute_forward_norm_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    GGML_ASSERT(src0->nb[0] == sizeof(float));

    const int ith = params->ith;
    const int nth = params->nth;

    const int64_t ne00 = src0->ne[0];
    const int64_t ne01 = src0->ne[1];
    const int64_t ne02 = src0->ne[2];
    const int64_t ne03 = src0->ne[3];

    const size_t nb01 = src0->nb[1];
    const size_t nb02 = src0->nb[2];
    const size_t nb03 = src0->nb[3];

    const size_t nb1 = dst->nb[1];
    const size_t nb2 = dst->nb[2];
    const size_t nb3 = dst->nb[3];

    const float eps = 1e-5f; // TODO: make this a parameter

    // TODO: optimize
    for (int64_t i03 = 0; i03 < ne03; i03++) {
        for (int64_t i02 = 0; i02 < ne02; i02++) {
            for (int64_t i01 = ith; i01 < ne01; i01 += nth) {
                const float * x = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);

                ggml_float sum = 0.0;
                for (int64_t i00 = 0; i00 < ne00; i00++) {
                    sum += (ggml_float)x[i00];
                }

                float mean = sum/ne00;

                float * y = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3);

                ggml_float sum2 = 0.0;
                for (int64_t i00 = 0; i00 < ne00; i00++) {
                    float v = x[i00] - mean;
                    y[i00] = v;
                    sum2 += (ggml_float)(v*v);
                }

                float variance = sum2/ne00;
                const float scale = 1.0f/sqrtf(variance + eps);

                ggml_vec_scale_f32(ne00, y, scale);
            }
        }
    }
}

static void ggml_compute_forward_norm(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_norm_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

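// ggml_compute_forward_rms_norm

// RMS normalization skips the mean subtraction of regular layer norm:
//   y = x / sqrt(mean(x^2) + eps)
// only the root-mean-square of the row is needed, which is why the row can
// simply be memcpy'd into dst and then scaled in place.
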
static void ggml_compute_forward_rms_norm_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    GGML_ASSERT(src0->nb[0] == sizeof(float));

    const int ith = params->ith;
    const int nth = params->nth;

    const int64_t ne00 = src0->ne[0];
    const int64_t ne01 = src0->ne[1];
    const int64_t ne02 = src0->ne[2];
    const int64_t ne03 = src0->ne[3];

    const size_t nb01 = src0->nb[1];
    const size_t nb02 = src0->nb[2];
    const size_t nb03 = src0->nb[3];

    const size_t nb1 = dst->nb[1];
    const size_t nb2 = dst->nb[2];
    const size_t nb3 = dst->nb[3];

    const float eps = 1e-6f; // TODO: make this a parameter

    // TODO: optimize
    for (int64_t i03 = 0; i03 < ne03; i03++) {
        for (int64_t i02 = 0; i02 < ne02; i02++) {
            for (int64_t i01 = ith; i01 < ne01; i01 += nth) {
                const float * x = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);

                ggml_float sum = 0.0;
                for (int64_t i00 = 0; i00 < ne00; i00++) {
                    sum += (ggml_float)(x[i00] * x[i00]);
                }

                const float mean = sum/ne00;

                float * y = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3);

                memcpy(y, x, ne00 * sizeof(float));
                // for (int i00 = 0; i00 < ne00; i00++) {
                //     y[i00] = x[i00];
                // }

                const float scale = 1.0f/sqrtf(mean + eps);

                ggml_vec_scale_f32(ne00, y, scale);
            }
        }
    }
}

static void ggml_compute_forward_rms_norm(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_rms_norm_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

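// ggml_compute_forward_rms_norm_back

// the long derivation in the comments below boils down to the closed form
// implemented at the end of the loop body:
//   dx = rrms * (dz - x * sum(x*dz) / (sum(x*x) + N*eps))
// where rrms = 1/sqrt(mean(x^2) + eps), dz is the incoming gradient and
// N = ne00 is the row length.
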
static void ggml_compute_forward_rms_norm_back_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, dst) && ggml_are_same_shape(src0, src1));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    GGML_ASSERT(src0->nb[0] == sizeof(float));

    const int ith = params->ith;
    const int nth = params->nth;

    const int64_t ne00 = src0->ne[0];
    const int64_t ne01 = src0->ne[1];
    const int64_t ne02 = src0->ne[2];
    const int64_t ne03 = src0->ne[3];

    const size_t nb01 = src0->nb[1];
    const size_t nb02 = src0->nb[2];
    const size_t nb03 = src0->nb[3];

    const size_t nb11 = src1->nb[1];
    const size_t nb12 = src1->nb[2];
    const size_t nb13 = src1->nb[3];

    const size_t nb1 = dst->nb[1];
    const size_t nb2 = dst->nb[2];
    const size_t nb3 = dst->nb[3];

    const float eps = 1e-6f; // TODO: make this a parameter

    // TODO: optimize
    for (int64_t i03 = 0; i03 < ne03; i03++) {
        for (int64_t i02 = 0; i02 < ne02; i02++) {
            for (int64_t i01 = ith; i01 < ne01; i01 += nth) {
                // src1 is same shape as src0 => same indices
                const int64_t i11 = i01;
                const int64_t i12 = i02;
                const int64_t i13 = i03;

                const float * x  = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
                const float * dz = (float *) ((char *) src1->data + i11*nb11 + i12*nb12 + i13*nb13);

                ggml_float sum_xx  = 0.0;
                ggml_float sum_xdz = 0.0;

                for (int64_t i00 = 0; i00 < ne00; i00++) {
                    sum_xx  += (ggml_float)(x[i00] * x[i00]);
                    sum_xdz += (ggml_float)(x[i00] * dz[i00]);
                }

                //const float mean     = (float)(sum_xx)/ne00;
                const float mean_eps = (float)(sum_xx)/ne00 + eps;
                const float sum_eps  = (float)(sum_xx) + eps*ne00;
                //const float mean_xdz = (float)(sum_xdz)/ne00;
                // we could cache rms from forward pass to improve performance.
                // to do this implement ggml_rms and compose ggml_rms_norm using ggml_rms.
                //const float rms  = sqrtf(mean_eps);
                const float rrms = 1.0f / sqrtf(mean_eps);
                //const float scale = -rrms/(ne00 * mean_eps); // -1/(n*rms**3)

                {
                    // z = rms_norm(x)
                    //
                    // rms_norm(src0) =
                    //     scale(
                    //         src0,
                    //         div(
                    //             1,
                    //             sqrt(
                    //                 add(
                    //                     scale(
                    //                         sum(
                    //                             sqr(
                    //                                 src0)),
                    //                         (1.0/N)),
                    //                     eps))));
                    //
                    // postorder:
                    // ## op    args         grad
                    // 00 param src0         grad[#00]
                    // 01 const 1
                    // 02 sqr   (#00)        grad[#02]
                    // 03 sum   (#02)        grad[#03]
                    // 04 const 1/N
                    // 05 scale (#03, #04)   grad[#05]
                    // 06 const eps
                    // 07 add   (#05, #06)   grad[#07]
                    // 08 sqrt  (#07)        grad[#08]
                    // 09 div   (#01,#08)    grad[#09]
                    // 10 scale (#00,#09)    grad[#10]
                    //
                    // backward pass, given grad[#10]
                    // #10: scale
                    // grad[#00] += scale(grad[#10],#09)
                    // grad[#09] += sum(mul(grad[#10],#00))
                    // #09: div
                    // grad[#08] += neg(mul(grad[#09], div(#09,#08)))
                    // #08: sqrt
                    // grad[#07] += mul(grad[#08], div(0.5, #08))
                    // #07: add
                    // grad[#05] += grad[#07]
                    // #05: scale
                    // grad[#03] += scale(grad[#05],#04)
                    // #03: sum
                    // grad[#02] += repeat(grad[#03], #02)
                    // #02: sqr
                    // grad[#00] += scale(mul(#00, grad[#02]), 2.0)
                    //
                    // substitute and simplify:
                    // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, grad[#02]), 2.0)
                    // grad[#02] = repeat(grad[#03], #02)
                    // grad[#02] = repeat(scale(grad[#05],#04), #02)
                    // grad[#02] = repeat(scale(grad[#07],#04), #02)
                    // grad[#02] = repeat(scale(mul(grad[#08], div(0.5, #08)),#04), #02)
                    // grad[#02] = repeat(scale(mul(neg(mul(grad[#09], div(#09,#08))), div(0.5, #08)),#04), #02)
                    // grad[#02] = repeat(scale(mul(neg(mul(sum(mul(grad[#10],#00)), div(#09,#08))), div(0.5, #08)),#04), #02)
                    // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(#09,#08) * div(0.5, #08) * (1/N)), #02)
                    // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(div(#01,#08),#08) * div(0.5, #08) * (1/N)), #02)
                    // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(1,#08*#08) * div(0.5, #08) * (1/N)), #02)
                    // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N)), #02)
                    // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, grad[#02]), 2.0)
                    // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, repeat(-(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N)), #02)), 2.0)
                    // grad[#00] = scale(grad(#10), #09) + scale(scale(#00, -(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N))), 2.0)
                    // grad[#00] = scale(grad(#10), #09) + scale(#00, -(sum(mul(grad[#10],#00)) * div(1,#07) * div(1,#08) * (1/N)))
                    // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(1,#07*#08) * (-1/N))
                    // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(1,mean_eps*rms) * (-1/N))
                    // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*mean_eps))
                    // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*(sum_xx/N+eps)))
                    // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*sum_xx+rms*N*eps))
                    // grad[#00] = scale(dz, rrms) + scale(x, sum(mul(dz,x)) * div(-1,rms*N*mean_eps))
                    // grad[#00] = scale(dz, rrms) + scale(x, sum_xdz * div(-1,rms*N*mean_eps))
                    //
                    // a = b*c + d*e
                    // a = b*c*f/f + d*e*f/f
                    // a = (b*c*f + d*e*f)*(1/f)
                    // a = (b*c*(1/c) + d*e*(1/c))*(1/(1/c))
                    // a = (b + d*e/c)*c
                    // b = dz, c = rrms, d = x, e = sum_xdz * div(-1,rms*N*mean_eps)
                    // a = (dz + x*sum_xdz * div(-1,rms*N*mean_eps)/rrms)*rrms
                    // a = (dz + x*sum_xdz * div(-1,rms*N*mean_eps)*rms)*rrms
                    // a = (dz + x*sum_xdz * div(-rms,rms*N*mean_eps))*rrms
                    // a = (dz + x*sum_xdz * div(-1,N*mean_eps))*rrms
                    // a = (dz + x*div(-sum_xdz,N*mean_eps))*rrms
                    // a = (dz + x*div(-mean_xdz,mean_eps))*rrms
                    // grad[#00] = scale(dz + scale(x, div(-mean_xdz,mean_eps)),rrms)
                    // grad[#00] = scale(dz + scale(x, -mean_xdz/mean_eps),rrms)
                    // dx = scale(dz + scale(x, -mean_xdz/mean_eps),rrms)
                }
                // dx = scale(dz + scale(x, -mean_xdz/mean_eps),rrms)
                // post-order:
                // dx := x
                // dx := scale(dx,-mean_xdz/mean_eps)
                // dx := add(dx, dz)
                // dx := scale(dx, rrms)
                float * dx = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3);

                ggml_vec_cpy_f32  (ne00, dx, x);
                // ggml_vec_scale_f32(ne00, dx, -mean_xdz/mean_eps);
                ggml_vec_scale_f32(ne00, dx, (float)(-sum_xdz)/sum_eps);
                ggml_vec_acc_f32  (ne00, dx, dz);
                ggml_vec_scale_f32(ne00, dx, rrms);
            }
        }
    }
}

static void ggml_compute_forward_rms_norm_back(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_rms_norm_back_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_mul_mat

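// for each 2D slice, mul_mat computes dst row-wise as
//   dst[i0, i1] = dot(src0 row i0, src1 row i1)
// with the shared dimension ne00 == ne10 (i.e. dst = src1 * src0^T in row
// terms). three code paths exist: an optional CLBlast path, an optional
// BLAS sgemm path for large contiguous matrices, and the generic path that
// parallelizes over src0 rows.
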
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
// helper function to determine if it is better to use BLAS or not
// for large matrices, BLAS is faster
static bool ggml_compute_forward_mul_mat_use_blas(
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    //const int64_t ne00 = src0->ne[0];
    //const int64_t ne01 = src0->ne[1];

    const int64_t ne10 = src1->ne[0];

    const int64_t ne0 = dst->ne[0];
    const int64_t ne1 = dst->ne[1];

    // TODO: find the optimal values for these
    if (ggml_is_contiguous(src0) &&
        ggml_is_contiguous(src1) &&
        (ne0 >= 32 && ne1 >= 32 && ne10 >= 32)) {

        /*printf("BLAS: %d %d %d %d %d\n", ne0, ne1, ne10, ne00, ne01);*/
        return true;
    }

    return false;
}
#endif

static void ggml_compute_forward_mul_mat_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    const int64_t ne00 = src0->ne[0];
    const int64_t ne01 = src0->ne[1];
    const int64_t ne02 = src0->ne[2];
    const int64_t ne03 = src0->ne[3];

#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
    const int64_t ne10 = src1->ne[0];
#endif
    const int64_t ne11 = src1->ne[1];
#ifndef NDEBUG
    const int64_t ne12 = src1->ne[2];
    const int64_t ne13 = src1->ne[3];

    const int64_t ne0 = dst->ne[0];
    const int64_t ne1 = dst->ne[1];
    const int64_t ne2 = dst->ne[2];
    const int64_t ne3 = dst->ne[3];

    const int nb00 = src0->nb[0];
#endif
    const int nb01 = src0->nb[1];
    const int nb02 = src0->nb[2];
    const int nb03 = src0->nb[3];

#ifndef NDEBUG
    const int nb10 = src1->nb[0];
#endif
    const int nb11 = src1->nb[1];
    const int nb12 = src1->nb[2];
    const int nb13 = src1->nb[3];

    const int nb0 = dst->nb[0];
    const int nb1 = dst->nb[1];
    const int nb2 = dst->nb[2];
    const int nb3 = dst->nb[3];

    const int ith = params->ith;
    const int nth = params->nth;

    assert(ne02 == ne12);
    assert(ne03 == ne13);
    assert(ne2  == ne12);
    assert(ne3  == ne13);

    // we don't support permuted src0 or src1
    assert(nb00 == sizeof(float));
    assert(nb10 == sizeof(float));

    // dst cannot be transposed or permuted
    assert(nb0 == sizeof(float));
    assert(nb0 <= nb1);
    assert(nb1 <= nb2);
    assert(nb2 <= nb3);

    assert(ne0 == ne01);
    assert(ne1 == ne11);
    assert(ne2 == ne02);
    assert(ne3 == ne03);

    // nb01 >= nb00 - src0 is not transposed
    //   compute by src0 rows

#if defined(GGML_USE_CLBLAST)
    if (ggml_cl_can_mul_mat(src0, src1, dst)) {
        if (params->ith == 0 && params->type == GGML_TASK_COMPUTE) {
            ggml_cl_mul_mat(src0, src1, dst, params->wdata, params->wsize);
        }
        return;
    }
#endif

#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
    if (ggml_compute_forward_mul_mat_use_blas(src0, src1, dst)) {
        if (params->ith != 0) {
            return;
        }

        if (params->type == GGML_TASK_INIT) {
            return;
        }

        if (params->type == GGML_TASK_FINALIZE) {
            return;
        }

        for (int64_t i03 = 0; i03 < ne03; i03++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
                const float * x = (float *) ((char *) src0->data + i02*nb02 + i03*nb03);
                const float * y = (float *) ((char *) src1->data + i02*nb12 + i03*nb13);

                float * d = (float *) ((char *) dst->data + i02*nb2 + i03*nb3);

                cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans,
                        ne11, ne01, ne10,
                        1.0f,    y, ne10,
                                 x, ne00,
                        0.0f,    d, ne01);
            }
        }
        //printf("CBLAS F32 = %f ms, %d x %d x %d x %d\n", (ggml_perf_time_us() - t0)/1000.0, ne0, ne1, ne2, ne3);

        return;
    }
#endif

    if (params->type == GGML_TASK_INIT) {
        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // parallelize by src0 rows using ggml_vec_dot_f32

    // total rows in src0
    const int nr = ne01*ne02*ne03;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int ir = ir0; ir < ir1; ++ir) {
        // src0 indices
        const int i03 = ir/(ne02*ne01);
        const int i02 = (ir - i03*ne02*ne01)/ne01;
        const int i01 = (ir - i03*ne02*ne01 - i02*ne01);

        for (int64_t ic = 0; ic < ne11; ++ic) {
            // src1 indices
            const int i13 = i03;
            const int i12 = i02;
            const int i11 = ic;

            // dst indices
            const int i0 = i01;
            const int i1 = i11;
            const int i2 = i02;
            const int i3 = i03;

            ggml_vec_dot_f32(ne00,
                    (float *) ((char *)  dst->data + (i0*nb0 + i1*nb1 + i2*nb2 + i3*nb3)),
                    (float *) ((char *) src0->data + (i01*nb01 + i02*nb02 + i03*nb03)),
                    (float *) ((char *) src1->data + (i11*nb11 + i12*nb12 + i13*nb13)));
        }
    }

    //int64_t t1 = ggml_perf_time_us();
    //static int64_t acc = 0;
    //acc += t1 - t0;
    //if (t1 - t0 > 10) {
    //    printf("\n");
    //    printf("ne00 = %5d, ne01 = %5d, ne02 = %5d, ne03 = %5d\n", ne00, ne01, ne02, ne03);
    //    printf("nb00 = %5d, nb01 = %5d, nb02 = %5d, nb03 = %5d\n", nb00, nb01, nb02, nb03);
    //    printf("ne10 = %5d, ne11 = %5d, ne12 = %5d, ne13 = %5d\n", ne10, ne11, ne12, ne13);
    //    printf("nb10 = %5d, nb11 = %5d, nb12 = %5d, nb13 = %5d\n", nb10, nb11, nb12, nb13);
    //    printf("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX task %d/%d: %d us, acc = %d\n", ith, nth, (int) (t1 - t0), (int) acc);
    //}
}

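// same matrix multiplication as the f32 path above, but with src0 stored in
// fp16. for BLAS, the src0 slice is converted up to f32 into wdata; in the
// generic path, src1 is instead converted down to fp16 during INIT so the
// dot products can run entirely in fp16 via ggml_vec_dot_f16.
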
static void ggml_compute_forward_mul_mat_f16_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    const int64_t ne00 = src0->ne[0];
    const int64_t ne01 = src0->ne[1];
    const int64_t ne02 = src0->ne[2];
    const int64_t ne03 = src0->ne[3];

    const int64_t ne10 = src1->ne[0];
    const int64_t ne11 = src1->ne[1];
    const int64_t ne12 = src1->ne[2];
    const int64_t ne13 = src1->ne[3];

    const int64_t ne0 = dst->ne[0];
    const int64_t ne1 = dst->ne[1];
    const int64_t ne2 = dst->ne[2];
    const int64_t ne3 = dst->ne[3];
    //const int64_t ne = ne0*ne1*ne2*ne3;

    const int nb00 = src0->nb[0];
    const int nb01 = src0->nb[1];
    const int nb02 = src0->nb[2];
    const int nb03 = src0->nb[3];

    const int nb10 = src1->nb[0];
    const int nb11 = src1->nb[1];
    const int nb12 = src1->nb[2];
    const int nb13 = src1->nb[3];

    const int nb0 = dst->nb[0];
    const int nb1 = dst->nb[1];
    const int nb2 = dst->nb[2];
    const int nb3 = dst->nb[3];

    const int ith = params->ith;
    const int nth = params->nth;

    GGML_ASSERT(ne02 == ne12);
    GGML_ASSERT(ne03 == ne13);
    GGML_ASSERT(ne2  == ne12);
    GGML_ASSERT(ne3  == ne13);

    // TODO: we don't support permuted src0
    GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));

    // dst cannot be transposed or permuted
    GGML_ASSERT(nb0 == sizeof(float));
    GGML_ASSERT(nb0 <= nb1);
    GGML_ASSERT(nb1 <= nb2);
    GGML_ASSERT(nb2 <= nb3);

    GGML_ASSERT(ne0 == ne01);
    GGML_ASSERT(ne1 == ne11);
    GGML_ASSERT(ne2 == ne02);
    GGML_ASSERT(ne3 == ne03);

    // nb01 >= nb00 - src0 is not transposed
    //   compute by src0 rows

#if defined(GGML_USE_CLBLAST)
    if (ggml_cl_can_mul_mat(src0, src1, dst)) {
        if (params->ith == 0 && params->type == GGML_TASK_COMPUTE) {
            ggml_cl_mul_mat(src0, src1, dst, params->wdata, params->wsize);
        }
        return;
    }
#endif

#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
    if (ggml_compute_forward_mul_mat_use_blas(src0, src1, dst)) {
        GGML_ASSERT(nb10 == sizeof(float));

        if (params->ith != 0) {
            return;
        }

        if (params->type == GGML_TASK_INIT) {
            return;
        }

        if (params->type == GGML_TASK_FINALIZE) {
            return;
        }

        for (int64_t i03 = 0; i03 < ne03; i03++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
                float * const wdata = params->wdata;
                {
                    size_t id = 0;
                    for (int64_t i01 = 0; i01 < ne01; ++i01) {
                        for (int64_t i00 = 0; i00 < ne00; ++i00) {
                            wdata[id++] = GGML_FP16_TO_FP32(*(ggml_fp16_t *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00));
                        }
                    }

                    assert(id*sizeof(float) <= params->wsize);
                }

                const float * x = wdata;
                const float * y = (float *) ((char *) src1->data + i02*nb12 + i03*nb13);

                float * d = (float *) ((char *) dst->data + i02*nb2 + i03*nb3);

                // zT = y * xT
                cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans,
                        ne11, ne01, ne10,
                        1.0f,    y, ne10,
                                 x, ne00,
                        0.0f,    d, ne01);
            }
        }

        /*printf("CBLAS F16 = %f ms, %d x %d x %d x %d\n", (ggml_perf_time_us() - t0)/1000.0, ne0, ne1, ne2, ne3);*/

        return;
    }
#endif

    if (params->type == GGML_TASK_INIT) {
        ggml_fp16_t * const wdata = params->wdata;

        size_t id = 0;
        for (int64_t i13 = 0; i13 < ne13; ++i13) {
            for (int64_t i12 = 0; i12 < ne12; ++i12) {
                for (int64_t i11 = 0; i11 < ne11; ++i11) {
                    for (int64_t i10 = 0; i10 < ne10; ++i10) {
                        wdata[id++] = GGML_FP32_TO_FP16(*(float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + i10*nb10));
                    }
                }
            }
        }

        GGML_ASSERT(id*sizeof(ggml_fp16_t) <= params->wsize);

        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // fp16 -> half the size, so divide by 2
    // TODO: transposed src1 is not supported
    assert(nb10/2 == sizeof(ggml_fp16_t));

    // parallelize by src0 rows using ggml_vec_dot_f16

    // total rows in src0
    const int nr = ne01*ne02*ne03;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    ggml_fp16_t * wdata = params->wdata;

    for (int ir = ir0; ir < ir1; ++ir) {
        // src0 indices
        const int i03 = ir/(ne02*ne01);
        const int i02 = (ir - i03*ne02*ne01)/ne01;
        const int i01 = (ir - i03*ne02*ne01 - i02*ne01);

        const int i13 = i03;
        const int i12 = i02;

        const int i0 = i01;
        const int i2 = i02;
        const int i3 = i03;

        ggml_fp16_t * src0_row = (ggml_fp16_t *) ((char *) src0->data + (i01*nb01 + i02*nb02 + i03*nb03));
        ggml_fp16_t * src1_col = wdata + (0 + i12*ne11 + i13*ne12*ne11)*ne00;

        float * dst_col = (float *) ((char *) dst->data + (i0*nb0 + 0*nb1 + i2*nb2 + i3*nb3));

        for (int64_t ic = 0; ic < ne11; ++ic) {
            ggml_vec_dot_f16(ne00, &dst_col[ic*ne0], src0_row, src1_col + ic*ne00);
        }
    }

    //int64_t t1 = ggml_time_us();
    //static int64_t acc = 0;
    //acc += t1 - t0;
    //if (t1 - t0 > 10) {
    //    printf("\n");
    //    printf("ne00 = %5d, ne01 = %5d, ne02 = %5d, ne03 = %5d\n", ne00, ne01, ne02, ne03);
    //    printf("nb00 = %5d, nb01 = %5d, nb02 = %5d, nb03 = %5d\n", nb00, nb01, nb02, nb03);
    //    printf("ne10 = %5d, ne11 = %5d, ne12 = %5d, ne13 = %5d\n", ne10, ne11, ne12, ne13);
    //    printf("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX task %d/%d: %d us, acc = %d\n", ith, nth, (int) (t1 - t0), (int) acc);
    //}
}

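// mul_mat for quantized src0: during INIT every src1 row is quantized into
// wdata with the vec_dot_type that matches the weight type, and the compute
// phase then runs the type's vec_dot_q kernel on pairs of quantized rows;
// row_size is the byte size of one quantized row.
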
static void ggml_compute_forward_mul_mat_q_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    const int64_t ne00 = src0->ne[0];
    const int64_t ne01 = src0->ne[1];
    const int64_t ne02 = src0->ne[2];
    const int64_t ne03 = src0->ne[3];

    const int64_t ne10 = src1->ne[0];
    const int64_t ne11 = src1->ne[1];
    const int64_t ne12 = src1->ne[2];
    const int64_t ne13 = src1->ne[3];

    const int64_t ne0 = dst->ne[0];
    const int64_t ne1 = dst->ne[1];
    const int64_t ne2 = dst->ne[2];
    const int64_t ne3 = dst->ne[3];

    const int nb00 = src0->nb[0];
    const int nb01 = src0->nb[1];
    const int nb02 = src0->nb[2];
    const int nb03 = src0->nb[3];

    const int nb10 = src1->nb[0];
    const int nb11 = src1->nb[1];
    const int nb12 = src1->nb[2];
    const int nb13 = src1->nb[3];

    const int nb0 = dst->nb[0];
    const int nb1 = dst->nb[1];
    const int nb2 = dst->nb[2];
    const int nb3 = dst->nb[3];

    const int ith = params->ith;
    const int nth = params->nth;

    GGML_ASSERT(ne02 == ne12);
    GGML_ASSERT(ne03 == ne13);
    GGML_ASSERT(ne2  == ne12);
    GGML_ASSERT(ne3  == ne13);

    const enum ggml_type type = src0->type;
    quantize_row_q_t const quantize_row_q_dot = quantize_fns[type].quantize_row_q_dot;
    vec_dot_q_t      const vec_dot_q          = quantize_fns[type].vec_dot_q;
    enum ggml_type   const vec_dot_type       = quantize_fns[type].vec_dot_type;

    // we don't support permuted src0 or src1
    GGML_ASSERT(nb00 == (int) GGML_TYPE_SIZE[type]);
    GGML_ASSERT(nb10 == sizeof(float));

    // dst cannot be transposed or permuted
    GGML_ASSERT(nb0 == sizeof(float));
    GGML_ASSERT(nb0 <= nb1);
    GGML_ASSERT(nb1 <= nb2);
    GGML_ASSERT(nb2 <= nb3);

    GGML_ASSERT(ne0 == ne01);
    GGML_ASSERT(ne1 == ne11);
    GGML_ASSERT(ne2 == ne02);
    GGML_ASSERT(ne3 == ne03);

    // nb01 >= nb00 - src0 is not transposed
    //   compute by src0 rows

#if defined(GGML_USE_CLBLAST)
    if (ggml_cl_can_mul_mat(src0, src1, dst)) {
        if (params->ith == 0 && params->type == GGML_TASK_COMPUTE) {
            ggml_cl_mul_mat(src0, src1, dst, params->wdata, params->wsize);
        }
        return;
    }
#endif

#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
    if (ggml_compute_forward_mul_mat_use_blas(src0, src1, dst)) {
        if (params->ith != 0) {
            return;
        }

        if (params->type == GGML_TASK_INIT) {
            return;
        }

        if (params->type == GGML_TASK_FINALIZE) {
            return;
        }

        float * const wdata = params->wdata;
        dequantize_row_q_t const dequantize_row_q = quantize_fns[type].dequantize_row_q;

        for (int64_t i03 = 0; i03 < ne03; i03++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
                const float * y = (float *) ((char *) src1->data + i02*nb12 + i03*nb13);

                float * d = (float *) ((char *) dst->data + i02*nb2 + i03*nb3);

                {
                    size_t id = 0;
                    for (int64_t i01 = 0; i01 < ne01; ++i01) {
                        dequantize_row_q((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01, wdata + id, ne00);
                        id += ne00;
                    }

                    assert(id*sizeof(float) <= params->wsize);
                }

                const float * x = wdata;

                cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans,
                        ne11, ne01, ne10,
                        1.0f,    y, ne10,
                                 x, ne00,
                        0.0f,    d, ne01);
            }
        }

        //printf("CBLAS = %f ms, %d x %d x %d x %d\n", (ggml_perf_time_us() - t0)/1000.0, ne0, ne1, ne2, ne3);

        return;
    }
#endif

    if (params->type == GGML_TASK_INIT) {
        char * wdata = params->wdata;
        const size_t row_size = ne10*GGML_TYPE_SIZE[vec_dot_type]/GGML_BLCK_SIZE[vec_dot_type];

        for (int64_t i13 = 0; i13 < ne13; ++i13) {
            for (int64_t i12 = 0; i12 < ne12; ++i12) {
                for (int64_t i11 = 0; i11 < ne11; ++i11) {
                    quantize_row_q_dot((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11), (void *) wdata, ne10);
                    wdata += row_size;
                }
            }
        }

        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // parallelize by src0 rows using ggml_vec_dot_q

    // total rows in src0
    const int nr = ne01*ne02*ne03;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    void * wdata = params->wdata;
    const size_t row_size = ne00*GGML_TYPE_SIZE[vec_dot_type]/GGML_BLCK_SIZE[vec_dot_type];

    for (int ir = ir0; ir < ir1; ++ir) {
        // src0 indices
        const int i03 = ir/(ne02*ne01);
        const int i02 = (ir - i03*ne02*ne01)/ne01;
        const int i01 = (ir - i03*ne02*ne01 - i02*ne01);

        const int i13 = i03;
        const int i12 = i02;

        const int i0 = i01;
        const int i2 = i02;
        const int i3 = i03;

        void  * src0_row = (void *) ((char *) src0->data + (i01*nb01 + i02*nb02 + i03*nb03));
        char  * src1_col = ((char *) wdata + ((0 + i12*ne11 + i13*ne12*ne11)*row_size));

        float * dst_col = (float *) ((char *) dst->data + (i0*nb0 + 0*nb1 + i2*nb2 + i3*nb3));

        assert(ne00 % 32 == 0);

        for (int64_t ic = 0; ic < ne11; ++ic) {
            vec_dot_q(ne00, &dst_col[ic*ne0], src0_row, (void *) (src1_col + ic*row_size));
        }
    }

    //int64_t t1 = ggml_time_us();
    //static int64_t acc = 0;
    //acc += t1 - t0;
    //if (t1 - t0 > 10) {
    //    printf("\n");
    //    printf("ne00 = %5d, ne01 = %5d, ne02 = %5d, ne03 = %5d\n", ne00, ne01, ne02, ne03);
    //    printf("nb00 = %5d, nb01 = %5d, nb02 = %5d, nb03 = %5d\n", nb00, nb01, nb02, nb03);
    //    printf("ne10 = %5d, ne11 = %5d, ne12 = %5d, ne13 = %5d\n", ne10, ne11, ne12, ne13);
    //    printf("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX task %d/%d: %d us, acc = %d\n", ith, nth, (int) (t1 - t0), (int) acc);
    //}
}

static void ggml_compute_forward_mul_mat(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q8_1:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
            {
                ggml_compute_forward_mul_mat_q_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_mul_mat_f16_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_mul_mat_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_out_prod

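// outer product: for each 2D slice, dst[i0, i1] = sum over i01 of
// src0[i0, i01] * src1[i1, i01]. INIT zeroes dst; the compute phase splits
// dst rows across threads and accumulates with ggml_vec_mad_f32, adding
// src0 row i01 scaled by the scalar src1[i1, i01] into dst row i1.
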
static void ggml_compute_forward_out_prod_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    const int64_t ne00 = src0->ne[0];
    const int64_t ne01 = src0->ne[1];
    const int64_t ne02 = src0->ne[2];
    const int64_t ne03 = src0->ne[3];

    const int64_t ne10 = src1->ne[0];
    //const int64_t ne11 = src1->ne[1];
    const int64_t ne12 = src1->ne[2];
    const int64_t ne13 = src1->ne[3];

    const int64_t ne0 = dst->ne[0];
    const int64_t ne1 = dst->ne[1];
    const int64_t ne2 = dst->ne[2];
    const int64_t ne3 = dst->ne[3];

    const int nb00 = src0->nb[0];
    const int nb01 = src0->nb[1];
    const int nb02 = src0->nb[2];
    const int nb03 = src0->nb[3];

    const int nb10 = src1->nb[0];
    const int nb11 = src1->nb[1];
    const int nb12 = src1->nb[2];
    const int nb13 = src1->nb[3];

    const int nb0 = dst->nb[0];
    const int nb1 = dst->nb[1];
    const int nb2 = dst->nb[2];
    const int nb3 = dst->nb[3];

    const int ith = params->ith;
    const int nth = params->nth;

    GGML_ASSERT(ne02 == ne12);
    GGML_ASSERT(ne03 == ne13);
    GGML_ASSERT(ne2  == ne12);
    GGML_ASSERT(ne3  == ne13);

    // we don't support permuted src0 or src1
    GGML_ASSERT(nb00 == sizeof(float));

    // dst cannot be transposed or permuted
    GGML_ASSERT(nb0 == sizeof(float));
    // GGML_ASSERT(nb0 <= nb1);
    // GGML_ASSERT(nb1 <= nb2);
    // GGML_ASSERT(nb2 <= nb3);

    GGML_ASSERT(ne0 == ne00);
    GGML_ASSERT(ne1 == ne10);
    GGML_ASSERT(ne2 == ne02);
    GGML_ASSERT(ne3 == ne03);

    // nb01 >= nb00 - src0 is not transposed
    //   compute by src0 rows

    // TODO: #if defined(GGML_USE_CUBLAS) ggml_cuda_out_prod
    // TODO: #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) || defined(GGML_USE_CLBLAST)

    if (params->type == GGML_TASK_INIT) {
        ggml_vec_set_f32(ne0*ne1*ne2*ne3, dst->data, 0);
        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // parallelize by last three dimensions

    // total rows in dst
    const int64_t nr = ne1*ne2*ne3;

    // rows per thread
    const int64_t dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int64_t ir0 = dr*ith;
    const int64_t ir1 = MIN(ir0 + dr, nr);

    // dst[:,:,:,:] = 0
    // for i2,i3:
    //   for i1:
    //     for i01:
    //       for i0:
    //         dst[i0,i1,i2,i3] += src0[i0,i01,i2,i3] * src1[i1,i01,i2,i3]

    for (int64_t ir = ir0; ir < ir1; ++ir) {
        // dst indices
        const int64_t i3 = ir/(ne2*ne1);
        const int64_t i2 = (ir - i3*ne2*ne1)/ne1;
        const int64_t i1 = (ir - i3*ne2*ne1 - i2*ne1);

        const int64_t i02 = i2;
        const int64_t i03 = i3;

        //const int64_t i10 = i1;
        const int64_t i12 = i2;
        const int64_t i13 = i3;

        for (int64_t i01 = 0; i01 < ne01; ++i01) {
            const int64_t i11 = i01;

            float * s0 = (float *) ((char *) src0->data + (          i01*nb01 + i02*nb02 + i03*nb03));
            float * s1 = (float *) ((char *) src1->data + (i1*nb10 + i11*nb11 + i12*nb12 + i13*nb13));
            float * d  = (float *) ((char *)  dst->data + (          i1*nb1   + i2*nb2   + i3*nb3));

            ggml_vec_mad_f32(ne0, d, s0, *s1);
            // for (int64_t i0 = 0; i0 < ne0; ++i0) {
            //     d[i0] += s0[i0] * s1[i1];
            // }
        }
    }

    //int64_t t1 = ggml_perf_time_us();
    //static int64_t acc = 0;
    //acc += t1 - t0;
    //if (t1 - t0 > 10) {
    //    printf("\n");
    //    printf("ne00 = %5d, ne01 = %5d, ne02 = %5d, ne03 = %5d\n", ne00, ne01, ne02, ne03);
    //    printf("nb00 = %5d, nb01 = %5d, nb02 = %5d, nb03 = %5d\n", nb00, nb01, nb02, nb03);
    //    printf("ne10 = %5d, ne11 = %5d, ne12 = %5d, ne13 = %5d\n", ne10, ne11, ne12, ne13);
    //    printf("nb10 = %5d, nb11 = %5d, nb12 = %5d, nb13 = %5d\n", nb10, nb11, nb12, nb13);
    //    printf("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX task %d/%d: %d us, acc = %d\n", ith, nth, (int) (t1 - t0), (int) acc);
    //}
}

static void ggml_compute_forward_out_prod(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q8_1:
            {
                GGML_ASSERT(false); // todo
                // ggml_compute_forward_out_prod_q_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F16:
            {
                GGML_ASSERT(false); // todo
                // ggml_compute_forward_out_prod_f16_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_out_prod_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_scale

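// multiplies every element of src0 by the scalar held in src1. the op can
// run in place: when dst and src0 share the same buffer the per-row memcpy
// is skipped and the row is scaled directly.
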
static void ggml_compute_forward_scale_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_is_contiguous(src0));
    GGML_ASSERT(ggml_is_contiguous(dst));
    GGML_ASSERT(ggml_are_same_shape(src0, dst));
    GGML_ASSERT(ggml_is_scalar(src1));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // scale factor
    const float v = *(float *) src1->data;

    const int ith = params->ith;
    const int nth = params->nth;

    const int nc = src0->ne[0];
    const int nr = ggml_nrows(src0);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    const size_t nb01 = src0->nb[1];
    const size_t nb1  = dst->nb[1];

    for (int i1 = ir0; i1 < ir1; i1++) {
        if (dst->data != src0->data) {
            // src0 is same shape as dst => same indices
            memcpy((char *) dst->data + i1*nb1, (char *) src0->data + i1*nb01, nc * sizeof(float));
        }
        ggml_vec_scale_f32(nc, (float *) ((char *) dst->data + i1*nb1), v);
    }
}

static void ggml_compute_forward_scale(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_scale_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_set

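// writes src1 into a strided view of dst described by opt0: nb1/nb2/nb3 are
// the view strides, offset is the starting byte, and the inplace flag
// decides whether src0 first has to be copied into dst during INIT.
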
static void ggml_compute_forward_set_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        const struct ggml_tensor * opt0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_shape(src0, dst));
    GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));
    GGML_ASSERT(opt0->type == GGML_TYPE_I32);
    GGML_ASSERT(ggml_nelements(opt0) == 5);

    // view src0 and dst with these strides and data offset in bytes during set
    // nb0 is implicitly element_size because src0 and dst are contiguous
    size_t nb1     = ((int32_t *) opt0->data)[0];
    size_t nb2     = ((int32_t *) opt0->data)[1];
    size_t nb3     = ((int32_t *) opt0->data)[2];
    size_t offset  = ((int32_t *) opt0->data)[3];
    bool   inplace = (bool) ((int32_t *) opt0->data)[4];

    if (!inplace && (params->type == GGML_TASK_INIT)) {
        // memcpy needs to be synchronized across threads to avoid race conditions.
        // => do it in INIT phase
        memcpy(
            ((char *)  dst->data),
            ((char *) src0->data),
            ggml_nbytes(dst));
    }

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(src1);
    const int nc = src1->ne[0];

    const int64_t ne10 = src1->ne[0];
    const int64_t ne11 = src1->ne[1];
    const int64_t ne12 = src1->ne[2];
    const int64_t ne13 = src1->ne[3];

    const size_t nb10 = src1->nb[0];
    const size_t nb11 = src1->nb[1];
    const size_t nb12 = src1->nb[2];
    const size_t nb13 = src1->nb[3];

    // src0 and dst as viewed during set
    const size_t nb0 = ggml_element_size(src0);

    const int im0 = (ne10 == 0 ? 0 : ne10-1);
    const int im1 = (ne11 == 0 ? 0 : ne11-1);
    const int im2 = (ne12 == 0 ? 0 : ne12-1);
    const int im3 = (ne13 == 0 ? 0 : ne13-1);

    GGML_ASSERT(offset + im0*nb0 + im1*nb1 + im2*nb2 + im3*nb3 <= ggml_nbytes(dst));

    GGML_ASSERT(nb10 == sizeof(float));

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int ir = ir0; ir < ir1; ++ir) {
        // src0 and dst are viewed with shape of src1 and offset
        // => same indices
        const int i3 = ir/(ne12*ne11);
        const int i2 = (ir - i3*ne12*ne11)/ne11;
        const int i1 = (ir - i3*ne12*ne11 - i2*ne11);

        ggml_vec_cpy_f32(nc,
                (float *) ((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + offset),
                (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11));
    }
}

static void ggml_compute_forward_set(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        const struct ggml_tensor * opt0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_set_f32(params, src0, src1, opt0, dst);
            } break;
        case GGML_TYPE_F16:
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q8_1:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_cpy

static void ggml_compute_forward_cpy(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    ggml_compute_forward_dup(params, src0, dst);
}

// ggml_compute_forward_cont

static void ggml_compute_forward_cont(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    ggml_compute_forward_dup(params, src0, dst);
}

// ggml_compute_forward_reshape

static void ggml_compute_forward_reshape(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    // NOP
    UNUSED(params);
    UNUSED(src0);
    UNUSED(dst);
}

// ggml_compute_forward_view

static void ggml_compute_forward_view(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0) {
    // NOP
    UNUSED(params);
    UNUSED(src0);
}

// ggml_compute_forward_permute

static void ggml_compute_forward_permute(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0) {
    // NOP
    UNUSED(params);
    UNUSED(src0);
}

// ggml_compute_forward_transpose

static void ggml_compute_forward_transpose(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0) {
    // NOP
    UNUSED(params);
    UNUSED(src0);
}

  9391. // ggml_compute_forward_get_rows
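
// dst row i is a copy (dequantized if needed) of src0 row src1[i]:
// e.g. with src1 = {2, 0}, dst row 0 = src0 row 2 and dst row 1 = src0 row 0.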
static void ggml_compute_forward_get_rows_q(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int nc = src0->ne[0];
    const int nr = ggml_nelements(src1);
    const enum ggml_type type = src0->type;
    dequantize_row_q_t const dequantize_row_q = quantize_fns[type].dequantize_row_q;

    assert( dst->ne[0] == nc);
    assert( dst->ne[1] == nr);
    assert(src0->nb[0] == GGML_TYPE_SIZE[type]);

    for (int i = 0; i < nr; ++i) {
        const int r = ((int32_t *) src1->data)[i];

        dequantize_row_q(
                (const void *) ((char *) src0->data + r*src0->nb[1]),
                     (float *) ((char *)  dst->data + i*dst->nb[1]), nc);
    }
}

static void ggml_compute_forward_get_rows_f16(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int nc = src0->ne[0];
    const int nr = ggml_nelements(src1);

    assert( dst->ne[0] == nc);
    assert( dst->ne[1] == nr);
    assert(src0->nb[0] == sizeof(ggml_fp16_t));

    for (int i = 0; i < nr; ++i) {
        const int r = ((int32_t *) src1->data)[i];

        for (int j = 0; j < nc; ++j) {
            ggml_fp16_t v = ((ggml_fp16_t *) ((char *) src0->data + r*src0->nb[1]))[j];
            ((float *) ((char *) dst->data + i*dst->nb[1]))[j] = GGML_FP16_TO_FP32(v);
        }
    }
}

static void ggml_compute_forward_get_rows_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int nc = src0->ne[0];
    const int nr = ggml_nelements(src1);

    assert( dst->ne[0] == nc);
    assert( dst->ne[1] == nr);
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < nr; ++i) {
        const int r = ((int32_t *) src1->data)[i];

        ggml_vec_cpy_f32(nc,
                (float *) ((char *)  dst->data + i*dst->nb[1]),
                (float *) ((char *) src0->data + r*src0->nb[1]));
    }
}

static void ggml_compute_forward_get_rows(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q8_1:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
            {
                ggml_compute_forward_get_rows_q(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_get_rows_f16(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_get_rows_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }

    //static bool first = true;
    //printf("ne0 = %d, ne1 = %d, ne2 = %d\n", dst->ne[0], dst->ne[1], dst->ne[2]);
    //if (first) {
    //    first = false;
    //} else {
    //    for (int k = 0; k < dst->ne[1]; ++k) {
    //        for (int j = 0; j < dst->ne[0]/16; ++j) {
    //            for (int i = 0; i < 16; ++i) {
    //                printf("%8.4f ", ((float *) dst->data)[k*dst->ne[0] + j*16 + i]);
    //            }
    //            printf("\n");
    //        }
    //        printf("\n");
    //    }
    //    printf("\n");
    //    exit(0);
    //}
}

// ggml_compute_forward_get_rows_back
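
// backward of get_rows: gradients are scattered back with accumulation,
// dst row src1[i] += src0 row i. when src1 contains duplicate indices the
// contributions for that row are summed, which is why dst is first
// initialized in the INIT phase (from opt0 in the f16 path, to zero in the
// f32 path) before the accumulation pass.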
static void ggml_compute_forward_get_rows_back_f32_f16(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        const struct ggml_tensor * opt0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(params->ith == 0);
    GGML_ASSERT(ggml_are_same_shape(opt0, dst));
    GGML_ASSERT(ggml_is_contiguous(opt0));
    GGML_ASSERT(ggml_is_contiguous(dst));

    ggml_compute_forward_dup_same_cont(params, opt0, dst);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int nc = src0->ne[0];
    const int nr = ggml_nelements(src1);

    GGML_ASSERT( dst->ne[0] == nc);
    GGML_ASSERT(src0->nb[0] == sizeof(ggml_fp16_t));

    for (int i = 0; i < nr; ++i) {
        const int r = ((int32_t *) src1->data)[i];

        for (int j = 0; j < nc; ++j) {
            ggml_fp16_t v = ((ggml_fp16_t *) ((char *) src0->data + i*src0->nb[1]))[j];
            ((float *) ((char *) dst->data + r*dst->nb[1]))[j] += GGML_FP16_TO_FP32(v);
        }
    }
}

static void ggml_compute_forward_get_rows_back_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        const struct ggml_tensor * opt0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(params->ith == 0);
    GGML_ASSERT(ggml_are_same_shape(opt0, dst));
    GGML_ASSERT(ggml_is_contiguous(opt0));
    GGML_ASSERT(ggml_is_contiguous(dst));

    // ggml_compute_forward_dup_same_cont(params, opt0, dst);

    if (params->type == GGML_TASK_INIT) {
        memset(dst->data, 0, ggml_nbytes(dst));
    }

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int nc = src0->ne[0];
    const int nr = ggml_nelements(src1);

    GGML_ASSERT( dst->ne[0] == nc);
    GGML_ASSERT(src0->nb[0] == sizeof(float));

    for (int i = 0; i < nr; ++i) {
        const int r = ((int32_t *) src1->data)[i];

        ggml_vec_add_f32(nc,
                (float *) ((char *)  dst->data + r*dst->nb[1]),
                (float *) ((char *)  dst->data + r*dst->nb[1]),
                (float *) ((char *) src0->data + i*src0->nb[1]));
    }
}

static void ggml_compute_forward_get_rows_back(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        const struct ggml_tensor * opt0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_get_rows_back_f32_f16(params, src0, src1, opt0, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_get_rows_back_f32(params, src0, src1, opt0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }

    //static bool first = true;
    //printf("ne0 = %d, ne1 = %d, ne2 = %d\n", dst->ne[0], dst->ne[1], dst->ne[2]);
    //if (first) {
    //    first = false;
    //} else {
    //    for (int k = 0; k < dst->ne[1]; ++k) {
    //        for (int j = 0; j < dst->ne[0]/16; ++j) {
    //            for (int i = 0; i < 16; ++i) {
    //                printf("%8.4f ", ((float *) dst->data)[k*dst->ne[0] + j*16 + i]);
    //            }
    //            printf("\n");
    //        }
    //        printf("\n");
    //    }
    //    printf("\n");
    //    exit(0);
    //}
}

// ggml_compute_forward_diag
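
// turns each row of src0 into a diagonal matrix: a row [a, b, c] becomes
//   [ a 0 0 ]
//   [ 0 b 0 ]
//   [ 0 0 c ]
// (src0 has ne01 == 1 and dst is square ne0 x ne1 per 2D slice, per the
// asserts below)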
static void ggml_compute_forward_diag_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // TODO: handle transposed/permuted matrices

    const int ne00 = src0->ne[0];
    const int ne01 = src0->ne[1];
    const int ne02 = src0->ne[2];
    const int ne03 = src0->ne[3];
    const int ne0  = dst->ne[0];
    const int ne1  = dst->ne[1];
    const int ne2  = dst->ne[2];
    const int ne3  = dst->ne[3];
    GGML_ASSERT(ne00 == ne0);
    GGML_ASSERT(ne00 == ne1);
    GGML_ASSERT(ne01 == 1);
    GGML_ASSERT(ne02 == ne2);
    GGML_ASSERT(ne03 == ne3);

    const int nb00 = src0->nb[0];
    //const int nb01 = src0->nb[1];
    const int nb02 = src0->nb[2];
    const int nb03 = src0->nb[3];
    const int nb0  = dst->nb[0];
    const int nb1  = dst->nb[1];
    const int nb2  = dst->nb[2];
    const int nb3  = dst->nb[3];

    GGML_ASSERT(nb00 == sizeof(float));
    GGML_ASSERT(nb0  == sizeof(float));

    for (int i3 = 0; i3 < ne3; i3++) {
        for (int i2 = 0; i2 < ne2; i2++) {
            for (int i1 = 0; i1 < ne1; i1++) {
                float * d = (float *)((char *)  dst->data + i3*nb3  + i2*nb2 + i1*nb1);
                float * s = (float *)((char *) src0->data + i3*nb03 + i2*nb02);
                for (int i0 = 0; i0 < i1; i0++) {
                    d[i0] = 0;
                }
                d[i1] = s[i1];
                for (int i0 = i1+1; i0 < ne0; i0++) {
                    d[i0] = 0;
                }
            }
        }
    }
}

static void ggml_compute_forward_diag(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_diag_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_diag_mask_inf
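
// causal attention mask: in row j, every column i with i > n_past + j is set
// to `value` (-INFINITY for diag_mask_inf, 0 for diag_mask_zero). e.g. with
// n_past = 0 and a 3x3 slice, the strictly upper-triangular entries are
// masked (x = kept, v = value):
//   [ x v v ]
//   [ x x v ]
//   [ x x x ]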
static void ggml_compute_forward_diag_mask_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst,
        const float value) {
    GGML_ASSERT(src1->type == GGML_TYPE_I32);
    GGML_ASSERT(ggml_nelements(src1) == 2);

    const int ith = params->ith;
    const int nth = params->nth;

    const int  n_past  =       ((int32_t *) src1->data)[0];
    const bool inplace = (bool)((int32_t *) src1->data)[1];

    GGML_ASSERT(n_past >= 0);

    if (!inplace && (params->type == GGML_TASK_INIT)) {
        // memcpy needs to be synchronized across threads to avoid race conditions.
        // => do it in INIT phase
        GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));
        GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));
        memcpy(
            ((char *)  dst->data),
            ((char *) src0->data),
            ggml_nbytes(dst));
    }

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // TODO: handle transposed/permuted matrices

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];
    const int nr = src0->ne[1];
    const int nz = n/nr;

    GGML_ASSERT( dst->nb[0] == sizeof(float));
    GGML_ASSERT(src0->nb[0] == sizeof(float));

    for (int k = 0; k < nz; k++) {
        for (int j = ith; j < nr; j += nth) {
            for (int i = n_past; i < nc; i++) {
                if (i > n_past + j) {
                    *(float *)((char *) dst->data + k*dst->nb[2] + j*dst->nb[1] + i*dst->nb[0]) = value;
                }
            }
        }
    }
}

static void ggml_compute_forward_diag_mask_inf(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_diag_mask_f32(params, src0, src1, dst, -INFINITY);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

static void ggml_compute_forward_diag_mask_zero(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_diag_mask_f32(params, src0, src1, dst, 0);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_soft_max
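
// numerically stable softmax per row:
//   dp[i] = exp(sp[i] - max) / sum_j exp(sp[j] - max)
// subtracting the row max keeps exp() in range; the exponential itself is
// evaluated through the precomputed f16 lookup table table_exp_f16, and
// -INFINITY inputs (e.g. from diag_mask_inf) map to exactly 0.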
static void ggml_compute_forward_soft_max_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_is_contiguous(src0));
    GGML_ASSERT(ggml_is_contiguous(dst));
    GGML_ASSERT(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // TODO: handle transposed/permuted matrices

    const int ith = params->ith;
    const int nth = params->nth;

    const int nc = src0->ne[0];
    const int nr = ggml_nrows(src0);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        float * sp = (float *)((char *) src0->data + i1*src0->nb[1]);
        float * dp = (float *)((char *)  dst->data + i1*dst->nb[1]);

#ifndef NDEBUG
        for (int i = 0; i < nc; ++i) {
            //printf("p[%d] = %f\n", i, p[i]);
            assert(!isnan(sp[i]));
        }
#endif

        float max = -INFINITY;
        ggml_vec_max_f32(nc, &max, sp);

        ggml_float sum = 0.0;

        uint16_t scvt;
        for (int i = 0; i < nc; i++) {
            if (sp[i] == -INFINITY) {
                dp[i] = 0.0f;
            } else {
                // const float val = (sp[i] == -INFINITY) ? 0.0 : exp(sp[i] - max);
                ggml_fp16_t s = GGML_FP32_TO_FP16(sp[i] - max);
                memcpy(&scvt, &s, sizeof(scvt));
                const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt]);
                sum += (ggml_float)val;
                dp[i] = val;
            }
        }

        assert(sum > 0.0);

        sum = 1.0/sum;
        ggml_vec_scale_f32(nc, dp, sum);

#ifndef NDEBUG
        for (int i = 0; i < nc; ++i) {
            assert(!isnan(dp[i]));
            assert(!isinf(dp[i]));
        }
#endif
    }
}

static void ggml_compute_forward_soft_max(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_soft_max_f32(params, src0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_soft_max_back
static void ggml_compute_forward_soft_max_back_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_is_contiguous(src0));
    GGML_ASSERT(ggml_is_contiguous(src1));
    GGML_ASSERT(ggml_is_contiguous(dst));
    GGML_ASSERT(ggml_are_same_shape(src0, dst));
    GGML_ASSERT(ggml_are_same_shape(src1, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // TODO: handle transposed/permuted matrices

    const int ith = params->ith;
    const int nth = params->nth;

    const int nc = src0->ne[0];
    const int nr = ggml_nrows(src0);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        float * dy = (float *)((char *) src0->data + i1*src0->nb[1]);
        float * y  = (float *)((char *) src1->data + i1*src1->nb[1]);
        float * dx = (float *)((char *)  dst->data + i1*dst->nb[1]);

#ifndef NDEBUG
        for (int i = 0; i < nc; ++i) {
            //printf("p[%d] = %f\n", i, p[i]);
            assert(!isnan(dy[i]));
            assert(!isnan(y[i]));
        }
#endif
        // Jii = yi - yi*yi
        // Jij = -yi*yj
        // J = diag(y)-y.T*y
        // dx = J * dy
        // dxk = sum_i(Jki * dyi)
        // dxk = sum_i(-yk*yi * dyi) - (-yk*yk)*dyk + (yk - yk*yk)*dyk
        // dxk = sum_i(-yk*yi * dyi) + yk*dyk
        // dxk = -yk * sum_i(yi * dyi) + yk*dyk
        // dxk = -yk * dot(y, dy) + yk*dyk
        // dxk = yk * (- dot(y, dy) + dyk)
        // dxk = yk * (dyk - dot(y, dy))
        //
        // post-order:
        // dot_y_dy := dot(y, dy)
        // dx := dy
        // dx := dx - dot_y_dy
        // dx := dx * y

        // linear runtime, no additional memory
        float dot_y_dy = 0;
        ggml_vec_dot_f32 (nc, &dot_y_dy, y, dy);
        ggml_vec_cpy_f32 (nc, dx, dy);
        ggml_vec_acc1_f32(nc, dx, -dot_y_dy);
        ggml_vec_mul_f32 (nc, dx, dx, y);

#ifndef NDEBUG
        for (int i = 0; i < nc; ++i) {
            assert(!isnan(dx[i]));
            assert(!isinf(dx[i]));
        }
#endif
    }
}

static void ggml_compute_forward_soft_max_back(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_soft_max_back_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_alibi
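
// ALiBi (https://arxiv.org/abs/2108.12409) adds a per-head linear bias
// (i - ne0 + 1) * m_k to the attention scores instead of using positional
// embeddings. the head slopes form a geometric sequence; worked example:
// with n_head = 8 and max_bias = 8, n_heads_log2_floor = 8 and
// m0 = 2^(-8/8) = 0.5, so m_k = 0.5^(k+1) = 1/2, 1/4, ..., 1/256 for
// heads k = 0..7. when n_head is not a power of two, the extra heads use
// the interleaved m1 slopes.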
static void ggml_compute_forward_alibi_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);

    GGML_ASSERT(src1->type == GGML_TYPE_I32);
    GGML_ASSERT(ggml_nelements(src1) == 3);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int   n_past   = ((int32_t *) src1->data)[0];
    const int   n_head   = ((int32_t *) src1->data)[1];
    const float max_bias = ((float *)   src1->data)[2];

    assert(n_past >= 0);

    const int ne0 = src0->ne[0]; // all_seq_len = n_past + ne1
    const int ne1 = src0->ne[1]; // seq_len_without_past
    //const int ne2 = src0->ne[2]; // n_head -> this is k
    //const int ne3 = src0->ne[3]; // 1 -> bsz

    const int n       = ggml_nrows(src0);
    const int ne2_ne3 = n/ne1; // ne2*ne3

    const int nb0 = src0->nb[0];
    const int nb1 = src0->nb[1];
    const int nb2 = src0->nb[2];
    //const int nb3 = src0->nb[3];

    assert(nb0 == sizeof(float));
    assert(ne1 + n_past == ne0); (void) n_past;

    // add alibi to src0 (KQ_scaled)
    const int n_heads_log2_floor = 1 << (int) floor(log2(n_head));

    const float m0 = powf(2.0f, -(max_bias) / n_heads_log2_floor);
    const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_heads_log2_floor);

    for (int i = 0; i < ne0; i++) {
        for (int j = 0; j < ne1; j++) {
            for (int k = 0; k < ne2_ne3; k++) {
                float * const src  = (float *)((char *) src0->data + i*nb0 + j*nb1 + k*nb2);
                float *       pdst = (float *)((char *)  dst->data + i*nb0 + j*nb1 + k*nb2);

                // TODO: k*nb2 or k*nb3

                float m_k;

                if (k < n_heads_log2_floor) {
                    m_k = powf(m0, k + 1);
                } else {
                    m_k = powf(m1, 2 * (k - n_heads_log2_floor) + 1);
                }

                pdst[0] = (i-ne0+1) * m_k + src[0];
            }
        }
    }
}

static void ggml_compute_forward_alibi_f16(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);

    GGML_ASSERT(src1->type == GGML_TYPE_I32);
    GGML_ASSERT(ggml_nelements(src1) == 3);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int   n_past   = ((int32_t *) src1->data)[0];
    const int   n_head   = ((int32_t *) src1->data)[1];
    const float max_bias = ((float *)   src1->data)[2];

    assert(n_past >= 0);

    const int ne0 = src0->ne[0]; // all_seq_len = n_past + ne1
    const int ne1 = src0->ne[1]; // seq_len_without_past
    //const int ne2 = src0->ne[2]; // n_head -> this is k
    //const int ne3 = src0->ne[3]; // 1 -> bsz

    const int n       = ggml_nrows(src0);
    const int ne2_ne3 = n/ne1; // ne2*ne3

    const int nb0 = src0->nb[0];
    const int nb1 = src0->nb[1];
    const int nb2 = src0->nb[2];
    //const int nb3 = src0->nb[3];

    assert(nb0 == sizeof(ggml_fp16_t));
    assert(ne1 + n_past == ne0); (void) n_past;

    // add alibi to src0 (KQ_scaled)
    const int n_heads_log2_floor = 1 << (int) floor(log2(n_head));

    const float m0 = powf(2.0f, -(max_bias) / n_heads_log2_floor);
    const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_heads_log2_floor);

    for (int i = 0; i < ne0; i++) {
        for (int j = 0; j < ne1; j++) {
            for (int k = 0; k < ne2_ne3; k++) {
                ggml_fp16_t * const src  = (ggml_fp16_t *)((char *) src0->data + i*nb0 + j*nb1 + k*nb2);
                float *             pdst =       (float *)((char *)  dst->data + i*nb0 + j*nb1 + k*nb2);

                // TODO: k*nb2 or k*nb3

                float m_k;

                if (k < n_heads_log2_floor) {
                    m_k = powf(m0, k + 1);
                } else {
                    m_k = powf(m1, 2 * (k - n_heads_log2_floor) + 1);
                }

                // we return F32
                pdst[0] = (i-ne0+1) * m_k + GGML_FP16_TO_FP32(src[0]);
            }
        }
    }
}

static void ggml_compute_forward_alibi(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_alibi_f16(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_alibi_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q8_1:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
        case GGML_TYPE_Q8_K:
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_clamp
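
// element-wise clamp to the closed interval [min, max]:
// dst[i] = MAX(MIN(src0[i], max), min), e.g. clamping {-3, 0.5, 7} to
// [0, 1] yields {0, 0.5, 1}.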
static void ggml_compute_forward_clamp_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    assert(params->ith == 0);

    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT(ggml_nelements(src1) == 2);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const float min = ((float *) src1->data)[0];
    const float max = ((float *) src1->data)[1];

    const int ith = params->ith;
    const int nth = params->nth;

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    const size_t nb00 = src0->nb[0];
    const size_t nb01 = src0->nb[1];

    const size_t nb0 = dst->nb[0];
    const size_t nb1 = dst->nb[1];

    GGML_ASSERT( nb0 == sizeof(float));
    GGML_ASSERT(nb00 == sizeof(float));

    for (int j = ith; j < n; j += nth) {
        float * dst_ptr  = (float *) ((char *)  dst->data + j*nb1);
        float * src0_ptr = (float *) ((char *) src0->data + j*nb01);

        for (int i = 0; i < nc; i++) {
            dst_ptr[i] = MAX(MIN(src0_ptr[i], max), min);
        }
    }
}

static void ggml_compute_forward_clamp(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_clamp_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F16:
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q8_1:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
        case GGML_TYPE_Q8_K:
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
        case GGML_TYPE_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_rope
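
// rotary position embedding (RoPE): the pair (x0, x1) at pair index j in a
// row at position p is rotated by the angle
//   theta_j = p * 10000^(-2j/n_dims)
// (theta starts at p and is multiplied by theta_scale once per pair), giving
//   dst0 = x0*cos(theta_j) - x1*sin(theta_j)
//   dst1 = x0*sin(theta_j) + x1*cos(theta_j)
// mode & 2 selects the GPT-NeoX pair layout, mode & 4 the GLM block layout.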
static void ggml_compute_forward_rope_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(src1->type == GGML_TYPE_I32);
    GGML_ASSERT(ggml_nelements(src1) == 4);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n_past = ((int32_t *) src1->data)[0];
    const int n_dims = ((int32_t *) src1->data)[1];
    const int mode   = ((int32_t *) src1->data)[2];
    const int n_ctx  = ((int32_t *) src1->data)[3];

    assert(n_past >= 0);

    const size_t nb00 = src0->nb[0];
    const size_t nb01 = src0->nb[1];
    const size_t nb02 = src0->nb[2];
    const size_t nb03 = src0->nb[3];

    const int64_t ne0 = dst->ne[0];
    const int64_t ne1 = dst->ne[1];
    const int64_t ne2 = dst->ne[2];
    const int64_t ne3 = dst->ne[3];

    const size_t nb0 = dst->nb[0];
    const size_t nb1 = dst->nb[1];
    const size_t nb2 = dst->nb[2];
    const size_t nb3 = dst->nb[3];

    //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
    //printf("n_past = %d, ne2 = %d\n", n_past, ne2);

    GGML_ASSERT(nb00 == sizeof(float));

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(dst);

    GGML_ASSERT(n_dims <= ne0);
    GGML_ASSERT(n_dims % 2 == 0);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    // row index used to determine which thread to use
    int ir = 0;

    const float theta_scale = powf(10000.0, -2.0f/n_dims);

    const bool is_neox = mode & 2;
    const bool is_glm  = mode & 4;

    for (int64_t i3 = 0; i3 < ne3; i3++) {
        for (int64_t i2 = ((mode & 1) == 0 ? 0 : n_past); i2 < ne2; i2++) {
            const int64_t p = ((mode & 1) == 0 ? n_past + i2 : i2);
            for (int64_t i1 = 0; i1 < ne1; i1++) {
                if (ir++ < ir0) continue;
                if (ir   > ir1) break;

                float theta = (float)p;

                if (is_glm) {
                    theta = MIN(p, n_ctx - 2);
                    float block_theta = MAX(p - (n_ctx - 2), 0);
                    for (int64_t i0 = 0; i0 < ne0 / 4; i0++) {
                        const float cos_theta = cosf(theta);
                        const float sin_theta = sinf(theta);
                        const float cos_block_theta = cosf(block_theta);
                        const float sin_block_theta = sinf(block_theta);

                        theta       *= theta_scale;
                        block_theta *= theta_scale;

                        const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
                              float * dst_data  = (float *)((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + i0*nb0);

                        const float x0 = src[0];
                        const float x1 = src[n_dims/2];
                        const float x2 = src[n_dims];
                        const float x3 = src[n_dims/2*3];

                        dst_data[0]          = x0*cos_theta - x1*sin_theta;
                        dst_data[n_dims/2]   = x0*sin_theta + x1*cos_theta;
                        dst_data[n_dims]     = x2*cos_block_theta - x3*sin_block_theta;
                        dst_data[n_dims/2*3] = x2*sin_block_theta + x3*cos_block_theta;
                    }
                } else if (!is_neox) {
                    for (int64_t i0 = 0; i0 < ne0; i0 += 2) {
                        const float cos_theta = cosf(theta);
                        const float sin_theta = sinf(theta);

                        theta *= theta_scale;

                        const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
                              float * dst_data  = (float *)((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + i0*nb0);

                        const float x0 = src[0];
                        const float x1 = src[1];

                        dst_data[0] = x0*cos_theta - x1*sin_theta;
                        dst_data[1] = x0*sin_theta + x1*cos_theta;
                    }
                } else {
                    // TODO: this is probably wrong, but I can't figure it out ..
                    // ref: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt_neox/modeling_gpt_neox.py#LL251C1-L294C28
                    for (int64_t ib = 0; ib < ne0/n_dims; ++ib) {
                        for (int64_t ic = 0; ic < n_dims; ic += 2) {
                            const float cos_theta = cosf(theta);
                            const float sin_theta = sinf(theta);

                            theta *= theta_scale;

                            const int64_t i0 = ib*n_dims + ic/2;

                            const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
                                  float * dst_data  = (float *)((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + i0*nb0);

                            const float x0 = src[0];
                            const float x1 = src[n_dims/2];

                            dst_data[0]        = x0*cos_theta - x1*sin_theta;
                            dst_data[n_dims/2] = x0*sin_theta + x1*cos_theta;
                        }
                    }
                }
            }
        }
    }
}
static void ggml_compute_forward_rope_f16(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(src1->type == GGML_TYPE_I32);
    GGML_ASSERT(ggml_nelements(src1) == 4);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n_past = ((int32_t *) src1->data)[0];
    const int n_dims = ((int32_t *) src1->data)[1];
    const int mode   = ((int32_t *) src1->data)[2];
    const int n_ctx  = ((int32_t *) src1->data)[3];

    assert(n_past >= 0);

    const size_t nb00 = src0->nb[0];
    const size_t nb01 = src0->nb[1];
    const size_t nb02 = src0->nb[2];
    const size_t nb03 = src0->nb[3];

    const int64_t ne0 = dst->ne[0];
    const int64_t ne1 = dst->ne[1];
    const int64_t ne2 = dst->ne[2];
    const int64_t ne3 = dst->ne[3];

    const size_t nb0 = dst->nb[0];
    const size_t nb1 = dst->nb[1];
    const size_t nb2 = dst->nb[2];
    const size_t nb3 = dst->nb[3];

    //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
    //printf("n_past = %d, ne2 = %d\n", n_past, ne2);

    GGML_ASSERT(nb0 == sizeof(ggml_fp16_t));

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(dst);

    GGML_ASSERT(n_dims <= ne0);
    GGML_ASSERT(n_dims % 2 == 0);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    // row index used to determine which thread to use
    int ir = 0;

    const float theta_scale = powf(10000.0, -2.0f/n_dims);

    const bool is_neox = mode & 2;
    const bool is_glm  = mode & 4;

    for (int64_t i3 = 0; i3 < ne3; i3++) {
        for (int64_t i2 = ((mode & 1) == 0 ? 0 : n_past); i2 < ne2; i2++) {
            const int64_t p = ((mode & 1) == 0 ? n_past + i2 : i2);
            for (int64_t i1 = 0; i1 < ne1; i1++) {
                if (ir++ < ir0) continue;
                if (ir   > ir1) break;

                float theta = (float)p;

                if (is_glm) {
                    theta = MIN(p, n_ctx - 2);
                    float block_theta = MAX(p - (n_ctx - 2), 0);
                    for (int64_t i0 = 0; i0 < ne0 / 4; i0++) {
                        const float cos_theta = cosf(theta);
                        const float sin_theta = sinf(theta);
                        const float cos_block_theta = cosf(block_theta);
                        const float sin_block_theta = sinf(block_theta);

                        theta       *= theta_scale;
                        block_theta *= theta_scale;

                        const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
                              ggml_fp16_t * dst_data  = (ggml_fp16_t *)((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + i0*nb0);

                        const float x0 = GGML_FP16_TO_FP32(src[0]);
                        const float x1 = GGML_FP16_TO_FP32(src[n_dims/2]);
                        const float x2 = GGML_FP16_TO_FP32(src[n_dims]);
                        const float x3 = GGML_FP16_TO_FP32(src[n_dims/2*3]);

                        dst_data[0]          = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta);
                        dst_data[n_dims/2]   = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta);
                        dst_data[n_dims]     = GGML_FP32_TO_FP16(x2*cos_block_theta - x3*sin_block_theta);
                        dst_data[n_dims/2*3] = GGML_FP32_TO_FP16(x2*sin_block_theta + x3*cos_block_theta);
                    }
                } else if (!is_neox) { // fixed: missing "else" made this branch also run after the GLM one
                    for (int64_t i0 = 0; i0 < ne0; i0 += 2) {
                        const float cos_theta = cosf(theta);
                        const float sin_theta = sinf(theta);

                        theta *= theta_scale;

                        const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
                              ggml_fp16_t * dst_data  = (ggml_fp16_t *)((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + i0*nb0);

                        const float x0 = GGML_FP16_TO_FP32(src[0]);
                        const float x1 = GGML_FP16_TO_FP32(src[1]);

                        dst_data[0] = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta);
                        dst_data[1] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta);
                    }
                } else {
                    // TODO: this is probably wrong, but I can't figure it out ..
                    // ref: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt_neox/modeling_gpt_neox.py#LL251C1-L294C28
                    for (int64_t ib = 0; ib < ne0/n_dims; ++ib) {
                        for (int64_t ic = 0; ic < n_dims; ic += 2) {
                            const float cos_theta = cosf(theta);
                            const float sin_theta = sinf(theta);

                            theta *= theta_scale;

                            const int64_t i0 = ib*n_dims + ic/2;

                            const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
                                  ggml_fp16_t * dst_data  = (ggml_fp16_t *)((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + i0*nb0);

                            const float x0 = GGML_FP16_TO_FP32(src[0]);
                            const float x1 = GGML_FP16_TO_FP32(src[n_dims/2]);

                            dst_data[0]        = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta);
                            dst_data[n_dims/2] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta);
                        }
                    }
                }
            }
        }
    }
}

static void ggml_compute_forward_rope(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_rope_f16(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_rope_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_rope_back
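
// rope_back computes dx from dy by applying the rotation with the opposite
// angle: the 2x2 rotation matrix is orthogonal, so its inverse is its
// transpose, giving
//   dx0 =  dy0*cos(theta) + dy1*sin(theta)
//   dx1 = -dy0*sin(theta) + dy1*cos(theta)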
static void ggml_compute_forward_rope_back_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    assert(src1->type == GGML_TYPE_I32);
    assert(ggml_nelements(src1) == 3);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // y = rope(x, src1)
    // dx = rope_back(dy, src1)
    // src0 is dy, src1 contains options

    const int n_past = ((int32_t *) src1->data)[0];
    const int n_dims = ((int32_t *) src1->data)[1];
    const int mode   = ((int32_t *) src1->data)[2];

    assert(n_past >= 0);

    const size_t nb00 = src0->nb[0];
    const size_t nb01 = src0->nb[1];
    const size_t nb02 = src0->nb[2];
    const size_t nb03 = src0->nb[3];

    const int64_t ne0 = dst->ne[0];
    const int64_t ne1 = dst->ne[1];
    const int64_t ne2 = dst->ne[2];
    const int64_t ne3 = dst->ne[3];

    const size_t nb0 = dst->nb[0];
    const size_t nb1 = dst->nb[1];
    const size_t nb2 = dst->nb[2];
    const size_t nb3 = dst->nb[3];

    //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
    //printf("n_past = %d, ne2 = %d\n", n_past, ne2);

    assert(nb0 == sizeof(float));

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(dst);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    // row index used to determine which thread to use
    int ir = 0;

    const float theta_scale = powf(10000.0, -2.0f/n_dims);

    const bool is_neox = mode & 2;

    for (int64_t i3 = 0; i3 < ne3; i3++) {
        for (int64_t i2 = ((mode & 1) == 0 ? 0 : n_past); i2 < ne2; i2++) {
            const int64_t p = ((mode & 1) == 0 ? n_past + i2 : i2);
            for (int64_t i1 = 0; i1 < ne1; i1++) {
                if (ir++ < ir0) continue;
                if (ir   > ir1) break;

                float theta = (float)p;

                if (!is_neox) {
                    for (int64_t i0 = 0; i0 < ne0; i0 += 2) {
                        const float cos_theta = cosf(theta);
                        const float sin_theta = sinf(theta);

                        theta *= theta_scale;

                        const float * const dy = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
                              float *       dx = (float *)((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + i0*nb0);

                        const float dy0 = dy[0];
                        const float dy1 = dy[1];

                        dx[0] =   dy0*cos_theta + dy1*sin_theta;
                        dx[1] = - dy0*sin_theta + dy1*cos_theta;
                    }
                } else {
                    for (int64_t ib = 0; ib < ne0/n_dims; ++ib) {
                        for (int64_t ic = 0; ic < n_dims; ic += 2) {
                            const float cos_theta = cosf(theta);
                            const float sin_theta = sinf(theta);

                            theta *= theta_scale;

                            const int64_t i0 = ib*n_dims + ic/2;

                            const float * const dy = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
                                  float *       dx = (float *)((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + i0*nb0);

                            const float dy0 = dy[0];
                            const float dy1 = dy[n_dims/2];

                            dx[0]        =   dy0*cos_theta + dy1*sin_theta;
                            dx[n_dims/2] = - dy0*sin_theta + dy1*cos_theta;
                        }
                    }
                }
            }
        }
    }
}
static void ggml_compute_forward_rope_back_f16(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    assert(src1->type == GGML_TYPE_I32);
    assert(ggml_nelements(src1) == 3);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // y = rope(x, src1)
    // dx = rope_back(dy, src1)
    // src0 is dy, src1 contains options

    const int n_past = ((int32_t *) src1->data)[0];
    const int n_dims = ((int32_t *) src1->data)[1];
    const int mode   = ((int32_t *) src1->data)[2];

    assert(n_past >= 0);

    const size_t nb00 = src0->nb[0];
    const size_t nb01 = src0->nb[1];
    const size_t nb02 = src0->nb[2];
    const size_t nb03 = src0->nb[3];

    const int64_t ne0 = dst->ne[0];
    const int64_t ne1 = dst->ne[1];
    const int64_t ne2 = dst->ne[2];
    const int64_t ne3 = dst->ne[3];

    const size_t nb0 = dst->nb[0];
    const size_t nb1 = dst->nb[1];
    const size_t nb2 = dst->nb[2];
    const size_t nb3 = dst->nb[3];

    //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
    //printf("n_past = %d, ne2 = %d\n", n_past, ne2);

    assert(nb0 == sizeof(ggml_fp16_t));

    const int ith = params->ith;
    const int nth = params->nth;

    const int nr = ggml_nrows(dst);

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    // row index used to determine which thread to use
    int ir = 0;

    const float theta_scale = powf(10000.0, -2.0f/n_dims);

    const bool is_neox = mode & 2;

    for (int64_t i3 = 0; i3 < ne3; i3++) {
        for (int64_t i2 = ((mode & 1) == 0 ? 0 : n_past); i2 < ne2; i2++) {
            const int64_t p = ((mode & 1) == 0 ? n_past + i2 : i2);
            for (int64_t i1 = 0; i1 < ne1; i1++) {
                if (ir++ < ir0) continue;
                if (ir   > ir1) break;

                float theta = (float)p;

                if (!is_neox) {
                    for (int64_t i0 = 0; i0 < ne0; i0 += 2) {
                        const float cos_theta = cosf(theta);
                        const float sin_theta = sinf(theta);

                        theta *= theta_scale;

                        const ggml_fp16_t * const dy = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
                              ggml_fp16_t *       dx = (ggml_fp16_t *)((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + i0*nb0);

                        const float dy0 = GGML_FP16_TO_FP32(dy[0]);
                        const float dy1 = GGML_FP16_TO_FP32(dy[1]);

                        dx[0] = GGML_FP32_TO_FP16( dy0*cos_theta + dy1*sin_theta);
                        dx[1] = GGML_FP32_TO_FP16(-dy0*sin_theta + dy1*cos_theta);
                    }
                } else {
                    for (int64_t ib = 0; ib < ne0/n_dims; ++ib) {
                        for (int64_t ic = 0; ic < n_dims; ic += 2) {
                            const float cos_theta = cosf(theta);
                            const float sin_theta = sinf(theta);

                            theta *= theta_scale;

                            const int64_t i0 = ib*n_dims + ic/2;

                            const ggml_fp16_t * const dy = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
                                  ggml_fp16_t *       dx = (ggml_fp16_t *)((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + i0*nb0);

                            const float dy0 = GGML_FP16_TO_FP32(dy[0]);
                            const float dy1 = GGML_FP16_TO_FP32(dy[n_dims/2]);

                            dx[0]        = GGML_FP32_TO_FP16( dy0*cos_theta + dy1*sin_theta);
                            dx[n_dims/2] = GGML_FP32_TO_FP16(-dy0*sin_theta + dy1*cos_theta);
                        }
                    }
                }
            }
        }
    }
}
static void ggml_compute_forward_rope_back(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_rope_back_f16(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_rope_back_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_conv_1d_s1_ph
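
// 1D convolution, stride 1, "half" padding nh = nk/2 so the output length
// matches the input length. in the INIT phase both the kernel (src0) and the
// signal (src1) are repacked into params->wdata with the channel dimension
// padded to ew0 = ggml_up32(ne01), so each kernel tap reduces to a single
// aligned ggml_vec_dot over ew0 elements.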
static void ggml_compute_forward_conv_1d_s1_ph_f16_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    const int64_t ne00 = src0->ne[0];
    const int64_t ne01 = src0->ne[1];
    const int64_t ne02 = src0->ne[2];
    //const int64_t ne03 = src0->ne[3];

    const int64_t ne10 = src1->ne[0];
    const int64_t ne11 = src1->ne[1];
    //const int64_t ne12 = src1->ne[2];
    //const int64_t ne13 = src1->ne[3];

    //const int64_t ne0  = dst->ne[0];
    //const int64_t ne1  = dst->ne[1];
    //const int64_t ne2  = dst->ne[2];
    //const int64_t ne3  = dst->ne[3];
    //const int64_t ne   = ne0*ne1*ne2*ne3;

    const int nb00 = src0->nb[0];
    const int nb01 = src0->nb[1];
    const int nb02 = src0->nb[2];
    //const int nb03 = src0->nb[3];

    const int nb10 = src1->nb[0];
    const int nb11 = src1->nb[1];
    //const int nb12 = src1->nb[2];
    //const int nb13 = src1->nb[3];

    //const int nb0  = dst->nb[0];
    const int nb1  = dst->nb[1];
    //const int nb2  = dst->nb[2];
    //const int nb3  = dst->nb[3];

    const int ith = params->ith;
    const int nth = params->nth;

    const int nk = ne00;
    const int nh = nk/2;

    const int ew0 = ggml_up32(ne01);

    GGML_ASSERT(ne00 % 2 == 1); // TODO: support even kernel sizes
    GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nb10 == sizeof(float));

    if (params->type == GGML_TASK_INIT) {
        // TODO: fix this memset (wsize is overestimated)
        memset(params->wdata, 0, params->wsize);

        // prepare kernel data (src0)
        {
            ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;

            for (int64_t i02 = 0; i02 < ne02; i02++) {
                for (int64_t i01 = 0; i01 < ne01; i01++) {
                    const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i02*nb02 + i01*nb01);
                    ggml_fp16_t * dst_data = wdata + i02*ew0*ne00;
                    for (int64_t i00 = 0; i00 < ne00; i00++) {
                        dst_data[i00*ew0 + i01] = src[i00];
                    }
                }
            }
        }

        // prepare source data (src1)
        {
            ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + ne02*ew0*ne00;

            for (int64_t i11 = 0; i11 < ne11; i11++) {
                const float * const src = (float *)((char *) src1->data + i11*nb11);
                ggml_fp16_t * dst_data = wdata;
                for (int64_t i10 = 0; i10 < ne10; i10++) {
                    dst_data[(i10 + nh)*ew0 + i11] = GGML_FP32_TO_FP16(src[i10]);
                }
            }
        }

        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // total rows in dst
    const int nr = ne02;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        float * dst_data = (float *)((char *) dst->data + i1*nb1);
        for (int64_t i0 = 0; i0 < ne10; ++i0) {
            dst_data[i0] = 0;
            for (int k = -nh; k <= nh; k++) {
                float v = 0.0f;
                ggml_vec_dot_f16(ew0, &v,
                        (ggml_fp16_t *) params->wdata +          i1*ew0*ne00 +      (nh + k)*ew0,
                        (ggml_fp16_t *) params->wdata + ne02*ew0*ne00        + (i0 + nh + k)*ew0);

                dst_data[i0] += v;
            }
        }
    }
}
static void ggml_compute_forward_conv_1d_s1_ph_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    const int64_t ne00 = src0->ne[0];
    const int64_t ne01 = src0->ne[1];
    const int64_t ne02 = src0->ne[2];
    //const int64_t ne03 = src0->ne[3];

    const int64_t ne10 = src1->ne[0];
    const int64_t ne11 = src1->ne[1];
    //const int64_t ne12 = src1->ne[2];
    //const int64_t ne13 = src1->ne[3];

    //const int64_t ne0  = dst->ne[0];
    //const int64_t ne1  = dst->ne[1];
    //const int64_t ne2  = dst->ne[2];
    //const int64_t ne3  = dst->ne[3];
    //const int64_t ne   = ne0*ne1*ne2*ne3;

    const int nb00 = src0->nb[0];
    const int nb01 = src0->nb[1];
    const int nb02 = src0->nb[2];
    //const int nb03 = src0->nb[3];

    const int nb10 = src1->nb[0];
    const int nb11 = src1->nb[1];
    //const int nb12 = src1->nb[2];
    //const int nb13 = src1->nb[3];

    //const int nb0  = dst->nb[0];
    const int nb1  = dst->nb[1];
    //const int nb2  = dst->nb[2];
    //const int nb3  = dst->nb[3];

    const int ith = params->ith;
    const int nth = params->nth;

    const int nk = ne00;
    const int nh = nk/2;

    const int ew0 = ggml_up32(ne01);

    GGML_ASSERT(ne00 % 2 == 1); // TODO: support even kernel sizes
    GGML_ASSERT(nb00 == sizeof(float));
    GGML_ASSERT(nb10 == sizeof(float));

    if (params->type == GGML_TASK_INIT) {
        // TODO: fix this memset (wsize is overestimated)
        memset(params->wdata, 0, params->wsize);

        // prepare kernel data (src0)
        {
            float * const wdata = (float *) params->wdata + 0;

            for (int64_t i02 = 0; i02 < ne02; i02++) {
                for (int64_t i01 = 0; i01 < ne01; i01++) {
                    const float * const src = (float *)((char *) src0->data + i02*nb02 + i01*nb01);
                    float * dst_data = wdata + i02*ew0*ne00;
                    for (int64_t i00 = 0; i00 < ne00; i00++) {
                        dst_data[i00*ew0 + i01] = src[i00];
                    }
                }
            }
        }

        // prepare source data (src1)
        {
            float * const wdata = (float *) params->wdata + ne02*ew0*ne00;

            for (int64_t i11 = 0; i11 < ne11; i11++) {
                const float * const src = (float *)((char *) src1->data + i11*nb11);
                float * dst_data = wdata;
                for (int64_t i10 = 0; i10 < ne10; i10++) {
                    dst_data[(i10 + nh)*ew0 + i11] = src[i10];
                }
            }
        }

        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // total rows in dst
    const int nr = ne02;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        float * dst_data = (float *)((char *) dst->data + i1*nb1);
        for (int64_t i0 = 0; i0 < ne10; ++i0) {
            dst_data[i0] = 0;
            for (int k = -nh; k <= nh; k++) {
                float v = 0.0f;
                ggml_vec_dot_f32(ew0, &v,
                        (float *) params->wdata +          i1*ew0*ne00 +      (nh + k)*ew0,
                        (float *) params->wdata + ne02*ew0*ne00        + (i0 + nh + k)*ew0);

                dst_data[i0] += v;
            }
        }
    }
}
static void ggml_compute_forward_conv_1d_s1_ph(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_conv_1d_s1_ph_f16_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_conv_1d_s1_ph_f32(params, src0, src1, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_conv_1d_s2_ph
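
// same scheme as conv_1d_s1_ph above, but with stride 2: the input position
// i0 advances by 2 and the result is written to dst_data[i0/2], halving the
// output length.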
static void ggml_compute_forward_conv_1d_s2_ph_f16_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    const int64_t ne00 = src0->ne[0];
    const int64_t ne01 = src0->ne[1];
    const int64_t ne02 = src0->ne[2];
    //const int64_t ne03 = src0->ne[3];

    const int64_t ne10 = src1->ne[0];
    const int64_t ne11 = src1->ne[1];
    //const int64_t ne12 = src1->ne[2];
    //const int64_t ne13 = src1->ne[3];

    //const int64_t ne0  = dst->ne[0];
    //const int64_t ne1  = dst->ne[1];
    //const int64_t ne2  = dst->ne[2];
    //const int64_t ne3  = dst->ne[3];
    //const int64_t ne   = ne0*ne1*ne2*ne3;

    const int nb00 = src0->nb[0];
    const int nb01 = src0->nb[1];
    const int nb02 = src0->nb[2];
    //const int nb03 = src0->nb[3];

    const int nb10 = src1->nb[0];
    const int nb11 = src1->nb[1];
    //const int nb12 = src1->nb[2];
    //const int nb13 = src1->nb[3];

    //const int nb0  = dst->nb[0];
    const int nb1  = dst->nb[1];
    //const int nb2  = dst->nb[2];
    //const int nb3  = dst->nb[3];

    const int ith = params->ith;
    const int nth = params->nth;

    const int nk = ne00;
    const int nh = nk/2;

    const int ew0 = ggml_up32(ne01);

    GGML_ASSERT(ne00 % 2 == 1); // TODO: support even kernel sizes
    GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nb10 == sizeof(float));

    if (params->type == GGML_TASK_INIT) {
        // TODO: fix this memset (wsize is overestimated)
        memset(params->wdata, 0, params->wsize);

        // prepare kernel data (src0)
        {
            ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;

            for (int64_t i02 = 0; i02 < ne02; i02++) {
                for (int64_t i01 = 0; i01 < ne01; i01++) {
                    const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i02*nb02 + i01*nb01);
                    ggml_fp16_t * dst_data = wdata + i02*ew0*ne00;
                    for (int64_t i00 = 0; i00 < ne00; i00++) {
                        dst_data[i00*ew0 + i01] = src[i00];
                    }
                }
            }
        }

        // prepare source data (src1)
        {
            ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + ne02*ew0*ne00;

            for (int64_t i11 = 0; i11 < ne11; i11++) {
                const float * const src = (float *)((char *) src1->data + i11*nb11);
                ggml_fp16_t * dst_data = wdata;
                for (int64_t i10 = 0; i10 < ne10; i10++) {
                    dst_data[(i10 + nh)*ew0 + i11] = GGML_FP32_TO_FP16(src[i10]);
                }
            }
        }

        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // total rows in dst
    const int nr = ne02;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        float * dst_data = (float *)((char *) dst->data + i1*nb1);
        for (int64_t i0 = 0; i0 < ne10; i0 += 2) {
            dst_data[i0/2] = 0;
            for (int k = -nh; k <= nh; k++) {
                float v = 0.0f;
                ggml_vec_dot_f16(ew0, &v,
                        (ggml_fp16_t *) params->wdata +          i1*ew0*ne00 +      (nh + k)*ew0,
                        (ggml_fp16_t *) params->wdata + ne02*ew0*ne00        + (i0 + nh + k)*ew0);

                dst_data[i0/2] += v;
            }
        }
    }
}
  10826. static void ggml_compute_forward_conv_1d_s2_ph_f32(
  10827. const struct ggml_compute_params * params,
  10828. const struct ggml_tensor * src0,
  10829. const struct ggml_tensor * src1,
  10830. struct ggml_tensor * dst) {
  10831. GGML_ASSERT(src0->type == GGML_TYPE_F32);
  10832. GGML_ASSERT(src1->type == GGML_TYPE_F32);
  10833. GGML_ASSERT( dst->type == GGML_TYPE_F32);
  10834. int64_t t0 = ggml_perf_time_us();
  10835. UNUSED(t0);
  10836. const int64_t ne00 = src0->ne[0];
  10837. const int64_t ne01 = src0->ne[1];
  10838. const int64_t ne02 = src0->ne[2];
  10839. //const int64_t ne03 = src0->ne[3];
  10840. const int64_t ne10 = src1->ne[0];
  10841. const int64_t ne11 = src1->ne[1];
  10842. //const int64_t ne12 = src1->ne[2];
  10843. //const int64_t ne13 = src1->ne[3];
  10844. //const int64_t ne0 = dst->ne[0];
  10845. //const int64_t ne1 = dst->ne[1];
  10846. //const int64_t ne2 = dst->ne[2];
  10847. //const int64_t ne3 = dst->ne[3];
  10848. //const int64_t ne = ne0*ne1*ne2*ne3;
  10849. const int nb00 = src0->nb[0];
  10850. const int nb01 = src0->nb[1];
  10851. const int nb02 = src0->nb[2];
  10852. //const int nb03 = src0->nb[3];
  10853. const int nb10 = src1->nb[0];
  10854. const int nb11 = src1->nb[1];
  10855. //const int nb12 = src1->nb[2];
  10856. //const int nb13 = src1->nb[3];
  10857. //const int nb0 = dst->nb[0];
  10858. const int nb1 = dst->nb[1];
  10859. //const int nb2 = dst->nb[2];
  10860. //const int nb3 = dst->nb[3];
  10861. const int ith = params->ith;
  10862. const int nth = params->nth;
  10863. const int nk = ne00;
  10864. const int nh = nk/2;
  10865. const int ew0 = ggml_up32(ne01);
  10866. GGML_ASSERT(ne00 % 2 == 1); // TODO: support even kernel sizes
  10867. GGML_ASSERT(nb00 == sizeof(float));
  10868. GGML_ASSERT(nb10 == sizeof(float));
  10869. if (params->type == GGML_TASK_INIT) {
  10870. // TODO: fix this memset (wsize is overestimated)
  10871. memset(params->wdata, 0, params->wsize);
  10872. // prepare kernel data (src0)
  10873. {
  10874. float * const wdata = (float *) params->wdata + 0;
  10875. for (int64_t i02 = 0; i02 < ne02; i02++) {
  10876. for (int64_t i01 = 0; i01 < ne01; i01++) {
  10877. const float * const src = (float *)((char *) src0->data + i02*nb02 + i01*nb01);
  10878. float * dst_data = wdata + i02*ew0*ne00;
  10879. for (int64_t i00 = 0; i00 < ne00; i00++) {
  10880. dst_data[i00*ew0 + i01] = src[i00];
  10881. }
  10882. }
  10883. }
  10884. }
  10885. // prepare source data (src1)
  10886. {
  10887. float * const wdata = (float *) params->wdata + ne02*ew0*ne00;
  10888. for (int64_t i11 = 0; i11 < ne11; i11++) {
  10889. const float * const src = (float *)((char *) src1->data + i11*nb11);
  10890. float * dst_data = wdata;
  10891. for (int64_t i10 = 0; i10 < ne10; i10++) {
  10892. dst_data[(i10 + nh)*ew0 + i11] = src[i10];
  10893. }
  10894. }
  10895. }
  10896. return;
  10897. }
  10898. if (params->type == GGML_TASK_FINALIZE) {
  10899. return;
  10900. }
  10901. // total rows in dst
  10902. const int nr = ne02;
  10903. // rows per thread
  10904. const int dr = (nr + nth - 1)/nth;
  10905. // row range for this thread
  10906. const int ir0 = dr*ith;
  10907. const int ir1 = MIN(ir0 + dr, nr);
  10908. for (int i1 = ir0; i1 < ir1; i1++) {
  10909. float * dst_data = (float *)((char *) dst->data + i1*nb1);
  10910. for (int64_t i0 = 0; i0 < ne10; i0 += 2) {
  10911. dst_data[i0/2] = 0;
  10912. for (int k = -nh; k <= nh; k++) {
  10913. float v = 0.0f;
  10914. ggml_vec_dot_f32(ew0, &v,
  10915. (float *) params->wdata + i1*ew0*ne00 + (nh + k)*ew0,
  10916. (float *) params->wdata + ne02*ew0*ne00 + (i0 + nh + k)*ew0);
  10917. dst_data[i0/2] += v;
  10918. }
  10919. }
  10920. }
  10921. }
  10922. static void ggml_compute_forward_conv_1d_s2_ph(
  10923. const struct ggml_compute_params * params,
  10924. const struct ggml_tensor * src0,
  10925. const struct ggml_tensor * src1,
  10926. struct ggml_tensor * dst) {
  10927. switch (src0->type) {
  10928. case GGML_TYPE_F16:
  10929. {
  10930. ggml_compute_forward_conv_1d_s2_ph_f16_f32(params, src0, src1, dst);
  10931. } break;
  10932. case GGML_TYPE_F32:
  10933. {
  10934. ggml_compute_forward_conv_1d_s2_ph_f32(params, src0, src1, dst);
  10935. } break;
  10936. default:
  10937. {
  10938. GGML_ASSERT(false);
  10939. } break;
  10940. }
  10941. }
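
// The s2 variant uses the exact same packing as s1; the only difference is the
// stride-2 output loop: the input position advances by 2 (i0 += 2) while the
// result is written to dst_data[i0/2], so dim 0 of dst is about half of ne10.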
// ggml_compute_forward_conv_2d_sk_p0

static void ggml_compute_forward_conv_2d_sk_p0_f16_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(src0->type == GGML_TYPE_F16);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    const int ne00 = src0->ne[0];
    const int ne01 = src0->ne[1];
    const int ne02 = src0->ne[2];
    //const int ne03 = src0->ne[3];

    const int ne10 = src1->ne[0];
    //const int ne11 = src1->ne[1];
    const int ne12 = src1->ne[2];
    //const int ne13 = src1->ne[3];

    const int ne0 = dst->ne[0];
    const int ne1 = dst->ne[1];
    const int ne2 = dst->ne[2];
    //const int ne3 = dst->ne[3];
    //const int ne  = ne0*ne1*ne2*ne3;

    const int nb00 = src0->nb[0];
    //const int nb01 = src0->nb[1];
    //const int nb02 = src0->nb[2];
    const int nb03 = src0->nb[3];

    const int nb10 = src1->nb[0];
    //const int nb11 = src1->nb[1];
    const int nb12 = src1->nb[2];
    //const int nb13 = src1->nb[3];

    //const int nb0 = dst->nb[0];
    //const int nb1 = dst->nb[1];
    const int nb2 = dst->nb[2];
    //const int nb3 = dst->nb[3];

    const int ith = params->ith;
    const int nth = params->nth;

    const int nk0 = ne00;
    const int nk1 = ne01;

    // size of the convolution row - the kernel size unrolled across all channels
    const int ew0 = nk0*nk1*ne02;

    GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nb10 == sizeof(float));

    if (params->type == GGML_TASK_INIT) {
        // TODO: fix this memset (wsize is overestimated)
        memset(params->wdata, 0, params->wsize);

        // prepare source data (src1)
        {
            ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;

            for (int i12 = 0; i12 < ne12; i12++) {
                const float * const src = (float *)((char *) src1->data + i12*nb12);
                ggml_fp16_t * dst_data = wdata;

                for (int i1 = 0; i1 < ne1; i1++) {
                    for (int i0 = 0; i0 < ne0; i0++) {
                        for (int ik1 = 0; ik1 < nk1; ik1++) {
                            for (int ik0 = 0; ik0 < nk0; ik0++) {
                                dst_data[(i1*ne0 + i0)*ew0 + i12*(nk0*nk1) + ik1*nk0 + ik0] =
                                    GGML_FP32_TO_FP16(src[(i1*nk1 + ik1)*ne10 + (i0*nk0 + ik0)]);
                            }
                        }
                    }
                }
            }
        }

        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // total patches in dst
    const int np = ne2;

    // patches per thread
    const int dp = (np + nth - 1)/nth;

    // patch range for this thread
    const int ip0 = dp*ith;
    const int ip1 = MIN(ip0 + dp, np);

    ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;

    for (int i2 = ip0; i2 < ip1; i2++) {
        float * dst_data = (float *)((char *) dst->data + i2*nb2);

        for (int i1 = 0; i1 < ne1; ++i1) {
            for (int i0 = 0; i0 < ne0; ++i0) {
                ggml_vec_dot_f16(ew0, dst_data + i1*ne0 + i0,
                        (ggml_fp16_t *) ((char *) src0->data + i2*nb03),
                        (ggml_fp16_t *)                 wdata + (i1*ne0 + i0)*ew0);
            }
        }
    }
}

static void ggml_compute_forward_conv_2d_sk_p0(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_conv_2d_sk_p0_f16_f32(params, src0, src1, dst);
            } break;
        case GGML_TYPE_F32:
            {
                //ggml_compute_forward_conv_2d_sk_p0_f32(params, src0, src1, dst);
                GGML_ASSERT(false);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
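
// conv_2d_sk_p0 = 2D convolution with stride == kernel size and no padding,
// i.e. a grid of non-overlapping patches (the shape used for patch embedding).
// GGML_TASK_INIT performs an im2col-style unroll of src1: each output position
// (i1,i0) gets one contiguous row of ew0 = nk0*nk1*ne02 values, so the main
// loop needs just a single ggml_vec_dot_f16 of length ew0 per output element
// against the flattened kernel of channel i2.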
// ggml_compute_forward_flash_attn

static void ggml_compute_forward_flash_attn_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * q,
        const struct ggml_tensor * k,
        const struct ggml_tensor * v,
        const bool masked,
        struct ggml_tensor * dst) {
    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    const int64_t neq0 = q->ne[0];
    const int64_t neq1 = q->ne[1];
    const int64_t neq2 = q->ne[2];
    const int64_t neq3 = q->ne[3];

    const int64_t nek0 = k->ne[0];
    const int64_t nek1 = k->ne[1];
    //const int64_t nek2 = k->ne[2];
    //const int64_t nek3 = k->ne[3];

    //const int64_t nev0 = v->ne[0];
    const int64_t nev1 = v->ne[1];
    //const int64_t nev2 = v->ne[2];
    //const int64_t nev3 = v->ne[3];

    const int64_t ne0 = dst->ne[0];
    const int64_t ne1 = dst->ne[1];
    //const int64_t ne2 = dst->ne[2];
    //const int64_t ne3 = dst->ne[3];

    const int nbk0 = k->nb[0];
    const int nbk1 = k->nb[1];
    const int nbk2 = k->nb[2];
    const int nbk3 = k->nb[3];

    const int nbq0 = q->nb[0];
    const int nbq1 = q->nb[1];
    const int nbq2 = q->nb[2];
    const int nbq3 = q->nb[3];

    const int nbv0 = v->nb[0];
    const int nbv1 = v->nb[1];
    const int nbv2 = v->nb[2];
    const int nbv3 = v->nb[3];

    const int nb0 = dst->nb[0];
    const int nb1 = dst->nb[1];
    const int nb2 = dst->nb[2];
    const int nb3 = dst->nb[3];

    const int ith = params->ith;
    const int nth = params->nth;

    const int64_t D = neq0;
    const int64_t N = neq1;
    const int64_t P = nek1 - N;
    const int64_t M = P + N;

    const int Mup = ggml_up(M, GGML_SOFT_MAX_UNROLL);

    GGML_ASSERT(ne0 == D);
    GGML_ASSERT(ne1 == N);
    GGML_ASSERT(P >= 0);

    GGML_ASSERT(nbq0 == sizeof(float));
    GGML_ASSERT(nbk0 == sizeof(float));
    GGML_ASSERT(nbv0 == sizeof(float));

    GGML_ASSERT(neq0 == D);
    GGML_ASSERT(nek0 == D);
    GGML_ASSERT(nev1 == D);

    GGML_ASSERT(neq1 == N);
    GGML_ASSERT(nek1 == N + P);
    GGML_ASSERT(nev1 == D);

    // dst cannot be transposed or permuted
    GGML_ASSERT(nb0 == sizeof(float));
    GGML_ASSERT(nb0 <= nb1);
    GGML_ASSERT(nb1 <= nb2);
    GGML_ASSERT(nb2 <= nb3);

    if (params->type == GGML_TASK_INIT) {
        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // parallelize by q rows using ggml_vec_dot_f32

    // total rows in q
    const int nr = neq1*neq2*neq3;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    const float scale = 1.0f/sqrtf(D);

    //printf("P=%d N=%d D=%d ir0=%d ir1=%d scale = %f\n", P, N, D, ir0, ir1, scale);

    for (int ir = ir0; ir < ir1; ++ir) {
        // q indices
        const int iq3 = ir/(neq2*neq1);
        const int iq2 = (ir - iq3*neq2*neq1)/neq1;
        const int iq1 = (ir - iq3*neq2*neq1 - iq2*neq1);

        float * S = (float *) params->wdata + ith*(Mup + CACHE_LINE_SIZE_F32);

        for (int i = M; i < Mup; ++i) {
            S[i] = -INFINITY;
        }

        for (int64_t ic = 0; ic < nek1; ++ic) {
            // k indices
            const int ik3 = iq3;
            const int ik2 = iq2;
            const int ik1 = ic;

            // S indices
            const int i1 = ik1;

            ggml_vec_dot_f32(neq0,
                    S + i1,
                    (float *) ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)),
                    (float *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)));
        }

        // scale
        ggml_vec_scale_f32(nek1, S, scale);

        if (masked) {
            for (int64_t i = P; i < M; i++) {
                if (i > P + iq1) {
                    S[i] = -INFINITY;
                }
            }
        }

        // softmax
        {
            float max = -INFINITY;
            ggml_vec_max_f32(M, &max, S);

            ggml_float sum = 0.0;
            {
#ifdef GGML_SOFT_MAX_ACCELERATE
                max = -max;
                vDSP_vsadd(S, 1, &max, S, 1, Mup);
                vvexpf(S, S, &Mup);
                ggml_vec_sum_f32(Mup, &sum, S);
#else
                uint16_t   scvt[GGML_SOFT_MAX_UNROLL];
                ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 };

                for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) {
                    float * SS = S + i;

                    for (int j = 0; j < GGML_SOFT_MAX_UNROLL; ++j) {
                        if (SS[j] == -INFINITY) {
                            SS[j] = 0.0f;
                        } else {
                            ggml_fp16_t s = GGML_FP32_TO_FP16(SS[j] - max);
                            memcpy(&scvt[j], &s, sizeof(uint16_t));
                            const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt[j]]);
                            sump[j] += (ggml_float)val;
                            SS[j] = val;
                        }
                    }
                }

                for (int i = 0; i < GGML_SOFT_MAX_UNROLL; i++) {
                    sum += sump[i];
                }
#endif
            }

            assert(sum > 0.0);

            sum = 1.0/sum;
            ggml_vec_scale_f32(M, S, sum);

#ifndef NDEBUG
            for (int i = 0; i < M; ++i) {
                assert(!isnan(S[i]));
                assert(!isinf(S[i]));
            }
#endif
        }

        for (int64_t ic = 0; ic < nev1; ++ic) {
            // dst indices
            const int i1 = iq1;
            const int i2 = iq2;
            const int i3 = iq3;

            ggml_vec_dot_f32(nek1,
                    (float *) ((char *) dst->data + (ic*nb0 + i1*nb1  + i2*nb2  + i3*nb3)),
                    (float *) ((char *) v->data   + (        ic*nbv1 + i2*nbv2 + i3*nbv3)),
                    S);
        }
    }
}
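
// flash_attn (f32) recap - for every row q_r of q the kernel computes:
//
//   S         = scale * (K @ q_r),  scale = 1/sqrt(D)
//   S         = softmax(diag_mask_inf(S, P))   (mask applied only when `masked`)
//   dst_r[ic] = dot(v[:,ic], S)                for ic in [0, nev1)
//
// The scalar softmax path avoids calling expf() per element: the f32 argument
// is rounded to f16 and its 16-bit pattern indexes the precomputed
// table_exp_f16 lookup table - one memcpy and one table load per element.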
static void ggml_compute_forward_flash_attn_f16(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * q,
        const struct ggml_tensor * k,
        const struct ggml_tensor * v,
        const bool masked,
        struct ggml_tensor * dst) {
    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    const int64_t neq0 = q->ne[0];
    const int64_t neq1 = q->ne[1];
    const int64_t neq2 = q->ne[2];
    const int64_t neq3 = q->ne[3];

    const int64_t nek0 = k->ne[0];
    const int64_t nek1 = k->ne[1];
    //const int64_t nek2 = k->ne[2];
    //const int64_t nek3 = k->ne[3];

    //const int64_t nev0 = v->ne[0];
    const int64_t nev1 = v->ne[1];
    //const int64_t nev2 = v->ne[2];
    //const int64_t nev3 = v->ne[3];

    const int64_t ne0 = dst->ne[0];
    const int64_t ne1 = dst->ne[1];
    //const int64_t ne2 = dst->ne[2];
    //const int64_t ne3 = dst->ne[3];

    const int nbk0 = k->nb[0];
    const int nbk1 = k->nb[1];
    const int nbk2 = k->nb[2];
    const int nbk3 = k->nb[3];

    const int nbq0 = q->nb[0];
    const int nbq1 = q->nb[1];
    const int nbq2 = q->nb[2];
    const int nbq3 = q->nb[3];

    const int nbv0 = v->nb[0];
    const int nbv1 = v->nb[1];
    const int nbv2 = v->nb[2];
    const int nbv3 = v->nb[3];

    const int nb0 = dst->nb[0];
    const int nb1 = dst->nb[1];
    const int nb2 = dst->nb[2];
    const int nb3 = dst->nb[3];

    const int ith = params->ith;
    const int nth = params->nth;

    const int64_t D = neq0;
    const int64_t N = neq1;
    const int64_t P = nek1 - N;
    const int64_t M = P + N;

    const int Mup = ggml_up(M, GGML_SOFT_MAX_UNROLL);

    GGML_ASSERT(ne0 == D);
    GGML_ASSERT(ne1 == N);
    GGML_ASSERT(P >= 0);

    GGML_ASSERT(nbq0 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nbk0 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nbv0 == sizeof(ggml_fp16_t));

    GGML_ASSERT(neq0 == D);
    GGML_ASSERT(nek0 == D);
    GGML_ASSERT(nev1 == D);

    GGML_ASSERT(neq1 == N);
    GGML_ASSERT(nek1 == N + P);
    GGML_ASSERT(nev1 == D);

    // dst cannot be transposed or permuted
    GGML_ASSERT(nb0 == sizeof(float));
    GGML_ASSERT(nb0 <= nb1);
    GGML_ASSERT(nb1 <= nb2);
    GGML_ASSERT(nb2 <= nb3);

    if (params->type == GGML_TASK_INIT) {
        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // parallelize by q rows using ggml_vec_dot_f16

    // total rows in q
    const int nr = neq1*neq2*neq3;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    const float scale = 1.0f/sqrtf(D);

    //printf("P=%d N=%d D=%d ir0=%d ir1=%d scale = %f\n", P, N, D, ir0, ir1, scale);

    for (int ir = ir0; ir < ir1; ++ir) {
        // q indices
        const int iq3 = ir/(neq2*neq1);
        const int iq2 = (ir - iq3*neq2*neq1)/neq1;
        const int iq1 = (ir - iq3*neq2*neq1 - iq2*neq1);

        float * S = (float *) params->wdata + ith*(2*Mup + CACHE_LINE_SIZE_F32);

        for (int i = M; i < Mup; ++i) {
            S[i] = -INFINITY;
        }

        if (GGML_VEC_DOT_UNROLL > 2 || nek1 % GGML_VEC_DOT_UNROLL != 0) {
            for (int64_t ic = 0; ic < nek1; ++ic) {
                // k indices
                const int ik3 = iq3;
                const int ik2 = iq2;
                const int ik1 = ic;

                // S indices
                const int i1 = ik1;

                ggml_vec_dot_f16(neq0,
                        S + i1,
                        (ggml_fp16_t *) ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)),
                        (ggml_fp16_t *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)));
            }
        } else {
            for (int64_t ic = 0; ic < nek1; ic += GGML_VEC_DOT_UNROLL) {
                // k indices
                const int ik3 = iq3;
                const int ik2 = iq2;
                const int ik1 = ic;

                // S indices
                const int i1 = ik1;

                ggml_vec_dot_f16_unroll(neq0, nbk1,
                        S + i1,
                        ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)),
                        (ggml_fp16_t *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)));
            }
        }

        // scale
        ggml_vec_scale_f32(nek1, S, scale);

        if (masked) {
            for (int64_t i = P; i < M; i++) {
                if (i > P + iq1) {
                    S[i] = -INFINITY;
                }
            }
        }

        // softmax
        {
            float max = -INFINITY;
            ggml_vec_max_f32(M, &max, S);

            ggml_float sum = 0.0;
            {
#ifdef GGML_SOFT_MAX_ACCELERATE
                max = -max;
                vDSP_vsadd(S, 1, &max, S, 1, Mup);
                vvexpf(S, S, &Mup);
                ggml_vec_sum_f32(Mup, &sum, S);
#else
                uint16_t   scvt[GGML_SOFT_MAX_UNROLL];
                ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 };

                for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) {
                    float * SS = S + i;

                    for (int j = 0; j < GGML_SOFT_MAX_UNROLL; ++j) {
                        if (SS[j] == -INFINITY) {
                            SS[j] = 0.0f;
                        } else {
                            ggml_fp16_t s = GGML_FP32_TO_FP16(SS[j] - max);
                            memcpy(&scvt[j], &s, sizeof(uint16_t));
                            const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt[j]]);
                            sump[j] += (ggml_float)val;
                            SS[j] = val;
                        }
                    }
                }

                for (int i = 0; i < GGML_SOFT_MAX_UNROLL; i++) {
                    sum += sump[i];
                }
#endif
            }

            assert(sum > 0.0);

            sum = 1.0/sum;
            ggml_vec_scale_f32(M, S, sum);

#ifndef NDEBUG
            for (int i = 0; i < M; ++i) {
                assert(!isnan(S[i]));
                assert(!isinf(S[i]));
            }
#endif
        }

        ggml_fp16_t * S16 = (ggml_fp16_t *) ((float *) params->wdata + ith*(2*Mup + CACHE_LINE_SIZE_F32) + Mup);

        for (int64_t i = 0; i < M; i++) {
            S16[i] = GGML_FP32_TO_FP16(S[i]);
        }

        if (GGML_VEC_DOT_UNROLL == 1 || (nev1 % GGML_VEC_DOT_UNROLL != 0)) {
            for (int64_t ic = 0; ic < nev1; ++ic) {
                // dst indices
                const int i1 = iq1;
                const int i2 = iq2;
                const int i3 = iq3;

                ggml_vec_dot_f16(nek1,
                        (float *)       ((char *) dst->data + (ic*nb0 + i1*nb1  + i2*nb2  + i3*nb3)),
                        (ggml_fp16_t *) ((char *) v->data   + (        ic*nbv1 + i2*nbv2 + i3*nbv3)),
                        S16);
            }
        } else {
            for (int64_t ic = 0; ic < nev1; ic += GGML_VEC_DOT_UNROLL) {
                // dst indices
                const int i1 = iq1;
                const int i2 = iq2;
                const int i3 = iq3;

                ggml_vec_dot_f16_unroll(nek1, nbv1,
                        (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)),
                        ((char *) v->data + (ic*nbv1 + i2*nbv2 + i3*nbv3)),
                        S16);
            }
        }
    }
}
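
// The f16 kernel mirrors the f32 one but streams Q/K/V as f16 while keeping
// the score row S in f32. When nek1 (resp. nev1) is a multiple of
// GGML_VEC_DOT_UNROLL it switches to ggml_vec_dot_f16_unroll, which processes
// GGML_VEC_DOT_UNROLL consecutive rows of k (or v) per call, using the row
// stride (nbk1/nbv1) passed as the second argument.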
static void ggml_compute_forward_flash_attn(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * q,
        const struct ggml_tensor * k,
        const struct ggml_tensor * v,
        const bool masked,
        struct ggml_tensor * dst) {
    switch (q->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_flash_attn_f16(params, q, k, v, masked, dst);
            } break;
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_flash_attn_f32(params, q, k, v, masked, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_flash_ff

static void ggml_compute_forward_flash_ff_f16(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * a,  // F16
        const struct ggml_tensor * b0, // F16 fc_w
        const struct ggml_tensor * b1, // F32 fc_b
        const struct ggml_tensor * c0, // F16 proj_w
        const struct ggml_tensor * c1, // F32 proj_b
        struct ggml_tensor * dst) {
    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    const int64_t nea0 = a->ne[0];
    const int64_t nea1 = a->ne[1];
    const int64_t nea2 = a->ne[2];
    const int64_t nea3 = a->ne[3];

    const int64_t neb00 = b0->ne[0];
    const int64_t neb01 = b0->ne[1];
    //const int64_t neb02 = b0->ne[2];
    //const int64_t neb03 = b0->ne[3];

    const int64_t neb10 = b1->ne[0];
    const int64_t neb11 = b1->ne[1];
    //const int64_t neb12 = b1->ne[2];
    //const int64_t neb13 = b1->ne[3];

    const int64_t nec00 = c0->ne[0];
    const int64_t nec01 = c0->ne[1];
    //const int64_t nec02 = c0->ne[2];
    //const int64_t nec03 = c0->ne[3];

    const int64_t nec10 = c1->ne[0];
    const int64_t nec11 = c1->ne[1];
    //const int64_t nec12 = c1->ne[2];
    //const int64_t nec13 = c1->ne[3];

    const int64_t ne0 = dst->ne[0];
    const int64_t ne1 = dst->ne[1];
    const int64_t ne2 = dst->ne[2];
    //const int64_t ne3 = dst->ne[3];

    const int nba0 = a->nb[0];
    const int nba1 = a->nb[1];
    const int nba2 = a->nb[2];
    const int nba3 = a->nb[3];

    const int nbb00 = b0->nb[0];
    const int nbb01 = b0->nb[1];
    const int nbb02 = b0->nb[2];
    const int nbb03 = b0->nb[3];

    const int nbb10 = b1->nb[0];
    //const int nbb11 = b1->nb[1];
    //const int nbb12 = b1->nb[2];
    //const int nbb13 = b1->nb[3];

    const int nbc00 = c0->nb[0];
    const int nbc01 = c0->nb[1];
    const int nbc02 = c0->nb[2];
    const int nbc03 = c0->nb[3];

    const int nbc10 = c1->nb[0];
    //const int nbc11 = c1->nb[1];
    //const int nbc12 = c1->nb[2];
    //const int nbc13 = c1->nb[3];

    const int nb0 = dst->nb[0];
    const int nb1 = dst->nb[1];
    const int nb2 = dst->nb[2];
    const int nb3 = dst->nb[3];

    const int ith = params->ith;
    const int nth = params->nth;

    const int64_t D = nea0;
    //const int64_t N = nea1;
    const int64_t M = neb01;

    GGML_ASSERT(ne0 == nea0);
    GGML_ASSERT(ne1 == nea1);
    GGML_ASSERT(ne2 == nea2);

    GGML_ASSERT(nba0  == sizeof(ggml_fp16_t));
    GGML_ASSERT(nbb00 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nbb10 == sizeof(float));
    GGML_ASSERT(nbc00 == sizeof(ggml_fp16_t));
    GGML_ASSERT(nbc10 == sizeof(float));

    GGML_ASSERT(neb00 == D);
    GGML_ASSERT(neb01 == M);
    GGML_ASSERT(neb10 == M);
    GGML_ASSERT(neb11 == 1);

    GGML_ASSERT(nec00 == M);
    GGML_ASSERT(nec01 == D);
    GGML_ASSERT(nec10 == D);
    GGML_ASSERT(nec11 == 1);

    // dst cannot be transposed or permuted
    GGML_ASSERT(nb0 == sizeof(float));
    GGML_ASSERT(nb0 <= nb1);
    GGML_ASSERT(nb1 <= nb2);
    GGML_ASSERT(nb2 <= nb3);

    if (params->type == GGML_TASK_INIT) {
        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // parallelize by a rows using ggml_vec_dot_f16

    // total rows in a
    const int nr = nea1*nea2*nea3;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int ir = ir0; ir < ir1; ++ir) {
        // a indices
        const int ia3 = ir/(nea2*nea1);
        const int ia2 = (ir - ia3*nea2*nea1)/nea1;
        const int ia1 = (ir - ia3*nea2*nea1 - ia2*nea1);

        float * S = (float *) params->wdata + ith*(2*M + CACHE_LINE_SIZE_F32);

        for (int64_t ic = 0; ic < neb01; ++ic) {
            // b0 indices
            const int ib03 = ia3;
            const int ib02 = ia2;
            const int ib01 = ic;

            // S indices
            const int i1 = ib01;

            ggml_vec_dot_f16(nea0,
                    S + i1,
                    (ggml_fp16_t *) ((char *) b0->data + (ib01*nbb01 + ib02*nbb02 + ib03*nbb03)),
                    (ggml_fp16_t *) ((char *) a->data  + ( ia1*nba1  +  ia2*nba2  +  ia3*nba3)));
        }

        ggml_vec_add_f32(neb01, S, S, (float *) b1->data);
        //ggml_vec_gelu_f32(neb01, S, S);

        ggml_fp16_t * S16 = (ggml_fp16_t *) ((float *) params->wdata + ith*(2*M + CACHE_LINE_SIZE_F32) + M);

        for (int64_t i = 0; i < M; i++) {
            S16[i] = GGML_FP32_TO_FP16(S[i]);
        }

        ggml_vec_gelu_f16(neb01, S16, S16);

        {
            // dst indices
            const int i1 = ia1;
            const int i2 = ia2;
            const int i3 = ia3;

            for (int64_t ic = 0; ic < nec01; ++ic) {
                ggml_vec_dot_f16(neb01,
                        (float *)       ((char *) dst->data + (ic*nb0 + i1*nb1   + i2*nb2   + i3*nb3)),
                        (ggml_fp16_t *) ((char *) c0->data  + (        ic*nbc01 + i2*nbc02 + i3*nbc03)),
                        S16);
            }

            ggml_vec_add_f32(nec01,
                    (float *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3)),
                    (float *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3)),
                    (float *) c1->data);
        }
    }
}
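
// flash_ff (f16) fuses a whole feed-forward block per row of a:
//
//   S   = b0 @ a_row + b1        // fc_w, fc_b
//   S16 = gelu(S) in f16
//   dst = c0 @ S16 + c1          // proj_w, proj_b
//
// i.e. dst_row = proj_w @ gelu(fc_w @ a_row + fc_b) + proj_b, with the
// intermediate activations living in this thread's slice of params->wdata.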
static void ggml_compute_forward_flash_ff(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * a,
        const struct ggml_tensor * b0,
        const struct ggml_tensor * b1,
        const struct ggml_tensor * c0,
        const struct ggml_tensor * c1,
        struct ggml_tensor * dst) {
    switch (b0->type) {
        case GGML_TYPE_F16:
            {
                ggml_compute_forward_flash_ff_f16(params, a, b0, b1, c0, c1, dst);
            } break;
        case GGML_TYPE_F32:
            {
                GGML_ASSERT(false); // TODO
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
// ggml_compute_forward_flash_attn_back

static void ggml_compute_forward_flash_attn_back_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * q,
        const struct ggml_tensor * k,
        const struct ggml_tensor * v,
        const struct ggml_tensor * d,
        const bool masked,
        struct ggml_tensor * dst) {
    int64_t t0 = ggml_perf_time_us();
    UNUSED(t0);

    const int64_t neq0 = q->ne[0];
    const int64_t neq1 = q->ne[1];
    const int64_t neq2 = q->ne[2];
    const int64_t neq3 = q->ne[3];

    const int64_t nek0 = k->ne[0];
    const int64_t nek1 = k->ne[1];
    //const int64_t nek2 = k->ne[2];
    //const int64_t nek3 = k->ne[3];

    const int64_t nev0 = v->ne[0];
    const int64_t nev1 = v->ne[1];
    //const int64_t nev2 = v->ne[2];
    //const int64_t nev3 = v->ne[3];

    const int64_t ned0 = d->ne[0];
    const int64_t ned1 = d->ne[1];
    //const int64_t ned2 = d->ne[2];
    //const int64_t ned3 = d->ne[3];

    const int64_t ne0 = dst->ne[0];
    const int64_t ne1 = dst->ne[1];
    const int64_t ne2 = dst->ne[2];
    const int64_t ne3 = dst->ne[3];

    const int nbk0 = k->nb[0];
    const int nbk1 = k->nb[1];
    const int nbk2 = k->nb[2];
    const int nbk3 = k->nb[3];

    const int nbq0 = q->nb[0];
    const int nbq1 = q->nb[1];
    const int nbq2 = q->nb[2];
    const int nbq3 = q->nb[3];

    const int nbv0 = v->nb[0];
    const int nbv1 = v->nb[1];
    const int nbv2 = v->nb[2];
    const int nbv3 = v->nb[3];

    const int nbd0 = d->nb[0];
    const int nbd1 = d->nb[1];
    const int nbd2 = d->nb[2];
    const int nbd3 = d->nb[3];

    const int nb0 = dst->nb[0];
    const int nb1 = dst->nb[1];
    const int nb2 = dst->nb[2];
    const int nb3 = dst->nb[3];

    const int ith = params->ith;
    const int nth = params->nth;

    const int64_t D = neq0;
    const int64_t N = neq1;
    const int64_t P = nek1 - N;
    const int64_t M = P + N;

    const int Mup  = ggml_up(M, GGML_SOFT_MAX_UNROLL);
    const int mxDM = MAX(D, Mup);

    // GGML_ASSERT(ne0 == D);
    // GGML_ASSERT(ne1 == N);
    GGML_ASSERT(P >= 0);

    GGML_ASSERT(nbq0 == sizeof(float));
    GGML_ASSERT(nbk0 == sizeof(float));
    GGML_ASSERT(nbv0 == sizeof(float));

    GGML_ASSERT(neq0 == D);
    GGML_ASSERT(nek0 == D);
    GGML_ASSERT(nev1 == D);
    GGML_ASSERT(ned0 == D);

    GGML_ASSERT(neq1 == N);
    GGML_ASSERT(nek1 == N + P);
    GGML_ASSERT(nev1 == D);
    GGML_ASSERT(ned1 == N);

    // dst cannot be transposed or permuted
    GGML_ASSERT(nb0 == sizeof(float));
    GGML_ASSERT(nb0 <= nb1);
    GGML_ASSERT(nb1 <= nb2);
    GGML_ASSERT(nb2 <= nb3);

    if (params->type == GGML_TASK_INIT) {
        if (ith == 0) {
            memset(dst->data, 0, nb0*ne0*ne1*ne2*ne3);
        }
        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        return;
    }

    // parallelize by q rows using ggml_vec_dot_f32

    // total rows in q
    const int nr = neq2*neq3;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    const float scale = 1.0f/sqrtf(D);

    //printf("P=%d N=%d D=%d ir0=%d ir1=%d scale = %f\n", P, N, D, ir0, ir1, scale);

    for (int ir = ir0; ir < ir1; ++ir) {
        // q indices
        const int iq3 = ir/(neq2);
        const int iq2 = ir - iq3*neq2;
        for (int iq1 = 0; iq1 < neq1; ++iq1) {
            // not sure about CACHE_LINE_SIZE_F32..
            // - maybe it must not be multiplied by 2 and excluded from .. in SM 1*(..) offset?
            float * S  = (float *) params->wdata + ith*2*(mxDM + CACHE_LINE_SIZE_F32) + 0*(mxDM + CACHE_LINE_SIZE_F32);
            float * SM = (float *) params->wdata + ith*2*(mxDM + CACHE_LINE_SIZE_F32) + 1*(mxDM + CACHE_LINE_SIZE_F32);

            for (int i = M; i < Mup; ++i) {
                S[i] = -INFINITY;
            }

            for (int64_t ic = 0; ic < nek1; ++ic) {
                // k indices
                const int ik3 = iq3;
                const int ik2 = iq2;
                const int ik1 = ic;

                // S indices
                const int i1 = ik1;

                ggml_vec_dot_f32(neq0,
                        S + i1,
                        (float *) ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)),
                        (float *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)));
            }

            // scale
            ggml_vec_scale_f32(nek1, S, scale);

            if (masked) {
                for (int64_t i = P; i < M; i++) {
                    if (i > P + iq1) {
                        S[i] = -INFINITY;
                    }
                }
            }

            // softmax
            {
                float max = -INFINITY;
                ggml_vec_max_f32(M, &max, S);

                ggml_float sum = 0.0;
                {
#ifdef GGML_SOFT_MAX_ACCELERATE
                    max = -max;
                    vDSP_vsadd(SM, 1, &max, SM, 1, Mup);
                    vvexpf(SM, SM, &Mup);
                    ggml_vec_sum_f32(Mup, &sum, SM);
#else
                    uint16_t   scvt[GGML_SOFT_MAX_UNROLL];
                    ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 };

                    for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) {
                        float * SR =  S + i;
                        float * SW = SM + i;

                        for (int j = 0; j < GGML_SOFT_MAX_UNROLL; ++j) {
                            if (SR[j] == -INFINITY) {
                                SW[j] = 0.0f;
                            } else {
                                ggml_fp16_t s = GGML_FP32_TO_FP16(SR[j] - max);
                                memcpy(&scvt[j], &s, sizeof(uint16_t));
                                const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt[j]]);
                                sump[j] += (ggml_float)val;
                                SW[j] = val;
                            }
                        }
                    }

                    for (int i = 0; i < GGML_SOFT_MAX_UNROLL; i++) {
                        sum += sump[i];
                    }
#endif
                }

                assert(sum > 0.0);

                sum = 1.0/sum;
                ggml_vec_scale_f32(M, SM, sum);
            }

            // step-by-step explanation
            {
                // forward-process                   shape      grads from backward process
                // parallel_for iq2,iq3:
                //  k[:D,:M,:,:]                     [D,M,:,:]  grad[k][:D,:M,iq2,iq3]  += grad[kcur]
                //  q[:D,:N,:,:]                     [D,N,:,:]  grad[q][:D,iq1,iq2,iq3] += grad[qcur]
                //  v[:M,:D,:,:]                     [M,D,:,:]  grad[v][:M,:D,iq2,iq3]  += grad[vcur]
                //  for iq1:
                //   kcur  = k[:D,:M,iq2,iq3]        [D,M,1,1]  grad[kcur] = grad[S1].T @ qcur
                //   qcur  = q[:D,iq1,iq2,iq3]       [D,1,1,1]  grad[qcur] = grad[S1]   @ kcur
                //   vcur  = v[:M,:D,iq2,iq3]        [M,D,1,1]  grad[vcur] = grad[S5].T @ S4
                //   S0    = -Inf                    [D,1,1,1]
                //  ~S1[i] = dot(kcur[:D,i], qcur)
                //   S1    = qcur @ kcur.T           [M,1,1,1]  grad[S1]   = grad[S2] * scale
                //   S2    = S1 * scale              [M,1,1,1]  grad[S2]   = diag_mask_zero(grad[S3], P)
                //   S3    = diag_mask_inf(S2, P)    [M,1,1,1]  grad[S3]   = S4 * (grad[S4] - dot(S4, grad[S4]))
                //   S4    = softmax(S3)             [M,1,1,1]  grad[S4]   = grad[S5] @ vcur
                //  ~S5[i] = dot(vcur[:,i], S4)
                //   S5    = S4 @ vcur.T             [D,1,1,1]  grad[S5]   = d[:D,iq1,iq2,iq3]
                //  ~dst[i,iq1,iq2,iq3]  = S5[i]               ^
                //   dst[:D,iq1,iq2,iq3] = S5                  | grad[dst[:D,iq1,iq2,iq3]] = d[:D,iq1,iq2,iq3]
                //   dst                 backward-/            grad[dst]                  = d
                //
                // output gradients with their dependencies:
                //
                // grad[kcur] = grad[S1].T @ qcur
                // grad[S1]   = diag_mask_zero(grad[S3], P) * scale
                // grad[S3]   = S4 * (grad[S4] - dot(S4, grad[S4]))
                // grad[S4]   = grad[S5] @ vcur
                // grad[S4]   = d[:D,iq1,iq2,iq3] @ vcur
                // grad[qcur] = grad[S1] @ kcur
                // grad[vcur] = grad[S5].T @ S4
                // grad[vcur] = d[:D,iq1,iq2,iq3].T @ S4
                //
                // in post-order:
                //
                // S1         = qcur @ kcur.T
                // S2         = S1 * scale
                // S3         = diag_mask_inf(S2, P)
                // S4         = softmax(S3)
                // grad[S4]   = d[:D,iq1,iq2,iq3] @ vcur
                // grad[S3]   = S4 * (grad[S4] - dot(S4, grad[S4]))
                // grad[S1]   = diag_mask_zero(grad[S3], P) * scale
                // grad[qcur] = grad[S1] @ kcur
                // grad[kcur] = grad[S1].T @ qcur
                // grad[vcur] = d[:D,iq1,iq2,iq3].T @ S4
                //
                // using less variables (SM=S4):
                //
                // S             = diag_mask_inf(qcur @ kcur.T * scale, P)
                // SM            = softmax(S)
                // S             = d[:D,iq1,iq2,iq3] @ vcur
                // dot_SM_gradSM = dot(SM, S)
                // S             = SM * (S - dot(SM, S))
                // S             = diag_mask_zero(S, P) * scale
                //
                // grad[q][:D,iq1,iq2,iq3] += S   @ kcur
                // grad[k][:D,:M,iq2,iq3]  += S.T @ qcur
                // grad[v][:M,:D,iq2,iq3]  += d[:D,iq1,iq2,iq3].T @ SM
            }

            // S = gradSM = d[:D,iq1,iq2,iq3] @ vcur
            // S = d[:D,iq1,iq2,iq3] @ vcur
            // S[:M] += vcur[:M,ic] * d[ic,iq1,iq2,iq3]
            ggml_vec_set_f32(M, S, 0);
            for (int64_t ic = 0; ic < D; ++ic) {
                // dst indices
                const int i1 = iq1;
                const int i2 = iq2;
                const int i3 = iq3;

                ggml_vec_mad_f32(M,
                        S,
                         (float *) ((char *) v->data + (          ic*nbv1 + i2*nbv2 + i3*nbv3)),
                        *(float *) ((char *) d->data + (ic*nbd0 + i1*nbd1 + i2*nbd2 + i3*nbd3)));
            }

            // S = SM * (S - dot(SM, S))
            float dot_SM_gradSM = 0;
            ggml_vec_dot_f32 (M, &dot_SM_gradSM, SM, S);
            ggml_vec_acc1_f32(M, S, -dot_SM_gradSM);
            ggml_vec_mul_f32 (M, S, S, SM);

            // S = diag_mask_zero(S, P) * scale
            if (masked) {
                // for (int64_t i = P + iq1 + 1; i < M; i++) {
                //     S[i] = 0;
                // }
                for (int64_t i = P; i < M; i++) {
                    if (i > P + iq1) {
                        S[i] = 0;
                    }
                }
            }
            ggml_vec_scale_f32(M, S, scale);

            void * grad_q = (char *) dst->data;
            void * grad_k = (char *) dst->data + nb0*D*N*neq2*neq3;
            void * grad_v = (char *) dst->data + nb0*D*N*neq2*neq3 + nb0*D*M*neq2*neq3;

            const size_t nbgq1 = nb0*neq0;
            const size_t nbgq2 = nb0*neq0*neq1;
            const size_t nbgq3 = nb0*neq0*neq1*neq2;

            const size_t nbgk1 = nb0*nek0;
            const size_t nbgk2 = nb0*nek0*nek1;
            const size_t nbgk3 = nb0*nek0*nek1*neq2;

            const size_t nbgv1 = nb0*nev0;
            const size_t nbgv2 = nb0*nev0*nev1;
            const size_t nbgv3 = nb0*nev0*nev1*neq2;

            // S    shape [M,1]
            // SM   shape [M,1]
            // kcur shape [D,M]
            // qcur shape [D,1]
            // vcur shape [M,D]
            //
            // grad[q][:D,iq1,iq2,iq3] += S @ kcur
            // grad[q][:D,iq1,iq2,iq3] += shape[M,1] @ shape[D,M]
            // grad[q][:D,iq1,iq2,iq3] += S[ic] * kcur[:D,ic]
            //
            //// grad[q][ic,iq1,iq2,iq3] += dot(kcur[:,ic],S.T)
            //// grad[q][ic,iq1,iq2,iq3] += dot(k[:D,ic,iq2,iq3],S.T)
            for (int64_t ic = 0; ic < M; ++ic) {
                // dst indices
                const int i1 = iq1;
                const int i2 = iq2;
                const int i3 = iq3;

                ggml_vec_mad_f32(D,
                        (float *) ((char *) grad_q  + (i1*nbgq1 + i2*nbgq2 + i3*nbgq3)),
                        (float *) ((char *) k->data + (ic*nbk1  + i2*nbk2  + i3*nbk3)),
                        S[ic]);
            }

            // grad[k][:D,:M,iq2,iq3] += S.T       @ qcur
            // grad[k][:D,ic,iq2,iq3] += S.T[0,ic] * qcur[:D,0]
            // grad[k][:D,ic,iq2,iq3] += S[ic]     * qcur[:D,0]
            for (int64_t ic = 0; ic < M; ++ic) {
                // dst indices
                const int i1 = iq1;
                const int i2 = iq2;
                const int i3 = iq3;

                // ggml_vec_set_f32(D,
                //         (float *) ((char *) grad_k + (ic*nbgk1 + i2*nbgk2 + i3*nbgk3)),
                //         0);
                ggml_vec_mad_f32(D,
                        (float *) ((char *) grad_k  + (ic*nbgk1 + i2*nbgk2 + i3*nbgk3)),
                        (float *) ((char *) q->data + (i1*nbq1  + i2*nbq2  + i3*nbq3)),
                        S[ic]);
            }

            // grad[v][:M,:D,iq2,iq3] += d[:D,iq1,iq2,iq3].T       @ SM
            // grad[v][:M,ic,iq2,iq3] += d[:D,iq1,iq2,iq3].T[0,ic] * SM[:M]
            // grad[v][:M,ic,iq2,iq3] += d[ic,iq1,iq2,iq3]         * SM[:M]
            for (int64_t ic = 0; ic < D; ++ic) {
                // dst indices
                const int i1 = iq1;
                const int i2 = iq2;
                const int i3 = iq3;

                // ggml_vec_set_f32(M,
                //         (float *) ((char *) grad_v + (ic*nbgv1 + i2*nbgv2 + i3*nbgv3)),
                //         0);
                ggml_vec_mad_f32(M,
                         (float *) ((char *) grad_v + (ic*nbgv1 + i2*nbgv2 + i3*nbgv3)),
                        SM,
                        *(float *) ((char *) d->data + (ic*nbd0 + i1*nbd1 + i2*nbd2 + i3*nbd3)));
            }
        }
    }
}
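
// flash_attn_back packs all three input gradients into dst, back to back:
//
//   grad[q]: nb0*D*N*neq2*neq3 bytes, at offset 0
//   grad[k]: nb0*D*M*neq2*neq3 bytes, immediately after grad[q]
//   grad[v]: immediately after grad[k]
//
// which is why INIT zero-fills dst and the main loop accumulates with
// ggml_vec_mad_f32; the caller is expected to slice dst to recover the three
// gradient tensors.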
static void ggml_compute_forward_flash_attn_back(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * q,
        const struct ggml_tensor * k,
        const struct ggml_tensor * v,
        const struct ggml_tensor * d,
        const bool masked,
        struct ggml_tensor * dst) {
    switch (q->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_flash_attn_back_f32(params, q, k, v, d, masked, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_win_part

static void ggml_compute_forward_win_part_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * opt0,
        struct ggml_tensor * dst) {
    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int64_t ne00 = src0->ne[0]; UNUSED(ne00);
    const int64_t ne01 = src0->ne[1];
    const int64_t ne02 = src0->ne[2];
    const int64_t ne03 = src0->ne[3]; UNUSED(ne03);

    const int64_t ne0 = dst->ne[0];
    const int64_t ne1 = dst->ne[1];
    const int64_t ne2 = dst->ne[2];
    const int64_t ne3 = dst->ne[3]; UNUSED(ne3);

    const int32_t nep0 = ((const int32_t *)(opt0->data))[0];
    const int32_t nep1 = ((const int32_t *)(opt0->data))[1];
    const int32_t w    = ((const int32_t *)(opt0->data))[2];

    assert(ne00 == ne0);
    assert(ne3  == nep0*nep1);

    // TODO: optimize / multi-thread
    for (int py = 0; py < nep1; ++py) {
        for (int px = 0; px < nep0; ++px) {
            const int64_t i3 = py*nep0 + px;
            for (int64_t i2 = 0; i2 < ne2; ++i2) {
                for (int64_t i1 = 0; i1 < ne1; ++i1) {
                    for (int64_t i0 = 0; i0 < ne0; ++i0) {
                        const int64_t i02 = py*w + i2;
                        const int64_t i01 = px*w + i1;
                        const int64_t i00 = i0;

                        const int64_t i = i3*ne2*ne1*ne0 + i2*ne1*ne0    + i1*ne0   + i0;
                        const int64_t j =                  i02*ne01*ne00 + i01*ne00 + i00;

                        if (py*w + i2 >= ne02 || px*w + i1 >= ne01) {
                            ((float *) dst->data)[i] = 0.0f;
                        } else {
                            ((float *) dst->data)[i] = ((float *) src0->data)[j];
                        }
                    }
                }
            }
        }
    }
}
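
// win_part splits a [ne00, ne01, ne02] feature map into nep0 x nep1 windows of
// size w x w, zero-padding the right/bottom edges. Illustrative example
// (assuming the caller fills opt0 the usual way): ne01 = ne02 = 5 with w = 3
// gives nep0 = nep1 = 2, i.e. four 3x3 windows; positions with
// py*w + i2 >= ne02 or px*w + i1 >= ne01 land in the padding and are written
// as 0.0f.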
static void ggml_compute_forward_win_part(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * opt0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_win_part_f32(params, src0, opt0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_win_unpart

static void ggml_compute_forward_win_unpart_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * opt0,
        struct ggml_tensor * dst) {
    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int64_t ne00 = src0->ne[0];
    const int64_t ne01 = src0->ne[1];
    const int64_t ne02 = src0->ne[2];
    //const int64_t ne03 = src0->ne[3];

    const int64_t ne0 = dst->ne[0];
    const int64_t ne1 = dst->ne[1];
    const int64_t ne2 = dst->ne[2];

    const int32_t w = ((const int32_t *)(opt0->data))[0];

    // padding
    const int px = (w - ne1%w)%w;
    //const int py = (w - ne2%w)%w;

    const int npx = (px + ne1)/w;
    //const int npy = (py + ne2)/w;

    assert(ne0 == ne00);

    // TODO: optimize / multi-thread
    for (int64_t i2 = 0; i2 < ne2; ++i2) {
        for (int64_t i1 = 0; i1 < ne1; ++i1) {
            for (int64_t i0 = 0; i0 < ne0; ++i0) {
                const int ip2 = i2/w;
                const int ip1 = i1/w;

                const int64_t i02 = i2%w;
                const int64_t i01 = i1%w;
                const int64_t i00 = i0;

                const int64_t i = (ip2*npx + ip1)*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00 + i00;
                const int64_t j = i2*ne1*ne0 + i1*ne0 + i0;

                ((float *) dst->data)[j] = ((float *) src0->data)[i];
            }
        }
    }
}
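
// win_unpart is the inverse of win_part: it scatters the w x w windows back
// into one contiguous [ne0, ne1, ne2] map, dropping whatever was produced for
// the padding region during partitioning.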
static void ggml_compute_forward_win_unpart(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * opt0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_win_unpart_f32(params, src0, opt0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_map_unary

static void ggml_compute_forward_map_unary_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst,
        const ggml_unary_op_f32_t fun) {
    GGML_ASSERT(ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert( dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        fun(nc,
                (float *) ((char *) dst->data  + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])));
    }
}

static void ggml_compute_forward_map_unary(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        struct ggml_tensor * dst,
        const ggml_unary_op_f32_t fun) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_map_unary_f32(params, src0, dst, fun);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
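
// Illustrative usage sketch for the map_unary mechanism (assumes the public
// ggml_map_unary_f32() wrapper declared in ggml.h; not part of this file):
//
//   static void my_sqr_f32(const int n, float * dst, const float * src) {
//       for (int i = 0; i < n; ++i) {
//           dst[i] = src[i]*src[i]; // any element-wise f32 op works here
//       }
//   }
//
//   // ... while building the graph:
//   // struct ggml_tensor * y = ggml_map_unary_f32(ctx, x, my_sqr_f32);
//
// The callback is invoked once per row with n = ne[0], so it must not assume
// anything about the higher dimensions of the tensor.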
// ggml_compute_forward_map_binary

static void ggml_compute_forward_map_binary_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst,
        const ggml_binary_op_f32_t fun) {
    assert(params->ith == 0);
    assert(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const int n  = ggml_nrows(src0);
    const int nc = src0->ne[0];

    assert( dst->nb[0] == sizeof(float));
    assert(src0->nb[0] == sizeof(float));
    assert(src1->nb[0] == sizeof(float));

    for (int i = 0; i < n; i++) {
        fun(nc,
                (float *) ((char *) dst->data  + i*( dst->nb[1])),
                (float *) ((char *) src0->data + i*(src0->nb[1])),
                (float *) ((char *) src1->data + i*(src1->nb[1])));
    }
}

static void ggml_compute_forward_map_binary(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst,
        const ggml_binary_op_f32_t fun) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_map_binary_f32(params, src0, src1, dst, fun);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_map_custom1

static void ggml_compute_forward_map_custom1_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * a,
        struct ggml_tensor * dst,
        const ggml_custom1_op_f32_t fun) {
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    fun(dst, a);
}

static void ggml_compute_forward_map_custom1(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * a,
        struct ggml_tensor * dst,
        const ggml_custom1_op_f32_t fun) {
    switch (a->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_map_custom1_f32(params, a, dst, fun);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_map_custom2

static void ggml_compute_forward_map_custom2_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * a,
        const struct ggml_tensor * b,
        struct ggml_tensor * dst,
        const ggml_custom2_op_f32_t fun) {
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    fun(dst, a, b);
}

static void ggml_compute_forward_map_custom2(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * a,
        const struct ggml_tensor * b,
        struct ggml_tensor * dst,
        const ggml_custom2_op_f32_t fun) {
    switch (a->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_map_custom2_f32(params, a, b, dst, fun);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}

// ggml_compute_forward_map_custom3

static void ggml_compute_forward_map_custom3_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * a,
        const struct ggml_tensor * b,
        const struct ggml_tensor * c,
        struct ggml_tensor * dst,
        const ggml_custom3_op_f32_t fun) {
    assert(params->ith == 0);

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    fun(dst, a, b, c);
}

static void ggml_compute_forward_map_custom3(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * a,
        const struct ggml_tensor * b,
        const struct ggml_tensor * c,
        struct ggml_tensor * dst,
        const ggml_custom3_op_f32_t fun) {
    switch (a->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_map_custom3_f32(params, a, b, c, dst, fun);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
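
// Note: unlike map_unary/map_binary, which invoke `fun` once per row, the
// map_custom callbacks receive the whole tensors in a single call (and run on
// one thread - params->ith == 0 is asserted), so the user function is
// responsible for its own iteration over the data.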
// ggml_compute_forward_cross_entropy_loss

static void ggml_compute_forward_cross_entropy_loss_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_is_contiguous(src0));
    GGML_ASSERT(ggml_is_contiguous(src1));
    GGML_ASSERT(ggml_is_scalar(dst));
    GGML_ASSERT(ggml_are_same_shape(src0, src1));

    const int ith = params->ith;
    const int nth = params->nth;

    float * sums = (float *) params->wdata;

    // TODO: handle transposed/permuted matrices
    const int nc = src0->ne[0];
    const int nr = ggml_nrows(src0);

    if (params->type == GGML_TASK_INIT) {
        if (ith == 0) {
            memset(sums, 0, sizeof(float) * (nth + nth * nc));
        }
        return;
    }

    if (params->type == GGML_TASK_FINALIZE) {
        if (ith == 0) {
            float * dp = (float *) dst->data;
            ggml_vec_sum_f32(nth, dp, sums);
            dp[0] *= -1.0f;
        }
        return;
    }

    const double eps = 1e-9;

    // rows per thread
    const int dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int ir0 = dr*ith;
    const int ir1 = MIN(ir0 + dr, nr);

    for (int i1 = ir0; i1 < ir1; i1++) {
        float * s0 = (float *)((char *) src0->data + i1*src0->nb[1]);
        float * s1 = (float *)((char *) src1->data + i1*src1->nb[1]);
        float * st = (float *) params->wdata + nth + ith*nc;

#ifndef NDEBUG
        for (int i = 0; i < nc; ++i) {
            //printf("p[%d] = %f\n", i, p[i]);
            assert(!isnan(s0[i]));
            assert(!isnan(s1[i]));
        }
#endif

        // soft_max
        ggml_float sum = 0.0;
        {
            float max = -INFINITY;
            ggml_vec_max_f32(nc, &max, s0);

            uint16_t scvt;
            for (int i = 0; i < nc; i++) {
                if (s0[i] == -INFINITY) {
                    st[i] = 0.0f;
                } else {
                    // const float val = (s0[i] == -INFINITY) ? 0.0 : exp(s0[i] - max);
                    ggml_fp16_t s = GGML_FP32_TO_FP16(s0[i] - max);
                    memcpy(&scvt, &s, sizeof(scvt));
                    const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt]);
                    sum += (ggml_float)val;
                    st[i] = val;
                }
            }

            assert(sum > 0.0);
            // sum = 1.0/sum;
        }

        // avoid log(0) by rescaling from [0..1] to [eps..1]
        sum = (1.0 - eps) / sum;
        ggml_vec_scale_f32(nc, st, sum);
        ggml_vec_add1_f32(nc, st, st, eps);
        ggml_vec_log_f32(nc, st, st);
        ggml_vec_mul_f32(nc, st, st, s1);
        ggml_vec_sum_f32(nc, sums + ith, st);

#ifndef NDEBUG
        for (int i = 0; i < nc; ++i) {
            assert(!isnan(st[i]));
            assert(!isinf(st[i]));
        }
#endif
    }
}
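
// cross_entropy_loss recap: each thread accumulates, over its rows,
//
//   sums[ith] += sum_i s1[i] * log(eps + (1 - eps)*softmax(s0)[i])
//
// and FINALIZE reduces the per-thread partials and negates, yielding
// loss = -sum(labels * log(softmax(logits))); the rescale from [0..1] to
// [eps..1] guards against log(0).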
  12317. static void ggml_compute_forward_cross_entropy_loss(
  12318. const struct ggml_compute_params * params,
  12319. const struct ggml_tensor * src0,
  12320. const struct ggml_tensor * src1,
  12321. struct ggml_tensor * dst) {
  12322. switch (src0->type) {
  12323. case GGML_TYPE_F32:
  12324. {
  12325. ggml_compute_forward_cross_entropy_loss_f32(params, src0, src1, dst);
  12326. } break;
  12327. default:
  12328. {
  12329. GGML_ASSERT(false);
  12330. } break;
  12331. }
  12332. }

// ggml_compute_forward_cross_entropy_loss_back

static void ggml_compute_forward_cross_entropy_loss_back_f32(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        const struct ggml_tensor * opt0,
        struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_is_contiguous(dst));
    GGML_ASSERT(ggml_is_contiguous(src0));
    GGML_ASSERT(ggml_is_contiguous(src1));
    GGML_ASSERT(ggml_is_contiguous(opt0));
    GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));

    const int64_t ith = params->ith;
    const int64_t nth = params->nth;

    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    const float eps = 1e-9f;

    // TODO: handle transposed/permuted matrices
    const int64_t nc = src0->ne[0];
    const int64_t nr = ggml_nrows(src0);

    // rows per thread
    const int64_t dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int64_t ir0 = dr*ith;
    const int64_t ir1 = MIN(ir0 + dr, nr);

    float * d = (float *) opt0->data;

    for (int64_t i1 = ir0; i1 < ir1; i1++) {
        float * ds0 = (float *)((char *) dst->data  + i1*dst->nb[1]);
        float * s0  = (float *)((char *) src0->data + i1*src0->nb[1]);
        float * s1  = (float *)((char *) src1->data + i1*src1->nb[1]);
        float * sm  = (float *) params->wdata + ith*nc;

#ifndef NDEBUG
        for (int i = 0; i < nc; ++i) {
            //printf("p[%d] = %f\n", i, p[i]);
            assert(!isnan(s0[i]));
            assert(!isnan(s1[i]));
        }
#endif
        // step by step explanation:
        {
            //float * sums = (float *) params->wdata;

            // forward pass with annotated gradients from backward pass
            // (built by going in reverse operation order, adding to gradients of current operation args)
            // st0 = exp(s0-max(s0))                                                       grad[st0] = grad[st1]*(1.0 - eps)/sum
            //                                                                             from softmax_back: grad[s0] = st1_k * (grad[st1]_k - dot(st1, grad[st1]))
            // ggml_vec_scale_f32(nc, st, sum);           // st1 = st0/sum = softmax(s0)   grad[st1] = grad[st2]*(1.0 - eps)
            // ggml_vec_scale_f32(nc, st, (1.0f - eps));  // st2 = st1*(1.0 - eps)         grad[st2] = grad[st3]
            // ggml_vec_add1_f32(nc, st, st, eps);        // st3 = st2 + eps               grad[st3] = grad[st4]/st3
            // ggml_vec_log_f32(nc, st, st);              // st4 = log(st3)                grad[st4] = grad[st5] * s1
            // ggml_vec_mul_f32(nc, st, st, s1);          // st5 = st4 * s1                grad[st5] = grad[sums[ith]]
            // ggml_vec_sum_f32(nc, sums + ith, st);      // sums[ith] = st5               grad[sums[ith]] = grad[cross_entropy_loss] = -grad[cel]

            // substitute into grad[st1], because we can reuse softmax_back from this point on
            // grad[st1] = -grad[cel]*s1*(1.0 - eps)/(eps + softmax(s0)*(1.0 - eps))
            // postorder:
            // grad[st1] := softmax(s0)
            // grad[st1] := grad[st1]*(1.0 - eps)
            // grad[st1] := grad[st1] + eps
            // grad[st1] := s1 / grad[st1]
            // grad[st1] := grad[st1]*(1.0-eps)*-grad[cel]

            // src0 gradients by going through softmax_back
            // grad[s0] = st1_k * (grad[st1]_k - dot(st1, grad[st1]))
            // from softmax_back:
            // dxk = yk * (dyk - dot(y, dy))
            // dot_y_dy := dot(y, dy)
            // dx := dy
            // dx := dx - dot_y_dy
            // dx := dx * y
            // postorder:
            // dot_st1_dst1 := dot(st1, grad[st1])
            // grad[s0] := grad[st1]
            // grad[s0] := grad[s0] - dot_st1_dst1
            // grad[s0] := grad[s0] * st1

            // prepend postorder from grad[st1] directly using grad[s0] as memory location, as we will set grad[s0] := grad[st1]
            // sm           := softmax(s0)
            // grad[s0]     := sm*(1.0 - eps)
            // grad[s0]     := grad[s0] + eps
            // grad[s0]     := s1 / grad[s0]
            // grad[s0]     := grad[s0]*(1.0-eps)*-grad[cel]
            // dot_st1_dst1 := dot(sm, grad[s0])
            // grad[s0]     := grad[s0] - dot_st1_dst1
            // grad[s0]     := grad[s0] * sm
        }

        // soft_max
        ggml_float sum = 0.0;
        {
            float max = -INFINITY;
            ggml_vec_max_f32(nc, &max, s0);

            uint16_t scvt;
            for (int i = 0; i < nc; i++) {
                if (s0[i] == -INFINITY) {
                    sm[i] = 0.0f;
                } else {
                    // const float val = (s0[i] == -INFINITY) ? 0.0 : exp(s0[i] - max);
                    ggml_fp16_t s = GGML_FP32_TO_FP16(s0[i] - max);
                    memcpy(&scvt, &s, sizeof(scvt));
                    const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt]);
                    sum += (ggml_float)val;
                    sm[i] = val;
                }
            }

            assert(sum > 0.0);
            sum = 1.0/sum;
        }

        float dot_st1_dst1 = 0;
        ggml_vec_scale_f32(nc, sm, sum);
        ggml_vec_cpy_f32  (nc, ds0, sm);
        ggml_vec_scale_f32(nc, ds0, (1.0f - eps));
        ggml_vec_add1_f32 (nc, ds0, ds0, eps);
        ggml_vec_div_f32  (nc, ds0, s1, ds0);
        ggml_vec_scale_f32(nc, ds0, -(1.0f - eps)*d[0]);
        ggml_vec_dot_f32  (nc, &dot_st1_dst1, sm, ds0);
        ggml_vec_acc1_f32 (nc, ds0, -dot_st1_dst1);
        ggml_vec_mul_f32  (nc, ds0, ds0, sm);

#ifndef NDEBUG
        for (int i = 0; i < nc; ++i) {
            assert(!isnan(sm[i]));
            assert(!isinf(sm[i]));
            assert(!isnan(ds0[i]));
            assert(!isinf(ds0[i]));
        }
#endif
    }
}
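
// note: folding the postorder steps above into one expression, each row computes
//
//   g        = -d[0] * (1 - eps) * s1 / (eps + (1 - eps)*sm)
//   grad[s0] = sm * (g - dot(sm, g))
//
// with sm = softmax(s0): the gradient of -s1*log(eps + (1 - eps)*softmax(s0))
// pushed through the softmax Jacobian and scaled by the incoming loss gradient
// d[0] (= opt0, the gradient of the scalar cross entropy loss).
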
static void ggml_compute_forward_cross_entropy_loss_back(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * src0,
        const struct ggml_tensor * src1,
        const struct ggml_tensor * opt0,
        struct ggml_tensor * dst) {
    switch (src0->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_cross_entropy_loss_back_f32(params, src0, src1, opt0, dst);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
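
// illustrative sketch of the corresponding graph-level usage (assumes the
// public declarations in ggml.h, in particular ggml_cross_entropy_loss();
// n_vocab and n_examples are placeholders):
//
//   struct ggml_init_params ip = { /*.mem_size =*/ 128*1024*1024, /*.mem_buffer =*/ NULL, /*.no_alloc =*/ false };
//   struct ggml_context * ctx = ggml_init(ip);
//
//   // one row of logits and one row of (e.g. one-hot) target probabilities per example
//   struct ggml_tensor * logits = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_vocab, n_examples);
//   struct ggml_tensor * labels = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_vocab, n_examples);
//   ggml_set_param(ctx, logits); // allocates logits->grad
//
//   struct ggml_tensor * loss = ggml_cross_entropy_loss(ctx, logits, labels);
//
//   struct ggml_cgraph gf = ggml_build_forward (loss);
//   struct ggml_cgraph gb = ggml_build_backward(ctx, &gf, false);
//   ggml_graph_compute(ctx, &gf); // forward:  scalar loss in loss->data
//   ggml_graph_compute(ctx, &gb); // backward: dloss/dlogits in logits->grad
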
/////////////////////////////////

static void ggml_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor) {
    GGML_ASSERT(params);

#ifdef GGML_USE_CUBLAS
    bool skip_cpu = ggml_cuda_compute_forward(params, tensor);
    if (skip_cpu) {
        return;
    }
    GGML_ASSERT(tensor->src0 == NULL || tensor->src0->backend == GGML_BACKEND_CPU);
    GGML_ASSERT(tensor->src1 == NULL || tensor->src1->backend == GGML_BACKEND_CPU);
#endif // GGML_USE_CUBLAS

    switch (tensor->op) {
        case GGML_OP_DUP:
            {
                ggml_compute_forward_dup(params, tensor->src0, tensor);
            } break;
        case GGML_OP_ADD:
            {
                ggml_compute_forward_add(params, tensor->src0, tensor->src1, tensor);
            } break;
        case GGML_OP_ADD1:
            {
                ggml_compute_forward_add1(params, tensor->src0, tensor->src1, tensor);
            } break;
        case GGML_OP_ACC:
            {
                ggml_compute_forward_acc(params, tensor->src0, tensor->src1, tensor->opt[0], tensor);
            } break;
        case GGML_OP_SUB:
            {
                ggml_compute_forward_sub(params, tensor->src0, tensor->src1, tensor);
            } break;
        case GGML_OP_MUL:
            {
                ggml_compute_forward_mul(params, tensor->src0, tensor->src1, tensor);
            } break;
        case GGML_OP_DIV:
            {
                ggml_compute_forward_div(params, tensor->src0, tensor->src1, tensor);
            } break;
        case GGML_OP_SQR:
            {
                ggml_compute_forward_sqr(params, tensor->src0, tensor);
            } break;
        case GGML_OP_SQRT:
            {
                ggml_compute_forward_sqrt(params, tensor->src0, tensor);
            } break;
        case GGML_OP_LOG:
            {
                ggml_compute_forward_log(params, tensor->src0, tensor);
            } break;
        case GGML_OP_SUM:
            {
                ggml_compute_forward_sum(params, tensor->src0, tensor);
            } break;
        case GGML_OP_SUM_ROWS:
            {
                ggml_compute_forward_sum_rows(params, tensor->src0, tensor);
            } break;
        case GGML_OP_MEAN:
            {
                ggml_compute_forward_mean(params, tensor->src0, tensor);
            } break;
        case GGML_OP_REPEAT:
            {
                ggml_compute_forward_repeat(params, tensor->src0, tensor);
            } break;
        case GGML_OP_REPEAT_BACK:
            {
                ggml_compute_forward_repeat_back(params, tensor->src0, tensor);
            } break;
        case GGML_OP_ABS:
            {
                ggml_compute_forward_abs(params, tensor->src0, tensor);
            } break;
        case GGML_OP_SGN:
            {
                ggml_compute_forward_sgn(params, tensor->src0, tensor);
            } break;
        case GGML_OP_NEG:
            {
                ggml_compute_forward_neg(params, tensor->src0, tensor);
            } break;
        case GGML_OP_STEP:
            {
                ggml_compute_forward_step(params, tensor->src0, tensor);
            } break;
        case GGML_OP_RELU:
            {
                ggml_compute_forward_relu(params, tensor->src0, tensor);
            } break;
        case GGML_OP_GELU:
            {
                ggml_compute_forward_gelu(params, tensor->src0, tensor);
            } break;
        case GGML_OP_GELU_QUICK:
            {
                ggml_compute_forward_gelu_quick(params, tensor->src0, tensor);
            } break;
        case GGML_OP_SILU:
            {
                ggml_compute_forward_silu(params, tensor->src0, tensor);
            } break;
        case GGML_OP_SILU_BACK:
            {
                ggml_compute_forward_silu_back(params, tensor->src0, tensor->src1, tensor);
            } break;
        case GGML_OP_NORM:
            {
                ggml_compute_forward_norm(params, tensor->src0, tensor);
            } break;
        case GGML_OP_RMS_NORM:
            {
                ggml_compute_forward_rms_norm(params, tensor->src0, tensor);
            } break;
        case GGML_OP_RMS_NORM_BACK:
            {
                ggml_compute_forward_rms_norm_back(params, tensor->src0, tensor->src1, tensor);
            } break;
        case GGML_OP_MUL_MAT:
            {
                ggml_compute_forward_mul_mat(params, tensor->src0, tensor->src1, tensor);
            } break;
        case GGML_OP_OUT_PROD:
            {
                ggml_compute_forward_out_prod(params, tensor->src0, tensor->src1, tensor);
            } break;
        case GGML_OP_SCALE:
            {
                ggml_compute_forward_scale(params, tensor->src0, tensor->src1, tensor);
            } break;
        case GGML_OP_SET:
            {
                ggml_compute_forward_set(params, tensor->src0, tensor->src1, tensor->opt[0], tensor);
            } break;
        case GGML_OP_CPY:
            {
                ggml_compute_forward_cpy(params, tensor->src0, tensor);
            } break;
        case GGML_OP_CONT:
            {
                ggml_compute_forward_cont(params, tensor->src0, tensor);
            } break;
        case GGML_OP_RESHAPE:
            {
                ggml_compute_forward_reshape(params, tensor->src0, tensor);
            } break;
        case GGML_OP_VIEW:
            {
                ggml_compute_forward_view(params, tensor->src0);
            } break;
        case GGML_OP_PERMUTE:
            {
                ggml_compute_forward_permute(params, tensor->src0);
            } break;
        case GGML_OP_TRANSPOSE:
            {
                ggml_compute_forward_transpose(params, tensor->src0);
            } break;
        case GGML_OP_GET_ROWS:
            {
                ggml_compute_forward_get_rows(params, tensor->src0, tensor->src1, tensor);
            } break;
        case GGML_OP_GET_ROWS_BACK:
            {
                ggml_compute_forward_get_rows_back(params, tensor->src0, tensor->src1, tensor->opt[0], tensor);
            } break;
        case GGML_OP_DIAG:
            {
                ggml_compute_forward_diag(params, tensor->src0, tensor);
            } break;
        case GGML_OP_DIAG_MASK_INF:
            {
                ggml_compute_forward_diag_mask_inf(params, tensor->src0, tensor->src1, tensor);
            } break;
        case GGML_OP_DIAG_MASK_ZERO:
            {
                ggml_compute_forward_diag_mask_zero(params, tensor->src0, tensor->src1, tensor);
            } break;
        case GGML_OP_SOFT_MAX:
            {
                ggml_compute_forward_soft_max(params, tensor->src0, tensor);
            } break;
        case GGML_OP_SOFT_MAX_BACK:
            {
                ggml_compute_forward_soft_max_back(params, tensor->src0, tensor->src1, tensor);
            } break;
        case GGML_OP_ROPE:
            {
                ggml_compute_forward_rope(params, tensor->src0, tensor->src1, tensor);
            } break;
        case GGML_OP_ROPE_BACK:
            {
                ggml_compute_forward_rope_back(params, tensor->src0, tensor->src1, tensor);
            } break;
        case GGML_OP_ALIBI:
            {
                ggml_compute_forward_alibi(params, tensor->src0, tensor->src1, tensor);
            } break;
        case GGML_OP_CLAMP:
            {
                ggml_compute_forward_clamp(params, tensor->src0, tensor->src1, tensor);
            } break;
        case GGML_OP_CONV_1D_S1_PH:
            {
                ggml_compute_forward_conv_1d_s1_ph(params, tensor->src0, tensor->src1, tensor);
            } break;
        case GGML_OP_CONV_1D_S2_PH:
            {
                ggml_compute_forward_conv_1d_s2_ph(params, tensor->src0, tensor->src1, tensor);
            } break;
        case GGML_OP_CONV_2D_SK_P0:
            {
                ggml_compute_forward_conv_2d_sk_p0(params, tensor->src0, tensor->src1, tensor);
            } break;
        case GGML_OP_FLASH_ATTN:
            {
                const int32_t t = ggml_get_i32_1d(tensor->opt[1], 0);
                GGML_ASSERT(t == 0 || t == 1);
                const bool masked = t != 0;
                ggml_compute_forward_flash_attn(params, tensor->src0, tensor->src1, tensor->opt[0], masked, tensor);
            } break;
        case GGML_OP_FLASH_FF:
            {
                ggml_compute_forward_flash_ff(params, tensor->src0, tensor->src1, tensor->opt[0], tensor->opt[1], tensor->opt[2], tensor);
            } break;
        case GGML_OP_FLASH_ATTN_BACK:
            {
                int32_t t = ggml_get_i32_1d(tensor->opt[2], 0);
                GGML_ASSERT(t == 0 || t == 1);
                bool masked = t != 0;
                ggml_compute_forward_flash_attn_back(params, tensor->src0, tensor->src1, tensor->opt[0], tensor->opt[1], masked, tensor);
            } break;
        case GGML_OP_WIN_PART:
            {
                ggml_compute_forward_win_part(params, tensor->src0, tensor->opt[0], tensor);
            } break;
        case GGML_OP_WIN_UNPART:
            {
                ggml_compute_forward_win_unpart(params, tensor->src0, tensor->opt[0], tensor);
            } break;
        case GGML_OP_MAP_UNARY:
            {
                const ggml_unary_op_f32_t fun = *((ggml_unary_op_f32_t *)tensor->opt[0]->data);
                ggml_compute_forward_map_unary(params, tensor->src0, tensor, fun);
            }
            break;
        case GGML_OP_MAP_BINARY:
            {
                const ggml_binary_op_f32_t fun = *((ggml_binary_op_f32_t *)tensor->opt[0]->data);
                ggml_compute_forward_map_binary(params, tensor->src0, tensor->src1, tensor, fun);
            }
            break;
        case GGML_OP_MAP_CUSTOM1:
            {
                const ggml_custom1_op_f32_t fun = *((ggml_custom1_op_f32_t *)tensor->opt[0]->data);
                ggml_compute_forward_map_custom1(params, tensor->src0, tensor, fun);
            }
            break;
        case GGML_OP_MAP_CUSTOM2:
            {
                const ggml_custom2_op_f32_t fun = *((ggml_custom2_op_f32_t *)tensor->opt[0]->data);
                ggml_compute_forward_map_custom2(params, tensor->src0, tensor->src1, tensor, fun);
            }
            break;
        case GGML_OP_MAP_CUSTOM3:
            {
                const ggml_custom3_op_f32_t fun = *((ggml_custom3_op_f32_t *)tensor->opt[0]->data);
                ggml_compute_forward_map_custom3(params, tensor->src0, tensor->src1, tensor->opt[1], tensor, fun);
            }
            break;
        case GGML_OP_CROSS_ENTROPY_LOSS:
            {
                ggml_compute_forward_cross_entropy_loss(params, tensor->src0, tensor->src1, tensor);
            }
            break;
        case GGML_OP_CROSS_ENTROPY_LOSS_BACK:
            {
                ggml_compute_forward_cross_entropy_loss_back(params, tensor->src0, tensor->src1, tensor->opt[0], tensor);
            }
            break;
        case GGML_OP_NONE:
            {
                // nop
            } break;
        case GGML_OP_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }
}
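
// note: ggml_compute_forward() is called up to three times per node, once per
// task phase: GGML_TASK_INIT and GGML_TASK_FINALIZE run on a single thread
// (setup of work buffers, reduction of per-thread partial results), while
// GGML_TASK_COMPUTE runs on node->n_tasks threads with params->ith/params->nth
// identifying the worker; see ggml_graph_compute_thread() below.
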
////////////////////////////////////////////////////////////////////////////////

static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor * tensor, bool inplace) {
    struct ggml_tensor * src0 = tensor->src0;
    struct ggml_tensor * src1 = tensor->src1;

    switch (tensor->op) {
        case GGML_OP_DUP:
            {
                if (src0->grad) {
                    src0->grad = ggml_add_impl(ctx, src0->grad, tensor->grad, inplace);
                }
            } break;
        case GGML_OP_ADD:
            {
                if (src0->grad) {
                    src0->grad = ggml_add_impl(ctx, src0->grad, tensor->grad, inplace);
                }
                if (src1->grad) {
                    src1->grad = ggml_add_impl(ctx, src1->grad, tensor->grad, inplace);
                }
            } break;
        case GGML_OP_ADD1:
            {
                if (src0->grad) {
                    src0->grad = ggml_add_impl(ctx, src0->grad, tensor->grad, inplace);
                }
                if (src1->grad) {
                    src1->grad = ggml_add_impl(ctx,
                        src1->grad,
                        ggml_mean(ctx, tensor->grad), // TODO: should probably be sum instead of mean
                        inplace);
                }
            } break;
        case GGML_OP_ACC:
            {
                if (src0->grad) {
                    src0->grad = ggml_add_impl(ctx, src0->grad, tensor->grad, inplace);
                }
                if (src1->grad) {
                    GGML_ASSERT(ggml_nelements(tensor->opt[0]) == 5);
                    GGML_ASSERT(tensor->opt[0]->type == GGML_TYPE_I32);
                    const size_t nb1    = (( int32_t * ) tensor->opt[0]->data)[0];
                    const size_t nb2    = (( int32_t * ) tensor->opt[0]->data)[1];
                    const size_t nb3    = (( int32_t * ) tensor->opt[0]->data)[2];
                    const size_t offset = (( int32_t * ) tensor->opt[0]->data)[3];

                    struct ggml_tensor * tensor_grad_view = ggml_view_4d(ctx,
                        tensor->grad,
                        src1->grad->ne[0],
                        src1->grad->ne[1],
                        src1->grad->ne[2],
                        src1->grad->ne[3],
                        nb1, nb2, nb3, offset);

                    src1->grad =
                        ggml_add_impl(ctx,
                            src1->grad,
                            ggml_reshape(ctx,
                                ggml_cont(ctx, tensor_grad_view),
                                src1->grad),
                            inplace);
                }
            } break;
        case GGML_OP_SUB:
            {
                if (src0->grad) {
                    src0->grad = ggml_add_impl(ctx, src0->grad, tensor->grad, inplace);
                }
                if (src1->grad) {
                    src1->grad = ggml_sub_impl(ctx, src1->grad, tensor->grad, inplace);
                }
            } break;
        case GGML_OP_MUL:
            {
                if (src0->grad) {
                    src0->grad =
                        ggml_add_impl(ctx,
                            src0->grad,
                            ggml_mul(ctx, src1, tensor->grad),
                            inplace);
                }
                if (src1->grad) {
                    src1->grad =
                        ggml_add_impl(ctx,
                            src1->grad,
                            ggml_mul(ctx, src0, tensor->grad),
                            inplace);
                }
            } break;
        case GGML_OP_DIV:
            {
                if (src0->grad) {
                    src0->grad =
                        ggml_add_impl(ctx,
                            src0->grad,
                            ggml_div(ctx, tensor->grad, src1),
                            inplace);
                }
                if (src1->grad) {
                    src1->grad =
                        ggml_sub_impl(ctx,
                            src1->grad,
                            ggml_mul(ctx,
                                tensor->grad,
                                ggml_div(ctx, tensor, src1)),
                            inplace);
                }
            } break;
        case GGML_OP_SQR:
            {
                if (src0->grad) {
                    src0->grad =
                        ggml_add_impl(ctx,
                            src0->grad,
                            ggml_scale(ctx,
                                ggml_mul(ctx, src0, tensor->grad),
                                ggml_new_f32(ctx, 2.0f)),
                            inplace);
                }
            } break;
        case GGML_OP_SQRT:
            {
                if (src0->grad) {
                    src0->grad =
                        ggml_add_impl(ctx,
                            src0->grad,
                            ggml_scale(ctx,
                                ggml_div(ctx,
                                    tensor->grad,
                                    tensor),
                                ggml_new_f32(ctx, 0.5f)),
                            inplace);
                }
            } break;
        case GGML_OP_LOG:
            {
                if (src0->grad) {
                    src0->grad =
                        ggml_add_impl(ctx,
                            src0->grad,
                            ggml_div(ctx,
                                tensor->grad,
                                src0),
                            inplace);
                }
            } break;
        case GGML_OP_SUM:
            {
                if (src0->grad) {
                    src0->grad =
                        ggml_add1_impl(ctx,
                            src0->grad,
                            tensor->grad,
                            inplace);
                }
            } break;
        case GGML_OP_SUM_ROWS:
            {
                if (src0->grad) {
                    src0->grad =
                        ggml_add_impl(ctx,
                            src0->grad,
                            ggml_repeat(ctx,
                                tensor->grad,
                                src0->grad),
                            inplace);
                }
            } break;
        case GGML_OP_MEAN:
            {
                GGML_ASSERT(false); // TODO: implement
            } break;
        case GGML_OP_REPEAT:
            {
                // necessary for llama
                if (src0->grad) {
                    src0->grad = ggml_add_impl(ctx,
                        src0->grad,
                        ggml_repeat_back(ctx, tensor->grad, src0->grad),
                        inplace);
                }
            } break;
        case GGML_OP_REPEAT_BACK:
            {
                if (src0->grad) {
                    // TODO: test this
                    src0->grad = ggml_add_impl(ctx,
                        src0->grad,
                        ggml_repeat(ctx, tensor->grad, src0->grad),
                        inplace);
                }
            } break;
        case GGML_OP_ABS:
            {
                if (src0->grad) {
                    src0->grad =
                        ggml_add_impl(ctx,
                            src0->grad,
                            ggml_mul(ctx,
                                ggml_sgn(ctx, src0),
                                tensor->grad),
                            inplace);
                }
            } break;
        case GGML_OP_SGN:
            {
                if (src0->grad) {
                    // noop
                }
            } break;
        case GGML_OP_NEG:
            {
                if (src0->grad) {
                    src0->grad = ggml_sub_impl(ctx, src0->grad, tensor->grad, inplace);
                }
            } break;
        case GGML_OP_STEP:
            {
                if (src0->grad) {
                    // noop
                }
            } break;
        case GGML_OP_RELU:
            {
                if (src0->grad) {
                    // dsrc0 = step(src0) * dtensor, accumulated into the gradient
                    src0->grad = ggml_add_impl(ctx,
                        src0->grad,
                        ggml_mul(ctx,
                            ggml_step(ctx, src0),
                            tensor->grad),
                        inplace);
                }
            } break;
        case GGML_OP_GELU:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_GELU_QUICK:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_ALIBI:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_CLAMP:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_SILU:
            {
                // necessary for llama
                if (src0->grad) {
                    src0->grad = ggml_add_impl(ctx,
                        src0->grad,
                        ggml_silu_back(ctx, src0, tensor->grad),
                        inplace);
                }
            } break;
        case GGML_OP_SILU_BACK:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_NORM:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_RMS_NORM:
            {
                // necessary for llama
                if (src0->grad) {
                    src0->grad = ggml_add_impl(ctx,
                        src0->grad,
                        ggml_rms_norm_back(ctx, src0, tensor->grad),
                        inplace);
                }
            } break;
        case GGML_OP_RMS_NORM_BACK:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_MUL_MAT:
            {
                // https://cs231n.github.io/optimization-2/#staged
                // # forward pass
                // s0 = np.random.randn(5, 10)
                // s1 = np.random.randn(10, 3)
                // t = s0.dot(s1)

                // # now suppose we had the gradient on t from above in the circuit
                // dt = np.random.randn(*t.shape) # same shape as t
                // ds0 = dt.dot(s1.T) #.T gives the transpose of the matrix
                // ds1 = s0.T.dot(dt)

                // tensor.shape [m,p]
                // src0.shape   [n,m]
                // src1.shape   [n,p]

                // necessary for llama
                if (src0->grad) {
                    src0->grad =
                        ggml_add_impl(ctx,
                            src0->grad,
                            ggml_out_prod(ctx, // [n,m]
                                src1,          // [n,p]
                                tensor->grad), // [m,p]
                            inplace);
                }
                if (src1->grad) {
                    src1->grad =
                        ggml_add_impl(ctx,
                            src1->grad,
                            // ggml_mul_mat(ctx,                   // [n,p]
                            //     ggml_cont(ctx,                  // [m,n]
                            //         ggml_transpose(ctx, src0)), // [m,n]
                            //     tensor->grad),                  // [m,p]

                            // // when src0 is bigger than tensor->grad (this is mostly the case in llama),
                            // // avoid transpose of src0, rather transpose smaller tensor->grad
                            // // and then use ggml_out_prod
                            ggml_out_prod(ctx,      // [n,p]
                                src0,               // [n,m]
                                ggml_transpose(ctx, // [p,m]
                                    tensor->grad)), // [m,p]
                            inplace);
                }
            } break;
        case GGML_OP_OUT_PROD:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_SCALE:
            {
                // necessary for llama
                if (src0->grad) {
                    src0->grad =
                        ggml_add_impl(ctx,
                            src0->grad,
                            ggml_scale_impl(ctx, tensor->grad, src1, false),
                            inplace);
                }
                if (src1->grad) {
                    src1->grad =
                        ggml_add_impl(ctx,
                            src1->grad,
                            ggml_sum(ctx, ggml_mul_impl(ctx, tensor->grad, src0, false)),
                            inplace);
                }
            } break;
        case GGML_OP_SET:
            {
                GGML_ASSERT(ggml_nelements(tensor->opt[0]) == 5);
                GGML_ASSERT(tensor->opt[0]->type == GGML_TYPE_I32);
                const size_t nb1    = (( int32_t * ) tensor->opt[0]->data)[0];
                const size_t nb2    = (( int32_t * ) tensor->opt[0]->data)[1];
                const size_t nb3    = (( int32_t * ) tensor->opt[0]->data)[2];
                const size_t offset = (( int32_t * ) tensor->opt[0]->data)[3];

                struct ggml_tensor * tensor_grad_view = NULL;

                if (src0->grad || src1->grad) {
                    GGML_ASSERT(src0->type == tensor->type);
                    GGML_ASSERT(tensor->grad->type == tensor->type);
                    GGML_ASSERT(tensor->grad->type == src1->grad->type);

                    tensor_grad_view = ggml_view_4d(ctx,
                        tensor->grad,
                        src1->grad->ne[0],
                        src1->grad->ne[1],
                        src1->grad->ne[2],
                        src1->grad->ne[3],
                        nb1, nb2, nb3, offset);
                }

                if (src0->grad) {
                    src0->grad = ggml_add_impl(ctx,
                        src0->grad,
                        ggml_acc_impl(ctx,
                            tensor->grad,
                            ggml_neg(ctx, tensor_grad_view),
                            nb1, nb2, nb3, offset, false),
                        inplace);
                }

                if (src1->grad) {
                    src1->grad =
                        ggml_add_impl(ctx,
                            src1->grad,
                            ggml_reshape(ctx,
                                ggml_cont(ctx, tensor_grad_view),
                                src1->grad),
                            inplace);
                }
            } break;
        case GGML_OP_CPY:
            {
                // necessary for llama
                // cpy overwrites value of src1 by src0 and returns view(src1)
                // the overwriting is mathematically equivalent to:
                // tensor = src0 * 1 + src1 * 0
                if (src0->grad) {
                    // dsrc0 = dtensor * 1
                    src0->grad = ggml_add_impl(ctx, src0->grad, tensor->grad, inplace);
                }
                if (src1->grad) {
                    // dsrc1 = dtensor * 0 -> noop
                }
            } break;
        case GGML_OP_CONT:
            {
                // same as cpy
                if (src0->grad) {
                    GGML_ASSERT(ggml_is_contiguous(src0->grad));
                    GGML_ASSERT(ggml_is_contiguous(tensor->grad));
                    src0->grad = ggml_add_impl(ctx, src0->grad, tensor->grad, inplace);
                }
            } break;
        case GGML_OP_RESHAPE:
            {
                // necessary for llama
                if (src0->grad) {
                    src0->grad =
                        ggml_add_impl(ctx, src0->grad,
                            ggml_reshape(ctx, tensor->grad, src0->grad),
                            inplace);
                }
            } break;
        case GGML_OP_VIEW:
            {
                // necessary for llama
                if (src0->grad) {
                    size_t offset;

                    GGML_ASSERT(sizeof(offset) <= ggml_nbytes(tensor->opt[0]));
                    memcpy(&offset, tensor->opt[0]->data, sizeof(offset));

                    size_t nb1 = tensor->nb[1];
                    size_t nb2 = tensor->nb[2];
                    size_t nb3 = tensor->nb[3];

                    if (src0->type != src0->grad->type) {
                        // gradient is typically F32, but src0 could be other type
                        size_t ng = ggml_element_size(src0->grad);
                        size_t n0 = ggml_element_size(src0);
                        GGML_ASSERT(offset % n0 == 0);
                        GGML_ASSERT(nb1 % n0 == 0);
                        GGML_ASSERT(nb2 % n0 == 0);
                        GGML_ASSERT(nb3 % n0 == 0);
                        offset = (offset / n0) * ng;
                        nb1 = (nb1 / n0) * ng;
                        nb2 = (nb2 / n0) * ng;
                        nb3 = (nb3 / n0) * ng;
                    }

                    src0->grad = ggml_acc_impl(ctx, src0->grad, tensor->grad, nb1, nb2, nb3, offset, inplace);
                }
            } break;
        case GGML_OP_PERMUTE:
            {
                // necessary for llama
                if (src0->grad) {
                    int32_t * axes = (int32_t *) tensor->opt[0]->data;
                    int axis0 = axes[0] & 0x3;
                    int axis1 = axes[1] & 0x3;
                    int axis2 = axes[2] & 0x3;
                    int axis3 = axes[3] & 0x3;
                    int axes_backward[4] = {0,0,0,0};
                    axes_backward[axis0] = 0;
                    axes_backward[axis1] = 1;
                    axes_backward[axis2] = 2;
                    axes_backward[axis3] = 3;
                    src0->grad =
                        ggml_add_impl(ctx, src0->grad,
                            ggml_permute(ctx,
                                tensor->grad,
                                axes_backward[0],
                                axes_backward[1],
                                axes_backward[2],
                                axes_backward[3]),
                            inplace);
                }
            } break;
        case GGML_OP_TRANSPOSE:
            {
                // necessary for llama
                if (src0->grad) {
                    src0->grad =
                        ggml_add_impl(ctx, src0->grad,
                            ggml_transpose(ctx, tensor->grad),
                            inplace);
                }
            } break;
        case GGML_OP_GET_ROWS:
            {
                // necessary for llama (only for tokenizer)
                if (src0->grad) {
                    src0->grad =
                        ggml_add_impl(ctx, src0->grad,
                            ggml_get_rows_back(ctx, tensor->grad, src1, src0->grad),
                            inplace);
                }
                if (src1->grad) {
                    // noop
                }
            } break;
        case GGML_OP_GET_ROWS_BACK:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_DIAG:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_DIAG_MASK_INF:
            {
                // necessary for llama
                if (src0->grad) {
                    assert(src1->type == GGML_TYPE_I32);
                    assert(ggml_nelements(src1) == 2);
                    const int n_past = ((int32_t *) src1->data)[0];
                    src0->grad =
                        ggml_add_impl(ctx, src0->grad,
                            ggml_diag_mask_zero_impl(ctx, tensor->grad, n_past, false),
                            inplace);
                }
                if (src1->grad) {
                    // noop
                }
            } break;
        case GGML_OP_DIAG_MASK_ZERO:
            {
                // necessary for llama
                if (src0->grad) {
                    assert(src1->type == GGML_TYPE_I32);
                    assert(ggml_nelements(src1) == 2);
                    const int n_past = ((int32_t *) src1->data)[0];
                    src0->grad =
                        ggml_add_impl(ctx, src0->grad,
                            ggml_diag_mask_zero_impl(ctx, tensor->grad, n_past, false),
                            inplace);
                }
                if (src1->grad) {
                    // noop
                }
            } break;
        case GGML_OP_SOFT_MAX:
            {
                // necessary for llama
                if (src0->grad) {
                    src0->grad =
                        ggml_add_impl(ctx, src0->grad,
                            ggml_soft_max_back(ctx, tensor->grad, tensor),
                            inplace);
                }
            } break;
        case GGML_OP_SOFT_MAX_BACK:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_ROPE:
            {
                // necessary for llama
                if (src0->grad) {
                    assert(src1->type == GGML_TYPE_I32);
                    assert(ggml_nelements(src1) == 3);
                    const int n_past = ((int32_t *) src1->data)[0];
                    const int n_dims = ((int32_t *) src1->data)[1];
                    const int mode   = ((int32_t *) src1->data)[2];
                    src0->grad = ggml_add_impl(ctx,
                        src0->grad,
                        ggml_rope_back(ctx,
                            tensor->grad,
                            n_past,
                            n_dims,
                            mode),
                        inplace);
                }
                if (src1->grad) {
                    // noop
                }
            } break;
        case GGML_OP_ROPE_BACK:
            {
                if (src0->grad) {
                    assert(src1->type == GGML_TYPE_I32);
                    assert(ggml_nelements(src1) == 4);
                    const int n_past = ((int32_t *) src1->data)[0];
                    const int n_dims = ((int32_t *) src1->data)[1];
                    const int mode   = ((int32_t *) src1->data)[2];
                    const int n_ctx  = ((int32_t *) src1->data)[3];
                    src0->grad = ggml_add_impl(ctx,
                        src0->grad,
                        ggml_rope(ctx,
                            tensor->grad,
                            n_past,
                            n_dims,
                            mode,
                            n_ctx),
                        inplace);
                }
                if (src1->grad) {
                    // noop
                }
            } break;
        case GGML_OP_CONV_1D_S1_PH:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_CONV_1D_S2_PH:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_CONV_2D_SK_P0:
            {
                GGML_ASSERT(false); // TODO: not implemented
            } break;
        case GGML_OP_FLASH_ATTN:
            {
                struct ggml_tensor * flash_grad = NULL;
                if (src0->grad || src1->grad || tensor->opt[0]->grad) {
                    int32_t t = ggml_get_i32_1d(tensor->opt[1], 0);
                    GGML_ASSERT(t == 0 || t == 1);
                    bool masked = t != 0;
                    flash_grad =
                        ggml_flash_attn_back(ctx,
                            src0,
                            src1,
                            tensor->opt[0],
                            tensor->grad,
                            masked);
                }

                if (src0->grad) {
                    struct ggml_tensor * grad_q = NULL;
                    const size_t nb0    = flash_grad->nb[0];
                    const size_t offset = 0;
                    switch(src0->n_dims) {
                        case 2:
                            {
                                grad_q = ggml_view_2d(ctx,
                                    flash_grad,
                                    src0->ne[0],
                                    src0->ne[1],
                                    nb0*src0->ne[0],
                                    offset);
                            } break;
                        case 3:
                            {
                                grad_q = ggml_view_3d(ctx,
                                    flash_grad,
                                    src0->ne[0],
                                    src0->ne[1],
                                    src0->ne[2],
                                    nb0*src0->ne[0],
                                    nb0*src0->ne[0]*src0->ne[1],
                                    offset);
                            } break;
                        case 4:
                            {
                                grad_q = ggml_view_4d(ctx,
                                    flash_grad,
                                    src0->ne[0],
                                    src0->ne[1],
                                    src0->ne[2],
                                    src0->ne[3],
                                    nb0*src0->ne[0],
                                    nb0*src0->ne[0]*src0->ne[1],
                                    nb0*src0->ne[0]*src0->ne[1]*src0->ne[2],
                                    offset);
                            } break;
                    }

                    src0->grad = ggml_add_impl(ctx,
                        src0->grad,
                        grad_q,
                        inplace);
                }

                if (src1->grad) {
                    struct ggml_tensor * grad_k = NULL;
                    const size_t nb0    = flash_grad->nb[0];
                    const size_t offset = nb0*src0->ne[0]*src0->ne[1]*src0->ne[2]*src0->ne[3];
                    switch(src1->n_dims) {
                        case 2:
                            {
                                grad_k = ggml_view_2d(ctx,
                                    flash_grad,
                                    src1->ne[0],
                                    src1->ne[1],
                                    nb0*src1->ne[0],
                                    offset);
                            } break;
                        case 3:
                            {
                                grad_k = ggml_view_3d(ctx,
                                    flash_grad,
                                    src1->ne[0],
                                    src1->ne[1],
                                    src1->ne[2],
                                    nb0*src1->ne[0],
                                    nb0*src1->ne[0]*src1->ne[1],
                                    offset);
                            } break;
                        case 4:
                            {
                                grad_k = ggml_view_4d(ctx,
                                    flash_grad,
                                    src1->ne[0],
                                    src1->ne[1],
                                    src1->ne[2],
                                    src1->ne[3],
                                    nb0*src1->ne[0],
                                    nb0*src1->ne[0]*src1->ne[1],
                                    nb0*src1->ne[0]*src1->ne[1]*src1->ne[2],
                                    offset);
                            } break;
                    }

                    src1->grad = ggml_add_impl(ctx,
                        src1->grad,
                        grad_k,
                        inplace);
                }

                struct ggml_tensor * opt0 = tensor->opt[0];

                if (opt0->grad) {
                    struct ggml_tensor * grad_v = NULL;
                    const size_t nb0    = flash_grad->nb[0];
                    const size_t offset = nb0*src0->ne[0]*src0->ne[1]*src0->ne[2]*src0->ne[3]
                                        + nb0*src1->ne[0]*src1->ne[1]*src1->ne[2]*src1->ne[3];
                    switch(opt0->n_dims) {
                        case 2:
                            {
                                grad_v = ggml_view_2d(ctx,
                                    flash_grad,
                                    opt0->ne[0],
                                    opt0->ne[1],
                                    nb0*opt0->ne[0],
                                    offset);
                            } break;
                        case 3:
                            {
                                grad_v = ggml_view_3d(ctx,
                                    flash_grad,
                                    opt0->ne[0],
                                    opt0->ne[1],
                                    opt0->ne[2],
                                    nb0*opt0->ne[0],
                                    nb0*opt0->ne[0]*opt0->ne[1],
                                    offset);
                            } break;
                        case 4:
                            {
                                grad_v = ggml_view_4d(ctx,
                                    flash_grad,
                                    opt0->ne[0],
                                    opt0->ne[1],
                                    opt0->ne[2],
                                    opt0->ne[3],
                                    nb0*opt0->ne[0],
                                    nb0*opt0->ne[0]*opt0->ne[1],
                                    nb0*opt0->ne[0]*opt0->ne[1]*opt0->ne[2],
                                    offset);
                            } break;
                    }

                    opt0->grad = ggml_add_impl(ctx,
                        opt0->grad,
                        grad_v,
                        inplace);
                }
            } break;
        case GGML_OP_FLASH_FF:
            {
                GGML_ASSERT(false); // not supported
            } break;
        case GGML_OP_FLASH_ATTN_BACK:
            {
                GGML_ASSERT(false); // not supported
            } break;
        case GGML_OP_WIN_PART:
        case GGML_OP_WIN_UNPART:
        case GGML_OP_MAP_UNARY:
        case GGML_OP_MAP_BINARY:
        case GGML_OP_MAP_CUSTOM1:
        case GGML_OP_MAP_CUSTOM2:
        case GGML_OP_MAP_CUSTOM3:
            {
                GGML_ASSERT(false); // not supported
            } break;
        case GGML_OP_CROSS_ENTROPY_LOSS:
            {
                if (src0->grad) {
                    src0->grad = ggml_add_impl(ctx,
                        src0->grad,
                        ggml_cross_entropy_loss_back(ctx,
                            src0,
                            src1,
                            tensor->grad),
                        inplace);
                }
            } break;
        case GGML_OP_CROSS_ENTROPY_LOSS_BACK:
            {
                GGML_ASSERT(false); // not supported
            } break;
        case GGML_OP_NONE:
            {
                // nop
            } break;
        case GGML_OP_COUNT:
            {
                GGML_ASSERT(false);
            } break;
    }
}
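
// illustrative sketch: the gradients produced above can be sanity-checked
// against finite differences using only public API calls (the test-grad0
// program in this repository does this systematically); x, loss, gf and gb
// are assumed to be set up as in the examples after ggml_build_backward():
//
//   const float h  = 1e-3f;
//   const float x0 = ggml_get_f32_1d(x, i);
//
//   ggml_set_f32_1d(x, i, x0 + h);
//   ggml_graph_compute(ctx, &gf);
//   const float f1 = ggml_get_f32_1d(loss, 0);
//
//   ggml_set_f32_1d(x, i, x0 - h);
//   ggml_graph_compute(ctx, &gf);
//   const float f0 = ggml_get_f32_1d(loss, 0);
//
//   ggml_set_f32_1d(x, i, x0); // restore
//   const float g_num = (f1 - f0)/(2.0f*h);          // central difference
//   const float g_ad  = ggml_get_f32_1d(x->grad, i); // from the backward graph
//   assert(fabsf(g_num - g_ad) <= 1e-2f);
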
static void ggml_visit_parents(struct ggml_cgraph * cgraph, struct ggml_tensor * node) {
    if (node->grad == NULL) {
        // this usually happens when we generate intermediate nodes from constants in the backward pass
        // it can also happen during forward pass, if the user performs computations with constants
        if (node->op != GGML_OP_NONE) {
            //GGML_PRINT_DEBUG("%s: warning: node %p has no grad, but op %d\n", __func__, (void *) node, node->op);
        }
    }

    // check if already visited
    for (int i = 0; i < cgraph->n_nodes; i++) {
        if (cgraph->nodes[i] == node) {
            return;
        }
    }

    for (int i = 0; i < cgraph->n_leafs; i++) {
        if (cgraph->leafs[i] == node) {
            return;
        }
    }

    if (node->src0) {
        ggml_visit_parents(cgraph, node->src0);
    }

    if (node->src1) {
        ggml_visit_parents(cgraph, node->src1);
    }

    for (int i = 0; i < GGML_MAX_OPT; ++i) {
        if (node->opt[i]) {
            ggml_visit_parents(cgraph, node->opt[i]);
        }
    }

    if (node->op == GGML_OP_NONE && node->grad == NULL) {
        // reached a leaf node, not part of the gradient graph (e.g. a constant)
        GGML_ASSERT(cgraph->n_leafs < GGML_MAX_NODES);

        if (strlen(node->name) == 0) {
            ggml_format_name(node, "leaf_%d", cgraph->n_leafs);
        }

        cgraph->leafs[cgraph->n_leafs] = node;
        cgraph->n_leafs++;
    } else {
        GGML_ASSERT(cgraph->n_nodes < GGML_MAX_NODES);

        if (strlen(node->name) == 0) {
            ggml_format_name(node, "node_%d", cgraph->n_nodes);
        }

        cgraph->nodes[cgraph->n_nodes] = node;
        cgraph->grads[cgraph->n_nodes] = node->grad;
        cgraph->n_nodes++;
    }
}
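
// note: ggml_visit_parents() appends a node only after all of its parents
// (src0, src1, opt[..]) have been appended, so cgraph->nodes ends up in
// topological order: evaluating nodes[0..n_nodes-1] front to back is a valid
// forward pass, and ggml_build_backward() below walks the same array back to
// front.
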
static void ggml_build_forward_impl(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor, bool expand) {
    if (!expand) {
        cgraph->n_nodes = 0;
        cgraph->n_leafs = 0;
    }

    const int n0 = cgraph->n_nodes;
    UNUSED(n0);

    ggml_visit_parents(cgraph, tensor);

    const int n_new = cgraph->n_nodes - n0;
    GGML_PRINT_DEBUG("%s: visited %d new nodes\n", __func__, n_new);

    if (n_new > 0) {
        // the last added node should always be the starting point
        GGML_ASSERT(cgraph->nodes[cgraph->n_nodes - 1] == tensor);
    }
}

void ggml_build_forward_expand(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor) {
    ggml_build_forward_impl(cgraph, tensor, true);
}

struct ggml_cgraph ggml_build_forward(struct ggml_tensor * tensor) {
    struct ggml_cgraph result = {
        /*.n_nodes      =*/ 0,
        /*.n_leafs      =*/ 0,
        /*.n_threads    =*/ GGML_DEFAULT_N_THREADS,
        /*.work_size    =*/ 0,
        /*.work         =*/ NULL,
        /*.nodes        =*/ { NULL },
        /*.grads        =*/ { NULL },
        /*.leafs        =*/ { NULL },
        /*.perf_runs    =*/ 0,
        /*.perf_cycles  =*/ 0,
        /*.perf_time_us =*/ 0,
    };

    ggml_build_forward_impl(&result, tensor, false);

    return result;
}
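
// illustrative sketch (placeholder shapes/values; ctx as returned by ggml_init()):
//
//   struct ggml_tensor * a = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4);
//   struct ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4);
//   ggml_set_f32(a, 2.0f);
//   ggml_set_f32(b, 3.0f);
//
//   struct ggml_tensor * f = ggml_sum(ctx, ggml_mul(ctx, a, b));
//
//   struct ggml_cgraph gf = ggml_build_forward(f);
//   gf.n_threads = 4;
//   ggml_graph_compute(ctx, &gf);
//   printf("f = %f\n", ggml_get_f32_1d(f, 0)); // 4 elements * 2 * 3 = 24
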
struct ggml_cgraph ggml_build_backward(struct ggml_context * ctx, struct ggml_cgraph * gf, bool keep) {
    struct ggml_cgraph result = *gf;

    GGML_ASSERT(gf->n_nodes > 0);

    // if we are keeping the gradient graph, we have to detach the gradient nodes from the original graph
    if (keep) {
        for (int i = 0; i < gf->n_nodes; i++) {
            struct ggml_tensor * node = gf->nodes[i];

            if (node->grad) {
                node->grad = ggml_dup_tensor(ctx, node);
                gf->grads[i] = node->grad;
            }
        }
    }

    for (int i = gf->n_nodes - 1; i >= 0; i--) {
        struct ggml_tensor * node = gf->nodes[i];

        // because we detached the grad nodes from the original graph, we can afford inplace operations
        if (node->grad) {
            ggml_compute_backward(ctx, node, keep);
        }
    }

    for (int i = gf->n_nodes - 1; i >= 0; i--) {
        struct ggml_tensor * node = gf->nodes[i];

        if (node->is_param) {
            GGML_PRINT_DEBUG("%s: found root node %p\n", __func__, (void *) node);
            ggml_build_forward_impl(&result, node->grad, true);
        }
    }

    return result;
}
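
// illustrative sketch, continuing the example above (a, b, f); note that
// ggml_set_param() is called before the expression for f is built:
//
//   ggml_set_param(ctx, a); // marks a as trainable and allocates a->grad
//
//   struct ggml_tensor * f  = ggml_sum(ctx, ggml_mul(ctx, a, b));
//   struct ggml_cgraph   gf = ggml_build_forward(f);
//   struct ggml_cgraph   gb = ggml_build_backward(ctx, &gf, /*keep =*/ false);
//
//   ggml_graph_reset(&gf);       // zero all gradients
//   ggml_set_f32(f->grad, 1.0f); // seed df/df = 1
//   ggml_graph_compute(ctx, &gb);
//   // a->grad now holds df/da, which equals b elementwise here
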
//
// thread data
//
// synchronization is done via busy loops
// I tried using spin locks, but not sure how to use them correctly - the things I tried were slower than busy loops
//

#ifdef __APPLE__

//#include <os/lock.h>
//
//typedef os_unfair_lock ggml_lock_t;
//
//#define ggml_lock_init(x)    UNUSED(x)
//#define ggml_lock_destroy(x) UNUSED(x)
//#define ggml_lock_lock       os_unfair_lock_lock
//#define ggml_lock_unlock     os_unfair_lock_unlock
//
//#define GGML_LOCK_INITIALIZER OS_UNFAIR_LOCK_INIT

typedef int ggml_lock_t;

#define ggml_lock_init(x)    UNUSED(x)
#define ggml_lock_destroy(x) UNUSED(x)
#define ggml_lock_lock(x)    UNUSED(x)
#define ggml_lock_unlock(x)  UNUSED(x)

#define GGML_LOCK_INITIALIZER 0

typedef pthread_t ggml_thread_t;

#define ggml_thread_create pthread_create
#define ggml_thread_join   pthread_join

#else

//typedef pthread_spinlock_t ggml_lock_t;

//#define ggml_lock_init(x) pthread_spin_init(x, PTHREAD_PROCESS_PRIVATE)
//#define ggml_lock_destroy pthread_spin_destroy
//#define ggml_lock_lock    pthread_spin_lock
//#define ggml_lock_unlock  pthread_spin_unlock

typedef int ggml_lock_t;

#define ggml_lock_init(x)    UNUSED(x)
#define ggml_lock_destroy(x) UNUSED(x)
#if defined(__x86_64__) || (defined(_MSC_VER) && defined(_M_AMD64))
#define ggml_lock_lock(x)    _mm_pause()
#else
#define ggml_lock_lock(x)    UNUSED(x)
#endif
#define ggml_lock_unlock(x)  UNUSED(x)

#define GGML_LOCK_INITIALIZER 0

typedef pthread_t ggml_thread_t;

#define ggml_thread_create pthread_create
#define ggml_thread_join   pthread_join

#endif

#ifdef __linux__
void set_numa_thread_affinity(int thread_n, int n_threads) {
    if (!ggml_is_numa()) {
        return;
    }

    // run thread on node_num: thread_n / (threads per node)
    const int node_num = thread_n / ((n_threads + g_state.numa.n_nodes - 1) / g_state.numa.n_nodes);
    struct ggml_numa_node * node = &g_state.numa.nodes[node_num];
    size_t setsize = CPU_ALLOC_SIZE(g_state.numa.total_cpus);

    cpu_set_t * cpus = CPU_ALLOC(g_state.numa.total_cpus);
    CPU_ZERO_S(setsize, cpus);
    for (size_t i = 0; i < node->n_cpus; ++i) {
        CPU_SET_S(node->cpus[i], setsize, cpus);
    }

    int rv = pthread_setaffinity_np(pthread_self(), setsize, cpus);
    if (rv) {
        fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n",
                strerror(rv));
    }

    CPU_FREE(cpus);
}

void clear_numa_thread_affinity(void) {
    if (!ggml_is_numa()) {
        return;
    }

    size_t setsize = CPU_ALLOC_SIZE(g_state.numa.total_cpus);

    cpu_set_t * cpus = CPU_ALLOC(g_state.numa.total_cpus);
    CPU_ZERO_S(setsize, cpus);
    for (unsigned i = 0; i < g_state.numa.total_cpus; ++i) {
        CPU_SET_S(i, setsize, cpus);
    }

    int rv = pthread_setaffinity_np(pthread_self(), setsize, cpus);
    if (rv) {
        fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n",
                strerror(rv));
    }

    CPU_FREE(cpus);
}
#else
// TODO: Windows etc.
// (the linux implementation may also work on BSD, someone should test)
void set_numa_thread_affinity(int thread_n, int n_threads) { UNUSED(thread_n); UNUSED(n_threads); }
void clear_numa_thread_affinity(void) {}
#endif

struct ggml_compute_state_shared {
    struct ggml_cgraph * cgraph;

    int64_t perf_node_start_cycles;
    int64_t perf_node_start_time_us;

    int n_threads;

    // synchronization primitives
    atomic_int n_active; // num active threads
    atomic_int node_n;   // active graph node
};

struct ggml_compute_state {
    ggml_thread_t thrd;
    int ith;
    struct ggml_compute_state_shared * shared;
};

static void ggml_graph_compute_perf_stats_node(struct ggml_tensor * node, const struct ggml_compute_state_shared * st) {
    int64_t cycles_cur  = ggml_perf_cycles()  - st->perf_node_start_cycles;
    int64_t time_us_cur = ggml_perf_time_us() - st->perf_node_start_time_us;

    node->perf_runs++;
    node->perf_cycles  += cycles_cur;
    node->perf_time_us += time_us_cur;
}

static thread_ret_t ggml_graph_compute_thread(void * data) {
    struct ggml_compute_state * state = (struct ggml_compute_state *) data;
    struct ggml_cgraph * cgraph = state->shared->cgraph;

    const int n_threads = state->shared->n_threads;
    set_numa_thread_affinity(state->ith, n_threads);

    int node_n = -1;

    while (true) {
        if (atomic_fetch_sub(&state->shared->n_active, 1) == 1) {
            // all other threads are finished and spinning
            // do finalize and init here so we don't have to synchronize again
            struct ggml_compute_params params = {
                /*.type  =*/ GGML_TASK_FINALIZE,
                /*.ith   =*/ 0,
                /*.nth   =*/ 0,
                /*.wsize =*/ cgraph->work ? ggml_nbytes(cgraph->work) : 0,
                /*.wdata =*/ cgraph->work ? cgraph->work->data : NULL,
            };

            if (node_n != -1) {
                /* FINALIZE */
                struct ggml_tensor * node = state->shared->cgraph->nodes[node_n];
                params.nth = node->n_tasks;
                ggml_compute_forward(&params, node);
                ggml_graph_compute_perf_stats_node(node, state->shared);
            }

            // distribute new work or execute it directly if single-threaded
            while (++node_n < cgraph->n_nodes) {
                GGML_PRINT_DEBUG_5("%s: %d/%d\n", __func__, node_n, cgraph->n_nodes);

                struct ggml_tensor * node = cgraph->nodes[node_n];

                state->shared->perf_node_start_cycles  = ggml_perf_cycles();
                state->shared->perf_node_start_time_us = ggml_perf_time_us();

                /* INIT */
                params.type = GGML_TASK_INIT;
                params.nth  = node->n_tasks;
                ggml_compute_forward(&params, node);

                if (node->n_tasks == 1) {
                    // TODO: maybe push node_n to the atomic but if other threads see n_tasks is 1,
                    // they do something more efficient than spinning (?)
                    params.type = GGML_TASK_COMPUTE;
                    ggml_compute_forward(&params, node);
                    params.type = GGML_TASK_FINALIZE;
                    ggml_compute_forward(&params, node);
                    ggml_graph_compute_perf_stats_node(node, state->shared);
                } else {
                    break;
                }
            }

            atomic_store(&state->shared->n_active, n_threads);
            atomic_store(&state->shared->node_n,   node_n);
        } else {
            // wait for other threads to finish
            const int last = node_n;
            do {
                sched_yield();
                node_n = atomic_load(&state->shared->node_n);
            } while (node_n == last);
        }

        // check if we should stop
        if (node_n >= cgraph->n_nodes) break;

        /* COMPUTE */
        struct ggml_tensor * node = cgraph->nodes[node_n];

        struct ggml_compute_params params = {
            /*.type  =*/ GGML_TASK_COMPUTE,
            /*.ith   =*/ state->ith,
            /*.nth   =*/ node->n_tasks,
            /*.wsize =*/ cgraph->work ? ggml_nbytes(cgraph->work) : 0,
            /*.wdata =*/ cgraph->work ? cgraph->work->data : NULL,
        };

        if (state->ith < node->n_tasks) {
            ggml_compute_forward(&params, node);
        }
    }

    return 0;
}
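
// note on the coordination above: n_active counts the threads still working on
// the current node. the last thread to decrement it to zero acts as the
// coordinator: it FINALIZEs the node just completed, INITs the next one
// (running single-task nodes to completion itself), then publishes the new
// node index through node_n and resets n_active; all other threads yield in a
// busy loop until node_n changes and then join the COMPUTE phase.
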
void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph) {
    const int n_threads = cgraph->n_threads;

    struct ggml_compute_state_shared state_shared = {
        /*.cgraph                  =*/ cgraph,
        /*.perf_node_start_cycles  =*/ 0,
        /*.perf_node_start_time_us =*/ 0,
        /*.n_threads               =*/ n_threads,
        /*.n_active                =*/ n_threads,
        /*.node_n                  =*/ -1,
    };
    struct ggml_compute_state * workers = alloca(sizeof(struct ggml_compute_state)*n_threads);

    // initialize tasks + work buffer
    {
        size_t work_size = 0;

        // thread scheduling for the different operations
        for (int i = 0; i < cgraph->n_nodes; i++) {
            struct ggml_tensor * node = cgraph->nodes[i];

            switch (node->op) {
                case GGML_OP_CPY:
                case GGML_OP_DUP:
                    {
                        node->n_tasks = n_threads;

                        size_t cur = 0;
                        if (ggml_is_quantized(node->type)) {
                            cur = GGML_TYPE_SIZE[GGML_TYPE_F32] * node->ne[0] * n_threads;
                        }

                        work_size = MAX(work_size, cur);
                    } break;
                case GGML_OP_ADD:
                case GGML_OP_ADD1:
                    {
                        node->n_tasks = n_threads;

                        size_t cur = 0;
                        if (ggml_is_quantized(node->src0->type)) {
                            cur = GGML_TYPE_SIZE[GGML_TYPE_F32] * node->src0->ne[0] * n_threads;
                        }

                        work_size = MAX(work_size, cur);
                    } break;
                case GGML_OP_ACC:
                    {
                        node->n_tasks = n_threads;

                        size_t cur = 0;
                        if (ggml_is_quantized(node->src0->type)) {
                            cur = GGML_TYPE_SIZE[GGML_TYPE_F32] * node->src1->ne[0] * n_threads;
                        }

                        work_size = MAX(work_size, cur);
                    } break;
                case GGML_OP_SUB:
                case GGML_OP_DIV:
                case GGML_OP_SQR:
                case GGML_OP_SQRT:
                case GGML_OP_LOG:
                case GGML_OP_SUM:
                case GGML_OP_SUM_ROWS:
                case GGML_OP_MEAN:
                case GGML_OP_REPEAT:
                case GGML_OP_REPEAT_BACK:
                case GGML_OP_ABS:
                case GGML_OP_SGN:
                case GGML_OP_NEG:
                case GGML_OP_STEP:
                case GGML_OP_RELU:
                    {
                        node->n_tasks = 1;
                    } break;
                case GGML_OP_MUL:
                case GGML_OP_GELU:
                case GGML_OP_GELU_QUICK:
                case GGML_OP_SILU:
                case GGML_OP_SILU_BACK:
                case GGML_OP_NORM:
                case GGML_OP_RMS_NORM:
                case GGML_OP_RMS_NORM_BACK:
                    {
                        node->n_tasks = n_threads;
                    } break;
                case GGML_OP_MUL_MAT:
                case GGML_OP_OUT_PROD:
                    {
                        node->n_tasks = n_threads;

                        // TODO: use different scheduling for different matrix sizes
                        //const int nr0 = ggml_nrows(node->src0);
                        //const int nr1 = ggml_nrows(node->src1);

                        //node->n_tasks = MIN(n_threads, MAX(1, nr0/128));
                        //printf("nr0 = %8d, nr1 = %8d, nr0*nr1 = %8d, n_tasks = %d\n", nr0, nr1, nr0*nr1, node->n_tasks);

                        size_t cur = 0;

#if defined(GGML_USE_CUBLAS)
                        if (ggml_cuda_can_mul_mat(node->src0, node->src1, node)) {
                            node->n_tasks = 1; // TODO: this actually is doing nothing
                                               //       the threads are still spinning
                        }
                        else
#elif defined(GGML_USE_CLBLAST)
                        if (ggml_cl_can_mul_mat(node->src0, node->src1, node)) {
                            node->n_tasks = 1; // TODO: this actually is doing nothing
                                               //       the threads are still spinning
                            cur = ggml_cl_mul_mat_get_wsize(node->src0, node->src1, node);
                        }
                        else
#endif
                        if (node->src0->type == GGML_TYPE_F16 && node->src1->type == GGML_TYPE_F32) {
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
                            if (ggml_compute_forward_mul_mat_use_blas(node->src0, node->src1, node)) {
                                node->n_tasks = 1; // TODO: this actually is doing nothing
                                                   //       the threads are still spinning
                                // here we need memory just for single 2D matrix from src0
                                cur = GGML_TYPE_SIZE[GGML_TYPE_F32]*(node->src0->ne[0]*node->src0->ne[1]);
                            } else {
                                cur = GGML_TYPE_SIZE[GGML_TYPE_F16]*ggml_nelements(node->src1);
                            }
#else
                            cur = GGML_TYPE_SIZE[GGML_TYPE_F16]*ggml_nelements(node->src1);
#endif
                        } else if (node->src0->type == GGML_TYPE_F32 && node->src1->type == GGML_TYPE_F32) {
                            cur = 0;
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
                            if (ggml_compute_forward_mul_mat_use_blas(node->src0, node->src1, node)) {
                                node->n_tasks = 1;
                            }
#endif
                        } else if (ggml_is_quantized(node->src0->type) && node->src1->type == GGML_TYPE_F32) {
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
                            if (ggml_compute_forward_mul_mat_use_blas(node->src0, node->src1, node)) {
                                node->n_tasks = 1;
                                cur = GGML_TYPE_SIZE[GGML_TYPE_F32]*(node->src0->ne[0]*node->src0->ne[1]);
                            } else
#endif
                            {
                                const enum ggml_type type_q = quantize_fns[node->src0->type].vec_dot_type;
                                cur = GGML_TYPE_SIZE[type_q]*ggml_nelements(node->src1)/GGML_BLCK_SIZE[type_q];
                            }
                        } else {
                            GGML_ASSERT(false);
                        }

                        work_size = MAX(work_size, cur);
                    } break;
                case GGML_OP_SCALE:
                    {
                        node->n_tasks = 1;
                    } break;
                case GGML_OP_SET:
                case GGML_OP_CONT:
                case GGML_OP_RESHAPE:
                case GGML_OP_VIEW:
                case GGML_OP_PERMUTE:
                case GGML_OP_TRANSPOSE:
                case GGML_OP_GET_ROWS:
                case GGML_OP_GET_ROWS_BACK:
                case GGML_OP_DIAG:
                case GGML_OP_DIAG_MASK_ZERO:
                    {
                        node->n_tasks = 1;
                    } break;
                case GGML_OP_DIAG_MASK_INF:
                case GGML_OP_SOFT_MAX:
                case GGML_OP_SOFT_MAX_BACK:
                case GGML_OP_ROPE:
                case GGML_OP_ROPE_BACK:
                    {
                        node->n_tasks = n_threads;
                    } break;
                case GGML_OP_ALIBI:
                    {
                        node->n_tasks = 1; //TODO
                    } break;
                case GGML_OP_CLAMP:
                    {
                        node->n_tasks = 1; //TODO
                    } break;
                case GGML_OP_CONV_1D_S1_PH:
                case GGML_OP_CONV_1D_S2_PH:
                    {
                        node->n_tasks = n_threads;

                        GGML_ASSERT(node->src0->ne[3] == 1);
                        GGML_ASSERT(node->src1->ne[2] == 1);
                        GGML_ASSERT(node->src1->ne[3] == 1);

                        size_t cur = 0;
                        const int nk = node->src0->ne[0];

                        if (node->src0->type == GGML_TYPE_F16 &&
                            node->src1->type == GGML_TYPE_F32) {
                            cur = sizeof(ggml_fp16_t)*(
                                nk*ggml_up32(node->src0->ne[1])*node->src0->ne[2] +
                                ( 2*(nk/2) + node->src1->ne[0])*node->src1->ne[1]
                            );
                        } else if (node->src0->type == GGML_TYPE_F32 &&
                                   node->src1->type == GGML_TYPE_F32) {
                            cur = sizeof(float)*(
                                nk*ggml_up32(node->src0->ne[1])*node->src0->ne[2] +
                                ( 2*(nk/2) + node->src1->ne[0])*node->src1->ne[1]
                            );
                        } else {
                            GGML_ASSERT(false);
                        }

                        work_size = MAX(work_size, cur);
                    } break;
                case GGML_OP_CONV_2D_SK_P0:
                    {
                        node->n_tasks = n_threads;

                        GGML_ASSERT(node->src1->ne[3] == 1);

                        const int64_t ne00 = node->src0->ne[0]; // W
                        const int64_t ne01 = node->src0->ne[1]; // H
                        const int64_t ne02 = node->src0->ne[2]; // C
                        const int64_t ne03 = node->src0->ne[3]; // N

                        const int64_t ne10 = node->src1->ne[0]; // W
                        const int64_t ne11 = node->src1->ne[1]; // H
                        const int64_t ne12 = node->src1->ne[2]; // C

                        const int64_t nk = ne00*ne01;

                        UNUSED(ne02);
                        UNUSED(ne03);
                        UNUSED(nk);

                        size_t cur = 0;

                        if (node->src0->type == GGML_TYPE_F16 &&
                            node->src1->type == GGML_TYPE_F32) {
                            cur = sizeof(ggml_fp16_t)*(ne10*ne11*ne12);
                        } else if (node->src0->type == GGML_TYPE_F32 &&
                                   node->src1->type == GGML_TYPE_F32) {
                            cur = sizeof(float)* (ne10*ne11*ne12);
                        } else {
                            GGML_ASSERT(false);
                        }

                        work_size = MAX(work_size, cur);
                    } break;
                case GGML_OP_FLASH_ATTN:
                    {
                        node->n_tasks = n_threads;

                        size_t cur = 0;

                        const int64_t ne11 = ggml_up(node->src1->ne[1], GGML_SOFT_MAX_UNROLL);

                        if (node->src1->type == GGML_TYPE_F32) {
                            cur  = sizeof(float)*ne11*node->n_tasks; // TODO: this can become (n_tasks-1)
                            cur += sizeof(float)*ne11*node->n_tasks; // this is overestimated by x2
                        }

                        if (node->src1->type == GGML_TYPE_F16) {
                            cur  = sizeof(float)*ne11*node->n_tasks; // TODO: this can become (n_tasks-1)
                            cur += sizeof(float)*ne11*node->n_tasks; // this is overestimated by x2
                        }

                        work_size = MAX(work_size, cur);
                    } break;
                case GGML_OP_FLASH_FF:
                    {
                        node->n_tasks = n_threads;

                        size_t cur = 0;

                        if (node->src1->type == GGML_TYPE_F32) {
                            cur  = sizeof(float)*node->src1->ne[1]*node->n_tasks; // TODO: this can become (n_tasks-1)
                            cur += sizeof(float)*node->src1->ne[1]*node->n_tasks; // this is overestimated by x2
  14100. }
  14101. if (node->src1->type == GGML_TYPE_F16) {
  14102. cur = sizeof(float)*node->src1->ne[1]*node->n_tasks; // TODO: this can become (n_tasks-1)
  14103. cur += sizeof(float)*node->src1->ne[1]*node->n_tasks; // this is overestimated by x2
  14104. }
  14105. work_size = MAX(work_size, cur);
  14106. } break;
  14107. case GGML_OP_FLASH_ATTN_BACK:
  14108. {
  14109. node->n_tasks = n_threads;
  14110. size_t cur = 0;
  14111. const int64_t D = node->src0->ne[0];
  14112. const int64_t ne11 = ggml_up(node->src1->ne[1], GGML_SOFT_MAX_UNROLL);
  14113. const int64_t mxDn = MAX(D, ne11) * 2; // *2 because of S and SM in ggml_compute_forward_flash_attn_back
  14114. if (node->src1->type == GGML_TYPE_F32) {
  14115. cur = sizeof(float)*mxDn*node->n_tasks; // TODO: this can become (n_tasks-1)
  14116. cur += sizeof(float)*mxDn*node->n_tasks; // this is overestimated by x2
  14117. }
  14118. if (node->src1->type == GGML_TYPE_F16) {
  14119. cur = sizeof(float)*mxDn*node->n_tasks; // TODO: this can become (n_tasks-1)
  14120. cur += sizeof(float)*mxDn*node->n_tasks; // this is overestimated by x2
  14121. }
  14122. work_size = MAX(work_size, cur);
  14123. } break;
  14124. case GGML_OP_WIN_PART:
  14125. case GGML_OP_WIN_UNPART:
  14126. case GGML_OP_MAP_UNARY:
  14127. case GGML_OP_MAP_BINARY:
  14128. case GGML_OP_MAP_CUSTOM1:
  14129. case GGML_OP_MAP_CUSTOM2:
  14130. case GGML_OP_MAP_CUSTOM3:
  14131. {
  14132. node->n_tasks = 1;
  14133. } break;
  14134. case GGML_OP_CROSS_ENTROPY_LOSS:
  14135. {
  14136. node->n_tasks = n_threads;
  14137. size_t cur = ggml_type_size(node->type)*(node->n_tasks + node->src0->ne[0]*node->n_tasks);
  14138. work_size = MAX(work_size, cur);
  14139. } break;
  14140. case GGML_OP_CROSS_ENTROPY_LOSS_BACK:
  14141. {
  14142. node->n_tasks = n_threads;
  14143. size_t cur = ggml_type_size(node->type)*node->src0->ne[0]*node->n_tasks;
  14144. work_size = MAX(work_size, cur);
  14145. } break;
  14146. case GGML_OP_NONE:
  14147. {
  14148. node->n_tasks = 1;
  14149. } break;
  14150. case GGML_OP_COUNT:
  14151. {
  14152. GGML_ASSERT(false);
  14153. } break;
  14154. }
  14155. }
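
        // note: at this point work_size holds the largest per-op scratch requirement
        // seen across the whole graph - a single shared buffer of that size suffices
        // because nodes are evaluated one at a time; the CACHE_LINE_SIZE*(n_threads - 1)
        // padding added below keeps the per-thread views on separate cache lines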
        if (cgraph->work != NULL && work_size > cgraph->work_size) {
            GGML_ASSERT(false); // TODO: better handling
        }

        if (work_size > 0 && cgraph->work == NULL) {
            cgraph->work_size = work_size + CACHE_LINE_SIZE*(n_threads - 1);

            GGML_PRINT_DEBUG("%s: allocating work buffer for graph (%zu bytes)\n", __func__, cgraph->work_size);
            cgraph->work = ggml_new_tensor_1d(ctx, GGML_TYPE_I8, cgraph->work_size);
        }
    }

    // create thread pool
    if (n_threads > 1) {
        for (int j = 1; j < n_threads; ++j) {
            workers[j] = (struct ggml_compute_state) {
                .thrd   = 0,
                .ith    = j,
                .shared = &state_shared,
            };

            const int rc = ggml_thread_create(&workers[j].thrd, NULL, ggml_graph_compute_thread, &workers[j]);
            GGML_ASSERT(rc == 0);
        }
    }

    workers[0].ith    = 0;
    workers[0].shared = &state_shared;

    const int64_t perf_start_cycles  = ggml_perf_cycles();
    const int64_t perf_start_time_us = ggml_perf_time_us();

    // this is a work thread too
    ggml_graph_compute_thread(&workers[0]);

    // don't leave affinity set on the main thread
    clear_numa_thread_affinity();

    // join thread pool
    if (n_threads > 1) {
        for (int j = 1; j < n_threads; j++) {
            const int rc = ggml_thread_join(workers[j].thrd, NULL);
            GGML_ASSERT(rc == 0);
        }
    }

    // performance stats (graph)
    {
        int64_t perf_cycles_cur  = ggml_perf_cycles()  - perf_start_cycles;
        int64_t perf_time_us_cur = ggml_perf_time_us() - perf_start_time_us;

        cgraph->perf_runs++;
        cgraph->perf_cycles  += perf_cycles_cur;
        cgraph->perf_time_us += perf_time_us_cur;

        GGML_PRINT_DEBUG("%s: perf (%d) - cpu = %.3f / %.3f ms, wall = %.3f / %.3f ms\n",
                __func__, cgraph->perf_runs,
                (double) perf_cycles_cur      / (double) ggml_cycles_per_ms(),
                (double) cgraph->perf_cycles  / (double) ggml_cycles_per_ms() / (double) cgraph->perf_runs,
                (double) perf_time_us_cur     / 1000.0,
                (double) cgraph->perf_time_us / 1000.0 / cgraph->perf_runs);
    }
}

void ggml_graph_reset(struct ggml_cgraph * cgraph) {
    for (int i = 0; i < cgraph->n_nodes; i++) {
        struct ggml_tensor * grad = cgraph->grads[i];

        if (grad) {
            ggml_set_zero(grad);
        }
    }
}
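
// Illustrative usage sketch (not part of ggml.c): a typical training-style step
// resets the gradients, seeds df/df = 1 and re-evaluates the backward graph.
// The forward graph `gf`, its backward graph `gb`, a scalar `f` and `ctx` are
// assumed to exist already; this mirrors what ggml_opt_adam() does below:
//
//     ggml_graph_reset  (gf);
//     ggml_set_f32      (f->grad, 1.0f);
//     ggml_graph_compute(ctx, gb);
//
//     const float loss = ggml_get_f32_1d(f, 0);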
struct ggml_tensor * ggml_graph_get_tensor(struct ggml_cgraph * cgraph, const char * name) {
    for (int i = 0; i < cgraph->n_leafs; i++) {
        struct ggml_tensor * leaf = cgraph->leafs[i];

        if (strcmp(leaf->name, name) == 0) {
            return leaf;
        }
    }

    for (int i = 0; i < cgraph->n_nodes; i++) {
        struct ggml_tensor * node = cgraph->nodes[i];

        if (strcmp(node->name, name) == 0) {
            return node;
        }
    }

    return NULL;
}

static void ggml_graph_export_leaf(const struct ggml_tensor * tensor, FILE * fout) {
    const int64_t * ne = tensor->ne;
    const size_t  * nb = tensor->nb;

    fprintf(fout, "%-6s %-12s %8d %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %16zu %16zu %16zu %16zu %16p %32s\n",
            ggml_type_name(tensor->type),
            ggml_op_name  (tensor->op),
            tensor->n_dims,
            ne[0], ne[1], ne[2], ne[3],
            nb[0], nb[1], nb[2], nb[3],
            tensor->data,
            tensor->name);
}

static void ggml_graph_export_node(const struct ggml_tensor * tensor, const char * arg, FILE * fout) {
    const int64_t * ne = tensor->ne;
    const size_t  * nb = tensor->nb;

    fprintf(fout, "%-6s %-6s %-12s %8d %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %16zu %16zu %16zu %16zu %8d %16p %32s\n",
            arg,
            ggml_type_name(tensor->type),
            ggml_op_name  (tensor->op),
            tensor->n_dims,
            ne[0], ne[1], ne[2], ne[3],
            nb[0], nb[1], nb[2], nb[3],
            tensor->n_tasks,
            tensor->data,
            tensor->name);
}

void ggml_graph_export(const struct ggml_cgraph * cgraph, const char * fname) {
    //assert(cgraph->work      == NULL);
    //assert(cgraph->work_size == 0);

    uint64_t size_eval = 0;

    // compute size of intermediate results
    // TODO: does not take into account scratch buffers !!!!
    for (int i = 0; i < cgraph->n_nodes; ++i) {
        size_eval += ggml_nbytes(cgraph->nodes[i]);
    }

    // print
    {
        FILE * fout = stdout;

        fprintf(fout, "\n");
        fprintf(fout, "%-16s %8x\n", "magic",   GGML_FILE_MAGIC);
        fprintf(fout, "%-16s %8d\n", "version", GGML_FILE_VERSION);
        fprintf(fout, "%-16s %8d\n", "leafs",   cgraph->n_leafs);
        fprintf(fout, "%-16s %8d\n", "nodes",   cgraph->n_nodes);
        fprintf(fout, "%-16s %" PRIu64 "\n", "eval", size_eval);

        // header
        fprintf(fout, "\n");
        fprintf(fout, "%-6s %-12s %8s %8s %8s %8s %8s %16s %16s %16s %16s %16s %16s\n",
                "TYPE", "OP", "NDIMS", "NE0", "NE1", "NE2", "NE3", "NB0", "NB1", "NB2", "NB3", "DATA", "NAME");

        for (int i = 0; i < cgraph->n_leafs; ++i) {
            ggml_graph_export_leaf(cgraph->leafs[i], fout);

            GGML_ASSERT(cgraph->leafs[i]->op   == GGML_OP_NONE);
            GGML_ASSERT(cgraph->leafs[i]->src0 == NULL);
            GGML_ASSERT(cgraph->leafs[i]->src1 == NULL);
        }

        // header
        fprintf(fout, "\n");
        fprintf(fout, "%-6s %-6s %-12s %8s %8s %8s %8s %8s %16s %16s %16s %16s %8s %16s %16s\n",
                "ARG", "TYPE", "OP", "NDIMS", "NE0", "NE1", "NE2", "NE3", "NB0", "NB1", "NB2", "NB3", "NTASKS", "DATA", "NAME");

        for (int i = 0; i < cgraph->n_nodes; ++i) {
            ggml_graph_export_node(cgraph->nodes[i], "DST", fout);

            if (cgraph->nodes[i]->src0) {
                ggml_graph_export_node(cgraph->nodes[i]->src0, "SRC0", fout);
            }

            if (cgraph->nodes[i]->src1) {
                ggml_graph_export_node(cgraph->nodes[i]->src1, "SRC1", fout);
            }

            for (int j = 0; j < GGML_MAX_OPT; ++j) {
                if (cgraph->nodes[i]->opt[j]) {
                    ggml_graph_export_node(cgraph->nodes[i]->opt[j], "OPT", fout);
                }
            }

            fprintf(fout, "\n");
        }

        fprintf(fout, "\n");
    }

    // write binary data
    {
        FILE * fout = fopen(fname, "wb");

        if (!fout) {
            fprintf(stderr, "%s: failed to open %s\n", __func__, fname);
            return;
        }

        // header
        {
            const uint32_t magic   = GGML_FILE_MAGIC;
            const uint32_t version = GGML_FILE_VERSION;
            const uint32_t n_leafs = cgraph->n_leafs;
            const uint32_t nodes   = cgraph->n_nodes;

            fwrite(&magic,     sizeof(uint32_t), 1, fout);
            fwrite(&version,   sizeof(uint32_t), 1, fout);
            fwrite(&n_leafs,   sizeof(uint32_t), 1, fout);
            fwrite(&nodes,     sizeof(uint32_t), 1, fout);
            fwrite(&size_eval, sizeof(uint64_t), 1, fout);
        }

        // leafs
        {
            for (int i = 0; i < cgraph->n_leafs; ++i) {
                const struct ggml_tensor * tensor = cgraph->leafs[i];

                const uint32_t type   = tensor->type;
                const uint32_t op     = tensor->op;
                const uint32_t n_dims = tensor->n_dims;

                fwrite(&type,   sizeof(uint32_t), 1, fout);
                fwrite(&op,     sizeof(uint32_t), 1, fout);
                fwrite(&n_dims, sizeof(uint32_t), 1, fout);

                for (int j = 0; j < GGML_MAX_DIMS; ++j) {
                    const uint64_t ne = tensor->ne[j];
                    const uint64_t nb = tensor->nb[j];

                    fwrite(&ne, sizeof(uint64_t), 1, fout);
                    fwrite(&nb, sizeof(uint64_t), 1, fout);
                }

                // store the pointer address
                {
                    const uint64_t ptr = (uint64_t) tensor->data;

                    fwrite(&ptr, sizeof(uint64_t), 1, fout);
                }

                fwrite(tensor->name, sizeof(char), GGML_MAX_NAME, fout);

                // dump the data
                // TODO: pad this to 32 byte boundary
                {
                    const size_t size = ggml_nbytes(tensor);

                    fwrite(tensor->data, sizeof(char), size, fout);
                }
            }
        }

        // nodes
        {
            for (int i = 0; i < cgraph->n_nodes; ++i) {
                const struct ggml_tensor * tensor = cgraph->nodes[i];

                const uint32_t type   = tensor->type;
                const uint32_t op     = tensor->op;
                const uint32_t n_dims = tensor->n_dims;

                fwrite(&type,   sizeof(uint32_t), 1, fout);
                fwrite(&op,     sizeof(uint32_t), 1, fout);
                fwrite(&n_dims, sizeof(uint32_t), 1, fout);

                for (int j = 0; j < GGML_MAX_DIMS; ++j) {
                    const uint64_t ne = tensor->ne[j];
                    const uint64_t nb = tensor->nb[j];

                    fwrite(&ne, sizeof(uint64_t), 1, fout);
                    fwrite(&nb, sizeof(uint64_t), 1, fout);
                }

                // store the pointer address
                {
                    const uint64_t ptr = (uint64_t) tensor->data;

                    fwrite(&ptr, sizeof(uint64_t), 1, fout);
                }

                fwrite(tensor->name, sizeof(char), GGML_MAX_NAME, fout);

                // output the op arguments
                {
                    struct ggml_tensor * args[2 + GGML_MAX_OPT] = { NULL };

                    args[0] = tensor->src0;
                    args[1] = tensor->src1;

                    for (int j = 0; j < GGML_MAX_OPT; ++j) {
                        args[2 + j] = tensor->opt[j];
                    }

                    for (int j = 0; j < 2 + GGML_MAX_OPT; ++j) {
                        if (args[j]) {
                            int32_t idx = -1;

                            // check if leaf
                            {
                                for (int k = 0; k < cgraph->n_leafs; ++k) {
                                    if (args[j] == cgraph->leafs[k]) {
                                        idx = k;
                                        break;
                                    }
                                }
                            }

                            // check if node
                            if (idx == -1) {
                                for (int k = 0; k < cgraph->n_nodes; ++k) {
                                    if (args[j] == cgraph->nodes[k]) {
                                        idx = GGML_MAX_NODES + k;
                                        break;
                                    }
                                }
                            }
                            if (idx == -1) {
                                fprintf(stderr, "%s: failed to find tensor, arg = %d, node = %d\n", __func__, j, i);
                                fclose(fout); // don't leak the file handle on the error path
                                return;
                            }

                            fwrite(&idx, sizeof(int32_t), 1, fout);
                        } else {
                            const int32_t nul = -1;

                            fwrite(&nul, sizeof(int32_t), 1, fout);
                        }
                    }
                }
            }
        }

        fclose(fout);
    }
}
struct ggml_cgraph ggml_graph_import(const char * fname, struct ggml_context ** ctx_data, struct ggml_context ** ctx_eval) {
    assert(*ctx_data == NULL);
    assert(*ctx_eval == NULL);

    struct ggml_cgraph result = { 0 };

    struct ggml_tensor * data = NULL;

    // read file into data
    {
        FILE * fin = fopen(fname, "rb");
        if (!fin) {
            fprintf(stderr, "%s: failed to open %s\n", __func__, fname);
            return result;
        }

        size_t fsize = 0;

        fseek(fin, 0, SEEK_END);
        fsize = ftell(fin);
        fseek(fin, 0, SEEK_SET);

        // create the data context
        {
            const size_t overhead = 1*ggml_tensor_overhead();

            struct ggml_init_params params = {
                .mem_size   = fsize + overhead,
                .mem_buffer = NULL,
                .no_alloc   = false,
            };

            *ctx_data = ggml_init(params);

            if (!*ctx_data) {
                fprintf(stderr, "%s: failed to create ggml context\n", __func__);
                fclose(fin);
                return result;
            }
        }

        data = ggml_new_tensor_1d(*ctx_data, GGML_TYPE_I8, fsize);

        {
            const size_t ret = fread(data->data, sizeof(char), fsize, fin);
            if (ret != fsize) {
                fprintf(stderr, "%s: failed to read %s\n", __func__, fname);
                fclose(fin);
                return result;
            }
        }

        fclose(fin);
    }

    // populate result
    {
        char * ptr = (char *) data->data;

        const uint32_t magic = *(const uint32_t *) ptr; ptr += sizeof(magic);

        if (magic != GGML_FILE_MAGIC) {
            fprintf(stderr, "%s: invalid magic number, got %08x\n", __func__, magic);
            return result;
        }

        const uint32_t version = *(const uint32_t *) ptr; ptr += sizeof(version);

        if (version != GGML_FILE_VERSION) {
            fprintf(stderr, "%s: invalid version number\n", __func__);
            return result;
        }

        const uint32_t n_leafs   = *(const uint32_t *) ptr; ptr += sizeof(n_leafs);
        const uint32_t n_nodes   = *(const uint32_t *) ptr; ptr += sizeof(n_nodes);
        const uint64_t size_eval = *(const uint64_t *) ptr; ptr += sizeof(size_eval);

        result.n_leafs = n_leafs;
        result.n_nodes = n_nodes;

        // create the data context
        {
            const size_t overhead = (n_leafs + n_nodes)*ggml_tensor_overhead();

            struct ggml_init_params params = {
                .mem_size   = size_eval + overhead,
                .mem_buffer = NULL,
                .no_alloc   = true,
            };

            *ctx_eval = ggml_init(params);

            if (!*ctx_eval) {
                fprintf(stderr, "%s: failed to create ggml context\n", __func__);
                return result;
            }
        }

        // leafs
        {
            uint32_t type;
            uint32_t op;
            uint32_t n_dims;

            for (uint32_t i = 0; i < n_leafs; ++i) {
                type   = *(const uint32_t *) ptr; ptr += sizeof(type);
                op     = *(const uint32_t *) ptr; ptr += sizeof(op);
                n_dims = *(const uint32_t *) ptr; ptr += sizeof(n_dims);

                int64_t ne[GGML_MAX_DIMS];
                size_t  nb[GGML_MAX_DIMS];

                for (int j = 0; j < GGML_MAX_DIMS; ++j) {
                    uint64_t ne_cur;
                    uint64_t nb_cur;

                    ne_cur = *(const uint64_t *) ptr; ptr += sizeof(ne_cur);
                    nb_cur = *(const uint64_t *) ptr; ptr += sizeof(nb_cur);

                    ne[j] = ne_cur;
                    nb[j] = nb_cur;
                }

                struct ggml_tensor * tensor = ggml_new_tensor(*ctx_eval, (enum ggml_type) type, n_dims, ne);

                tensor->op = (enum ggml_op) op;

                uint64_t ptr_cur = *(const uint64_t *) ptr; ptr += sizeof(ptr_cur);

                memcpy(tensor->name, ptr, GGML_MAX_NAME); ptr += GGML_MAX_NAME;

                tensor->data = (void *) ptr;

                for (int j = 0; j < GGML_MAX_DIMS; ++j) {
                    tensor->nb[j] = nb[j];
                }

                result.leafs[i] = tensor;

                ptr += ggml_nbytes(tensor);

                fprintf(stderr, "%s: loaded leaf %d: '%16s', %3d dims, %9zu bytes\n", __func__, i, tensor->name, n_dims, ggml_nbytes(tensor));
            }
        }

        ggml_set_no_alloc(*ctx_eval, false);

        // nodes
        {
            uint32_t type;
            uint32_t op;
            uint32_t n_dims;

            for (uint32_t i = 0; i < n_nodes; ++i) {
                type   = *(const uint32_t *) ptr; ptr += sizeof(type);
                op     = *(const uint32_t *) ptr; ptr += sizeof(op);
                n_dims = *(const uint32_t *) ptr; ptr += sizeof(n_dims);

                enum ggml_op eop = (enum ggml_op) op;

                int64_t ne[GGML_MAX_DIMS];
                size_t  nb[GGML_MAX_DIMS];

                for (int j = 0; j < GGML_MAX_DIMS; ++j) {
                    uint64_t ne_cur;
                    uint64_t nb_cur;

                    ne_cur = *(const uint64_t *) ptr; ptr += sizeof(ne_cur);
                    nb_cur = *(const uint64_t *) ptr; ptr += sizeof(nb_cur);

                    ne[j] = ne_cur;
                    nb[j] = nb_cur;
                }

                uint64_t ptr_cur = *(const uint64_t *) ptr; ptr += sizeof(ptr_cur); // TODO: not yet used

                const char * ptr_name = ptr; ptr += GGML_MAX_NAME;

                const int32_t * ptr_arg_idx = (const int32_t *) ptr; ptr += (2 + GGML_MAX_OPT)*sizeof(int32_t);

                struct ggml_tensor * args[2 + GGML_MAX_OPT] = { NULL };

                // parse args
                for (int j = 0; j < 2 + GGML_MAX_OPT; ++j) {
                    const int32_t arg_idx = ptr_arg_idx[j];

                    if (arg_idx == -1) {
                        continue;
                    }

                    if (arg_idx < GGML_MAX_NODES) {
                        args[j] = result.leafs[arg_idx];
                    } else {
                        args[j] = result.nodes[arg_idx - GGML_MAX_NODES];
                    }
                }

                // create the tensor
                // "view" operations are handled differently
                // TODO: handle inplace ops - currently a copy is always made

                struct ggml_tensor * tensor = NULL;

                switch (eop) {
                    // TODO: implement other view ops
                    case GGML_OP_RESHAPE:
                        {
                            tensor = ggml_reshape_4d(*ctx_eval, args[0], ne[0], ne[1], ne[2], ne[3]);
                        } break;
                    case GGML_OP_VIEW:
                        {
                            tensor = ggml_view_4d(*ctx_eval, args[0], ne[0], ne[1], ne[2], ne[3], 0, 0, 0, 0);

                            uint64_t offs;
                            memcpy(&offs, args[2]->data, sizeof(offs));

                            tensor->data = ((char *) tensor->data) + offs;
                        } break;
                    case GGML_OP_TRANSPOSE:
                        {
                            tensor = ggml_transpose(*ctx_eval, args[0]);
                        } break;
                    case GGML_OP_PERMUTE:
                        {
                            tensor = ggml_view_4d(*ctx_eval, args[0], ne[0], ne[1], ne[2], ne[3], 0, 0, 0, 0);
                        } break;
                    default:
                        {
                            tensor = ggml_new_tensor(*ctx_eval, (enum ggml_type) type, n_dims, ne);

                            tensor->op = eop;
                        } break;
                }

                memcpy(tensor->name, ptr_name, GGML_MAX_NAME);

                for (int j = 0; j < GGML_MAX_DIMS; ++j) {
                    tensor->nb[j] = nb[j];
                }

                tensor->src0 = args[0];
                tensor->src1 = args[1];

                for (int j = 0; j < GGML_MAX_OPT; ++j) {
                    tensor->opt[j] = args[2 + j];
                }

                result.nodes[i] = tensor;

                fprintf(stderr, "%s: loaded node %d: '%16s', %3d dims, %9zu bytes\n", __func__, i, tensor->name, n_dims, ggml_nbytes(tensor));
            }
        }
    }

    return result;
}
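
// Illustrative round-trip sketch (not part of ggml.c): export writes a text dump
// to stdout plus the binary file; import returns the graph together with two
// freshly created contexts that own the tensor data and the intermediate
// results. All names are local to the example:
//
//     ggml_graph_export(&gf, "graph.ggml");
//
//     struct ggml_context * ctx_data = NULL;
//     struct ggml_context * ctx_eval = NULL;
//
//     struct ggml_cgraph gf2 = ggml_graph_import("graph.ggml", &ctx_data, &ctx_eval);
//
//     // ... evaluate / inspect gf2 ...
//
//     ggml_free(ctx_eval);
//     ggml_free(ctx_data);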
void ggml_graph_print(const struct ggml_cgraph * cgraph) {
    int64_t perf_total_per_op_us[GGML_OP_COUNT] = {0};

    GGML_PRINT("=== GRAPH ===\n");

    GGML_PRINT_DEBUG("n_threads = %d\n", cgraph->n_threads);
    GGML_PRINT_DEBUG("total work size = %zu bytes\n", cgraph->work_size);

    GGML_PRINT("n_nodes = %d\n", cgraph->n_nodes);
    for (int i = 0; i < cgraph->n_nodes; i++) {
        struct ggml_tensor * node = cgraph->nodes[i];

        perf_total_per_op_us[node->op] += MAX(1, node->perf_time_us);

        GGML_PRINT(" - %3d: [ %5" PRId64 ", %5" PRId64 ", %5" PRId64 "] %16s %s (%3d) cpu = %7.3f / %7.3f ms, wall = %7.3f / %7.3f ms\n",
                i,
                node->ne[0], node->ne[1], node->ne[2],
                GGML_OP_NAME[node->op], node->is_param ? "x" : node->grad ? "g" : " ", node->perf_runs,
                (double) node->perf_cycles  / (double) ggml_cycles_per_ms(),
                (double) node->perf_cycles  / (double) ggml_cycles_per_ms() / (double) node->perf_runs,
                (double) node->perf_time_us / 1000.0,
                (double) node->perf_time_us / 1000.0 / node->perf_runs);
    }

    GGML_PRINT("n_leafs = %d\n", cgraph->n_leafs);
    for (int i = 0; i < cgraph->n_leafs; i++) {
        struct ggml_tensor * node = cgraph->leafs[i];

        GGML_PRINT(" - %3d: [ %5" PRId64 ", %5" PRId64 "] %8s\n",
                i,
                node->ne[0], node->ne[1],
                GGML_OP_NAME[node->op]);
    }

    for (int i = 0; i < GGML_OP_COUNT; i++) {
        if (perf_total_per_op_us[i] == 0) {
            continue;
        }

        GGML_PRINT("perf_total_per_op_us[%16s] = %7.3f ms\n", GGML_OP_NAME[i], (double) perf_total_per_op_us[i] / 1000.0);
    }

    GGML_PRINT("========================================\n");
}

// check if node is part of the graph
static bool ggml_graph_find(const struct ggml_cgraph * cgraph, const struct ggml_tensor * node) {
    if (cgraph == NULL) {
        return true;
    }

    for (int i = 0; i < cgraph->n_nodes; i++) {
        if (cgraph->nodes[i] == node) {
            return true;
        }
    }

    return false;
}

static struct ggml_tensor * ggml_graph_get_parent(const struct ggml_cgraph * cgraph, const struct ggml_tensor * node) {
    for (int i = 0; i < cgraph->n_nodes; i++) {
        struct ggml_tensor * parent = cgraph->nodes[i];

        if (parent->grad == node) {
            return parent;
        }
    }

    return NULL;
}

static void ggml_graph_dump_dot_node_edge(FILE * fp, const struct ggml_cgraph * gb, struct ggml_tensor * node, struct ggml_tensor * parent, const char * label) {
    struct ggml_tensor * gparent  = ggml_graph_get_parent(gb, node);
    struct ggml_tensor * gparent0 = ggml_graph_get_parent(gb, parent);

    fprintf(fp, " \"%p\":%s -> \"%p\":%s [ arrowhead = %s; style = %s; label = \"%s\"; ]\n",
            gparent0 ? (void *) gparent0 : (void *) parent,
            gparent0 ? "g" : "x",
            gparent ? (void *) gparent : (void *) node,
            gparent ? "g" : "x",
            gparent ? "empty" : "vee",
            gparent ? "dashed" : "solid",
            label);
}

static void ggml_graph_dump_dot_leaf_edge(FILE * fp, struct ggml_tensor * node, struct ggml_tensor * parent, const char * label) {
    fprintf(fp, " \"%p\":%s -> \"%p\":%s [ label = \"%s\"; ]\n",
            (void *) parent, "x",
            (void *) node, "x",
            label);
}
void ggml_graph_dump_dot(const struct ggml_cgraph * gb, const struct ggml_cgraph * gf, const char * filename) {
    char color[16];

    FILE * fp = fopen(filename, "w");
    GGML_ASSERT(fp);

    fprintf(fp, "digraph G {\n");
    fprintf(fp, " newrank = true;\n");
    fprintf(fp, " rankdir = LR;\n");

    for (int i = 0; i < gb->n_nodes; i++) {
        struct ggml_tensor * node = gb->nodes[i];

        if (ggml_graph_get_parent(gb, node) != NULL) {
            continue;
        }

        if (node->is_param) {
            snprintf(color, sizeof(color), "yellow");
        } else if (node->grad) {
            if (ggml_graph_find(gf, node)) {
                snprintf(color, sizeof(color), "green");
            } else {
                snprintf(color, sizeof(color), "lightblue");
            }
        } else {
            snprintf(color, sizeof(color), "white");
        }

        fprintf(fp, " \"%p\" [ "
                    "style = filled; fillcolor = %s; shape = record; "
                    "label=\"",
                (void *) node, color);

        if (strlen(node->name) > 0) {
            fprintf(fp, "%s (%s)|", node->name, ggml_type_name(node->type));
        } else {
            fprintf(fp, "(%s)|", ggml_type_name(node->type));
        }

        if (node->n_dims == 2) {
            fprintf(fp, "%d [%" PRId64 ", %" PRId64 "] | <x>%s", i, node->ne[0], node->ne[1], GGML_OP_SYMBOL[node->op]);
        } else {
            fprintf(fp, "%d [%" PRId64 ", %" PRId64 ", %" PRId64 "] | <x>%s", i, node->ne[0], node->ne[1], node->ne[2], GGML_OP_SYMBOL[node->op]);
        }

        if (node->grad) {
            fprintf(fp, " | <g>%s\"; ]\n", GGML_OP_SYMBOL[node->grad->op]);
        } else {
            fprintf(fp, "\"; ]\n");
        }
    }

    for (int i = 0; i < gb->n_leafs; i++) {
        struct ggml_tensor * node = gb->leafs[i];

        snprintf(color, sizeof(color), "pink");

        fprintf(fp, " \"%p\" [ "
                    "style = filled; fillcolor = %s; shape = record; "
                    "label=\"<x>",
                (void *) node, color);

        if (strlen(node->name) > 0) {
            fprintf(fp, "%s (%s)|", node->name, ggml_type_name(node->type));
        } else {
            fprintf(fp, "(%s)|", ggml_type_name(node->type));
        }

        fprintf(fp, "CONST %d [%" PRId64 ", %" PRId64 "]", i, node->ne[0], node->ne[1]);

        if (ggml_nelements(node) < 5) {
            fprintf(fp, " | (");
            for (int j = 0; j < ggml_nelements(node); j++) {
                if (node->type == GGML_TYPE_I8 || node->type == GGML_TYPE_I16 || node->type == GGML_TYPE_I32) {
                    fprintf(fp, "%d", ggml_get_i32_1d(node, j));
                }
                else if (node->type == GGML_TYPE_F32 || node->type == GGML_TYPE_F16) {
                    fprintf(fp, "%.1e", (double)ggml_get_f32_1d(node, j));
                }
                else {
                    fprintf(fp, "#");
                }
                if (j < ggml_nelements(node) - 1) {
                    fprintf(fp, ", ");
                }
            }
            fprintf(fp, ")");
        }

        fprintf(fp, "\"; ]\n");
    }

    for (int i = 0; i < gb->n_nodes; i++) {
        struct ggml_tensor * node = gb->nodes[i];

        if (node->src0) {
            ggml_graph_dump_dot_node_edge(fp, gb, node, node->src0, "x");
        }

        if (node->src1) {
            ggml_graph_dump_dot_node_edge(fp, gb, node, node->src1, "y");
        }

        for (int j = 0; j < GGML_MAX_OPT; j++) {
            if (node->opt[j]) {
                char label[16];
                snprintf(label, sizeof(label), "opt %d", j);
                ggml_graph_dump_dot_node_edge(fp, gb, node, node->opt[j], label);
            }
        }
    }

    for (int i = 0; i < gb->n_leafs; i++) {
        struct ggml_tensor * node = gb->leafs[i];

        if (node->src0) {
            ggml_graph_dump_dot_leaf_edge(fp, node, node->src0, "x");
        }

        if (node->src1) {
            ggml_graph_dump_dot_leaf_edge(fp, node, node->src1, "y");
        }

        for (int j = 0; j < GGML_MAX_OPT; j++) {
            if (node->opt[j]) {
                char label[16];
                snprintf(label, sizeof(label), "opt %d", j);
                ggml_graph_dump_dot_leaf_edge(fp, node, node->opt[j], label);
            }
        }
    }

    fprintf(fp, "}\n");

    fclose(fp);

    GGML_PRINT("%s: dot -Tpng %s -o %s.png && open %s.png\n", __func__, filename, filename, filename);
}
////////////////////////////////////////////////////////////////////////////////

static void ggml_opt_set_params(int np, struct ggml_tensor * const ps[], const float * x) {
    int i = 0;
    for (int p = 0; p < np; ++p) {
        const int64_t ne = ggml_nelements(ps[p]);
        // TODO: add function to set tensor from array
        for (int64_t j = 0; j < ne; ++j) {
            ggml_set_f32_1d(ps[p], j, x[i++]);
        }
    }
}

static void ggml_opt_get_params(int np, struct ggml_tensor * const ps[], float * x) {
    int i = 0;
    for (int p = 0; p < np; ++p) {
        const int64_t ne = ggml_nelements(ps[p]);
        // TODO: add function to get all elements at once
        for (int64_t j = 0; j < ne; ++j) {
            x[i++] = ggml_get_f32_1d(ps[p], j);
        }
    }
}

static void ggml_opt_get_grad(int np, struct ggml_tensor * const ps[], float * g) {
    int i = 0;
    for (int p = 0; p < np; ++p) {
        const int64_t ne = ggml_nelements(ps[p]);
        // TODO: add function to get all elements at once
        for (int64_t j = 0; j < ne; ++j) {
            g[i++] = ggml_get_f32_1d(ps[p]->grad, j);
        }
    }
}
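
// note: the three helpers above flatten all optimizer parameters (and their
// gradients) into one contiguous float array - the layout that the Adam and
// L-BFGS implementations below operate on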
//
// ADAM
//
// ref: https://arxiv.org/pdf/1412.6980.pdf
//

static enum ggml_opt_result ggml_opt_adam(
        struct ggml_context * ctx,
        struct ggml_opt_context * opt,
        struct ggml_opt_params params,
        struct ggml_tensor * f,
        struct ggml_cgraph * gf,
        struct ggml_cgraph * gb) {
    GGML_ASSERT(ggml_is_scalar(f));

    gf->n_threads = params.n_threads;
    gb->n_threads = params.n_threads;

    // these will store the parameters we want to optimize
    struct ggml_tensor * ps[GGML_MAX_PARAMS];

    int np = 0;
    int nx = 0;
    for (int i = 0; i < gf->n_nodes; ++i) {
        if (gf->nodes[i]->is_param) {
            GGML_PRINT_DEBUG("found param %d: grad->op = %d\n", np, gf->nodes[i]->grad->op);

            GGML_ASSERT(np < GGML_MAX_PARAMS);

            ps[np++] = gf->nodes[i];
            nx += ggml_nelements(gf->nodes[i]);
        }
    }

    if ((opt->params.type != params.type) || (opt->nx != nx) || (opt->params.past != params.past)) {
        int iter = opt->iter;
        ggml_opt_init(opt->ctx, opt, params, nx);
        opt->iter = iter;
    }

    // constants
    const float sched = params.adam.sched;
    const float decay = params.adam.decay * sched;
    const float alpha = params.adam.alpha * sched;
    const float beta1 = params.adam.beta1;
    const float beta2 = params.adam.beta2;
    const float eps   = params.adam.eps;

    float * x  = opt->adam.x->data;  // view of the parameters
    float * g1 = opt->adam.g1->data; // gradient
    float * g2 = opt->adam.g2->data; // gradient squared
    float * m  = opt->adam.m->data;  // first moment
    float * v  = opt->adam.v->data;  // second moment
    float * mh = opt->adam.mh->data; // first moment hat
    float * vh = opt->adam.vh->data; // second moment hat

    float * pf = params.past > 0 ? opt->adam.pf->data : NULL; // past function values

    // update view
    ggml_opt_get_params(np, ps, x);

    // compute the function value
    ggml_graph_reset  (gf);
    ggml_set_f32      (f->grad, 1.0f);
    ggml_graph_compute(ctx, gb);

    opt->adam.fx_prev = ggml_get_f32_1d(f, 0);
    opt->adam.fx_best = opt->adam.fx_prev;
    if (pf) {
        pf[opt->iter % params.past] = opt->adam.fx_prev;
    }

    // initialize
    if (opt->just_initialized) {
        opt->adam.n_no_improvement = 0;
        opt->just_initialized = false;
    }

    float * fx_best = &opt->adam.fx_best;
    float * fx_prev = &opt->adam.fx_prev;
    int * n_no_improvement = &opt->adam.n_no_improvement;

    int iter0 = opt->iter;

    // run the optimizer
    for (int t = 0; t < params.adam.n_iter; ++t) {
        opt->iter = iter0 + t + 1;
        GGML_PRINT_DEBUG  ("=== iter %d ===\n", t);

        GGML_PRINT_DEBUG  ("f = %10.6f\n", ggml_get_f32_1d(f, 0));
        GGML_PRINT_DEBUG_5("df/dx0 = %10.6f\n", ggml_get_f32_1d(ps[0]->grad, 0));
        GGML_PRINT_DEBUG_5("df/dx1 = %10.6f\n", ggml_get_f32_1d(ps[1]->grad, 0));

        for (int i = 0; i < np; ++i) {
            GGML_PRINT_DEBUG("param %d: %10.6f, g = %10.6f\n", i,
                    ggml_get_f32_1d(ps[i], 0), ggml_get_f32_1d(ps[i]->grad, 0));
        }

        const int64_t t_start_wall = ggml_time_us();
        const int64_t t_start_cpu  = ggml_cycles();
        UNUSED(t_start_wall);
        UNUSED(t_start_cpu);

        {
            // update the gradient
            ggml_opt_get_grad(np, ps, g1);

            // m_t = beta1*m_t-1 + (1 - beta1)*g_t
            ggml_vec_scale_f32(nx, m, beta1);
            ggml_vec_mad_f32  (nx, m, g1, 1.0f - beta1);

            // g2 = g1^2
            ggml_vec_sqr_f32  (nx, g2, g1);

            // v_t = beta2*v_t-1 + (1 - beta2)*g_t^2
            ggml_vec_scale_f32(nx, v, beta2);
            ggml_vec_mad_f32  (nx, v, g2, 1.0f - beta2);

            // m^hat = m_t / (1 - beta1^t)
            // v^hat = v_t / (1 - beta2^t)
            // x_t = x_t-1 - sched*(alpha*m^hat/(sqrt(v^hat) + eps) + decay*x_t-1)
            // x_t = x_t-1 - sched*alpha*m^hat/(sqrt(v^hat) + eps) - sched*decay*x_t-1
            // x_t = x_t-1*(1-sched*decay) - sched*alpha*m^hat/(sqrt(v^hat) + eps)
            // x_t = x_t-1*(1-sched*decay) + sched*decay*(-alpha/decay)*m^hat/(sqrt(v^hat) + eps)
            // x_t = mix(x_t-1, (-alpha/decay)*m^hat/(sqrt(v^hat) + eps), sched*decay)
            ggml_vec_cpy_f32  (nx, mh, m);
            ggml_vec_cpy_f32  (nx, vh, v);

            ggml_vec_scale_f32(nx, mh, alpha/(1.0f - powf(beta1, opt->iter)));
            ggml_vec_scale_f32(nx, vh, 1.0f/(1.0f - powf(beta2, opt->iter)));

            ggml_vec_sqrt_f32 (nx, vh, vh);
            ggml_vec_acc1_f32 (nx, vh, eps);

            ggml_vec_div_f32  (nx, mh, mh, vh);
            ggml_vec_scale_f32(nx, x, 1.0f - decay);
            ggml_vec_sub_f32  (nx, x, x, mh);

            // update the parameters
            ggml_opt_set_params(np, ps, x);
        }

        ggml_graph_reset  (gf);
        ggml_set_f32      (f->grad, 1.0f);
        ggml_graph_compute(ctx, gb);

        const float fx = ggml_get_f32_1d(f, 0);

        // check convergence
        if (fabsf(fx - fx_prev[0])/fx < params.adam.eps_f) {
            GGML_PRINT_DEBUG("converged\n");

            return GGML_OPT_OK;
        }

        // delta-based convergence test
        if (pf != NULL) {
            // need at least params.past iterations to start checking for convergence
            if (params.past <= iter0 + t) {
                const float rate = (pf[(iter0 + t)%params.past] - fx)/fx;

                if (fabsf(rate) < params.delta) {
                    return GGML_OPT_OK;
                }
            }

            pf[(iter0 + t)%params.past] = fx;
        }

        // check for improvement
        if (params.max_no_improvement > 0) {
            if (fx_best[0] > fx) {
                fx_best[0] = fx;
                n_no_improvement[0] = 0;
            } else {
                ++n_no_improvement[0];

                if (n_no_improvement[0] >= params.max_no_improvement) {
                    return GGML_OPT_OK;
                }
            }
        }

        fx_prev[0] = fx;

        {
            const int64_t t_end_cpu = ggml_cycles();
            GGML_PRINT_DEBUG("time iter: %5.3f s\n", ((float)(t_end_cpu - t_start_cpu))/CLOCKS_PER_SEC);
            UNUSED(t_end_cpu);

            const int64_t t_end_wall = ggml_time_us();
            GGML_PRINT_DEBUG("wall time iter: %5.3f s\n", (t_end_wall - t_start_wall)/1e6);
            UNUSED(t_end_wall);
        }
    }

    return GGML_OPT_DID_NOT_CONVERGE;
}
//
// L-BFGS
//
// the L-BFGS implementation below is based on the following implementation:
//
// https://github.com/chokkan/liblbfgs
//

struct ggml_lbfgs_iteration_data {
    float alpha;
    float ys;
    float * s;
    float * y;
};
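
// note: this struct mirrors liblbfgs' per-iteration record (alpha, ys, s, y);
// in this port the same data is kept in the flat ring buffers lm_alpha/lm_ys/
// lm_s/lm_y of ggml_opt_lbfgs() below, indexed via `end` - keeping only the
// last m iterations is what makes the method "limited-memory"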
static enum ggml_opt_result linesearch_backtracking(
        struct ggml_context * ctx,
        const struct ggml_opt_params * params,
        int nx,
        float * x,
        float * fx,
        float * g,
        float * d,
        float * step,
        const float * xp,
        struct ggml_tensor * f,
        struct ggml_cgraph * gf,
        struct ggml_cgraph * gb,
        const int np,
        struct ggml_tensor * ps[]) {
    int count = 0;

    float width  = 0.0f;
    float dg     = 0.0f;
    float finit  = 0.0f;
    float dginit = 0.0f;
    float dgtest = 0.0f;

    const float dec = 0.5f;
    const float inc = 2.1f;

    if (*step <= 0.f) {
        return GGML_LINESEARCH_INVALID_PARAMETERS;
    }

    // compute the initial gradient in the search direction
    ggml_vec_dot_f32(nx, &dginit, g, d);

    // make sure that d points to a descent direction
    if (0 < dginit) {
        return GGML_LINESEARCH_FAIL;
    }

    // initialize local variables
    finit = *fx;
    dgtest = params->lbfgs.ftol*dginit;

    while (true) {
        ggml_vec_cpy_f32(nx, x, xp);
        ggml_vec_mad_f32(nx, x, d, *step);

        // evaluate the function and gradient values
        {
            ggml_opt_set_params(np, ps, x);

            ggml_graph_reset  (gf);
            ggml_set_f32      (f->grad, 1.0f);
            ggml_graph_compute(ctx, gb);

            ggml_opt_get_grad(np, ps, g);

            *fx = ggml_get_f32_1d(f, 0);
        }

        ++count;

        if (*fx > finit + (*step)*dgtest) {
            width = dec;
        } else {
            // Armijo condition is satisfied
            if (params->lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_ARMIJO) {
                return count;
            }

            ggml_vec_dot_f32(nx, &dg, g, d);

            // check the Wolfe condition
            if (dg < params->lbfgs.wolfe * dginit) {
                width = inc;
            } else {
                if(params->lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_WOLFE) {
                    // regular Wolfe conditions
                    return count;
                }
                if(dg > -params->lbfgs.wolfe*dginit) {
                    // strong Wolfe not yet satisfied - keep shrinking the step
                    width = dec;
                } else {
                    // strong Wolfe condition (GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE)
                    return count;
                }
            }
        }

        if (*step < params->lbfgs.min_step) {
            return GGML_LINESEARCH_MINIMUM_STEP;
        }
        if (*step > params->lbfgs.max_step) {
            return GGML_LINESEARCH_MAXIMUM_STEP;
        }
        if (params->lbfgs.max_linesearch <= count) {
            return GGML_LINESEARCH_MAXIMUM_ITERATIONS;
        }

        (*step) *= width;
    }

    return GGML_LINESEARCH_FAIL;
}
static enum ggml_opt_result ggml_opt_lbfgs(
        struct ggml_context * ctx,
        struct ggml_opt_context * opt,
        struct ggml_opt_params params,
        struct ggml_tensor * f,
        struct ggml_cgraph * gf,
        struct ggml_cgraph * gb) {
    if (params.lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_WOLFE ||
        params.lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE) {
        if (params.lbfgs.wolfe <= params.lbfgs.ftol || 1.f <= params.lbfgs.wolfe) {
            return GGML_OPT_INVALID_WOLFE;
        }
    }

    gf->n_threads = params.n_threads;
    gb->n_threads = params.n_threads;

    const int m = params.lbfgs.m;

    // these will store the parameters we want to optimize
    struct ggml_tensor * ps[GGML_MAX_PARAMS];

    int np = 0;
    int nx = 0;
    for (int i = 0; i < gf->n_nodes; ++i) {
        if (gf->nodes[i]->is_param) {
            GGML_PRINT_DEBUG("found param %d: grad->op = %d\n", np, gf->nodes[i]->grad->op);

            GGML_ASSERT(np < GGML_MAX_PARAMS);

            ps[np++] = gf->nodes[i];
            nx += ggml_nelements(gf->nodes[i]);
        }
    }

    if ((opt->params.type != params.type) || (opt->nx != nx) || (opt->params.past != params.past) || (opt->params.lbfgs.m != params.lbfgs.m)) {
        int iter = opt->iter;
        ggml_opt_init(ctx, opt, params, nx);
        opt->iter = iter;
    }

    float * x  = opt->lbfgs.x->data;  // current parameters
    float * xp = opt->lbfgs.xp->data; // previous parameters
    float * g  = opt->lbfgs.g->data;  // current gradient
    float * gp = opt->lbfgs.gp->data; // previous gradient
    float * d  = opt->lbfgs.d->data;  // search direction

    float * pf = params.past > 0 ? opt->lbfgs.pf->data : NULL; // past function values

    float fx    = 0.0f; // cost function value
    float xnorm = 0.0f; // ||x||
    float gnorm = 0.0f; // ||g||

    // initialize x from the graph nodes
    ggml_opt_get_params(np, ps, x);

    // the L-BFGS memory
    float * lm_alpha = opt->lbfgs.lmal->data;
    float * lm_ys    = opt->lbfgs.lmys->data;
    float * lm_s     = opt->lbfgs.lms->data;
    float * lm_y     = opt->lbfgs.lmy->data;

    // evaluate the function value and its gradient
    {
        ggml_opt_set_params(np, ps, x);

        ggml_graph_reset  (gf);
        ggml_set_f32      (f->grad, 1.0f);
        ggml_graph_compute(ctx, gb);

        ggml_opt_get_grad(np, ps, g);

        fx = ggml_get_f32_1d(f, 0);
    }

    // search direction = -gradient
    ggml_vec_neg_f32(nx, d, g);

    // ||x||, ||g||
    ggml_vec_norm_f32(nx, &xnorm, x);
    ggml_vec_norm_f32(nx, &gnorm, g);

    if (xnorm < 1.0f) {
        xnorm = 1.0f;
    }

    // already optimized
    if (gnorm/xnorm <= params.lbfgs.eps) {
        return GGML_OPT_OK;
    }

    if (opt->just_initialized) {
        if (pf) {
            pf[0] = fx;
        }

        opt->lbfgs.fx_best = fx;

        // initial step
        ggml_vec_norm_inv_f32(nx, &opt->lbfgs.step, d);
        opt->lbfgs.j                = 0;
        opt->lbfgs.k                = 1;
        opt->lbfgs.end              = 0;
        opt->lbfgs.n_no_improvement = 0;
        opt->just_initialized       = false;
    }

    float * fx_best        = &opt->lbfgs.fx_best;
    float * step           = &opt->lbfgs.step;
    int * j                = &opt->lbfgs.j;
    int * k                = &opt->lbfgs.k;
    int * end              = &opt->lbfgs.end;
    int * n_no_improvement = &opt->lbfgs.n_no_improvement;

    int ls    = 0;
    int bound = 0;

    float ys   = 0.0f;
    float yy   = 0.0f;
    float beta = 0.0f;

    int it = 0;

    while (true) {
        // store the current position and gradient vectors
        ggml_vec_cpy_f32(nx, xp, x);
        ggml_vec_cpy_f32(nx, gp, g);

        ls = linesearch_backtracking(ctx, &params, nx, x, &fx, g, d, step, xp, f, gf, gb, np, ps);

        if (ls < 0) {
            // linesearch failed - go back to the previous point and return
            ggml_vec_cpy_f32(nx, x, xp);
            ggml_vec_cpy_f32(nx, g, gp);

            return ls;
        }

        ggml_vec_norm_f32(nx, &xnorm, x);
        ggml_vec_norm_f32(nx, &gnorm, g);

        GGML_PRINT_DEBUG("f = %10.6f\n", ggml_get_f32_1d(f, 0));

        if (xnorm < 1.0f) {
            xnorm = 1.0f;
        }
        if (gnorm/xnorm <= params.lbfgs.eps) {
            // converged
            return GGML_OPT_OK;
        }

        // delta-based convergence test
        if (pf != NULL) {
            // need at least params.past iterations to start checking for convergence
            if (params.past <= k[0]) {
                const float rate = (pf[k[0]%params.past] - fx)/fx;

                if (fabsf(rate) < params.delta) {
                    return GGML_OPT_OK;
                }
            }

            pf[k[0]%params.past] = fx;
        }

        // check for improvement
        if (params.max_no_improvement > 0) {
            if (fx < fx_best[0]) {
                fx_best[0] = fx;
                n_no_improvement[0] = 0;
            } else {
                n_no_improvement[0]++;

                if (n_no_improvement[0] >= params.max_no_improvement) {
                    return GGML_OPT_OK;
                }
            }
        }

        if (params.lbfgs.n_iter != 0 && params.lbfgs.n_iter < it + 1) {
            // reached the maximum number of iterations
            return GGML_OPT_DID_NOT_CONVERGE;
        }

        // update vectors s and y:
        //   s_{k+1} = x_{k+1} - x_{k} = \step * d_{k}.
        //   y_{k+1} = g_{k+1} - g_{k}.
        //
        ggml_vec_sub_f32(nx, &lm_s[end[0]*nx], x, xp);
        ggml_vec_sub_f32(nx, &lm_y[end[0]*nx], g, gp);

        // compute scalars ys and yy:
        //   ys = y^t \cdot s -> 1 / \rho.
        //   yy = y^t \cdot y.
        //
        ggml_vec_dot_f32(nx, &ys, &lm_y[end[0]*nx], &lm_s[end[0]*nx]);
        ggml_vec_dot_f32(nx, &yy, &lm_y[end[0]*nx], &lm_y[end[0]*nx]);

        lm_ys[end[0]] = ys;

        // find new search direction
        //   ref: https://en.wikipedia.org/wiki/Limited-memory_BFGS

        bound = (m <= k[0]) ? m : k[0];
        k[0]++;
        it++;
        end[0] = (end[0] + 1)%m;

        // initialize search direction with -g
        ggml_vec_neg_f32(nx, d, g);

        j[0] = end[0];
        for (int i = 0; i < bound; ++i) {
            j[0] = (j[0] + m - 1) % m;
            // \alpha_{j} = \rho_{j} s^{t}_{j} \cdot q_{k+1}
            ggml_vec_dot_f32(nx, &lm_alpha[j[0]], &lm_s[j[0]*nx], d);
            lm_alpha[j[0]] /= lm_ys[j[0]];
            // q_{i} = q_{i+1} - \alpha_{i} y_{i}
            ggml_vec_mad_f32(nx, d, &lm_y[j[0]*nx], -lm_alpha[j[0]]);
        }

        ggml_vec_scale_f32(nx, d, ys/yy);

        for (int i = 0; i < bound; ++i) {
            // \beta_{j} = \rho_{j} y^t_{j} \cdot \gamma_{i}
            ggml_vec_dot_f32(nx, &beta, &lm_y[j[0]*nx], d);
            beta /= lm_ys[j[0]];
            // \gamma_{i+1} = \gamma_{i} + (\alpha_{j} - \beta_{j}) s_{j}
            ggml_vec_mad_f32(nx, d, &lm_s[j[0]*nx], lm_alpha[j[0]] - beta);
            j[0] = (j[0] + 1)%m;
        }

        step[0] = 1.0;
    }

    return GGML_OPT_DID_NOT_CONVERGE;
}
struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type) {
    struct ggml_opt_params result;

    switch (type) {
        case GGML_OPT_ADAM:
            {
                result = (struct ggml_opt_params) {
                    .type      = GGML_OPT_ADAM,
                    .n_threads = 1,
                    .past      = 0,
                    .delta     = 1e-5f,

                    .max_no_improvement = 100,

                    .print_forward_graph  = true,
                    .print_backward_graph = true,

                    .adam = {
                        .n_iter = 10000,
                        .sched  = 1.000f,
                        .decay  = 0.001f,
                        .alpha  = 0.001f,
                        .beta1  = 0.9f,
                        .beta2  = 0.999f,
                        .eps    = 1e-8f,
                        .eps_f  = 1e-5f,
                        .eps_g  = 1e-3f,
                    },
                };
            } break;
        case GGML_OPT_LBFGS:
            {
                result = (struct ggml_opt_params) {
                    .type      = GGML_OPT_LBFGS,
                    .n_threads = 1,
                    .past      = 0,
                    .delta     = 1e-5f,

                    .max_no_improvement = 0,

                    .print_forward_graph  = true,
                    .print_backward_graph = true,

                    .lbfgs = {
                        .m              = 6,
                        .n_iter         = 100,
                        .max_linesearch = 20,

                        .eps      = 1e-5f,
                        .ftol     = 1e-4f,
                        .wolfe    = 0.9f,
                        .min_step = 1e-20f,
                        .max_step = 1e+20f,

                        .linesearch = GGML_LINESEARCH_DEFAULT,
                    },
                };
            } break;
    }

    return result;
}
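
// Illustrative sketch (not part of ggml.c): start from the defaults and
// override individual fields before optimizing a scalar objective `f`:
//
//     struct ggml_opt_params params = ggml_opt_default_params(GGML_OPT_ADAM);
//
//     params.n_threads   = 4;
//     params.adam.n_iter = 1000;
//
//     enum ggml_opt_result res = ggml_opt(NULL, params, f);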
GGML_API void ggml_opt_init(
        struct ggml_context * ctx,
        struct ggml_opt_context * opt,
        struct ggml_opt_params params,
        int64_t nx) {
    opt->ctx = ctx;
    opt->params = params;
    opt->iter = 0;
    opt->nx = nx;
    opt->just_initialized = true;
    switch (opt->params.type) {
        case GGML_OPT_ADAM:
            {
                opt->adam.x  = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx);
                opt->adam.g1 = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx);
                opt->adam.g2 = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx);
                opt->adam.m  = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx);
                opt->adam.v  = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx);
                opt->adam.mh = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx);
                opt->adam.vh = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx);
                opt->adam.pf = params.past > 0
                    ? ggml_new_tensor_1d(ctx, GGML_TYPE_F32, params.past)
                    : NULL;
                ggml_set_zero(opt->adam.x);
                ggml_set_zero(opt->adam.g1);
                ggml_set_zero(opt->adam.g2);
                ggml_set_zero(opt->adam.m);
                ggml_set_zero(opt->adam.v);
                ggml_set_zero(opt->adam.mh);
                ggml_set_zero(opt->adam.vh);
                if (opt->adam.pf) {
                    ggml_set_zero(opt->adam.pf);
                }
            } break;
        case GGML_OPT_LBFGS:
            {
                opt->lbfgs.x  = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx);
                opt->lbfgs.xp = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx);
                opt->lbfgs.g  = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx);
                opt->lbfgs.gp = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx);
                opt->lbfgs.d  = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx);
                opt->lbfgs.pf = params.past > 0
                    ? ggml_new_tensor_1d(ctx, GGML_TYPE_F32, params.past)
                    : NULL;
                opt->lbfgs.lmal = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, params.lbfgs.m);
                opt->lbfgs.lmys = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, params.lbfgs.m);
                opt->lbfgs.lms  = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, nx, params.lbfgs.m);
                opt->lbfgs.lmy  = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, nx, params.lbfgs.m);
                ggml_set_zero(opt->lbfgs.x);
                ggml_set_zero(opt->lbfgs.xp);
                ggml_set_zero(opt->lbfgs.g);
                ggml_set_zero(opt->lbfgs.gp);
                ggml_set_zero(opt->lbfgs.d);
                if (opt->lbfgs.pf) {
                    ggml_set_zero(opt->lbfgs.pf);
                }
                ggml_set_zero(opt->lbfgs.lmal);
                ggml_set_zero(opt->lbfgs.lmys);
                ggml_set_zero(opt->lbfgs.lms);
                ggml_set_zero(opt->lbfgs.lmy);
            } break;
    }
}
enum ggml_opt_result ggml_opt(
        struct ggml_context * ctx,
        struct ggml_opt_params params,
        struct ggml_tensor * f) {
    bool free_ctx = false;
    if (ctx == NULL) {
        struct ggml_init_params params_ctx = {
            .mem_size   = 16*1024*1024,
            .mem_buffer = NULL,
            .no_alloc   = false,
        };

        ctx = ggml_init(params_ctx);
        if (ctx == NULL) {
            return GGML_OPT_NO_CONTEXT;
        }

        free_ctx = true;
    }

    enum ggml_opt_result result = GGML_OPT_OK;

    struct ggml_opt_context * opt = (struct ggml_opt_context *) alloca(sizeof(struct ggml_opt_context));

    ggml_opt_init(ctx, opt, params, 0);
    result = ggml_opt_resume(ctx, opt, f);

    if (free_ctx) {
        ggml_free(ctx);
    }

    return result;
}
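
// Illustrative end-to-end sketch (not part of ggml.c): minimize f(x) = x*x for
// a single scalar parameter; all names are local to the example:
//
//     struct ggml_init_params ip = {
//         .mem_size   = 16*1024*1024,
//         .mem_buffer = NULL,
//         .no_alloc   = false,
//     };
//     struct ggml_context * ctx = ggml_init(ip);
//
//     struct ggml_tensor * x = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
//     ggml_set_f32  (x, 2.0f);
//     ggml_set_param(ctx, x);                       // mark x as optimizable
//
//     struct ggml_tensor * f = ggml_mul(ctx, x, x); // scalar objective
//
//     enum ggml_opt_result res = ggml_opt(NULL, ggml_opt_default_params(GGML_OPT_ADAM), f);
//
//     // on GGML_OPT_OK, x now holds a value near the minimum at 0
//     ggml_free(ctx);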
enum ggml_opt_result ggml_opt_resume(
        struct ggml_context * ctx,
        struct ggml_opt_context * opt,
        struct ggml_tensor * f) {

    // build forward + backward compute graphs
    struct ggml_tensor * gfbuf = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, sizeof(struct ggml_cgraph) / GGML_TYPE_SIZE[GGML_TYPE_I32]+ (sizeof(struct ggml_cgraph) % GGML_TYPE_SIZE[GGML_TYPE_I32] ? 1 : 0));
    struct ggml_tensor * gbbuf = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, sizeof(struct ggml_cgraph) / GGML_TYPE_SIZE[GGML_TYPE_I32]+ (sizeof(struct ggml_cgraph) % GGML_TYPE_SIZE[GGML_TYPE_I32] ? 1 : 0));

    struct ggml_cgraph * gf = (struct ggml_cgraph *) gfbuf->data;
    struct ggml_cgraph * gb = (struct ggml_cgraph *) gbbuf->data;

    *gf = ggml_build_forward (f);
    *gb = ggml_build_backward(ctx, gf, true);

    return ggml_opt_resume_g(ctx, opt, f, gf, gb);
}
enum ggml_opt_result ggml_opt_resume_g(
        struct ggml_context * ctx,
        struct ggml_opt_context * opt,
        struct ggml_tensor * f,
        struct ggml_cgraph * gf,
        struct ggml_cgraph * gb) {

    // the forward + backward compute graphs are provided by the caller
    enum ggml_opt_result result = GGML_OPT_OK;

    switch (opt->params.type) {
        case GGML_OPT_ADAM:
            {
                result = ggml_opt_adam(ctx, opt, opt->params, f, gf, gb);
            } break;
        case GGML_OPT_LBFGS:
            {
                result = ggml_opt_lbfgs(ctx, opt, opt->params, f, gf, gb);
            } break;
    }

    if (opt->params.print_forward_graph) {
        ggml_graph_print   (gf);
        ggml_graph_dump_dot(gf, NULL, "opt-forward.dot");
    }

    if (opt->params.print_backward_graph) {
        ggml_graph_print   (gb);
        ggml_graph_dump_dot(gb, gf, "opt-backward.dot");
    }

    return result;
}
////////////////////////////////////////////////////////////////////////////////

size_t ggml_quantize_q4_0(const float * src, void * dst, int n, int k, int64_t * hist) {
    assert(k % QK4_0 == 0);
    const int nb = k / QK4_0;

    for (int b = 0; b < n; b += k) {
        block_q4_0 * restrict y = (block_q4_0 *) dst + b/QK4_0;

        quantize_row_q4_0_reference(src + b, y, k);

        for (int i = 0; i < nb; i++) {
            for (int j = 0; j < QK4_0; j += 2) {
                // two 4-bit values are packed per byte - tally both nibbles
                const uint8_t vi0 = y[i].qs[j/2] & 0x0F;
                const uint8_t vi1 = y[i].qs[j/2] >> 4;

                hist[vi0]++;
                hist[vi1]++;
            }
        }
    }

    return (n/QK4_0*sizeof(block_q4_0));
}

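// Illustrative usage sketch (not part of the library): quantizing a buffer of
// n floats arranged as rows of k elements while collecting the nibble
// histogram. All names below are placeholders.
//
//     enum { N = 4096 };
//     float      data[N];          // filled with the values to quantize
//     int64_t    hist[16] = {0};   // one bin per 4-bit quantized value
//     block_q4_0 buf[N/QK4_0];
//
//     size_t written = ggml_quantize_q4_0(data, buf, N, N, hist);
//     // written == sizeof(buf); hist[v] counts occurrences of nibble value v
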
size_t ggml_quantize_q4_1(const float * src, void * dst, int n, int k, int64_t * hist) {
    assert(k % QK4_1 == 0);
    const int nb = k / QK4_1;

    for (int b = 0; b < n; b += k) {
        block_q4_1 * restrict y = (block_q4_1 *) dst + b/QK4_1;

        quantize_row_q4_1_reference(src + b, y, k);

        for (int i = 0; i < nb; i++) {
            for (int j = 0; j < QK4_1; j += 2) {
                const uint8_t vi0 = y[i].qs[j/2] & 0x0F;
                const uint8_t vi1 = y[i].qs[j/2] >> 4;

                hist[vi0]++;
                hist[vi1]++;
            }
        }
    }

    return (n/QK4_1*sizeof(block_q4_1));
}

size_t ggml_quantize_q5_0(const float * src, void * dst, int n, int k, int64_t * hist) {
    assert(k % QK5_0 == 0);
    const int nb = k / QK5_0;

    for (int b = 0; b < n; b += k) {
        block_q5_0 * restrict y = (block_q5_0 *)dst + b/QK5_0;

        quantize_row_q5_0_reference(src + b, y, k);

        for (int i = 0; i < nb; i++) {
            uint32_t qh;
            memcpy(&qh, &y[i].qh, sizeof(qh));

            for (int j = 0; j < QK5_0; j += 2) {
                // extract the high (5th) bit of each value from qh and move it to bit 4
                const uint8_t vh0 = ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4;
                const uint8_t vh1 = ((qh & (1u << (j + 16))) >> (j + 12));

                // fold the 5-bit values (0..31) into 16 histogram bins
                const uint8_t vi0 = ((y[i].qs[j/2] & 0x0F) | vh0) / 2;
                const uint8_t vi1 = ((y[i].qs[j/2] >>   4) | vh1) / 2;

                hist[vi0]++;
                hist[vi1]++;
            }
        }
    }

    return (n/QK5_0*sizeof(block_q5_0));
}

size_t ggml_quantize_q5_1(const float * src, void * dst, int n, int k, int64_t * hist) {
    assert(k % QK5_1 == 0);
    const int nb = k / QK5_1;

    for (int b = 0; b < n; b += k) {
        block_q5_1 * restrict y = (block_q5_1 *)dst + b/QK5_1;

        quantize_row_q5_1_reference(src + b, y, k);

        for (int i = 0; i < nb; i++) {
            uint32_t qh;
            memcpy(&qh, &y[i].qh, sizeof(qh));

            for (int j = 0; j < QK5_1; j += 2) {
                // extract the high (5th) bit of each value from qh and move it to bit 4
                const uint8_t vh0 = ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4;
                const uint8_t vh1 = ((qh & (1u << (j + 16))) >> (j + 12));

                // fold the 5-bit values (0..31) into 16 histogram bins
                const uint8_t vi0 = ((y[i].qs[j/2] & 0x0F) | vh0) / 2;
                const uint8_t vi1 = ((y[i].qs[j/2] >>   4) | vh1) / 2;

                hist[vi0]++;
                hist[vi1]++;
            }
        }
    }

    return (n/QK5_1*sizeof(block_q5_1));
}

size_t ggml_quantize_q8_0(const float * src, void * dst, int n, int k, int64_t * hist) {
    assert(k % QK8_0 == 0);
    const int nb = k / QK8_0;

    for (int b = 0; b < n; b += k) {
        block_q8_0 * restrict y = (block_q8_0 *)dst + b/QK8_0;

        quantize_row_q8_0_reference(src + b, y, k);

        for (int i = 0; i < nb; i++) {
            for (int j = 0; j < QK8_0; ++j) {
                const int8_t vi = y[i].qs[j];

                // map the signed 8-bit value (-128..127) to one of 16 bins;
                // integer division truncates toward zero, so -15..15 all land in bin 8
                hist[vi/16 + 8]++;
            }
        }
    }

    return (n/QK8_0*sizeof(block_q8_0));
}

size_t ggml_quantize_chunk(enum ggml_type type, const float * src, void * dst, int start, int n, int64_t * hist) {
    size_t result = 0;
    switch (type) {
        case GGML_TYPE_Q4_0:
            {
                GGML_ASSERT(start % QK4_0 == 0);
                block_q4_0 * block = (block_q4_0*)dst + start / QK4_0;
                // k == n: the whole chunk is quantized as a single "row"
                result = ggml_quantize_q4_0(src + start, block, n, n, hist);
            } break;
        case GGML_TYPE_Q4_1:
            {
                GGML_ASSERT(start % QK4_1 == 0);
                block_q4_1 * block = (block_q4_1*)dst + start / QK4_1;
                result = ggml_quantize_q4_1(src + start, block, n, n, hist);
            } break;
        case GGML_TYPE_Q5_0:
            {
                GGML_ASSERT(start % QK5_0 == 0);
                block_q5_0 * block = (block_q5_0*)dst + start / QK5_0;
                result = ggml_quantize_q5_0(src + start, block, n, n, hist);
            } break;
        case GGML_TYPE_Q5_1:
            {
                GGML_ASSERT(start % QK5_1 == 0);
                block_q5_1 * block = (block_q5_1*)dst + start / QK5_1;
                result = ggml_quantize_q5_1(src + start, block, n, n, hist);
            } break;
        case GGML_TYPE_Q8_0:
            {
                GGML_ASSERT(start % QK8_0 == 0);
                block_q8_0 * block = (block_q8_0*)dst + start / QK8_0;
                result = ggml_quantize_q8_0(src + start, block, n, n, hist);
            } break;
#ifdef GGML_USE_K_QUANTS
        case GGML_TYPE_Q2_K:
            {
                GGML_ASSERT(start % QK_K == 0);
                block_q2_K * block = (block_q2_K*)dst + start / QK_K;
                result = ggml_quantize_q2_K(src + start, block, n, n, hist);
            } break;
        case GGML_TYPE_Q3_K:
            {
                GGML_ASSERT(start % QK_K == 0);
                block_q3_K * block = (block_q3_K*)dst + start / QK_K;
                result = ggml_quantize_q3_K(src + start, block, n, n, hist);
            } break;
        case GGML_TYPE_Q4_K:
            {
                GGML_ASSERT(start % QK_K == 0);
                block_q4_K * block = (block_q4_K*)dst + start / QK_K;
                result = ggml_quantize_q4_K(src + start, block, n, n, hist);
            } break;
        case GGML_TYPE_Q5_K:
            {
                GGML_ASSERT(start % QK_K == 0);
                block_q5_K * block = (block_q5_K*)dst + start / QK_K;
                result = ggml_quantize_q5_K(src + start, block, n, n, hist);
            } break;
        case GGML_TYPE_Q6_K:
            {
                GGML_ASSERT(start % QK_K == 0);
                block_q6_K * block = (block_q6_K*)dst + start / QK_K;
                result = ggml_quantize_q6_K(src + start, block, n, n, hist);
            } break;
#endif
        case GGML_TYPE_F16:
            {
                // f16 is a plain conversion - no blocks, no histogram
                int elemsize = sizeof(ggml_fp16_t);
                ggml_fp32_to_fp16_row(src + start, (ggml_fp16_t *)dst + start, n);
                result = n * elemsize;
            } break;
        case GGML_TYPE_F32:
            {
                int elemsize = sizeof(float);
                result = n * elemsize;
                memcpy((uint8_t *)dst + start * elemsize, src + start, result);
            } break;
        default:
            assert(false);
    }
    return result;
}

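// Illustrative usage sketch (not part of the library): because a chunk of
// `n` values starting at element `start` is quantized independently, a large
// tensor can be split across worker threads. Each thread should pass its own
// hist buffer (the counters are plain increments, not atomic) and the partial
// histograms can be summed afterwards. All names below are placeholders.
//
//     // two workers quantize disjoint halves of an 8192-element tensor
//     size_t s0 = ggml_quantize_chunk(GGML_TYPE_Q4_0, src, dst, 0,    4096, hist0);
//     size_t s1 = ggml_quantize_chunk(GGML_TYPE_Q4_0, src, dst, 4096, 4096, hist1);
//     // s0 + s1 bytes of dst are valid; `start` must be a multiple of the block size
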
////////////////////////////////////////////////////////////////////////////////

int ggml_cpu_has_avx(void) {
#if defined(__AVX__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_avx2(void) {
#if defined(__AVX2__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_avx512(void) {
#if defined(__AVX512F__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_avx512_vbmi(void) {
#if defined(__AVX512VBMI__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_avx512_vnni(void) {
#if defined(__AVX512VNNI__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_fma(void) {
#if defined(__FMA__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_neon(void) {
#if defined(__ARM_NEON)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_arm_fma(void) {
#if defined(__ARM_FEATURE_FMA)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_f16c(void) {
#if defined(__F16C__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_fp16_va(void) {
#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_wasm_simd(void) {
#if defined(__wasm_simd128__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_blas(void) {
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) || defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_cublas(void) {
#if defined(GGML_USE_CUBLAS)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_clblast(void) {
#if defined(GGML_USE_CLBLAST)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_gpublas(void) {
    return ggml_cpu_has_cublas() || ggml_cpu_has_clblast();
}

int ggml_cpu_has_sse3(void) {
#if defined(__SSE3__)
    return 1;
#else
    return 0;
#endif
}

int ggml_cpu_has_vsx(void) {
#if defined(__POWER9_VECTOR__)
    return 1;
#else
    return 0;
#endif
}

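// Illustrative usage sketch (not part of the library): these predicates report
// compile-time target features (preprocessor macros), not runtime CPU
// detection - a binary built without AVX2 enabled returns 0 from
// ggml_cpu_has_avx2() even on an AVX2-capable machine. They are typically
// aggregated into a capability string for diagnostics:
//
//     printf("AVX = %d | AVX2 = %d | FMA = %d | NEON = %d | BLAS = %d\n",
//            ggml_cpu_has_avx(), ggml_cpu_has_avx2(), ggml_cpu_has_fma(),
//            ggml_cpu_has_neon(), ggml_cpu_has_blas());
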
////////////////////////////////////////////////////////////////////////////////